Column          Dtype          Min    Max
function_name   stringlengths  1      63
docstring       stringlengths  50     5.89k
masked_code     stringlengths  50     882k
implementation  stringlengths  169    12.9k
start_line      int32          1      14.6k
end_line        int32          16     14.6k
file_content    stringlengths  274    882k
add_collision_mesh
Add a collision mesh to the planning scene.

Parameters
----------
collision_mesh : :class:`compas_fab.robots.CollisionMesh`
    Object containing the collision mesh to be added.
options : dict, optional
    Unused parameter.

Returns
-------
``None``
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from compas.utilities import await_callback

from compas_fab.backends.interfaces import AddCollisionMesh
from compas_fab.backends.ros.messages import ApplyPlanningSceneRequest
from compas_fab.backends.ros.messages import ApplyPlanningSceneResponse
from compas_fab.backends.ros.messages import CollisionObject
from compas_fab.backends.ros.messages import PlanningScene
from compas_fab.backends.ros.messages import PlanningSceneWorld
from compas_fab.backends.ros.service_description import ServiceDescription

__all__ = [
    'MoveItAddCollisionMesh',
]


class MoveItAddCollisionMesh(AddCollisionMesh):
    """Callable to add a collision mesh to the planning scene."""

    APPLY_PLANNING_SCENE = ServiceDescription('/apply_planning_scene',
                                              'ApplyPlanningScene',
                                              ApplyPlanningSceneRequest,
                                              ApplyPlanningSceneResponse,
                                              )

    def __init__(self, ros_client):
        self.ros_client = ros_client

    # MASKED: add_collision_mesh function (lines 31-49)

    def add_collision_mesh_async(self, callback, errback, collision_mesh):
        co = CollisionObject.from_collision_mesh(collision_mesh)
        co.operation = CollisionObject.ADD
        world = PlanningSceneWorld(collision_objects=[co])
        scene = PlanningScene(world=world, is_diff=True)
        request = scene.to_request(self.ros_client.ros_distro)
        self.APPLY_PLANNING_SCENE(self.ros_client, request, callback, errback)
def add_collision_mesh(self, collision_mesh, options=None):
    """Add a collision mesh to the planning scene.

    Parameters
    ----------
    collision_mesh : :class:`compas_fab.robots.CollisionMesh`
        Object containing the collision mesh to be added.
    options : dict, optional
        Unused parameter.

    Returns
    -------
    ``None``
    """
    kwargs = {}
    kwargs['collision_mesh'] = collision_mesh
    kwargs['errback_name'] = 'errback'

    return await_callback(self.add_collision_mesh_async, **kwargs)
31
49
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from compas.utilities import await_callback

from compas_fab.backends.interfaces import AddCollisionMesh
from compas_fab.backends.ros.messages import ApplyPlanningSceneRequest
from compas_fab.backends.ros.messages import ApplyPlanningSceneResponse
from compas_fab.backends.ros.messages import CollisionObject
from compas_fab.backends.ros.messages import PlanningScene
from compas_fab.backends.ros.messages import PlanningSceneWorld
from compas_fab.backends.ros.service_description import ServiceDescription

__all__ = [
    'MoveItAddCollisionMesh',
]


class MoveItAddCollisionMesh(AddCollisionMesh):
    """Callable to add a collision mesh to the planning scene."""

    APPLY_PLANNING_SCENE = ServiceDescription('/apply_planning_scene',
                                              'ApplyPlanningScene',
                                              ApplyPlanningSceneRequest,
                                              ApplyPlanningSceneResponse,
                                              )

    def __init__(self, ros_client):
        self.ros_client = ros_client

    def add_collision_mesh(self, collision_mesh, options=None):
        """Add a collision mesh to the planning scene.

        Parameters
        ----------
        collision_mesh : :class:`compas_fab.robots.CollisionMesh`
            Object containing the collision mesh to be added.
        options : dict, optional
            Unused parameter.

        Returns
        -------
        ``None``
        """
        kwargs = {}
        kwargs['collision_mesh'] = collision_mesh
        kwargs['errback_name'] = 'errback'

        return await_callback(self.add_collision_mesh_async, **kwargs)

    def add_collision_mesh_async(self, callback, errback, collision_mesh):
        co = CollisionObject.from_collision_mesh(collision_mesh)
        co.operation = CollisionObject.ADD
        world = PlanningSceneWorld(collision_objects=[co])
        scene = PlanningScene(world=world, is_diff=True)
        request = scene.to_request(self.ros_client.ros_distro)
        self.APPLY_PLANNING_SCENE(self.ros_client, request, callback, errback)
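For orientation, a minimal usage sketch of the callable in this record (not part of the dataset itself). It assumes a running ROS/MoveIt backend reachable through `RosClient`; the STL path 'floor.stl' and the object name 'floor' are hypothetical placeholders, and `MoveItAddCollisionMesh` is the class defined in the file above.

from compas.datastructures import Mesh
from compas_fab.backends import RosClient
from compas_fab.robots import CollisionMesh

# A running ROS/MoveIt backend is assumed; the mesh file and name are placeholders.
with RosClient() as client:
    mesh = Mesh.from_stl('floor.stl')
    collision_mesh = CollisionMesh(mesh, 'floor')

    # MoveItAddCollisionMesh is defined in the module shown above.
    # add_collision_mesh() blocks until the /apply_planning_scene service responds.
    add = MoveItAddCollisionMesh(client)
    add.add_collision_mesh(collision_mesh)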
get_init_code
Gets initial latent codes as the start point for optimization. The input image is assumed to have already been preprocessed, meaning to have shape [self.G.image_channels, self.G.resolution, self.G.resolution], channel order `self.G.channel_order`, and pixel range [self.G.min_val, self.G.max_val].
# python 3.7 """Utility functions to invert a given image back to a latent code.""" from tqdm import tqdm import cv2 import numpy as np import torch from models.stylegan_generator import StyleGANGenerator from models.stylegan_encoder import StyleGANEncoder from models.perceptual_model import PerceptualModel __all__ = ['StyleGANInverter'] def _softplus(x): """Implements the softplus function.""" return torch.nn.functional.softplus(x, beta=1, threshold=10000) def _get_tensor_value(tensor): """Gets the value of a torch Tensor.""" return tensor.cpu().detach().numpy() class StyleGANInverter(object): """Defines the class for StyleGAN inversion. Even having the encoder, the output latent code is not good enough to recover the target image satisfyingly. To this end, this class optimize the latent code based on gradient descent algorithm. In the optimization process, following loss functions will be considered: (1) Pixel-wise reconstruction loss. (required) (2) Perceptual loss. (optional, but recommended) (3) Regularization loss from encoder. (optional, but recommended for in-domain inversion) NOTE: The encoder can be missing for inversion, in which case the latent code will be randomly initialized and the regularization loss will be ignored. """ def __init__(self, model_name, learning_rate=1e-2, iteration=100, reconstruction_loss_weight=1.0, perceptual_loss_weight=5e-5, regularization_loss_weight=2.0, logger=None): """Initializes the inverter. NOTE: Only Adam optimizer is supported in the optimization process. Args: model_name: Name of the model on which the inverted is based. The model should be first registered in `models/model_settings.py`. logger: Logger to record the log message. learning_rate: Learning rate for optimization. (default: 1e-2) iteration: Number of iterations for optimization. (default: 100) reconstruction_loss_weight: Weight for reconstruction loss. Should always be a positive number. (default: 1.0) perceptual_loss_weight: Weight for perceptual loss. 0 disables perceptual loss. (default: 5e-5) regularization_loss_weight: Weight for regularization loss from encoder. This is essential for in-domain inversion. However, this loss will automatically ignored if the generative model does not include a valid encoder. 0 disables regularization loss. (default: 2.0) """ self.logger = logger self.model_name = model_name self.gan_type = 'stylegan' self.G = StyleGANGenerator(self.model_name, self.logger) self.E = StyleGANEncoder(self.model_name, self.logger) self.F = PerceptualModel(min_val=self.G.min_val, max_val=self.G.max_val) self.encode_dim = [self.G.num_layers, self.G.w_space_dim] self.run_device = self.G.run_device assert list(self.encode_dim) == list(self.E.encode_dim) assert self.G.gan_type == self.gan_type assert self.E.gan_type == self.gan_type self.learning_rate = learning_rate self.iteration = iteration self.loss_pix_weight = reconstruction_loss_weight self.loss_feat_weight = perceptual_loss_weight self.loss_reg_weight = regularization_loss_weight assert self.loss_pix_weight > 0 def preprocess(self, image): """Preprocesses a single image. This function assumes the input numpy array is with shape [height, width, channel], channel order `RGB`, and pixel range [0, 255]. The returned image is with shape [channel, new_height, new_width], where `new_height` and `new_width` are specified by the given generative model. The channel order of returned image is also specified by the generative model. 
The pixel range is shifted to [min_val, max_val], where `min_val` and `max_val` are also specified by the generative model. """ if not isinstance(image, np.ndarray): raise ValueError(f'Input image should be with type `numpy.ndarray`!') if image.dtype != np.uint8: raise ValueError(f'Input image should be with dtype `numpy.uint8`!') if image.ndim != 3 or image.shape[2] not in [1, 3]: raise ValueError(f'Input should be with shape [height, width, channel], ' f'where channel equals to 1 or 3!\n' f'But {image.shape} is received!') if image.shape[2] == 1 and self.G.image_channels == 3: image = np.tile(image, (1, 1, 3)) if image.shape[2] != self.G.image_channels: raise ValueError(f'Number of channels of input image, which is ' f'{image.shape[2]}, is not supported by the current ' f'inverter, which requires {self.G.image_channels} ' f'channels!') if self.G.image_channels == 3 and self.G.channel_order == 'BGR': image = image[:, :, ::-1] if image.shape[1:3] != [self.G.resolution, self.G.resolution]: image = cv2.resize(image, (self.G.resolution, self.G.resolution)) image = image.astype(np.float32) image = image / 255.0 * (self.G.max_val - self.G.min_val) + self.G.min_val image = image.astype(np.float32).transpose(2, 0, 1) return image # MASKED: get_init_code function (lines 131-142) def invert(self, image, num_viz=0): """Inverts the given image to a latent code. Basically, this function is based on gradient descent algorithm. Args: image: Target image to invert, which is assumed to have already been preprocessed. num_viz: Number of intermediate outputs to visualize. (default: 0) Returns: A two-element tuple. First one is the inverted code. Second one is a list of intermediate results, where first image is the input image, second one is the reconstructed result from the initial latent code, remainings are from the optimization process every `self.iteration // num_viz` steps. """ x = image[np.newaxis] x = self.G.to_tensor(x.astype(np.float32)) x.requires_grad = False init_z = self.get_init_code(image) z = torch.Tensor(init_z).to(self.run_device) z.requires_grad = True optimizer = torch.optim.Adam([z], lr=self.learning_rate) viz_results = [] viz_results.append(self.G.postprocess(_get_tensor_value(x))[0]) x_init_inv = self.G.net.synthesis(z) viz_results.append(self.G.postprocess(_get_tensor_value(x_init_inv))[0]) pbar = tqdm(range(1, self.iteration + 1), leave=True) for step in pbar: loss = 0.0 # Reconstruction loss. x_rec = self.G.net.synthesis(z) loss_pix = torch.mean((x - x_rec) ** 2) loss = loss + loss_pix * self.loss_pix_weight log_message = f'loss_pix: {_get_tensor_value(loss_pix):.3f}' # Perceptual loss. if self.loss_feat_weight: x_feat = self.F.net(x) x_rec_feat = self.F.net(x_rec) loss_feat = torch.mean((x_feat - x_rec_feat) ** 2) loss = loss + loss_feat * self.loss_feat_weight log_message += f', loss_feat: {_get_tensor_value(loss_feat):.3f}' # Regularization loss. if self.loss_reg_weight: z_rec = self.E.net(x_rec).view(1, *self.encode_dim) loss_reg = torch.mean((z - z_rec) ** 2) loss = loss + loss_reg * self.loss_reg_weight log_message += f', loss_reg: {_get_tensor_value(loss_reg):.3f}' log_message += f', loss: {_get_tensor_value(loss):.3f}' pbar.set_description_str(log_message) if self.logger: self.logger.debug(f'Step: {step:05d}, ' f'lr: {self.learning_rate:.2e}, ' f'{log_message}') # Do optimization. 
optimizer.zero_grad() loss.backward() optimizer.step() if num_viz > 0 and step % (self.iteration // num_viz) == 0: viz_results.append(self.G.postprocess(_get_tensor_value(x_rec))[0]) return _get_tensor_value(z), viz_results def easy_invert(self, image, num_viz=0): """Wraps functions `preprocess()` and `invert()` together.""" return self.invert(self.preprocess(image), num_viz) def diffuse(self, target, context, center_x, center_y, crop_x, crop_y, num_viz=0): """Diffuses the target image to a context image. Basically, this function is a motified version of `self.invert()`. More concretely, the encoder regularizer is removed from the objectives and the reconstruction loss is computed from the masked region. Args: target: Target image (foreground). context: Context image (background). center_x: The x-coordinate of the crop center. center_y: The y-coordinate of the crop center. crop_x: The crop size along the x-axis. crop_y: The crop size along the y-axis. num_viz: Number of intermediate outputs to visualize. (default: 0) Returns: A two-element tuple. First one is the inverted code. Second one is a list of intermediate results, where first image is the direct copy-paste image, second one is the reconstructed result from the initial latent code, remainings are from the optimization process every `self.iteration // num_viz` steps. """ image_shape = (self.G.image_channels, self.G.resolution, self.G.resolution) mask = np.zeros((1, *image_shape), dtype=np.float32) xx = center_x - crop_x // 2 yy = center_y - crop_y // 2 mask[:, :, yy:yy + crop_y, xx:xx + crop_x] = 1.0 target = target[np.newaxis] context = context[np.newaxis] x = target * mask + context * (1 - mask) x = self.G.to_tensor(x.astype(np.float32)) x.requires_grad = False mask = self.G.to_tensor(mask.astype(np.float32)) mask.requires_grad = False init_z = _get_tensor_value(self.E.net(x).view(1, *self.encode_dim)) init_z = init_z.astype(np.float32) z = torch.Tensor(init_z).to(self.run_device) z.requires_grad = True optimizer = torch.optim.Adam([z], lr=self.learning_rate) viz_results = [] viz_results.append(self.G.postprocess(_get_tensor_value(x))[0]) x_init_inv = self.G.net.synthesis(z) viz_results.append(self.G.postprocess(_get_tensor_value(x_init_inv))[0]) pbar = tqdm(range(1, self.iteration + 1), leave=True) for step in pbar: loss = 0.0 # Reconstruction loss. x_rec = self.G.net.synthesis(z) loss_pix = torch.mean(((x - x_rec) * mask) ** 2) loss = loss + loss_pix * self.loss_pix_weight log_message = f'loss_pix: {_get_tensor_value(loss_pix):.3f}' # Perceptual loss. if self.loss_feat_weight: x_feat = self.F.net(x * mask) x_rec_feat = self.F.net(x_rec * mask) loss_feat = torch.mean((x_feat - x_rec_feat) ** 2) loss = loss + loss_feat * self.loss_feat_weight log_message += f', loss_feat: {_get_tensor_value(loss_feat):.3f}' log_message += f', loss: {_get_tensor_value(loss):.3f}' pbar.set_description_str(log_message) if self.logger: self.logger.debug(f'Step: {step:05d}, ' f'lr: {self.learning_rate:.2e}, ' f'{log_message}') # Do optimization. optimizer.zero_grad() loss.backward() optimizer.step() if num_viz > 0 and step % (self.iteration // num_viz) == 0: viz_results.append(self.G.postprocess(_get_tensor_value(x_rec))[0]) return _get_tensor_value(z), viz_results def easy_diffuse(self, target, context, *args, **kwargs): """Wraps functions `preprocess()` and `diffuse()` together.""" return self.diffuse(self.preprocess(target), self.preprocess(context), *args, **kwargs)
def get_init_code(self, image):
    """Gets initial latent codes as the start point for optimization.

    The input image is assumed to have already been preprocessed, meaning to
    have shape [self.G.image_channels, self.G.resolution, self.G.resolution],
    channel order `self.G.channel_order`, and pixel range [self.G.min_val,
    self.G.max_val].
    """
    x = image[np.newaxis]
    x = self.G.to_tensor(x.astype(np.float32))
    z = _get_tensor_value(self.E.net(x).view(1, *self.encode_dim))
    return z.astype(np.float32)
131
142
# python 3.7 """Utility functions to invert a given image back to a latent code.""" from tqdm import tqdm import cv2 import numpy as np import torch from models.stylegan_generator import StyleGANGenerator from models.stylegan_encoder import StyleGANEncoder from models.perceptual_model import PerceptualModel __all__ = ['StyleGANInverter'] def _softplus(x): """Implements the softplus function.""" return torch.nn.functional.softplus(x, beta=1, threshold=10000) def _get_tensor_value(tensor): """Gets the value of a torch Tensor.""" return tensor.cpu().detach().numpy() class StyleGANInverter(object): """Defines the class for StyleGAN inversion. Even having the encoder, the output latent code is not good enough to recover the target image satisfyingly. To this end, this class optimize the latent code based on gradient descent algorithm. In the optimization process, following loss functions will be considered: (1) Pixel-wise reconstruction loss. (required) (2) Perceptual loss. (optional, but recommended) (3) Regularization loss from encoder. (optional, but recommended for in-domain inversion) NOTE: The encoder can be missing for inversion, in which case the latent code will be randomly initialized and the regularization loss will be ignored. """ def __init__(self, model_name, learning_rate=1e-2, iteration=100, reconstruction_loss_weight=1.0, perceptual_loss_weight=5e-5, regularization_loss_weight=2.0, logger=None): """Initializes the inverter. NOTE: Only Adam optimizer is supported in the optimization process. Args: model_name: Name of the model on which the inverted is based. The model should be first registered in `models/model_settings.py`. logger: Logger to record the log message. learning_rate: Learning rate for optimization. (default: 1e-2) iteration: Number of iterations for optimization. (default: 100) reconstruction_loss_weight: Weight for reconstruction loss. Should always be a positive number. (default: 1.0) perceptual_loss_weight: Weight for perceptual loss. 0 disables perceptual loss. (default: 5e-5) regularization_loss_weight: Weight for regularization loss from encoder. This is essential for in-domain inversion. However, this loss will automatically ignored if the generative model does not include a valid encoder. 0 disables regularization loss. (default: 2.0) """ self.logger = logger self.model_name = model_name self.gan_type = 'stylegan' self.G = StyleGANGenerator(self.model_name, self.logger) self.E = StyleGANEncoder(self.model_name, self.logger) self.F = PerceptualModel(min_val=self.G.min_val, max_val=self.G.max_val) self.encode_dim = [self.G.num_layers, self.G.w_space_dim] self.run_device = self.G.run_device assert list(self.encode_dim) == list(self.E.encode_dim) assert self.G.gan_type == self.gan_type assert self.E.gan_type == self.gan_type self.learning_rate = learning_rate self.iteration = iteration self.loss_pix_weight = reconstruction_loss_weight self.loss_feat_weight = perceptual_loss_weight self.loss_reg_weight = regularization_loss_weight assert self.loss_pix_weight > 0 def preprocess(self, image): """Preprocesses a single image. This function assumes the input numpy array is with shape [height, width, channel], channel order `RGB`, and pixel range [0, 255]. The returned image is with shape [channel, new_height, new_width], where `new_height` and `new_width` are specified by the given generative model. The channel order of returned image is also specified by the generative model. 
The pixel range is shifted to [min_val, max_val], where `min_val` and `max_val` are also specified by the generative model. """ if not isinstance(image, np.ndarray): raise ValueError(f'Input image should be with type `numpy.ndarray`!') if image.dtype != np.uint8: raise ValueError(f'Input image should be with dtype `numpy.uint8`!') if image.ndim != 3 or image.shape[2] not in [1, 3]: raise ValueError(f'Input should be with shape [height, width, channel], ' f'where channel equals to 1 or 3!\n' f'But {image.shape} is received!') if image.shape[2] == 1 and self.G.image_channels == 3: image = np.tile(image, (1, 1, 3)) if image.shape[2] != self.G.image_channels: raise ValueError(f'Number of channels of input image, which is ' f'{image.shape[2]}, is not supported by the current ' f'inverter, which requires {self.G.image_channels} ' f'channels!') if self.G.image_channels == 3 and self.G.channel_order == 'BGR': image = image[:, :, ::-1] if image.shape[1:3] != [self.G.resolution, self.G.resolution]: image = cv2.resize(image, (self.G.resolution, self.G.resolution)) image = image.astype(np.float32) image = image / 255.0 * (self.G.max_val - self.G.min_val) + self.G.min_val image = image.astype(np.float32).transpose(2, 0, 1) return image def get_init_code(self, image): """Gets initial latent codes as the start point for optimization. The input image is assumed to have already been preprocessed, meaning to have shape [self.G.image_channels, self.G.resolution, self.G.resolution], channel order `self.G.channel_order`, and pixel range [self.G.min_val, self.G.max_val]. """ x = image[np.newaxis] x = self.G.to_tensor(x.astype(np.float32)) z = _get_tensor_value(self.E.net(x).view(1, *self.encode_dim)) return z.astype(np.float32) def invert(self, image, num_viz=0): """Inverts the given image to a latent code. Basically, this function is based on gradient descent algorithm. Args: image: Target image to invert, which is assumed to have already been preprocessed. num_viz: Number of intermediate outputs to visualize. (default: 0) Returns: A two-element tuple. First one is the inverted code. Second one is a list of intermediate results, where first image is the input image, second one is the reconstructed result from the initial latent code, remainings are from the optimization process every `self.iteration // num_viz` steps. """ x = image[np.newaxis] x = self.G.to_tensor(x.astype(np.float32)) x.requires_grad = False init_z = self.get_init_code(image) z = torch.Tensor(init_z).to(self.run_device) z.requires_grad = True optimizer = torch.optim.Adam([z], lr=self.learning_rate) viz_results = [] viz_results.append(self.G.postprocess(_get_tensor_value(x))[0]) x_init_inv = self.G.net.synthesis(z) viz_results.append(self.G.postprocess(_get_tensor_value(x_init_inv))[0]) pbar = tqdm(range(1, self.iteration + 1), leave=True) for step in pbar: loss = 0.0 # Reconstruction loss. x_rec = self.G.net.synthesis(z) loss_pix = torch.mean((x - x_rec) ** 2) loss = loss + loss_pix * self.loss_pix_weight log_message = f'loss_pix: {_get_tensor_value(loss_pix):.3f}' # Perceptual loss. if self.loss_feat_weight: x_feat = self.F.net(x) x_rec_feat = self.F.net(x_rec) loss_feat = torch.mean((x_feat - x_rec_feat) ** 2) loss = loss + loss_feat * self.loss_feat_weight log_message += f', loss_feat: {_get_tensor_value(loss_feat):.3f}' # Regularization loss. 
if self.loss_reg_weight: z_rec = self.E.net(x_rec).view(1, *self.encode_dim) loss_reg = torch.mean((z - z_rec) ** 2) loss = loss + loss_reg * self.loss_reg_weight log_message += f', loss_reg: {_get_tensor_value(loss_reg):.3f}' log_message += f', loss: {_get_tensor_value(loss):.3f}' pbar.set_description_str(log_message) if self.logger: self.logger.debug(f'Step: {step:05d}, ' f'lr: {self.learning_rate:.2e}, ' f'{log_message}') # Do optimization. optimizer.zero_grad() loss.backward() optimizer.step() if num_viz > 0 and step % (self.iteration // num_viz) == 0: viz_results.append(self.G.postprocess(_get_tensor_value(x_rec))[0]) return _get_tensor_value(z), viz_results def easy_invert(self, image, num_viz=0): """Wraps functions `preprocess()` and `invert()` together.""" return self.invert(self.preprocess(image), num_viz) def diffuse(self, target, context, center_x, center_y, crop_x, crop_y, num_viz=0): """Diffuses the target image to a context image. Basically, this function is a motified version of `self.invert()`. More concretely, the encoder regularizer is removed from the objectives and the reconstruction loss is computed from the masked region. Args: target: Target image (foreground). context: Context image (background). center_x: The x-coordinate of the crop center. center_y: The y-coordinate of the crop center. crop_x: The crop size along the x-axis. crop_y: The crop size along the y-axis. num_viz: Number of intermediate outputs to visualize. (default: 0) Returns: A two-element tuple. First one is the inverted code. Second one is a list of intermediate results, where first image is the direct copy-paste image, second one is the reconstructed result from the initial latent code, remainings are from the optimization process every `self.iteration // num_viz` steps. """ image_shape = (self.G.image_channels, self.G.resolution, self.G.resolution) mask = np.zeros((1, *image_shape), dtype=np.float32) xx = center_x - crop_x // 2 yy = center_y - crop_y // 2 mask[:, :, yy:yy + crop_y, xx:xx + crop_x] = 1.0 target = target[np.newaxis] context = context[np.newaxis] x = target * mask + context * (1 - mask) x = self.G.to_tensor(x.astype(np.float32)) x.requires_grad = False mask = self.G.to_tensor(mask.astype(np.float32)) mask.requires_grad = False init_z = _get_tensor_value(self.E.net(x).view(1, *self.encode_dim)) init_z = init_z.astype(np.float32) z = torch.Tensor(init_z).to(self.run_device) z.requires_grad = True optimizer = torch.optim.Adam([z], lr=self.learning_rate) viz_results = [] viz_results.append(self.G.postprocess(_get_tensor_value(x))[0]) x_init_inv = self.G.net.synthesis(z) viz_results.append(self.G.postprocess(_get_tensor_value(x_init_inv))[0]) pbar = tqdm(range(1, self.iteration + 1), leave=True) for step in pbar: loss = 0.0 # Reconstruction loss. x_rec = self.G.net.synthesis(z) loss_pix = torch.mean(((x - x_rec) * mask) ** 2) loss = loss + loss_pix * self.loss_pix_weight log_message = f'loss_pix: {_get_tensor_value(loss_pix):.3f}' # Perceptual loss. if self.loss_feat_weight: x_feat = self.F.net(x * mask) x_rec_feat = self.F.net(x_rec * mask) loss_feat = torch.mean((x_feat - x_rec_feat) ** 2) loss = loss + loss_feat * self.loss_feat_weight log_message += f', loss_feat: {_get_tensor_value(loss_feat):.3f}' log_message += f', loss: {_get_tensor_value(loss):.3f}' pbar.set_description_str(log_message) if self.logger: self.logger.debug(f'Step: {step:05d}, ' f'lr: {self.learning_rate:.2e}, ' f'{log_message}') # Do optimization. 
optimizer.zero_grad() loss.backward() optimizer.step() if num_viz > 0 and step % (self.iteration // num_viz) == 0: viz_results.append(self.G.postprocess(_get_tensor_value(x_rec))[0]) return _get_tensor_value(z), viz_results def easy_diffuse(self, target, context, *args, **kwargs): """Wraps functions `preprocess()` and `diffuse()` together.""" return self.diffuse(self.preprocess(target), self.preprocess(context), *args, **kwargs)
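To show how the inverter around `get_init_code` is typically driven, here is a minimal, hedged sketch. The model name 'styleganinv_ffhq256' and the image path 'face.jpg' are placeholders; a registered generator/encoder pair (see `models/model_settings.py`) with pretrained weights is assumed. `easy_invert()` preprocesses internally and calls `get_init_code()` before the optimization loop.

import cv2

# Hypothetical model name; it must already be registered in models/model_settings.py.
inverter = StyleGANInverter('styleganinv_ffhq256',
                            learning_rate=1e-2,
                            iteration=100,
                            regularization_loss_weight=2.0)

# easy_invert() expects an HxWx3 uint8 array in RGB order;
# cv2.imread returns BGR, hence the channel flip.
image = cv2.imread('face.jpg')[:, :, ::-1].copy()
latent_code, viz_results = inverter.easy_invert(image, num_viz=5)

# latent_code has shape [1, num_layers, w_space_dim]; viz_results holds the
# input, the initial reconstruction, and 5 intermediate reconstructions.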
state
A decorator that identifies which methods are states. The presence of the farc_state attr, not the value of the attr, determines statehood. The Spy debugging system uses the farc_state attribute to determine which methods inside a class are actually states. Other uses of the attribute may come in the future.
import asyncio import collections import math import signal import sys from functools import wraps class Spy(object): """Spy is the debugging system for farc. farc contains a handful of Spy.on_*() methods placed at useful locations in the framework. It is up to a Spy driver (such as the included VcdSpy) to implement the Spy.on_*() methods. The programmer calls Spy.enable_spy(<Spy implementation class>) to activate the Spy system; otherwise, Spy does nothing. Therefore, this class is designed so that calling Spy.anything() is inert unless the application first calls Spy.enable_spy() """ _actv_cls = None @staticmethod def enable_spy(spy_cls): """Sets the Spy to use the given class and calls its initializer. """ Spy._actv_cls = spy_cls spy_cls.init() def __getattr__(*args): """Returns 1) the enable_spy static method if requested by name, or 2) the attribute from the active class (if active class was set), or 3) a function that swallows any arguments and does nothing. """ if args[1] == "enable_spy": return Spy.enable_spy if Spy._actv_cls: return getattr(Spy._actv_cls, args[1]) return lambda *x: None # Singleton pattern: # Turn Spy into an instance of itself so __getattribute__ works # on anyone who calls "import Spy; Spy.foo()" # This prevents Spy() from creating a new instance # and gives everyone who calls "import Spy" the same object Spy = Spy() class Signal(object): """An asynchronous stimulus that triggers reactions. A unique identifier that, along with a value, specifies an Event. p. 154 """ _registry = {} # signame:str to sigid:int _lookup = [] # sigid:int to signame:str @staticmethod def exists(signame): """Returns True if signame is in the Signal registry. """ return signame in Signal._registry @staticmethod def register(signame): """Registers the signame if it is not already registered. Returns the signal number for the signame. """ assert type(signame) is str if signame in Signal._registry: # TODO: emit warning that signal is already registered return Signal._registry[signame] else: sigid = len(Signal._lookup) Signal._registry[signame] = sigid Signal._lookup.append(signame) Spy.on_signal_register(signame, sigid) return sigid def __getattr__(self, signame): assert type(signame) is str return Signal._registry[signame] # Singleton pattern: # Turn Signal into an instance of itself so getattr works. # This also prevents Signal() from creating a new instance. Signal = Signal() # Register the reserved (system) signals Signal.register("EMPTY") # 0 Signal.register("ENTRY") # 1 Signal.register("EXIT") # 2 Signal.register("INIT") # 3 # Signals that mirror POSIX signals Signal.register("SIGINT") # (i.e. Ctrl+C) Signal.register("SIGTERM") # (i.e. kill <pid>) Event = collections.namedtuple("Event", ["signal", "value"]) Event.__doc__ = """Events are a tuple of (signal, value) that are passed from one AHSM to another. Signals are defined in each AHSM's source code by name, but resolve to a unique number. Values are any python value, including containers that contain even more values. Each AHSM state (static method) accepts an Event as the parameter and handles the event based on its Signal.""" # Instantiate the reserved (system) events Event.EMPTY = Event(Signal.EMPTY, None) Event.ENTRY = Event(Signal.ENTRY, None) Event.EXIT = Event(Signal.EXIT, None) Event.INIT = Event(Signal.INIT, None) # Events for POSIX signals Event.SIGINT = Event(Signal.SIGINT, None) # (i.e. Ctrl+C) Event.SIGTERM = Event(Signal.SIGTERM, None) # (i.e. 
kill <pid>) # The order of this tuple MUST match their respective signals Event.reserved = (Event.EMPTY, Event.ENTRY, Event.EXIT, Event.INIT) class Hsm(object): """A Hierarchical State Machine (HSM). Full support for hierarchical state nesting. Guaranteed entry/exit action execution on arbitrary state transitions. Full support of nested initial transitions. Support for events with arbitrary parameters. """ # Every state handler must return one of these values RET_HANDLED = 0 RET_IGNORED = 1 RET_TRAN = 2 RET_SUPER = 3 def __init__(self,): """Sets this Hsm's current state to Hsm.top(), the default state and stores the given initial state. """ # self.state is the Hsm/act's current active state. # This instance variable references the message handler (method) # that will be called whenever a message is sent to this Hsm. # We initialize this to self.top, the default message handler self.state = self.top # Farc differs from QP here in that we hardcode # the initial state to be "_initial" self.initial_state = self._initial def _initial(self, event): """Raises a NotImplementedError to force the derived class to implement its own initial state. """ raise NotImplementedError # MASKED: state function (lines 168-183) # Helper functions to process reserved events through the current state @staticmethod def trig(me, state_func, signal): return state_func(me, Event.reserved[signal]) @staticmethod def enter(me, state_func): return state_func(me, Event.ENTRY) @staticmethod def exit(me, state_func): return state_func(me, Event.EXIT) # Other helper functions @staticmethod def handled(me, event): return Hsm.RET_HANDLED @staticmethod def tran(me, nextState): me.state = nextState; return Hsm.RET_TRAN @staticmethod def super(me, superState): me.state = superState; return Hsm.RET_SUPER # p. 158 @state def top(me, event): """This is the default state handler. This handler ignores all signals except the POSIX-like events, SIGINT/SIGTERM. Handling SIGINT/SIGTERM here causes the Exit path to be executed from the application's active state to top/here. The application may put something useful or nothing at all in the Exit path. """ # Handle the Posix-like events to force the HSM # to execute its Exit path all the way to the top if Event.SIGINT == event: return Hsm.RET_HANDLED if Event.SIGTERM == event: return Hsm.RET_HANDLED # All other events are quietly ignored return Hsm.RET_IGNORED # p. 165 @staticmethod def _perform_init_chain(me, current): """Act on the chain of initializations required starting from current. """ t = current while Hsm.trig(me, t if t != Hsm.top else me.initial_state, Signal.INIT) == Hsm.RET_TRAN: # The state handles the INIT message and needs to make a transition. The # "top" state is special in that it does not handle INIT messages, so we # defer to me.initial_state in this case path = [] # Trace the path back to t via superstates while me.state != t: path.append(me.state) Hsm.trig(me, me.state, Signal.EMPTY) # Restore the state to the target state me.state = path[0] assert len(path) < 32 # MAX_NEST_DEPTH # Perform ENTRY action for each state from current to the target path.reverse() # in-place for s in path: Hsm.enter(me, s) # The target state has now to be checked to see if it responds to the INIT message t = path[-1] # -1 because path was reversed return t @staticmethod def _perform_transition(me, source, target): # Handle the state transition from source to target in the HSM. 
s, t = source, target path = [t] if s == t: # Case (a), transition to self Hsm.exit(me,s) Hsm.enter(me,t) else: # Find parent of target Hsm.trig(me, t, Signal.EMPTY) t = me.state # t is now parent of target if s == t: # Case (b), source is parent of target Hsm.enter(me, path[0]) else: # Find parent of source Hsm.trig(me, s, Signal.EMPTY) if me.state == t: # Case (c), source and target share a parent Hsm.exit(me, s) Hsm.enter(me, path[0]) else: if me.state == path[0]: # Case (d), target is parent of source Hsm.exit(me, s) else: # Check if the source is an ancestor of the target (case (e)) lca_found = False path.append(t) # Populates path[1] t = me.state # t is now parent of source # Find and save ancestors of target into path # until we find the source or hit the top me.state = path[1] while me.state != Hsm.top: Hsm.trig(me, me.state, Signal.EMPTY) path.append(me.state) assert len(path) < 32 # MAX_NEST_DEPTH if me.state == s: lca_found = True break if lca_found: # This is case (e), enter states to get to target for st in reversed(path[:-1]): Hsm.enter(me, st) else: Hsm.exit(me, s) # Exit the source for cases (f), (g), (h) me.state = t # Start at parent of the source while me.state not in path: # Keep exiting up into superstates until we reach the LCA. # Depending on whether the EXIT signal is handled, we may also need # to send the EMPTY signal to make me.state climb to the superstate. if Hsm.exit(me, me.state) == Hsm.RET_HANDLED: Hsm.trig(me, me.state, Signal.EMPTY) t = me.state # Step into children until we enter the target for st in reversed(path[:path.index(t)]): Hsm.enter(me, st) @staticmethod def init(me, event = None): """Transitions to the initial state. Follows any INIT transitions from the inital state and performs ENTRY actions as it proceeds. Use this to pass any parameters to initialize the state machine. p. 172 """ # TODO: The initial state MUST transition to another state # The code that formerly did this was: # status = me.initial_state(me, event) # assert status == Hsm.RET_TRAN # But the above code is commented out so an Ahsm's _initial() # isn't executed twice. me.state = Hsm._perform_init_chain(me, Hsm.top) @staticmethod def dispatch(me, event): """Dispatches the given event to this Hsm. Follows the application's state transitions until the event is handled or top() is reached p. 174 """ Spy.on_hsm_dispatch_event(event) # Save the current state t = me.state # Proceed to superstates if event is not handled, we wish to find the superstate # (if any) that does handle the event and to record the path to that state exit_path = [] r = Hsm.RET_SUPER while r == Hsm.RET_SUPER: s = me.state exit_path.append(s) Spy.on_hsm_dispatch_pre(s) r = s(me, event) # invoke state handler # We leave the while loop with s at the state which was able to respond # to the event, or to Hsm.top if none did Spy.on_hsm_dispatch_post(exit_path) # If the state handler for s requests a transition if r == Hsm.RET_TRAN: t = me.state # Store target of transition # Exit from the current state to the state s which handles # the transition. We do not exit from s=exit_path[-1] itself. 
for st in exit_path[:-1]: r = Hsm.exit(me, st) assert (r == Hsm.RET_SUPER) or (r == Hsm.RET_HANDLED) s = exit_path[-1] # Transition to t through the HSM Hsm._perform_transition(me, s, t) # Do initializations starting at t t = Hsm._perform_init_chain(me, t) # Restore the state me.state = t class Framework(object): """Framework is a composite class that holds: - the asyncio event loop - the registry of AHSMs - the set of TimeEvents - the handle to the next TimeEvent - the table subscriptions to events """ _event_loop = asyncio.get_event_loop() # The Framework maintains a registry of Ahsms in a list. _ahsm_registry = [] # The Framework maintains a dict of priorities in use # to prevent duplicates. # An Ahsm's priority is checked against this dict # within the Ahsm.start() method # when the Ahsm is added to the Framework. # The dict's key is the priority (integer) and the value is the Ahsm. _priority_dict = {} # The Framework maintains a group of TimeEvents in a dict. The next # expiration of the TimeEvent is the key and the event is the value. # Only the event with the next expiration time is scheduled for the # timeEventCallback(). As TimeEvents are added and removed, the scheduled # callback must be re-evaluated. Periodic TimeEvents should only have # one entry in the dict: the next expiration. The timeEventCallback() will # add a Periodic TimeEvent back into the dict with its next expiration. _time_events = {} # When a TimeEvent is scheduled for the timeEventCallback(), # a handle is kept so that the callback may be cancelled if necessary. _tm_event_handle = None # The Subscriber Table is a dictionary. The keys are signals. # The value for each key is a list of Ahsms that are subscribed to the # signal. An Ahsm may subscribe to a signal at any time during runtime. _subscriber_table = {} @staticmethod def post(event, act): """Posts the event to the given Ahsm's event queue. The argument, act, is an Ahsm instance. """ assert isinstance(act, Ahsm) act.postFIFO(event) @staticmethod def post_by_name(event, act_name): """Posts the event to the given Ahsm's event queue. The argument, act, is a string of the name of the class to which the event is sent. The event will post to all actors having the given classname. """ assert type(act_name) is str for act in Framework._ahsm_registry: if act.__class__.__name__ == act_name: act.postFIFO(event) @staticmethod def publish(event): """Posts the event to the message queue of every Ahsm that is subscribed to the event's signal. """ if event.signal in Framework._subscriber_table: for act in Framework._subscriber_table[event.signal]: act.postFIFO(event) # Run to completion Framework._event_loop.call_soon_threadsafe(Framework.run) @staticmethod def subscribe(signame, act): """Adds the given Ahsm to the subscriber table list for the given signal. The argument, signame, is a string of the name of the Signal to which the Ahsm is subscribing. Using a string allows the Signal to be created in the registry if it is not already. """ sigid = Signal.register(signame) if sigid not in Framework._subscriber_table: Framework._subscriber_table[sigid] = [] Framework._subscriber_table[sigid].append(act) @staticmethod def addTimeEvent(tm_event, delta): """Adds the TimeEvent to the list of time events in the Framework. The event will fire its signal (to the TimeEvent's target Ahsm) after the delay, delta. 
""" expiration = Framework._event_loop.time() + delta Framework.addTimeEventAt(tm_event, expiration) @staticmethod def addTimeEventAt(tm_event, abs_time): """Adds the TimeEvent to the list of time events in the Framework. The event will fire its signal (to the TimeEvent's target Ahsm) at the given absolute time (_event_loop.time()). """ assert tm_event not in Framework._time_events.values() Framework._insortTimeEvent(tm_event, abs_time) @staticmethod def _insortTimeEvent(tm_event, expiration): """Inserts a TimeEvent into the list of time events, sorted by the next expiration of the timer. If the expiration time matches an existing expiration, we add the smallest amount of time to the given expiration to avoid a key collision in the Dict and make the identically-timed events fire in a FIFO fashion. """ # If the event is to happen in the past, post it now now = Framework._event_loop.time() if expiration < now: tm_event.act.postFIFO(tm_event) # TODO: if periodic, need to schedule next? # If an event already occupies this expiration time, # increase this event's expiration by the smallest measurable amount while expiration in Framework._time_events.keys(): m, e = math.frexp(expiration) expiration = (m + sys.float_info.epsilon) * 2**e Framework._time_events[expiration] = tm_event # If this is the only active TimeEvent, schedule its callback if len(Framework._time_events) == 1: Framework._tm_event_handle = Framework._event_loop.call_at( expiration, Framework.timeEventCallback, tm_event, expiration) # If there are other TimeEvents, # check if this one should replace the scheduled one else: if expiration < min(Framework._time_events.keys()): Framework._tm_event_handle.cancel() Framework._tm_event_handle = Framework._event_loop.call_at( expiration, Framework.timeEventCallback, tm_event, expiration) @staticmethod def removeTimeEvent(tm_event): """Removes the TimeEvent from the list of active time events. Cancels the TimeEvent's callback if there is one. Schedules the next event's callback if there is one. """ for k,v in Framework._time_events.items(): if v is tm_event: # If the event being removed is scheduled for callback, # cancel and schedule the next event if there is one if k == min(Framework._time_events.keys()): del Framework._time_events[k] if Framework._tm_event_handle: Framework._tm_event_handle.cancel() if len(Framework._time_events) > 0: next_expiration = min(Framework._time_events.keys()) next_event = Framework._time_events[next_expiration] Framework._tm_event_handle = \ Framework._event_loop.call_at( next_expiration, Framework.timeEventCallback, next_event, next_expiration) else: Framework._tm_event_handle = None else: del Framework._time_events[k] break @staticmethod def timeEventCallback(tm_event, expiration): """The callback function for all TimeEvents. Posts the event to the event's target Ahsm. If the TimeEvent is periodic, re-insort the event in the list of active time events. 
""" assert expiration in Framework._time_events.keys(), ( "Exp:%d _time_events.keys():%s" % (expiration, Framework._time_events.keys())) # Remove this expired TimeEvent from the active list del Framework._time_events[expiration] Framework._tm_event_handle = None # Post the event to the target Ahsm tm_event.act.postFIFO(tm_event) # If this is a periodic time event, schedule its next expiration if tm_event.interval > 0: Framework._insortTimeEvent(tm_event, expiration + tm_event.interval) # If not set already and there are more events, set the next event callback if (Framework._tm_event_handle == None and len(Framework._time_events) > 0): next_expiration = min(Framework._time_events.keys()) next_event = Framework._time_events[next_expiration] Framework._tm_event_handle = Framework._event_loop.call_at( next_expiration, Framework.timeEventCallback, next_event, next_expiration) # Run to completion Framework._event_loop.call_soon_threadsafe(Framework.run) @staticmethod def add(act): """Makes the framework aware of the given Ahsm. """ Framework._ahsm_registry.append(act) assert act.priority not in Framework._priority_dict, ( "Priority MUST be unique") Framework._priority_dict[act.priority] = act Spy.on_framework_add(act) @staticmethod def run(): """Dispatches an event to the highest priority Ahsm until all event queues are empty (i.e. Run To Completion). """ getPriority = lambda x : x.priority while True: allQueuesEmpty = True sorted_acts = sorted(Framework._ahsm_registry, key=getPriority) for act in sorted_acts: if act.has_msgs(): event_next = act.pop_msg() act.dispatch(act, event_next) allQueuesEmpty = False break if allQueuesEmpty: return @staticmethod def stop(): """EXITs all Ahsms and stops the event loop. """ # Disable the timer callback if Framework._tm_event_handle: Framework._tm_event_handle.cancel() Framework._tm_event_handle = None # Post EXIT to all Ahsms for act in Framework._ahsm_registry: Framework.post(Event.EXIT, act) # Run to completion and stop the asyncio event loop Framework.run() Framework._event_loop.stop() Spy.on_framework_stop() @staticmethod def print_info(): """Prints the name and current state of each actor in the framework. Meant to be called when ctrl+T (SIGINFO/29) is issued. """ for act in Framework._ahsm_registry: print(act.__class__.__name__, act.state.__name__) # Bind a useful set of POSIX signals to the handler # (ignore a NotImplementedError on Windows) try: _event_loop.add_signal_handler(signal.SIGINT, lambda: Framework.stop()) _event_loop.add_signal_handler(signal.SIGTERM, lambda: Framework.stop()) _event_loop.add_signal_handler(29, print_info.__func__) except NotImplementedError: pass def run_forever(): """Runs the asyncio event loop with and ensures state machines are exited upon a KeyboardInterrupt. """ loop = asyncio.get_event_loop() try: loop.run_forever() except KeyboardInterrupt: Framework.stop() loop.close() class Ahsm(Hsm): """An Augmented Hierarchical State Machine (AHSM); a.k.a. ActiveObject/AO. Adds a priority, message queue and methods to work with the queue. 
""" def start(self, priority, initEvent=None): # must set the priority before Framework.add() which uses the priority self.priority = priority Framework.add(self) self.mq = collections.deque() self.init(self, initEvent) # Run to completion Framework._event_loop.call_soon_threadsafe(Framework.run) def postLIFO(self, evt): self.mq.append(evt) def postFIFO(self, evt): self.mq.appendleft(evt) def pop_msg(self,): return self.mq.pop() def has_msgs(self,): return len(self.mq) > 0 class TimeEvent(object): """TimeEvent is a composite class that contains an Event. A TimeEvent is created by the application and added to the Framework. The Framework then emits the event after the given delay. A one-shot TimeEvent is created by calling either postAt() or postIn(). A periodic TimeEvent is created by calling the postEvery() method. """ def __init__(self, signame): assert type(signame) == str self.signal = Signal.register(signame) self.value = None def postAt(self, act, abs_time): """Posts this TimeEvent to the given Ahsm at a specified time. """ assert issubclass(type(act), Ahsm) self.act = act self.interval = 0 Framework.addTimeEventAt(self, abs_time) def postIn(self, act, delta): """Posts this TimeEvent to the given Ahsm after the time delta. """ assert issubclass(type(act), Ahsm) self.act = act self.interval = 0 Framework.addTimeEvent(self, delta) def postEvery(self, act, delta): """Posts this TimeEvent to the given Ahsm after the time delta and every time delta thereafter until disarmed. """ assert issubclass(type(act), Ahsm) self.act = act self.interval = delta Framework.addTimeEvent(self, delta) def disarm(self): """Removes this TimeEvent from the Framework's active time events. """ self.act = None Framework.removeTimeEvent(self) from .VcdSpy import VcdSpy
def state(func):
    """A decorator that identifies which methods are states.

    The presence of the farc_state attr, not the value of the attr,
    determines statehood.  The Spy debugging system uses the farc_state
    attribute to determine which methods inside a class are actually states.
    Other uses of the attribute may come in the future.
    """
    @wraps(func)
    def func_wrap(self, evt):
        result = func(self, evt)
        Spy.on_state_handler_called(func_wrap, evt, result)
        return result
    setattr(func_wrap, "farc_state", True)
    return staticmethod(func_wrap)
168
183
import asyncio import collections import math import signal import sys from functools import wraps class Spy(object): """Spy is the debugging system for farc. farc contains a handful of Spy.on_*() methods placed at useful locations in the framework. It is up to a Spy driver (such as the included VcdSpy) to implement the Spy.on_*() methods. The programmer calls Spy.enable_spy(<Spy implementation class>) to activate the Spy system; otherwise, Spy does nothing. Therefore, this class is designed so that calling Spy.anything() is inert unless the application first calls Spy.enable_spy() """ _actv_cls = None @staticmethod def enable_spy(spy_cls): """Sets the Spy to use the given class and calls its initializer. """ Spy._actv_cls = spy_cls spy_cls.init() def __getattr__(*args): """Returns 1) the enable_spy static method if requested by name, or 2) the attribute from the active class (if active class was set), or 3) a function that swallows any arguments and does nothing. """ if args[1] == "enable_spy": return Spy.enable_spy if Spy._actv_cls: return getattr(Spy._actv_cls, args[1]) return lambda *x: None # Singleton pattern: # Turn Spy into an instance of itself so __getattribute__ works # on anyone who calls "import Spy; Spy.foo()" # This prevents Spy() from creating a new instance # and gives everyone who calls "import Spy" the same object Spy = Spy() class Signal(object): """An asynchronous stimulus that triggers reactions. A unique identifier that, along with a value, specifies an Event. p. 154 """ _registry = {} # signame:str to sigid:int _lookup = [] # sigid:int to signame:str @staticmethod def exists(signame): """Returns True if signame is in the Signal registry. """ return signame in Signal._registry @staticmethod def register(signame): """Registers the signame if it is not already registered. Returns the signal number for the signame. """ assert type(signame) is str if signame in Signal._registry: # TODO: emit warning that signal is already registered return Signal._registry[signame] else: sigid = len(Signal._lookup) Signal._registry[signame] = sigid Signal._lookup.append(signame) Spy.on_signal_register(signame, sigid) return sigid def __getattr__(self, signame): assert type(signame) is str return Signal._registry[signame] # Singleton pattern: # Turn Signal into an instance of itself so getattr works. # This also prevents Signal() from creating a new instance. Signal = Signal() # Register the reserved (system) signals Signal.register("EMPTY") # 0 Signal.register("ENTRY") # 1 Signal.register("EXIT") # 2 Signal.register("INIT") # 3 # Signals that mirror POSIX signals Signal.register("SIGINT") # (i.e. Ctrl+C) Signal.register("SIGTERM") # (i.e. kill <pid>) Event = collections.namedtuple("Event", ["signal", "value"]) Event.__doc__ = """Events are a tuple of (signal, value) that are passed from one AHSM to another. Signals are defined in each AHSM's source code by name, but resolve to a unique number. Values are any python value, including containers that contain even more values. Each AHSM state (static method) accepts an Event as the parameter and handles the event based on its Signal.""" # Instantiate the reserved (system) events Event.EMPTY = Event(Signal.EMPTY, None) Event.ENTRY = Event(Signal.ENTRY, None) Event.EXIT = Event(Signal.EXIT, None) Event.INIT = Event(Signal.INIT, None) # Events for POSIX signals Event.SIGINT = Event(Signal.SIGINT, None) # (i.e. Ctrl+C) Event.SIGTERM = Event(Signal.SIGTERM, None) # (i.e. 
kill <pid>) # The order of this tuple MUST match their respective signals Event.reserved = (Event.EMPTY, Event.ENTRY, Event.EXIT, Event.INIT) class Hsm(object): """A Hierarchical State Machine (HSM). Full support for hierarchical state nesting. Guaranteed entry/exit action execution on arbitrary state transitions. Full support of nested initial transitions. Support for events with arbitrary parameters. """ # Every state handler must return one of these values RET_HANDLED = 0 RET_IGNORED = 1 RET_TRAN = 2 RET_SUPER = 3 def __init__(self,): """Sets this Hsm's current state to Hsm.top(), the default state and stores the given initial state. """ # self.state is the Hsm/act's current active state. # This instance variable references the message handler (method) # that will be called whenever a message is sent to this Hsm. # We initialize this to self.top, the default message handler self.state = self.top # Farc differs from QP here in that we hardcode # the initial state to be "_initial" self.initial_state = self._initial def _initial(self, event): """Raises a NotImplementedError to force the derived class to implement its own initial state. """ raise NotImplementedError def state(func): """A decorator that identifies which methods are states. The presence of the farc_state attr, not the value of the attr, determines statehood. The Spy debugging system uses the farc_state attribute to determine which methods inside a class are actually states. Other uses of the attribute may come in the future. """ @wraps(func) def func_wrap(self, evt): result = func(self, evt) Spy.on_state_handler_called(func_wrap, evt, result) return result setattr(func_wrap, "farc_state", True) return staticmethod(func_wrap) # Helper functions to process reserved events through the current state @staticmethod def trig(me, state_func, signal): return state_func(me, Event.reserved[signal]) @staticmethod def enter(me, state_func): return state_func(me, Event.ENTRY) @staticmethod def exit(me, state_func): return state_func(me, Event.EXIT) # Other helper functions @staticmethod def handled(me, event): return Hsm.RET_HANDLED @staticmethod def tran(me, nextState): me.state = nextState; return Hsm.RET_TRAN @staticmethod def super(me, superState): me.state = superState; return Hsm.RET_SUPER # p. 158 @state def top(me, event): """This is the default state handler. This handler ignores all signals except the POSIX-like events, SIGINT/SIGTERM. Handling SIGINT/SIGTERM here causes the Exit path to be executed from the application's active state to top/here. The application may put something useful or nothing at all in the Exit path. """ # Handle the Posix-like events to force the HSM # to execute its Exit path all the way to the top if Event.SIGINT == event: return Hsm.RET_HANDLED if Event.SIGTERM == event: return Hsm.RET_HANDLED # All other events are quietly ignored return Hsm.RET_IGNORED # p. 165 @staticmethod def _perform_init_chain(me, current): """Act on the chain of initializations required starting from current. """ t = current while Hsm.trig(me, t if t != Hsm.top else me.initial_state, Signal.INIT) == Hsm.RET_TRAN: # The state handles the INIT message and needs to make a transition. 
The # "top" state is special in that it does not handle INIT messages, so we # defer to me.initial_state in this case path = [] # Trace the path back to t via superstates while me.state != t: path.append(me.state) Hsm.trig(me, me.state, Signal.EMPTY) # Restore the state to the target state me.state = path[0] assert len(path) < 32 # MAX_NEST_DEPTH # Perform ENTRY action for each state from current to the target path.reverse() # in-place for s in path: Hsm.enter(me, s) # The target state has now to be checked to see if it responds to the INIT message t = path[-1] # -1 because path was reversed return t @staticmethod def _perform_transition(me, source, target): # Handle the state transition from source to target in the HSM. s, t = source, target path = [t] if s == t: # Case (a), transition to self Hsm.exit(me,s) Hsm.enter(me,t) else: # Find parent of target Hsm.trig(me, t, Signal.EMPTY) t = me.state # t is now parent of target if s == t: # Case (b), source is parent of target Hsm.enter(me, path[0]) else: # Find parent of source Hsm.trig(me, s, Signal.EMPTY) if me.state == t: # Case (c), source and target share a parent Hsm.exit(me, s) Hsm.enter(me, path[0]) else: if me.state == path[0]: # Case (d), target is parent of source Hsm.exit(me, s) else: # Check if the source is an ancestor of the target (case (e)) lca_found = False path.append(t) # Populates path[1] t = me.state # t is now parent of source # Find and save ancestors of target into path # until we find the source or hit the top me.state = path[1] while me.state != Hsm.top: Hsm.trig(me, me.state, Signal.EMPTY) path.append(me.state) assert len(path) < 32 # MAX_NEST_DEPTH if me.state == s: lca_found = True break if lca_found: # This is case (e), enter states to get to target for st in reversed(path[:-1]): Hsm.enter(me, st) else: Hsm.exit(me, s) # Exit the source for cases (f), (g), (h) me.state = t # Start at parent of the source while me.state not in path: # Keep exiting up into superstates until we reach the LCA. # Depending on whether the EXIT signal is handled, we may also need # to send the EMPTY signal to make me.state climb to the superstate. if Hsm.exit(me, me.state) == Hsm.RET_HANDLED: Hsm.trig(me, me.state, Signal.EMPTY) t = me.state # Step into children until we enter the target for st in reversed(path[:path.index(t)]): Hsm.enter(me, st) @staticmethod def init(me, event = None): """Transitions to the initial state. Follows any INIT transitions from the inital state and performs ENTRY actions as it proceeds. Use this to pass any parameters to initialize the state machine. p. 172 """ # TODO: The initial state MUST transition to another state # The code that formerly did this was: # status = me.initial_state(me, event) # assert status == Hsm.RET_TRAN # But the above code is commented out so an Ahsm's _initial() # isn't executed twice. me.state = Hsm._perform_init_chain(me, Hsm.top) @staticmethod def dispatch(me, event): """Dispatches the given event to this Hsm. Follows the application's state transitions until the event is handled or top() is reached p. 
174 """ Spy.on_hsm_dispatch_event(event) # Save the current state t = me.state # Proceed to superstates if event is not handled, we wish to find the superstate # (if any) that does handle the event and to record the path to that state exit_path = [] r = Hsm.RET_SUPER while r == Hsm.RET_SUPER: s = me.state exit_path.append(s) Spy.on_hsm_dispatch_pre(s) r = s(me, event) # invoke state handler # We leave the while loop with s at the state which was able to respond # to the event, or to Hsm.top if none did Spy.on_hsm_dispatch_post(exit_path) # If the state handler for s requests a transition if r == Hsm.RET_TRAN: t = me.state # Store target of transition # Exit from the current state to the state s which handles # the transition. We do not exit from s=exit_path[-1] itself. for st in exit_path[:-1]: r = Hsm.exit(me, st) assert (r == Hsm.RET_SUPER) or (r == Hsm.RET_HANDLED) s = exit_path[-1] # Transition to t through the HSM Hsm._perform_transition(me, s, t) # Do initializations starting at t t = Hsm._perform_init_chain(me, t) # Restore the state me.state = t class Framework(object): """Framework is a composite class that holds: - the asyncio event loop - the registry of AHSMs - the set of TimeEvents - the handle to the next TimeEvent - the table subscriptions to events """ _event_loop = asyncio.get_event_loop() # The Framework maintains a registry of Ahsms in a list. _ahsm_registry = [] # The Framework maintains a dict of priorities in use # to prevent duplicates. # An Ahsm's priority is checked against this dict # within the Ahsm.start() method # when the Ahsm is added to the Framework. # The dict's key is the priority (integer) and the value is the Ahsm. _priority_dict = {} # The Framework maintains a group of TimeEvents in a dict. The next # expiration of the TimeEvent is the key and the event is the value. # Only the event with the next expiration time is scheduled for the # timeEventCallback(). As TimeEvents are added and removed, the scheduled # callback must be re-evaluated. Periodic TimeEvents should only have # one entry in the dict: the next expiration. The timeEventCallback() will # add a Periodic TimeEvent back into the dict with its next expiration. _time_events = {} # When a TimeEvent is scheduled for the timeEventCallback(), # a handle is kept so that the callback may be cancelled if necessary. _tm_event_handle = None # The Subscriber Table is a dictionary. The keys are signals. # The value for each key is a list of Ahsms that are subscribed to the # signal. An Ahsm may subscribe to a signal at any time during runtime. _subscriber_table = {} @staticmethod def post(event, act): """Posts the event to the given Ahsm's event queue. The argument, act, is an Ahsm instance. """ assert isinstance(act, Ahsm) act.postFIFO(event) @staticmethod def post_by_name(event, act_name): """Posts the event to the given Ahsm's event queue. The argument, act, is a string of the name of the class to which the event is sent. The event will post to all actors having the given classname. """ assert type(act_name) is str for act in Framework._ahsm_registry: if act.__class__.__name__ == act_name: act.postFIFO(event) @staticmethod def publish(event): """Posts the event to the message queue of every Ahsm that is subscribed to the event's signal. 
""" if event.signal in Framework._subscriber_table: for act in Framework._subscriber_table[event.signal]: act.postFIFO(event) # Run to completion Framework._event_loop.call_soon_threadsafe(Framework.run) @staticmethod def subscribe(signame, act): """Adds the given Ahsm to the subscriber table list for the given signal. The argument, signame, is a string of the name of the Signal to which the Ahsm is subscribing. Using a string allows the Signal to be created in the registry if it is not already. """ sigid = Signal.register(signame) if sigid not in Framework._subscriber_table: Framework._subscriber_table[sigid] = [] Framework._subscriber_table[sigid].append(act) @staticmethod def addTimeEvent(tm_event, delta): """Adds the TimeEvent to the list of time events in the Framework. The event will fire its signal (to the TimeEvent's target Ahsm) after the delay, delta. """ expiration = Framework._event_loop.time() + delta Framework.addTimeEventAt(tm_event, expiration) @staticmethod def addTimeEventAt(tm_event, abs_time): """Adds the TimeEvent to the list of time events in the Framework. The event will fire its signal (to the TimeEvent's target Ahsm) at the given absolute time (_event_loop.time()). """ assert tm_event not in Framework._time_events.values() Framework._insortTimeEvent(tm_event, abs_time) @staticmethod def _insortTimeEvent(tm_event, expiration): """Inserts a TimeEvent into the list of time events, sorted by the next expiration of the timer. If the expiration time matches an existing expiration, we add the smallest amount of time to the given expiration to avoid a key collision in the Dict and make the identically-timed events fire in a FIFO fashion. """ # If the event is to happen in the past, post it now now = Framework._event_loop.time() if expiration < now: tm_event.act.postFIFO(tm_event) # TODO: if periodic, need to schedule next? # If an event already occupies this expiration time, # increase this event's expiration by the smallest measurable amount while expiration in Framework._time_events.keys(): m, e = math.frexp(expiration) expiration = (m + sys.float_info.epsilon) * 2**e Framework._time_events[expiration] = tm_event # If this is the only active TimeEvent, schedule its callback if len(Framework._time_events) == 1: Framework._tm_event_handle = Framework._event_loop.call_at( expiration, Framework.timeEventCallback, tm_event, expiration) # If there are other TimeEvents, # check if this one should replace the scheduled one else: if expiration < min(Framework._time_events.keys()): Framework._tm_event_handle.cancel() Framework._tm_event_handle = Framework._event_loop.call_at( expiration, Framework.timeEventCallback, tm_event, expiration) @staticmethod def removeTimeEvent(tm_event): """Removes the TimeEvent from the list of active time events. Cancels the TimeEvent's callback if there is one. Schedules the next event's callback if there is one. 
""" for k,v in Framework._time_events.items(): if v is tm_event: # If the event being removed is scheduled for callback, # cancel and schedule the next event if there is one if k == min(Framework._time_events.keys()): del Framework._time_events[k] if Framework._tm_event_handle: Framework._tm_event_handle.cancel() if len(Framework._time_events) > 0: next_expiration = min(Framework._time_events.keys()) next_event = Framework._time_events[next_expiration] Framework._tm_event_handle = \ Framework._event_loop.call_at( next_expiration, Framework.timeEventCallback, next_event, next_expiration) else: Framework._tm_event_handle = None else: del Framework._time_events[k] break @staticmethod def timeEventCallback(tm_event, expiration): """The callback function for all TimeEvents. Posts the event to the event's target Ahsm. If the TimeEvent is periodic, re-insort the event in the list of active time events. """ assert expiration in Framework._time_events.keys(), ( "Exp:%d _time_events.keys():%s" % (expiration, Framework._time_events.keys())) # Remove this expired TimeEvent from the active list del Framework._time_events[expiration] Framework._tm_event_handle = None # Post the event to the target Ahsm tm_event.act.postFIFO(tm_event) # If this is a periodic time event, schedule its next expiration if tm_event.interval > 0: Framework._insortTimeEvent(tm_event, expiration + tm_event.interval) # If not set already and there are more events, set the next event callback if (Framework._tm_event_handle == None and len(Framework._time_events) > 0): next_expiration = min(Framework._time_events.keys()) next_event = Framework._time_events[next_expiration] Framework._tm_event_handle = Framework._event_loop.call_at( next_expiration, Framework.timeEventCallback, next_event, next_expiration) # Run to completion Framework._event_loop.call_soon_threadsafe(Framework.run) @staticmethod def add(act): """Makes the framework aware of the given Ahsm. """ Framework._ahsm_registry.append(act) assert act.priority not in Framework._priority_dict, ( "Priority MUST be unique") Framework._priority_dict[act.priority] = act Spy.on_framework_add(act) @staticmethod def run(): """Dispatches an event to the highest priority Ahsm until all event queues are empty (i.e. Run To Completion). """ getPriority = lambda x : x.priority while True: allQueuesEmpty = True sorted_acts = sorted(Framework._ahsm_registry, key=getPriority) for act in sorted_acts: if act.has_msgs(): event_next = act.pop_msg() act.dispatch(act, event_next) allQueuesEmpty = False break if allQueuesEmpty: return @staticmethod def stop(): """EXITs all Ahsms and stops the event loop. """ # Disable the timer callback if Framework._tm_event_handle: Framework._tm_event_handle.cancel() Framework._tm_event_handle = None # Post EXIT to all Ahsms for act in Framework._ahsm_registry: Framework.post(Event.EXIT, act) # Run to completion and stop the asyncio event loop Framework.run() Framework._event_loop.stop() Spy.on_framework_stop() @staticmethod def print_info(): """Prints the name and current state of each actor in the framework. Meant to be called when ctrl+T (SIGINFO/29) is issued. 
""" for act in Framework._ahsm_registry: print(act.__class__.__name__, act.state.__name__) # Bind a useful set of POSIX signals to the handler # (ignore a NotImplementedError on Windows) try: _event_loop.add_signal_handler(signal.SIGINT, lambda: Framework.stop()) _event_loop.add_signal_handler(signal.SIGTERM, lambda: Framework.stop()) _event_loop.add_signal_handler(29, print_info.__func__) except NotImplementedError: pass def run_forever(): """Runs the asyncio event loop with and ensures state machines are exited upon a KeyboardInterrupt. """ loop = asyncio.get_event_loop() try: loop.run_forever() except KeyboardInterrupt: Framework.stop() loop.close() class Ahsm(Hsm): """An Augmented Hierarchical State Machine (AHSM); a.k.a. ActiveObject/AO. Adds a priority, message queue and methods to work with the queue. """ def start(self, priority, initEvent=None): # must set the priority before Framework.add() which uses the priority self.priority = priority Framework.add(self) self.mq = collections.deque() self.init(self, initEvent) # Run to completion Framework._event_loop.call_soon_threadsafe(Framework.run) def postLIFO(self, evt): self.mq.append(evt) def postFIFO(self, evt): self.mq.appendleft(evt) def pop_msg(self,): return self.mq.pop() def has_msgs(self,): return len(self.mq) > 0 class TimeEvent(object): """TimeEvent is a composite class that contains an Event. A TimeEvent is created by the application and added to the Framework. The Framework then emits the event after the given delay. A one-shot TimeEvent is created by calling either postAt() or postIn(). A periodic TimeEvent is created by calling the postEvery() method. """ def __init__(self, signame): assert type(signame) == str self.signal = Signal.register(signame) self.value = None def postAt(self, act, abs_time): """Posts this TimeEvent to the given Ahsm at a specified time. """ assert issubclass(type(act), Ahsm) self.act = act self.interval = 0 Framework.addTimeEventAt(self, abs_time) def postIn(self, act, delta): """Posts this TimeEvent to the given Ahsm after the time delta. """ assert issubclass(type(act), Ahsm) self.act = act self.interval = 0 Framework.addTimeEvent(self, delta) def postEvery(self, act, delta): """Posts this TimeEvent to the given Ahsm after the time delta and every time delta thereafter until disarmed. """ assert issubclass(type(act), Ahsm) self.act = act self.interval = delta Framework.addTimeEvent(self, delta) def disarm(self): """Removes this TimeEvent from the Framework's active time events. """ self.act = None Framework.removeTimeEvent(self) from .VcdSpy import VcdSpy
subscribe
Adds the given Ahsm to the subscriber table list for the given signal. The argument, signame, is a string of the name of the Signal to which the Ahsm is subscribing. Using a string allows the Signal to be created in the registry if it is not already.
import asyncio import collections import math import signal import sys from functools import wraps class Spy(object): """Spy is the debugging system for farc. farc contains a handful of Spy.on_*() methods placed at useful locations in the framework. It is up to a Spy driver (such as the included VcdSpy) to implement the Spy.on_*() methods. The programmer calls Spy.enable_spy(<Spy implementation class>) to activate the Spy system; otherwise, Spy does nothing. Therefore, this class is designed so that calling Spy.anything() is inert unless the application first calls Spy.enable_spy() """ _actv_cls = None @staticmethod def enable_spy(spy_cls): """Sets the Spy to use the given class and calls its initializer. """ Spy._actv_cls = spy_cls spy_cls.init() def __getattr__(*args): """Returns 1) the enable_spy static method if requested by name, or 2) the attribute from the active class (if active class was set), or 3) a function that swallows any arguments and does nothing. """ if args[1] == "enable_spy": return Spy.enable_spy if Spy._actv_cls: return getattr(Spy._actv_cls, args[1]) return lambda *x: None # Singleton pattern: # Turn Spy into an instance of itself so __getattribute__ works # on anyone who calls "import Spy; Spy.foo()" # This prevents Spy() from creating a new instance # and gives everyone who calls "import Spy" the same object Spy = Spy() class Signal(object): """An asynchronous stimulus that triggers reactions. A unique identifier that, along with a value, specifies an Event. p. 154 """ _registry = {} # signame:str to sigid:int _lookup = [] # sigid:int to signame:str @staticmethod def exists(signame): """Returns True if signame is in the Signal registry. """ return signame in Signal._registry @staticmethod def register(signame): """Registers the signame if it is not already registered. Returns the signal number for the signame. """ assert type(signame) is str if signame in Signal._registry: # TODO: emit warning that signal is already registered return Signal._registry[signame] else: sigid = len(Signal._lookup) Signal._registry[signame] = sigid Signal._lookup.append(signame) Spy.on_signal_register(signame, sigid) return sigid def __getattr__(self, signame): assert type(signame) is str return Signal._registry[signame] # Singleton pattern: # Turn Signal into an instance of itself so getattr works. # This also prevents Signal() from creating a new instance. Signal = Signal() # Register the reserved (system) signals Signal.register("EMPTY") # 0 Signal.register("ENTRY") # 1 Signal.register("EXIT") # 2 Signal.register("INIT") # 3 # Signals that mirror POSIX signals Signal.register("SIGINT") # (i.e. Ctrl+C) Signal.register("SIGTERM") # (i.e. kill <pid>) Event = collections.namedtuple("Event", ["signal", "value"]) Event.__doc__ = """Events are a tuple of (signal, value) that are passed from one AHSM to another. Signals are defined in each AHSM's source code by name, but resolve to a unique number. Values are any python value, including containers that contain even more values. Each AHSM state (static method) accepts an Event as the parameter and handles the event based on its Signal.""" # Instantiate the reserved (system) events Event.EMPTY = Event(Signal.EMPTY, None) Event.ENTRY = Event(Signal.ENTRY, None) Event.EXIT = Event(Signal.EXIT, None) Event.INIT = Event(Signal.INIT, None) # Events for POSIX signals Event.SIGINT = Event(Signal.SIGINT, None) # (i.e. Ctrl+C) Event.SIGTERM = Event(Signal.SIGTERM, None) # (i.e. 
kill <pid>) # The order of this tuple MUST match their respective signals Event.reserved = (Event.EMPTY, Event.ENTRY, Event.EXIT, Event.INIT) class Hsm(object): """A Hierarchical State Machine (HSM). Full support for hierarchical state nesting. Guaranteed entry/exit action execution on arbitrary state transitions. Full support of nested initial transitions. Support for events with arbitrary parameters. """ # Every state handler must return one of these values RET_HANDLED = 0 RET_IGNORED = 1 RET_TRAN = 2 RET_SUPER = 3 def __init__(self,): """Sets this Hsm's current state to Hsm.top(), the default state and stores the given initial state. """ # self.state is the Hsm/act's current active state. # This instance variable references the message handler (method) # that will be called whenever a message is sent to this Hsm. # We initialize this to self.top, the default message handler self.state = self.top # Farc differs from QP here in that we hardcode # the initial state to be "_initial" self.initial_state = self._initial def _initial(self, event): """Raises a NotImplementedError to force the derived class to implement its own initial state. """ raise NotImplementedError def state(func): """A decorator that identifies which methods are states. The presence of the farc_state attr, not the value of the attr, determines statehood. The Spy debugging system uses the farc_state attribute to determine which methods inside a class are actually states. Other uses of the attribute may come in the future. """ @wraps(func) def func_wrap(self, evt): result = func(self, evt) Spy.on_state_handler_called(func_wrap, evt, result) return result setattr(func_wrap, "farc_state", True) return staticmethod(func_wrap) # Helper functions to process reserved events through the current state @staticmethod def trig(me, state_func, signal): return state_func(me, Event.reserved[signal]) @staticmethod def enter(me, state_func): return state_func(me, Event.ENTRY) @staticmethod def exit(me, state_func): return state_func(me, Event.EXIT) # Other helper functions @staticmethod def handled(me, event): return Hsm.RET_HANDLED @staticmethod def tran(me, nextState): me.state = nextState; return Hsm.RET_TRAN @staticmethod def super(me, superState): me.state = superState; return Hsm.RET_SUPER # p. 158 @state def top(me, event): """This is the default state handler. This handler ignores all signals except the POSIX-like events, SIGINT/SIGTERM. Handling SIGINT/SIGTERM here causes the Exit path to be executed from the application's active state to top/here. The application may put something useful or nothing at all in the Exit path. """ # Handle the Posix-like events to force the HSM # to execute its Exit path all the way to the top if Event.SIGINT == event: return Hsm.RET_HANDLED if Event.SIGTERM == event: return Hsm.RET_HANDLED # All other events are quietly ignored return Hsm.RET_IGNORED # p. 165 @staticmethod def _perform_init_chain(me, current): """Act on the chain of initializations required starting from current. """ t = current while Hsm.trig(me, t if t != Hsm.top else me.initial_state, Signal.INIT) == Hsm.RET_TRAN: # The state handles the INIT message and needs to make a transition. 
The # "top" state is special in that it does not handle INIT messages, so we # defer to me.initial_state in this case path = [] # Trace the path back to t via superstates while me.state != t: path.append(me.state) Hsm.trig(me, me.state, Signal.EMPTY) # Restore the state to the target state me.state = path[0] assert len(path) < 32 # MAX_NEST_DEPTH # Perform ENTRY action for each state from current to the target path.reverse() # in-place for s in path: Hsm.enter(me, s) # The target state has now to be checked to see if it responds to the INIT message t = path[-1] # -1 because path was reversed return t @staticmethod def _perform_transition(me, source, target): # Handle the state transition from source to target in the HSM. s, t = source, target path = [t] if s == t: # Case (a), transition to self Hsm.exit(me,s) Hsm.enter(me,t) else: # Find parent of target Hsm.trig(me, t, Signal.EMPTY) t = me.state # t is now parent of target if s == t: # Case (b), source is parent of target Hsm.enter(me, path[0]) else: # Find parent of source Hsm.trig(me, s, Signal.EMPTY) if me.state == t: # Case (c), source and target share a parent Hsm.exit(me, s) Hsm.enter(me, path[0]) else: if me.state == path[0]: # Case (d), target is parent of source Hsm.exit(me, s) else: # Check if the source is an ancestor of the target (case (e)) lca_found = False path.append(t) # Populates path[1] t = me.state # t is now parent of source # Find and save ancestors of target into path # until we find the source or hit the top me.state = path[1] while me.state != Hsm.top: Hsm.trig(me, me.state, Signal.EMPTY) path.append(me.state) assert len(path) < 32 # MAX_NEST_DEPTH if me.state == s: lca_found = True break if lca_found: # This is case (e), enter states to get to target for st in reversed(path[:-1]): Hsm.enter(me, st) else: Hsm.exit(me, s) # Exit the source for cases (f), (g), (h) me.state = t # Start at parent of the source while me.state not in path: # Keep exiting up into superstates until we reach the LCA. # Depending on whether the EXIT signal is handled, we may also need # to send the EMPTY signal to make me.state climb to the superstate. if Hsm.exit(me, me.state) == Hsm.RET_HANDLED: Hsm.trig(me, me.state, Signal.EMPTY) t = me.state # Step into children until we enter the target for st in reversed(path[:path.index(t)]): Hsm.enter(me, st) @staticmethod def init(me, event = None): """Transitions to the initial state. Follows any INIT transitions from the inital state and performs ENTRY actions as it proceeds. Use this to pass any parameters to initialize the state machine. p. 172 """ # TODO: The initial state MUST transition to another state # The code that formerly did this was: # status = me.initial_state(me, event) # assert status == Hsm.RET_TRAN # But the above code is commented out so an Ahsm's _initial() # isn't executed twice. me.state = Hsm._perform_init_chain(me, Hsm.top) @staticmethod def dispatch(me, event): """Dispatches the given event to this Hsm. Follows the application's state transitions until the event is handled or top() is reached p. 
174 """ Spy.on_hsm_dispatch_event(event) # Save the current state t = me.state # Proceed to superstates if event is not handled, we wish to find the superstate # (if any) that does handle the event and to record the path to that state exit_path = [] r = Hsm.RET_SUPER while r == Hsm.RET_SUPER: s = me.state exit_path.append(s) Spy.on_hsm_dispatch_pre(s) r = s(me, event) # invoke state handler # We leave the while loop with s at the state which was able to respond # to the event, or to Hsm.top if none did Spy.on_hsm_dispatch_post(exit_path) # If the state handler for s requests a transition if r == Hsm.RET_TRAN: t = me.state # Store target of transition # Exit from the current state to the state s which handles # the transition. We do not exit from s=exit_path[-1] itself. for st in exit_path[:-1]: r = Hsm.exit(me, st) assert (r == Hsm.RET_SUPER) or (r == Hsm.RET_HANDLED) s = exit_path[-1] # Transition to t through the HSM Hsm._perform_transition(me, s, t) # Do initializations starting at t t = Hsm._perform_init_chain(me, t) # Restore the state me.state = t class Framework(object): """Framework is a composite class that holds: - the asyncio event loop - the registry of AHSMs - the set of TimeEvents - the handle to the next TimeEvent - the table subscriptions to events """ _event_loop = asyncio.get_event_loop() # The Framework maintains a registry of Ahsms in a list. _ahsm_registry = [] # The Framework maintains a dict of priorities in use # to prevent duplicates. # An Ahsm's priority is checked against this dict # within the Ahsm.start() method # when the Ahsm is added to the Framework. # The dict's key is the priority (integer) and the value is the Ahsm. _priority_dict = {} # The Framework maintains a group of TimeEvents in a dict. The next # expiration of the TimeEvent is the key and the event is the value. # Only the event with the next expiration time is scheduled for the # timeEventCallback(). As TimeEvents are added and removed, the scheduled # callback must be re-evaluated. Periodic TimeEvents should only have # one entry in the dict: the next expiration. The timeEventCallback() will # add a Periodic TimeEvent back into the dict with its next expiration. _time_events = {} # When a TimeEvent is scheduled for the timeEventCallback(), # a handle is kept so that the callback may be cancelled if necessary. _tm_event_handle = None # The Subscriber Table is a dictionary. The keys are signals. # The value for each key is a list of Ahsms that are subscribed to the # signal. An Ahsm may subscribe to a signal at any time during runtime. _subscriber_table = {} @staticmethod def post(event, act): """Posts the event to the given Ahsm's event queue. The argument, act, is an Ahsm instance. """ assert isinstance(act, Ahsm) act.postFIFO(event) @staticmethod def post_by_name(event, act_name): """Posts the event to the given Ahsm's event queue. The argument, act, is a string of the name of the class to which the event is sent. The event will post to all actors having the given classname. """ assert type(act_name) is str for act in Framework._ahsm_registry: if act.__class__.__name__ == act_name: act.postFIFO(event) @staticmethod def publish(event): """Posts the event to the message queue of every Ahsm that is subscribed to the event's signal. 
""" if event.signal in Framework._subscriber_table: for act in Framework._subscriber_table[event.signal]: act.postFIFO(event) # Run to completion Framework._event_loop.call_soon_threadsafe(Framework.run) # MASKED: subscribe function (lines 439-449) @staticmethod def addTimeEvent(tm_event, delta): """Adds the TimeEvent to the list of time events in the Framework. The event will fire its signal (to the TimeEvent's target Ahsm) after the delay, delta. """ expiration = Framework._event_loop.time() + delta Framework.addTimeEventAt(tm_event, expiration) @staticmethod def addTimeEventAt(tm_event, abs_time): """Adds the TimeEvent to the list of time events in the Framework. The event will fire its signal (to the TimeEvent's target Ahsm) at the given absolute time (_event_loop.time()). """ assert tm_event not in Framework._time_events.values() Framework._insortTimeEvent(tm_event, abs_time) @staticmethod def _insortTimeEvent(tm_event, expiration): """Inserts a TimeEvent into the list of time events, sorted by the next expiration of the timer. If the expiration time matches an existing expiration, we add the smallest amount of time to the given expiration to avoid a key collision in the Dict and make the identically-timed events fire in a FIFO fashion. """ # If the event is to happen in the past, post it now now = Framework._event_loop.time() if expiration < now: tm_event.act.postFIFO(tm_event) # TODO: if periodic, need to schedule next? # If an event already occupies this expiration time, # increase this event's expiration by the smallest measurable amount while expiration in Framework._time_events.keys(): m, e = math.frexp(expiration) expiration = (m + sys.float_info.epsilon) * 2**e Framework._time_events[expiration] = tm_event # If this is the only active TimeEvent, schedule its callback if len(Framework._time_events) == 1: Framework._tm_event_handle = Framework._event_loop.call_at( expiration, Framework.timeEventCallback, tm_event, expiration) # If there are other TimeEvents, # check if this one should replace the scheduled one else: if expiration < min(Framework._time_events.keys()): Framework._tm_event_handle.cancel() Framework._tm_event_handle = Framework._event_loop.call_at( expiration, Framework.timeEventCallback, tm_event, expiration) @staticmethod def removeTimeEvent(tm_event): """Removes the TimeEvent from the list of active time events. Cancels the TimeEvent's callback if there is one. Schedules the next event's callback if there is one. """ for k,v in Framework._time_events.items(): if v is tm_event: # If the event being removed is scheduled for callback, # cancel and schedule the next event if there is one if k == min(Framework._time_events.keys()): del Framework._time_events[k] if Framework._tm_event_handle: Framework._tm_event_handle.cancel() if len(Framework._time_events) > 0: next_expiration = min(Framework._time_events.keys()) next_event = Framework._time_events[next_expiration] Framework._tm_event_handle = \ Framework._event_loop.call_at( next_expiration, Framework.timeEventCallback, next_event, next_expiration) else: Framework._tm_event_handle = None else: del Framework._time_events[k] break @staticmethod def timeEventCallback(tm_event, expiration): """The callback function for all TimeEvents. Posts the event to the event's target Ahsm. If the TimeEvent is periodic, re-insort the event in the list of active time events. 
""" assert expiration in Framework._time_events.keys(), ( "Exp:%d _time_events.keys():%s" % (expiration, Framework._time_events.keys())) # Remove this expired TimeEvent from the active list del Framework._time_events[expiration] Framework._tm_event_handle = None # Post the event to the target Ahsm tm_event.act.postFIFO(tm_event) # If this is a periodic time event, schedule its next expiration if tm_event.interval > 0: Framework._insortTimeEvent(tm_event, expiration + tm_event.interval) # If not set already and there are more events, set the next event callback if (Framework._tm_event_handle == None and len(Framework._time_events) > 0): next_expiration = min(Framework._time_events.keys()) next_event = Framework._time_events[next_expiration] Framework._tm_event_handle = Framework._event_loop.call_at( next_expiration, Framework.timeEventCallback, next_event, next_expiration) # Run to completion Framework._event_loop.call_soon_threadsafe(Framework.run) @staticmethod def add(act): """Makes the framework aware of the given Ahsm. """ Framework._ahsm_registry.append(act) assert act.priority not in Framework._priority_dict, ( "Priority MUST be unique") Framework._priority_dict[act.priority] = act Spy.on_framework_add(act) @staticmethod def run(): """Dispatches an event to the highest priority Ahsm until all event queues are empty (i.e. Run To Completion). """ getPriority = lambda x : x.priority while True: allQueuesEmpty = True sorted_acts = sorted(Framework._ahsm_registry, key=getPriority) for act in sorted_acts: if act.has_msgs(): event_next = act.pop_msg() act.dispatch(act, event_next) allQueuesEmpty = False break if allQueuesEmpty: return @staticmethod def stop(): """EXITs all Ahsms and stops the event loop. """ # Disable the timer callback if Framework._tm_event_handle: Framework._tm_event_handle.cancel() Framework._tm_event_handle = None # Post EXIT to all Ahsms for act in Framework._ahsm_registry: Framework.post(Event.EXIT, act) # Run to completion and stop the asyncio event loop Framework.run() Framework._event_loop.stop() Spy.on_framework_stop() @staticmethod def print_info(): """Prints the name and current state of each actor in the framework. Meant to be called when ctrl+T (SIGINFO/29) is issued. """ for act in Framework._ahsm_registry: print(act.__class__.__name__, act.state.__name__) # Bind a useful set of POSIX signals to the handler # (ignore a NotImplementedError on Windows) try: _event_loop.add_signal_handler(signal.SIGINT, lambda: Framework.stop()) _event_loop.add_signal_handler(signal.SIGTERM, lambda: Framework.stop()) _event_loop.add_signal_handler(29, print_info.__func__) except NotImplementedError: pass def run_forever(): """Runs the asyncio event loop with and ensures state machines are exited upon a KeyboardInterrupt. """ loop = asyncio.get_event_loop() try: loop.run_forever() except KeyboardInterrupt: Framework.stop() loop.close() class Ahsm(Hsm): """An Augmented Hierarchical State Machine (AHSM); a.k.a. ActiveObject/AO. Adds a priority, message queue and methods to work with the queue. 
""" def start(self, priority, initEvent=None): # must set the priority before Framework.add() which uses the priority self.priority = priority Framework.add(self) self.mq = collections.deque() self.init(self, initEvent) # Run to completion Framework._event_loop.call_soon_threadsafe(Framework.run) def postLIFO(self, evt): self.mq.append(evt) def postFIFO(self, evt): self.mq.appendleft(evt) def pop_msg(self,): return self.mq.pop() def has_msgs(self,): return len(self.mq) > 0 class TimeEvent(object): """TimeEvent is a composite class that contains an Event. A TimeEvent is created by the application and added to the Framework. The Framework then emits the event after the given delay. A one-shot TimeEvent is created by calling either postAt() or postIn(). A periodic TimeEvent is created by calling the postEvery() method. """ def __init__(self, signame): assert type(signame) == str self.signal = Signal.register(signame) self.value = None def postAt(self, act, abs_time): """Posts this TimeEvent to the given Ahsm at a specified time. """ assert issubclass(type(act), Ahsm) self.act = act self.interval = 0 Framework.addTimeEventAt(self, abs_time) def postIn(self, act, delta): """Posts this TimeEvent to the given Ahsm after the time delta. """ assert issubclass(type(act), Ahsm) self.act = act self.interval = 0 Framework.addTimeEvent(self, delta) def postEvery(self, act, delta): """Posts this TimeEvent to the given Ahsm after the time delta and every time delta thereafter until disarmed. """ assert issubclass(type(act), Ahsm) self.act = act self.interval = delta Framework.addTimeEvent(self, delta) def disarm(self): """Removes this TimeEvent from the Framework's active time events. """ self.act = None Framework.removeTimeEvent(self) from .VcdSpy import VcdSpy
@staticmethod
def subscribe(signame, act):
    """Adds the given Ahsm to the subscriber table list
    for the given signal. The argument, signame, is a string
    of the name of the Signal to which the Ahsm is subscribing.
    Using a string allows the Signal to be created in the registry
    if it is not already.
    """
    sigid = Signal.register(signame)
    if sigid not in Framework._subscriber_table:
        Framework._subscriber_table[sigid] = []
    Framework._subscriber_table[sigid].append(act)
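For orientation, a minimal usage sketch of the publish/subscribe path implemented above (hedged: the actor name Listener and the signal name "TICK" are invented for this example, and the module is assumed to be importable as farc): an Ahsm subscribes during its initial transition, and any Event later published with that signal is posted to its queue and dispatched by the run-to-completion loop.

import farc


class Listener(farc.Ahsm):
    @farc.Hsm.state
    def _initial(me, event):
        # Subscribing by name registers the "TICK" signal if it is not already known
        farc.Framework.subscribe("TICK", me)
        return me.tran(me, Listener._running)

    @farc.Hsm.state
    def _running(me, event):
        sig = event.signal
        if sig == farc.Signal.ENTRY:
            return me.handled(me, event)
        if sig == farc.Signal.TICK:
            print("got TICK with value", event.value)
            return me.handled(me, event)
        return me.super(me, me.top)


if __name__ == "__main__":
    listener = Listener()
    listener.start(priority=1)
    # Every subscriber to TICK receives this event on the next run-to-completion pass
    farc.Framework.publish(farc.Event(farc.Signal.TICK, 42))
    farc.run_forever()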
439
449
import asyncio import collections import math import signal import sys from functools import wraps class Spy(object): """Spy is the debugging system for farc. farc contains a handful of Spy.on_*() methods placed at useful locations in the framework. It is up to a Spy driver (such as the included VcdSpy) to implement the Spy.on_*() methods. The programmer calls Spy.enable_spy(<Spy implementation class>) to activate the Spy system; otherwise, Spy does nothing. Therefore, this class is designed so that calling Spy.anything() is inert unless the application first calls Spy.enable_spy() """ _actv_cls = None @staticmethod def enable_spy(spy_cls): """Sets the Spy to use the given class and calls its initializer. """ Spy._actv_cls = spy_cls spy_cls.init() def __getattr__(*args): """Returns 1) the enable_spy static method if requested by name, or 2) the attribute from the active class (if active class was set), or 3) a function that swallows any arguments and does nothing. """ if args[1] == "enable_spy": return Spy.enable_spy if Spy._actv_cls: return getattr(Spy._actv_cls, args[1]) return lambda *x: None # Singleton pattern: # Turn Spy into an instance of itself so __getattribute__ works # on anyone who calls "import Spy; Spy.foo()" # This prevents Spy() from creating a new instance # and gives everyone who calls "import Spy" the same object Spy = Spy() class Signal(object): """An asynchronous stimulus that triggers reactions. A unique identifier that, along with a value, specifies an Event. p. 154 """ _registry = {} # signame:str to sigid:int _lookup = [] # sigid:int to signame:str @staticmethod def exists(signame): """Returns True if signame is in the Signal registry. """ return signame in Signal._registry @staticmethod def register(signame): """Registers the signame if it is not already registered. Returns the signal number for the signame. """ assert type(signame) is str if signame in Signal._registry: # TODO: emit warning that signal is already registered return Signal._registry[signame] else: sigid = len(Signal._lookup) Signal._registry[signame] = sigid Signal._lookup.append(signame) Spy.on_signal_register(signame, sigid) return sigid def __getattr__(self, signame): assert type(signame) is str return Signal._registry[signame] # Singleton pattern: # Turn Signal into an instance of itself so getattr works. # This also prevents Signal() from creating a new instance. Signal = Signal() # Register the reserved (system) signals Signal.register("EMPTY") # 0 Signal.register("ENTRY") # 1 Signal.register("EXIT") # 2 Signal.register("INIT") # 3 # Signals that mirror POSIX signals Signal.register("SIGINT") # (i.e. Ctrl+C) Signal.register("SIGTERM") # (i.e. kill <pid>) Event = collections.namedtuple("Event", ["signal", "value"]) Event.__doc__ = """Events are a tuple of (signal, value) that are passed from one AHSM to another. Signals are defined in each AHSM's source code by name, but resolve to a unique number. Values are any python value, including containers that contain even more values. Each AHSM state (static method) accepts an Event as the parameter and handles the event based on its Signal.""" # Instantiate the reserved (system) events Event.EMPTY = Event(Signal.EMPTY, None) Event.ENTRY = Event(Signal.ENTRY, None) Event.EXIT = Event(Signal.EXIT, None) Event.INIT = Event(Signal.INIT, None) # Events for POSIX signals Event.SIGINT = Event(Signal.SIGINT, None) # (i.e. Ctrl+C) Event.SIGTERM = Event(Signal.SIGTERM, None) # (i.e. 
kill <pid>) # The order of this tuple MUST match their respective signals Event.reserved = (Event.EMPTY, Event.ENTRY, Event.EXIT, Event.INIT) class Hsm(object): """A Hierarchical State Machine (HSM). Full support for hierarchical state nesting. Guaranteed entry/exit action execution on arbitrary state transitions. Full support of nested initial transitions. Support for events with arbitrary parameters. """ # Every state handler must return one of these values RET_HANDLED = 0 RET_IGNORED = 1 RET_TRAN = 2 RET_SUPER = 3 def __init__(self,): """Sets this Hsm's current state to Hsm.top(), the default state and stores the given initial state. """ # self.state is the Hsm/act's current active state. # This instance variable references the message handler (method) # that will be called whenever a message is sent to this Hsm. # We initialize this to self.top, the default message handler self.state = self.top # Farc differs from QP here in that we hardcode # the initial state to be "_initial" self.initial_state = self._initial def _initial(self, event): """Raises a NotImplementedError to force the derived class to implement its own initial state. """ raise NotImplementedError def state(func): """A decorator that identifies which methods are states. The presence of the farc_state attr, not the value of the attr, determines statehood. The Spy debugging system uses the farc_state attribute to determine which methods inside a class are actually states. Other uses of the attribute may come in the future. """ @wraps(func) def func_wrap(self, evt): result = func(self, evt) Spy.on_state_handler_called(func_wrap, evt, result) return result setattr(func_wrap, "farc_state", True) return staticmethod(func_wrap) # Helper functions to process reserved events through the current state @staticmethod def trig(me, state_func, signal): return state_func(me, Event.reserved[signal]) @staticmethod def enter(me, state_func): return state_func(me, Event.ENTRY) @staticmethod def exit(me, state_func): return state_func(me, Event.EXIT) # Other helper functions @staticmethod def handled(me, event): return Hsm.RET_HANDLED @staticmethod def tran(me, nextState): me.state = nextState; return Hsm.RET_TRAN @staticmethod def super(me, superState): me.state = superState; return Hsm.RET_SUPER # p. 158 @state def top(me, event): """This is the default state handler. This handler ignores all signals except the POSIX-like events, SIGINT/SIGTERM. Handling SIGINT/SIGTERM here causes the Exit path to be executed from the application's active state to top/here. The application may put something useful or nothing at all in the Exit path. """ # Handle the Posix-like events to force the HSM # to execute its Exit path all the way to the top if Event.SIGINT == event: return Hsm.RET_HANDLED if Event.SIGTERM == event: return Hsm.RET_HANDLED # All other events are quietly ignored return Hsm.RET_IGNORED # p. 165 @staticmethod def _perform_init_chain(me, current): """Act on the chain of initializations required starting from current. """ t = current while Hsm.trig(me, t if t != Hsm.top else me.initial_state, Signal.INIT) == Hsm.RET_TRAN: # The state handles the INIT message and needs to make a transition. 
The # "top" state is special in that it does not handle INIT messages, so we # defer to me.initial_state in this case path = [] # Trace the path back to t via superstates while me.state != t: path.append(me.state) Hsm.trig(me, me.state, Signal.EMPTY) # Restore the state to the target state me.state = path[0] assert len(path) < 32 # MAX_NEST_DEPTH # Perform ENTRY action for each state from current to the target path.reverse() # in-place for s in path: Hsm.enter(me, s) # The target state has now to be checked to see if it responds to the INIT message t = path[-1] # -1 because path was reversed return t @staticmethod def _perform_transition(me, source, target): # Handle the state transition from source to target in the HSM. s, t = source, target path = [t] if s == t: # Case (a), transition to self Hsm.exit(me,s) Hsm.enter(me,t) else: # Find parent of target Hsm.trig(me, t, Signal.EMPTY) t = me.state # t is now parent of target if s == t: # Case (b), source is parent of target Hsm.enter(me, path[0]) else: # Find parent of source Hsm.trig(me, s, Signal.EMPTY) if me.state == t: # Case (c), source and target share a parent Hsm.exit(me, s) Hsm.enter(me, path[0]) else: if me.state == path[0]: # Case (d), target is parent of source Hsm.exit(me, s) else: # Check if the source is an ancestor of the target (case (e)) lca_found = False path.append(t) # Populates path[1] t = me.state # t is now parent of source # Find and save ancestors of target into path # until we find the source or hit the top me.state = path[1] while me.state != Hsm.top: Hsm.trig(me, me.state, Signal.EMPTY) path.append(me.state) assert len(path) < 32 # MAX_NEST_DEPTH if me.state == s: lca_found = True break if lca_found: # This is case (e), enter states to get to target for st in reversed(path[:-1]): Hsm.enter(me, st) else: Hsm.exit(me, s) # Exit the source for cases (f), (g), (h) me.state = t # Start at parent of the source while me.state not in path: # Keep exiting up into superstates until we reach the LCA. # Depending on whether the EXIT signal is handled, we may also need # to send the EMPTY signal to make me.state climb to the superstate. if Hsm.exit(me, me.state) == Hsm.RET_HANDLED: Hsm.trig(me, me.state, Signal.EMPTY) t = me.state # Step into children until we enter the target for st in reversed(path[:path.index(t)]): Hsm.enter(me, st) @staticmethod def init(me, event = None): """Transitions to the initial state. Follows any INIT transitions from the inital state and performs ENTRY actions as it proceeds. Use this to pass any parameters to initialize the state machine. p. 172 """ # TODO: The initial state MUST transition to another state # The code that formerly did this was: # status = me.initial_state(me, event) # assert status == Hsm.RET_TRAN # But the above code is commented out so an Ahsm's _initial() # isn't executed twice. me.state = Hsm._perform_init_chain(me, Hsm.top) @staticmethod def dispatch(me, event): """Dispatches the given event to this Hsm. Follows the application's state transitions until the event is handled or top() is reached p. 
174 """ Spy.on_hsm_dispatch_event(event) # Save the current state t = me.state # Proceed to superstates if event is not handled, we wish to find the superstate # (if any) that does handle the event and to record the path to that state exit_path = [] r = Hsm.RET_SUPER while r == Hsm.RET_SUPER: s = me.state exit_path.append(s) Spy.on_hsm_dispatch_pre(s) r = s(me, event) # invoke state handler # We leave the while loop with s at the state which was able to respond # to the event, or to Hsm.top if none did Spy.on_hsm_dispatch_post(exit_path) # If the state handler for s requests a transition if r == Hsm.RET_TRAN: t = me.state # Store target of transition # Exit from the current state to the state s which handles # the transition. We do not exit from s=exit_path[-1] itself. for st in exit_path[:-1]: r = Hsm.exit(me, st) assert (r == Hsm.RET_SUPER) or (r == Hsm.RET_HANDLED) s = exit_path[-1] # Transition to t through the HSM Hsm._perform_transition(me, s, t) # Do initializations starting at t t = Hsm._perform_init_chain(me, t) # Restore the state me.state = t class Framework(object): """Framework is a composite class that holds: - the asyncio event loop - the registry of AHSMs - the set of TimeEvents - the handle to the next TimeEvent - the table subscriptions to events """ _event_loop = asyncio.get_event_loop() # The Framework maintains a registry of Ahsms in a list. _ahsm_registry = [] # The Framework maintains a dict of priorities in use # to prevent duplicates. # An Ahsm's priority is checked against this dict # within the Ahsm.start() method # when the Ahsm is added to the Framework. # The dict's key is the priority (integer) and the value is the Ahsm. _priority_dict = {} # The Framework maintains a group of TimeEvents in a dict. The next # expiration of the TimeEvent is the key and the event is the value. # Only the event with the next expiration time is scheduled for the # timeEventCallback(). As TimeEvents are added and removed, the scheduled # callback must be re-evaluated. Periodic TimeEvents should only have # one entry in the dict: the next expiration. The timeEventCallback() will # add a Periodic TimeEvent back into the dict with its next expiration. _time_events = {} # When a TimeEvent is scheduled for the timeEventCallback(), # a handle is kept so that the callback may be cancelled if necessary. _tm_event_handle = None # The Subscriber Table is a dictionary. The keys are signals. # The value for each key is a list of Ahsms that are subscribed to the # signal. An Ahsm may subscribe to a signal at any time during runtime. _subscriber_table = {} @staticmethod def post(event, act): """Posts the event to the given Ahsm's event queue. The argument, act, is an Ahsm instance. """ assert isinstance(act, Ahsm) act.postFIFO(event) @staticmethod def post_by_name(event, act_name): """Posts the event to the given Ahsm's event queue. The argument, act, is a string of the name of the class to which the event is sent. The event will post to all actors having the given classname. """ assert type(act_name) is str for act in Framework._ahsm_registry: if act.__class__.__name__ == act_name: act.postFIFO(event) @staticmethod def publish(event): """Posts the event to the message queue of every Ahsm that is subscribed to the event's signal. 
""" if event.signal in Framework._subscriber_table: for act in Framework._subscriber_table[event.signal]: act.postFIFO(event) # Run to completion Framework._event_loop.call_soon_threadsafe(Framework.run) @staticmethod def subscribe(signame, act): """Adds the given Ahsm to the subscriber table list for the given signal. The argument, signame, is a string of the name of the Signal to which the Ahsm is subscribing. Using a string allows the Signal to be created in the registry if it is not already. """ sigid = Signal.register(signame) if sigid not in Framework._subscriber_table: Framework._subscriber_table[sigid] = [] Framework._subscriber_table[sigid].append(act) @staticmethod def addTimeEvent(tm_event, delta): """Adds the TimeEvent to the list of time events in the Framework. The event will fire its signal (to the TimeEvent's target Ahsm) after the delay, delta. """ expiration = Framework._event_loop.time() + delta Framework.addTimeEventAt(tm_event, expiration) @staticmethod def addTimeEventAt(tm_event, abs_time): """Adds the TimeEvent to the list of time events in the Framework. The event will fire its signal (to the TimeEvent's target Ahsm) at the given absolute time (_event_loop.time()). """ assert tm_event not in Framework._time_events.values() Framework._insortTimeEvent(tm_event, abs_time) @staticmethod def _insortTimeEvent(tm_event, expiration): """Inserts a TimeEvent into the list of time events, sorted by the next expiration of the timer. If the expiration time matches an existing expiration, we add the smallest amount of time to the given expiration to avoid a key collision in the Dict and make the identically-timed events fire in a FIFO fashion. """ # If the event is to happen in the past, post it now now = Framework._event_loop.time() if expiration < now: tm_event.act.postFIFO(tm_event) # TODO: if periodic, need to schedule next? # If an event already occupies this expiration time, # increase this event's expiration by the smallest measurable amount while expiration in Framework._time_events.keys(): m, e = math.frexp(expiration) expiration = (m + sys.float_info.epsilon) * 2**e Framework._time_events[expiration] = tm_event # If this is the only active TimeEvent, schedule its callback if len(Framework._time_events) == 1: Framework._tm_event_handle = Framework._event_loop.call_at( expiration, Framework.timeEventCallback, tm_event, expiration) # If there are other TimeEvents, # check if this one should replace the scheduled one else: if expiration < min(Framework._time_events.keys()): Framework._tm_event_handle.cancel() Framework._tm_event_handle = Framework._event_loop.call_at( expiration, Framework.timeEventCallback, tm_event, expiration) @staticmethod def removeTimeEvent(tm_event): """Removes the TimeEvent from the list of active time events. Cancels the TimeEvent's callback if there is one. Schedules the next event's callback if there is one. 
""" for k,v in Framework._time_events.items(): if v is tm_event: # If the event being removed is scheduled for callback, # cancel and schedule the next event if there is one if k == min(Framework._time_events.keys()): del Framework._time_events[k] if Framework._tm_event_handle: Framework._tm_event_handle.cancel() if len(Framework._time_events) > 0: next_expiration = min(Framework._time_events.keys()) next_event = Framework._time_events[next_expiration] Framework._tm_event_handle = \ Framework._event_loop.call_at( next_expiration, Framework.timeEventCallback, next_event, next_expiration) else: Framework._tm_event_handle = None else: del Framework._time_events[k] break @staticmethod def timeEventCallback(tm_event, expiration): """The callback function for all TimeEvents. Posts the event to the event's target Ahsm. If the TimeEvent is periodic, re-insort the event in the list of active time events. """ assert expiration in Framework._time_events.keys(), ( "Exp:%d _time_events.keys():%s" % (expiration, Framework._time_events.keys())) # Remove this expired TimeEvent from the active list del Framework._time_events[expiration] Framework._tm_event_handle = None # Post the event to the target Ahsm tm_event.act.postFIFO(tm_event) # If this is a periodic time event, schedule its next expiration if tm_event.interval > 0: Framework._insortTimeEvent(tm_event, expiration + tm_event.interval) # If not set already and there are more events, set the next event callback if (Framework._tm_event_handle == None and len(Framework._time_events) > 0): next_expiration = min(Framework._time_events.keys()) next_event = Framework._time_events[next_expiration] Framework._tm_event_handle = Framework._event_loop.call_at( next_expiration, Framework.timeEventCallback, next_event, next_expiration) # Run to completion Framework._event_loop.call_soon_threadsafe(Framework.run) @staticmethod def add(act): """Makes the framework aware of the given Ahsm. """ Framework._ahsm_registry.append(act) assert act.priority not in Framework._priority_dict, ( "Priority MUST be unique") Framework._priority_dict[act.priority] = act Spy.on_framework_add(act) @staticmethod def run(): """Dispatches an event to the highest priority Ahsm until all event queues are empty (i.e. Run To Completion). """ getPriority = lambda x : x.priority while True: allQueuesEmpty = True sorted_acts = sorted(Framework._ahsm_registry, key=getPriority) for act in sorted_acts: if act.has_msgs(): event_next = act.pop_msg() act.dispatch(act, event_next) allQueuesEmpty = False break if allQueuesEmpty: return @staticmethod def stop(): """EXITs all Ahsms and stops the event loop. """ # Disable the timer callback if Framework._tm_event_handle: Framework._tm_event_handle.cancel() Framework._tm_event_handle = None # Post EXIT to all Ahsms for act in Framework._ahsm_registry: Framework.post(Event.EXIT, act) # Run to completion and stop the asyncio event loop Framework.run() Framework._event_loop.stop() Spy.on_framework_stop() @staticmethod def print_info(): """Prints the name and current state of each actor in the framework. Meant to be called when ctrl+T (SIGINFO/29) is issued. 
""" for act in Framework._ahsm_registry: print(act.__class__.__name__, act.state.__name__) # Bind a useful set of POSIX signals to the handler # (ignore a NotImplementedError on Windows) try: _event_loop.add_signal_handler(signal.SIGINT, lambda: Framework.stop()) _event_loop.add_signal_handler(signal.SIGTERM, lambda: Framework.stop()) _event_loop.add_signal_handler(29, print_info.__func__) except NotImplementedError: pass def run_forever(): """Runs the asyncio event loop with and ensures state machines are exited upon a KeyboardInterrupt. """ loop = asyncio.get_event_loop() try: loop.run_forever() except KeyboardInterrupt: Framework.stop() loop.close() class Ahsm(Hsm): """An Augmented Hierarchical State Machine (AHSM); a.k.a. ActiveObject/AO. Adds a priority, message queue and methods to work with the queue. """ def start(self, priority, initEvent=None): # must set the priority before Framework.add() which uses the priority self.priority = priority Framework.add(self) self.mq = collections.deque() self.init(self, initEvent) # Run to completion Framework._event_loop.call_soon_threadsafe(Framework.run) def postLIFO(self, evt): self.mq.append(evt) def postFIFO(self, evt): self.mq.appendleft(evt) def pop_msg(self,): return self.mq.pop() def has_msgs(self,): return len(self.mq) > 0 class TimeEvent(object): """TimeEvent is a composite class that contains an Event. A TimeEvent is created by the application and added to the Framework. The Framework then emits the event after the given delay. A one-shot TimeEvent is created by calling either postAt() or postIn(). A periodic TimeEvent is created by calling the postEvery() method. """ def __init__(self, signame): assert type(signame) == str self.signal = Signal.register(signame) self.value = None def postAt(self, act, abs_time): """Posts this TimeEvent to the given Ahsm at a specified time. """ assert issubclass(type(act), Ahsm) self.act = act self.interval = 0 Framework.addTimeEventAt(self, abs_time) def postIn(self, act, delta): """Posts this TimeEvent to the given Ahsm after the time delta. """ assert issubclass(type(act), Ahsm) self.act = act self.interval = 0 Framework.addTimeEvent(self, delta) def postEvery(self, act, delta): """Posts this TimeEvent to the given Ahsm after the time delta and every time delta thereafter until disarmed. """ assert issubclass(type(act), Ahsm) self.act = act self.interval = delta Framework.addTimeEvent(self, delta) def disarm(self): """Removes this TimeEvent from the Framework's active time events. """ self.act = None Framework.removeTimeEvent(self) from .VcdSpy import VcdSpy
timeEventCallback
The callback function for all TimeEvents. Posts the event to the event's target Ahsm. If the TimeEvent is periodic, re-insort the event in the list of active time events.
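As a hedged illustration of how this callback is typically exercised from application code (the class Blinker and the signal name "BLINK_TMR" below are hypothetical, and the module is again assumed to be importable as farc): a periodic TimeEvent armed with postEvery() is re-insorted by timeEventCallback() after each expiration until disarm() is called.

import farc


class Blinker(farc.Ahsm):
    @farc.Hsm.state
    def _initial(me, event):
        # The TimeEvent registers its signal from the string name
        me.tmr = farc.TimeEvent("BLINK_TMR")
        return me.tran(me, Blinker._blinking)

    @farc.Hsm.state
    def _blinking(me, event):
        sig = event.signal
        if sig == farc.Signal.ENTRY:
            # Fire BLINK_TMR after 0.5 s and every 0.5 s thereafter;
            # timeEventCallback() re-insorts it because interval > 0
            me.tmr.postEvery(me, 0.5)
            return me.handled(me, event)
        if sig == farc.Signal.BLINK_TMR:
            print("blink")
            return me.handled(me, event)
        if sig == farc.Signal.EXIT:
            me.tmr.disarm()
            return me.handled(me, event)
        return me.super(me, me.top)


if __name__ == "__main__":
    Blinker().start(priority=1)
    farc.run_forever()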
import asyncio import collections import math import signal import sys from functools import wraps class Spy(object): """Spy is the debugging system for farc. farc contains a handful of Spy.on_*() methods placed at useful locations in the framework. It is up to a Spy driver (such as the included VcdSpy) to implement the Spy.on_*() methods. The programmer calls Spy.enable_spy(<Spy implementation class>) to activate the Spy system; otherwise, Spy does nothing. Therefore, this class is designed so that calling Spy.anything() is inert unless the application first calls Spy.enable_spy() """ _actv_cls = None @staticmethod def enable_spy(spy_cls): """Sets the Spy to use the given class and calls its initializer. """ Spy._actv_cls = spy_cls spy_cls.init() def __getattr__(*args): """Returns 1) the enable_spy static method if requested by name, or 2) the attribute from the active class (if active class was set), or 3) a function that swallows any arguments and does nothing. """ if args[1] == "enable_spy": return Spy.enable_spy if Spy._actv_cls: return getattr(Spy._actv_cls, args[1]) return lambda *x: None # Singleton pattern: # Turn Spy into an instance of itself so __getattribute__ works # on anyone who calls "import Spy; Spy.foo()" # This prevents Spy() from creating a new instance # and gives everyone who calls "import Spy" the same object Spy = Spy() class Signal(object): """An asynchronous stimulus that triggers reactions. A unique identifier that, along with a value, specifies an Event. p. 154 """ _registry = {} # signame:str to sigid:int _lookup = [] # sigid:int to signame:str @staticmethod def exists(signame): """Returns True if signame is in the Signal registry. """ return signame in Signal._registry @staticmethod def register(signame): """Registers the signame if it is not already registered. Returns the signal number for the signame. """ assert type(signame) is str if signame in Signal._registry: # TODO: emit warning that signal is already registered return Signal._registry[signame] else: sigid = len(Signal._lookup) Signal._registry[signame] = sigid Signal._lookup.append(signame) Spy.on_signal_register(signame, sigid) return sigid def __getattr__(self, signame): assert type(signame) is str return Signal._registry[signame] # Singleton pattern: # Turn Signal into an instance of itself so getattr works. # This also prevents Signal() from creating a new instance. Signal = Signal() # Register the reserved (system) signals Signal.register("EMPTY") # 0 Signal.register("ENTRY") # 1 Signal.register("EXIT") # 2 Signal.register("INIT") # 3 # Signals that mirror POSIX signals Signal.register("SIGINT") # (i.e. Ctrl+C) Signal.register("SIGTERM") # (i.e. kill <pid>) Event = collections.namedtuple("Event", ["signal", "value"]) Event.__doc__ = """Events are a tuple of (signal, value) that are passed from one AHSM to another. Signals are defined in each AHSM's source code by name, but resolve to a unique number. Values are any python value, including containers that contain even more values. Each AHSM state (static method) accepts an Event as the parameter and handles the event based on its Signal.""" # Instantiate the reserved (system) events Event.EMPTY = Event(Signal.EMPTY, None) Event.ENTRY = Event(Signal.ENTRY, None) Event.EXIT = Event(Signal.EXIT, None) Event.INIT = Event(Signal.INIT, None) # Events for POSIX signals Event.SIGINT = Event(Signal.SIGINT, None) # (i.e. Ctrl+C) Event.SIGTERM = Event(Signal.SIGTERM, None) # (i.e. 
kill <pid>) # The order of this tuple MUST match their respective signals Event.reserved = (Event.EMPTY, Event.ENTRY, Event.EXIT, Event.INIT) class Hsm(object): """A Hierarchical State Machine (HSM). Full support for hierarchical state nesting. Guaranteed entry/exit action execution on arbitrary state transitions. Full support of nested initial transitions. Support for events with arbitrary parameters. """ # Every state handler must return one of these values RET_HANDLED = 0 RET_IGNORED = 1 RET_TRAN = 2 RET_SUPER = 3 def __init__(self,): """Sets this Hsm's current state to Hsm.top(), the default state and stores the given initial state. """ # self.state is the Hsm/act's current active state. # This instance variable references the message handler (method) # that will be called whenever a message is sent to this Hsm. # We initialize this to self.top, the default message handler self.state = self.top # Farc differs from QP here in that we hardcode # the initial state to be "_initial" self.initial_state = self._initial def _initial(self, event): """Raises a NotImplementedError to force the derived class to implement its own initial state. """ raise NotImplementedError def state(func): """A decorator that identifies which methods are states. The presence of the farc_state attr, not the value of the attr, determines statehood. The Spy debugging system uses the farc_state attribute to determine which methods inside a class are actually states. Other uses of the attribute may come in the future. """ @wraps(func) def func_wrap(self, evt): result = func(self, evt) Spy.on_state_handler_called(func_wrap, evt, result) return result setattr(func_wrap, "farc_state", True) return staticmethod(func_wrap) # Helper functions to process reserved events through the current state @staticmethod def trig(me, state_func, signal): return state_func(me, Event.reserved[signal]) @staticmethod def enter(me, state_func): return state_func(me, Event.ENTRY) @staticmethod def exit(me, state_func): return state_func(me, Event.EXIT) # Other helper functions @staticmethod def handled(me, event): return Hsm.RET_HANDLED @staticmethod def tran(me, nextState): me.state = nextState; return Hsm.RET_TRAN @staticmethod def super(me, superState): me.state = superState; return Hsm.RET_SUPER # p. 158 @state def top(me, event): """This is the default state handler. This handler ignores all signals except the POSIX-like events, SIGINT/SIGTERM. Handling SIGINT/SIGTERM here causes the Exit path to be executed from the application's active state to top/here. The application may put something useful or nothing at all in the Exit path. """ # Handle the Posix-like events to force the HSM # to execute its Exit path all the way to the top if Event.SIGINT == event: return Hsm.RET_HANDLED if Event.SIGTERM == event: return Hsm.RET_HANDLED # All other events are quietly ignored return Hsm.RET_IGNORED # p. 165 @staticmethod def _perform_init_chain(me, current): """Act on the chain of initializations required starting from current. """ t = current while Hsm.trig(me, t if t != Hsm.top else me.initial_state, Signal.INIT) == Hsm.RET_TRAN: # The state handles the INIT message and needs to make a transition. 
The # "top" state is special in that it does not handle INIT messages, so we # defer to me.initial_state in this case path = [] # Trace the path back to t via superstates while me.state != t: path.append(me.state) Hsm.trig(me, me.state, Signal.EMPTY) # Restore the state to the target state me.state = path[0] assert len(path) < 32 # MAX_NEST_DEPTH # Perform ENTRY action for each state from current to the target path.reverse() # in-place for s in path: Hsm.enter(me, s) # The target state has now to be checked to see if it responds to the INIT message t = path[-1] # -1 because path was reversed return t @staticmethod def _perform_transition(me, source, target): # Handle the state transition from source to target in the HSM. s, t = source, target path = [t] if s == t: # Case (a), transition to self Hsm.exit(me,s) Hsm.enter(me,t) else: # Find parent of target Hsm.trig(me, t, Signal.EMPTY) t = me.state # t is now parent of target if s == t: # Case (b), source is parent of target Hsm.enter(me, path[0]) else: # Find parent of source Hsm.trig(me, s, Signal.EMPTY) if me.state == t: # Case (c), source and target share a parent Hsm.exit(me, s) Hsm.enter(me, path[0]) else: if me.state == path[0]: # Case (d), target is parent of source Hsm.exit(me, s) else: # Check if the source is an ancestor of the target (case (e)) lca_found = False path.append(t) # Populates path[1] t = me.state # t is now parent of source # Find and save ancestors of target into path # until we find the source or hit the top me.state = path[1] while me.state != Hsm.top: Hsm.trig(me, me.state, Signal.EMPTY) path.append(me.state) assert len(path) < 32 # MAX_NEST_DEPTH if me.state == s: lca_found = True break if lca_found: # This is case (e), enter states to get to target for st in reversed(path[:-1]): Hsm.enter(me, st) else: Hsm.exit(me, s) # Exit the source for cases (f), (g), (h) me.state = t # Start at parent of the source while me.state not in path: # Keep exiting up into superstates until we reach the LCA. # Depending on whether the EXIT signal is handled, we may also need # to send the EMPTY signal to make me.state climb to the superstate. if Hsm.exit(me, me.state) == Hsm.RET_HANDLED: Hsm.trig(me, me.state, Signal.EMPTY) t = me.state # Step into children until we enter the target for st in reversed(path[:path.index(t)]): Hsm.enter(me, st) @staticmethod def init(me, event = None): """Transitions to the initial state. Follows any INIT transitions from the inital state and performs ENTRY actions as it proceeds. Use this to pass any parameters to initialize the state machine. p. 172 """ # TODO: The initial state MUST transition to another state # The code that formerly did this was: # status = me.initial_state(me, event) # assert status == Hsm.RET_TRAN # But the above code is commented out so an Ahsm's _initial() # isn't executed twice. me.state = Hsm._perform_init_chain(me, Hsm.top) @staticmethod def dispatch(me, event): """Dispatches the given event to this Hsm. Follows the application's state transitions until the event is handled or top() is reached p. 
174 """ Spy.on_hsm_dispatch_event(event) # Save the current state t = me.state # Proceed to superstates if event is not handled, we wish to find the superstate # (if any) that does handle the event and to record the path to that state exit_path = [] r = Hsm.RET_SUPER while r == Hsm.RET_SUPER: s = me.state exit_path.append(s) Spy.on_hsm_dispatch_pre(s) r = s(me, event) # invoke state handler # We leave the while loop with s at the state which was able to respond # to the event, or to Hsm.top if none did Spy.on_hsm_dispatch_post(exit_path) # If the state handler for s requests a transition if r == Hsm.RET_TRAN: t = me.state # Store target of transition # Exit from the current state to the state s which handles # the transition. We do not exit from s=exit_path[-1] itself. for st in exit_path[:-1]: r = Hsm.exit(me, st) assert (r == Hsm.RET_SUPER) or (r == Hsm.RET_HANDLED) s = exit_path[-1] # Transition to t through the HSM Hsm._perform_transition(me, s, t) # Do initializations starting at t t = Hsm._perform_init_chain(me, t) # Restore the state me.state = t class Framework(object): """Framework is a composite class that holds: - the asyncio event loop - the registry of AHSMs - the set of TimeEvents - the handle to the next TimeEvent - the table subscriptions to events """ _event_loop = asyncio.get_event_loop() # The Framework maintains a registry of Ahsms in a list. _ahsm_registry = [] # The Framework maintains a dict of priorities in use # to prevent duplicates. # An Ahsm's priority is checked against this dict # within the Ahsm.start() method # when the Ahsm is added to the Framework. # The dict's key is the priority (integer) and the value is the Ahsm. _priority_dict = {} # The Framework maintains a group of TimeEvents in a dict. The next # expiration of the TimeEvent is the key and the event is the value. # Only the event with the next expiration time is scheduled for the # timeEventCallback(). As TimeEvents are added and removed, the scheduled # callback must be re-evaluated. Periodic TimeEvents should only have # one entry in the dict: the next expiration. The timeEventCallback() will # add a Periodic TimeEvent back into the dict with its next expiration. _time_events = {} # When a TimeEvent is scheduled for the timeEventCallback(), # a handle is kept so that the callback may be cancelled if necessary. _tm_event_handle = None # The Subscriber Table is a dictionary. The keys are signals. # The value for each key is a list of Ahsms that are subscribed to the # signal. An Ahsm may subscribe to a signal at any time during runtime. _subscriber_table = {} @staticmethod def post(event, act): """Posts the event to the given Ahsm's event queue. The argument, act, is an Ahsm instance. """ assert isinstance(act, Ahsm) act.postFIFO(event) @staticmethod def post_by_name(event, act_name): """Posts the event to the given Ahsm's event queue. The argument, act, is a string of the name of the class to which the event is sent. The event will post to all actors having the given classname. """ assert type(act_name) is str for act in Framework._ahsm_registry: if act.__class__.__name__ == act_name: act.postFIFO(event) @staticmethod def publish(event): """Posts the event to the message queue of every Ahsm that is subscribed to the event's signal. 
""" if event.signal in Framework._subscriber_table: for act in Framework._subscriber_table[event.signal]: act.postFIFO(event) # Run to completion Framework._event_loop.call_soon_threadsafe(Framework.run) @staticmethod def subscribe(signame, act): """Adds the given Ahsm to the subscriber table list for the given signal. The argument, signame, is a string of the name of the Signal to which the Ahsm is subscribing. Using a string allows the Signal to be created in the registry if it is not already. """ sigid = Signal.register(signame) if sigid not in Framework._subscriber_table: Framework._subscriber_table[sigid] = [] Framework._subscriber_table[sigid].append(act) @staticmethod def addTimeEvent(tm_event, delta): """Adds the TimeEvent to the list of time events in the Framework. The event will fire its signal (to the TimeEvent's target Ahsm) after the delay, delta. """ expiration = Framework._event_loop.time() + delta Framework.addTimeEventAt(tm_event, expiration) @staticmethod def addTimeEventAt(tm_event, abs_time): """Adds the TimeEvent to the list of time events in the Framework. The event will fire its signal (to the TimeEvent's target Ahsm) at the given absolute time (_event_loop.time()). """ assert tm_event not in Framework._time_events.values() Framework._insortTimeEvent(tm_event, abs_time) @staticmethod def _insortTimeEvent(tm_event, expiration): """Inserts a TimeEvent into the list of time events, sorted by the next expiration of the timer. If the expiration time matches an existing expiration, we add the smallest amount of time to the given expiration to avoid a key collision in the Dict and make the identically-timed events fire in a FIFO fashion. """ # If the event is to happen in the past, post it now now = Framework._event_loop.time() if expiration < now: tm_event.act.postFIFO(tm_event) # TODO: if periodic, need to schedule next? # If an event already occupies this expiration time, # increase this event's expiration by the smallest measurable amount while expiration in Framework._time_events.keys(): m, e = math.frexp(expiration) expiration = (m + sys.float_info.epsilon) * 2**e Framework._time_events[expiration] = tm_event # If this is the only active TimeEvent, schedule its callback if len(Framework._time_events) == 1: Framework._tm_event_handle = Framework._event_loop.call_at( expiration, Framework.timeEventCallback, tm_event, expiration) # If there are other TimeEvents, # check if this one should replace the scheduled one else: if expiration < min(Framework._time_events.keys()): Framework._tm_event_handle.cancel() Framework._tm_event_handle = Framework._event_loop.call_at( expiration, Framework.timeEventCallback, tm_event, expiration) @staticmethod def removeTimeEvent(tm_event): """Removes the TimeEvent from the list of active time events. Cancels the TimeEvent's callback if there is one. Schedules the next event's callback if there is one. 
""" for k,v in Framework._time_events.items(): if v is tm_event: # If the event being removed is scheduled for callback, # cancel and schedule the next event if there is one if k == min(Framework._time_events.keys()): del Framework._time_events[k] if Framework._tm_event_handle: Framework._tm_event_handle.cancel() if len(Framework._time_events) > 0: next_expiration = min(Framework._time_events.keys()) next_event = Framework._time_events[next_expiration] Framework._tm_event_handle = \ Framework._event_loop.call_at( next_expiration, Framework.timeEventCallback, next_event, next_expiration) else: Framework._tm_event_handle = None else: del Framework._time_events[k] break # MASKED: timeEventCallback function (lines 538-571) @staticmethod def add(act): """Makes the framework aware of the given Ahsm. """ Framework._ahsm_registry.append(act) assert act.priority not in Framework._priority_dict, ( "Priority MUST be unique") Framework._priority_dict[act.priority] = act Spy.on_framework_add(act) @staticmethod def run(): """Dispatches an event to the highest priority Ahsm until all event queues are empty (i.e. Run To Completion). """ getPriority = lambda x : x.priority while True: allQueuesEmpty = True sorted_acts = sorted(Framework._ahsm_registry, key=getPriority) for act in sorted_acts: if act.has_msgs(): event_next = act.pop_msg() act.dispatch(act, event_next) allQueuesEmpty = False break if allQueuesEmpty: return @staticmethod def stop(): """EXITs all Ahsms and stops the event loop. """ # Disable the timer callback if Framework._tm_event_handle: Framework._tm_event_handle.cancel() Framework._tm_event_handle = None # Post EXIT to all Ahsms for act in Framework._ahsm_registry: Framework.post(Event.EXIT, act) # Run to completion and stop the asyncio event loop Framework.run() Framework._event_loop.stop() Spy.on_framework_stop() @staticmethod def print_info(): """Prints the name and current state of each actor in the framework. Meant to be called when ctrl+T (SIGINFO/29) is issued. """ for act in Framework._ahsm_registry: print(act.__class__.__name__, act.state.__name__) # Bind a useful set of POSIX signals to the handler # (ignore a NotImplementedError on Windows) try: _event_loop.add_signal_handler(signal.SIGINT, lambda: Framework.stop()) _event_loop.add_signal_handler(signal.SIGTERM, lambda: Framework.stop()) _event_loop.add_signal_handler(29, print_info.__func__) except NotImplementedError: pass def run_forever(): """Runs the asyncio event loop with and ensures state machines are exited upon a KeyboardInterrupt. """ loop = asyncio.get_event_loop() try: loop.run_forever() except KeyboardInterrupt: Framework.stop() loop.close() class Ahsm(Hsm): """An Augmented Hierarchical State Machine (AHSM); a.k.a. ActiveObject/AO. Adds a priority, message queue and methods to work with the queue. """ def start(self, priority, initEvent=None): # must set the priority before Framework.add() which uses the priority self.priority = priority Framework.add(self) self.mq = collections.deque() self.init(self, initEvent) # Run to completion Framework._event_loop.call_soon_threadsafe(Framework.run) def postLIFO(self, evt): self.mq.append(evt) def postFIFO(self, evt): self.mq.appendleft(evt) def pop_msg(self,): return self.mq.pop() def has_msgs(self,): return len(self.mq) > 0 class TimeEvent(object): """TimeEvent is a composite class that contains an Event. A TimeEvent is created by the application and added to the Framework. The Framework then emits the event after the given delay. 
A one-shot TimeEvent is created by calling either postAt() or postIn(). A periodic TimeEvent is created by calling the postEvery() method. """ def __init__(self, signame): assert type(signame) == str self.signal = Signal.register(signame) self.value = None def postAt(self, act, abs_time): """Posts this TimeEvent to the given Ahsm at a specified time. """ assert issubclass(type(act), Ahsm) self.act = act self.interval = 0 Framework.addTimeEventAt(self, abs_time) def postIn(self, act, delta): """Posts this TimeEvent to the given Ahsm after the time delta. """ assert issubclass(type(act), Ahsm) self.act = act self.interval = 0 Framework.addTimeEvent(self, delta) def postEvery(self, act, delta): """Posts this TimeEvent to the given Ahsm after the time delta and every time delta thereafter until disarmed. """ assert issubclass(type(act), Ahsm) self.act = act self.interval = delta Framework.addTimeEvent(self, delta) def disarm(self): """Removes this TimeEvent from the Framework's active time events. """ self.act = None Framework.removeTimeEvent(self) from .VcdSpy import VcdSpy
@staticmethod
def timeEventCallback(tm_event, expiration):
    """The callback function for all TimeEvents.
    Posts the event to the event's target Ahsm.
    If the TimeEvent is periodic, re-insort the event
    in the list of active time events.
    """
    assert expiration in Framework._time_events.keys(), (
        "Exp:%d _time_events.keys():%s"
        % (expiration, Framework._time_events.keys()))

    # Remove this expired TimeEvent from the active list
    del Framework._time_events[expiration]
    Framework._tm_event_handle = None

    # Post the event to the target Ahsm
    tm_event.act.postFIFO(tm_event)

    # If this is a periodic time event, schedule its next expiration
    if tm_event.interval > 0:
        Framework._insortTimeEvent(tm_event,
                                   expiration + tm_event.interval)

    # If not set already and there are more events, set the next event callback
    if (Framework._tm_event_handle == None
            and len(Framework._time_events) > 0):
        next_expiration = min(Framework._time_events.keys())
        next_event = Framework._time_events[next_expiration]
        Framework._tm_event_handle = Framework._event_loop.call_at(
            next_expiration, Framework.timeEventCallback, next_event,
            next_expiration)

    # Run to completion
    Framework._event_loop.call_soon_threadsafe(Framework.run)
538
571
import asyncio import collections import math import signal import sys from functools import wraps class Spy(object): """Spy is the debugging system for farc. farc contains a handful of Spy.on_*() methods placed at useful locations in the framework. It is up to a Spy driver (such as the included VcdSpy) to implement the Spy.on_*() methods. The programmer calls Spy.enable_spy(<Spy implementation class>) to activate the Spy system; otherwise, Spy does nothing. Therefore, this class is designed so that calling Spy.anything() is inert unless the application first calls Spy.enable_spy() """ _actv_cls = None @staticmethod def enable_spy(spy_cls): """Sets the Spy to use the given class and calls its initializer. """ Spy._actv_cls = spy_cls spy_cls.init() def __getattr__(*args): """Returns 1) the enable_spy static method if requested by name, or 2) the attribute from the active class (if active class was set), or 3) a function that swallows any arguments and does nothing. """ if args[1] == "enable_spy": return Spy.enable_spy if Spy._actv_cls: return getattr(Spy._actv_cls, args[1]) return lambda *x: None # Singleton pattern: # Turn Spy into an instance of itself so __getattribute__ works # on anyone who calls "import Spy; Spy.foo()" # This prevents Spy() from creating a new instance # and gives everyone who calls "import Spy" the same object Spy = Spy() class Signal(object): """An asynchronous stimulus that triggers reactions. A unique identifier that, along with a value, specifies an Event. p. 154 """ _registry = {} # signame:str to sigid:int _lookup = [] # sigid:int to signame:str @staticmethod def exists(signame): """Returns True if signame is in the Signal registry. """ return signame in Signal._registry @staticmethod def register(signame): """Registers the signame if it is not already registered. Returns the signal number for the signame. """ assert type(signame) is str if signame in Signal._registry: # TODO: emit warning that signal is already registered return Signal._registry[signame] else: sigid = len(Signal._lookup) Signal._registry[signame] = sigid Signal._lookup.append(signame) Spy.on_signal_register(signame, sigid) return sigid def __getattr__(self, signame): assert type(signame) is str return Signal._registry[signame] # Singleton pattern: # Turn Signal into an instance of itself so getattr works. # This also prevents Signal() from creating a new instance. Signal = Signal() # Register the reserved (system) signals Signal.register("EMPTY") # 0 Signal.register("ENTRY") # 1 Signal.register("EXIT") # 2 Signal.register("INIT") # 3 # Signals that mirror POSIX signals Signal.register("SIGINT") # (i.e. Ctrl+C) Signal.register("SIGTERM") # (i.e. kill <pid>) Event = collections.namedtuple("Event", ["signal", "value"]) Event.__doc__ = """Events are a tuple of (signal, value) that are passed from one AHSM to another. Signals are defined in each AHSM's source code by name, but resolve to a unique number. Values are any python value, including containers that contain even more values. Each AHSM state (static method) accepts an Event as the parameter and handles the event based on its Signal.""" # Instantiate the reserved (system) events Event.EMPTY = Event(Signal.EMPTY, None) Event.ENTRY = Event(Signal.ENTRY, None) Event.EXIT = Event(Signal.EXIT, None) Event.INIT = Event(Signal.INIT, None) # Events for POSIX signals Event.SIGINT = Event(Signal.SIGINT, None) # (i.e. Ctrl+C) Event.SIGTERM = Event(Signal.SIGTERM, None) # (i.e. 
kill <pid>) # The order of this tuple MUST match their respective signals Event.reserved = (Event.EMPTY, Event.ENTRY, Event.EXIT, Event.INIT) class Hsm(object): """A Hierarchical State Machine (HSM). Full support for hierarchical state nesting. Guaranteed entry/exit action execution on arbitrary state transitions. Full support of nested initial transitions. Support for events with arbitrary parameters. """ # Every state handler must return one of these values RET_HANDLED = 0 RET_IGNORED = 1 RET_TRAN = 2 RET_SUPER = 3 def __init__(self,): """Sets this Hsm's current state to Hsm.top(), the default state and stores the given initial state. """ # self.state is the Hsm/act's current active state. # This instance variable references the message handler (method) # that will be called whenever a message is sent to this Hsm. # We initialize this to self.top, the default message handler self.state = self.top # Farc differs from QP here in that we hardcode # the initial state to be "_initial" self.initial_state = self._initial def _initial(self, event): """Raises a NotImplementedError to force the derived class to implement its own initial state. """ raise NotImplementedError def state(func): """A decorator that identifies which methods are states. The presence of the farc_state attr, not the value of the attr, determines statehood. The Spy debugging system uses the farc_state attribute to determine which methods inside a class are actually states. Other uses of the attribute may come in the future. """ @wraps(func) def func_wrap(self, evt): result = func(self, evt) Spy.on_state_handler_called(func_wrap, evt, result) return result setattr(func_wrap, "farc_state", True) return staticmethod(func_wrap) # Helper functions to process reserved events through the current state @staticmethod def trig(me, state_func, signal): return state_func(me, Event.reserved[signal]) @staticmethod def enter(me, state_func): return state_func(me, Event.ENTRY) @staticmethod def exit(me, state_func): return state_func(me, Event.EXIT) # Other helper functions @staticmethod def handled(me, event): return Hsm.RET_HANDLED @staticmethod def tran(me, nextState): me.state = nextState; return Hsm.RET_TRAN @staticmethod def super(me, superState): me.state = superState; return Hsm.RET_SUPER # p. 158 @state def top(me, event): """This is the default state handler. This handler ignores all signals except the POSIX-like events, SIGINT/SIGTERM. Handling SIGINT/SIGTERM here causes the Exit path to be executed from the application's active state to top/here. The application may put something useful or nothing at all in the Exit path. """ # Handle the Posix-like events to force the HSM # to execute its Exit path all the way to the top if Event.SIGINT == event: return Hsm.RET_HANDLED if Event.SIGTERM == event: return Hsm.RET_HANDLED # All other events are quietly ignored return Hsm.RET_IGNORED # p. 165 @staticmethod def _perform_init_chain(me, current): """Act on the chain of initializations required starting from current. """ t = current while Hsm.trig(me, t if t != Hsm.top else me.initial_state, Signal.INIT) == Hsm.RET_TRAN: # The state handles the INIT message and needs to make a transition. 
The # "top" state is special in that it does not handle INIT messages, so we # defer to me.initial_state in this case path = [] # Trace the path back to t via superstates while me.state != t: path.append(me.state) Hsm.trig(me, me.state, Signal.EMPTY) # Restore the state to the target state me.state = path[0] assert len(path) < 32 # MAX_NEST_DEPTH # Perform ENTRY action for each state from current to the target path.reverse() # in-place for s in path: Hsm.enter(me, s) # The target state has now to be checked to see if it responds to the INIT message t = path[-1] # -1 because path was reversed return t @staticmethod def _perform_transition(me, source, target): # Handle the state transition from source to target in the HSM. s, t = source, target path = [t] if s == t: # Case (a), transition to self Hsm.exit(me,s) Hsm.enter(me,t) else: # Find parent of target Hsm.trig(me, t, Signal.EMPTY) t = me.state # t is now parent of target if s == t: # Case (b), source is parent of target Hsm.enter(me, path[0]) else: # Find parent of source Hsm.trig(me, s, Signal.EMPTY) if me.state == t: # Case (c), source and target share a parent Hsm.exit(me, s) Hsm.enter(me, path[0]) else: if me.state == path[0]: # Case (d), target is parent of source Hsm.exit(me, s) else: # Check if the source is an ancestor of the target (case (e)) lca_found = False path.append(t) # Populates path[1] t = me.state # t is now parent of source # Find and save ancestors of target into path # until we find the source or hit the top me.state = path[1] while me.state != Hsm.top: Hsm.trig(me, me.state, Signal.EMPTY) path.append(me.state) assert len(path) < 32 # MAX_NEST_DEPTH if me.state == s: lca_found = True break if lca_found: # This is case (e), enter states to get to target for st in reversed(path[:-1]): Hsm.enter(me, st) else: Hsm.exit(me, s) # Exit the source for cases (f), (g), (h) me.state = t # Start at parent of the source while me.state not in path: # Keep exiting up into superstates until we reach the LCA. # Depending on whether the EXIT signal is handled, we may also need # to send the EMPTY signal to make me.state climb to the superstate. if Hsm.exit(me, me.state) == Hsm.RET_HANDLED: Hsm.trig(me, me.state, Signal.EMPTY) t = me.state # Step into children until we enter the target for st in reversed(path[:path.index(t)]): Hsm.enter(me, st) @staticmethod def init(me, event = None): """Transitions to the initial state. Follows any INIT transitions from the inital state and performs ENTRY actions as it proceeds. Use this to pass any parameters to initialize the state machine. p. 172 """ # TODO: The initial state MUST transition to another state # The code that formerly did this was: # status = me.initial_state(me, event) # assert status == Hsm.RET_TRAN # But the above code is commented out so an Ahsm's _initial() # isn't executed twice. me.state = Hsm._perform_init_chain(me, Hsm.top) @staticmethod def dispatch(me, event): """Dispatches the given event to this Hsm. Follows the application's state transitions until the event is handled or top() is reached p. 
174 """ Spy.on_hsm_dispatch_event(event) # Save the current state t = me.state # Proceed to superstates if event is not handled, we wish to find the superstate # (if any) that does handle the event and to record the path to that state exit_path = [] r = Hsm.RET_SUPER while r == Hsm.RET_SUPER: s = me.state exit_path.append(s) Spy.on_hsm_dispatch_pre(s) r = s(me, event) # invoke state handler # We leave the while loop with s at the state which was able to respond # to the event, or to Hsm.top if none did Spy.on_hsm_dispatch_post(exit_path) # If the state handler for s requests a transition if r == Hsm.RET_TRAN: t = me.state # Store target of transition # Exit from the current state to the state s which handles # the transition. We do not exit from s=exit_path[-1] itself. for st in exit_path[:-1]: r = Hsm.exit(me, st) assert (r == Hsm.RET_SUPER) or (r == Hsm.RET_HANDLED) s = exit_path[-1] # Transition to t through the HSM Hsm._perform_transition(me, s, t) # Do initializations starting at t t = Hsm._perform_init_chain(me, t) # Restore the state me.state = t class Framework(object): """Framework is a composite class that holds: - the asyncio event loop - the registry of AHSMs - the set of TimeEvents - the handle to the next TimeEvent - the table subscriptions to events """ _event_loop = asyncio.get_event_loop() # The Framework maintains a registry of Ahsms in a list. _ahsm_registry = [] # The Framework maintains a dict of priorities in use # to prevent duplicates. # An Ahsm's priority is checked against this dict # within the Ahsm.start() method # when the Ahsm is added to the Framework. # The dict's key is the priority (integer) and the value is the Ahsm. _priority_dict = {} # The Framework maintains a group of TimeEvents in a dict. The next # expiration of the TimeEvent is the key and the event is the value. # Only the event with the next expiration time is scheduled for the # timeEventCallback(). As TimeEvents are added and removed, the scheduled # callback must be re-evaluated. Periodic TimeEvents should only have # one entry in the dict: the next expiration. The timeEventCallback() will # add a Periodic TimeEvent back into the dict with its next expiration. _time_events = {} # When a TimeEvent is scheduled for the timeEventCallback(), # a handle is kept so that the callback may be cancelled if necessary. _tm_event_handle = None # The Subscriber Table is a dictionary. The keys are signals. # The value for each key is a list of Ahsms that are subscribed to the # signal. An Ahsm may subscribe to a signal at any time during runtime. _subscriber_table = {} @staticmethod def post(event, act): """Posts the event to the given Ahsm's event queue. The argument, act, is an Ahsm instance. """ assert isinstance(act, Ahsm) act.postFIFO(event) @staticmethod def post_by_name(event, act_name): """Posts the event to the given Ahsm's event queue. The argument, act, is a string of the name of the class to which the event is sent. The event will post to all actors having the given classname. """ assert type(act_name) is str for act in Framework._ahsm_registry: if act.__class__.__name__ == act_name: act.postFIFO(event) @staticmethod def publish(event): """Posts the event to the message queue of every Ahsm that is subscribed to the event's signal. 
""" if event.signal in Framework._subscriber_table: for act in Framework._subscriber_table[event.signal]: act.postFIFO(event) # Run to completion Framework._event_loop.call_soon_threadsafe(Framework.run) @staticmethod def subscribe(signame, act): """Adds the given Ahsm to the subscriber table list for the given signal. The argument, signame, is a string of the name of the Signal to which the Ahsm is subscribing. Using a string allows the Signal to be created in the registry if it is not already. """ sigid = Signal.register(signame) if sigid not in Framework._subscriber_table: Framework._subscriber_table[sigid] = [] Framework._subscriber_table[sigid].append(act) @staticmethod def addTimeEvent(tm_event, delta): """Adds the TimeEvent to the list of time events in the Framework. The event will fire its signal (to the TimeEvent's target Ahsm) after the delay, delta. """ expiration = Framework._event_loop.time() + delta Framework.addTimeEventAt(tm_event, expiration) @staticmethod def addTimeEventAt(tm_event, abs_time): """Adds the TimeEvent to the list of time events in the Framework. The event will fire its signal (to the TimeEvent's target Ahsm) at the given absolute time (_event_loop.time()). """ assert tm_event not in Framework._time_events.values() Framework._insortTimeEvent(tm_event, abs_time) @staticmethod def _insortTimeEvent(tm_event, expiration): """Inserts a TimeEvent into the list of time events, sorted by the next expiration of the timer. If the expiration time matches an existing expiration, we add the smallest amount of time to the given expiration to avoid a key collision in the Dict and make the identically-timed events fire in a FIFO fashion. """ # If the event is to happen in the past, post it now now = Framework._event_loop.time() if expiration < now: tm_event.act.postFIFO(tm_event) # TODO: if periodic, need to schedule next? # If an event already occupies this expiration time, # increase this event's expiration by the smallest measurable amount while expiration in Framework._time_events.keys(): m, e = math.frexp(expiration) expiration = (m + sys.float_info.epsilon) * 2**e Framework._time_events[expiration] = tm_event # If this is the only active TimeEvent, schedule its callback if len(Framework._time_events) == 1: Framework._tm_event_handle = Framework._event_loop.call_at( expiration, Framework.timeEventCallback, tm_event, expiration) # If there are other TimeEvents, # check if this one should replace the scheduled one else: if expiration < min(Framework._time_events.keys()): Framework._tm_event_handle.cancel() Framework._tm_event_handle = Framework._event_loop.call_at( expiration, Framework.timeEventCallback, tm_event, expiration) @staticmethod def removeTimeEvent(tm_event): """Removes the TimeEvent from the list of active time events. Cancels the TimeEvent's callback if there is one. Schedules the next event's callback if there is one. 
""" for k,v in Framework._time_events.items(): if v is tm_event: # If the event being removed is scheduled for callback, # cancel and schedule the next event if there is one if k == min(Framework._time_events.keys()): del Framework._time_events[k] if Framework._tm_event_handle: Framework._tm_event_handle.cancel() if len(Framework._time_events) > 0: next_expiration = min(Framework._time_events.keys()) next_event = Framework._time_events[next_expiration] Framework._tm_event_handle = \ Framework._event_loop.call_at( next_expiration, Framework.timeEventCallback, next_event, next_expiration) else: Framework._tm_event_handle = None else: del Framework._time_events[k] break @staticmethod def timeEventCallback(tm_event, expiration): """The callback function for all TimeEvents. Posts the event to the event's target Ahsm. If the TimeEvent is periodic, re-insort the event in the list of active time events. """ assert expiration in Framework._time_events.keys(), ( "Exp:%d _time_events.keys():%s" % (expiration, Framework._time_events.keys())) # Remove this expired TimeEvent from the active list del Framework._time_events[expiration] Framework._tm_event_handle = None # Post the event to the target Ahsm tm_event.act.postFIFO(tm_event) # If this is a periodic time event, schedule its next expiration if tm_event.interval > 0: Framework._insortTimeEvent(tm_event, expiration + tm_event.interval) # If not set already and there are more events, set the next event callback if (Framework._tm_event_handle == None and len(Framework._time_events) > 0): next_expiration = min(Framework._time_events.keys()) next_event = Framework._time_events[next_expiration] Framework._tm_event_handle = Framework._event_loop.call_at( next_expiration, Framework.timeEventCallback, next_event, next_expiration) # Run to completion Framework._event_loop.call_soon_threadsafe(Framework.run) @staticmethod def add(act): """Makes the framework aware of the given Ahsm. """ Framework._ahsm_registry.append(act) assert act.priority not in Framework._priority_dict, ( "Priority MUST be unique") Framework._priority_dict[act.priority] = act Spy.on_framework_add(act) @staticmethod def run(): """Dispatches an event to the highest priority Ahsm until all event queues are empty (i.e. Run To Completion). """ getPriority = lambda x : x.priority while True: allQueuesEmpty = True sorted_acts = sorted(Framework._ahsm_registry, key=getPriority) for act in sorted_acts: if act.has_msgs(): event_next = act.pop_msg() act.dispatch(act, event_next) allQueuesEmpty = False break if allQueuesEmpty: return @staticmethod def stop(): """EXITs all Ahsms and stops the event loop. """ # Disable the timer callback if Framework._tm_event_handle: Framework._tm_event_handle.cancel() Framework._tm_event_handle = None # Post EXIT to all Ahsms for act in Framework._ahsm_registry: Framework.post(Event.EXIT, act) # Run to completion and stop the asyncio event loop Framework.run() Framework._event_loop.stop() Spy.on_framework_stop() @staticmethod def print_info(): """Prints the name and current state of each actor in the framework. Meant to be called when ctrl+T (SIGINFO/29) is issued. 
""" for act in Framework._ahsm_registry: print(act.__class__.__name__, act.state.__name__) # Bind a useful set of POSIX signals to the handler # (ignore a NotImplementedError on Windows) try: _event_loop.add_signal_handler(signal.SIGINT, lambda: Framework.stop()) _event_loop.add_signal_handler(signal.SIGTERM, lambda: Framework.stop()) _event_loop.add_signal_handler(29, print_info.__func__) except NotImplementedError: pass def run_forever(): """Runs the asyncio event loop with and ensures state machines are exited upon a KeyboardInterrupt. """ loop = asyncio.get_event_loop() try: loop.run_forever() except KeyboardInterrupt: Framework.stop() loop.close() class Ahsm(Hsm): """An Augmented Hierarchical State Machine (AHSM); a.k.a. ActiveObject/AO. Adds a priority, message queue and methods to work with the queue. """ def start(self, priority, initEvent=None): # must set the priority before Framework.add() which uses the priority self.priority = priority Framework.add(self) self.mq = collections.deque() self.init(self, initEvent) # Run to completion Framework._event_loop.call_soon_threadsafe(Framework.run) def postLIFO(self, evt): self.mq.append(evt) def postFIFO(self, evt): self.mq.appendleft(evt) def pop_msg(self,): return self.mq.pop() def has_msgs(self,): return len(self.mq) > 0 class TimeEvent(object): """TimeEvent is a composite class that contains an Event. A TimeEvent is created by the application and added to the Framework. The Framework then emits the event after the given delay. A one-shot TimeEvent is created by calling either postAt() or postIn(). A periodic TimeEvent is created by calling the postEvery() method. """ def __init__(self, signame): assert type(signame) == str self.signal = Signal.register(signame) self.value = None def postAt(self, act, abs_time): """Posts this TimeEvent to the given Ahsm at a specified time. """ assert issubclass(type(act), Ahsm) self.act = act self.interval = 0 Framework.addTimeEventAt(self, abs_time) def postIn(self, act, delta): """Posts this TimeEvent to the given Ahsm after the time delta. """ assert issubclass(type(act), Ahsm) self.act = act self.interval = 0 Framework.addTimeEvent(self, delta) def postEvery(self, act, delta): """Posts this TimeEvent to the given Ahsm after the time delta and every time delta thereafter until disarmed. """ assert issubclass(type(act), Ahsm) self.act = act self.interval = delta Framework.addTimeEvent(self, delta) def disarm(self): """Removes this TimeEvent from the Framework's active time events. """ self.act = None Framework.removeTimeEvent(self) from .VcdSpy import VcdSpy
_apply_relativistic_doppler_shift
Given a `SpectralQuantity` and a velocity, return a new `SpectralQuantity`
that is Doppler shifted by this amount.

Note that the Doppler shift applied is the full relativistic one, so a
`SpectralQuantity` currently expressed in velocity and not using the
relativistic convention will temporarily be converted to use the
relativistic convention while the shift is applied.

Positive velocities are assumed to redshift the spectral quantity, while
negative velocities blueshift the spectral quantity.
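A short, standalone numeric illustration of the relativistic Doppler factor the docstring refers to, factor = sqrt((1 + beta) / (1 - beta)) with beta = v/c; the 30,000 km/s velocity and the H-alpha rest wavelength are arbitrary example values, not taken from the module.

# Worked example of the relativistic Doppler factor (positive velocity -> redshift).
import numpy as np
import astropy.units as u
from astropy.constants import c

velocity = 30_000 * u.km / u.s                     # ~0.1 c, receding target
beta = (velocity / c).decompose()
doppler_factor = np.sqrt((1 + beta) / (1 - beta))  # ~1.106

lam_rest = 656.3 * u.nm                            # H-alpha rest wavelength
lam_obs = lam_rest * doppler_factor                # redshifted to ~726 nm
freq_obs = lam_rest.to(u.THz, u.spectral()) / doppler_factor  # lowered frequency

print(doppler_factor, lam_obs, freq_obs)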
import warnings from textwrap import indent import astropy.units as u import numpy as np from astropy.constants import c from astropy.coordinates import (ICRS, CartesianDifferential, CartesianRepresentation, SkyCoord) from astropy.coordinates.spectral_quantity import SpectralQuantity from astropy.coordinates.baseframe import (BaseCoordinateFrame, frame_transform_graph) from astropy.utils.exceptions import AstropyUserWarning __all__ = ['SpectralCoord'] class NoVelocityWarning(AstropyUserWarning): pass class NoDistanceWarning(AstropyUserWarning): pass KMS = u.km / u.s C_KMS = c.to(KMS) ZERO_VELOCITIES = CartesianDifferential([0, 0, 0] * KMS) # Default distance to use for target when none is provided DEFAULT_DISTANCE = 1e6 * u.kpc # We don't want to run doctests in the docstrings we inherit from Quantity __doctest_skip__ = ['SpectralCoord.*'] def _velocity_to_redshift(velocity): """ Convert a velocity to a relativistic redshift. """ beta = velocity / C_KMS return np.sqrt((1 + beta) / (1 - beta)) - 1 def _redshift_to_velocity(redshift): """ Convert a relativistic redshift to a velocity. """ zponesq = (1 + redshift) ** 2 return (C_KMS * (zponesq - 1) / (zponesq + 1)) # MASKED: _apply_relativistic_doppler_shift function (lines 53-85) def update_differentials_to_match(original, velocity_reference, preserve_observer_frame=False): """ Given an original coordinate object, update the differentials so that the final coordinate is at the same location as the original coordinate but co-moving with the velocity reference object. If preserve_original_frame is set to True, the resulting object will be in the frame of the original coordinate, otherwise it will be in the frame of the velocity reference. """ if not velocity_reference.data.differentials: raise ValueError("Reference frame has no velocities") # If the reference has an obstime already defined, we should ignore # it and stick with the original observer obstime. if 'obstime' in velocity_reference.frame_attributes and hasattr(original, 'obstime'): velocity_reference = velocity_reference.replicate(obstime=original.obstime) # We transform both coordinates to ICRS for simplicity and because we know # it's a simple frame that is not time-dependent (it could be that both # the original and velocity_reference frame are time-dependent) original_icrs = original.transform_to(ICRS()) velocity_reference_icrs = velocity_reference.transform_to(ICRS()) differentials = velocity_reference_icrs.data.represent_as(CartesianRepresentation, CartesianDifferential).differentials data_with_differentials = (original_icrs.data.represent_as(CartesianRepresentation) .with_differentials(differentials)) final_icrs = original_icrs.realize_frame(data_with_differentials) if preserve_observer_frame: final = final_icrs.transform_to(original) else: final = final_icrs.transform_to(velocity_reference) return final.replicate(representation_type=CartesianRepresentation, differential_type=CartesianDifferential) def attach_zero_velocities(coord): """ Set the differentials to be stationary on a coordinate object. """ new_data = coord.cartesian.with_differentials(ZERO_VELOCITIES) return coord.realize_frame(new_data) def _get_velocities(coord): if 's' in coord.data.differentials: return coord.velocity else: return ZERO_VELOCITIES class SpectralCoord(SpectralQuantity): """ A spectral coordinate with its corresponding unit. .. note:: The |SpectralCoord| class is new in Astropy v4.1 and should be considered experimental at this time. 
Note that we do not fully support cases where the observer and target are moving relativistically relative to each other, so care should be taken in those cases. It is possible that there will be API changes in future versions of Astropy based on user feedback. If you have specific ideas for how it might be improved, please let us know on the `astropy-dev mailing list`_ or at http://feedback.astropy.org. Parameters ---------- value : ndarray or `~astropy.units.Quantity` or `SpectralCoord` Spectral values, which should be either wavelength, frequency, energy, wavenumber, or velocity values. unit : str or `~astropy.units.Unit` Unit for the given spectral values. observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional The coordinate (position and velocity) of observer. If no velocities are present on this object, the observer is assumed to be stationary relative to the frame origin. target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional The coordinate (position and velocity) of target. If no velocities are present on this object, the target is assumed to be stationary relative to the frame origin. radial_velocity : `~astropy.units.Quantity`, optional The radial velocity of the target with respect to the observer. This can only be specified if ``redshift`` is not specified. redshift : float, optional The relativistic redshift of the target with respect to the observer. This can only be specified if ``radial_velocity`` cannot be specified. doppler_rest : `~astropy.units.Quantity`, optional The rest value to use when expressing the spectral value as a velocity. doppler_convention : str, optional The Doppler convention to use when expressing the spectral value as a velocity. """ @u.quantity_input(radial_velocity=u.km/u.s) def __new__(cls, value, unit=None, observer=None, target=None, radial_velocity=None, redshift=None, **kwargs): obj = super().__new__(cls, value, unit=unit, **kwargs) # There are two main modes of operation in this class. Either the # observer and target are both defined, in which case the radial # velocity and redshift are automatically computed from these, or # only one of the observer and target are specified, along with a # manually specified radial velocity or redshift. So if a target and # observer are both specified, we can't also accept a radial velocity # or redshift. if target is not None and observer is not None: if radial_velocity is not None or redshift is not None: raise ValueError("Cannot specify radial velocity or redshift if both " "target and observer are specified") # We only deal with redshifts here and in the redshift property. # Otherwise internally we always deal with velocities. 
if redshift is not None: if radial_velocity is not None: raise ValueError("Cannot set both a radial velocity and redshift") redshift = u.Quantity(redshift) # For now, we can't specify redshift=u.one in quantity_input above # and have it work with plain floats, but if that is fixed, for # example as in https://github.com/astropy/astropy/pull/10232, we # can remove the check here and add redshift=u.one to the decorator if not redshift.unit.is_equivalent(u.one): raise u.UnitsError('redshift should be dimensionless') radial_velocity = _redshift_to_velocity(redshift) # If we're initializing from an existing SpectralCoord, keep any # parameters that aren't being overridden if observer is None: observer = getattr(value, 'observer', None) if target is None: target = getattr(value, 'target', None) # As mentioned above, we should only specify the radial velocity # manually if either or both the observer and target are not # specified. if observer is None or target is None: if radial_velocity is None: radial_velocity = getattr(value, 'radial_velocity', None) obj._radial_velocity = radial_velocity obj._observer = cls._validate_coordinate(observer, label='observer') obj._target = cls._validate_coordinate(target, label='target') return obj def __array_finalize__(self, obj): super().__array_finalize__(obj) self._radial_velocity = getattr(obj, '_radial_velocity', None) self._observer = getattr(obj, '_observer', None) self._target = getattr(obj, '_target', None) @staticmethod def _validate_coordinate(coord, label=''): """ Checks the type of the frame and whether a velocity differential and a distance has been defined on the frame object. If no distance is defined, the target is assumed to be "really far away", and the observer is assumed to be "in the solar system". Parameters ---------- coord : `~astropy.coordinates.BaseCoordinateFrame` The new frame to be used for target or observer. label : str, optional The name of the object being validated (e.g. 'target' or 'observer'), which is then used in error messages. """ if coord is None: return if not issubclass(coord.__class__, BaseCoordinateFrame): if isinstance(coord, SkyCoord): coord = coord.frame else: raise TypeError(f"{label} must be a SkyCoord or coordinate frame instance") # If the distance is not well-defined, ensure that it works properly # for generating differentials # TODO: change this to not set the distance and yield a warning once # there's a good way to address this in astropy.coordinates # https://github.com/astropy/astropy/issues/10247 with np.errstate(all='ignore'): distance = getattr(coord, 'distance', None) if distance is not None and distance.unit.physical_type == 'dimensionless': coord = SkyCoord(coord, distance=DEFAULT_DISTANCE) warnings.warn( "Distance on coordinate object is dimensionless, an " f"abritrary distance value of {DEFAULT_DISTANCE} will be set instead.", NoDistanceWarning) # If the observer frame does not contain information about the # velocity of the system, assume that the velocity is zero in the # system. if 's' not in coord.data.differentials: warnings.warn( "No velocity defined on frame, assuming {}.".format( ZERO_VELOCITIES), NoVelocityWarning) coord = attach_zero_velocities(coord) return coord def replicate(self, value=None, unit=None, observer=None, target=None, radial_velocity=None, redshift=None, doppler_convention=None, doppler_rest=None, copy=False): """ Return a replica of the `SpectralCoord`, optionally changing the values or attributes. 
Note that no conversion is carried out by this method - this keeps all the values and attributes the same, except for the ones explicitly passed to this method which are changed. If ``copy`` is set to `True` then a full copy of the internal arrays will be made. By default the replica will use a reference to the original arrays when possible to save memory. Parameters ---------- value : ndarray or `~astropy.units.Quantity` or `SpectralCoord`, optional Spectral values, which should be either wavelength, frequency, energy, wavenumber, or velocity values. unit : str or `~astropy.units.Unit` Unit for the given spectral values. observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional The coordinate (position and velocity) of observer. target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional The coordinate (position and velocity) of target. radial_velocity : `~astropy.units.Quantity`, optional The radial velocity of the target with respect to the observer. redshift : float, optional The relativistic redshift of the target with respect to the observer. doppler_rest : `~astropy.units.Quantity`, optional The rest value to use when expressing the spectral value as a velocity. doppler_convention : str, optional The Doppler convention to use when expressing the spectral value as a velocity. copy : bool, optional If `True`, and ``value`` is not specified, the values are copied to the new `SkyCoord` - otherwise a reference to the same values is used. Returns ------- sc : `SpectralCoord` object Replica of this object """ if isinstance(value, u.Quantity): if unit is not None: raise ValueError("Cannot specify value as a Quantity and also specify unit") else: value, unit = value.value, value.unit value = value if value is not None else self.value unit = unit or self.unit observer = self._validate_coordinate(observer) or self.observer target = self._validate_coordinate(target) or self.target doppler_convention = doppler_convention or self.doppler_convention doppler_rest = doppler_rest or self.doppler_rest # If value is being taken from self and copy is Tru if copy: value = value.copy() # Only include radial_velocity if it is not auto-computed from the # observer and target. if (self.observer is None or self.target is None) and radial_velocity is None and redshift is None: radial_velocity = self.radial_velocity with warnings.catch_warnings(): warnings.simplefilter('ignore', NoVelocityWarning) return self.__class__(value=value, unit=unit, observer=observer, target=target, radial_velocity=radial_velocity, redshift=redshift, doppler_convention=doppler_convention, doppler_rest=doppler_rest, copy=False) @property def quantity(self): """ Convert the ``SpectralCoord`` to a `~astropy.units.Quantity`. Equivalent to ``self.view(u.Quantity)``. Returns ------- `~astropy.units.Quantity` This object viewed as a `~astropy.units.Quantity`. """ return self.view(u.Quantity) @property def observer(self): """ The coordinates of the observer. If set, and a target is set as well, this will override any explicit radial velocity passed in. Returns ------- `~astropy.coordinates.BaseCoordinateFrame` The astropy coordinate frame representing the observation. 
""" return self._observer @observer.setter def observer(self, value): if self.observer is not None: raise ValueError("observer has already been set") self._observer = self._validate_coordinate(value, label='observer') # Switch to auto-computing radial velocity if self._target is not None: self._radial_velocity = None @property def target(self): """ The coordinates of the target being observed. If set, and an observer is set as well, this will override any explicit radial velocity passed in. Returns ------- `~astropy.coordinates.BaseCoordinateFrame` The astropy coordinate frame representing the target. """ return self._target @target.setter def target(self, value): if self.target is not None: raise ValueError("target has already been set") self._target = self._validate_coordinate(value, label='target') # Switch to auto-computing radial velocity if self._observer is not None: self._radial_velocity = None @property def radial_velocity(self): """ Radial velocity of target relative to the observer. Returns ------- `~astropy.units.Quantity` Radial velocity of target. Notes ----- This is different from the ``.radial_velocity`` property of a coordinate frame in that this calculates the radial velocity with respect to the *observer*, not the origin of the frame. """ if self._observer is None or self._target is None: if self._radial_velocity is None: return 0 * KMS else: return self._radial_velocity else: return self._calculate_radial_velocity(self._observer, self._target, as_scalar=True) @property def redshift(self): """ Redshift of target relative to observer. Calculated from the radial velocity. Returns ------- float Redshift of target. """ return _velocity_to_redshift(self.radial_velocity) @staticmethod def _calculate_radial_velocity(observer, target, as_scalar=False): """ Compute the line-of-sight velocity from the observer to the target. Parameters ---------- observer : `~astropy.coordinates.BaseCoordinateFrame` The frame of the observer. target : `~astropy.coordinates.BaseCoordinateFrame` The frame of the target. as_scalar : bool If `True`, the magnitude of the velocity vector will be returned, otherwise the full vector will be returned. Returns ------- `~astropy.units.Quantity` The radial velocity of the target with respect to the observer. """ # Convert observer and target to ICRS to avoid finite differencing # calculations that lack numerical precision. observer_icrs = observer.transform_to(ICRS()) target_icrs = target.transform_to(ICRS()) pos_hat = SpectralCoord._normalized_position_vector(observer_icrs, target_icrs) d_vel = target_icrs.velocity - observer_icrs.velocity vel_mag = pos_hat.dot(d_vel) if as_scalar: return vel_mag else: return vel_mag * pos_hat @staticmethod def _normalized_position_vector(observer, target): """ Calculate the normalized position vector between two frames. Parameters ---------- observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The observation frame or coordinate. target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The target frame or coordinate. Returns ------- pos_hat : `BaseRepresentation` Position representation. 
""" d_pos = (target.cartesian.without_differentials() - observer.cartesian.without_differentials()) dp_norm = d_pos.norm() # Reset any that are 0 to 1 to avoid nans from 0/0 dp_norm[dp_norm == 0] = 1 * dp_norm.unit pos_hat = d_pos / dp_norm return pos_hat @u.quantity_input(velocity=u.km/u.s) def with_observer_stationary_relative_to(self, frame, velocity=None, preserve_observer_frame=False): """ A new `SpectralCoord` with the velocity of the observer altered, but not the position. If a coordinate frame is specified, the observer velocities will be modified to be stationary in the specified frame. If a coordinate instance is specified, optionally with non-zero velocities, the observer velocities will be updated so that the observer is co-moving with the specified coordinates. Parameters ---------- frame : str, `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The observation frame in which the observer will be stationary. This can be the name of a frame (e.g. 'icrs'), a frame class, frame instance with no data, or instance with data. This can optionally include velocities. velocity : `~astropy.units.Quantity` or `~astropy.coordinates.CartesianDifferential`, optional If ``frame`` does not contain velocities, these can be specified as a 3-element `~astropy.units.Quantity`. In the case where this is also not specified, the velocities default to zero. preserve_observer_frame : bool If `True`, the final observer frame class will be the same as the original one, and if `False` it will be the frame of the velocity reference class. Returns ------- new_coord : `SpectralCoord` The new coordinate object representing the spectral data transformed based on the observer's new velocity frame. """ if self.observer is None or self.target is None: raise ValueError("This method can only be used if both observer " "and target are defined on the SpectralCoord.") # Start off by extracting frame if a SkyCoord was passed in if isinstance(frame, SkyCoord): frame = frame.frame if isinstance(frame, BaseCoordinateFrame): if not frame.has_data: frame = frame.realize_frame(CartesianRepresentation(0 * u.km, 0 * u.km, 0 * u.km)) if frame.data.differentials: if velocity is not None: raise ValueError('frame already has differentials, cannot also specify velocity') # otherwise frame is ready to go else: if velocity is None: differentials = ZERO_VELOCITIES else: differentials = CartesianDifferential(velocity) frame = frame.realize_frame(frame.data.with_differentials(differentials)) if isinstance(frame, (type, str)): if isinstance(frame, type): frame_cls = frame elif isinstance(frame, str): frame_cls = frame_transform_graph.lookup_name(frame) if velocity is None: velocity = 0 * u.m / u.s, 0 * u.m / u.s, 0 * u.m / u.s elif velocity.shape != (3,): raise ValueError('velocity should be a Quantity vector with 3 elements') frame = frame_cls(0 * u.m, 0 * u.m, 0 * u.m, *velocity, representation_type='cartesian', differential_type='cartesian') observer = update_differentials_to_match(self.observer, frame, preserve_observer_frame=preserve_observer_frame) # Calculate the initial and final los velocity init_obs_vel = self._calculate_radial_velocity(self.observer, self.target, as_scalar=True) fin_obs_vel = self._calculate_radial_velocity(observer, self.target, as_scalar=True) # Apply transformation to data new_data = _apply_relativistic_doppler_shift(self, fin_obs_vel - init_obs_vel) new_coord = self.replicate(value=new_data, observer=observer) return new_coord def with_radial_velocity_shift(self, 
target_shift=None, observer_shift=None): """ Apply a velocity shift to this spectral coordinate. The shift can be provided as a redshift (float value) or radial velocity (`~astropy.units.Quantity` with physical type of 'speed'). Parameters ---------- target_shift : float or `~astropy.units.Quantity` Shift value to apply to current target. observer_shift : float or `~astropy.units.Quantity` Shift value to apply to current observer. Returns ------- `SpectralCoord` New spectral coordinate with the target/observer velocity changed to incorporate the shift. This is always a new object even if ``target_shift`` and ``observer_shift`` are both `None`. """ if observer_shift is not None and (self.target is None or self.observer is None): raise ValueError("Both an observer and target must be defined " "before applying a velocity shift.") for arg in [x for x in [target_shift, observer_shift] if x is not None]: if isinstance(arg, u.Quantity) and not arg.unit.is_equivalent((u.one, KMS)): raise u.UnitsError("Argument must have unit physical type " "'speed' for radial velocty or " "'dimensionless' for redshift.") # The target or observer value is defined but is not a quantity object, # assume it's a redshift float value and convert to velocity if target_shift is None: if self._observer is None or self._target is None: return self.replicate() target_shift = 0 * KMS else: target_shift = u.Quantity(target_shift) if target_shift.unit.physical_type == 'dimensionless': target_shift = _redshift_to_velocity(target_shift) if self._observer is None or self._target is None: return self.replicate(value=_apply_relativistic_doppler_shift(self, target_shift), radial_velocity=self.radial_velocity + target_shift) if observer_shift is None: observer_shift = 0 * KMS else: observer_shift = u.Quantity(observer_shift) if observer_shift.unit.physical_type == 'dimensionless': observer_shift = _redshift_to_velocity(observer_shift) target_icrs = self._target.transform_to(ICRS()) observer_icrs = self._observer.transform_to(ICRS()) pos_hat = SpectralCoord._normalized_position_vector(observer_icrs, target_icrs) target_velocity = _get_velocities(target_icrs) + target_shift * pos_hat observer_velocity = _get_velocities(observer_icrs) + observer_shift * pos_hat target_velocity = CartesianDifferential(target_velocity.xyz) observer_velocity = CartesianDifferential(observer_velocity.xyz) new_target = (target_icrs .realize_frame(target_icrs.cartesian.with_differentials(target_velocity)) .transform_to(self._target)) new_observer = (observer_icrs .realize_frame(observer_icrs.cartesian.with_differentials(observer_velocity)) .transform_to(self._observer)) init_obs_vel = self._calculate_radial_velocity(observer_icrs, target_icrs, as_scalar=True) fin_obs_vel = self._calculate_radial_velocity(new_observer, new_target, as_scalar=True) new_data = _apply_relativistic_doppler_shift(self, fin_obs_vel - init_obs_vel) return self.replicate(value=new_data, observer=new_observer, target=new_target) def to_rest(self): """ Transforms the spectral axis to the rest frame. """ if self.observer is not None and self.target is not None: return self.with_observer_stationary_relative_to(self.target) result = _apply_relativistic_doppler_shift(self, -self.radial_velocity) return self.replicate(value=result, radial_velocity=0. 
* KMS, redshift=None) def __repr__(self): prefixstr = '<' + self.__class__.__name__ + ' ' try: radial_velocity = self.radial_velocity redshift = self.redshift except ValueError: radial_velocity = redshift = 'Undefined' repr_items = [f'{prefixstr}'] if self.observer is not None: observer_repr = indent(repr(self.observer), 14 * ' ').lstrip() repr_items.append(f' observer: {observer_repr}') if self.target is not None: target_repr = indent(repr(self.target), 12 * ' ').lstrip() repr_items.append(f' target: {target_repr}') if (self._observer is not None and self._target is not None) or self._radial_velocity is not None: if self.observer is not None and self.target is not None: repr_items.append(' observer to target (computed from above):') else: repr_items.append(' observer to target:') repr_items.append(f' radial_velocity={radial_velocity}') repr_items.append(f' redshift={redshift}') if self.doppler_rest is not None or self.doppler_convention is not None: repr_items.append(f' doppler_rest={self.doppler_rest}') repr_items.append(f' doppler_convention={self.doppler_convention}') arrstr = np.array2string(self.view(np.ndarray), separator=', ', prefix=' ') if len(repr_items) == 1: repr_items[0] += f'{arrstr}{self._unitstr:s}' else: repr_items[1] = ' (' + repr_items[1].lstrip() repr_items[-1] += ')' repr_items.append(f' {arrstr}{self._unitstr:s}') return '\n'.join(repr_items) + '>'
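In equation form, the line-of-sight velocity computed by `_calculate_radial_velocity` and `_normalized_position_vector` above amounts to (a restatement of the quoted code, not an addition to the original file):

\[
\hat{n} = \frac{\mathbf{x}_{\mathrm{target}} - \mathbf{x}_{\mathrm{obs}}}{\lVert \mathbf{x}_{\mathrm{target}} - \mathbf{x}_{\mathrm{obs}} \rVert},
\qquad
v_{\mathrm{rad}} = \hat{n} \cdot \left(\mathbf{v}_{\mathrm{target}} - \mathbf{v}_{\mathrm{obs}}\right)
\]

with both frames first transformed to ICRS and any zero-length separation replaced by a unit length to avoid 0/0.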
def _apply_relativistic_doppler_shift(scoord, velocity):
    """
    Given a `SpectralQuantity` and a velocity, return a new `SpectralQuantity`
    that is Doppler shifted by this amount.

    Note that the Doppler shift applied is the full relativistic one, so
    `SpectralQuantity` currently expressed in velocity and not using the
    relativistic convention will temporarily be converted to use the
    relativistic convention while the shift is applied.

    Positive velocities are assumed to redshift the spectral quantity, while
    negative velocities blueshift the spectral quantity.
    """

    # NOTE: we deliberately don't keep sub-classes of SpectralQuantity intact
    # since we can't guarantee that their metadata would be correct/consistent.
    squantity = scoord.view(SpectralQuantity)

    beta = velocity / c
    doppler_factor = np.sqrt((1 + beta) / (1 - beta))

    if squantity.unit.is_equivalent(u.m):  # wavelength
        return squantity * doppler_factor
    elif (squantity.unit.is_equivalent(u.Hz) or
          squantity.unit.is_equivalent(u.eV) or
          squantity.unit.is_equivalent(1 / u.m)):
        return squantity / doppler_factor
    elif squantity.unit.is_equivalent(KMS):  # velocity
        return (squantity.to(u.Hz) / doppler_factor).to(squantity.unit)
    else:  # pragma: no cover
        raise RuntimeError(f"Unexpected units in velocity shift: {squantity.unit}. "
                           "This should not happen, so please report this in the "
                           "astropy issue tracker!")
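As a minimal, self-contained sketch of what this helper does, the snippet below recomputes the same relativistic Doppler factor on plain quantities rather than calling the private function; the numerical values are illustrative assumptions.

import numpy as np
import astropy.units as u
from astropy.constants import c

# Relativistic Doppler factor for a recession velocity of 3000 km/s
velocity = 3000 * u.km / u.s
beta = (velocity / c).to(u.dimensionless_unscaled)
doppler_factor = np.sqrt((1 + beta) / (1 - beta))

# Wavelengths scale with the factor (redshifted to longer wavelengths) ...
wavelength = 656.3 * u.nm
print(wavelength * doppler_factor)   # ~662.9 nm

# ... while frequencies scale with its inverse
frequency = 500 * u.THz
print(frequency / doppler_factor)    # ~495 THz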
53
85
import warnings from textwrap import indent import astropy.units as u import numpy as np from astropy.constants import c from astropy.coordinates import (ICRS, CartesianDifferential, CartesianRepresentation, SkyCoord) from astropy.coordinates.spectral_quantity import SpectralQuantity from astropy.coordinates.baseframe import (BaseCoordinateFrame, frame_transform_graph) from astropy.utils.exceptions import AstropyUserWarning __all__ = ['SpectralCoord'] class NoVelocityWarning(AstropyUserWarning): pass class NoDistanceWarning(AstropyUserWarning): pass KMS = u.km / u.s C_KMS = c.to(KMS) ZERO_VELOCITIES = CartesianDifferential([0, 0, 0] * KMS) # Default distance to use for target when none is provided DEFAULT_DISTANCE = 1e6 * u.kpc # We don't want to run doctests in the docstrings we inherit from Quantity __doctest_skip__ = ['SpectralCoord.*'] def _velocity_to_redshift(velocity): """ Convert a velocity to a relativistic redshift. """ beta = velocity / C_KMS return np.sqrt((1 + beta) / (1 - beta)) - 1 def _redshift_to_velocity(redshift): """ Convert a relativistic redshift to a velocity. """ zponesq = (1 + redshift) ** 2 return (C_KMS * (zponesq - 1) / (zponesq + 1)) def _apply_relativistic_doppler_shift(scoord, velocity): """ Given a `SpectralQuantity` and a velocity, return a new `SpectralQuantity` that is Doppler shifted by this amount. Note that the Doppler shift applied is the full relativistic one, so `SpectralQuantity` currently expressed in velocity and not using the relativistic convention will temporarily be converted to use the relativistic convention while the shift is applied. Positive velocities are assumed to redshift the spectral quantity, while negative velocities blueshift the spectral quantity. """ # NOTE: we deliberately don't keep sub-classes of SpectralQuantity intact # since we can't guarantee that their metadata would be correct/consistent. squantity = scoord.view(SpectralQuantity) beta = velocity / c doppler_factor = np.sqrt((1 + beta) / (1 - beta)) if squantity.unit.is_equivalent(u.m): # wavelength return squantity * doppler_factor elif (squantity.unit.is_equivalent(u.Hz) or squantity.unit.is_equivalent(u.eV) or squantity.unit.is_equivalent(1 / u.m)): return squantity / doppler_factor elif squantity.unit.is_equivalent(KMS): # velocity return (squantity.to(u.Hz) / doppler_factor).to(squantity.unit) else: # pragma: no cover raise RuntimeError(f"Unexpected units in velocity shift: {squantity.unit}. " "This should not happen, so please report this in the " "astropy issue tracker!") def update_differentials_to_match(original, velocity_reference, preserve_observer_frame=False): """ Given an original coordinate object, update the differentials so that the final coordinate is at the same location as the original coordinate but co-moving with the velocity reference object. If preserve_original_frame is set to True, the resulting object will be in the frame of the original coordinate, otherwise it will be in the frame of the velocity reference. """ if not velocity_reference.data.differentials: raise ValueError("Reference frame has no velocities") # If the reference has an obstime already defined, we should ignore # it and stick with the original observer obstime. 
if 'obstime' in velocity_reference.frame_attributes and hasattr(original, 'obstime'): velocity_reference = velocity_reference.replicate(obstime=original.obstime) # We transform both coordinates to ICRS for simplicity and because we know # it's a simple frame that is not time-dependent (it could be that both # the original and velocity_reference frame are time-dependent) original_icrs = original.transform_to(ICRS()) velocity_reference_icrs = velocity_reference.transform_to(ICRS()) differentials = velocity_reference_icrs.data.represent_as(CartesianRepresentation, CartesianDifferential).differentials data_with_differentials = (original_icrs.data.represent_as(CartesianRepresentation) .with_differentials(differentials)) final_icrs = original_icrs.realize_frame(data_with_differentials) if preserve_observer_frame: final = final_icrs.transform_to(original) else: final = final_icrs.transform_to(velocity_reference) return final.replicate(representation_type=CartesianRepresentation, differential_type=CartesianDifferential) def attach_zero_velocities(coord): """ Set the differentials to be stationary on a coordinate object. """ new_data = coord.cartesian.with_differentials(ZERO_VELOCITIES) return coord.realize_frame(new_data) def _get_velocities(coord): if 's' in coord.data.differentials: return coord.velocity else: return ZERO_VELOCITIES class SpectralCoord(SpectralQuantity): """ A spectral coordinate with its corresponding unit. .. note:: The |SpectralCoord| class is new in Astropy v4.1 and should be considered experimental at this time. Note that we do not fully support cases where the observer and target are moving relativistically relative to each other, so care should be taken in those cases. It is possible that there will be API changes in future versions of Astropy based on user feedback. If you have specific ideas for how it might be improved, please let us know on the `astropy-dev mailing list`_ or at http://feedback.astropy.org. Parameters ---------- value : ndarray or `~astropy.units.Quantity` or `SpectralCoord` Spectral values, which should be either wavelength, frequency, energy, wavenumber, or velocity values. unit : str or `~astropy.units.Unit` Unit for the given spectral values. observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional The coordinate (position and velocity) of observer. If no velocities are present on this object, the observer is assumed to be stationary relative to the frame origin. target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional The coordinate (position and velocity) of target. If no velocities are present on this object, the target is assumed to be stationary relative to the frame origin. radial_velocity : `~astropy.units.Quantity`, optional The radial velocity of the target with respect to the observer. This can only be specified if ``redshift`` is not specified. redshift : float, optional The relativistic redshift of the target with respect to the observer. This can only be specified if ``radial_velocity`` cannot be specified. doppler_rest : `~astropy.units.Quantity`, optional The rest value to use when expressing the spectral value as a velocity. doppler_convention : str, optional The Doppler convention to use when expressing the spectral value as a velocity. 
""" @u.quantity_input(radial_velocity=u.km/u.s) def __new__(cls, value, unit=None, observer=None, target=None, radial_velocity=None, redshift=None, **kwargs): obj = super().__new__(cls, value, unit=unit, **kwargs) # There are two main modes of operation in this class. Either the # observer and target are both defined, in which case the radial # velocity and redshift are automatically computed from these, or # only one of the observer and target are specified, along with a # manually specified radial velocity or redshift. So if a target and # observer are both specified, we can't also accept a radial velocity # or redshift. if target is not None and observer is not None: if radial_velocity is not None or redshift is not None: raise ValueError("Cannot specify radial velocity or redshift if both " "target and observer are specified") # We only deal with redshifts here and in the redshift property. # Otherwise internally we always deal with velocities. if redshift is not None: if radial_velocity is not None: raise ValueError("Cannot set both a radial velocity and redshift") redshift = u.Quantity(redshift) # For now, we can't specify redshift=u.one in quantity_input above # and have it work with plain floats, but if that is fixed, for # example as in https://github.com/astropy/astropy/pull/10232, we # can remove the check here and add redshift=u.one to the decorator if not redshift.unit.is_equivalent(u.one): raise u.UnitsError('redshift should be dimensionless') radial_velocity = _redshift_to_velocity(redshift) # If we're initializing from an existing SpectralCoord, keep any # parameters that aren't being overridden if observer is None: observer = getattr(value, 'observer', None) if target is None: target = getattr(value, 'target', None) # As mentioned above, we should only specify the radial velocity # manually if either or both the observer and target are not # specified. if observer is None or target is None: if radial_velocity is None: radial_velocity = getattr(value, 'radial_velocity', None) obj._radial_velocity = radial_velocity obj._observer = cls._validate_coordinate(observer, label='observer') obj._target = cls._validate_coordinate(target, label='target') return obj def __array_finalize__(self, obj): super().__array_finalize__(obj) self._radial_velocity = getattr(obj, '_radial_velocity', None) self._observer = getattr(obj, '_observer', None) self._target = getattr(obj, '_target', None) @staticmethod def _validate_coordinate(coord, label=''): """ Checks the type of the frame and whether a velocity differential and a distance has been defined on the frame object. If no distance is defined, the target is assumed to be "really far away", and the observer is assumed to be "in the solar system". Parameters ---------- coord : `~astropy.coordinates.BaseCoordinateFrame` The new frame to be used for target or observer. label : str, optional The name of the object being validated (e.g. 'target' or 'observer'), which is then used in error messages. 
""" if coord is None: return if not issubclass(coord.__class__, BaseCoordinateFrame): if isinstance(coord, SkyCoord): coord = coord.frame else: raise TypeError(f"{label} must be a SkyCoord or coordinate frame instance") # If the distance is not well-defined, ensure that it works properly # for generating differentials # TODO: change this to not set the distance and yield a warning once # there's a good way to address this in astropy.coordinates # https://github.com/astropy/astropy/issues/10247 with np.errstate(all='ignore'): distance = getattr(coord, 'distance', None) if distance is not None and distance.unit.physical_type == 'dimensionless': coord = SkyCoord(coord, distance=DEFAULT_DISTANCE) warnings.warn( "Distance on coordinate object is dimensionless, an " f"abritrary distance value of {DEFAULT_DISTANCE} will be set instead.", NoDistanceWarning) # If the observer frame does not contain information about the # velocity of the system, assume that the velocity is zero in the # system. if 's' not in coord.data.differentials: warnings.warn( "No velocity defined on frame, assuming {}.".format( ZERO_VELOCITIES), NoVelocityWarning) coord = attach_zero_velocities(coord) return coord def replicate(self, value=None, unit=None, observer=None, target=None, radial_velocity=None, redshift=None, doppler_convention=None, doppler_rest=None, copy=False): """ Return a replica of the `SpectralCoord`, optionally changing the values or attributes. Note that no conversion is carried out by this method - this keeps all the values and attributes the same, except for the ones explicitly passed to this method which are changed. If ``copy`` is set to `True` then a full copy of the internal arrays will be made. By default the replica will use a reference to the original arrays when possible to save memory. Parameters ---------- value : ndarray or `~astropy.units.Quantity` or `SpectralCoord`, optional Spectral values, which should be either wavelength, frequency, energy, wavenumber, or velocity values. unit : str or `~astropy.units.Unit` Unit for the given spectral values. observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional The coordinate (position and velocity) of observer. target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional The coordinate (position and velocity) of target. radial_velocity : `~astropy.units.Quantity`, optional The radial velocity of the target with respect to the observer. redshift : float, optional The relativistic redshift of the target with respect to the observer. doppler_rest : `~astropy.units.Quantity`, optional The rest value to use when expressing the spectral value as a velocity. doppler_convention : str, optional The Doppler convention to use when expressing the spectral value as a velocity. copy : bool, optional If `True`, and ``value`` is not specified, the values are copied to the new `SkyCoord` - otherwise a reference to the same values is used. 
Returns ------- sc : `SpectralCoord` object Replica of this object """ if isinstance(value, u.Quantity): if unit is not None: raise ValueError("Cannot specify value as a Quantity and also specify unit") else: value, unit = value.value, value.unit value = value if value is not None else self.value unit = unit or self.unit observer = self._validate_coordinate(observer) or self.observer target = self._validate_coordinate(target) or self.target doppler_convention = doppler_convention or self.doppler_convention doppler_rest = doppler_rest or self.doppler_rest # If value is being taken from self and copy is Tru if copy: value = value.copy() # Only include radial_velocity if it is not auto-computed from the # observer and target. if (self.observer is None or self.target is None) and radial_velocity is None and redshift is None: radial_velocity = self.radial_velocity with warnings.catch_warnings(): warnings.simplefilter('ignore', NoVelocityWarning) return self.__class__(value=value, unit=unit, observer=observer, target=target, radial_velocity=radial_velocity, redshift=redshift, doppler_convention=doppler_convention, doppler_rest=doppler_rest, copy=False) @property def quantity(self): """ Convert the ``SpectralCoord`` to a `~astropy.units.Quantity`. Equivalent to ``self.view(u.Quantity)``. Returns ------- `~astropy.units.Quantity` This object viewed as a `~astropy.units.Quantity`. """ return self.view(u.Quantity) @property def observer(self): """ The coordinates of the observer. If set, and a target is set as well, this will override any explicit radial velocity passed in. Returns ------- `~astropy.coordinates.BaseCoordinateFrame` The astropy coordinate frame representing the observation. """ return self._observer @observer.setter def observer(self, value): if self.observer is not None: raise ValueError("observer has already been set") self._observer = self._validate_coordinate(value, label='observer') # Switch to auto-computing radial velocity if self._target is not None: self._radial_velocity = None @property def target(self): """ The coordinates of the target being observed. If set, and an observer is set as well, this will override any explicit radial velocity passed in. Returns ------- `~astropy.coordinates.BaseCoordinateFrame` The astropy coordinate frame representing the target. """ return self._target @target.setter def target(self, value): if self.target is not None: raise ValueError("target has already been set") self._target = self._validate_coordinate(value, label='target') # Switch to auto-computing radial velocity if self._observer is not None: self._radial_velocity = None @property def radial_velocity(self): """ Radial velocity of target relative to the observer. Returns ------- `~astropy.units.Quantity` Radial velocity of target. Notes ----- This is different from the ``.radial_velocity`` property of a coordinate frame in that this calculates the radial velocity with respect to the *observer*, not the origin of the frame. """ if self._observer is None or self._target is None: if self._radial_velocity is None: return 0 * KMS else: return self._radial_velocity else: return self._calculate_radial_velocity(self._observer, self._target, as_scalar=True) @property def redshift(self): """ Redshift of target relative to observer. Calculated from the radial velocity. Returns ------- float Redshift of target. 
""" return _velocity_to_redshift(self.radial_velocity) @staticmethod def _calculate_radial_velocity(observer, target, as_scalar=False): """ Compute the line-of-sight velocity from the observer to the target. Parameters ---------- observer : `~astropy.coordinates.BaseCoordinateFrame` The frame of the observer. target : `~astropy.coordinates.BaseCoordinateFrame` The frame of the target. as_scalar : bool If `True`, the magnitude of the velocity vector will be returned, otherwise the full vector will be returned. Returns ------- `~astropy.units.Quantity` The radial velocity of the target with respect to the observer. """ # Convert observer and target to ICRS to avoid finite differencing # calculations that lack numerical precision. observer_icrs = observer.transform_to(ICRS()) target_icrs = target.transform_to(ICRS()) pos_hat = SpectralCoord._normalized_position_vector(observer_icrs, target_icrs) d_vel = target_icrs.velocity - observer_icrs.velocity vel_mag = pos_hat.dot(d_vel) if as_scalar: return vel_mag else: return vel_mag * pos_hat @staticmethod def _normalized_position_vector(observer, target): """ Calculate the normalized position vector between two frames. Parameters ---------- observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The observation frame or coordinate. target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The target frame or coordinate. Returns ------- pos_hat : `BaseRepresentation` Position representation. """ d_pos = (target.cartesian.without_differentials() - observer.cartesian.without_differentials()) dp_norm = d_pos.norm() # Reset any that are 0 to 1 to avoid nans from 0/0 dp_norm[dp_norm == 0] = 1 * dp_norm.unit pos_hat = d_pos / dp_norm return pos_hat @u.quantity_input(velocity=u.km/u.s) def with_observer_stationary_relative_to(self, frame, velocity=None, preserve_observer_frame=False): """ A new `SpectralCoord` with the velocity of the observer altered, but not the position. If a coordinate frame is specified, the observer velocities will be modified to be stationary in the specified frame. If a coordinate instance is specified, optionally with non-zero velocities, the observer velocities will be updated so that the observer is co-moving with the specified coordinates. Parameters ---------- frame : str, `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The observation frame in which the observer will be stationary. This can be the name of a frame (e.g. 'icrs'), a frame class, frame instance with no data, or instance with data. This can optionally include velocities. velocity : `~astropy.units.Quantity` or `~astropy.coordinates.CartesianDifferential`, optional If ``frame`` does not contain velocities, these can be specified as a 3-element `~astropy.units.Quantity`. In the case where this is also not specified, the velocities default to zero. preserve_observer_frame : bool If `True`, the final observer frame class will be the same as the original one, and if `False` it will be the frame of the velocity reference class. Returns ------- new_coord : `SpectralCoord` The new coordinate object representing the spectral data transformed based on the observer's new velocity frame. 
""" if self.observer is None or self.target is None: raise ValueError("This method can only be used if both observer " "and target are defined on the SpectralCoord.") # Start off by extracting frame if a SkyCoord was passed in if isinstance(frame, SkyCoord): frame = frame.frame if isinstance(frame, BaseCoordinateFrame): if not frame.has_data: frame = frame.realize_frame(CartesianRepresentation(0 * u.km, 0 * u.km, 0 * u.km)) if frame.data.differentials: if velocity is not None: raise ValueError('frame already has differentials, cannot also specify velocity') # otherwise frame is ready to go else: if velocity is None: differentials = ZERO_VELOCITIES else: differentials = CartesianDifferential(velocity) frame = frame.realize_frame(frame.data.with_differentials(differentials)) if isinstance(frame, (type, str)): if isinstance(frame, type): frame_cls = frame elif isinstance(frame, str): frame_cls = frame_transform_graph.lookup_name(frame) if velocity is None: velocity = 0 * u.m / u.s, 0 * u.m / u.s, 0 * u.m / u.s elif velocity.shape != (3,): raise ValueError('velocity should be a Quantity vector with 3 elements') frame = frame_cls(0 * u.m, 0 * u.m, 0 * u.m, *velocity, representation_type='cartesian', differential_type='cartesian') observer = update_differentials_to_match(self.observer, frame, preserve_observer_frame=preserve_observer_frame) # Calculate the initial and final los velocity init_obs_vel = self._calculate_radial_velocity(self.observer, self.target, as_scalar=True) fin_obs_vel = self._calculate_radial_velocity(observer, self.target, as_scalar=True) # Apply transformation to data new_data = _apply_relativistic_doppler_shift(self, fin_obs_vel - init_obs_vel) new_coord = self.replicate(value=new_data, observer=observer) return new_coord def with_radial_velocity_shift(self, target_shift=None, observer_shift=None): """ Apply a velocity shift to this spectral coordinate. The shift can be provided as a redshift (float value) or radial velocity (`~astropy.units.Quantity` with physical type of 'speed'). Parameters ---------- target_shift : float or `~astropy.units.Quantity` Shift value to apply to current target. observer_shift : float or `~astropy.units.Quantity` Shift value to apply to current observer. Returns ------- `SpectralCoord` New spectral coordinate with the target/observer velocity changed to incorporate the shift. This is always a new object even if ``target_shift`` and ``observer_shift`` are both `None`. 
""" if observer_shift is not None and (self.target is None or self.observer is None): raise ValueError("Both an observer and target must be defined " "before applying a velocity shift.") for arg in [x for x in [target_shift, observer_shift] if x is not None]: if isinstance(arg, u.Quantity) and not arg.unit.is_equivalent((u.one, KMS)): raise u.UnitsError("Argument must have unit physical type " "'speed' for radial velocty or " "'dimensionless' for redshift.") # The target or observer value is defined but is not a quantity object, # assume it's a redshift float value and convert to velocity if target_shift is None: if self._observer is None or self._target is None: return self.replicate() target_shift = 0 * KMS else: target_shift = u.Quantity(target_shift) if target_shift.unit.physical_type == 'dimensionless': target_shift = _redshift_to_velocity(target_shift) if self._observer is None or self._target is None: return self.replicate(value=_apply_relativistic_doppler_shift(self, target_shift), radial_velocity=self.radial_velocity + target_shift) if observer_shift is None: observer_shift = 0 * KMS else: observer_shift = u.Quantity(observer_shift) if observer_shift.unit.physical_type == 'dimensionless': observer_shift = _redshift_to_velocity(observer_shift) target_icrs = self._target.transform_to(ICRS()) observer_icrs = self._observer.transform_to(ICRS()) pos_hat = SpectralCoord._normalized_position_vector(observer_icrs, target_icrs) target_velocity = _get_velocities(target_icrs) + target_shift * pos_hat observer_velocity = _get_velocities(observer_icrs) + observer_shift * pos_hat target_velocity = CartesianDifferential(target_velocity.xyz) observer_velocity = CartesianDifferential(observer_velocity.xyz) new_target = (target_icrs .realize_frame(target_icrs.cartesian.with_differentials(target_velocity)) .transform_to(self._target)) new_observer = (observer_icrs .realize_frame(observer_icrs.cartesian.with_differentials(observer_velocity)) .transform_to(self._observer)) init_obs_vel = self._calculate_radial_velocity(observer_icrs, target_icrs, as_scalar=True) fin_obs_vel = self._calculate_radial_velocity(new_observer, new_target, as_scalar=True) new_data = _apply_relativistic_doppler_shift(self, fin_obs_vel - init_obs_vel) return self.replicate(value=new_data, observer=new_observer, target=new_target) def to_rest(self): """ Transforms the spectral axis to the rest frame. """ if self.observer is not None and self.target is not None: return self.with_observer_stationary_relative_to(self.target) result = _apply_relativistic_doppler_shift(self, -self.radial_velocity) return self.replicate(value=result, radial_velocity=0. 
* KMS, redshift=None) def __repr__(self): prefixstr = '<' + self.__class__.__name__ + ' ' try: radial_velocity = self.radial_velocity redshift = self.redshift except ValueError: radial_velocity = redshift = 'Undefined' repr_items = [f'{prefixstr}'] if self.observer is not None: observer_repr = indent(repr(self.observer), 14 * ' ').lstrip() repr_items.append(f' observer: {observer_repr}') if self.target is not None: target_repr = indent(repr(self.target), 12 * ' ').lstrip() repr_items.append(f' target: {target_repr}') if (self._observer is not None and self._target is not None) or self._radial_velocity is not None: if self.observer is not None and self.target is not None: repr_items.append(' observer to target (computed from above):') else: repr_items.append(' observer to target:') repr_items.append(f' radial_velocity={radial_velocity}') repr_items.append(f' redshift={redshift}') if self.doppler_rest is not None or self.doppler_convention is not None: repr_items.append(f' doppler_rest={self.doppler_rest}') repr_items.append(f' doppler_convention={self.doppler_convention}') arrstr = np.array2string(self.view(np.ndarray), separator=', ', prefix=' ') if len(repr_items) == 1: repr_items[0] += f'{arrstr}{self._unitstr:s}' else: repr_items[1] = ' (' + repr_items[1].lstrip() repr_items[-1] += ')' repr_items.append(f' {arrstr}{self._unitstr:s}') return '\n'.join(repr_items) + '>'
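For orientation, a usage-level sketch of the class defined in the file above, in its radial-velocity-only mode (no observer or target attached); the spectral values and velocity are assumed for illustration, and the expected behaviour follows from the quoted code.

import astropy.units as u
from astropy.coordinates import SpectralCoord

# With only a radial velocity, the redshift is derived from it via the
# relativistic relation used by _velocity_to_redshift.
sc = SpectralCoord([6563, 6583] * u.AA, radial_velocity=500 * u.km / u.s)
print(sc.radial_velocity)   # 500 km / s
print(sc.redshift)          # ~0.00167

# to_rest() removes that shift from the spectral values and resets the
# stored radial velocity to zero.
rest = sc.to_rest()
print(rest.radial_velocity)  # 0 km / s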
_validate_coordinate
Checks the type of the frame and whether a velocity differential and a
distance have been defined on the frame object.

If no distance is defined, the target is assumed to be "really far
away", and the observer is assumed to be "in the solar system".

Parameters
----------
coord : `~astropy.coordinates.BaseCoordinateFrame`
    The new frame to be used for target or observer.
label : str, optional
    The name of the object being validated (e.g. 'target' or 'observer'),
    which is then used in error messages.
import warnings from textwrap import indent import astropy.units as u import numpy as np from astropy.constants import c from astropy.coordinates import (ICRS, CartesianDifferential, CartesianRepresentation, SkyCoord) from astropy.coordinates.spectral_quantity import SpectralQuantity from astropy.coordinates.baseframe import (BaseCoordinateFrame, frame_transform_graph) from astropy.utils.exceptions import AstropyUserWarning __all__ = ['SpectralCoord'] class NoVelocityWarning(AstropyUserWarning): pass class NoDistanceWarning(AstropyUserWarning): pass KMS = u.km / u.s C_KMS = c.to(KMS) ZERO_VELOCITIES = CartesianDifferential([0, 0, 0] * KMS) # Default distance to use for target when none is provided DEFAULT_DISTANCE = 1e6 * u.kpc # We don't want to run doctests in the docstrings we inherit from Quantity __doctest_skip__ = ['SpectralCoord.*'] def _velocity_to_redshift(velocity): """ Convert a velocity to a relativistic redshift. """ beta = velocity / C_KMS return np.sqrt((1 + beta) / (1 - beta)) - 1 def _redshift_to_velocity(redshift): """ Convert a relativistic redshift to a velocity. """ zponesq = (1 + redshift) ** 2 return (C_KMS * (zponesq - 1) / (zponesq + 1)) def _apply_relativistic_doppler_shift(scoord, velocity): """ Given a `SpectralQuantity` and a velocity, return a new `SpectralQuantity` that is Doppler shifted by this amount. Note that the Doppler shift applied is the full relativistic one, so `SpectralQuantity` currently expressed in velocity and not using the relativistic convention will temporarily be converted to use the relativistic convention while the shift is applied. Positive velocities are assumed to redshift the spectral quantity, while negative velocities blueshift the spectral quantity. """ # NOTE: we deliberately don't keep sub-classes of SpectralQuantity intact # since we can't guarantee that their metadata would be correct/consistent. squantity = scoord.view(SpectralQuantity) beta = velocity / c doppler_factor = np.sqrt((1 + beta) / (1 - beta)) if squantity.unit.is_equivalent(u.m): # wavelength return squantity * doppler_factor elif (squantity.unit.is_equivalent(u.Hz) or squantity.unit.is_equivalent(u.eV) or squantity.unit.is_equivalent(1 / u.m)): return squantity / doppler_factor elif squantity.unit.is_equivalent(KMS): # velocity return (squantity.to(u.Hz) / doppler_factor).to(squantity.unit) else: # pragma: no cover raise RuntimeError(f"Unexpected units in velocity shift: {squantity.unit}. " "This should not happen, so please report this in the " "astropy issue tracker!") def update_differentials_to_match(original, velocity_reference, preserve_observer_frame=False): """ Given an original coordinate object, update the differentials so that the final coordinate is at the same location as the original coordinate but co-moving with the velocity reference object. If preserve_original_frame is set to True, the resulting object will be in the frame of the original coordinate, otherwise it will be in the frame of the velocity reference. """ if not velocity_reference.data.differentials: raise ValueError("Reference frame has no velocities") # If the reference has an obstime already defined, we should ignore # it and stick with the original observer obstime. 
if 'obstime' in velocity_reference.frame_attributes and hasattr(original, 'obstime'): velocity_reference = velocity_reference.replicate(obstime=original.obstime) # We transform both coordinates to ICRS for simplicity and because we know # it's a simple frame that is not time-dependent (it could be that both # the original and velocity_reference frame are time-dependent) original_icrs = original.transform_to(ICRS()) velocity_reference_icrs = velocity_reference.transform_to(ICRS()) differentials = velocity_reference_icrs.data.represent_as(CartesianRepresentation, CartesianDifferential).differentials data_with_differentials = (original_icrs.data.represent_as(CartesianRepresentation) .with_differentials(differentials)) final_icrs = original_icrs.realize_frame(data_with_differentials) if preserve_observer_frame: final = final_icrs.transform_to(original) else: final = final_icrs.transform_to(velocity_reference) return final.replicate(representation_type=CartesianRepresentation, differential_type=CartesianDifferential) def attach_zero_velocities(coord): """ Set the differentials to be stationary on a coordinate object. """ new_data = coord.cartesian.with_differentials(ZERO_VELOCITIES) return coord.realize_frame(new_data) def _get_velocities(coord): if 's' in coord.data.differentials: return coord.velocity else: return ZERO_VELOCITIES class SpectralCoord(SpectralQuantity): """ A spectral coordinate with its corresponding unit. .. note:: The |SpectralCoord| class is new in Astropy v4.1 and should be considered experimental at this time. Note that we do not fully support cases where the observer and target are moving relativistically relative to each other, so care should be taken in those cases. It is possible that there will be API changes in future versions of Astropy based on user feedback. If you have specific ideas for how it might be improved, please let us know on the `astropy-dev mailing list`_ or at http://feedback.astropy.org. Parameters ---------- value : ndarray or `~astropy.units.Quantity` or `SpectralCoord` Spectral values, which should be either wavelength, frequency, energy, wavenumber, or velocity values. unit : str or `~astropy.units.Unit` Unit for the given spectral values. observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional The coordinate (position and velocity) of observer. If no velocities are present on this object, the observer is assumed to be stationary relative to the frame origin. target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional The coordinate (position and velocity) of target. If no velocities are present on this object, the target is assumed to be stationary relative to the frame origin. radial_velocity : `~astropy.units.Quantity`, optional The radial velocity of the target with respect to the observer. This can only be specified if ``redshift`` is not specified. redshift : float, optional The relativistic redshift of the target with respect to the observer. This can only be specified if ``radial_velocity`` cannot be specified. doppler_rest : `~astropy.units.Quantity`, optional The rest value to use when expressing the spectral value as a velocity. doppler_convention : str, optional The Doppler convention to use when expressing the spectral value as a velocity. 
""" @u.quantity_input(radial_velocity=u.km/u.s) def __new__(cls, value, unit=None, observer=None, target=None, radial_velocity=None, redshift=None, **kwargs): obj = super().__new__(cls, value, unit=unit, **kwargs) # There are two main modes of operation in this class. Either the # observer and target are both defined, in which case the radial # velocity and redshift are automatically computed from these, or # only one of the observer and target are specified, along with a # manually specified radial velocity or redshift. So if a target and # observer are both specified, we can't also accept a radial velocity # or redshift. if target is not None and observer is not None: if radial_velocity is not None or redshift is not None: raise ValueError("Cannot specify radial velocity or redshift if both " "target and observer are specified") # We only deal with redshifts here and in the redshift property. # Otherwise internally we always deal with velocities. if redshift is not None: if radial_velocity is not None: raise ValueError("Cannot set both a radial velocity and redshift") redshift = u.Quantity(redshift) # For now, we can't specify redshift=u.one in quantity_input above # and have it work with plain floats, but if that is fixed, for # example as in https://github.com/astropy/astropy/pull/10232, we # can remove the check here and add redshift=u.one to the decorator if not redshift.unit.is_equivalent(u.one): raise u.UnitsError('redshift should be dimensionless') radial_velocity = _redshift_to_velocity(redshift) # If we're initializing from an existing SpectralCoord, keep any # parameters that aren't being overridden if observer is None: observer = getattr(value, 'observer', None) if target is None: target = getattr(value, 'target', None) # As mentioned above, we should only specify the radial velocity # manually if either or both the observer and target are not # specified. if observer is None or target is None: if radial_velocity is None: radial_velocity = getattr(value, 'radial_velocity', None) obj._radial_velocity = radial_velocity obj._observer = cls._validate_coordinate(observer, label='observer') obj._target = cls._validate_coordinate(target, label='target') return obj def __array_finalize__(self, obj): super().__array_finalize__(obj) self._radial_velocity = getattr(obj, '_radial_velocity', None) self._observer = getattr(obj, '_observer', None) self._target = getattr(obj, '_target', None) # MASKED: _validate_coordinate function (lines 247-299) def replicate(self, value=None, unit=None, observer=None, target=None, radial_velocity=None, redshift=None, doppler_convention=None, doppler_rest=None, copy=False): """ Return a replica of the `SpectralCoord`, optionally changing the values or attributes. Note that no conversion is carried out by this method - this keeps all the values and attributes the same, except for the ones explicitly passed to this method which are changed. If ``copy`` is set to `True` then a full copy of the internal arrays will be made. By default the replica will use a reference to the original arrays when possible to save memory. Parameters ---------- value : ndarray or `~astropy.units.Quantity` or `SpectralCoord`, optional Spectral values, which should be either wavelength, frequency, energy, wavenumber, or velocity values. unit : str or `~astropy.units.Unit` Unit for the given spectral values. observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional The coordinate (position and velocity) of observer. 
target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional The coordinate (position and velocity) of target. radial_velocity : `~astropy.units.Quantity`, optional The radial velocity of the target with respect to the observer. redshift : float, optional The relativistic redshift of the target with respect to the observer. doppler_rest : `~astropy.units.Quantity`, optional The rest value to use when expressing the spectral value as a velocity. doppler_convention : str, optional The Doppler convention to use when expressing the spectral value as a velocity. copy : bool, optional If `True`, and ``value`` is not specified, the values are copied to the new `SkyCoord` - otherwise a reference to the same values is used. Returns ------- sc : `SpectralCoord` object Replica of this object """ if isinstance(value, u.Quantity): if unit is not None: raise ValueError("Cannot specify value as a Quantity and also specify unit") else: value, unit = value.value, value.unit value = value if value is not None else self.value unit = unit or self.unit observer = self._validate_coordinate(observer) or self.observer target = self._validate_coordinate(target) or self.target doppler_convention = doppler_convention or self.doppler_convention doppler_rest = doppler_rest or self.doppler_rest # If value is being taken from self and copy is Tru if copy: value = value.copy() # Only include radial_velocity if it is not auto-computed from the # observer and target. if (self.observer is None or self.target is None) and radial_velocity is None and redshift is None: radial_velocity = self.radial_velocity with warnings.catch_warnings(): warnings.simplefilter('ignore', NoVelocityWarning) return self.__class__(value=value, unit=unit, observer=observer, target=target, radial_velocity=radial_velocity, redshift=redshift, doppler_convention=doppler_convention, doppler_rest=doppler_rest, copy=False) @property def quantity(self): """ Convert the ``SpectralCoord`` to a `~astropy.units.Quantity`. Equivalent to ``self.view(u.Quantity)``. Returns ------- `~astropy.units.Quantity` This object viewed as a `~astropy.units.Quantity`. """ return self.view(u.Quantity) @property def observer(self): """ The coordinates of the observer. If set, and a target is set as well, this will override any explicit radial velocity passed in. Returns ------- `~astropy.coordinates.BaseCoordinateFrame` The astropy coordinate frame representing the observation. """ return self._observer @observer.setter def observer(self, value): if self.observer is not None: raise ValueError("observer has already been set") self._observer = self._validate_coordinate(value, label='observer') # Switch to auto-computing radial velocity if self._target is not None: self._radial_velocity = None @property def target(self): """ The coordinates of the target being observed. If set, and an observer is set as well, this will override any explicit radial velocity passed in. Returns ------- `~astropy.coordinates.BaseCoordinateFrame` The astropy coordinate frame representing the target. """ return self._target @target.setter def target(self, value): if self.target is not None: raise ValueError("target has already been set") self._target = self._validate_coordinate(value, label='target') # Switch to auto-computing radial velocity if self._observer is not None: self._radial_velocity = None @property def radial_velocity(self): """ Radial velocity of target relative to the observer. Returns ------- `~astropy.units.Quantity` Radial velocity of target. 
Notes ----- This is different from the ``.radial_velocity`` property of a coordinate frame in that this calculates the radial velocity with respect to the *observer*, not the origin of the frame. """ if self._observer is None or self._target is None: if self._radial_velocity is None: return 0 * KMS else: return self._radial_velocity else: return self._calculate_radial_velocity(self._observer, self._target, as_scalar=True) @property def redshift(self): """ Redshift of target relative to observer. Calculated from the radial velocity. Returns ------- float Redshift of target. """ return _velocity_to_redshift(self.radial_velocity) @staticmethod def _calculate_radial_velocity(observer, target, as_scalar=False): """ Compute the line-of-sight velocity from the observer to the target. Parameters ---------- observer : `~astropy.coordinates.BaseCoordinateFrame` The frame of the observer. target : `~astropy.coordinates.BaseCoordinateFrame` The frame of the target. as_scalar : bool If `True`, the magnitude of the velocity vector will be returned, otherwise the full vector will be returned. Returns ------- `~astropy.units.Quantity` The radial velocity of the target with respect to the observer. """ # Convert observer and target to ICRS to avoid finite differencing # calculations that lack numerical precision. observer_icrs = observer.transform_to(ICRS()) target_icrs = target.transform_to(ICRS()) pos_hat = SpectralCoord._normalized_position_vector(observer_icrs, target_icrs) d_vel = target_icrs.velocity - observer_icrs.velocity vel_mag = pos_hat.dot(d_vel) if as_scalar: return vel_mag else: return vel_mag * pos_hat @staticmethod def _normalized_position_vector(observer, target): """ Calculate the normalized position vector between two frames. Parameters ---------- observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The observation frame or coordinate. target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The target frame or coordinate. Returns ------- pos_hat : `BaseRepresentation` Position representation. """ d_pos = (target.cartesian.without_differentials() - observer.cartesian.without_differentials()) dp_norm = d_pos.norm() # Reset any that are 0 to 1 to avoid nans from 0/0 dp_norm[dp_norm == 0] = 1 * dp_norm.unit pos_hat = d_pos / dp_norm return pos_hat @u.quantity_input(velocity=u.km/u.s) def with_observer_stationary_relative_to(self, frame, velocity=None, preserve_observer_frame=False): """ A new `SpectralCoord` with the velocity of the observer altered, but not the position. If a coordinate frame is specified, the observer velocities will be modified to be stationary in the specified frame. If a coordinate instance is specified, optionally with non-zero velocities, the observer velocities will be updated so that the observer is co-moving with the specified coordinates. Parameters ---------- frame : str, `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The observation frame in which the observer will be stationary. This can be the name of a frame (e.g. 'icrs'), a frame class, frame instance with no data, or instance with data. This can optionally include velocities. velocity : `~astropy.units.Quantity` or `~astropy.coordinates.CartesianDifferential`, optional If ``frame`` does not contain velocities, these can be specified as a 3-element `~astropy.units.Quantity`. In the case where this is also not specified, the velocities default to zero. 
preserve_observer_frame : bool If `True`, the final observer frame class will be the same as the original one, and if `False` it will be the frame of the velocity reference class. Returns ------- new_coord : `SpectralCoord` The new coordinate object representing the spectral data transformed based on the observer's new velocity frame. """ if self.observer is None or self.target is None: raise ValueError("This method can only be used if both observer " "and target are defined on the SpectralCoord.") # Start off by extracting frame if a SkyCoord was passed in if isinstance(frame, SkyCoord): frame = frame.frame if isinstance(frame, BaseCoordinateFrame): if not frame.has_data: frame = frame.realize_frame(CartesianRepresentation(0 * u.km, 0 * u.km, 0 * u.km)) if frame.data.differentials: if velocity is not None: raise ValueError('frame already has differentials, cannot also specify velocity') # otherwise frame is ready to go else: if velocity is None: differentials = ZERO_VELOCITIES else: differentials = CartesianDifferential(velocity) frame = frame.realize_frame(frame.data.with_differentials(differentials)) if isinstance(frame, (type, str)): if isinstance(frame, type): frame_cls = frame elif isinstance(frame, str): frame_cls = frame_transform_graph.lookup_name(frame) if velocity is None: velocity = 0 * u.m / u.s, 0 * u.m / u.s, 0 * u.m / u.s elif velocity.shape != (3,): raise ValueError('velocity should be a Quantity vector with 3 elements') frame = frame_cls(0 * u.m, 0 * u.m, 0 * u.m, *velocity, representation_type='cartesian', differential_type='cartesian') observer = update_differentials_to_match(self.observer, frame, preserve_observer_frame=preserve_observer_frame) # Calculate the initial and final los velocity init_obs_vel = self._calculate_radial_velocity(self.observer, self.target, as_scalar=True) fin_obs_vel = self._calculate_radial_velocity(observer, self.target, as_scalar=True) # Apply transformation to data new_data = _apply_relativistic_doppler_shift(self, fin_obs_vel - init_obs_vel) new_coord = self.replicate(value=new_data, observer=observer) return new_coord def with_radial_velocity_shift(self, target_shift=None, observer_shift=None): """ Apply a velocity shift to this spectral coordinate. The shift can be provided as a redshift (float value) or radial velocity (`~astropy.units.Quantity` with physical type of 'speed'). Parameters ---------- target_shift : float or `~astropy.units.Quantity` Shift value to apply to current target. observer_shift : float or `~astropy.units.Quantity` Shift value to apply to current observer. Returns ------- `SpectralCoord` New spectral coordinate with the target/observer velocity changed to incorporate the shift. This is always a new object even if ``target_shift`` and ``observer_shift`` are both `None`. 
""" if observer_shift is not None and (self.target is None or self.observer is None): raise ValueError("Both an observer and target must be defined " "before applying a velocity shift.") for arg in [x for x in [target_shift, observer_shift] if x is not None]: if isinstance(arg, u.Quantity) and not arg.unit.is_equivalent((u.one, KMS)): raise u.UnitsError("Argument must have unit physical type " "'speed' for radial velocty or " "'dimensionless' for redshift.") # The target or observer value is defined but is not a quantity object, # assume it's a redshift float value and convert to velocity if target_shift is None: if self._observer is None or self._target is None: return self.replicate() target_shift = 0 * KMS else: target_shift = u.Quantity(target_shift) if target_shift.unit.physical_type == 'dimensionless': target_shift = _redshift_to_velocity(target_shift) if self._observer is None or self._target is None: return self.replicate(value=_apply_relativistic_doppler_shift(self, target_shift), radial_velocity=self.radial_velocity + target_shift) if observer_shift is None: observer_shift = 0 * KMS else: observer_shift = u.Quantity(observer_shift) if observer_shift.unit.physical_type == 'dimensionless': observer_shift = _redshift_to_velocity(observer_shift) target_icrs = self._target.transform_to(ICRS()) observer_icrs = self._observer.transform_to(ICRS()) pos_hat = SpectralCoord._normalized_position_vector(observer_icrs, target_icrs) target_velocity = _get_velocities(target_icrs) + target_shift * pos_hat observer_velocity = _get_velocities(observer_icrs) + observer_shift * pos_hat target_velocity = CartesianDifferential(target_velocity.xyz) observer_velocity = CartesianDifferential(observer_velocity.xyz) new_target = (target_icrs .realize_frame(target_icrs.cartesian.with_differentials(target_velocity)) .transform_to(self._target)) new_observer = (observer_icrs .realize_frame(observer_icrs.cartesian.with_differentials(observer_velocity)) .transform_to(self._observer)) init_obs_vel = self._calculate_radial_velocity(observer_icrs, target_icrs, as_scalar=True) fin_obs_vel = self._calculate_radial_velocity(new_observer, new_target, as_scalar=True) new_data = _apply_relativistic_doppler_shift(self, fin_obs_vel - init_obs_vel) return self.replicate(value=new_data, observer=new_observer, target=new_target) def to_rest(self): """ Transforms the spectral axis to the rest frame. """ if self.observer is not None and self.target is not None: return self.with_observer_stationary_relative_to(self.target) result = _apply_relativistic_doppler_shift(self, -self.radial_velocity) return self.replicate(value=result, radial_velocity=0. 
* KMS, redshift=None) def __repr__(self): prefixstr = '<' + self.__class__.__name__ + ' ' try: radial_velocity = self.radial_velocity redshift = self.redshift except ValueError: radial_velocity = redshift = 'Undefined' repr_items = [f'{prefixstr}'] if self.observer is not None: observer_repr = indent(repr(self.observer), 14 * ' ').lstrip() repr_items.append(f' observer: {observer_repr}') if self.target is not None: target_repr = indent(repr(self.target), 12 * ' ').lstrip() repr_items.append(f' target: {target_repr}') if (self._observer is not None and self._target is not None) or self._radial_velocity is not None: if self.observer is not None and self.target is not None: repr_items.append(' observer to target (computed from above):') else: repr_items.append(' observer to target:') repr_items.append(f' radial_velocity={radial_velocity}') repr_items.append(f' redshift={redshift}') if self.doppler_rest is not None or self.doppler_convention is not None: repr_items.append(f' doppler_rest={self.doppler_rest}') repr_items.append(f' doppler_convention={self.doppler_convention}') arrstr = np.array2string(self.view(np.ndarray), separator=', ', prefix=' ') if len(repr_items) == 1: repr_items[0] += f'{arrstr}{self._unitstr:s}' else: repr_items[1] = ' (' + repr_items[1].lstrip() repr_items[-1] += ')' repr_items.append(f' {arrstr}{self._unitstr:s}') return '\n'.join(repr_items) + '>'
@staticmethod
def _validate_coordinate(coord, label=''):
    """
    Checks the type of the frame and whether a velocity differential and a
    distance have been defined on the frame object.

    If no distance is defined, the target is assumed to be "really far
    away", and the observer is assumed to be "in the solar system".

    Parameters
    ----------
    coord : `~astropy.coordinates.BaseCoordinateFrame`
        The new frame to be used for target or observer.
    label : str, optional
        The name of the object being validated (e.g. 'target' or 'observer'),
        which is then used in error messages.
    """

    if coord is None:
        return

    if not issubclass(coord.__class__, BaseCoordinateFrame):
        if isinstance(coord, SkyCoord):
            coord = coord.frame
        else:
            raise TypeError(f"{label} must be a SkyCoord or coordinate frame instance")

    # If the distance is not well-defined, ensure that it works properly
    # for generating differentials
    # TODO: change this to not set the distance and yield a warning once
    # there's a good way to address this in astropy.coordinates
    # https://github.com/astropy/astropy/issues/10247
    with np.errstate(all='ignore'):
        distance = getattr(coord, 'distance', None)

    if distance is not None and distance.unit.physical_type == 'dimensionless':
        coord = SkyCoord(coord, distance=DEFAULT_DISTANCE)
        warnings.warn(
            "Distance on coordinate object is dimensionless, an "
            f"arbitrary distance value of {DEFAULT_DISTANCE} will be set instead.",
            NoDistanceWarning)

    # If the observer frame does not contain information about the
    # velocity of the system, assume that the velocity is zero in the
    # system.
    if 's' not in coord.data.differentials:
        warnings.warn(
            "No velocity defined on frame, assuming {}.".format(
                ZERO_VELOCITIES),
            NoVelocityWarning)
        coord = attach_zero_velocities(coord)

    return coord
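A small standalone sketch of the zero-velocity branch this validator takes for frames without velocity information, using only public astropy.coordinates objects; the coordinate values are assumptions for illustration.

import astropy.units as u
from astropy.coordinates import ICRS, CartesianDifferential

# A frame with a position but no velocity differentials ...
coord = ICRS(ra=10 * u.deg, dec=20 * u.deg, distance=3 * u.kpc)
print('s' in coord.data.differentials)   # False -> NoVelocityWarning path

# ... gets zero velocities attached, mirroring attach_zero_velocities()
zero_vel = CartesianDifferential([0, 0, 0] * u.km / u.s)
new_data = coord.cartesian.with_differentials(zero_vel)
coord = coord.realize_frame(new_data)
print('s' in coord.data.differentials)   # True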
247
299
import warnings from textwrap import indent import astropy.units as u import numpy as np from astropy.constants import c from astropy.coordinates import (ICRS, CartesianDifferential, CartesianRepresentation, SkyCoord) from astropy.coordinates.spectral_quantity import SpectralQuantity from astropy.coordinates.baseframe import (BaseCoordinateFrame, frame_transform_graph) from astropy.utils.exceptions import AstropyUserWarning __all__ = ['SpectralCoord'] class NoVelocityWarning(AstropyUserWarning): pass class NoDistanceWarning(AstropyUserWarning): pass KMS = u.km / u.s C_KMS = c.to(KMS) ZERO_VELOCITIES = CartesianDifferential([0, 0, 0] * KMS) # Default distance to use for target when none is provided DEFAULT_DISTANCE = 1e6 * u.kpc # We don't want to run doctests in the docstrings we inherit from Quantity __doctest_skip__ = ['SpectralCoord.*'] def _velocity_to_redshift(velocity): """ Convert a velocity to a relativistic redshift. """ beta = velocity / C_KMS return np.sqrt((1 + beta) / (1 - beta)) - 1 def _redshift_to_velocity(redshift): """ Convert a relativistic redshift to a velocity. """ zponesq = (1 + redshift) ** 2 return (C_KMS * (zponesq - 1) / (zponesq + 1)) def _apply_relativistic_doppler_shift(scoord, velocity): """ Given a `SpectralQuantity` and a velocity, return a new `SpectralQuantity` that is Doppler shifted by this amount. Note that the Doppler shift applied is the full relativistic one, so `SpectralQuantity` currently expressed in velocity and not using the relativistic convention will temporarily be converted to use the relativistic convention while the shift is applied. Positive velocities are assumed to redshift the spectral quantity, while negative velocities blueshift the spectral quantity. """ # NOTE: we deliberately don't keep sub-classes of SpectralQuantity intact # since we can't guarantee that their metadata would be correct/consistent. squantity = scoord.view(SpectralQuantity) beta = velocity / c doppler_factor = np.sqrt((1 + beta) / (1 - beta)) if squantity.unit.is_equivalent(u.m): # wavelength return squantity * doppler_factor elif (squantity.unit.is_equivalent(u.Hz) or squantity.unit.is_equivalent(u.eV) or squantity.unit.is_equivalent(1 / u.m)): return squantity / doppler_factor elif squantity.unit.is_equivalent(KMS): # velocity return (squantity.to(u.Hz) / doppler_factor).to(squantity.unit) else: # pragma: no cover raise RuntimeError(f"Unexpected units in velocity shift: {squantity.unit}. " "This should not happen, so please report this in the " "astropy issue tracker!") def update_differentials_to_match(original, velocity_reference, preserve_observer_frame=False): """ Given an original coordinate object, update the differentials so that the final coordinate is at the same location as the original coordinate but co-moving with the velocity reference object. If preserve_original_frame is set to True, the resulting object will be in the frame of the original coordinate, otherwise it will be in the frame of the velocity reference. """ if not velocity_reference.data.differentials: raise ValueError("Reference frame has no velocities") # If the reference has an obstime already defined, we should ignore # it and stick with the original observer obstime. 
if 'obstime' in velocity_reference.frame_attributes and hasattr(original, 'obstime'): velocity_reference = velocity_reference.replicate(obstime=original.obstime) # We transform both coordinates to ICRS for simplicity and because we know # it's a simple frame that is not time-dependent (it could be that both # the original and velocity_reference frame are time-dependent) original_icrs = original.transform_to(ICRS()) velocity_reference_icrs = velocity_reference.transform_to(ICRS()) differentials = velocity_reference_icrs.data.represent_as(CartesianRepresentation, CartesianDifferential).differentials data_with_differentials = (original_icrs.data.represent_as(CartesianRepresentation) .with_differentials(differentials)) final_icrs = original_icrs.realize_frame(data_with_differentials) if preserve_observer_frame: final = final_icrs.transform_to(original) else: final = final_icrs.transform_to(velocity_reference) return final.replicate(representation_type=CartesianRepresentation, differential_type=CartesianDifferential) def attach_zero_velocities(coord): """ Set the differentials to be stationary on a coordinate object. """ new_data = coord.cartesian.with_differentials(ZERO_VELOCITIES) return coord.realize_frame(new_data) def _get_velocities(coord): if 's' in coord.data.differentials: return coord.velocity else: return ZERO_VELOCITIES class SpectralCoord(SpectralQuantity): """ A spectral coordinate with its corresponding unit. .. note:: The |SpectralCoord| class is new in Astropy v4.1 and should be considered experimental at this time. Note that we do not fully support cases where the observer and target are moving relativistically relative to each other, so care should be taken in those cases. It is possible that there will be API changes in future versions of Astropy based on user feedback. If you have specific ideas for how it might be improved, please let us know on the `astropy-dev mailing list`_ or at http://feedback.astropy.org. Parameters ---------- value : ndarray or `~astropy.units.Quantity` or `SpectralCoord` Spectral values, which should be either wavelength, frequency, energy, wavenumber, or velocity values. unit : str or `~astropy.units.Unit` Unit for the given spectral values. observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional The coordinate (position and velocity) of observer. If no velocities are present on this object, the observer is assumed to be stationary relative to the frame origin. target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional The coordinate (position and velocity) of target. If no velocities are present on this object, the target is assumed to be stationary relative to the frame origin. radial_velocity : `~astropy.units.Quantity`, optional The radial velocity of the target with respect to the observer. This can only be specified if ``redshift`` is not specified. redshift : float, optional The relativistic redshift of the target with respect to the observer. This can only be specified if ``radial_velocity`` cannot be specified. doppler_rest : `~astropy.units.Quantity`, optional The rest value to use when expressing the spectral value as a velocity. doppler_convention : str, optional The Doppler convention to use when expressing the spectral value as a velocity. 
""" @u.quantity_input(radial_velocity=u.km/u.s) def __new__(cls, value, unit=None, observer=None, target=None, radial_velocity=None, redshift=None, **kwargs): obj = super().__new__(cls, value, unit=unit, **kwargs) # There are two main modes of operation in this class. Either the # observer and target are both defined, in which case the radial # velocity and redshift are automatically computed from these, or # only one of the observer and target are specified, along with a # manually specified radial velocity or redshift. So if a target and # observer are both specified, we can't also accept a radial velocity # or redshift. if target is not None and observer is not None: if radial_velocity is not None or redshift is not None: raise ValueError("Cannot specify radial velocity or redshift if both " "target and observer are specified") # We only deal with redshifts here and in the redshift property. # Otherwise internally we always deal with velocities. if redshift is not None: if radial_velocity is not None: raise ValueError("Cannot set both a radial velocity and redshift") redshift = u.Quantity(redshift) # For now, we can't specify redshift=u.one in quantity_input above # and have it work with plain floats, but if that is fixed, for # example as in https://github.com/astropy/astropy/pull/10232, we # can remove the check here and add redshift=u.one to the decorator if not redshift.unit.is_equivalent(u.one): raise u.UnitsError('redshift should be dimensionless') radial_velocity = _redshift_to_velocity(redshift) # If we're initializing from an existing SpectralCoord, keep any # parameters that aren't being overridden if observer is None: observer = getattr(value, 'observer', None) if target is None: target = getattr(value, 'target', None) # As mentioned above, we should only specify the radial velocity # manually if either or both the observer and target are not # specified. if observer is None or target is None: if radial_velocity is None: radial_velocity = getattr(value, 'radial_velocity', None) obj._radial_velocity = radial_velocity obj._observer = cls._validate_coordinate(observer, label='observer') obj._target = cls._validate_coordinate(target, label='target') return obj def __array_finalize__(self, obj): super().__array_finalize__(obj) self._radial_velocity = getattr(obj, '_radial_velocity', None) self._observer = getattr(obj, '_observer', None) self._target = getattr(obj, '_target', None) @staticmethod def _validate_coordinate(coord, label=''): """ Checks the type of the frame and whether a velocity differential and a distance has been defined on the frame object. If no distance is defined, the target is assumed to be "really far away", and the observer is assumed to be "in the solar system". Parameters ---------- coord : `~astropy.coordinates.BaseCoordinateFrame` The new frame to be used for target or observer. label : str, optional The name of the object being validated (e.g. 'target' or 'observer'), which is then used in error messages. 
""" if coord is None: return if not issubclass(coord.__class__, BaseCoordinateFrame): if isinstance(coord, SkyCoord): coord = coord.frame else: raise TypeError(f"{label} must be a SkyCoord or coordinate frame instance") # If the distance is not well-defined, ensure that it works properly # for generating differentials # TODO: change this to not set the distance and yield a warning once # there's a good way to address this in astropy.coordinates # https://github.com/astropy/astropy/issues/10247 with np.errstate(all='ignore'): distance = getattr(coord, 'distance', None) if distance is not None and distance.unit.physical_type == 'dimensionless': coord = SkyCoord(coord, distance=DEFAULT_DISTANCE) warnings.warn( "Distance on coordinate object is dimensionless, an " f"abritrary distance value of {DEFAULT_DISTANCE} will be set instead.", NoDistanceWarning) # If the observer frame does not contain information about the # velocity of the system, assume that the velocity is zero in the # system. if 's' not in coord.data.differentials: warnings.warn( "No velocity defined on frame, assuming {}.".format( ZERO_VELOCITIES), NoVelocityWarning) coord = attach_zero_velocities(coord) return coord def replicate(self, value=None, unit=None, observer=None, target=None, radial_velocity=None, redshift=None, doppler_convention=None, doppler_rest=None, copy=False): """ Return a replica of the `SpectralCoord`, optionally changing the values or attributes. Note that no conversion is carried out by this method - this keeps all the values and attributes the same, except for the ones explicitly passed to this method which are changed. If ``copy`` is set to `True` then a full copy of the internal arrays will be made. By default the replica will use a reference to the original arrays when possible to save memory. Parameters ---------- value : ndarray or `~astropy.units.Quantity` or `SpectralCoord`, optional Spectral values, which should be either wavelength, frequency, energy, wavenumber, or velocity values. unit : str or `~astropy.units.Unit` Unit for the given spectral values. observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional The coordinate (position and velocity) of observer. target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional The coordinate (position and velocity) of target. radial_velocity : `~astropy.units.Quantity`, optional The radial velocity of the target with respect to the observer. redshift : float, optional The relativistic redshift of the target with respect to the observer. doppler_rest : `~astropy.units.Quantity`, optional The rest value to use when expressing the spectral value as a velocity. doppler_convention : str, optional The Doppler convention to use when expressing the spectral value as a velocity. copy : bool, optional If `True`, and ``value`` is not specified, the values are copied to the new `SkyCoord` - otherwise a reference to the same values is used. 
Returns ------- sc : `SpectralCoord` object Replica of this object """ if isinstance(value, u.Quantity): if unit is not None: raise ValueError("Cannot specify value as a Quantity and also specify unit") else: value, unit = value.value, value.unit value = value if value is not None else self.value unit = unit or self.unit observer = self._validate_coordinate(observer) or self.observer target = self._validate_coordinate(target) or self.target doppler_convention = doppler_convention or self.doppler_convention doppler_rest = doppler_rest or self.doppler_rest # If value is being taken from self and copy is Tru if copy: value = value.copy() # Only include radial_velocity if it is not auto-computed from the # observer and target. if (self.observer is None or self.target is None) and radial_velocity is None and redshift is None: radial_velocity = self.radial_velocity with warnings.catch_warnings(): warnings.simplefilter('ignore', NoVelocityWarning) return self.__class__(value=value, unit=unit, observer=observer, target=target, radial_velocity=radial_velocity, redshift=redshift, doppler_convention=doppler_convention, doppler_rest=doppler_rest, copy=False) @property def quantity(self): """ Convert the ``SpectralCoord`` to a `~astropy.units.Quantity`. Equivalent to ``self.view(u.Quantity)``. Returns ------- `~astropy.units.Quantity` This object viewed as a `~astropy.units.Quantity`. """ return self.view(u.Quantity) @property def observer(self): """ The coordinates of the observer. If set, and a target is set as well, this will override any explicit radial velocity passed in. Returns ------- `~astropy.coordinates.BaseCoordinateFrame` The astropy coordinate frame representing the observation. """ return self._observer @observer.setter def observer(self, value): if self.observer is not None: raise ValueError("observer has already been set") self._observer = self._validate_coordinate(value, label='observer') # Switch to auto-computing radial velocity if self._target is not None: self._radial_velocity = None @property def target(self): """ The coordinates of the target being observed. If set, and an observer is set as well, this will override any explicit radial velocity passed in. Returns ------- `~astropy.coordinates.BaseCoordinateFrame` The astropy coordinate frame representing the target. """ return self._target @target.setter def target(self, value): if self.target is not None: raise ValueError("target has already been set") self._target = self._validate_coordinate(value, label='target') # Switch to auto-computing radial velocity if self._observer is not None: self._radial_velocity = None @property def radial_velocity(self): """ Radial velocity of target relative to the observer. Returns ------- `~astropy.units.Quantity` Radial velocity of target. Notes ----- This is different from the ``.radial_velocity`` property of a coordinate frame in that this calculates the radial velocity with respect to the *observer*, not the origin of the frame. """ if self._observer is None or self._target is None: if self._radial_velocity is None: return 0 * KMS else: return self._radial_velocity else: return self._calculate_radial_velocity(self._observer, self._target, as_scalar=True) @property def redshift(self): """ Redshift of target relative to observer. Calculated from the radial velocity. Returns ------- float Redshift of target. 
""" return _velocity_to_redshift(self.radial_velocity) @staticmethod def _calculate_radial_velocity(observer, target, as_scalar=False): """ Compute the line-of-sight velocity from the observer to the target. Parameters ---------- observer : `~astropy.coordinates.BaseCoordinateFrame` The frame of the observer. target : `~astropy.coordinates.BaseCoordinateFrame` The frame of the target. as_scalar : bool If `True`, the magnitude of the velocity vector will be returned, otherwise the full vector will be returned. Returns ------- `~astropy.units.Quantity` The radial velocity of the target with respect to the observer. """ # Convert observer and target to ICRS to avoid finite differencing # calculations that lack numerical precision. observer_icrs = observer.transform_to(ICRS()) target_icrs = target.transform_to(ICRS()) pos_hat = SpectralCoord._normalized_position_vector(observer_icrs, target_icrs) d_vel = target_icrs.velocity - observer_icrs.velocity vel_mag = pos_hat.dot(d_vel) if as_scalar: return vel_mag else: return vel_mag * pos_hat @staticmethod def _normalized_position_vector(observer, target): """ Calculate the normalized position vector between two frames. Parameters ---------- observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The observation frame or coordinate. target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The target frame or coordinate. Returns ------- pos_hat : `BaseRepresentation` Position representation. """ d_pos = (target.cartesian.without_differentials() - observer.cartesian.without_differentials()) dp_norm = d_pos.norm() # Reset any that are 0 to 1 to avoid nans from 0/0 dp_norm[dp_norm == 0] = 1 * dp_norm.unit pos_hat = d_pos / dp_norm return pos_hat @u.quantity_input(velocity=u.km/u.s) def with_observer_stationary_relative_to(self, frame, velocity=None, preserve_observer_frame=False): """ A new `SpectralCoord` with the velocity of the observer altered, but not the position. If a coordinate frame is specified, the observer velocities will be modified to be stationary in the specified frame. If a coordinate instance is specified, optionally with non-zero velocities, the observer velocities will be updated so that the observer is co-moving with the specified coordinates. Parameters ---------- frame : str, `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The observation frame in which the observer will be stationary. This can be the name of a frame (e.g. 'icrs'), a frame class, frame instance with no data, or instance with data. This can optionally include velocities. velocity : `~astropy.units.Quantity` or `~astropy.coordinates.CartesianDifferential`, optional If ``frame`` does not contain velocities, these can be specified as a 3-element `~astropy.units.Quantity`. In the case where this is also not specified, the velocities default to zero. preserve_observer_frame : bool If `True`, the final observer frame class will be the same as the original one, and if `False` it will be the frame of the velocity reference class. Returns ------- new_coord : `SpectralCoord` The new coordinate object representing the spectral data transformed based on the observer's new velocity frame. 
""" if self.observer is None or self.target is None: raise ValueError("This method can only be used if both observer " "and target are defined on the SpectralCoord.") # Start off by extracting frame if a SkyCoord was passed in if isinstance(frame, SkyCoord): frame = frame.frame if isinstance(frame, BaseCoordinateFrame): if not frame.has_data: frame = frame.realize_frame(CartesianRepresentation(0 * u.km, 0 * u.km, 0 * u.km)) if frame.data.differentials: if velocity is not None: raise ValueError('frame already has differentials, cannot also specify velocity') # otherwise frame is ready to go else: if velocity is None: differentials = ZERO_VELOCITIES else: differentials = CartesianDifferential(velocity) frame = frame.realize_frame(frame.data.with_differentials(differentials)) if isinstance(frame, (type, str)): if isinstance(frame, type): frame_cls = frame elif isinstance(frame, str): frame_cls = frame_transform_graph.lookup_name(frame) if velocity is None: velocity = 0 * u.m / u.s, 0 * u.m / u.s, 0 * u.m / u.s elif velocity.shape != (3,): raise ValueError('velocity should be a Quantity vector with 3 elements') frame = frame_cls(0 * u.m, 0 * u.m, 0 * u.m, *velocity, representation_type='cartesian', differential_type='cartesian') observer = update_differentials_to_match(self.observer, frame, preserve_observer_frame=preserve_observer_frame) # Calculate the initial and final los velocity init_obs_vel = self._calculate_radial_velocity(self.observer, self.target, as_scalar=True) fin_obs_vel = self._calculate_radial_velocity(observer, self.target, as_scalar=True) # Apply transformation to data new_data = _apply_relativistic_doppler_shift(self, fin_obs_vel - init_obs_vel) new_coord = self.replicate(value=new_data, observer=observer) return new_coord def with_radial_velocity_shift(self, target_shift=None, observer_shift=None): """ Apply a velocity shift to this spectral coordinate. The shift can be provided as a redshift (float value) or radial velocity (`~astropy.units.Quantity` with physical type of 'speed'). Parameters ---------- target_shift : float or `~astropy.units.Quantity` Shift value to apply to current target. observer_shift : float or `~astropy.units.Quantity` Shift value to apply to current observer. Returns ------- `SpectralCoord` New spectral coordinate with the target/observer velocity changed to incorporate the shift. This is always a new object even if ``target_shift`` and ``observer_shift`` are both `None`. 
""" if observer_shift is not None and (self.target is None or self.observer is None): raise ValueError("Both an observer and target must be defined " "before applying a velocity shift.") for arg in [x for x in [target_shift, observer_shift] if x is not None]: if isinstance(arg, u.Quantity) and not arg.unit.is_equivalent((u.one, KMS)): raise u.UnitsError("Argument must have unit physical type " "'speed' for radial velocty or " "'dimensionless' for redshift.") # The target or observer value is defined but is not a quantity object, # assume it's a redshift float value and convert to velocity if target_shift is None: if self._observer is None or self._target is None: return self.replicate() target_shift = 0 * KMS else: target_shift = u.Quantity(target_shift) if target_shift.unit.physical_type == 'dimensionless': target_shift = _redshift_to_velocity(target_shift) if self._observer is None or self._target is None: return self.replicate(value=_apply_relativistic_doppler_shift(self, target_shift), radial_velocity=self.radial_velocity + target_shift) if observer_shift is None: observer_shift = 0 * KMS else: observer_shift = u.Quantity(observer_shift) if observer_shift.unit.physical_type == 'dimensionless': observer_shift = _redshift_to_velocity(observer_shift) target_icrs = self._target.transform_to(ICRS()) observer_icrs = self._observer.transform_to(ICRS()) pos_hat = SpectralCoord._normalized_position_vector(observer_icrs, target_icrs) target_velocity = _get_velocities(target_icrs) + target_shift * pos_hat observer_velocity = _get_velocities(observer_icrs) + observer_shift * pos_hat target_velocity = CartesianDifferential(target_velocity.xyz) observer_velocity = CartesianDifferential(observer_velocity.xyz) new_target = (target_icrs .realize_frame(target_icrs.cartesian.with_differentials(target_velocity)) .transform_to(self._target)) new_observer = (observer_icrs .realize_frame(observer_icrs.cartesian.with_differentials(observer_velocity)) .transform_to(self._observer)) init_obs_vel = self._calculate_radial_velocity(observer_icrs, target_icrs, as_scalar=True) fin_obs_vel = self._calculate_radial_velocity(new_observer, new_target, as_scalar=True) new_data = _apply_relativistic_doppler_shift(self, fin_obs_vel - init_obs_vel) return self.replicate(value=new_data, observer=new_observer, target=new_target) def to_rest(self): """ Transforms the spectral axis to the rest frame. """ if self.observer is not None and self.target is not None: return self.with_observer_stationary_relative_to(self.target) result = _apply_relativistic_doppler_shift(self, -self.radial_velocity) return self.replicate(value=result, radial_velocity=0. 
* KMS, redshift=None) def __repr__(self): prefixstr = '<' + self.__class__.__name__ + ' ' try: radial_velocity = self.radial_velocity redshift = self.redshift except ValueError: radial_velocity = redshift = 'Undefined' repr_items = [f'{prefixstr}'] if self.observer is not None: observer_repr = indent(repr(self.observer), 14 * ' ').lstrip() repr_items.append(f' observer: {observer_repr}') if self.target is not None: target_repr = indent(repr(self.target), 12 * ' ').lstrip() repr_items.append(f' target: {target_repr}') if (self._observer is not None and self._target is not None) or self._radial_velocity is not None: if self.observer is not None and self.target is not None: repr_items.append(' observer to target (computed from above):') else: repr_items.append(' observer to target:') repr_items.append(f' radial_velocity={radial_velocity}') repr_items.append(f' redshift={redshift}') if self.doppler_rest is not None or self.doppler_convention is not None: repr_items.append(f' doppler_rest={self.doppler_rest}') repr_items.append(f' doppler_convention={self.doppler_convention}') arrstr = np.array2string(self.view(np.ndarray), separator=', ', prefix=' ') if len(repr_items) == 1: repr_items[0] += f'{arrstr}{self._unitstr:s}' else: repr_items[1] = ' (' + repr_items[1].lstrip() repr_items[-1] += ')' repr_items.append(f' {arrstr}{self._unitstr:s}') return '\n'.join(repr_items) + '>'
replicate
Return a replica of the `SpectralCoord`, optionally changing the
values or attributes.

Note that no conversion is carried out by this method - this keeps all
the values and attributes the same, except for the ones explicitly
passed to this method which are changed.

If ``copy`` is set to `True` then a full copy of the internal arrays
will be made.

By default the replica will use a reference to the original arrays
when possible to save memory.

Parameters
----------
value : ndarray or `~astropy.units.Quantity` or `SpectralCoord`, optional
    Spectral values, which should be either wavelength, frequency,
    energy, wavenumber, or velocity values.
unit : str or `~astropy.units.Unit`
    Unit for the given spectral values.
observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional
    The coordinate (position and velocity) of observer.
target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional
    The coordinate (position and velocity) of target.
radial_velocity : `~astropy.units.Quantity`, optional
    The radial velocity of the target with respect to the observer.
redshift : float, optional
    The relativistic redshift of the target with respect to the
    observer.
doppler_rest : `~astropy.units.Quantity`, optional
    The rest value to use when expressing the spectral value as a
    velocity.
doppler_convention : str, optional
    The Doppler convention to use when expressing the spectral value
    as a velocity.
copy : bool, optional
    If `True`, and ``value`` is not specified, the values are copied to
    the new `SkyCoord` - otherwise a reference to the same values
    is used.

Returns
-------
sc : `SpectralCoord` object
    Replica of this object
import warnings from textwrap import indent import astropy.units as u import numpy as np from astropy.constants import c from astropy.coordinates import (ICRS, CartesianDifferential, CartesianRepresentation, SkyCoord) from astropy.coordinates.spectral_quantity import SpectralQuantity from astropy.coordinates.baseframe import (BaseCoordinateFrame, frame_transform_graph) from astropy.utils.exceptions import AstropyUserWarning __all__ = ['SpectralCoord'] class NoVelocityWarning(AstropyUserWarning): pass class NoDistanceWarning(AstropyUserWarning): pass KMS = u.km / u.s C_KMS = c.to(KMS) ZERO_VELOCITIES = CartesianDifferential([0, 0, 0] * KMS) # Default distance to use for target when none is provided DEFAULT_DISTANCE = 1e6 * u.kpc # We don't want to run doctests in the docstrings we inherit from Quantity __doctest_skip__ = ['SpectralCoord.*'] def _velocity_to_redshift(velocity): """ Convert a velocity to a relativistic redshift. """ beta = velocity / C_KMS return np.sqrt((1 + beta) / (1 - beta)) - 1 def _redshift_to_velocity(redshift): """ Convert a relativistic redshift to a velocity. """ zponesq = (1 + redshift) ** 2 return (C_KMS * (zponesq - 1) / (zponesq + 1)) def _apply_relativistic_doppler_shift(scoord, velocity): """ Given a `SpectralQuantity` and a velocity, return a new `SpectralQuantity` that is Doppler shifted by this amount. Note that the Doppler shift applied is the full relativistic one, so `SpectralQuantity` currently expressed in velocity and not using the relativistic convention will temporarily be converted to use the relativistic convention while the shift is applied. Positive velocities are assumed to redshift the spectral quantity, while negative velocities blueshift the spectral quantity. """ # NOTE: we deliberately don't keep sub-classes of SpectralQuantity intact # since we can't guarantee that their metadata would be correct/consistent. squantity = scoord.view(SpectralQuantity) beta = velocity / c doppler_factor = np.sqrt((1 + beta) / (1 - beta)) if squantity.unit.is_equivalent(u.m): # wavelength return squantity * doppler_factor elif (squantity.unit.is_equivalent(u.Hz) or squantity.unit.is_equivalent(u.eV) or squantity.unit.is_equivalent(1 / u.m)): return squantity / doppler_factor elif squantity.unit.is_equivalent(KMS): # velocity return (squantity.to(u.Hz) / doppler_factor).to(squantity.unit) else: # pragma: no cover raise RuntimeError(f"Unexpected units in velocity shift: {squantity.unit}. " "This should not happen, so please report this in the " "astropy issue tracker!") def update_differentials_to_match(original, velocity_reference, preserve_observer_frame=False): """ Given an original coordinate object, update the differentials so that the final coordinate is at the same location as the original coordinate but co-moving with the velocity reference object. If preserve_original_frame is set to True, the resulting object will be in the frame of the original coordinate, otherwise it will be in the frame of the velocity reference. """ if not velocity_reference.data.differentials: raise ValueError("Reference frame has no velocities") # If the reference has an obstime already defined, we should ignore # it and stick with the original observer obstime. 
if 'obstime' in velocity_reference.frame_attributes and hasattr(original, 'obstime'): velocity_reference = velocity_reference.replicate(obstime=original.obstime) # We transform both coordinates to ICRS for simplicity and because we know # it's a simple frame that is not time-dependent (it could be that both # the original and velocity_reference frame are time-dependent) original_icrs = original.transform_to(ICRS()) velocity_reference_icrs = velocity_reference.transform_to(ICRS()) differentials = velocity_reference_icrs.data.represent_as(CartesianRepresentation, CartesianDifferential).differentials data_with_differentials = (original_icrs.data.represent_as(CartesianRepresentation) .with_differentials(differentials)) final_icrs = original_icrs.realize_frame(data_with_differentials) if preserve_observer_frame: final = final_icrs.transform_to(original) else: final = final_icrs.transform_to(velocity_reference) return final.replicate(representation_type=CartesianRepresentation, differential_type=CartesianDifferential) def attach_zero_velocities(coord): """ Set the differentials to be stationary on a coordinate object. """ new_data = coord.cartesian.with_differentials(ZERO_VELOCITIES) return coord.realize_frame(new_data) def _get_velocities(coord): if 's' in coord.data.differentials: return coord.velocity else: return ZERO_VELOCITIES class SpectralCoord(SpectralQuantity): """ A spectral coordinate with its corresponding unit. .. note:: The |SpectralCoord| class is new in Astropy v4.1 and should be considered experimental at this time. Note that we do not fully support cases where the observer and target are moving relativistically relative to each other, so care should be taken in those cases. It is possible that there will be API changes in future versions of Astropy based on user feedback. If you have specific ideas for how it might be improved, please let us know on the `astropy-dev mailing list`_ or at http://feedback.astropy.org. Parameters ---------- value : ndarray or `~astropy.units.Quantity` or `SpectralCoord` Spectral values, which should be either wavelength, frequency, energy, wavenumber, or velocity values. unit : str or `~astropy.units.Unit` Unit for the given spectral values. observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional The coordinate (position and velocity) of observer. If no velocities are present on this object, the observer is assumed to be stationary relative to the frame origin. target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional The coordinate (position and velocity) of target. If no velocities are present on this object, the target is assumed to be stationary relative to the frame origin. radial_velocity : `~astropy.units.Quantity`, optional The radial velocity of the target with respect to the observer. This can only be specified if ``redshift`` is not specified. redshift : float, optional The relativistic redshift of the target with respect to the observer. This can only be specified if ``radial_velocity`` cannot be specified. doppler_rest : `~astropy.units.Quantity`, optional The rest value to use when expressing the spectral value as a velocity. doppler_convention : str, optional The Doppler convention to use when expressing the spectral value as a velocity. 
""" @u.quantity_input(radial_velocity=u.km/u.s) def __new__(cls, value, unit=None, observer=None, target=None, radial_velocity=None, redshift=None, **kwargs): obj = super().__new__(cls, value, unit=unit, **kwargs) # There are two main modes of operation in this class. Either the # observer and target are both defined, in which case the radial # velocity and redshift are automatically computed from these, or # only one of the observer and target are specified, along with a # manually specified radial velocity or redshift. So if a target and # observer are both specified, we can't also accept a radial velocity # or redshift. if target is not None and observer is not None: if radial_velocity is not None or redshift is not None: raise ValueError("Cannot specify radial velocity or redshift if both " "target and observer are specified") # We only deal with redshifts here and in the redshift property. # Otherwise internally we always deal with velocities. if redshift is not None: if radial_velocity is not None: raise ValueError("Cannot set both a radial velocity and redshift") redshift = u.Quantity(redshift) # For now, we can't specify redshift=u.one in quantity_input above # and have it work with plain floats, but if that is fixed, for # example as in https://github.com/astropy/astropy/pull/10232, we # can remove the check here and add redshift=u.one to the decorator if not redshift.unit.is_equivalent(u.one): raise u.UnitsError('redshift should be dimensionless') radial_velocity = _redshift_to_velocity(redshift) # If we're initializing from an existing SpectralCoord, keep any # parameters that aren't being overridden if observer is None: observer = getattr(value, 'observer', None) if target is None: target = getattr(value, 'target', None) # As mentioned above, we should only specify the radial velocity # manually if either or both the observer and target are not # specified. if observer is None or target is None: if radial_velocity is None: radial_velocity = getattr(value, 'radial_velocity', None) obj._radial_velocity = radial_velocity obj._observer = cls._validate_coordinate(observer, label='observer') obj._target = cls._validate_coordinate(target, label='target') return obj def __array_finalize__(self, obj): super().__array_finalize__(obj) self._radial_velocity = getattr(obj, '_radial_velocity', None) self._observer = getattr(obj, '_observer', None) self._target = getattr(obj, '_target', None) @staticmethod def _validate_coordinate(coord, label=''): """ Checks the type of the frame and whether a velocity differential and a distance has been defined on the frame object. If no distance is defined, the target is assumed to be "really far away", and the observer is assumed to be "in the solar system". Parameters ---------- coord : `~astropy.coordinates.BaseCoordinateFrame` The new frame to be used for target or observer. label : str, optional The name of the object being validated (e.g. 'target' or 'observer'), which is then used in error messages. 
""" if coord is None: return if not issubclass(coord.__class__, BaseCoordinateFrame): if isinstance(coord, SkyCoord): coord = coord.frame else: raise TypeError(f"{label} must be a SkyCoord or coordinate frame instance") # If the distance is not well-defined, ensure that it works properly # for generating differentials # TODO: change this to not set the distance and yield a warning once # there's a good way to address this in astropy.coordinates # https://github.com/astropy/astropy/issues/10247 with np.errstate(all='ignore'): distance = getattr(coord, 'distance', None) if distance is not None and distance.unit.physical_type == 'dimensionless': coord = SkyCoord(coord, distance=DEFAULT_DISTANCE) warnings.warn( "Distance on coordinate object is dimensionless, an " f"abritrary distance value of {DEFAULT_DISTANCE} will be set instead.", NoDistanceWarning) # If the observer frame does not contain information about the # velocity of the system, assume that the velocity is zero in the # system. if 's' not in coord.data.differentials: warnings.warn( "No velocity defined on frame, assuming {}.".format( ZERO_VELOCITIES), NoVelocityWarning) coord = attach_zero_velocities(coord) return coord # MASKED: replicate function (lines 301-374) @property def quantity(self): """ Convert the ``SpectralCoord`` to a `~astropy.units.Quantity`. Equivalent to ``self.view(u.Quantity)``. Returns ------- `~astropy.units.Quantity` This object viewed as a `~astropy.units.Quantity`. """ return self.view(u.Quantity) @property def observer(self): """ The coordinates of the observer. If set, and a target is set as well, this will override any explicit radial velocity passed in. Returns ------- `~astropy.coordinates.BaseCoordinateFrame` The astropy coordinate frame representing the observation. """ return self._observer @observer.setter def observer(self, value): if self.observer is not None: raise ValueError("observer has already been set") self._observer = self._validate_coordinate(value, label='observer') # Switch to auto-computing radial velocity if self._target is not None: self._radial_velocity = None @property def target(self): """ The coordinates of the target being observed. If set, and an observer is set as well, this will override any explicit radial velocity passed in. Returns ------- `~astropy.coordinates.BaseCoordinateFrame` The astropy coordinate frame representing the target. """ return self._target @target.setter def target(self, value): if self.target is not None: raise ValueError("target has already been set") self._target = self._validate_coordinate(value, label='target') # Switch to auto-computing radial velocity if self._observer is not None: self._radial_velocity = None @property def radial_velocity(self): """ Radial velocity of target relative to the observer. Returns ------- `~astropy.units.Quantity` Radial velocity of target. Notes ----- This is different from the ``.radial_velocity`` property of a coordinate frame in that this calculates the radial velocity with respect to the *observer*, not the origin of the frame. """ if self._observer is None or self._target is None: if self._radial_velocity is None: return 0 * KMS else: return self._radial_velocity else: return self._calculate_radial_velocity(self._observer, self._target, as_scalar=True) @property def redshift(self): """ Redshift of target relative to observer. Calculated from the radial velocity. Returns ------- float Redshift of target. 
""" return _velocity_to_redshift(self.radial_velocity) @staticmethod def _calculate_radial_velocity(observer, target, as_scalar=False): """ Compute the line-of-sight velocity from the observer to the target. Parameters ---------- observer : `~astropy.coordinates.BaseCoordinateFrame` The frame of the observer. target : `~astropy.coordinates.BaseCoordinateFrame` The frame of the target. as_scalar : bool If `True`, the magnitude of the velocity vector will be returned, otherwise the full vector will be returned. Returns ------- `~astropy.units.Quantity` The radial velocity of the target with respect to the observer. """ # Convert observer and target to ICRS to avoid finite differencing # calculations that lack numerical precision. observer_icrs = observer.transform_to(ICRS()) target_icrs = target.transform_to(ICRS()) pos_hat = SpectralCoord._normalized_position_vector(observer_icrs, target_icrs) d_vel = target_icrs.velocity - observer_icrs.velocity vel_mag = pos_hat.dot(d_vel) if as_scalar: return vel_mag else: return vel_mag * pos_hat @staticmethod def _normalized_position_vector(observer, target): """ Calculate the normalized position vector between two frames. Parameters ---------- observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The observation frame or coordinate. target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The target frame or coordinate. Returns ------- pos_hat : `BaseRepresentation` Position representation. """ d_pos = (target.cartesian.without_differentials() - observer.cartesian.without_differentials()) dp_norm = d_pos.norm() # Reset any that are 0 to 1 to avoid nans from 0/0 dp_norm[dp_norm == 0] = 1 * dp_norm.unit pos_hat = d_pos / dp_norm return pos_hat @u.quantity_input(velocity=u.km/u.s) def with_observer_stationary_relative_to(self, frame, velocity=None, preserve_observer_frame=False): """ A new `SpectralCoord` with the velocity of the observer altered, but not the position. If a coordinate frame is specified, the observer velocities will be modified to be stationary in the specified frame. If a coordinate instance is specified, optionally with non-zero velocities, the observer velocities will be updated so that the observer is co-moving with the specified coordinates. Parameters ---------- frame : str, `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The observation frame in which the observer will be stationary. This can be the name of a frame (e.g. 'icrs'), a frame class, frame instance with no data, or instance with data. This can optionally include velocities. velocity : `~astropy.units.Quantity` or `~astropy.coordinates.CartesianDifferential`, optional If ``frame`` does not contain velocities, these can be specified as a 3-element `~astropy.units.Quantity`. In the case where this is also not specified, the velocities default to zero. preserve_observer_frame : bool If `True`, the final observer frame class will be the same as the original one, and if `False` it will be the frame of the velocity reference class. Returns ------- new_coord : `SpectralCoord` The new coordinate object representing the spectral data transformed based on the observer's new velocity frame. 
""" if self.observer is None or self.target is None: raise ValueError("This method can only be used if both observer " "and target are defined on the SpectralCoord.") # Start off by extracting frame if a SkyCoord was passed in if isinstance(frame, SkyCoord): frame = frame.frame if isinstance(frame, BaseCoordinateFrame): if not frame.has_data: frame = frame.realize_frame(CartesianRepresentation(0 * u.km, 0 * u.km, 0 * u.km)) if frame.data.differentials: if velocity is not None: raise ValueError('frame already has differentials, cannot also specify velocity') # otherwise frame is ready to go else: if velocity is None: differentials = ZERO_VELOCITIES else: differentials = CartesianDifferential(velocity) frame = frame.realize_frame(frame.data.with_differentials(differentials)) if isinstance(frame, (type, str)): if isinstance(frame, type): frame_cls = frame elif isinstance(frame, str): frame_cls = frame_transform_graph.lookup_name(frame) if velocity is None: velocity = 0 * u.m / u.s, 0 * u.m / u.s, 0 * u.m / u.s elif velocity.shape != (3,): raise ValueError('velocity should be a Quantity vector with 3 elements') frame = frame_cls(0 * u.m, 0 * u.m, 0 * u.m, *velocity, representation_type='cartesian', differential_type='cartesian') observer = update_differentials_to_match(self.observer, frame, preserve_observer_frame=preserve_observer_frame) # Calculate the initial and final los velocity init_obs_vel = self._calculate_radial_velocity(self.observer, self.target, as_scalar=True) fin_obs_vel = self._calculate_radial_velocity(observer, self.target, as_scalar=True) # Apply transformation to data new_data = _apply_relativistic_doppler_shift(self, fin_obs_vel - init_obs_vel) new_coord = self.replicate(value=new_data, observer=observer) return new_coord def with_radial_velocity_shift(self, target_shift=None, observer_shift=None): """ Apply a velocity shift to this spectral coordinate. The shift can be provided as a redshift (float value) or radial velocity (`~astropy.units.Quantity` with physical type of 'speed'). Parameters ---------- target_shift : float or `~astropy.units.Quantity` Shift value to apply to current target. observer_shift : float or `~astropy.units.Quantity` Shift value to apply to current observer. Returns ------- `SpectralCoord` New spectral coordinate with the target/observer velocity changed to incorporate the shift. This is always a new object even if ``target_shift`` and ``observer_shift`` are both `None`. 
""" if observer_shift is not None and (self.target is None or self.observer is None): raise ValueError("Both an observer and target must be defined " "before applying a velocity shift.") for arg in [x for x in [target_shift, observer_shift] if x is not None]: if isinstance(arg, u.Quantity) and not arg.unit.is_equivalent((u.one, KMS)): raise u.UnitsError("Argument must have unit physical type " "'speed' for radial velocty or " "'dimensionless' for redshift.") # The target or observer value is defined but is not a quantity object, # assume it's a redshift float value and convert to velocity if target_shift is None: if self._observer is None or self._target is None: return self.replicate() target_shift = 0 * KMS else: target_shift = u.Quantity(target_shift) if target_shift.unit.physical_type == 'dimensionless': target_shift = _redshift_to_velocity(target_shift) if self._observer is None or self._target is None: return self.replicate(value=_apply_relativistic_doppler_shift(self, target_shift), radial_velocity=self.radial_velocity + target_shift) if observer_shift is None: observer_shift = 0 * KMS else: observer_shift = u.Quantity(observer_shift) if observer_shift.unit.physical_type == 'dimensionless': observer_shift = _redshift_to_velocity(observer_shift) target_icrs = self._target.transform_to(ICRS()) observer_icrs = self._observer.transform_to(ICRS()) pos_hat = SpectralCoord._normalized_position_vector(observer_icrs, target_icrs) target_velocity = _get_velocities(target_icrs) + target_shift * pos_hat observer_velocity = _get_velocities(observer_icrs) + observer_shift * pos_hat target_velocity = CartesianDifferential(target_velocity.xyz) observer_velocity = CartesianDifferential(observer_velocity.xyz) new_target = (target_icrs .realize_frame(target_icrs.cartesian.with_differentials(target_velocity)) .transform_to(self._target)) new_observer = (observer_icrs .realize_frame(observer_icrs.cartesian.with_differentials(observer_velocity)) .transform_to(self._observer)) init_obs_vel = self._calculate_radial_velocity(observer_icrs, target_icrs, as_scalar=True) fin_obs_vel = self._calculate_radial_velocity(new_observer, new_target, as_scalar=True) new_data = _apply_relativistic_doppler_shift(self, fin_obs_vel - init_obs_vel) return self.replicate(value=new_data, observer=new_observer, target=new_target) def to_rest(self): """ Transforms the spectral axis to the rest frame. """ if self.observer is not None and self.target is not None: return self.with_observer_stationary_relative_to(self.target) result = _apply_relativistic_doppler_shift(self, -self.radial_velocity) return self.replicate(value=result, radial_velocity=0. 
* KMS, redshift=None) def __repr__(self): prefixstr = '<' + self.__class__.__name__ + ' ' try: radial_velocity = self.radial_velocity redshift = self.redshift except ValueError: radial_velocity = redshift = 'Undefined' repr_items = [f'{prefixstr}'] if self.observer is not None: observer_repr = indent(repr(self.observer), 14 * ' ').lstrip() repr_items.append(f' observer: {observer_repr}') if self.target is not None: target_repr = indent(repr(self.target), 12 * ' ').lstrip() repr_items.append(f' target: {target_repr}') if (self._observer is not None and self._target is not None) or self._radial_velocity is not None: if self.observer is not None and self.target is not None: repr_items.append(' observer to target (computed from above):') else: repr_items.append(' observer to target:') repr_items.append(f' radial_velocity={radial_velocity}') repr_items.append(f' redshift={redshift}') if self.doppler_rest is not None or self.doppler_convention is not None: repr_items.append(f' doppler_rest={self.doppler_rest}') repr_items.append(f' doppler_convention={self.doppler_convention}') arrstr = np.array2string(self.view(np.ndarray), separator=', ', prefix=' ') if len(repr_items) == 1: repr_items[0] += f'{arrstr}{self._unitstr:s}' else: repr_items[1] = ' (' + repr_items[1].lstrip() repr_items[-1] += ')' repr_items.append(f' {arrstr}{self._unitstr:s}') return '\n'.join(repr_items) + '>'
def replicate(self, value=None, unit=None,
                  observer=None, target=None,
                  radial_velocity=None, redshift=None,
                  doppler_convention=None, doppler_rest=None,
                  copy=False):
        """
        Return a replica of the `SpectralCoord`, optionally changing the
        values or attributes.

        Note that no conversion is carried out by this method - this keeps all
        the values and attributes the same, except for the ones explicitly
        passed to this method which are changed.

        If ``copy`` is set to `True` then a full copy of the internal arrays
        will be made.

        By default the replica will use a reference to the original arrays
        when possible to save memory.

        Parameters
        ----------
        value : ndarray or `~astropy.units.Quantity` or `SpectralCoord`, optional
            Spectral values, which should be either wavelength, frequency,
            energy, wavenumber, or velocity values.
        unit : str or `~astropy.units.Unit`
            Unit for the given spectral values.
        observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional
            The coordinate (position and velocity) of observer.
        target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional
            The coordinate (position and velocity) of target.
        radial_velocity : `~astropy.units.Quantity`, optional
            The radial velocity of the target with respect to the observer.
        redshift : float, optional
            The relativistic redshift of the target with respect to the
            observer.
        doppler_rest : `~astropy.units.Quantity`, optional
            The rest value to use when expressing the spectral value as a
            velocity.
        doppler_convention : str, optional
            The Doppler convention to use when expressing the spectral value
            as a velocity.
        copy : bool, optional
            If `True`, and ``value`` is not specified, the values are copied to
            the new `SkyCoord` - otherwise a reference to the same values
            is used.

        Returns
        -------
        sc : `SpectralCoord` object
            Replica of this object
        """
        if isinstance(value, u.Quantity):
            if unit is not None:
                raise ValueError("Cannot specify value as a Quantity and also specify unit")
            else:
                value, unit = value.value, value.unit

        value = value if value is not None else self.value
        unit = unit or self.unit
        observer = self._validate_coordinate(observer) or self.observer
        target = self._validate_coordinate(target) or self.target
        doppler_convention = doppler_convention or self.doppler_convention
        doppler_rest = doppler_rest or self.doppler_rest

        # If value is being taken from self and copy is True
        if copy:
            value = value.copy()

        # Only include radial_velocity if it is not auto-computed from the
        # observer and target.
        if (self.observer is None or self.target is None) and radial_velocity is None and redshift is None:
            radial_velocity = self.radial_velocity

        with warnings.catch_warnings():
            warnings.simplefilter('ignore', NoVelocityWarning)
            return self.__class__(value=value, unit=unit,
                                  observer=observer, target=target,
                                  radial_velocity=radial_velocity, redshift=redshift,
                                  doppler_convention=doppler_convention, doppler_rest=doppler_rest,
                                  copy=False)
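The following minimal usage sketch (again illustrative, not part of the record) shows the no-conversion semantics of ``replicate`` described in the docstring above; it assumes the same astropy >= 4.1 import path as the previous sketch.

```python
# Illustrative sketch only -- assumes astropy >= 4.1 with SpectralCoord
# available from astropy.coordinates.
import astropy.units as u
from astropy.coordinates import SpectralCoord

sc = SpectralCoord([500, 510] * u.nm, radial_velocity=100 * u.km / u.s)

# replicate carries out no conversion: only the attributes passed in are
# changed; everything else, including the manually set radial velocity,
# is carried over.
sc2 = sc.replicate(doppler_rest=500 * u.nm, doppler_convention='optical')

print(sc2.radial_velocity)     # expected: 100.0 km / s
print(sc2.doppler_convention)  # expected: 'optical'

# copy=True forces the underlying array to be copied rather than shared.
sc3 = sc.replicate(copy=True)
```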
301
374
import warnings from textwrap import indent import astropy.units as u import numpy as np from astropy.constants import c from astropy.coordinates import (ICRS, CartesianDifferential, CartesianRepresentation, SkyCoord) from astropy.coordinates.spectral_quantity import SpectralQuantity from astropy.coordinates.baseframe import (BaseCoordinateFrame, frame_transform_graph) from astropy.utils.exceptions import AstropyUserWarning __all__ = ['SpectralCoord'] class NoVelocityWarning(AstropyUserWarning): pass class NoDistanceWarning(AstropyUserWarning): pass KMS = u.km / u.s C_KMS = c.to(KMS) ZERO_VELOCITIES = CartesianDifferential([0, 0, 0] * KMS) # Default distance to use for target when none is provided DEFAULT_DISTANCE = 1e6 * u.kpc # We don't want to run doctests in the docstrings we inherit from Quantity __doctest_skip__ = ['SpectralCoord.*'] def _velocity_to_redshift(velocity): """ Convert a velocity to a relativistic redshift. """ beta = velocity / C_KMS return np.sqrt((1 + beta) / (1 - beta)) - 1 def _redshift_to_velocity(redshift): """ Convert a relativistic redshift to a velocity. """ zponesq = (1 + redshift) ** 2 return (C_KMS * (zponesq - 1) / (zponesq + 1)) def _apply_relativistic_doppler_shift(scoord, velocity): """ Given a `SpectralQuantity` and a velocity, return a new `SpectralQuantity` that is Doppler shifted by this amount. Note that the Doppler shift applied is the full relativistic one, so `SpectralQuantity` currently expressed in velocity and not using the relativistic convention will temporarily be converted to use the relativistic convention while the shift is applied. Positive velocities are assumed to redshift the spectral quantity, while negative velocities blueshift the spectral quantity. """ # NOTE: we deliberately don't keep sub-classes of SpectralQuantity intact # since we can't guarantee that their metadata would be correct/consistent. squantity = scoord.view(SpectralQuantity) beta = velocity / c doppler_factor = np.sqrt((1 + beta) / (1 - beta)) if squantity.unit.is_equivalent(u.m): # wavelength return squantity * doppler_factor elif (squantity.unit.is_equivalent(u.Hz) or squantity.unit.is_equivalent(u.eV) or squantity.unit.is_equivalent(1 / u.m)): return squantity / doppler_factor elif squantity.unit.is_equivalent(KMS): # velocity return (squantity.to(u.Hz) / doppler_factor).to(squantity.unit) else: # pragma: no cover raise RuntimeError(f"Unexpected units in velocity shift: {squantity.unit}. " "This should not happen, so please report this in the " "astropy issue tracker!") def update_differentials_to_match(original, velocity_reference, preserve_observer_frame=False): """ Given an original coordinate object, update the differentials so that the final coordinate is at the same location as the original coordinate but co-moving with the velocity reference object. If preserve_original_frame is set to True, the resulting object will be in the frame of the original coordinate, otherwise it will be in the frame of the velocity reference. """ if not velocity_reference.data.differentials: raise ValueError("Reference frame has no velocities") # If the reference has an obstime already defined, we should ignore # it and stick with the original observer obstime. 
if 'obstime' in velocity_reference.frame_attributes and hasattr(original, 'obstime'): velocity_reference = velocity_reference.replicate(obstime=original.obstime) # We transform both coordinates to ICRS for simplicity and because we know # it's a simple frame that is not time-dependent (it could be that both # the original and velocity_reference frame are time-dependent) original_icrs = original.transform_to(ICRS()) velocity_reference_icrs = velocity_reference.transform_to(ICRS()) differentials = velocity_reference_icrs.data.represent_as(CartesianRepresentation, CartesianDifferential).differentials data_with_differentials = (original_icrs.data.represent_as(CartesianRepresentation) .with_differentials(differentials)) final_icrs = original_icrs.realize_frame(data_with_differentials) if preserve_observer_frame: final = final_icrs.transform_to(original) else: final = final_icrs.transform_to(velocity_reference) return final.replicate(representation_type=CartesianRepresentation, differential_type=CartesianDifferential) def attach_zero_velocities(coord): """ Set the differentials to be stationary on a coordinate object. """ new_data = coord.cartesian.with_differentials(ZERO_VELOCITIES) return coord.realize_frame(new_data) def _get_velocities(coord): if 's' in coord.data.differentials: return coord.velocity else: return ZERO_VELOCITIES class SpectralCoord(SpectralQuantity): """ A spectral coordinate with its corresponding unit. .. note:: The |SpectralCoord| class is new in Astropy v4.1 and should be considered experimental at this time. Note that we do not fully support cases where the observer and target are moving relativistically relative to each other, so care should be taken in those cases. It is possible that there will be API changes in future versions of Astropy based on user feedback. If you have specific ideas for how it might be improved, please let us know on the `astropy-dev mailing list`_ or at http://feedback.astropy.org. Parameters ---------- value : ndarray or `~astropy.units.Quantity` or `SpectralCoord` Spectral values, which should be either wavelength, frequency, energy, wavenumber, or velocity values. unit : str or `~astropy.units.Unit` Unit for the given spectral values. observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional The coordinate (position and velocity) of observer. If no velocities are present on this object, the observer is assumed to be stationary relative to the frame origin. target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional The coordinate (position and velocity) of target. If no velocities are present on this object, the target is assumed to be stationary relative to the frame origin. radial_velocity : `~astropy.units.Quantity`, optional The radial velocity of the target with respect to the observer. This can only be specified if ``redshift`` is not specified. redshift : float, optional The relativistic redshift of the target with respect to the observer. This can only be specified if ``radial_velocity`` cannot be specified. doppler_rest : `~astropy.units.Quantity`, optional The rest value to use when expressing the spectral value as a velocity. doppler_convention : str, optional The Doppler convention to use when expressing the spectral value as a velocity. 
""" @u.quantity_input(radial_velocity=u.km/u.s) def __new__(cls, value, unit=None, observer=None, target=None, radial_velocity=None, redshift=None, **kwargs): obj = super().__new__(cls, value, unit=unit, **kwargs) # There are two main modes of operation in this class. Either the # observer and target are both defined, in which case the radial # velocity and redshift are automatically computed from these, or # only one of the observer and target are specified, along with a # manually specified radial velocity or redshift. So if a target and # observer are both specified, we can't also accept a radial velocity # or redshift. if target is not None and observer is not None: if radial_velocity is not None or redshift is not None: raise ValueError("Cannot specify radial velocity or redshift if both " "target and observer are specified") # We only deal with redshifts here and in the redshift property. # Otherwise internally we always deal with velocities. if redshift is not None: if radial_velocity is not None: raise ValueError("Cannot set both a radial velocity and redshift") redshift = u.Quantity(redshift) # For now, we can't specify redshift=u.one in quantity_input above # and have it work with plain floats, but if that is fixed, for # example as in https://github.com/astropy/astropy/pull/10232, we # can remove the check here and add redshift=u.one to the decorator if not redshift.unit.is_equivalent(u.one): raise u.UnitsError('redshift should be dimensionless') radial_velocity = _redshift_to_velocity(redshift) # If we're initializing from an existing SpectralCoord, keep any # parameters that aren't being overridden if observer is None: observer = getattr(value, 'observer', None) if target is None: target = getattr(value, 'target', None) # As mentioned above, we should only specify the radial velocity # manually if either or both the observer and target are not # specified. if observer is None or target is None: if radial_velocity is None: radial_velocity = getattr(value, 'radial_velocity', None) obj._radial_velocity = radial_velocity obj._observer = cls._validate_coordinate(observer, label='observer') obj._target = cls._validate_coordinate(target, label='target') return obj def __array_finalize__(self, obj): super().__array_finalize__(obj) self._radial_velocity = getattr(obj, '_radial_velocity', None) self._observer = getattr(obj, '_observer', None) self._target = getattr(obj, '_target', None) @staticmethod def _validate_coordinate(coord, label=''): """ Checks the type of the frame and whether a velocity differential and a distance has been defined on the frame object. If no distance is defined, the target is assumed to be "really far away", and the observer is assumed to be "in the solar system". Parameters ---------- coord : `~astropy.coordinates.BaseCoordinateFrame` The new frame to be used for target or observer. label : str, optional The name of the object being validated (e.g. 'target' or 'observer'), which is then used in error messages. 
""" if coord is None: return if not issubclass(coord.__class__, BaseCoordinateFrame): if isinstance(coord, SkyCoord): coord = coord.frame else: raise TypeError(f"{label} must be a SkyCoord or coordinate frame instance") # If the distance is not well-defined, ensure that it works properly # for generating differentials # TODO: change this to not set the distance and yield a warning once # there's a good way to address this in astropy.coordinates # https://github.com/astropy/astropy/issues/10247 with np.errstate(all='ignore'): distance = getattr(coord, 'distance', None) if distance is not None and distance.unit.physical_type == 'dimensionless': coord = SkyCoord(coord, distance=DEFAULT_DISTANCE) warnings.warn( "Distance on coordinate object is dimensionless, an " f"abritrary distance value of {DEFAULT_DISTANCE} will be set instead.", NoDistanceWarning) # If the observer frame does not contain information about the # velocity of the system, assume that the velocity is zero in the # system. if 's' not in coord.data.differentials: warnings.warn( "No velocity defined on frame, assuming {}.".format( ZERO_VELOCITIES), NoVelocityWarning) coord = attach_zero_velocities(coord) return coord def replicate(self, value=None, unit=None, observer=None, target=None, radial_velocity=None, redshift=None, doppler_convention=None, doppler_rest=None, copy=False): """ Return a replica of the `SpectralCoord`, optionally changing the values or attributes. Note that no conversion is carried out by this method - this keeps all the values and attributes the same, except for the ones explicitly passed to this method which are changed. If ``copy`` is set to `True` then a full copy of the internal arrays will be made. By default the replica will use a reference to the original arrays when possible to save memory. Parameters ---------- value : ndarray or `~astropy.units.Quantity` or `SpectralCoord`, optional Spectral values, which should be either wavelength, frequency, energy, wavenumber, or velocity values. unit : str or `~astropy.units.Unit` Unit for the given spectral values. observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional The coordinate (position and velocity) of observer. target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional The coordinate (position and velocity) of target. radial_velocity : `~astropy.units.Quantity`, optional The radial velocity of the target with respect to the observer. redshift : float, optional The relativistic redshift of the target with respect to the observer. doppler_rest : `~astropy.units.Quantity`, optional The rest value to use when expressing the spectral value as a velocity. doppler_convention : str, optional The Doppler convention to use when expressing the spectral value as a velocity. copy : bool, optional If `True`, and ``value`` is not specified, the values are copied to the new `SkyCoord` - otherwise a reference to the same values is used. 
Returns ------- sc : `SpectralCoord` object Replica of this object """ if isinstance(value, u.Quantity): if unit is not None: raise ValueError("Cannot specify value as a Quantity and also specify unit") else: value, unit = value.value, value.unit value = value if value is not None else self.value unit = unit or self.unit observer = self._validate_coordinate(observer) or self.observer target = self._validate_coordinate(target) or self.target doppler_convention = doppler_convention or self.doppler_convention doppler_rest = doppler_rest or self.doppler_rest # If value is being taken from self and copy is Tru if copy: value = value.copy() # Only include radial_velocity if it is not auto-computed from the # observer and target. if (self.observer is None or self.target is None) and radial_velocity is None and redshift is None: radial_velocity = self.radial_velocity with warnings.catch_warnings(): warnings.simplefilter('ignore', NoVelocityWarning) return self.__class__(value=value, unit=unit, observer=observer, target=target, radial_velocity=radial_velocity, redshift=redshift, doppler_convention=doppler_convention, doppler_rest=doppler_rest, copy=False) @property def quantity(self): """ Convert the ``SpectralCoord`` to a `~astropy.units.Quantity`. Equivalent to ``self.view(u.Quantity)``. Returns ------- `~astropy.units.Quantity` This object viewed as a `~astropy.units.Quantity`. """ return self.view(u.Quantity) @property def observer(self): """ The coordinates of the observer. If set, and a target is set as well, this will override any explicit radial velocity passed in. Returns ------- `~astropy.coordinates.BaseCoordinateFrame` The astropy coordinate frame representing the observation. """ return self._observer @observer.setter def observer(self, value): if self.observer is not None: raise ValueError("observer has already been set") self._observer = self._validate_coordinate(value, label='observer') # Switch to auto-computing radial velocity if self._target is not None: self._radial_velocity = None @property def target(self): """ The coordinates of the target being observed. If set, and an observer is set as well, this will override any explicit radial velocity passed in. Returns ------- `~astropy.coordinates.BaseCoordinateFrame` The astropy coordinate frame representing the target. """ return self._target @target.setter def target(self, value): if self.target is not None: raise ValueError("target has already been set") self._target = self._validate_coordinate(value, label='target') # Switch to auto-computing radial velocity if self._observer is not None: self._radial_velocity = None @property def radial_velocity(self): """ Radial velocity of target relative to the observer. Returns ------- `~astropy.units.Quantity` Radial velocity of target. Notes ----- This is different from the ``.radial_velocity`` property of a coordinate frame in that this calculates the radial velocity with respect to the *observer*, not the origin of the frame. """ if self._observer is None or self._target is None: if self._radial_velocity is None: return 0 * KMS else: return self._radial_velocity else: return self._calculate_radial_velocity(self._observer, self._target, as_scalar=True) @property def redshift(self): """ Redshift of target relative to observer. Calculated from the radial velocity. Returns ------- float Redshift of target. 
""" return _velocity_to_redshift(self.radial_velocity) @staticmethod def _calculate_radial_velocity(observer, target, as_scalar=False): """ Compute the line-of-sight velocity from the observer to the target. Parameters ---------- observer : `~astropy.coordinates.BaseCoordinateFrame` The frame of the observer. target : `~astropy.coordinates.BaseCoordinateFrame` The frame of the target. as_scalar : bool If `True`, the magnitude of the velocity vector will be returned, otherwise the full vector will be returned. Returns ------- `~astropy.units.Quantity` The radial velocity of the target with respect to the observer. """ # Convert observer and target to ICRS to avoid finite differencing # calculations that lack numerical precision. observer_icrs = observer.transform_to(ICRS()) target_icrs = target.transform_to(ICRS()) pos_hat = SpectralCoord._normalized_position_vector(observer_icrs, target_icrs) d_vel = target_icrs.velocity - observer_icrs.velocity vel_mag = pos_hat.dot(d_vel) if as_scalar: return vel_mag else: return vel_mag * pos_hat @staticmethod def _normalized_position_vector(observer, target): """ Calculate the normalized position vector between two frames. Parameters ---------- observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The observation frame or coordinate. target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The target frame or coordinate. Returns ------- pos_hat : `BaseRepresentation` Position representation. """ d_pos = (target.cartesian.without_differentials() - observer.cartesian.without_differentials()) dp_norm = d_pos.norm() # Reset any that are 0 to 1 to avoid nans from 0/0 dp_norm[dp_norm == 0] = 1 * dp_norm.unit pos_hat = d_pos / dp_norm return pos_hat @u.quantity_input(velocity=u.km/u.s) def with_observer_stationary_relative_to(self, frame, velocity=None, preserve_observer_frame=False): """ A new `SpectralCoord` with the velocity of the observer altered, but not the position. If a coordinate frame is specified, the observer velocities will be modified to be stationary in the specified frame. If a coordinate instance is specified, optionally with non-zero velocities, the observer velocities will be updated so that the observer is co-moving with the specified coordinates. Parameters ---------- frame : str, `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The observation frame in which the observer will be stationary. This can be the name of a frame (e.g. 'icrs'), a frame class, frame instance with no data, or instance with data. This can optionally include velocities. velocity : `~astropy.units.Quantity` or `~astropy.coordinates.CartesianDifferential`, optional If ``frame`` does not contain velocities, these can be specified as a 3-element `~astropy.units.Quantity`. In the case where this is also not specified, the velocities default to zero. preserve_observer_frame : bool If `True`, the final observer frame class will be the same as the original one, and if `False` it will be the frame of the velocity reference class. Returns ------- new_coord : `SpectralCoord` The new coordinate object representing the spectral data transformed based on the observer's new velocity frame. 
""" if self.observer is None or self.target is None: raise ValueError("This method can only be used if both observer " "and target are defined on the SpectralCoord.") # Start off by extracting frame if a SkyCoord was passed in if isinstance(frame, SkyCoord): frame = frame.frame if isinstance(frame, BaseCoordinateFrame): if not frame.has_data: frame = frame.realize_frame(CartesianRepresentation(0 * u.km, 0 * u.km, 0 * u.km)) if frame.data.differentials: if velocity is not None: raise ValueError('frame already has differentials, cannot also specify velocity') # otherwise frame is ready to go else: if velocity is None: differentials = ZERO_VELOCITIES else: differentials = CartesianDifferential(velocity) frame = frame.realize_frame(frame.data.with_differentials(differentials)) if isinstance(frame, (type, str)): if isinstance(frame, type): frame_cls = frame elif isinstance(frame, str): frame_cls = frame_transform_graph.lookup_name(frame) if velocity is None: velocity = 0 * u.m / u.s, 0 * u.m / u.s, 0 * u.m / u.s elif velocity.shape != (3,): raise ValueError('velocity should be a Quantity vector with 3 elements') frame = frame_cls(0 * u.m, 0 * u.m, 0 * u.m, *velocity, representation_type='cartesian', differential_type='cartesian') observer = update_differentials_to_match(self.observer, frame, preserve_observer_frame=preserve_observer_frame) # Calculate the initial and final los velocity init_obs_vel = self._calculate_radial_velocity(self.observer, self.target, as_scalar=True) fin_obs_vel = self._calculate_radial_velocity(observer, self.target, as_scalar=True) # Apply transformation to data new_data = _apply_relativistic_doppler_shift(self, fin_obs_vel - init_obs_vel) new_coord = self.replicate(value=new_data, observer=observer) return new_coord def with_radial_velocity_shift(self, target_shift=None, observer_shift=None): """ Apply a velocity shift to this spectral coordinate. The shift can be provided as a redshift (float value) or radial velocity (`~astropy.units.Quantity` with physical type of 'speed'). Parameters ---------- target_shift : float or `~astropy.units.Quantity` Shift value to apply to current target. observer_shift : float or `~astropy.units.Quantity` Shift value to apply to current observer. Returns ------- `SpectralCoord` New spectral coordinate with the target/observer velocity changed to incorporate the shift. This is always a new object even if ``target_shift`` and ``observer_shift`` are both `None`. 
""" if observer_shift is not None and (self.target is None or self.observer is None): raise ValueError("Both an observer and target must be defined " "before applying a velocity shift.") for arg in [x for x in [target_shift, observer_shift] if x is not None]: if isinstance(arg, u.Quantity) and not arg.unit.is_equivalent((u.one, KMS)): raise u.UnitsError("Argument must have unit physical type " "'speed' for radial velocty or " "'dimensionless' for redshift.") # The target or observer value is defined but is not a quantity object, # assume it's a redshift float value and convert to velocity if target_shift is None: if self._observer is None or self._target is None: return self.replicate() target_shift = 0 * KMS else: target_shift = u.Quantity(target_shift) if target_shift.unit.physical_type == 'dimensionless': target_shift = _redshift_to_velocity(target_shift) if self._observer is None or self._target is None: return self.replicate(value=_apply_relativistic_doppler_shift(self, target_shift), radial_velocity=self.radial_velocity + target_shift) if observer_shift is None: observer_shift = 0 * KMS else: observer_shift = u.Quantity(observer_shift) if observer_shift.unit.physical_type == 'dimensionless': observer_shift = _redshift_to_velocity(observer_shift) target_icrs = self._target.transform_to(ICRS()) observer_icrs = self._observer.transform_to(ICRS()) pos_hat = SpectralCoord._normalized_position_vector(observer_icrs, target_icrs) target_velocity = _get_velocities(target_icrs) + target_shift * pos_hat observer_velocity = _get_velocities(observer_icrs) + observer_shift * pos_hat target_velocity = CartesianDifferential(target_velocity.xyz) observer_velocity = CartesianDifferential(observer_velocity.xyz) new_target = (target_icrs .realize_frame(target_icrs.cartesian.with_differentials(target_velocity)) .transform_to(self._target)) new_observer = (observer_icrs .realize_frame(observer_icrs.cartesian.with_differentials(observer_velocity)) .transform_to(self._observer)) init_obs_vel = self._calculate_radial_velocity(observer_icrs, target_icrs, as_scalar=True) fin_obs_vel = self._calculate_radial_velocity(new_observer, new_target, as_scalar=True) new_data = _apply_relativistic_doppler_shift(self, fin_obs_vel - init_obs_vel) return self.replicate(value=new_data, observer=new_observer, target=new_target) def to_rest(self): """ Transforms the spectral axis to the rest frame. """ if self.observer is not None and self.target is not None: return self.with_observer_stationary_relative_to(self.target) result = _apply_relativistic_doppler_shift(self, -self.radial_velocity) return self.replicate(value=result, radial_velocity=0. 
* KMS, redshift=None) def __repr__(self): prefixstr = '<' + self.__class__.__name__ + ' ' try: radial_velocity = self.radial_velocity redshift = self.redshift except ValueError: radial_velocity = redshift = 'Undefined' repr_items = [f'{prefixstr}'] if self.observer is not None: observer_repr = indent(repr(self.observer), 14 * ' ').lstrip() repr_items.append(f' observer: {observer_repr}') if self.target is not None: target_repr = indent(repr(self.target), 12 * ' ').lstrip() repr_items.append(f' target: {target_repr}') if (self._observer is not None and self._target is not None) or self._radial_velocity is not None: if self.observer is not None and self.target is not None: repr_items.append(' observer to target (computed from above):') else: repr_items.append(' observer to target:') repr_items.append(f' radial_velocity={radial_velocity}') repr_items.append(f' redshift={redshift}') if self.doppler_rest is not None or self.doppler_convention is not None: repr_items.append(f' doppler_rest={self.doppler_rest}') repr_items.append(f' doppler_convention={self.doppler_convention}') arrstr = np.array2string(self.view(np.ndarray), separator=', ', prefix=' ') if len(repr_items) == 1: repr_items[0] += f'{arrstr}{self._unitstr:s}' else: repr_items[1] = ' (' + repr_items[1].lstrip() repr_items[-1] += ')' repr_items.append(f' {arrstr}{self._unitstr:s}') return '\n'.join(repr_items) + '>'
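Note: the module source above converts between relativistic redshift and radial velocity with `_velocity_to_redshift` and `_redshift_to_velocity`. The following is a minimal round-trip sketch of those same relations written out with plain NumPy rather than the private helpers; the 30,000 km/s value is arbitrary and purely illustrative.

import numpy as np
from astropy.constants import c

# Relations implemented above: z = sqrt((1 + beta) / (1 - beta)) - 1 and
# v = c * ((1 + z)**2 - 1) / ((1 + z)**2 + 1).
v = 3.0e7                          # 30,000 km/s expressed in m/s (illustrative)
beta = v / c.value
z = np.sqrt((1 + beta) / (1 - beta)) - 1

zp1_sq = (1 + z) ** 2
v_back = c.value * (zp1_sq - 1) / (zp1_sq + 1)

assert np.isclose(v, v_back)       # the two conversions are exact inverses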
_normalized_position_vector
Calculate the normalized position vector between two frames.

        Parameters
        ----------
        observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
            The observation frame or coordinate.
        target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
            The target frame or coordinate.

        Returns
        -------
        pos_hat : `BaseRepresentation`
            Position representation.
import warnings from textwrap import indent import astropy.units as u import numpy as np from astropy.constants import c from astropy.coordinates import (ICRS, CartesianDifferential, CartesianRepresentation, SkyCoord) from astropy.coordinates.spectral_quantity import SpectralQuantity from astropy.coordinates.baseframe import (BaseCoordinateFrame, frame_transform_graph) from astropy.utils.exceptions import AstropyUserWarning __all__ = ['SpectralCoord'] class NoVelocityWarning(AstropyUserWarning): pass class NoDistanceWarning(AstropyUserWarning): pass KMS = u.km / u.s C_KMS = c.to(KMS) ZERO_VELOCITIES = CartesianDifferential([0, 0, 0] * KMS) # Default distance to use for target when none is provided DEFAULT_DISTANCE = 1e6 * u.kpc # We don't want to run doctests in the docstrings we inherit from Quantity __doctest_skip__ = ['SpectralCoord.*'] def _velocity_to_redshift(velocity): """ Convert a velocity to a relativistic redshift. """ beta = velocity / C_KMS return np.sqrt((1 + beta) / (1 - beta)) - 1 def _redshift_to_velocity(redshift): """ Convert a relativistic redshift to a velocity. """ zponesq = (1 + redshift) ** 2 return (C_KMS * (zponesq - 1) / (zponesq + 1)) def _apply_relativistic_doppler_shift(scoord, velocity): """ Given a `SpectralQuantity` and a velocity, return a new `SpectralQuantity` that is Doppler shifted by this amount. Note that the Doppler shift applied is the full relativistic one, so `SpectralQuantity` currently expressed in velocity and not using the relativistic convention will temporarily be converted to use the relativistic convention while the shift is applied. Positive velocities are assumed to redshift the spectral quantity, while negative velocities blueshift the spectral quantity. """ # NOTE: we deliberately don't keep sub-classes of SpectralQuantity intact # since we can't guarantee that their metadata would be correct/consistent. squantity = scoord.view(SpectralQuantity) beta = velocity / c doppler_factor = np.sqrt((1 + beta) / (1 - beta)) if squantity.unit.is_equivalent(u.m): # wavelength return squantity * doppler_factor elif (squantity.unit.is_equivalent(u.Hz) or squantity.unit.is_equivalent(u.eV) or squantity.unit.is_equivalent(1 / u.m)): return squantity / doppler_factor elif squantity.unit.is_equivalent(KMS): # velocity return (squantity.to(u.Hz) / doppler_factor).to(squantity.unit) else: # pragma: no cover raise RuntimeError(f"Unexpected units in velocity shift: {squantity.unit}. " "This should not happen, so please report this in the " "astropy issue tracker!") def update_differentials_to_match(original, velocity_reference, preserve_observer_frame=False): """ Given an original coordinate object, update the differentials so that the final coordinate is at the same location as the original coordinate but co-moving with the velocity reference object. If preserve_original_frame is set to True, the resulting object will be in the frame of the original coordinate, otherwise it will be in the frame of the velocity reference. """ if not velocity_reference.data.differentials: raise ValueError("Reference frame has no velocities") # If the reference has an obstime already defined, we should ignore # it and stick with the original observer obstime. 
if 'obstime' in velocity_reference.frame_attributes and hasattr(original, 'obstime'): velocity_reference = velocity_reference.replicate(obstime=original.obstime) # We transform both coordinates to ICRS for simplicity and because we know # it's a simple frame that is not time-dependent (it could be that both # the original and velocity_reference frame are time-dependent) original_icrs = original.transform_to(ICRS()) velocity_reference_icrs = velocity_reference.transform_to(ICRS()) differentials = velocity_reference_icrs.data.represent_as(CartesianRepresentation, CartesianDifferential).differentials data_with_differentials = (original_icrs.data.represent_as(CartesianRepresentation) .with_differentials(differentials)) final_icrs = original_icrs.realize_frame(data_with_differentials) if preserve_observer_frame: final = final_icrs.transform_to(original) else: final = final_icrs.transform_to(velocity_reference) return final.replicate(representation_type=CartesianRepresentation, differential_type=CartesianDifferential) def attach_zero_velocities(coord): """ Set the differentials to be stationary on a coordinate object. """ new_data = coord.cartesian.with_differentials(ZERO_VELOCITIES) return coord.realize_frame(new_data) def _get_velocities(coord): if 's' in coord.data.differentials: return coord.velocity else: return ZERO_VELOCITIES class SpectralCoord(SpectralQuantity): """ A spectral coordinate with its corresponding unit. .. note:: The |SpectralCoord| class is new in Astropy v4.1 and should be considered experimental at this time. Note that we do not fully support cases where the observer and target are moving relativistically relative to each other, so care should be taken in those cases. It is possible that there will be API changes in future versions of Astropy based on user feedback. If you have specific ideas for how it might be improved, please let us know on the `astropy-dev mailing list`_ or at http://feedback.astropy.org. Parameters ---------- value : ndarray or `~astropy.units.Quantity` or `SpectralCoord` Spectral values, which should be either wavelength, frequency, energy, wavenumber, or velocity values. unit : str or `~astropy.units.Unit` Unit for the given spectral values. observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional The coordinate (position and velocity) of observer. If no velocities are present on this object, the observer is assumed to be stationary relative to the frame origin. target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional The coordinate (position and velocity) of target. If no velocities are present on this object, the target is assumed to be stationary relative to the frame origin. radial_velocity : `~astropy.units.Quantity`, optional The radial velocity of the target with respect to the observer. This can only be specified if ``redshift`` is not specified. redshift : float, optional The relativistic redshift of the target with respect to the observer. This can only be specified if ``radial_velocity`` cannot be specified. doppler_rest : `~astropy.units.Quantity`, optional The rest value to use when expressing the spectral value as a velocity. doppler_convention : str, optional The Doppler convention to use when expressing the spectral value as a velocity. 
""" @u.quantity_input(radial_velocity=u.km/u.s) def __new__(cls, value, unit=None, observer=None, target=None, radial_velocity=None, redshift=None, **kwargs): obj = super().__new__(cls, value, unit=unit, **kwargs) # There are two main modes of operation in this class. Either the # observer and target are both defined, in which case the radial # velocity and redshift are automatically computed from these, or # only one of the observer and target are specified, along with a # manually specified radial velocity or redshift. So if a target and # observer are both specified, we can't also accept a radial velocity # or redshift. if target is not None and observer is not None: if radial_velocity is not None or redshift is not None: raise ValueError("Cannot specify radial velocity or redshift if both " "target and observer are specified") # We only deal with redshifts here and in the redshift property. # Otherwise internally we always deal with velocities. if redshift is not None: if radial_velocity is not None: raise ValueError("Cannot set both a radial velocity and redshift") redshift = u.Quantity(redshift) # For now, we can't specify redshift=u.one in quantity_input above # and have it work with plain floats, but if that is fixed, for # example as in https://github.com/astropy/astropy/pull/10232, we # can remove the check here and add redshift=u.one to the decorator if not redshift.unit.is_equivalent(u.one): raise u.UnitsError('redshift should be dimensionless') radial_velocity = _redshift_to_velocity(redshift) # If we're initializing from an existing SpectralCoord, keep any # parameters that aren't being overridden if observer is None: observer = getattr(value, 'observer', None) if target is None: target = getattr(value, 'target', None) # As mentioned above, we should only specify the radial velocity # manually if either or both the observer and target are not # specified. if observer is None or target is None: if radial_velocity is None: radial_velocity = getattr(value, 'radial_velocity', None) obj._radial_velocity = radial_velocity obj._observer = cls._validate_coordinate(observer, label='observer') obj._target = cls._validate_coordinate(target, label='target') return obj def __array_finalize__(self, obj): super().__array_finalize__(obj) self._radial_velocity = getattr(obj, '_radial_velocity', None) self._observer = getattr(obj, '_observer', None) self._target = getattr(obj, '_target', None) @staticmethod def _validate_coordinate(coord, label=''): """ Checks the type of the frame and whether a velocity differential and a distance has been defined on the frame object. If no distance is defined, the target is assumed to be "really far away", and the observer is assumed to be "in the solar system". Parameters ---------- coord : `~astropy.coordinates.BaseCoordinateFrame` The new frame to be used for target or observer. label : str, optional The name of the object being validated (e.g. 'target' or 'observer'), which is then used in error messages. 
""" if coord is None: return if not issubclass(coord.__class__, BaseCoordinateFrame): if isinstance(coord, SkyCoord): coord = coord.frame else: raise TypeError(f"{label} must be a SkyCoord or coordinate frame instance") # If the distance is not well-defined, ensure that it works properly # for generating differentials # TODO: change this to not set the distance and yield a warning once # there's a good way to address this in astropy.coordinates # https://github.com/astropy/astropy/issues/10247 with np.errstate(all='ignore'): distance = getattr(coord, 'distance', None) if distance is not None and distance.unit.physical_type == 'dimensionless': coord = SkyCoord(coord, distance=DEFAULT_DISTANCE) warnings.warn( "Distance on coordinate object is dimensionless, an " f"abritrary distance value of {DEFAULT_DISTANCE} will be set instead.", NoDistanceWarning) # If the observer frame does not contain information about the # velocity of the system, assume that the velocity is zero in the # system. if 's' not in coord.data.differentials: warnings.warn( "No velocity defined on frame, assuming {}.".format( ZERO_VELOCITIES), NoVelocityWarning) coord = attach_zero_velocities(coord) return coord def replicate(self, value=None, unit=None, observer=None, target=None, radial_velocity=None, redshift=None, doppler_convention=None, doppler_rest=None, copy=False): """ Return a replica of the `SpectralCoord`, optionally changing the values or attributes. Note that no conversion is carried out by this method - this keeps all the values and attributes the same, except for the ones explicitly passed to this method which are changed. If ``copy`` is set to `True` then a full copy of the internal arrays will be made. By default the replica will use a reference to the original arrays when possible to save memory. Parameters ---------- value : ndarray or `~astropy.units.Quantity` or `SpectralCoord`, optional Spectral values, which should be either wavelength, frequency, energy, wavenumber, or velocity values. unit : str or `~astropy.units.Unit` Unit for the given spectral values. observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional The coordinate (position and velocity) of observer. target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional The coordinate (position and velocity) of target. radial_velocity : `~astropy.units.Quantity`, optional The radial velocity of the target with respect to the observer. redshift : float, optional The relativistic redshift of the target with respect to the observer. doppler_rest : `~astropy.units.Quantity`, optional The rest value to use when expressing the spectral value as a velocity. doppler_convention : str, optional The Doppler convention to use when expressing the spectral value as a velocity. copy : bool, optional If `True`, and ``value`` is not specified, the values are copied to the new `SkyCoord` - otherwise a reference to the same values is used. 
Returns ------- sc : `SpectralCoord` object Replica of this object """ if isinstance(value, u.Quantity): if unit is not None: raise ValueError("Cannot specify value as a Quantity and also specify unit") else: value, unit = value.value, value.unit value = value if value is not None else self.value unit = unit or self.unit observer = self._validate_coordinate(observer) or self.observer target = self._validate_coordinate(target) or self.target doppler_convention = doppler_convention or self.doppler_convention doppler_rest = doppler_rest or self.doppler_rest # If value is being taken from self and copy is Tru if copy: value = value.copy() # Only include radial_velocity if it is not auto-computed from the # observer and target. if (self.observer is None or self.target is None) and radial_velocity is None and redshift is None: radial_velocity = self.radial_velocity with warnings.catch_warnings(): warnings.simplefilter('ignore', NoVelocityWarning) return self.__class__(value=value, unit=unit, observer=observer, target=target, radial_velocity=radial_velocity, redshift=redshift, doppler_convention=doppler_convention, doppler_rest=doppler_rest, copy=False) @property def quantity(self): """ Convert the ``SpectralCoord`` to a `~astropy.units.Quantity`. Equivalent to ``self.view(u.Quantity)``. Returns ------- `~astropy.units.Quantity` This object viewed as a `~astropy.units.Quantity`. """ return self.view(u.Quantity) @property def observer(self): """ The coordinates of the observer. If set, and a target is set as well, this will override any explicit radial velocity passed in. Returns ------- `~astropy.coordinates.BaseCoordinateFrame` The astropy coordinate frame representing the observation. """ return self._observer @observer.setter def observer(self, value): if self.observer is not None: raise ValueError("observer has already been set") self._observer = self._validate_coordinate(value, label='observer') # Switch to auto-computing radial velocity if self._target is not None: self._radial_velocity = None @property def target(self): """ The coordinates of the target being observed. If set, and an observer is set as well, this will override any explicit radial velocity passed in. Returns ------- `~astropy.coordinates.BaseCoordinateFrame` The astropy coordinate frame representing the target. """ return self._target @target.setter def target(self, value): if self.target is not None: raise ValueError("target has already been set") self._target = self._validate_coordinate(value, label='target') # Switch to auto-computing radial velocity if self._observer is not None: self._radial_velocity = None @property def radial_velocity(self): """ Radial velocity of target relative to the observer. Returns ------- `~astropy.units.Quantity` Radial velocity of target. Notes ----- This is different from the ``.radial_velocity`` property of a coordinate frame in that this calculates the radial velocity with respect to the *observer*, not the origin of the frame. """ if self._observer is None or self._target is None: if self._radial_velocity is None: return 0 * KMS else: return self._radial_velocity else: return self._calculate_radial_velocity(self._observer, self._target, as_scalar=True) @property def redshift(self): """ Redshift of target relative to observer. Calculated from the radial velocity. Returns ------- float Redshift of target. 
""" return _velocity_to_redshift(self.radial_velocity) @staticmethod def _calculate_radial_velocity(observer, target, as_scalar=False): """ Compute the line-of-sight velocity from the observer to the target. Parameters ---------- observer : `~astropy.coordinates.BaseCoordinateFrame` The frame of the observer. target : `~astropy.coordinates.BaseCoordinateFrame` The frame of the target. as_scalar : bool If `True`, the magnitude of the velocity vector will be returned, otherwise the full vector will be returned. Returns ------- `~astropy.units.Quantity` The radial velocity of the target with respect to the observer. """ # Convert observer and target to ICRS to avoid finite differencing # calculations that lack numerical precision. observer_icrs = observer.transform_to(ICRS()) target_icrs = target.transform_to(ICRS()) pos_hat = SpectralCoord._normalized_position_vector(observer_icrs, target_icrs) d_vel = target_icrs.velocity - observer_icrs.velocity vel_mag = pos_hat.dot(d_vel) if as_scalar: return vel_mag else: return vel_mag * pos_hat # MASKED: _normalized_position_vector function (lines 519-546) @u.quantity_input(velocity=u.km/u.s) def with_observer_stationary_relative_to(self, frame, velocity=None, preserve_observer_frame=False): """ A new `SpectralCoord` with the velocity of the observer altered, but not the position. If a coordinate frame is specified, the observer velocities will be modified to be stationary in the specified frame. If a coordinate instance is specified, optionally with non-zero velocities, the observer velocities will be updated so that the observer is co-moving with the specified coordinates. Parameters ---------- frame : str, `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The observation frame in which the observer will be stationary. This can be the name of a frame (e.g. 'icrs'), a frame class, frame instance with no data, or instance with data. This can optionally include velocities. velocity : `~astropy.units.Quantity` or `~astropy.coordinates.CartesianDifferential`, optional If ``frame`` does not contain velocities, these can be specified as a 3-element `~astropy.units.Quantity`. In the case where this is also not specified, the velocities default to zero. preserve_observer_frame : bool If `True`, the final observer frame class will be the same as the original one, and if `False` it will be the frame of the velocity reference class. Returns ------- new_coord : `SpectralCoord` The new coordinate object representing the spectral data transformed based on the observer's new velocity frame. 
""" if self.observer is None or self.target is None: raise ValueError("This method can only be used if both observer " "and target are defined on the SpectralCoord.") # Start off by extracting frame if a SkyCoord was passed in if isinstance(frame, SkyCoord): frame = frame.frame if isinstance(frame, BaseCoordinateFrame): if not frame.has_data: frame = frame.realize_frame(CartesianRepresentation(0 * u.km, 0 * u.km, 0 * u.km)) if frame.data.differentials: if velocity is not None: raise ValueError('frame already has differentials, cannot also specify velocity') # otherwise frame is ready to go else: if velocity is None: differentials = ZERO_VELOCITIES else: differentials = CartesianDifferential(velocity) frame = frame.realize_frame(frame.data.with_differentials(differentials)) if isinstance(frame, (type, str)): if isinstance(frame, type): frame_cls = frame elif isinstance(frame, str): frame_cls = frame_transform_graph.lookup_name(frame) if velocity is None: velocity = 0 * u.m / u.s, 0 * u.m / u.s, 0 * u.m / u.s elif velocity.shape != (3,): raise ValueError('velocity should be a Quantity vector with 3 elements') frame = frame_cls(0 * u.m, 0 * u.m, 0 * u.m, *velocity, representation_type='cartesian', differential_type='cartesian') observer = update_differentials_to_match(self.observer, frame, preserve_observer_frame=preserve_observer_frame) # Calculate the initial and final los velocity init_obs_vel = self._calculate_radial_velocity(self.observer, self.target, as_scalar=True) fin_obs_vel = self._calculate_radial_velocity(observer, self.target, as_scalar=True) # Apply transformation to data new_data = _apply_relativistic_doppler_shift(self, fin_obs_vel - init_obs_vel) new_coord = self.replicate(value=new_data, observer=observer) return new_coord def with_radial_velocity_shift(self, target_shift=None, observer_shift=None): """ Apply a velocity shift to this spectral coordinate. The shift can be provided as a redshift (float value) or radial velocity (`~astropy.units.Quantity` with physical type of 'speed'). Parameters ---------- target_shift : float or `~astropy.units.Quantity` Shift value to apply to current target. observer_shift : float or `~astropy.units.Quantity` Shift value to apply to current observer. Returns ------- `SpectralCoord` New spectral coordinate with the target/observer velocity changed to incorporate the shift. This is always a new object even if ``target_shift`` and ``observer_shift`` are both `None`. 
""" if observer_shift is not None and (self.target is None or self.observer is None): raise ValueError("Both an observer and target must be defined " "before applying a velocity shift.") for arg in [x for x in [target_shift, observer_shift] if x is not None]: if isinstance(arg, u.Quantity) and not arg.unit.is_equivalent((u.one, KMS)): raise u.UnitsError("Argument must have unit physical type " "'speed' for radial velocty or " "'dimensionless' for redshift.") # The target or observer value is defined but is not a quantity object, # assume it's a redshift float value and convert to velocity if target_shift is None: if self._observer is None or self._target is None: return self.replicate() target_shift = 0 * KMS else: target_shift = u.Quantity(target_shift) if target_shift.unit.physical_type == 'dimensionless': target_shift = _redshift_to_velocity(target_shift) if self._observer is None or self._target is None: return self.replicate(value=_apply_relativistic_doppler_shift(self, target_shift), radial_velocity=self.radial_velocity + target_shift) if observer_shift is None: observer_shift = 0 * KMS else: observer_shift = u.Quantity(observer_shift) if observer_shift.unit.physical_type == 'dimensionless': observer_shift = _redshift_to_velocity(observer_shift) target_icrs = self._target.transform_to(ICRS()) observer_icrs = self._observer.transform_to(ICRS()) pos_hat = SpectralCoord._normalized_position_vector(observer_icrs, target_icrs) target_velocity = _get_velocities(target_icrs) + target_shift * pos_hat observer_velocity = _get_velocities(observer_icrs) + observer_shift * pos_hat target_velocity = CartesianDifferential(target_velocity.xyz) observer_velocity = CartesianDifferential(observer_velocity.xyz) new_target = (target_icrs .realize_frame(target_icrs.cartesian.with_differentials(target_velocity)) .transform_to(self._target)) new_observer = (observer_icrs .realize_frame(observer_icrs.cartesian.with_differentials(observer_velocity)) .transform_to(self._observer)) init_obs_vel = self._calculate_radial_velocity(observer_icrs, target_icrs, as_scalar=True) fin_obs_vel = self._calculate_radial_velocity(new_observer, new_target, as_scalar=True) new_data = _apply_relativistic_doppler_shift(self, fin_obs_vel - init_obs_vel) return self.replicate(value=new_data, observer=new_observer, target=new_target) def to_rest(self): """ Transforms the spectral axis to the rest frame. """ if self.observer is not None and self.target is not None: return self.with_observer_stationary_relative_to(self.target) result = _apply_relativistic_doppler_shift(self, -self.radial_velocity) return self.replicate(value=result, radial_velocity=0. 
* KMS, redshift=None) def __repr__(self): prefixstr = '<' + self.__class__.__name__ + ' ' try: radial_velocity = self.radial_velocity redshift = self.redshift except ValueError: radial_velocity = redshift = 'Undefined' repr_items = [f'{prefixstr}'] if self.observer is not None: observer_repr = indent(repr(self.observer), 14 * ' ').lstrip() repr_items.append(f' observer: {observer_repr}') if self.target is not None: target_repr = indent(repr(self.target), 12 * ' ').lstrip() repr_items.append(f' target: {target_repr}') if (self._observer is not None and self._target is not None) or self._radial_velocity is not None: if self.observer is not None and self.target is not None: repr_items.append(' observer to target (computed from above):') else: repr_items.append(' observer to target:') repr_items.append(f' radial_velocity={radial_velocity}') repr_items.append(f' redshift={redshift}') if self.doppler_rest is not None or self.doppler_convention is not None: repr_items.append(f' doppler_rest={self.doppler_rest}') repr_items.append(f' doppler_convention={self.doppler_convention}') arrstr = np.array2string(self.view(np.ndarray), separator=', ', prefix=' ') if len(repr_items) == 1: repr_items[0] += f'{arrstr}{self._unitstr:s}' else: repr_items[1] = ' (' + repr_items[1].lstrip() repr_items[-1] += ')' repr_items.append(f' {arrstr}{self._unitstr:s}') return '\n'.join(repr_items) + '>'
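The masked module above applies the full relativistic Doppler factor in `_apply_relativistic_doppler_shift` (wavelengths are multiplied by the factor, frequencies divided by it). Below is a small sketch of that factor applied to a wavelength with plain `astropy.units`; the 600 nm rest wavelength and 1000 km/s velocity are arbitrary illustrative values.

import numpy as np
import astropy.units as u
from astropy.constants import c

rest_wav = 600 * u.nm                  # illustrative rest wavelength
velocity = 1000 * u.km / u.s           # positive velocity -> redshift

beta = (velocity / c).decompose()
doppler_factor = np.sqrt((1 + beta) / (1 - beta))

observed_wav = rest_wav * doppler_factor
print(observed_wav.to(u.nm))           # slightly longer than 600 nm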
@staticmethod
    def _normalized_position_vector(observer, target):
        """
        Calculate the normalized position vector between two frames.

        Parameters
        ----------
        observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
            The observation frame or coordinate.
        target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
            The target frame or coordinate.

        Returns
        -------
        pos_hat : `BaseRepresentation`
            Position representation.
        """
        d_pos = (target.cartesian.without_differentials() -
                 observer.cartesian.without_differentials())

        dp_norm = d_pos.norm()

        # Reset any that are 0 to 1 to avoid nans from 0/0
        dp_norm[dp_norm == 0] = 1 * dp_norm.unit

        pos_hat = d_pos / dp_norm

        return pos_hat
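A minimal usage sketch of the implementation above, assuming it is exposed as the private static method `SpectralCoord._normalized_position_vector` (it is normally only called internally); the coordinates and distances are made up.

import astropy.units as u
from astropy.coordinates import ICRS, SpectralCoord

# Made-up observer and target positions with explicit distances.
observer = ICRS(ra=0 * u.deg, dec=0 * u.deg, distance=1 * u.au)
target = ICRS(ra=10 * u.deg, dec=5 * u.deg, distance=100 * u.pc)

pos_hat = SpectralCoord._normalized_position_vector(observer, target)
print(pos_hat.norm())   # dimensionless unit vector pointing observer -> target, norm ~ 1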
519
546
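The full module source for this record follows. As a rough end-to-end sketch of the `SpectralCoord` machinery it implements, the snippet below builds an observer and a target with positions and velocities and lets the class derive the radial velocity and redshift; all coordinate values, velocities, and the 656.3 nm spectral value are illustrative only.

import astropy.units as u
from astropy.coordinates import ICRS, SpectralCoord

observer = ICRS(ra=0 * u.deg, dec=0 * u.deg, distance=1 * u.au,
                pm_ra_cosdec=0 * u.mas / u.yr, pm_dec=0 * u.mas / u.yr,
                radial_velocity=0 * u.km / u.s)
target = ICRS(ra=30 * u.deg, dec=10 * u.deg, distance=50 * u.pc,
              pm_ra_cosdec=0 * u.mas / u.yr, pm_dec=0 * u.mas / u.yr,
              radial_velocity=20 * u.km / u.s)

sc = SpectralCoord(656.3 * u.nm, observer=observer, target=target)
print(sc.radial_velocity)   # line-of-sight velocity computed from the two frames
print(sc.redshift)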
import warnings from textwrap import indent import astropy.units as u import numpy as np from astropy.constants import c from astropy.coordinates import (ICRS, CartesianDifferential, CartesianRepresentation, SkyCoord) from astropy.coordinates.spectral_quantity import SpectralQuantity from astropy.coordinates.baseframe import (BaseCoordinateFrame, frame_transform_graph) from astropy.utils.exceptions import AstropyUserWarning __all__ = ['SpectralCoord'] class NoVelocityWarning(AstropyUserWarning): pass class NoDistanceWarning(AstropyUserWarning): pass KMS = u.km / u.s C_KMS = c.to(KMS) ZERO_VELOCITIES = CartesianDifferential([0, 0, 0] * KMS) # Default distance to use for target when none is provided DEFAULT_DISTANCE = 1e6 * u.kpc # We don't want to run doctests in the docstrings we inherit from Quantity __doctest_skip__ = ['SpectralCoord.*'] def _velocity_to_redshift(velocity): """ Convert a velocity to a relativistic redshift. """ beta = velocity / C_KMS return np.sqrt((1 + beta) / (1 - beta)) - 1 def _redshift_to_velocity(redshift): """ Convert a relativistic redshift to a velocity. """ zponesq = (1 + redshift) ** 2 return (C_KMS * (zponesq - 1) / (zponesq + 1)) def _apply_relativistic_doppler_shift(scoord, velocity): """ Given a `SpectralQuantity` and a velocity, return a new `SpectralQuantity` that is Doppler shifted by this amount. Note that the Doppler shift applied is the full relativistic one, so `SpectralQuantity` currently expressed in velocity and not using the relativistic convention will temporarily be converted to use the relativistic convention while the shift is applied. Positive velocities are assumed to redshift the spectral quantity, while negative velocities blueshift the spectral quantity. """ # NOTE: we deliberately don't keep sub-classes of SpectralQuantity intact # since we can't guarantee that their metadata would be correct/consistent. squantity = scoord.view(SpectralQuantity) beta = velocity / c doppler_factor = np.sqrt((1 + beta) / (1 - beta)) if squantity.unit.is_equivalent(u.m): # wavelength return squantity * doppler_factor elif (squantity.unit.is_equivalent(u.Hz) or squantity.unit.is_equivalent(u.eV) or squantity.unit.is_equivalent(1 / u.m)): return squantity / doppler_factor elif squantity.unit.is_equivalent(KMS): # velocity return (squantity.to(u.Hz) / doppler_factor).to(squantity.unit) else: # pragma: no cover raise RuntimeError(f"Unexpected units in velocity shift: {squantity.unit}. " "This should not happen, so please report this in the " "astropy issue tracker!") def update_differentials_to_match(original, velocity_reference, preserve_observer_frame=False): """ Given an original coordinate object, update the differentials so that the final coordinate is at the same location as the original coordinate but co-moving with the velocity reference object. If preserve_original_frame is set to True, the resulting object will be in the frame of the original coordinate, otherwise it will be in the frame of the velocity reference. """ if not velocity_reference.data.differentials: raise ValueError("Reference frame has no velocities") # If the reference has an obstime already defined, we should ignore # it and stick with the original observer obstime. 
if 'obstime' in velocity_reference.frame_attributes and hasattr(original, 'obstime'): velocity_reference = velocity_reference.replicate(obstime=original.obstime) # We transform both coordinates to ICRS for simplicity and because we know # it's a simple frame that is not time-dependent (it could be that both # the original and velocity_reference frame are time-dependent) original_icrs = original.transform_to(ICRS()) velocity_reference_icrs = velocity_reference.transform_to(ICRS()) differentials = velocity_reference_icrs.data.represent_as(CartesianRepresentation, CartesianDifferential).differentials data_with_differentials = (original_icrs.data.represent_as(CartesianRepresentation) .with_differentials(differentials)) final_icrs = original_icrs.realize_frame(data_with_differentials) if preserve_observer_frame: final = final_icrs.transform_to(original) else: final = final_icrs.transform_to(velocity_reference) return final.replicate(representation_type=CartesianRepresentation, differential_type=CartesianDifferential) def attach_zero_velocities(coord): """ Set the differentials to be stationary on a coordinate object. """ new_data = coord.cartesian.with_differentials(ZERO_VELOCITIES) return coord.realize_frame(new_data) def _get_velocities(coord): if 's' in coord.data.differentials: return coord.velocity else: return ZERO_VELOCITIES class SpectralCoord(SpectralQuantity): """ A spectral coordinate with its corresponding unit. .. note:: The |SpectralCoord| class is new in Astropy v4.1 and should be considered experimental at this time. Note that we do not fully support cases where the observer and target are moving relativistically relative to each other, so care should be taken in those cases. It is possible that there will be API changes in future versions of Astropy based on user feedback. If you have specific ideas for how it might be improved, please let us know on the `astropy-dev mailing list`_ or at http://feedback.astropy.org. Parameters ---------- value : ndarray or `~astropy.units.Quantity` or `SpectralCoord` Spectral values, which should be either wavelength, frequency, energy, wavenumber, or velocity values. unit : str or `~astropy.units.Unit` Unit for the given spectral values. observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional The coordinate (position and velocity) of observer. If no velocities are present on this object, the observer is assumed to be stationary relative to the frame origin. target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional The coordinate (position and velocity) of target. If no velocities are present on this object, the target is assumed to be stationary relative to the frame origin. radial_velocity : `~astropy.units.Quantity`, optional The radial velocity of the target with respect to the observer. This can only be specified if ``redshift`` is not specified. redshift : float, optional The relativistic redshift of the target with respect to the observer. This can only be specified if ``radial_velocity`` cannot be specified. doppler_rest : `~astropy.units.Quantity`, optional The rest value to use when expressing the spectral value as a velocity. doppler_convention : str, optional The Doppler convention to use when expressing the spectral value as a velocity. 
""" @u.quantity_input(radial_velocity=u.km/u.s) def __new__(cls, value, unit=None, observer=None, target=None, radial_velocity=None, redshift=None, **kwargs): obj = super().__new__(cls, value, unit=unit, **kwargs) # There are two main modes of operation in this class. Either the # observer and target are both defined, in which case the radial # velocity and redshift are automatically computed from these, or # only one of the observer and target are specified, along with a # manually specified radial velocity or redshift. So if a target and # observer are both specified, we can't also accept a radial velocity # or redshift. if target is not None and observer is not None: if radial_velocity is not None or redshift is not None: raise ValueError("Cannot specify radial velocity or redshift if both " "target and observer are specified") # We only deal with redshifts here and in the redshift property. # Otherwise internally we always deal with velocities. if redshift is not None: if radial_velocity is not None: raise ValueError("Cannot set both a radial velocity and redshift") redshift = u.Quantity(redshift) # For now, we can't specify redshift=u.one in quantity_input above # and have it work with plain floats, but if that is fixed, for # example as in https://github.com/astropy/astropy/pull/10232, we # can remove the check here and add redshift=u.one to the decorator if not redshift.unit.is_equivalent(u.one): raise u.UnitsError('redshift should be dimensionless') radial_velocity = _redshift_to_velocity(redshift) # If we're initializing from an existing SpectralCoord, keep any # parameters that aren't being overridden if observer is None: observer = getattr(value, 'observer', None) if target is None: target = getattr(value, 'target', None) # As mentioned above, we should only specify the radial velocity # manually if either or both the observer and target are not # specified. if observer is None or target is None: if radial_velocity is None: radial_velocity = getattr(value, 'radial_velocity', None) obj._radial_velocity = radial_velocity obj._observer = cls._validate_coordinate(observer, label='observer') obj._target = cls._validate_coordinate(target, label='target') return obj def __array_finalize__(self, obj): super().__array_finalize__(obj) self._radial_velocity = getattr(obj, '_radial_velocity', None) self._observer = getattr(obj, '_observer', None) self._target = getattr(obj, '_target', None) @staticmethod def _validate_coordinate(coord, label=''): """ Checks the type of the frame and whether a velocity differential and a distance has been defined on the frame object. If no distance is defined, the target is assumed to be "really far away", and the observer is assumed to be "in the solar system". Parameters ---------- coord : `~astropy.coordinates.BaseCoordinateFrame` The new frame to be used for target or observer. label : str, optional The name of the object being validated (e.g. 'target' or 'observer'), which is then used in error messages. 
""" if coord is None: return if not issubclass(coord.__class__, BaseCoordinateFrame): if isinstance(coord, SkyCoord): coord = coord.frame else: raise TypeError(f"{label} must be a SkyCoord or coordinate frame instance") # If the distance is not well-defined, ensure that it works properly # for generating differentials # TODO: change this to not set the distance and yield a warning once # there's a good way to address this in astropy.coordinates # https://github.com/astropy/astropy/issues/10247 with np.errstate(all='ignore'): distance = getattr(coord, 'distance', None) if distance is not None and distance.unit.physical_type == 'dimensionless': coord = SkyCoord(coord, distance=DEFAULT_DISTANCE) warnings.warn( "Distance on coordinate object is dimensionless, an " f"abritrary distance value of {DEFAULT_DISTANCE} will be set instead.", NoDistanceWarning) # If the observer frame does not contain information about the # velocity of the system, assume that the velocity is zero in the # system. if 's' not in coord.data.differentials: warnings.warn( "No velocity defined on frame, assuming {}.".format( ZERO_VELOCITIES), NoVelocityWarning) coord = attach_zero_velocities(coord) return coord def replicate(self, value=None, unit=None, observer=None, target=None, radial_velocity=None, redshift=None, doppler_convention=None, doppler_rest=None, copy=False): """ Return a replica of the `SpectralCoord`, optionally changing the values or attributes. Note that no conversion is carried out by this method - this keeps all the values and attributes the same, except for the ones explicitly passed to this method which are changed. If ``copy`` is set to `True` then a full copy of the internal arrays will be made. By default the replica will use a reference to the original arrays when possible to save memory. Parameters ---------- value : ndarray or `~astropy.units.Quantity` or `SpectralCoord`, optional Spectral values, which should be either wavelength, frequency, energy, wavenumber, or velocity values. unit : str or `~astropy.units.Unit` Unit for the given spectral values. observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional The coordinate (position and velocity) of observer. target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional The coordinate (position and velocity) of target. radial_velocity : `~astropy.units.Quantity`, optional The radial velocity of the target with respect to the observer. redshift : float, optional The relativistic redshift of the target with respect to the observer. doppler_rest : `~astropy.units.Quantity`, optional The rest value to use when expressing the spectral value as a velocity. doppler_convention : str, optional The Doppler convention to use when expressing the spectral value as a velocity. copy : bool, optional If `True`, and ``value`` is not specified, the values are copied to the new `SkyCoord` - otherwise a reference to the same values is used. 
Returns ------- sc : `SpectralCoord` object Replica of this object """ if isinstance(value, u.Quantity): if unit is not None: raise ValueError("Cannot specify value as a Quantity and also specify unit") else: value, unit = value.value, value.unit value = value if value is not None else self.value unit = unit or self.unit observer = self._validate_coordinate(observer) or self.observer target = self._validate_coordinate(target) or self.target doppler_convention = doppler_convention or self.doppler_convention doppler_rest = doppler_rest or self.doppler_rest # If value is being taken from self and copy is Tru if copy: value = value.copy() # Only include radial_velocity if it is not auto-computed from the # observer and target. if (self.observer is None or self.target is None) and radial_velocity is None and redshift is None: radial_velocity = self.radial_velocity with warnings.catch_warnings(): warnings.simplefilter('ignore', NoVelocityWarning) return self.__class__(value=value, unit=unit, observer=observer, target=target, radial_velocity=radial_velocity, redshift=redshift, doppler_convention=doppler_convention, doppler_rest=doppler_rest, copy=False) @property def quantity(self): """ Convert the ``SpectralCoord`` to a `~astropy.units.Quantity`. Equivalent to ``self.view(u.Quantity)``. Returns ------- `~astropy.units.Quantity` This object viewed as a `~astropy.units.Quantity`. """ return self.view(u.Quantity) @property def observer(self): """ The coordinates of the observer. If set, and a target is set as well, this will override any explicit radial velocity passed in. Returns ------- `~astropy.coordinates.BaseCoordinateFrame` The astropy coordinate frame representing the observation. """ return self._observer @observer.setter def observer(self, value): if self.observer is not None: raise ValueError("observer has already been set") self._observer = self._validate_coordinate(value, label='observer') # Switch to auto-computing radial velocity if self._target is not None: self._radial_velocity = None @property def target(self): """ The coordinates of the target being observed. If set, and an observer is set as well, this will override any explicit radial velocity passed in. Returns ------- `~astropy.coordinates.BaseCoordinateFrame` The astropy coordinate frame representing the target. """ return self._target @target.setter def target(self, value): if self.target is not None: raise ValueError("target has already been set") self._target = self._validate_coordinate(value, label='target') # Switch to auto-computing radial velocity if self._observer is not None: self._radial_velocity = None @property def radial_velocity(self): """ Radial velocity of target relative to the observer. Returns ------- `~astropy.units.Quantity` Radial velocity of target. Notes ----- This is different from the ``.radial_velocity`` property of a coordinate frame in that this calculates the radial velocity with respect to the *observer*, not the origin of the frame. """ if self._observer is None or self._target is None: if self._radial_velocity is None: return 0 * KMS else: return self._radial_velocity else: return self._calculate_radial_velocity(self._observer, self._target, as_scalar=True) @property def redshift(self): """ Redshift of target relative to observer. Calculated from the radial velocity. Returns ------- float Redshift of target. 
""" return _velocity_to_redshift(self.radial_velocity) @staticmethod def _calculate_radial_velocity(observer, target, as_scalar=False): """ Compute the line-of-sight velocity from the observer to the target. Parameters ---------- observer : `~astropy.coordinates.BaseCoordinateFrame` The frame of the observer. target : `~astropy.coordinates.BaseCoordinateFrame` The frame of the target. as_scalar : bool If `True`, the magnitude of the velocity vector will be returned, otherwise the full vector will be returned. Returns ------- `~astropy.units.Quantity` The radial velocity of the target with respect to the observer. """ # Convert observer and target to ICRS to avoid finite differencing # calculations that lack numerical precision. observer_icrs = observer.transform_to(ICRS()) target_icrs = target.transform_to(ICRS()) pos_hat = SpectralCoord._normalized_position_vector(observer_icrs, target_icrs) d_vel = target_icrs.velocity - observer_icrs.velocity vel_mag = pos_hat.dot(d_vel) if as_scalar: return vel_mag else: return vel_mag * pos_hat @staticmethod def _normalized_position_vector(observer, target): """ Calculate the normalized position vector between two frames. Parameters ---------- observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The observation frame or coordinate. target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The target frame or coordinate. Returns ------- pos_hat : `BaseRepresentation` Position representation. """ d_pos = (target.cartesian.without_differentials() - observer.cartesian.without_differentials()) dp_norm = d_pos.norm() # Reset any that are 0 to 1 to avoid nans from 0/0 dp_norm[dp_norm == 0] = 1 * dp_norm.unit pos_hat = d_pos / dp_norm return pos_hat @u.quantity_input(velocity=u.km/u.s) def with_observer_stationary_relative_to(self, frame, velocity=None, preserve_observer_frame=False): """ A new `SpectralCoord` with the velocity of the observer altered, but not the position. If a coordinate frame is specified, the observer velocities will be modified to be stationary in the specified frame. If a coordinate instance is specified, optionally with non-zero velocities, the observer velocities will be updated so that the observer is co-moving with the specified coordinates. Parameters ---------- frame : str, `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The observation frame in which the observer will be stationary. This can be the name of a frame (e.g. 'icrs'), a frame class, frame instance with no data, or instance with data. This can optionally include velocities. velocity : `~astropy.units.Quantity` or `~astropy.coordinates.CartesianDifferential`, optional If ``frame`` does not contain velocities, these can be specified as a 3-element `~astropy.units.Quantity`. In the case where this is also not specified, the velocities default to zero. preserve_observer_frame : bool If `True`, the final observer frame class will be the same as the original one, and if `False` it will be the frame of the velocity reference class. Returns ------- new_coord : `SpectralCoord` The new coordinate object representing the spectral data transformed based on the observer's new velocity frame. 
""" if self.observer is None or self.target is None: raise ValueError("This method can only be used if both observer " "and target are defined on the SpectralCoord.") # Start off by extracting frame if a SkyCoord was passed in if isinstance(frame, SkyCoord): frame = frame.frame if isinstance(frame, BaseCoordinateFrame): if not frame.has_data: frame = frame.realize_frame(CartesianRepresentation(0 * u.km, 0 * u.km, 0 * u.km)) if frame.data.differentials: if velocity is not None: raise ValueError('frame already has differentials, cannot also specify velocity') # otherwise frame is ready to go else: if velocity is None: differentials = ZERO_VELOCITIES else: differentials = CartesianDifferential(velocity) frame = frame.realize_frame(frame.data.with_differentials(differentials)) if isinstance(frame, (type, str)): if isinstance(frame, type): frame_cls = frame elif isinstance(frame, str): frame_cls = frame_transform_graph.lookup_name(frame) if velocity is None: velocity = 0 * u.m / u.s, 0 * u.m / u.s, 0 * u.m / u.s elif velocity.shape != (3,): raise ValueError('velocity should be a Quantity vector with 3 elements') frame = frame_cls(0 * u.m, 0 * u.m, 0 * u.m, *velocity, representation_type='cartesian', differential_type='cartesian') observer = update_differentials_to_match(self.observer, frame, preserve_observer_frame=preserve_observer_frame) # Calculate the initial and final los velocity init_obs_vel = self._calculate_radial_velocity(self.observer, self.target, as_scalar=True) fin_obs_vel = self._calculate_radial_velocity(observer, self.target, as_scalar=True) # Apply transformation to data new_data = _apply_relativistic_doppler_shift(self, fin_obs_vel - init_obs_vel) new_coord = self.replicate(value=new_data, observer=observer) return new_coord def with_radial_velocity_shift(self, target_shift=None, observer_shift=None): """ Apply a velocity shift to this spectral coordinate. The shift can be provided as a redshift (float value) or radial velocity (`~astropy.units.Quantity` with physical type of 'speed'). Parameters ---------- target_shift : float or `~astropy.units.Quantity` Shift value to apply to current target. observer_shift : float or `~astropy.units.Quantity` Shift value to apply to current observer. Returns ------- `SpectralCoord` New spectral coordinate with the target/observer velocity changed to incorporate the shift. This is always a new object even if ``target_shift`` and ``observer_shift`` are both `None`. 
""" if observer_shift is not None and (self.target is None or self.observer is None): raise ValueError("Both an observer and target must be defined " "before applying a velocity shift.") for arg in [x for x in [target_shift, observer_shift] if x is not None]: if isinstance(arg, u.Quantity) and not arg.unit.is_equivalent((u.one, KMS)): raise u.UnitsError("Argument must have unit physical type " "'speed' for radial velocty or " "'dimensionless' for redshift.") # The target or observer value is defined but is not a quantity object, # assume it's a redshift float value and convert to velocity if target_shift is None: if self._observer is None or self._target is None: return self.replicate() target_shift = 0 * KMS else: target_shift = u.Quantity(target_shift) if target_shift.unit.physical_type == 'dimensionless': target_shift = _redshift_to_velocity(target_shift) if self._observer is None or self._target is None: return self.replicate(value=_apply_relativistic_doppler_shift(self, target_shift), radial_velocity=self.radial_velocity + target_shift) if observer_shift is None: observer_shift = 0 * KMS else: observer_shift = u.Quantity(observer_shift) if observer_shift.unit.physical_type == 'dimensionless': observer_shift = _redshift_to_velocity(observer_shift) target_icrs = self._target.transform_to(ICRS()) observer_icrs = self._observer.transform_to(ICRS()) pos_hat = SpectralCoord._normalized_position_vector(observer_icrs, target_icrs) target_velocity = _get_velocities(target_icrs) + target_shift * pos_hat observer_velocity = _get_velocities(observer_icrs) + observer_shift * pos_hat target_velocity = CartesianDifferential(target_velocity.xyz) observer_velocity = CartesianDifferential(observer_velocity.xyz) new_target = (target_icrs .realize_frame(target_icrs.cartesian.with_differentials(target_velocity)) .transform_to(self._target)) new_observer = (observer_icrs .realize_frame(observer_icrs.cartesian.with_differentials(observer_velocity)) .transform_to(self._observer)) init_obs_vel = self._calculate_radial_velocity(observer_icrs, target_icrs, as_scalar=True) fin_obs_vel = self._calculate_radial_velocity(new_observer, new_target, as_scalar=True) new_data = _apply_relativistic_doppler_shift(self, fin_obs_vel - init_obs_vel) return self.replicate(value=new_data, observer=new_observer, target=new_target) def to_rest(self): """ Transforms the spectral axis to the rest frame. """ if self.observer is not None and self.target is not None: return self.with_observer_stationary_relative_to(self.target) result = _apply_relativistic_doppler_shift(self, -self.radial_velocity) return self.replicate(value=result, radial_velocity=0. 
* KMS, redshift=None) def __repr__(self): prefixstr = '<' + self.__class__.__name__ + ' ' try: radial_velocity = self.radial_velocity redshift = self.redshift except ValueError: radial_velocity = redshift = 'Undefined' repr_items = [f'{prefixstr}'] if self.observer is not None: observer_repr = indent(repr(self.observer), 14 * ' ').lstrip() repr_items.append(f' observer: {observer_repr}') if self.target is not None: target_repr = indent(repr(self.target), 12 * ' ').lstrip() repr_items.append(f' target: {target_repr}') if (self._observer is not None and self._target is not None) or self._radial_velocity is not None: if self.observer is not None and self.target is not None: repr_items.append(' observer to target (computed from above):') else: repr_items.append(' observer to target:') repr_items.append(f' radial_velocity={radial_velocity}') repr_items.append(f' redshift={redshift}') if self.doppler_rest is not None or self.doppler_convention is not None: repr_items.append(f' doppler_rest={self.doppler_rest}') repr_items.append(f' doppler_convention={self.doppler_convention}') arrstr = np.array2string(self.view(np.ndarray), separator=', ', prefix=' ') if len(repr_items) == 1: repr_items[0] += f'{arrstr}{self._unitstr:s}' else: repr_items[1] = ' (' + repr_items[1].lstrip() repr_items[-1] += ')' repr_items.append(f' {arrstr}{self._unitstr:s}') return '\n'.join(repr_items) + '>'
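The class in the source above supports two modes of construction: either both observer and target are given, in which case the radial velocity and redshift are derived from them, or at most one of the two is given together with an explicit radial_velocity or redshift. A minimal sketch of both modes follows; the coordinates, frequencies and velocities are made up purely for illustration, and it assumes astropy 4.1 or later, where SpectralCoord is importable from astropy.coordinates.

import astropy.units as u
from astropy.coordinates import SkyCoord, SpectralCoord

# Hypothetical observer and target; distances are given so that velocity
# differentials can be attached (both default to zero velocity, so a
# NoVelocityWarning is emitted for each).
observer = SkyCoord(10.0 * u.deg, 20.0 * u.deg, distance=1.0 * u.au, frame='icrs')
target = SkyCoord(10.0 * u.deg, 20.0 * u.deg, distance=100.0 * u.pc, frame='icrs')

# Mode 1: observer and target both given -> radial velocity is computed.
sc = SpectralCoord([654.2, 654.4] * u.nm, observer=observer, target=target)
print(sc.radial_velocity)   # 0 km/s here, since both frames are assumed stationary

# Mode 2: neither (or only one) given -> radial velocity is set explicitly.
sc2 = SpectralCoord(500 * u.GHz, radial_velocity=300 * u.km / u.s)
print(sc2.redshift)         # ~0.001, via the relativistic conversion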
with_radial_velocity_shift
Apply a velocity shift to this spectral coordinate. The shift can be provided as a redshift (float value) or radial velocity (`~astropy.units.Quantity` with physical type of 'speed'). Parameters ---------- target_shift : float or `~astropy.units.Quantity` Shift value to apply to current target. observer_shift : float or `~astropy.units.Quantity` Shift value to apply to current observer. Returns ------- `SpectralCoord` New spectral coordinate with the target/observer velocity changed to incorporate the shift. This is always a new object even if ``target_shift`` and ``observer_shift`` are both `None`.
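When the shift is supplied as a dimensionless (float) value it is treated as a redshift and converted to a velocity with the relativistic relation implemented by the module-level _redshift_to_velocity helper. A quick worked check of that relation, with an arbitrary z chosen purely for illustration:

import astropy.units as u
from astropy.constants import c

# v = c * ((1 + z)**2 - 1) / ((1 + z)**2 + 1)   (relativistic redshift -> velocity)
z = 0.01
v = (c * ((1 + z) ** 2 - 1) / ((1 + z) ** 2 + 1)).to(u.km / u.s)
print(v)  # roughly 2983 km/s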
import warnings from textwrap import indent import astropy.units as u import numpy as np from astropy.constants import c from astropy.coordinates import (ICRS, CartesianDifferential, CartesianRepresentation, SkyCoord) from astropy.coordinates.spectral_quantity import SpectralQuantity from astropy.coordinates.baseframe import (BaseCoordinateFrame, frame_transform_graph) from astropy.utils.exceptions import AstropyUserWarning __all__ = ['SpectralCoord'] class NoVelocityWarning(AstropyUserWarning): pass class NoDistanceWarning(AstropyUserWarning): pass KMS = u.km / u.s C_KMS = c.to(KMS) ZERO_VELOCITIES = CartesianDifferential([0, 0, 0] * KMS) # Default distance to use for target when none is provided DEFAULT_DISTANCE = 1e6 * u.kpc # We don't want to run doctests in the docstrings we inherit from Quantity __doctest_skip__ = ['SpectralCoord.*'] def _velocity_to_redshift(velocity): """ Convert a velocity to a relativistic redshift. """ beta = velocity / C_KMS return np.sqrt((1 + beta) / (1 - beta)) - 1 def _redshift_to_velocity(redshift): """ Convert a relativistic redshift to a velocity. """ zponesq = (1 + redshift) ** 2 return (C_KMS * (zponesq - 1) / (zponesq + 1)) def _apply_relativistic_doppler_shift(scoord, velocity): """ Given a `SpectralQuantity` and a velocity, return a new `SpectralQuantity` that is Doppler shifted by this amount. Note that the Doppler shift applied is the full relativistic one, so `SpectralQuantity` currently expressed in velocity and not using the relativistic convention will temporarily be converted to use the relativistic convention while the shift is applied. Positive velocities are assumed to redshift the spectral quantity, while negative velocities blueshift the spectral quantity. """ # NOTE: we deliberately don't keep sub-classes of SpectralQuantity intact # since we can't guarantee that their metadata would be correct/consistent. squantity = scoord.view(SpectralQuantity) beta = velocity / c doppler_factor = np.sqrt((1 + beta) / (1 - beta)) if squantity.unit.is_equivalent(u.m): # wavelength return squantity * doppler_factor elif (squantity.unit.is_equivalent(u.Hz) or squantity.unit.is_equivalent(u.eV) or squantity.unit.is_equivalent(1 / u.m)): return squantity / doppler_factor elif squantity.unit.is_equivalent(KMS): # velocity return (squantity.to(u.Hz) / doppler_factor).to(squantity.unit) else: # pragma: no cover raise RuntimeError(f"Unexpected units in velocity shift: {squantity.unit}. " "This should not happen, so please report this in the " "astropy issue tracker!") def update_differentials_to_match(original, velocity_reference, preserve_observer_frame=False): """ Given an original coordinate object, update the differentials so that the final coordinate is at the same location as the original coordinate but co-moving with the velocity reference object. If preserve_original_frame is set to True, the resulting object will be in the frame of the original coordinate, otherwise it will be in the frame of the velocity reference. """ if not velocity_reference.data.differentials: raise ValueError("Reference frame has no velocities") # If the reference has an obstime already defined, we should ignore # it and stick with the original observer obstime. 
if 'obstime' in velocity_reference.frame_attributes and hasattr(original, 'obstime'): velocity_reference = velocity_reference.replicate(obstime=original.obstime) # We transform both coordinates to ICRS for simplicity and because we know # it's a simple frame that is not time-dependent (it could be that both # the original and velocity_reference frame are time-dependent) original_icrs = original.transform_to(ICRS()) velocity_reference_icrs = velocity_reference.transform_to(ICRS()) differentials = velocity_reference_icrs.data.represent_as(CartesianRepresentation, CartesianDifferential).differentials data_with_differentials = (original_icrs.data.represent_as(CartesianRepresentation) .with_differentials(differentials)) final_icrs = original_icrs.realize_frame(data_with_differentials) if preserve_observer_frame: final = final_icrs.transform_to(original) else: final = final_icrs.transform_to(velocity_reference) return final.replicate(representation_type=CartesianRepresentation, differential_type=CartesianDifferential) def attach_zero_velocities(coord): """ Set the differentials to be stationary on a coordinate object. """ new_data = coord.cartesian.with_differentials(ZERO_VELOCITIES) return coord.realize_frame(new_data) def _get_velocities(coord): if 's' in coord.data.differentials: return coord.velocity else: return ZERO_VELOCITIES class SpectralCoord(SpectralQuantity): """ A spectral coordinate with its corresponding unit. .. note:: The |SpectralCoord| class is new in Astropy v4.1 and should be considered experimental at this time. Note that we do not fully support cases where the observer and target are moving relativistically relative to each other, so care should be taken in those cases. It is possible that there will be API changes in future versions of Astropy based on user feedback. If you have specific ideas for how it might be improved, please let us know on the `astropy-dev mailing list`_ or at http://feedback.astropy.org. Parameters ---------- value : ndarray or `~astropy.units.Quantity` or `SpectralCoord` Spectral values, which should be either wavelength, frequency, energy, wavenumber, or velocity values. unit : str or `~astropy.units.Unit` Unit for the given spectral values. observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional The coordinate (position and velocity) of observer. If no velocities are present on this object, the observer is assumed to be stationary relative to the frame origin. target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional The coordinate (position and velocity) of target. If no velocities are present on this object, the target is assumed to be stationary relative to the frame origin. radial_velocity : `~astropy.units.Quantity`, optional The radial velocity of the target with respect to the observer. This can only be specified if ``redshift`` is not specified. redshift : float, optional The relativistic redshift of the target with respect to the observer. This can only be specified if ``radial_velocity`` cannot be specified. doppler_rest : `~astropy.units.Quantity`, optional The rest value to use when expressing the spectral value as a velocity. doppler_convention : str, optional The Doppler convention to use when expressing the spectral value as a velocity. 
""" @u.quantity_input(radial_velocity=u.km/u.s) def __new__(cls, value, unit=None, observer=None, target=None, radial_velocity=None, redshift=None, **kwargs): obj = super().__new__(cls, value, unit=unit, **kwargs) # There are two main modes of operation in this class. Either the # observer and target are both defined, in which case the radial # velocity and redshift are automatically computed from these, or # only one of the observer and target are specified, along with a # manually specified radial velocity or redshift. So if a target and # observer are both specified, we can't also accept a radial velocity # or redshift. if target is not None and observer is not None: if radial_velocity is not None or redshift is not None: raise ValueError("Cannot specify radial velocity or redshift if both " "target and observer are specified") # We only deal with redshifts here and in the redshift property. # Otherwise internally we always deal with velocities. if redshift is not None: if radial_velocity is not None: raise ValueError("Cannot set both a radial velocity and redshift") redshift = u.Quantity(redshift) # For now, we can't specify redshift=u.one in quantity_input above # and have it work with plain floats, but if that is fixed, for # example as in https://github.com/astropy/astropy/pull/10232, we # can remove the check here and add redshift=u.one to the decorator if not redshift.unit.is_equivalent(u.one): raise u.UnitsError('redshift should be dimensionless') radial_velocity = _redshift_to_velocity(redshift) # If we're initializing from an existing SpectralCoord, keep any # parameters that aren't being overridden if observer is None: observer = getattr(value, 'observer', None) if target is None: target = getattr(value, 'target', None) # As mentioned above, we should only specify the radial velocity # manually if either or both the observer and target are not # specified. if observer is None or target is None: if radial_velocity is None: radial_velocity = getattr(value, 'radial_velocity', None) obj._radial_velocity = radial_velocity obj._observer = cls._validate_coordinate(observer, label='observer') obj._target = cls._validate_coordinate(target, label='target') return obj def __array_finalize__(self, obj): super().__array_finalize__(obj) self._radial_velocity = getattr(obj, '_radial_velocity', None) self._observer = getattr(obj, '_observer', None) self._target = getattr(obj, '_target', None) @staticmethod def _validate_coordinate(coord, label=''): """ Checks the type of the frame and whether a velocity differential and a distance has been defined on the frame object. If no distance is defined, the target is assumed to be "really far away", and the observer is assumed to be "in the solar system". Parameters ---------- coord : `~astropy.coordinates.BaseCoordinateFrame` The new frame to be used for target or observer. label : str, optional The name of the object being validated (e.g. 'target' or 'observer'), which is then used in error messages. 
""" if coord is None: return if not issubclass(coord.__class__, BaseCoordinateFrame): if isinstance(coord, SkyCoord): coord = coord.frame else: raise TypeError(f"{label} must be a SkyCoord or coordinate frame instance") # If the distance is not well-defined, ensure that it works properly # for generating differentials # TODO: change this to not set the distance and yield a warning once # there's a good way to address this in astropy.coordinates # https://github.com/astropy/astropy/issues/10247 with np.errstate(all='ignore'): distance = getattr(coord, 'distance', None) if distance is not None and distance.unit.physical_type == 'dimensionless': coord = SkyCoord(coord, distance=DEFAULT_DISTANCE) warnings.warn( "Distance on coordinate object is dimensionless, an " f"abritrary distance value of {DEFAULT_DISTANCE} will be set instead.", NoDistanceWarning) # If the observer frame does not contain information about the # velocity of the system, assume that the velocity is zero in the # system. if 's' not in coord.data.differentials: warnings.warn( "No velocity defined on frame, assuming {}.".format( ZERO_VELOCITIES), NoVelocityWarning) coord = attach_zero_velocities(coord) return coord def replicate(self, value=None, unit=None, observer=None, target=None, radial_velocity=None, redshift=None, doppler_convention=None, doppler_rest=None, copy=False): """ Return a replica of the `SpectralCoord`, optionally changing the values or attributes. Note that no conversion is carried out by this method - this keeps all the values and attributes the same, except for the ones explicitly passed to this method which are changed. If ``copy`` is set to `True` then a full copy of the internal arrays will be made. By default the replica will use a reference to the original arrays when possible to save memory. Parameters ---------- value : ndarray or `~astropy.units.Quantity` or `SpectralCoord`, optional Spectral values, which should be either wavelength, frequency, energy, wavenumber, or velocity values. unit : str or `~astropy.units.Unit` Unit for the given spectral values. observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional The coordinate (position and velocity) of observer. target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional The coordinate (position and velocity) of target. radial_velocity : `~astropy.units.Quantity`, optional The radial velocity of the target with respect to the observer. redshift : float, optional The relativistic redshift of the target with respect to the observer. doppler_rest : `~astropy.units.Quantity`, optional The rest value to use when expressing the spectral value as a velocity. doppler_convention : str, optional The Doppler convention to use when expressing the spectral value as a velocity. copy : bool, optional If `True`, and ``value`` is not specified, the values are copied to the new `SkyCoord` - otherwise a reference to the same values is used. 
Returns ------- sc : `SpectralCoord` object Replica of this object """ if isinstance(value, u.Quantity): if unit is not None: raise ValueError("Cannot specify value as a Quantity and also specify unit") else: value, unit = value.value, value.unit value = value if value is not None else self.value unit = unit or self.unit observer = self._validate_coordinate(observer) or self.observer target = self._validate_coordinate(target) or self.target doppler_convention = doppler_convention or self.doppler_convention doppler_rest = doppler_rest or self.doppler_rest # If value is being taken from self and copy is Tru if copy: value = value.copy() # Only include radial_velocity if it is not auto-computed from the # observer and target. if (self.observer is None or self.target is None) and radial_velocity is None and redshift is None: radial_velocity = self.radial_velocity with warnings.catch_warnings(): warnings.simplefilter('ignore', NoVelocityWarning) return self.__class__(value=value, unit=unit, observer=observer, target=target, radial_velocity=radial_velocity, redshift=redshift, doppler_convention=doppler_convention, doppler_rest=doppler_rest, copy=False) @property def quantity(self): """ Convert the ``SpectralCoord`` to a `~astropy.units.Quantity`. Equivalent to ``self.view(u.Quantity)``. Returns ------- `~astropy.units.Quantity` This object viewed as a `~astropy.units.Quantity`. """ return self.view(u.Quantity) @property def observer(self): """ The coordinates of the observer. If set, and a target is set as well, this will override any explicit radial velocity passed in. Returns ------- `~astropy.coordinates.BaseCoordinateFrame` The astropy coordinate frame representing the observation. """ return self._observer @observer.setter def observer(self, value): if self.observer is not None: raise ValueError("observer has already been set") self._observer = self._validate_coordinate(value, label='observer') # Switch to auto-computing radial velocity if self._target is not None: self._radial_velocity = None @property def target(self): """ The coordinates of the target being observed. If set, and an observer is set as well, this will override any explicit radial velocity passed in. Returns ------- `~astropy.coordinates.BaseCoordinateFrame` The astropy coordinate frame representing the target. """ return self._target @target.setter def target(self, value): if self.target is not None: raise ValueError("target has already been set") self._target = self._validate_coordinate(value, label='target') # Switch to auto-computing radial velocity if self._observer is not None: self._radial_velocity = None @property def radial_velocity(self): """ Radial velocity of target relative to the observer. Returns ------- `~astropy.units.Quantity` Radial velocity of target. Notes ----- This is different from the ``.radial_velocity`` property of a coordinate frame in that this calculates the radial velocity with respect to the *observer*, not the origin of the frame. """ if self._observer is None or self._target is None: if self._radial_velocity is None: return 0 * KMS else: return self._radial_velocity else: return self._calculate_radial_velocity(self._observer, self._target, as_scalar=True) @property def redshift(self): """ Redshift of target relative to observer. Calculated from the radial velocity. Returns ------- float Redshift of target. 
""" return _velocity_to_redshift(self.radial_velocity) @staticmethod def _calculate_radial_velocity(observer, target, as_scalar=False): """ Compute the line-of-sight velocity from the observer to the target. Parameters ---------- observer : `~astropy.coordinates.BaseCoordinateFrame` The frame of the observer. target : `~astropy.coordinates.BaseCoordinateFrame` The frame of the target. as_scalar : bool If `True`, the magnitude of the velocity vector will be returned, otherwise the full vector will be returned. Returns ------- `~astropy.units.Quantity` The radial velocity of the target with respect to the observer. """ # Convert observer and target to ICRS to avoid finite differencing # calculations that lack numerical precision. observer_icrs = observer.transform_to(ICRS()) target_icrs = target.transform_to(ICRS()) pos_hat = SpectralCoord._normalized_position_vector(observer_icrs, target_icrs) d_vel = target_icrs.velocity - observer_icrs.velocity vel_mag = pos_hat.dot(d_vel) if as_scalar: return vel_mag else: return vel_mag * pos_hat @staticmethod def _normalized_position_vector(observer, target): """ Calculate the normalized position vector between two frames. Parameters ---------- observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The observation frame or coordinate. target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The target frame or coordinate. Returns ------- pos_hat : `BaseRepresentation` Position representation. """ d_pos = (target.cartesian.without_differentials() - observer.cartesian.without_differentials()) dp_norm = d_pos.norm() # Reset any that are 0 to 1 to avoid nans from 0/0 dp_norm[dp_norm == 0] = 1 * dp_norm.unit pos_hat = d_pos / dp_norm return pos_hat @u.quantity_input(velocity=u.km/u.s) def with_observer_stationary_relative_to(self, frame, velocity=None, preserve_observer_frame=False): """ A new `SpectralCoord` with the velocity of the observer altered, but not the position. If a coordinate frame is specified, the observer velocities will be modified to be stationary in the specified frame. If a coordinate instance is specified, optionally with non-zero velocities, the observer velocities will be updated so that the observer is co-moving with the specified coordinates. Parameters ---------- frame : str, `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The observation frame in which the observer will be stationary. This can be the name of a frame (e.g. 'icrs'), a frame class, frame instance with no data, or instance with data. This can optionally include velocities. velocity : `~astropy.units.Quantity` or `~astropy.coordinates.CartesianDifferential`, optional If ``frame`` does not contain velocities, these can be specified as a 3-element `~astropy.units.Quantity`. In the case where this is also not specified, the velocities default to zero. preserve_observer_frame : bool If `True`, the final observer frame class will be the same as the original one, and if `False` it will be the frame of the velocity reference class. Returns ------- new_coord : `SpectralCoord` The new coordinate object representing the spectral data transformed based on the observer's new velocity frame. 
""" if self.observer is None or self.target is None: raise ValueError("This method can only be used if both observer " "and target are defined on the SpectralCoord.") # Start off by extracting frame if a SkyCoord was passed in if isinstance(frame, SkyCoord): frame = frame.frame if isinstance(frame, BaseCoordinateFrame): if not frame.has_data: frame = frame.realize_frame(CartesianRepresentation(0 * u.km, 0 * u.km, 0 * u.km)) if frame.data.differentials: if velocity is not None: raise ValueError('frame already has differentials, cannot also specify velocity') # otherwise frame is ready to go else: if velocity is None: differentials = ZERO_VELOCITIES else: differentials = CartesianDifferential(velocity) frame = frame.realize_frame(frame.data.with_differentials(differentials)) if isinstance(frame, (type, str)): if isinstance(frame, type): frame_cls = frame elif isinstance(frame, str): frame_cls = frame_transform_graph.lookup_name(frame) if velocity is None: velocity = 0 * u.m / u.s, 0 * u.m / u.s, 0 * u.m / u.s elif velocity.shape != (3,): raise ValueError('velocity should be a Quantity vector with 3 elements') frame = frame_cls(0 * u.m, 0 * u.m, 0 * u.m, *velocity, representation_type='cartesian', differential_type='cartesian') observer = update_differentials_to_match(self.observer, frame, preserve_observer_frame=preserve_observer_frame) # Calculate the initial and final los velocity init_obs_vel = self._calculate_radial_velocity(self.observer, self.target, as_scalar=True) fin_obs_vel = self._calculate_radial_velocity(observer, self.target, as_scalar=True) # Apply transformation to data new_data = _apply_relativistic_doppler_shift(self, fin_obs_vel - init_obs_vel) new_coord = self.replicate(value=new_data, observer=observer) return new_coord # MASKED: with_radial_velocity_shift function (lines 635-716) def to_rest(self): """ Transforms the spectral axis to the rest frame. """ if self.observer is not None and self.target is not None: return self.with_observer_stationary_relative_to(self.target) result = _apply_relativistic_doppler_shift(self, -self.radial_velocity) return self.replicate(value=result, radial_velocity=0. * KMS, redshift=None) def __repr__(self): prefixstr = '<' + self.__class__.__name__ + ' ' try: radial_velocity = self.radial_velocity redshift = self.redshift except ValueError: radial_velocity = redshift = 'Undefined' repr_items = [f'{prefixstr}'] if self.observer is not None: observer_repr = indent(repr(self.observer), 14 * ' ').lstrip() repr_items.append(f' observer: {observer_repr}') if self.target is not None: target_repr = indent(repr(self.target), 12 * ' ').lstrip() repr_items.append(f' target: {target_repr}') if (self._observer is not None and self._target is not None) or self._radial_velocity is not None: if self.observer is not None and self.target is not None: repr_items.append(' observer to target (computed from above):') else: repr_items.append(' observer to target:') repr_items.append(f' radial_velocity={radial_velocity}') repr_items.append(f' redshift={redshift}') if self.doppler_rest is not None or self.doppler_convention is not None: repr_items.append(f' doppler_rest={self.doppler_rest}') repr_items.append(f' doppler_convention={self.doppler_convention}') arrstr = np.array2string(self.view(np.ndarray), separator=', ', prefix=' ') if len(repr_items) == 1: repr_items[0] += f'{arrstr}{self._unitstr:s}' else: repr_items[1] = ' (' + repr_items[1].lstrip() repr_items[-1] += ')' repr_items.append(f' {arrstr}{self._unitstr:s}') return '\n'.join(repr_items) + '>'
def with_radial_velocity_shift(self, target_shift=None, observer_shift=None): """ Apply a velocity shift to this spectral coordinate. The shift can be provided as a redshift (float value) or radial velocity (`~astropy.units.Quantity` with physical type of 'speed'). Parameters ---------- target_shift : float or `~astropy.units.Quantity` Shift value to apply to current target. observer_shift : float or `~astropy.units.Quantity` Shift value to apply to current observer. Returns ------- `SpectralCoord` New spectral coordinate with the target/observer velocity changed to incorporate the shift. This is always a new object even if ``target_shift`` and ``observer_shift`` are both `None`. """ if observer_shift is not None and (self.target is None or self.observer is None): raise ValueError("Both an observer and target must be defined " "before applying a velocity shift.") for arg in [x for x in [target_shift, observer_shift] if x is not None]: if isinstance(arg, u.Quantity) and not arg.unit.is_equivalent((u.one, KMS)): raise u.UnitsError("Argument must have unit physical type " "'speed' for radial velocty or " "'dimensionless' for redshift.") # The target or observer value is defined but is not a quantity object, # assume it's a redshift float value and convert to velocity if target_shift is None: if self._observer is None or self._target is None: return self.replicate() target_shift = 0 * KMS else: target_shift = u.Quantity(target_shift) if target_shift.unit.physical_type == 'dimensionless': target_shift = _redshift_to_velocity(target_shift) if self._observer is None or self._target is None: return self.replicate(value=_apply_relativistic_doppler_shift(self, target_shift), radial_velocity=self.radial_velocity + target_shift) if observer_shift is None: observer_shift = 0 * KMS else: observer_shift = u.Quantity(observer_shift) if observer_shift.unit.physical_type == 'dimensionless': observer_shift = _redshift_to_velocity(observer_shift) target_icrs = self._target.transform_to(ICRS()) observer_icrs = self._observer.transform_to(ICRS()) pos_hat = SpectralCoord._normalized_position_vector(observer_icrs, target_icrs) target_velocity = _get_velocities(target_icrs) + target_shift * pos_hat observer_velocity = _get_velocities(observer_icrs) + observer_shift * pos_hat target_velocity = CartesianDifferential(target_velocity.xyz) observer_velocity = CartesianDifferential(observer_velocity.xyz) new_target = (target_icrs .realize_frame(target_icrs.cartesian.with_differentials(target_velocity)) .transform_to(self._target)) new_observer = (observer_icrs .realize_frame(observer_icrs.cartesian.with_differentials(observer_velocity)) .transform_to(self._observer)) init_obs_vel = self._calculate_radial_velocity(observer_icrs, target_icrs, as_scalar=True) fin_obs_vel = self._calculate_radial_velocity(new_observer, new_target, as_scalar=True) new_data = _apply_relativistic_doppler_shift(self, fin_obs_vel - init_obs_vel) return self.replicate(value=new_data, observer=new_observer, target=new_target)
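A short usage sketch of the method above for the simplest case, where no observer or target is attached, so the call just adds the shift to the stored radial velocity and Doppler-shifts the spectral values; the frequency and shift values are arbitrary. When both observer and target are defined, the same call instead adds the shift to the relevant frame's velocity along the observer-target direction and recomputes the line-of-sight velocity, as the code shows.

import astropy.units as u
from astropy.coordinates import SpectralCoord

sc = SpectralCoord(500 * u.GHz, radial_velocity=0 * u.km / u.s)

# Shift given as a velocity Quantity...
shifted_v = sc.with_radial_velocity_shift(300 * u.km / u.s)
print(shifted_v.radial_velocity)   # 300 km/s

# ...or as a dimensionless redshift, which is converted relativistically first.
shifted_z = sc.with_radial_velocity_shift(0.001)
print(shifted_z.radial_velocity)   # ~299.6 km/s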
635
716
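For reference, a rough sketch of how these two numbers relate to the masked file shown above: the "# MASKED: ... (lines 635-716)" marker stands in for the span of the original file occupied by the function, and replacing that marker line with the implementation field should recover the unmasked source. The helper name and handling below are illustrative assumptions, not part of the dataset.

def unmask(masked_code, implementation):
    # Hypothetical helper: swap the single '# MASKED:' marker line back
    # for the full function body.
    out = []
    for line in masked_code.splitlines():
        if line.strip().startswith("# MASKED:"):
            out.append(implementation)
        else:
            out.append(line)
    return "\n".join(out)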
import warnings from textwrap import indent import astropy.units as u import numpy as np from astropy.constants import c from astropy.coordinates import (ICRS, CartesianDifferential, CartesianRepresentation, SkyCoord) from astropy.coordinates.spectral_quantity import SpectralQuantity from astropy.coordinates.baseframe import (BaseCoordinateFrame, frame_transform_graph) from astropy.utils.exceptions import AstropyUserWarning __all__ = ['SpectralCoord'] class NoVelocityWarning(AstropyUserWarning): pass class NoDistanceWarning(AstropyUserWarning): pass KMS = u.km / u.s C_KMS = c.to(KMS) ZERO_VELOCITIES = CartesianDifferential([0, 0, 0] * KMS) # Default distance to use for target when none is provided DEFAULT_DISTANCE = 1e6 * u.kpc # We don't want to run doctests in the docstrings we inherit from Quantity __doctest_skip__ = ['SpectralCoord.*'] def _velocity_to_redshift(velocity): """ Convert a velocity to a relativistic redshift. """ beta = velocity / C_KMS return np.sqrt((1 + beta) / (1 - beta)) - 1 def _redshift_to_velocity(redshift): """ Convert a relativistic redshift to a velocity. """ zponesq = (1 + redshift) ** 2 return (C_KMS * (zponesq - 1) / (zponesq + 1)) def _apply_relativistic_doppler_shift(scoord, velocity): """ Given a `SpectralQuantity` and a velocity, return a new `SpectralQuantity` that is Doppler shifted by this amount. Note that the Doppler shift applied is the full relativistic one, so `SpectralQuantity` currently expressed in velocity and not using the relativistic convention will temporarily be converted to use the relativistic convention while the shift is applied. Positive velocities are assumed to redshift the spectral quantity, while negative velocities blueshift the spectral quantity. """ # NOTE: we deliberately don't keep sub-classes of SpectralQuantity intact # since we can't guarantee that their metadata would be correct/consistent. squantity = scoord.view(SpectralQuantity) beta = velocity / c doppler_factor = np.sqrt((1 + beta) / (1 - beta)) if squantity.unit.is_equivalent(u.m): # wavelength return squantity * doppler_factor elif (squantity.unit.is_equivalent(u.Hz) or squantity.unit.is_equivalent(u.eV) or squantity.unit.is_equivalent(1 / u.m)): return squantity / doppler_factor elif squantity.unit.is_equivalent(KMS): # velocity return (squantity.to(u.Hz) / doppler_factor).to(squantity.unit) else: # pragma: no cover raise RuntimeError(f"Unexpected units in velocity shift: {squantity.unit}. " "This should not happen, so please report this in the " "astropy issue tracker!") def update_differentials_to_match(original, velocity_reference, preserve_observer_frame=False): """ Given an original coordinate object, update the differentials so that the final coordinate is at the same location as the original coordinate but co-moving with the velocity reference object. If preserve_original_frame is set to True, the resulting object will be in the frame of the original coordinate, otherwise it will be in the frame of the velocity reference. """ if not velocity_reference.data.differentials: raise ValueError("Reference frame has no velocities") # If the reference has an obstime already defined, we should ignore # it and stick with the original observer obstime. 
if 'obstime' in velocity_reference.frame_attributes and hasattr(original, 'obstime'): velocity_reference = velocity_reference.replicate(obstime=original.obstime) # We transform both coordinates to ICRS for simplicity and because we know # it's a simple frame that is not time-dependent (it could be that both # the original and velocity_reference frame are time-dependent) original_icrs = original.transform_to(ICRS()) velocity_reference_icrs = velocity_reference.transform_to(ICRS()) differentials = velocity_reference_icrs.data.represent_as(CartesianRepresentation, CartesianDifferential).differentials data_with_differentials = (original_icrs.data.represent_as(CartesianRepresentation) .with_differentials(differentials)) final_icrs = original_icrs.realize_frame(data_with_differentials) if preserve_observer_frame: final = final_icrs.transform_to(original) else: final = final_icrs.transform_to(velocity_reference) return final.replicate(representation_type=CartesianRepresentation, differential_type=CartesianDifferential) def attach_zero_velocities(coord): """ Set the differentials to be stationary on a coordinate object. """ new_data = coord.cartesian.with_differentials(ZERO_VELOCITIES) return coord.realize_frame(new_data) def _get_velocities(coord): if 's' in coord.data.differentials: return coord.velocity else: return ZERO_VELOCITIES class SpectralCoord(SpectralQuantity): """ A spectral coordinate with its corresponding unit. .. note:: The |SpectralCoord| class is new in Astropy v4.1 and should be considered experimental at this time. Note that we do not fully support cases where the observer and target are moving relativistically relative to each other, so care should be taken in those cases. It is possible that there will be API changes in future versions of Astropy based on user feedback. If you have specific ideas for how it might be improved, please let us know on the `astropy-dev mailing list`_ or at http://feedback.astropy.org. Parameters ---------- value : ndarray or `~astropy.units.Quantity` or `SpectralCoord` Spectral values, which should be either wavelength, frequency, energy, wavenumber, or velocity values. unit : str or `~astropy.units.Unit` Unit for the given spectral values. observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional The coordinate (position and velocity) of observer. If no velocities are present on this object, the observer is assumed to be stationary relative to the frame origin. target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional The coordinate (position and velocity) of target. If no velocities are present on this object, the target is assumed to be stationary relative to the frame origin. radial_velocity : `~astropy.units.Quantity`, optional The radial velocity of the target with respect to the observer. This can only be specified if ``redshift`` is not specified. redshift : float, optional The relativistic redshift of the target with respect to the observer. This can only be specified if ``radial_velocity`` cannot be specified. doppler_rest : `~astropy.units.Quantity`, optional The rest value to use when expressing the spectral value as a velocity. doppler_convention : str, optional The Doppler convention to use when expressing the spectral value as a velocity. 
""" @u.quantity_input(radial_velocity=u.km/u.s) def __new__(cls, value, unit=None, observer=None, target=None, radial_velocity=None, redshift=None, **kwargs): obj = super().__new__(cls, value, unit=unit, **kwargs) # There are two main modes of operation in this class. Either the # observer and target are both defined, in which case the radial # velocity and redshift are automatically computed from these, or # only one of the observer and target are specified, along with a # manually specified radial velocity or redshift. So if a target and # observer are both specified, we can't also accept a radial velocity # or redshift. if target is not None and observer is not None: if radial_velocity is not None or redshift is not None: raise ValueError("Cannot specify radial velocity or redshift if both " "target and observer are specified") # We only deal with redshifts here and in the redshift property. # Otherwise internally we always deal with velocities. if redshift is not None: if radial_velocity is not None: raise ValueError("Cannot set both a radial velocity and redshift") redshift = u.Quantity(redshift) # For now, we can't specify redshift=u.one in quantity_input above # and have it work with plain floats, but if that is fixed, for # example as in https://github.com/astropy/astropy/pull/10232, we # can remove the check here and add redshift=u.one to the decorator if not redshift.unit.is_equivalent(u.one): raise u.UnitsError('redshift should be dimensionless') radial_velocity = _redshift_to_velocity(redshift) # If we're initializing from an existing SpectralCoord, keep any # parameters that aren't being overridden if observer is None: observer = getattr(value, 'observer', None) if target is None: target = getattr(value, 'target', None) # As mentioned above, we should only specify the radial velocity # manually if either or both the observer and target are not # specified. if observer is None or target is None: if radial_velocity is None: radial_velocity = getattr(value, 'radial_velocity', None) obj._radial_velocity = radial_velocity obj._observer = cls._validate_coordinate(observer, label='observer') obj._target = cls._validate_coordinate(target, label='target') return obj def __array_finalize__(self, obj): super().__array_finalize__(obj) self._radial_velocity = getattr(obj, '_radial_velocity', None) self._observer = getattr(obj, '_observer', None) self._target = getattr(obj, '_target', None) @staticmethod def _validate_coordinate(coord, label=''): """ Checks the type of the frame and whether a velocity differential and a distance has been defined on the frame object. If no distance is defined, the target is assumed to be "really far away", and the observer is assumed to be "in the solar system". Parameters ---------- coord : `~astropy.coordinates.BaseCoordinateFrame` The new frame to be used for target or observer. label : str, optional The name of the object being validated (e.g. 'target' or 'observer'), which is then used in error messages. 
""" if coord is None: return if not issubclass(coord.__class__, BaseCoordinateFrame): if isinstance(coord, SkyCoord): coord = coord.frame else: raise TypeError(f"{label} must be a SkyCoord or coordinate frame instance") # If the distance is not well-defined, ensure that it works properly # for generating differentials # TODO: change this to not set the distance and yield a warning once # there's a good way to address this in astropy.coordinates # https://github.com/astropy/astropy/issues/10247 with np.errstate(all='ignore'): distance = getattr(coord, 'distance', None) if distance is not None and distance.unit.physical_type == 'dimensionless': coord = SkyCoord(coord, distance=DEFAULT_DISTANCE) warnings.warn( "Distance on coordinate object is dimensionless, an " f"abritrary distance value of {DEFAULT_DISTANCE} will be set instead.", NoDistanceWarning) # If the observer frame does not contain information about the # velocity of the system, assume that the velocity is zero in the # system. if 's' not in coord.data.differentials: warnings.warn( "No velocity defined on frame, assuming {}.".format( ZERO_VELOCITIES), NoVelocityWarning) coord = attach_zero_velocities(coord) return coord def replicate(self, value=None, unit=None, observer=None, target=None, radial_velocity=None, redshift=None, doppler_convention=None, doppler_rest=None, copy=False): """ Return a replica of the `SpectralCoord`, optionally changing the values or attributes. Note that no conversion is carried out by this method - this keeps all the values and attributes the same, except for the ones explicitly passed to this method which are changed. If ``copy`` is set to `True` then a full copy of the internal arrays will be made. By default the replica will use a reference to the original arrays when possible to save memory. Parameters ---------- value : ndarray or `~astropy.units.Quantity` or `SpectralCoord`, optional Spectral values, which should be either wavelength, frequency, energy, wavenumber, or velocity values. unit : str or `~astropy.units.Unit` Unit for the given spectral values. observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional The coordinate (position and velocity) of observer. target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional The coordinate (position and velocity) of target. radial_velocity : `~astropy.units.Quantity`, optional The radial velocity of the target with respect to the observer. redshift : float, optional The relativistic redshift of the target with respect to the observer. doppler_rest : `~astropy.units.Quantity`, optional The rest value to use when expressing the spectral value as a velocity. doppler_convention : str, optional The Doppler convention to use when expressing the spectral value as a velocity. copy : bool, optional If `True`, and ``value`` is not specified, the values are copied to the new `SkyCoord` - otherwise a reference to the same values is used. 
Returns ------- sc : `SpectralCoord` object Replica of this object """ if isinstance(value, u.Quantity): if unit is not None: raise ValueError("Cannot specify value as a Quantity and also specify unit") else: value, unit = value.value, value.unit value = value if value is not None else self.value unit = unit or self.unit observer = self._validate_coordinate(observer) or self.observer target = self._validate_coordinate(target) or self.target doppler_convention = doppler_convention or self.doppler_convention doppler_rest = doppler_rest or self.doppler_rest # If value is being taken from self and copy is Tru if copy: value = value.copy() # Only include radial_velocity if it is not auto-computed from the # observer and target. if (self.observer is None or self.target is None) and radial_velocity is None and redshift is None: radial_velocity = self.radial_velocity with warnings.catch_warnings(): warnings.simplefilter('ignore', NoVelocityWarning) return self.__class__(value=value, unit=unit, observer=observer, target=target, radial_velocity=radial_velocity, redshift=redshift, doppler_convention=doppler_convention, doppler_rest=doppler_rest, copy=False) @property def quantity(self): """ Convert the ``SpectralCoord`` to a `~astropy.units.Quantity`. Equivalent to ``self.view(u.Quantity)``. Returns ------- `~astropy.units.Quantity` This object viewed as a `~astropy.units.Quantity`. """ return self.view(u.Quantity) @property def observer(self): """ The coordinates of the observer. If set, and a target is set as well, this will override any explicit radial velocity passed in. Returns ------- `~astropy.coordinates.BaseCoordinateFrame` The astropy coordinate frame representing the observation. """ return self._observer @observer.setter def observer(self, value): if self.observer is not None: raise ValueError("observer has already been set") self._observer = self._validate_coordinate(value, label='observer') # Switch to auto-computing radial velocity if self._target is not None: self._radial_velocity = None @property def target(self): """ The coordinates of the target being observed. If set, and an observer is set as well, this will override any explicit radial velocity passed in. Returns ------- `~astropy.coordinates.BaseCoordinateFrame` The astropy coordinate frame representing the target. """ return self._target @target.setter def target(self, value): if self.target is not None: raise ValueError("target has already been set") self._target = self._validate_coordinate(value, label='target') # Switch to auto-computing radial velocity if self._observer is not None: self._radial_velocity = None @property def radial_velocity(self): """ Radial velocity of target relative to the observer. Returns ------- `~astropy.units.Quantity` Radial velocity of target. Notes ----- This is different from the ``.radial_velocity`` property of a coordinate frame in that this calculates the radial velocity with respect to the *observer*, not the origin of the frame. """ if self._observer is None or self._target is None: if self._radial_velocity is None: return 0 * KMS else: return self._radial_velocity else: return self._calculate_radial_velocity(self._observer, self._target, as_scalar=True) @property def redshift(self): """ Redshift of target relative to observer. Calculated from the radial velocity. Returns ------- float Redshift of target. 
""" return _velocity_to_redshift(self.radial_velocity) @staticmethod def _calculate_radial_velocity(observer, target, as_scalar=False): """ Compute the line-of-sight velocity from the observer to the target. Parameters ---------- observer : `~astropy.coordinates.BaseCoordinateFrame` The frame of the observer. target : `~astropy.coordinates.BaseCoordinateFrame` The frame of the target. as_scalar : bool If `True`, the magnitude of the velocity vector will be returned, otherwise the full vector will be returned. Returns ------- `~astropy.units.Quantity` The radial velocity of the target with respect to the observer. """ # Convert observer and target to ICRS to avoid finite differencing # calculations that lack numerical precision. observer_icrs = observer.transform_to(ICRS()) target_icrs = target.transform_to(ICRS()) pos_hat = SpectralCoord._normalized_position_vector(observer_icrs, target_icrs) d_vel = target_icrs.velocity - observer_icrs.velocity vel_mag = pos_hat.dot(d_vel) if as_scalar: return vel_mag else: return vel_mag * pos_hat @staticmethod def _normalized_position_vector(observer, target): """ Calculate the normalized position vector between two frames. Parameters ---------- observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The observation frame or coordinate. target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The target frame or coordinate. Returns ------- pos_hat : `BaseRepresentation` Position representation. """ d_pos = (target.cartesian.without_differentials() - observer.cartesian.without_differentials()) dp_norm = d_pos.norm() # Reset any that are 0 to 1 to avoid nans from 0/0 dp_norm[dp_norm == 0] = 1 * dp_norm.unit pos_hat = d_pos / dp_norm return pos_hat @u.quantity_input(velocity=u.km/u.s) def with_observer_stationary_relative_to(self, frame, velocity=None, preserve_observer_frame=False): """ A new `SpectralCoord` with the velocity of the observer altered, but not the position. If a coordinate frame is specified, the observer velocities will be modified to be stationary in the specified frame. If a coordinate instance is specified, optionally with non-zero velocities, the observer velocities will be updated so that the observer is co-moving with the specified coordinates. Parameters ---------- frame : str, `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord` The observation frame in which the observer will be stationary. This can be the name of a frame (e.g. 'icrs'), a frame class, frame instance with no data, or instance with data. This can optionally include velocities. velocity : `~astropy.units.Quantity` or `~astropy.coordinates.CartesianDifferential`, optional If ``frame`` does not contain velocities, these can be specified as a 3-element `~astropy.units.Quantity`. In the case where this is also not specified, the velocities default to zero. preserve_observer_frame : bool If `True`, the final observer frame class will be the same as the original one, and if `False` it will be the frame of the velocity reference class. Returns ------- new_coord : `SpectralCoord` The new coordinate object representing the spectral data transformed based on the observer's new velocity frame. 
""" if self.observer is None or self.target is None: raise ValueError("This method can only be used if both observer " "and target are defined on the SpectralCoord.") # Start off by extracting frame if a SkyCoord was passed in if isinstance(frame, SkyCoord): frame = frame.frame if isinstance(frame, BaseCoordinateFrame): if not frame.has_data: frame = frame.realize_frame(CartesianRepresentation(0 * u.km, 0 * u.km, 0 * u.km)) if frame.data.differentials: if velocity is not None: raise ValueError('frame already has differentials, cannot also specify velocity') # otherwise frame is ready to go else: if velocity is None: differentials = ZERO_VELOCITIES else: differentials = CartesianDifferential(velocity) frame = frame.realize_frame(frame.data.with_differentials(differentials)) if isinstance(frame, (type, str)): if isinstance(frame, type): frame_cls = frame elif isinstance(frame, str): frame_cls = frame_transform_graph.lookup_name(frame) if velocity is None: velocity = 0 * u.m / u.s, 0 * u.m / u.s, 0 * u.m / u.s elif velocity.shape != (3,): raise ValueError('velocity should be a Quantity vector with 3 elements') frame = frame_cls(0 * u.m, 0 * u.m, 0 * u.m, *velocity, representation_type='cartesian', differential_type='cartesian') observer = update_differentials_to_match(self.observer, frame, preserve_observer_frame=preserve_observer_frame) # Calculate the initial and final los velocity init_obs_vel = self._calculate_radial_velocity(self.observer, self.target, as_scalar=True) fin_obs_vel = self._calculate_radial_velocity(observer, self.target, as_scalar=True) # Apply transformation to data new_data = _apply_relativistic_doppler_shift(self, fin_obs_vel - init_obs_vel) new_coord = self.replicate(value=new_data, observer=observer) return new_coord def with_radial_velocity_shift(self, target_shift=None, observer_shift=None): """ Apply a velocity shift to this spectral coordinate. The shift can be provided as a redshift (float value) or radial velocity (`~astropy.units.Quantity` with physical type of 'speed'). Parameters ---------- target_shift : float or `~astropy.units.Quantity` Shift value to apply to current target. observer_shift : float or `~astropy.units.Quantity` Shift value to apply to current observer. Returns ------- `SpectralCoord` New spectral coordinate with the target/observer velocity changed to incorporate the shift. This is always a new object even if ``target_shift`` and ``observer_shift`` are both `None`. 
""" if observer_shift is not None and (self.target is None or self.observer is None): raise ValueError("Both an observer and target must be defined " "before applying a velocity shift.") for arg in [x for x in [target_shift, observer_shift] if x is not None]: if isinstance(arg, u.Quantity) and not arg.unit.is_equivalent((u.one, KMS)): raise u.UnitsError("Argument must have unit physical type " "'speed' for radial velocty or " "'dimensionless' for redshift.") # The target or observer value is defined but is not a quantity object, # assume it's a redshift float value and convert to velocity if target_shift is None: if self._observer is None or self._target is None: return self.replicate() target_shift = 0 * KMS else: target_shift = u.Quantity(target_shift) if target_shift.unit.physical_type == 'dimensionless': target_shift = _redshift_to_velocity(target_shift) if self._observer is None or self._target is None: return self.replicate(value=_apply_relativistic_doppler_shift(self, target_shift), radial_velocity=self.radial_velocity + target_shift) if observer_shift is None: observer_shift = 0 * KMS else: observer_shift = u.Quantity(observer_shift) if observer_shift.unit.physical_type == 'dimensionless': observer_shift = _redshift_to_velocity(observer_shift) target_icrs = self._target.transform_to(ICRS()) observer_icrs = self._observer.transform_to(ICRS()) pos_hat = SpectralCoord._normalized_position_vector(observer_icrs, target_icrs) target_velocity = _get_velocities(target_icrs) + target_shift * pos_hat observer_velocity = _get_velocities(observer_icrs) + observer_shift * pos_hat target_velocity = CartesianDifferential(target_velocity.xyz) observer_velocity = CartesianDifferential(observer_velocity.xyz) new_target = (target_icrs .realize_frame(target_icrs.cartesian.with_differentials(target_velocity)) .transform_to(self._target)) new_observer = (observer_icrs .realize_frame(observer_icrs.cartesian.with_differentials(observer_velocity)) .transform_to(self._observer)) init_obs_vel = self._calculate_radial_velocity(observer_icrs, target_icrs, as_scalar=True) fin_obs_vel = self._calculate_radial_velocity(new_observer, new_target, as_scalar=True) new_data = _apply_relativistic_doppler_shift(self, fin_obs_vel - init_obs_vel) return self.replicate(value=new_data, observer=new_observer, target=new_target) def to_rest(self): """ Transforms the spectral axis to the rest frame. """ if self.observer is not None and self.target is not None: return self.with_observer_stationary_relative_to(self.target) result = _apply_relativistic_doppler_shift(self, -self.radial_velocity) return self.replicate(value=result, radial_velocity=0. 
* KMS, redshift=None) def __repr__(self): prefixstr = '<' + self.__class__.__name__ + ' ' try: radial_velocity = self.radial_velocity redshift = self.redshift except ValueError: radial_velocity = redshift = 'Undefined' repr_items = [f'{prefixstr}'] if self.observer is not None: observer_repr = indent(repr(self.observer), 14 * ' ').lstrip() repr_items.append(f' observer: {observer_repr}') if self.target is not None: target_repr = indent(repr(self.target), 12 * ' ').lstrip() repr_items.append(f' target: {target_repr}') if (self._observer is not None and self._target is not None) or self._radial_velocity is not None: if self.observer is not None and self.target is not None: repr_items.append(' observer to target (computed from above):') else: repr_items.append(' observer to target:') repr_items.append(f' radial_velocity={radial_velocity}') repr_items.append(f' redshift={redshift}') if self.doppler_rest is not None or self.doppler_convention is not None: repr_items.append(f' doppler_rest={self.doppler_rest}') repr_items.append(f' doppler_convention={self.doppler_convention}') arrstr = np.array2string(self.view(np.ndarray), separator=', ', prefix=' ') if len(repr_items) == 1: repr_items[0] += f'{arrstr}{self._unitstr:s}' else: repr_items[1] = ' (' + repr_items[1].lstrip() repr_items[-1] += ')' repr_items.append(f' {arrstr}{self._unitstr:s}') return '\n'.join(repr_items) + '>'
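For orientation before the next record, here is a minimal usage sketch of the spectral-coordinate machinery shown above. It assumes the astropy package (whose public SpectralCoord class this file implements) is installed; the 500 nm value and 100 km/s radial velocity are arbitrary illustration values, not taken from the source.

import astropy.units as u
from astropy.coordinates import SpectralCoord

# A spectral value with an explicit radial velocity (no observer/target frames).
sc = SpectralCoord(500 * u.nm, radial_velocity=100 * u.km / u.s)

# Doppler-correct into the rest frame; per to_rest() above, the replicated
# object carries zero radial velocity.
rest = sc.to_rest()
print(rest.radial_velocity)   # 0.0 km / s

# Conversions still behave like a Quantity, with spectral equivalencies applied.
print(sc.to(u.THz, equivalencies=u.spectral()))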
_compress
Set or check the compression of a :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param obj: object on which to set or check compression @type obj: :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param comptype: type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :py:mod:`pycdf.const`. If not specified, will not change compression. @type comptype: ctypes.c_long @param param: Compression parameter, see CDF CRM 4.10 and :py:mod:`pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) @type param: ctypes.c_long @return: (comptype, param) currently in effect @rtype: tuple
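The helper documented above is internal and its body is masked in the listing below, so the following is only a hedged usage sketch: the same (comptype, param) tuple is exercised through the public CDF.compress and Var.compress wrappers described later in this file. The file name is a placeholder, and the CDF C library must be installed for pycdf to import.

import pycdf

# Create a new, empty CDF (blank master), switch it to gzip compression at
# level 9, then read back the (comptype, param) pair currently in effect.
with pycdf.CDF('example.cdf', '') as cdffile:
    cdffile.compress(pycdf.const.GZIP_COMPRESSION, 9)
    print(cdffile.compress())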
#!/usr/bin/env python # -*- coding: utf-8 -*- """ das developers note: This is a modification of the original SpacePy pycdf package. All references to the greater spacepy package have been removed to create a small standalone module. --cwp 2018-10-18 The libcdf.so location code has been changed to find the version installed in anaconda. --cwp 2020-04-06 This package provides a Python interface to the Common Data Format (CDF) library used for many NASA missions, available at http://cdf.gsfc.nasa.gov/. It is targeted at Python 2.6+ and should work without change on either Python 2 or Python 3. The interface is intended to be 'pythonic' rather than reproducing the C interface. To open or close a CDF and access its variables, see the :class:`CDF` class. Accessing data within the variables is via the :class:`Var` class. The :data:`lib` object provides access to some routines that affect the functionality of the library in general. The :mod:`~pycdf.const` module contains constants useful for accessing the underlying library. Authors: Jon Niehof Institution: University of New Hampshire Contact: [email protected] Copyright 2010-2015 Los Alamos National Security, LLC. """ __contact__ = 'Jon Niehof, [email protected]' try: from collections.abc import MutableMapping, MutableSequence except ImportError: from collections import MutableMapping, MutableSequence import ctypes import ctypes.util import datetime import operator import os import os.path import shutil import sys import tempfile import warnings import weakref import numpy import numpy.ma #Import const AFTER library loaded, so failed load doesn't leave half-imported #from . import const try: str_classes = (str, bytes, unicode) except NameError: str_classes = (str, bytes) class Library(object): """ Abstraction of the base CDF C library and its state. Not normally intended for end-user use. An instance of this class is created at package load time as the :data:`~pycdf.lib` variable, providing access to the underlying C library if necessary. The CDF library itself is described in section 2.1 of the CDF user's guide, as well as the CDF C reference manual. Calling the C library directly requires knowledge of :mod:`ctypes`. Instantiating this object loads the C library, see :doc:`/pycdf` docs for details. .. autosummary:: ~Library.call ~Library.check_status ~Library.datetime_to_epoch ~Library.datetime_to_epoch16 ~Library.datetime_to_tt2000 ~Library.epoch_to_datetime ~Library.epoch_to_epoch16 ~Library.epoch_to_num ~Library.epoch_to_tt2000 ~Library.epoch16_to_datetime ~Library.epoch16_to_epoch ~Library.epoch16_to_tt2000 ~Library.set_backward supports_int8 ~Library.tt2000_to_datetime ~Library.tt2000_to_epoch ~Library.tt2000_to_epoch16 v_datetime_to_epoch v_datetime_to_epoch16 v_datetime_to_tt2000 v_epoch_to_datetime v_epoch_to_tt2000 v_epoch16_to_datetime v_epoch16_to_tt2000 v_tt2000_to_datetime v_tt2000_to_epoch v_tt2000_to_epoch16 libpath version .. automethod:: call .. automethod:: check_status .. automethod:: datetime_to_epoch .. automethod:: datetime_to_epoch16 .. automethod:: datetime_to_tt2000 .. automethod:: epoch_to_datetime .. automethod:: epoch_to_epoch16 .. automethod:: epoch_to_num .. automethod:: epoch_to_tt2000 .. automethod:: epoch16_to_datetime .. automethod:: epoch16_to_epoch .. automethod:: epoch16_to_tt2000 .. automethod:: set_backward .. attribute:: supports_int8 True if this library supports INT8 and TIME_TT2000 types; else False. .. automethod:: tt2000_to_datetime .. automethod:: tt2000_to_epoch ..
automethod:: tt2000_to_epoch16 .. method:: v_datetime_to_epoch(datetime) A vectorized version of :meth:`datetime_to_epoch` which takes a numpy array of datetimes as input and returns an array of epochs. .. method:: v_datetime_to_epoch16(datetime) A vectorized version of :meth:`datetime_to_epoch16` which takes a numpy array of datetimes as input and returns an array of epoch16. .. method:: v_datetime_to_tt2000(datetime) A vectorized version of :meth:`datetime_to_tt2000` which takes a numpy array of datetimes as input and returns an array of TT2000. .. method:: v_epoch_to_datetime(epoch) A vectorized version of :meth:`epoch_to_datetime` which takes a numpy array of epochs as input and returns an array of datetimes. .. method:: v_epoch_to_tt2000(epoch) A vectorized version of :meth:`epoch_to_tt2000` which takes a numpy array of epochs as input and returns an array of tt2000s. .. method:: v_epoch16_to_datetime(epoch0, epoch1) A vectorized version of :meth:`epoch16_to_datetime` which takes a numpy array of epoch16 as input and returns an array of datetimes. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_epoch16_to_tt2000(epoch16) A vectorized version of :meth:`epoch16_to_tt2000` which takes a numpy array of epoch16 as input and returns an array of tt2000s. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_tt2000_to_datetime(tt2000) A vectorized version of :meth:`tt2000_to_datetime` which takes a numpy array of tt2000 as input and returns an array of datetimes. .. method:: v_tt2000_to_epoch(tt2000) A vectorized version of :meth:`tt2000_to_epoch` which takes a numpy array of tt2000 as input and returns an array of epochs. .. method:: v_tt2000_to_epoch16(tt2000) A vectorized version of :meth:`tt2000_to_epoch16` which takes a numpy array of tt2000 as input and returns an array of epoch16. .. attribute:: libpath The path where pycdf found the CDF C library, potentially useful in debugging. If this contains just the name of a file (with no path information), then the system linker found the library for pycdf. On Linux, ``ldconfig -p`` may be useful for displaying the system's library resolution. .. attribute:: version Version of the CDF library, (version, release, increment, subincrement) """ def __init__(self, libpath=None, library=None): """Load the CDF C library. Searches for the library in the order: 1. Appropriately-named file in CDF_LIB 2. Appropriately-named file in CDF_BASE 3. Standard library search path @raise CDFError: BAD_DATA_TYPE if can't map types properly """ if not 'CDF_TMP' in os.environ: os.environ['CDF_TMP'] = tempfile.gettempdir() if not library: if not libpath: self.libpath, self._library = self._find_lib() if self._library is None: raise Exception(( 'Cannot load CDF C library; checked {0}. 
' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(self.libpath))) else: self._library = ctypes.CDLL(libpath) self.libpath = libpath else: self._library = library self.libpath = libpath self._library.CDFlib.restype = ctypes.c_long #commonly used, so set it up here self._library.EPOCHbreakdown.restype = ctypes.c_long self._library.computeEPOCH.restype = ctypes.c_double self._library.computeEPOCH.argtypes = [ctypes.c_long] * 7 self._library.computeEPOCH16.restype = ctypes.c_double self._library.computeEPOCH16.argtypes = [ctypes.c_long] * 10 + \ [ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDFsetFileBackward'): self._library.CDFsetFileBackward.restype = None self._library.CDFsetFileBackward.argtypes = [ctypes.c_long] #Map old name to the 3.7.1+ name if not hasattr(self._library, 'computeTT2000') \ and hasattr(self._library, 'CDF_TT2000_from_UTC_parts'): self._library.computeTT2000 \ = self._library.CDF_TT2000_from_UTC_parts if hasattr(self._library, 'computeTT2000'): self._library.computeTT2000.restype = ctypes.c_longlong self._library.computeTT2000.argtypes = \ [ctypes.c_double] *9 #Map old name to the 3.7.1+ name if not hasattr(self._library, 'breakdownTT2000') \ and hasattr(self._library, 'CDF_TT2000_to_UTC_parts'): self._library.breakdownTT2000 \ = self._library.CDF_TT2000_to_UTC_parts if hasattr(self._library, 'breakdownTT2000'): self._library.breakdownTT2000.restype = None self._library.breakdownTT2000.argtypes = \ [ctypes.c_longlong] + [ctypes.POINTER(ctypes.c_double)] * 9 if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH'): self._library.CDF_TT2000_to_UTC_EPOCH.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH.argtypes = [ctypes.c_longlong] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH'): self._library.CDF_TT2000_from_UTC_EPOCH.restype = ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH.argtypes = [ctypes.c_double] if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH16'): self._library.CDF_TT2000_to_UTC_EPOCH16.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH16.argtypes = \ [ctypes.c_longlong, ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH16'): self._library.CDF_TT2000_from_UTC_EPOCH16.restype = \ ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH16.argtypes = \ [ctypes.POINTER(ctypes.c_double * 2)] #Get CDF version information ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) sub = ctypes.c_char(b' ') self.call(const.GET_, const.LIB_VERSION_, ctypes.byref(ver), const.GET_, const.LIB_RELEASE_, ctypes.byref(rel), const.GET_, const.LIB_INCREMENT_, ctypes.byref(inc), const.GET_, const.LIB_subINCREMENT_, ctypes.byref(sub)) ver = ver.value rel = rel.value inc = inc.value sub = sub.value self.version = (ver, rel, inc, sub) self._del_middle_rec_bug = ver < 3 or (ver == 3 and (rel < 4 or (rel == 4 and inc < 1))) self.supports_int8 = (ver > 3 or (ver == 3 and rel >=4)) self.cdftypenames = {const.CDF_BYTE.value: 'CDF_BYTE', const.CDF_CHAR.value: 'CDF_CHAR', const.CDF_INT1.value: 'CDF_INT1', const.CDF_UCHAR.value: 'CDF_UCHAR', const.CDF_UINT1.value: 'CDF_UINT1', const.CDF_INT2.value: 'CDF_INT2', const.CDF_UINT2.value: 'CDF_UINT2', const.CDF_INT4.value: 'CDF_INT4', const.CDF_UINT4.value: 'CDF_UINT4', const.CDF_INT8.value: 'CDF_INT8', const.CDF_FLOAT.value: 'CDF_FLOAT', const.CDF_REAL4.value: 'CDF_REAL4', const.CDF_DOUBLE.value: 'CDF_DOUBLE', const.CDF_REAL8.value: 'CDF_REAL8', const.CDF_EPOCH.value: 'CDF_EPOCH', 
const.CDF_EPOCH16.value: 'CDF_EPOCH16', const.CDF_TIME_TT2000.value: 'CDF_TIME_TT2000', } self.numpytypedict = {const.CDF_BYTE.value: numpy.int8, const.CDF_CHAR.value: numpy.int8, const.CDF_INT1.value: numpy.int8, const.CDF_UCHAR.value: numpy.uint8, const.CDF_UINT1.value: numpy.uint8, const.CDF_INT2.value: numpy.int16, const.CDF_UINT2.value: numpy.uint16, const.CDF_INT4.value: numpy.int32, const.CDF_UINT4.value: numpy.uint32, const.CDF_INT8.value: numpy.int64, const.CDF_FLOAT.value: numpy.float32, const.CDF_REAL4.value: numpy.float32, const.CDF_DOUBLE.value: numpy.float64, const.CDF_REAL8.value: numpy.float64, const.CDF_EPOCH.value: numpy.float64, const.CDF_EPOCH16.value: numpy.dtype((numpy.float64, 2)), const.CDF_TIME_TT2000.value: numpy.int64, } self.timetypes = [const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value] if not self.supports_int8: del self.cdftypenames[const.CDF_INT8.value] del self.numpytypedict[const.CDF_INT8.value] del self.cdftypenames[const.CDF_TIME_TT2000.value] del self.numpytypedict[const.CDF_TIME_TT2000.value] elif sys.platform.startswith('linux') \ and os.uname()[4].startswith('arm') \ and hasattr(self._library, 'computeTT2000') \ and self._library.computeTT2000( 2010, 1, 1, 0, 0, 0, 0, 0, 0) != 315576066184000000: #TT2000 call failed, so probably need to type-pun #double arguments to variadic functions. #Calling convention for non-variadic functions with floats #is unique, but convention for ints is same as variadic. #So type-pun arguments to integers to force that calling #convention. if ctypes.sizeof(ctypes.c_longlong) != \ ctypes.sizeof(ctypes.c_double): warnings.warn('ARM with unknown type sizes; ' 'TT2000 functions will not work.') else: self._library.computeTT2000.argtypes = \ [ctypes.c_longlong] * 9 c_ll_p = ctypes.POINTER(ctypes.c_longlong) if self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( 2010)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) != 315576066184000000: warnings.warn('ARM with unknown calling convention; ' 'TT2000 functions will not work.') self.datetime_to_tt2000 = self._datetime_to_tt2000_typepunned v_epoch16_to_datetime = numpy.frompyfunc( self.epoch16_to_datetime, 2, 1) self.v_epoch16_to_datetime = \ lambda x: v_epoch16_to_datetime(x[..., 0], x[..., 1]) self.v_epoch_to_datetime = numpy.frompyfunc( self.epoch_to_datetime, 1, 1) self.v_tt2000_to_datetime = numpy.frompyfunc( self.tt2000_to_datetime, 1, 1) self.v_datetime_to_epoch = numpy.vectorize( self.datetime_to_epoch, otypes=[numpy.float64]) v_datetime_to_epoch16 = numpy.frompyfunc( self.datetime_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. 
We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_datetime_to_epoch16(x): retval = numpy.require(v_datetime_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_datetime_to_epoch16 = _v_datetime_to_epoch16 self.v_datetime_to_tt2000 = numpy.vectorize( self.datetime_to_tt2000, otypes=[numpy.int64]) self.v_epoch_to_tt2000 = numpy.vectorize( self.epoch_to_tt2000, otypes=[numpy.int64]) self.v_tt2000_to_epoch = numpy.vectorize( self.tt2000_to_epoch, otypes=[numpy.float64]) v_epoch16_to_tt2000 = numpy.frompyfunc( self.epoch16_to_tt2000, 2, 1) self.v_epoch16_to_tt2000 = \ lambda x: v_epoch16_to_tt2000(x[..., 0], x[..., 1]) v_tt2000_to_epoch16 = numpy.frompyfunc( self.tt2000_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_tt2000_to_epoch16(x): retval = numpy.require(v_tt2000_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_tt2000_to_epoch16 = _v_tt2000_to_epoch16 if not self.supports_int8: self.datetime_to_tt2000 = self._bad_tt2000 self.tt2000_to_datetime = self._bad_tt2000 self.v_datetime_to_tt2000 = self._bad_tt2000 self.v_tt2000_to_datetime = self._bad_tt2000 self.epoch_to_tt2000 = self._bad_tt2000 self.v_epoch_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch = self._bad_tt2000 self.v_tt2000_to_epoch = self._bad_tt2000 self.epoch_16_to_tt2000 = self._bad_tt2000 self.v_epoch16_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch16 = self._bad_tt2000 self.v_tt2000_to_epoch16 = self._bad_tt2000 #Default to V2 CDF self.set_backward(True) @staticmethod def _find_lib(): """ Search for the CDF library Searches in likely locations for CDF libraries and attempts to load them. Stops at first successful load and, if fails, reports all the files that were tried as libraries. Returns ======= out : tuple This is either (path to library, loaded library) or, in the event of failure, (None, list of libraries tried) """ failed = [] for libpath in Library._lib_paths(): try: lib = ctypes.CDLL(libpath) except: failed.append(libpath) else: return libpath, lib return (failed, None) @staticmethod def _lib_paths(): """Find candidate paths for the CDF library Does not check that the library is actually in any particular directory, just returns a list of possible locations, in priority order. Returns ======= out : generator of str paths that look like the CDF library """ #What the library might be named names = { 'win32': ['cdf.dll'], 'darwin': ['libcdf.dylib', 'cdf.dylib', 'libcdf.so'], 'linux2': ['libcdf.so'], 'linux': ['libcdf.so'], } names = names.get(sys.platform, ['libcdf.so']) #All existing CDF-library-like paths within a directory search_dir = lambda x: \ [os.path.join(x, fname) for fname in names if os.path.exists(os.path.join(x, fname))] # Only use anaconda locations... # Defined during builds ... if 'PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['PREFIX'], 'lib')): yield p # defined when conda is activated ... 
if 'CONDA_PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'lib')): yield p # Special subdirectory for anaconda unix packages on windows if 'LIBRARY_BIN' in os.environ: for p in search_dir(os.environ['LIBRARY_BIN']): yield p ctypespath = ctypes.util.find_library( 'cdf.dll' if sys.platform == 'win32' else 'cdf') if ctypespath: yield ctypespath def check_status(self, status, ignore=()): """ Raise exception or warning based on return status of CDF call Parameters ========== status : int status returned by the C library Other Parameters ================ ignore : sequence of ctypes.c_long CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. (Default none). Raises ====== CDFError : if status < CDF_WARN, indicating an error Warns ===== CDFWarning : if CDF_WARN <= status < CDF_OK, indicating a warning. Returns ======= out : int status (unchanged) """ if status == const.CDF_OK or status in ignore: return status if status < const.CDF_WARN: raise CDFError(status) else: warning = CDFWarning(status) warning.warn() return status def call(self, *args, **kwargs): """ Call the CDF internal interface Passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with :meth:`check_status`. Terminal NULL is automatically added to args. Parameters ========== args : various, see :mod:`ctypes` Passed directly to the CDF library interface. Useful constants are defined in the :mod:`~pycdf.const` module. Other Parameters ================ ignore : sequence of CDF statuses sequence of CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. Returns ======= out : int CDF status from the library Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ if 'ignore' in kwargs: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) ), kwargs['ignore']) else: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) )) def set_backward(self, backward=True): """ Set backward compatibility mode for new CDFs Unless backward compatible mode is set, CDF files created by the version 3 library can not be read by V2. Parameters ========== backward : boolean Set backward compatible mode if True; clear it if False. Raises ====== ValueError : if backward=False and underlying CDF library is V2 """ if self.version[0] < 3: if not backward: raise ValueError( 'Cannot disable backward-compatible mode for CDF version 2.') else: return self._library.CDFsetFileBackward(const.BACKWARDFILEon if backward else const.BACKWARDFILEoff) def epoch_to_datetime(self, epoch): """ Converts a CDF epoch value to a datetime Parameters ========== epoch : float epoch value from CDF Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
See Also ======== v_epoch_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) self._library.EPOCHbreakdown(ctypes.c_double(epoch), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999000) else: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, msec.value * 1000) def datetime_to_epoch(self, dt): """ Converts a Python datetime to a CDF Epoch value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : float epoch corresponding to dt See Also ======== v_datetime_to_epoch """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) micro = dt.microsecond % 1000 if micro >= 500 and dt.year < 9999: dt += datetime.timedelta(0, 0, 1000) return self._library.computeEPOCH(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000)) def epoch16_to_datetime(self, epoch0, epoch1): """ Converts a CDF epoch16 value to a datetime .. note:: The call signature has changed since SpacePy 0.1.2. Formerly this method took a single argument with two values; now it requires two arguments (one for each value). To convert existing code, replace ``epoch16_to_datetime(epoch)`` with ``epoch16_to_datetime(*epoch)``. Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. See Also ======== v_epoch16_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) usec = ctypes.c_long(0) nsec = ctypes.c_long(0) psec = ctypes.c_long(0) self._library.EPOCH16breakdown((ctypes.c_double * 2)(epoch0, epoch1), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec), ctypes.byref(psec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) micro = int(float(msec.value) * 1000 + float(usec.value) + float(nsec.value) / 1000 + float(psec.value) / 1e6 + 0.5) if micro < 1000000: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_epoch16(self, dt): """ Converts a Python datetime to a CDF Epoch16 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : list of float epoch16 corresponding to dt See Also ======== v_datetime_to_epoch16 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) #Default to "illegal epoch" epoch16 = (ctypes.c_double * 2)(-1., -1.) 
if self._library.computeEPOCH16(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0, 0, epoch16): return (-1., -1.) #Failure, so illegal epoch return (epoch16[0], epoch16[1]) def epoch_to_epoch16(self, epoch): """ Converts a CDF EPOCH to a CDF EPOCH16 value Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : (double, double) EPOCH16 corresponding to epoch """ e = numpy.require(epoch, numpy.float64) s = numpy.trunc(e / 1000.0) #ugly numpy stuff, probably a better way.... res = numpy.hstack((s, (e - s * 1000.0) * 1e9)) if len(res) <= 2: return res newshape = list(res.shape[0:-2]) newshape.append(res.shape[-1] // 2) newshape.append(2) return numpy.rollaxis(res.reshape(newshape), -1, -2) def epoch_to_num(self, epoch): """ Convert CDF EPOCH to matplotlib number. Same output as :func:`~matplotlib.dates.date2num` and useful for plotting large data sets without converting the times through datetime. Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : double Floating point number representing days since 0001-01-01. """ #date2num day 1 is 1/1/1 00UT #epoch 1/1/1 00UT is 31622400000.0 (millisecond) return (epoch - 31622400000.0) / (24 * 60 * 60 * 1000.0) + 1.0 def epoch16_to_epoch(self, epoch16): """ Converts a CDF EPOCH16 to a CDF EPOCH value Parameters ========== epoch16 : (double, double) EPOCH16 to convert. Lists and numpy arrays are acceptable. LAST dimension should be 2: the two pairs of EPOCH16 Returns ======= out : double EPOCH corresponding to epoch16 """ e = numpy.require(epoch16, numpy.float64) return e[..., 0] * 1000.0 + numpy.round(e[..., 1] / 1e9) def tt2000_to_datetime(self, tt2000): """ Converts a CDF TT2000 value to a datetime .. note:: Although TT2000 values support leapseconds, Python's datetime object does not. Any times after 23:59:59.999999 will be truncated to 23:59:59.999999. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
See Also ======== v_tt2000_to_datetime """ yyyy = ctypes.c_double(0) mm = ctypes.c_double(0) dd = ctypes.c_double(0) hh = ctypes.c_double(0) min = ctypes.c_double(0) sec = ctypes.c_double(0) msec = ctypes.c_double(0) usec = ctypes.c_double(0) nsec = ctypes.c_double(0) self._library.breakdownTT2000( ctypes.c_longlong(tt2000), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) sec = int(sec.value) if sec >= 60: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), 59, 999999) micro = int(msec.value * 1000 + usec.value + nsec.value / 1000 + 0.5) if micro < 1000000: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_tt2000(self, dt): """ Converts a Python datetime to a CDF TT2000 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0) def _datetime_to_tt2000_typepunned(self, dt): """ Converts a Python datetime to a CDF TT2000 value Typepunned version that passes doubles as longlongs, to get around ARM calling convention oddness. Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ c_ll_p = ctypes.POINTER(ctypes.c_longlong) if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( dt.year)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.month)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.day)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.hour)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.minute)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.second)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond // 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond % 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) def epoch_to_tt2000(self, epoch): """ Converts a CDF EPOCH to a CDF TT2000 value Parameters ========== epoch : double EPOCH to convert Returns ======= out : int tt2000 corresponding to epoch See Also ======== v_epoch_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH(epoch) def tt2000_to_epoch(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH .. note:: Although TT2000 values support leapseconds, CDF EPOCH values do not. 
Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double EPOCH corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch """ return self._library.CDF_TT2000_to_UTC_EPOCH(tt2000) def epoch16_to_tt2000(self, epoch0, epoch1): """ Converts a CDF epoch16 value to TT2000 .. note:: Because TT2000 does not support picoseconds, the picoseconds value in epoch is ignored (i.e., truncated.) Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : long TT2000 corresponding to epoch. See Also ======== v_epoch16_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH16( (ctypes.c_double * 2)(epoch0, epoch1)) def tt2000_to_epoch16(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH16 .. note:: Although TT2000 values support leapseconds, CDF EPOCH16 values do not. Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double, double EPOCH16 corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch16 """ #Default to "illegal epoch" if isn't populated epoch16 = (ctypes.c_double * 2)(-1., -1.) if self._library.CDF_TT2000_to_UTC_EPOCH16(tt2000, epoch16): return (-1., -1.) #Failure; illegal epoch return (epoch16[0], epoch16[1]) def _bad_tt2000(*args, **kwargs): """Convenience function for complaining that TT2000 not supported""" raise NotImplementedError( 'TT2000 functions require CDF library 3.4.0 or later') def download_library(): """Download and install the CDF library""" if sys.platform != 'win32': raise NotImplementedError( 'CDF library install only supported on Windows') try: import html.parser as HTMLParser except ImportError: import HTMLParser #https://stackoverflow.com/questions/1713038/super-fails-with-error-typeerror-argument-1-must-be-type-not-classobj class LinkParser(HTMLParser.HTMLParser, object): def __init__(self, *args, **kwargs): self.links_found = [] super(LinkParser, self).__init__(*args, **kwargs) def handle_starttag(self, tag, attrs): if tag != 'a' or attrs[0][0] != 'href': return self.links_found.append(attrs[0][1]) import re import subprocess try: import urllib.request as u except ImportError: import urllib as u # Removed reference to spacepy #import spacepy #if spacepy.config.get('user_agent', None): # class AppURLopener(u.FancyURLopener): # version = spacepy.config['user_agent'] # u._urlopener = AppURLopener() baseurl = 'https://spdf.sci.gsfc.nasa.gov/pub/software/cdf/dist/' url = u.urlopen(baseurl) listing = url.read() url.close() p = LinkParser() p.feed(listing) cdfdist = [l for l in p.links_found if re.match('^cdf3\d_\d(?:_\d)?/$', l)] if not cdfdist: raise RuntimeError( "Couldn't find CDF distribution directory to download") cdfdist.sort(key=lambda x: x.rstrip('/').split('_')) cdfverbase = cdfdist[-1].rstrip('/') instfname = cdfverbase + ('_0' if cdfverbase.count('_') == 1 else '') + \ '-setup-{0}.exe'.format(len('%x' % sys.maxsize)*4) insturl = baseurl + cdfverbase + '/windows/' + instfname tmpdir = tempfile.mkdtemp() try: fname, status = u.urlretrieve(insturl, os.path.join(tmpdir, instfname)) subprocess.check_call([fname, '/install', '/q1'], shell=False) finally: shutil.rmtree(tmpdir) _libpath, _library = Library._find_lib() 
if _library is None: raise Exception(('Cannot load CDF C library; checked {0}. ' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(_libpath))) from . import const lib = Library(_libpath, _library) """Module global library object. Initalized at module load time so all classes have ready access to the CDF library and a common state. E.g: >>> import pycdf >>> pycdf.lib.version (3, 3, 0, ' ') """ class CDFException(Exception): """ Base class for errors or warnings in the CDF library. Not normally used directly, but in subclasses :class:`CDFError` and :class:`CDFWarning`. Error messages provided by this class are looked up from the underlying C library. """ def __init__(self, status): """ Create a CDF Exception Uses CDF C library to look up an appropriate error message. Parameters ========== status : ctypes.c_long CDF status """ self.status = status self.string = 'CDF error ' + repr(status) + ', unable to get details.' message = ctypes.create_string_buffer(const.CDF_STATUSTEXT_LEN + 1) try: retval = lib._library.CDFlib(const.SELECT_, const.CDF_STATUS_, ctypes.c_long(status), const.GET_, const.STATUS_TEXT_, message, const.NULL_) if retval == const.CDF_OK: if isinstance(message.value, str): self.string = message.value elif isinstance(message.value, bytes): self.string = message.value.decode() except: pass def __str__(self): """ Error string associated with the library error. Returns ======= out : str Error message from the CDF library. """ return self.string class CDFError(CDFException): """Raised for an error in the CDF library.""" pass class CDFWarning(CDFException, UserWarning): """Used for a warning in the CDF library.""" def warn(self, level=4): """ Issues a warning based on the information stored in my exception Intended for use in check_status or similar wrapper function. Other Parameters ================ level : int optional (default 3), how far up the stack the warning should be reported. Passed directly to :class:`warnings.warn`. """ warnings.warn(self, self.__class__, level) class EpochError(Exception): """Used for errors in epoch routines""" pass # MASKED: _compress function (lines 1283-1356) class CDF(MutableMapping): """ Python object representing a CDF file. Open or create a CDF file by creating an object of this class. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. A readonly CDF with many variables may be slow to close. See :meth:`readonly`. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :meth:`close` or :meth:`save` when done. .. note:: Existing CDF files are opened read-only by default, see :meth:`readonly` to change. CDF supports the `with <http://docs.python.org/tutorial/inputoutput.html#methods-of-file-objects>`_ keyword, like other file objects, so: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... 
#do brilliant things with the CDF will open the CDF, execute the indented statements, and close the CDF when finished or when an error occurs. The `python docs <http://docs.python.org/reference/compound_stmts.html#with>`_ include more detail on this 'context manager' ability. CDF objects behave like a python `dictionary <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_, where the keys are names of variables in the CDF, and the values, :class:`Var` objects. As a dictionary, they are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_ and it is easy to loop over all of the variables in a file. Some examples: #. List the names of all variables in the open CDF ``cdffile``: >>> cdffile.keys() >>> for k in cdffile: #Alternate ... print(k) #. Get a :class:`Var` object for the variable named ``Epoch``: >>> epoch = cdffile['Epoch'] #. Determine if a CDF contains a variable named ``B_GSE``: >>> if 'B_GSE' in cdffile: ... print('B_GSE is in the file') ... else: ... print('B_GSE is not in the file') #. Find how many variables are in the file: >>> print(len(cdffile)) #. Delete the variable ``Epoch`` from the open CDF file ``cdffile``: >>> del cdffile['Epoch'] #. Display a summary of variables and types in open CDF file ``cdffile``: >>> print(cdffile) #. Open the CDF named ``cdf_filename.cdf``, read *all* the data from all variables into dictionary ``data``, and close it when done or if an error occurs: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... data = cdffile.copy() This last example can be very inefficient as it reads the entire CDF. Normally it's better to treat the CDF as a dictionary and access only the data needed, which will be pulled transparently from disc. See :class:`Var` for more subtle examples. Potentially useful dictionary methods and related functions: - `in <http://docs.python.org/reference/expressions.html#in>`_ - `keys <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_ - :py:func:`len` - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - :py:func:`sorted` - :py:func:`~spacepy.toolbox.dictree` The CDF user's guide section 2.2 has more background information on CDF files. The :attr:`~CDF.attrs` Python attribute acts as a dictionary referencing CDF attributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`gAttrList` for more on the dictionary of global attributes. Creating a new CDF from a master (skeleton) CDF has similar syntax to opening one: >>> cdffile = pycdf.CDF('cdf_filename.cdf', 'master_cdf_filename.cdf') This creates and opens ``cdf_filename.cdf`` as a copy of ``master_cdf_filename.cdf``. Using a skeleton CDF is recommended over making a CDF entirely from scratch, but this is possible by specifying a blank master: >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') When CDFs are created in this way, they are opened read-write, see :py:meth:`readonly` to change. By default, new CDFs (without a master) are created in version 2 (backward-compatible) format. To create a version 3 CDF, use :meth:`Library.set_backward`: >>> pycdf.lib.set_backward(False) >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') Add variables by direct assignment, which will automatically set type and dimension based on the data provided: >>> cdffile['new_variable_name'] = [1, 2, 3, 4] or, if more control is needed over the type and dimensions, use :py:meth:`new`. 
Although it is supported to assign Var objects to Python variables for convenience, there are some minor pitfalls that can arise when changing a CDF that will not affect most users. This is only a concern when assigning a zVar object to a Python variable, changing the CDF through some other variable, and then trying to use the zVar object via the originally assigned variable. Deleting a variable: >>> var = cdffile['Var1'] >>> del cdffile['Var1'] >>> var[0] #fail, no such variable Renaming a variable: >>> var = cdffile['Var1'] >>> cdffile['Var1'].rename('Var2') >>> var[0] #fail, no such variable Renaming via the same variable works: >>> var = cdffile['Var1'] >>> var.rename('Var2') >>> var[0] #succeeds, aware of new name Deleting a variable and then creating another variable with the same name may lead to some surprises: >>> var = cdffile['Var1'] >>> var[...] = [1, 2, 3, 4] >>> del cdffile['Var1'] >>> cdffile.new('Var1', data=[5, 6, 7, 8]) >>> var[...] [5, 6, 7, 8] .. autosummary:: ~CDF.attr_num ~CDF.attrs ~CDF.add_attr_to_cache ~CDF.add_to_cache ~CDF.backward ~CDF.checksum ~CDF.clear_attr_from_cache ~CDF.clear_from_cache ~CDF.clone ~CDF.close ~CDF.col_major ~CDF.compress ~CDF.copy ~CDF.from_data ~CDF.new ~CDF.raw_var ~CDF.readonly ~CDF.save ~CDF.var_num ~CDF.version .. attribute:: CDF.attrs Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. .. attribute:: CDF.backward True if this CDF was created in backward-compatible mode (for opening with CDF library before 3.x) .. automethod:: add_to_cache .. automethod:: add_attr_to_cache .. automethod:: attr_num .. automethod:: checksum .. automethod:: clear_from_cache .. automethod:: clear_attr_from_cache .. automethod:: clone .. automethod:: close .. automethod:: col_major .. automethod:: compress .. automethod:: copy .. automethod:: from_data .. automethod:: new .. automethod:: raw_var .. automethod:: readonly .. automethod:: save .. automethod:: var_num .. automethod:: version """ def __init__(self, pathname, masterpath=None, create=None, readonly=None): """Open or create a CDF file. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. Raises ====== CDFError if CDF library reports an error CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save` when done.
""" if masterpath is not None: #Looks like we want to create if create is False: raise ValueError('Cannot specify a master CDF without creating a CDF') if readonly is True: raise ValueError('Cannot create a CDF in readonly mode') if create and readonly: raise ValueError('Cannot create a CDF in readonly mode') try: self.pathname = pathname.encode() except AttributeError: raise ValueError( 'pathname must be string-like: {0}'.format(pathname)) self._handle = ctypes.c_void_p(None) self._opened = False if masterpath is None and not create: self._open(True if readonly is None else readonly) elif masterpath: self._from_master(masterpath.encode()) else: self._create() lib.call(const.SELECT_, const.CDF_zMODE_, ctypes.c_long(2)) self._attrlistref = weakref.ref(gAttrList(self)) self.backward = self.version()[0] < 3 self._var_nums = {} """Cache of name-to-number mappings for variables in this CDF""" self._attr_info = {} """Cache of name-to-(number, global) mappings for attributes in this CDF""" def __del__(self): """Destructor; called when CDF object is destroyed. Close CDF file if there is still a valid handle. .. note:: To avoid data loss, explicitly call :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save`. """ if self._opened: self.close() def __delitem__(self, name): """Delete a zVariable in this CDF, by name or number Parameters ========== name : string or int Name or number of the CDF variable .. note: Variable numbers may change if variables are added or removed. Examples ======== Delete the variable ``Epoch`` from the open CDF file ``cdffile``. >>> del cdffile['Epoch'] """ self[name]._delete() def __enter__(self): """Context manager entrance function.""" return self def __exit__(self, type, value, traceback): """Context manager exit function. Close CDF file. """ self.close() def __getitem__(self, name): """Gets a zVariable in this CDF, by name or number The CDF acts like a dict @param name: Name or number of the CDF variable @type name: string or int @return: CDF variable named or numbered L{name} @rtype: :py:class:`pycdf.Var` @raise KeyError: for pretty much any problem in lookup @note: variable numbers may change if variables are added or removed. """ try: return Var(self, name) except CDFException as e: raise KeyError('{0}: {1}'.format(name, e)) def __setitem__(self, name, data): """Writes data to a zVariable in this CDF If the zVariable does not exist, will create one matching L{data}. If it does exist, will attempt to write L{data} to it without changing the type or dimensions. @param name: name or number of the variable to write @type name: str or int @param data: data to write, or a :py:class:`pycdf.Var` to copy """ if isinstance(data, Var): self.clone(data, name) elif name in self: self[name][...] 
= data if hasattr(data, 'attrs'): self[name].attrs.clone(data.attrs) else: self.new(name, data) def __iter__(self, current = 0): """Iterates over zVars in CDF Iterators for dicts return keys @note: Returned in variable-number order """ while current < self.__len__(): name = self[current].name() value = (yield name) if value is None: current += 1 else: current = self[value]._num() current += 1 def __len__(self): """Implements 'length' of CDF (number of zVars) @return: number of zVars in the CDF @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, const.CDF_NUMzVARS_, ctypes.byref(count)) return count.value def __contains__(self, key): """Determines whether a particular variable name is in the CDF @note: Essentially an efficiency function; L{__iter__} is called if this isn't defined @param key: key/variable name to check @type key: string @return: True if L{key} is the name of a variable in CDF, else False @rtype: Boolean """ try: foo = self[key] return True except KeyError as e: expected = str(key) + \ ": NO_SUCH_VAR: Named variable not found in this CDF." if expected in e.args: return False raise def __repr__(self): """Returns representation of CDF Cannot return anything that can be eval'd to create a copy of the CDF, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<CDF:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the CDF This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.CDF`, just the names, types, and sizes of all variables. (Attributes are not listed.) @return: description of the variables in the CDF @rtype: str """ if self._opened: return '\n'.join([key + ': ' + str(value) for (key, value) in sorted(self.items())]) #can get away with this sort because second value in tuple isn't #compared unless first are different, and variable name is unique. else: if isinstance(self.pathname, str): return 'Closed CDF {0}'.format(self.pathname) else: return 'Closed CDF {0}'.format(self.pathname.decode('ascii')) def _open(self, readonly=True): """Opens the CDF file (called on init) Will open an existing CDF file read/write. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.OPEN_, const.CDF_, self.pathname, ctypes.byref(self._handle)) self._opened = True if readonly: #Default is RW self.readonly(readonly) def _create(self): """Creates (and opens) a new CDF file Created at ``pathname``. Assumes zero-dimension r variables Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.CREATE_, const.CDF_, self.pathname, ctypes.c_long(0), (ctypes.c_long * 1)(0), ctypes.byref(self._handle)) self._opened = True def _from_master(self, master_path): """Creates a new CDF from a master CDF file ``master_path`` is copied to ``pathname`` and opened. Parameters ========== master_path : string location of the master CDF file Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. 
note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ if os.path.exists(self.pathname): raise CDFError(const.CDF_EXISTS) shutil.copy2(master_path, self.pathname) self._open(False) def _call(self, *args, **kwargs): """Select this CDF as current and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. Parameters ========== args : various, see :py:mod:`ctypes`. Passed directly to the CDF library interface. Useful constants are defined in the :doc:`const <pycdf_const>` module of this package. Returns ======= out : ctypes.c_long CDF status from the library .. note: Terminal NULL_ is automatically added to ``args``. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. """ return lib.call(const.SELECT_, const.CDF_, self._handle, *args, **kwargs) def clone(self, zVar, name=None, data=True): """ Clone a zVariable (from another CDF or this) into this CDF Parameters ========== zVar : :py:class:`Var` variable to clone Other Parameters ================ name : str Name of the new variable (default: name of the original) data : boolean (optional) Copy data, or only type, dimensions, variance, attributes? (default: True, copy data as well) Returns ======= out : :py:class:`Var` The newly-created zVar in this CDF """ if name is None: name = zVar.name() if name in self: del self[name] self.new(name, type=zVar.type(), recVary=zVar.rv(), dimVarys=zVar.dv(), dims=zVar._dim_sizes(), n_elements=zVar._nelems()) self[name].compress(*zVar.compress()) self[name].attrs.clone(zVar.attrs) if data: r = zVar._raw zVar._raw = True self.raw_var(name)[...] = zVar[...] zVar._raw = r return zVar def col_major(self, new_col=None): """ Finds the majority of this CDF file Other Parameters ================ new_col : boolean Specify True to change to column-major, False to change to row major, or do not specify to check the majority rather than changing it. (default is check only) Returns ======= out : boolean True if column-major, false if row-major """ if new_col != None: new_maj = const.COLUMN_MAJOR if new_col else const.ROW_MAJOR self._call(const.PUT_, const.CDF_MAJORITY_, new_maj) maj = ctypes.c_long(0) self._call(const.GET_, const.CDF_MAJORITY_, ctypes.byref(maj)) if not maj.value in (const.ROW_MAJOR.value, const.COLUMN_MAJOR.value): raise CDFError(const.BAD_MAJORITY) return maj.value == const.COLUMN_MAJOR.value def readonly(self, ro=None): """ Sets or check the readonly status of this CDF If the CDF has been changed since opening, setting readonly mode will have no effect. .. note:: Closing a CDF that has been opened readonly, or setting readonly False, may take a substantial amount of time if there are many variables in the CDF, as a (potentially large) cache needs to be cleared. Consider specifying ``readonly=False`` when opening the file if this is an issue. However, this may make some reading operations slower. Other Parameters ================ ro : Boolean True to set the CDF readonly, False to set it read/write, or leave out to check only. 
Returns ======= out : Boolean True if CDF is read-only, else False Raises ====== CDFError : if bad mode is set """ if ro == True: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYon) elif ro == False: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYoff) mode = ctypes.c_long(0) self._call(const.CONFIRM_, const.CDF_READONLY_MODE_, ctypes.byref(mode)) if mode.value == const.READONLYon.value: return True elif mode.value == const.READONLYoff.value: return False else: raise CDFError(const.BAD_READONLY_MODE.value) def checksum(self, new_val=None): """ Set or check the checksum status of this CDF. If checksums are enabled, the checksum will be verified every time the file is opened. Other Parameters ================ new_val : boolean True to enable checksum, False to disable, or leave out to simply check. Returns ======= out : boolean True if the checksum is enabled or False if disabled """ if new_val != None: self._call(const.PUT_, const.CDF_CHECKSUM_, const.MD5_CHECKSUM if new_val else const.NO_CHECKSUM) chk = ctypes.c_long(0) self._call(const.GET_, const.CDF_CHECKSUM_, ctypes.byref(chk)) if not chk.value in (const.MD5_CHECKSUM.value, const.NO_CHECKSUM.value): raise CDFError(const.BAD_CHECKSUM) return chk.value == const.MD5_CHECKSUM.value def close(self): """ Closes the CDF file Although called on object destruction (:meth:`~CDF.__del__`), to ensure all data are saved, the user should explicitly call :meth:`~CDF.close` or :meth:`~CDF.save`. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.CLOSE_, const.CDF_) self._opened = False def compress(self, comptype=None, param=None): """ Set or check the compression of this CDF Sets compression on entire *file*, not per-variable. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) Returns ======= out : tuple (comptype, param) currently in effect See Also ======== :meth:`Var.compress` Examples ======== Set file ``cdffile`` to gzip compression, compression level 9: >>> cdffile.compress(pycdf.const.GZIP_COMPRESSION, 9) """ return _compress(self, comptype, param) def new(self, name, data=None, type=None, recVary=True, dimVarys=None, dims=None, n_elements=None, compress=None, compress_param=None): """ Create a new zVariable in this CDF .. note:: Either ``data`` or ``type`` must be specified. If type is not specified, it is guessed from ``data``. Parameters ========== name : str name of the new variable Other Parameters ================ data data to store in the new variable. If this has a an ``attrs`` attribute (e.g., :class:`~spacepy.datamodel.dmarray`), it will be used to populate attributes of the new variable. type : ctypes.c_long CDF type of the variable, from :mod:`~pycdf.const`. See section 2.5 of the CDF user's guide for more information on CDF data types. recVary : boolean record variance of the variable (default True) dimVarys : list of boolean dimension variance of each dimension, default True for all dimensions. 
dims : list of int size of each dimension of this variable, default zero-dimensional. Note this is the dimensionality as defined by CDF, i.e., for record-varying variables it excludes the leading record dimension. See :py:class:`Var`. n_elements : int number of elements, should be 1 except for CDF_CHAR, for which it's the length of the string. compress : ctypes.c_long Compression to apply to this variable, default None. See :py:meth:`Var.compress`. compress_param : ctypes.c_long Compression parameter if compression used; reasonable default is chosen. See :py:meth:`Var.compress`. Returns ======= out : :py:class:`Var` the newly-created zVariable Raises ====== ValueError : if neither data nor sufficient typing information is provided. Notes ===== Any given data may be representable by a range of CDF types; if the type is not specified, pycdf will guess which the CDF types which can represent this data. This breaks down to: #. If input data is a numpy array, match the type of that array #. Proper kind (numerical, string, time) #. Proper range (stores highest and lowest number provided) #. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: #. Type that matches precision of data first, then #. integer type before float type, then #. Smallest type first, then #. signed type first, then #. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if ``data`` specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: #. absolute values between 0 and 3e-39 #. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. """ if type in (const.CDF_EPOCH16, const.CDF_INT8, const.CDF_TIME_TT2000) \ and self.backward: raise ValueError('Cannot use EPOCH16, INT8, or TIME_TT2000 ' 'in backward-compatible CDF') if not lib.supports_int8 and \ type in (const.CDF_INT8, const.CDF_TIME_TT2000): raise ValueError('INT8 and TIME_TT2000 require CDF library 3.4.0') if data is None: if type is None: raise ValueError('Must provide either data or a CDF type.') if dims is None: dims = [] if n_elements is None: n_elements = 1 else: (guess_dims, guess_types, guess_elements) = _Hyperslice.types(data) if dims is None: if recVary: if guess_dims == (): raise ValueError( 'Record-varying data cannot be scalar. 
' 'Specify NRV with CDF.new() or put data in array.') dims = guess_dims[1:] else: dims = guess_dims if type is None: type = guess_types[0] if type == const.CDF_EPOCH16.value and self.backward: type = const.CDF_EPOCH if n_elements is None: n_elements = guess_elements if dimVarys is None: dimVarys = [True for i in dims] recVary = const.VARY if recVary else const.NOVARY dimVarys = [const.VARY if dimVary else const.NOVARY for dimVary in dimVarys] if not hasattr(type, 'value'): type = ctypes.c_long(type) if type.value == const.CDF_INT8.value and not lib.supports_int8: raise ValueError( '64-bit integer support require CDF library 3.4.0') if type.value in (const.CDF_EPOCH16.value, const.CDF_INT8.value, const.CDF_TIME_TT2000.value) \ and self.backward: raise ValueError('Data requires EPOCH16, INT8, or TIME_TT2000; ' 'incompatible with backward-compatible CDF') new_var = Var(self, name, type, n_elements, dims, recVary, dimVarys) if compress != None: new_var.compress(compress, compress_param) if data is not None: new_var[...] = data if hasattr(data, 'attrs'): new_var.attrs.clone(data.attrs) return new_var def raw_var(self, name): """ Get a "raw" :class:`Var` object. Normally a :class:`Var` will perform translation of values for certain types (to/from Unicode for CHAR variables on Py3k, and to/from datetime for all time types). A "raw" object does not perform this translation, on read or write. This does *not* affect the data on disk, and in fact it is possible to maintain multiple Python objects with access to the same zVariable. Parameters ========== name : str name or number of the zVariable """ v = self[name] v._raw = True return v def save(self): """ Saves the CDF file but leaves it open. If closing the CDF, :meth:`close` is sufficient; there is no need to call :meth:`save` before :meth:`close`. .. note:: Relies on an undocumented call of the CDF C library, which is also used in the Java interface. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.SAVE_, const.CDF_) def copy(self): """ Make a copy of all data and attributes in this CDF Returns ======= out : :py:class:`CDFCopy` :class:`~spacepy.datamodel.SpaceData`-like object of all data """ return CDFCopy(self) def version(self): """ Get version of library that created this CDF Returns ======= out : tuple version of CDF library, in form (version, release, increment) """ ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) self._call(const.GET_, const.CDF_VERSION_, ctypes.byref(ver), const.GET_, const.CDF_RELEASE_, ctypes.byref(rel), const.GET_, const.CDF_INCREMENT_, ctypes.byref(inc)) return (ver.value, rel.value, inc.value) def _get_attrs(self): """Get attribute list Provide access to the CDF's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = gAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. 
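A rough usage sketch, assuming the dict-like behavior described above (the attribute name ``TITLE`` and the ``cdffile`` object are hypothetical):

>>> cdffile.attrs['TITLE'] = 'Sample mission data'  # create a global attribute with one Entry
>>> 'TITLE' in cdffile.attrs                        # dict-style membership test
>>> cdffile.attrs['TITLE'][0]                       # a gAttr is list-like; index by Entry number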
""") def var_num(self, varname): """Get the variable number of a particular variable name This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : int Variable number of this zvariable. """ num = self._var_nums.get(varname, None) if num is None: #Copied from Var._get, which can hopefully be thinned varNum = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMBER_, varname, ctypes.byref(varNum)) num = varNum.value self._var_nums[varname] = num return num def attr_num(self, attrname): """Get the attribute number and scope by attribute name This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : tuple attribute number, scope (True for global) of this attribute """ res = self._attr_info.get(attrname, None) if res is None: #Copied from Var._get, which can hopefully be thinned attrNum = ctypes.c_long(0) self._call(const.GET_, const.ATTR_NUMBER_, attrname, ctypes.byref(attrNum)) scope = ctypes.c_long(0) self._call(const.SELECT_, const.ATTR_, attrNum, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) res = (attrNum.value, scope) self._attr_info[attrname] = res return res def clear_attr_from_cache(self, attrname): """Mark an attribute deleted in the name-to-number cache Will remove an attribute, and all attributes with higher numbers, from the attribute cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the attribute. Not this is NOT a string in Python 3! """ num, scope = self.attr_num(attrname) #All numbers higher than this are renumbered for a, n in list(self._attr_info.items()): if n[0] >= num: del self._attr_info[a] def clear_from_cache(self, varname): """Mark a variable deleted in the name-to-number cache Will remove a variable, and all variables with higher numbers, from the variable cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! """ num = self.var_num(varname) #All numbers higher than this are renumbered for v, n in list(self._var_nums.items()): if n >= num: del self._var_nums[v] def add_attr_to_cache(self, attrname, num, scope): """Add an attribute to the name-to-number cache This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable scope : bool True if global scope; False if variable scope. 
""" self._attr_info[attrname] = (num, scope) def add_to_cache(self, varname, num): """Add a variable to the name-to-number cache This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable """ self._var_nums[varname] = num #Note there is no function for delete, currently handled in Var.rename #and Attr.rename by just deleting from the dict directly. Maybe this #should be differen (maybe should be possible to follow a variable across #a rename...) class Var(MutableSequence): """ A CDF variable. This object does not directly store the data from the CDF; rather, it provides access to the data in a format that much like a Python list or numpy :class:`~numpy.ndarray`. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. The CDF user's guide, section 2.3, provides background on variables. .. note:: Not intended to be created directly; use methods of :class:`CDF` to gain access to a variable. A record-varying variable's data are viewed as a hypercube of dimensions n_dims+1 (the extra dimension is the record number). They are indexed in row-major fashion, i.e. the last index changes most frequently / is contiguous in memory. If the CDF is column-major, the data are transformed to row-major before return. Non record-varying variables are similar, but do not have the extra dimension of record number. Variables can be subscripted by a multidimensional index to return the data. Indices are in row-major order with the first dimension representing the record number. If the CDF is column major, the data are reordered to row major. Each dimension is specified by standard Python `slice <http://docs.python.org/tutorial/introduction.html#strings>`_ notation, with dimensions separated by commas. The ellipsis fills in any missing dimensions with full slices. The returned data are lists; Python represents multidimensional arrays as nested lists. The innermost set of lists represents contiguous data. .. note:: numpy 'fancy indexing' is *not* supported. Degenerate dimensions are 'collapsed', i.e. no list of only one element will be returned if a single subscript is specified instead of a range. (To avoid this, specify a slice like 1:2, which starts with 1 and ends before 2). Two special cases: 1. requesting a single-dimension slice for a record-varying variable will return all data for that record number (or those record numbers) for that variable. 2. Requests for multi-dimensional variables may skip the record-number dimension and simply specify the slice on the array itself. In that case, the slice of the array will be returned for all records. In the event of ambiguity (e.g., single-dimension slice on a one-dimensional variable), case 1 takes priority. Otherwise, mismatch between the number of dimensions specified in the slice and the number of dimensions in the variable will cause an :exc:`~exceptions.IndexError` to be thrown. This all sounds very complicated but it is essentially attempting to do the 'right thing' for a range of slices. An unusual case is scalar (zero-dimensional) non-record-varying variables. Clearly they cannot be subscripted normally. 
In this case, use the ``[...]`` syntax meaning 'access all data.': >>> import pycdf >>> testcdf = pycdf.CDF('test.cdf', '') >>> variable = testcdf.new('variable', recVary=False, ... type=pycdf.const.CDF_INT4) >>> variable[...] = 10 >>> variable <Var: CDF_INT4 [] NRV > >>> variable[...] 10 Reading any empty non-record-varying variable will return an empty with the same *number* of dimensions, but all dimensions will be of zero length. The scalar is, again, a special case: due to the inability to have a numpy array which is both zero-dimensional and empty, reading an NRV scalar variable with no data will return an empty one-dimensional array. This is really not recommended. As a list type, variables are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_; iterating over a variable returns a single complete record at a time. This is all clearer with examples. Consider a variable ``B_GSM``, with three elements per record (x, y, z components) and fifty records in the CDF. Then: 1. ``B_GSM[0, 1]`` is the y component of the first record. 2. ``B_GSM[10, :]`` is a three-element list, containing x, y, and z components of the 11th record. As a shortcut, if only one dimension is specified, it is assumed to be the record number, so this could also be written ``B_GSM[10]``. 3. ``B_GSM[...]`` reads all data for ``B_GSM`` and returns it as a fifty-element list, each element itself being a three-element list of x, y, z components. Multidimensional example: consider fluxes stored as a function of pitch angle and energy. Such a variable may be called Flux and stored as a two-dimensional array, with the first dimension representing (say) ten energy steps and the second, eighteen pitch angle bins (ten degrees wide, centered from 5 to 175 degrees). Assume 100 records stored in the CDF (i.e. 100 different times). 1. ``Flux[4]`` is a list of ten elements, one per energy step, each element being a list of 18 fluxes, one per pitch bin. All are taken from the fifth record in the CDF. 2. ``Flux[4, :, 0:4]`` is the same record, all energies, but only the first four pitch bins (roughly, field-aligned). 3. ``Flux[..., 0:4]`` is a 100-element list (one per record), each element being a ten-element list (one per energy step), each containing fluxes for the first four pitch bins. This slicing notation is very flexible and allows reading specifically the desired data from the CDF. All data are, on read, converted to appropriate Python data types; EPOCH, EPOCH16, and TIME_TT2000 types are converted to :class:`~datetime.datetime`. Data are returned in numpy arrays. .. note:: Although pycdf supports TIME_TT2000 variables, the Python :class:`~datetime.datetime` object does not support leap seconds. Thus, on read, any seconds past 59 are truncated to 59.999999 (59 seconds, 999 milliseconds, 999 microseconds). Potentially useful list methods and related functions: - `count <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `in <http://docs.python.org/reference/expressions.html#in>`_ - `index <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `len <http://docs.python.org/library/functions.html#len>`_ - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - `sorted <http://docs.python.org/library/functions.html#sorted>`_ The topic of array majority can be very confusing; good background material is available at `IDL Array Storage and Indexing <http://www.idlcoyote.com/misc_tips/colrow_major.html>`_. 
In brief, *regardless of the majority stored in the CDF*, pycdf will always present the data in the native Python majority, row-major order, also known as C order. This is the default order in `NumPy <http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html #internal-memory-layout-of-an-ndarray>`_. However, packages that render image data may expect it in column-major order. If the axes seem 'swapped' this is likely the reason. The :attr:`~Var.attrs` Python attribute acts as a dictionary referencing zAttributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`zAttrList` for more on the dictionary of attributes. With writing, as with reading, every attempt has been made to match the behavior of Python lists. You can write one record, many records, or even certain elements of all records. There is one restriction: only the record dimension (i.e. dimension 0) can be resized by write, as all records in a variable must have the same dimensions. Similarly, only whole records can be deleted. .. note:: Unusual error messages on writing data usually mean that pycdf is unable to interpret the data as a regular array of a single type matching the type and shape of the variable being written. A 5x4 array is supported; an irregular array where one row has five columns and a different row has six columns is not. Error messages of this type include: - ``Data must be well-formed, regular array of number, string, or datetime`` - ``setting an array element with a sequence.`` - ``shape mismatch: objects cannot be broadcast to a single shape`` For these examples, assume Flux has 100 records and dimensions [2, 3]. Rewrite the first record without changing the rest: >>> Flux[0] = [[1, 2, 3], [4, 5, 6]] Writes a new first record and delete all the rest: >>> Flux[...] = [[1, 2, 3], [4, 5, 6]] Write a new record in the last position and add a new record after: >>> Flux[99:] = [[[1, 2, 3], [4, 5, 6]], ... [[11, 12, 13], [14, 15, 16]]] Insert two new records between the current number 5 and 6: >>> Flux[5:6] = [[[1, 2, 3], [4, 5, 6]], [[11, 12, 13], ... [14, 15, 16]]] This operation can be quite slow, as it requires reading and rewriting the entire variable. (CDF does not directly support record insertion.) Change the first element of the first two records but leave other elements alone: >>> Flux[0:2, 0, 0] = [1, 2] Remove the first record: >>> del Flux[0] Removes record 5 (the sixth): >>> del Flux[5] Due to the need to work around a bug in the CDF library, this operation can be quite slow. Delete *all data* from ``Flux``, but leave the variable definition intact: >>> del Flux[...] .. note:: Although this interface only directly supports zVariables, zMode is set on opening the CDF so rVars appear as zVars. See p.24 of the CDF user's guide; pyCDF uses zMode 2. .. autosummary:: ~Var.attrs ~Var.compress ~Var.copy ~Var.dtype ~Var.dv ~Var.insert ~Var.name ~Var.rename ~Var.rv ~Var.shape ~Var.type .. attribute:: Var.attrs zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. .. automethod:: compress .. automethod:: copy .. autoattribute:: dtype .. automethod:: dv .. automethod:: insert .. automethod:: name .. automethod:: rename .. automethod:: rv .. autoattribute:: shape .. 
automethod:: type """ def __init__(self, cdf_file, var_name, *args): """Create or locate a variable Parameters ========== cdf_file : :py:class:`pycdf.CDF` CDF file containing this variable var_name : string name of this variable Other Parameters ================ args additional arguments passed to :py:meth:`_create`. If none, opens an existing variable. If provided, creates a new one. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning """ self.cdf_file = cdf_file #This is the definitive "identify" of variable self._name = None self._type = None #CDF type (long) self._raw = False #Raw access (skip all conversions) if len(args) == 0: self._get(var_name) else: self._create(var_name, *args) #Weak reference to attribute list (use attrs instead) #This avoids a reference loop self._attrlistref = weakref.ref(zAttrList(self)) def __getitem__(self, key): """Returns a slice from the data array. Details under :py:class:`pycdf.Var`. @return: The data from this variable @rtype: list-of-lists of appropriate type. @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) #Hyperslice mostly catches this sort of thing, but #an empty variable is a special case, since we might want to #WRITE to 0th record (which Hyperslice also supports) but #can't READ from it, and iterating over tries to read from it. if hslice.rv: if hslice.dimsizes[0] == 0 and hslice.degen[0] and \ hslice.starts[0] == 0: raise IndexError('record index out of range') #For NRV, again hslice will assume 0th record exists since we might #want to write. So ANY degenerate dim other than the glued-on 0th #suggests an explicit index that should fail. None degenerate suggests #make an empty array. #Note this is pulling a lot of hyperslice stuff into getitem! elif hslice.dimsizes[0] == 0: if len(hslice.degen) > 1 and max(hslice.degen[1:]): raise IndexError('record index out of range') else: #The zero-length dimension is degenerate so it gets chopped, #and you can't have a zero-length numpy array that still #maintains the size of all other dimensions. So just force #a zero-dim array and the rest will follow hslice.counts[...] = 0 #If this is a scalar, need to make a single non-degenerate #dimension so it can be empty. if len(hslice.counts) == 1: hslice.degen[0] = False result = hslice.create_array() if hslice.counts[0] != 0: hslice.select() lib.call(const.GET_, const.zVAR_HYPERDATA_, result.ctypes.data_as(ctypes.c_void_p)) return hslice.convert_input_array(result) def __delitem__(self, key): """Removes a record (or set of records) from the CDF Only whole records can be deleted, so the del call must either specify only one dimension or it must specify all elements of the non-record dimensions. This is *not* a way to resize a variable! Deleting records from the middle of a variable may be very slow in some circumstances. To work around a bug in CDF library versions 3.4.0 and before, all the data must be read in, the requested deletions done, and then all written back out. 
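For example, a sketch assuming ``flux`` is a hypothetical record-varying variable with dimensions [2, 3]:

>>> del flux[0:10]     # delete the first ten records (whole records only)
>>> del flux[5:25:2]   # stepped slices also work, but can be slow
>>> del flux[0, 1]     # raises TypeError: only entire records can be deleted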
@param key: index or slice to delete @type key: int or slice @raise TypeError: if an attempt is made to delete from a non record-varying variable, or to delete below the record level """ if not self.rv(): raise TypeError('Cannot delete records from non-record-varying ' 'variable.') hslice = _Hyperslice(self, key) if hslice.dims > 1 and (hslice.counts[1:] != hslice.dimsizes[1:]).any(): raise TypeError('Can only delete entire records.') if hslice.counts[0] == 0: return start = hslice.starts[0] count = hslice.counts[0] interval = hslice.intervals[0] dimsize = hslice.dimsizes[0] self._call() dangerous_delete = False if lib._del_middle_rec_bug and \ (interval != 1 or (start != 0 and start + count < dimsize)): #delete from middle is dangerous if only have one index entry entries = ctypes.c_long(0) lib.call(const.GET_, const.zVAR_nINDEXENTRIES_, ctypes.byref(entries)) dangerous_delete = (entries.value == 1) if dangerous_delete: data = self[...] data = numpy.delete( data, numpy.arange(start, start + count * interval, interval), 0) self[0:dimsize - count] = data first_rec = dimsize - count last_rec = dimsize - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif interval == 1: first_rec = ctypes.c_long(start) last_rec = ctypes.c_long(start + count - 1) lib.call(const.DELETE_, const.zVAR_RECORDS_, first_rec, last_rec) else: self._call() #delete from end to avoid renumbering of records for recno in range(start + (count - 1) * interval, start - 1, -1 * interval): lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(recno), ctypes.c_long(recno)) def __setitem__(self, key, data): """Puts a slice into the data array. Details under :py:class:`pycdf.Var`. @param key: index or slice to store @type key: int or slice @param data: data to store @type data: numpy.array @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. 
IndexError will @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) n_recs = hslice.counts[0] hslice.expand(data) cdf_type = self.type() if cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) else: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=self._np_type()) if cdf_type == const.CDF_EPOCH16.value: datashape = data.shape[:-1] else: datashape = data.shape #Check data sizes if datashape != tuple(hslice.expected_dims()): raise ValueError('attempt to assign data of dimensions ' + str(datashape) + ' to slice of dimensions ' + str(tuple(hslice.expected_dims()))) #Flip majority and reversed dimensions, see convert_input_array data = hslice.convert_output_array(data) #Handle insertions and similar weirdness if hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Specified slice ends before last record, so insert in middle saved_data = self[hslice.starts[0] + n_recs:] if hslice.counts[0] > 0: hslice.select() lib.call(const.PUT_, const.zVAR_HYPERDATA_, data.ctypes.data_as(ctypes.c_void_p)) if hslice.counts[0] < n_recs: first_rec = hslice.starts[0] + hslice.counts[0] last_rec = hslice.dimsizes[0] - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Put saved data in after inserted data self[hslice.starts[0] + hslice.counts[0]:] = saved_data def extend(self, data): """ Append multiple values to the end of this variable This is an efficiency function which overrides the base implementation in MutableSequence. Parameters ---------- data : the data to append """ self[len(self):] = data def insert(self, index, data): """ Inserts a *single* record before an index Parameters ---------- index : int index before which to insert the new record data : the record to insert """ self[index:index] = [data] def _create(self, var_name, datatype, n_elements = 1, dims = (), recVary = const.VARY, dimVarys = None): """Creates a new zVariable @param var_name: name of this variable @type var_name: string @param datatype: CDF data type @type datatype: ctypes.c_long @param n_elements: number of elements (should be 1 except for CDF_CHAR variables). @type n_elements: long @param dims: size of each dimension for multi-dimensional variable, or empty for a zero-dimensional @type dims: sequence of long @param recVary: record variance for this variable (VARY/NOVARY) @type recVary: long @param dimVarys: array of VARY or NOVARY, variance for each dimension @type dimVarys: sequence of long @return: new variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.new}. 
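For example, a sketch of :py:meth:`CDF.new` usage (``cdffile`` is a hypothetical open, writable CDF; variable names are hypothetical):

>>> counts = cdffile.new('Counts', data=[[1, 2, 3], [4, 5, 6]])   # type and dims guessed from data
>>> energy = cdffile.new('Energy', data=[10., 20., 30.], recVary=False)
>>> epoch = cdffile.new('Epoch', type=pycdf.const.CDF_EPOCH)      # no data yet; type given explicitly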
""" dim_array = (ctypes.c_long * len(dims))(*dims) enc_name = var_name.encode('ascii') if dimVarys is None: dim_vary_array = (ctypes.c_long * (len(dims) if len(dims) > 0 else 1))(const.VARY) else: dim_vary_array = (ctypes.c_long * len(dims))(*dimVarys) varNum = ctypes.c_long(0) self.cdf_file._call(const.CREATE_, const.zVAR_, enc_name, datatype, ctypes.c_long(n_elements), ctypes.c_long(len(dims)), dim_array, recVary, dim_vary_array, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) def _delete(self): """Removes this zVariable from the CDF @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ self._call(const.DELETE_, const.zVAR_) self.cdf_file.clear_from_cache(self._name) self._name = None def _get(self, var_name): """Gets an existing zVariable @param var_name: name of this variable @type var_name: string @return: variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.__getitem__}. """ if isinstance(var_name, str_classes): try: enc_name = var_name.encode('ascii').rstrip() except AttributeError: enc_name = var_name.rstrip() #already in ASCII #'touch' CDF to cause an error if the name isn't there; get number varNum = ctypes.c_long(0) self.cdf_file._call(const.GET_, const.zVAR_NUMBER_, enc_name, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) else: #Looking up by number name = ctypes.create_string_buffer(const.CDF_VAR_NAME_LEN256+1) self.cdf_file._call(const.SELECT_, const.zVAR_, ctypes.c_long(var_name), const.GET_, const.zVAR_NAME_, name) self._name = name.value.rstrip() self.cdf_file.add_to_cache(self._name, var_name) def _num(self): """Returns the zVar number for this variable @return: number of this zVar @rtype: int """ return self.cdf_file.var_num(self._name) def __len__(self): """Get number of records for this variable in this file @return: Number of records @rtype: long @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ count = ctypes.c_long(0) self._call(const.GET_, const.zVAR_MAXREC_, ctypes.byref(count)) return (count.value + 1) def __repr__(self): """Returns representation of the variable Cannot return anything that can be eval'd to create a copy, so just wrap the informal representation in angle brackets. @return: info on this zVar @rtype: str """ return '<Var:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the variable This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.Var`. 
@return: info on this zVar, CDFTYPE [dimensions] NRV (if not record-varying) @rtype: str """ if self.cdf_file._opened: cdftype = self.type() chartypes = (const.CDF_CHAR.value, const.CDF_UCHAR.value) rv = self.rv() typestr = lib.cdftypenames[cdftype] + \ ('*' + str(self._nelems()) if cdftype in chartypes else '' ) if rv: sizestr = str([len(self)] + self._dim_sizes()) else: sizestr = str(self._dim_sizes()) return typestr + ' ' + sizestr + ('' if rv else ' NRV') else: if isinstance(self._name, str): return 'zVar "{0}" in closed CDF {1}'.format( self._name, self.cdf_file.pathname) else: return 'zVar "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self.cdf_file.pathname.decode('ascii')) def _n_dims(self): """Get number of dimensions for this variable @return: the number of dimensions @rtype: long """ n_dims = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMDIMS_, ctypes.byref(n_dims)) return n_dims.value def _dim_sizes(self): """Get the dimension sizes for this variable @return: sequence of sizes @rtype: sequence of long @note: This will always be in Python order (i.e. row major, last index iterates most quickly), *regardless* of the majority of the CDF. """ sizes = (ctypes.c_long * const.CDF_MAX_DIMS)(0) self._call(const.GET_, const.zVAR_DIMSIZES_, sizes) sizes = sizes[0:self._n_dims()] return sizes def rv(self, new_rv=None): """ Gets or sets whether this variable has record variance If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Other Parameters ================ new_rv : boolean True to change to record variance, False to change to NRV, unspecified to simply check variance. Returns ======= out : Boolean True if record varying, False if NRV """ if new_rv != None: self._call(const.PUT_, const.zVAR_RECVARY_, const.VARY if new_rv else const.NOVARY) vary = ctypes.c_long(0) self._call(const.GET_, const.zVAR_RECVARY_, ctypes.byref(vary)) return vary.value != const.NOVARY.value def dv(self, new_dv=None): """ Gets or sets dimension variance of each dimension of variable. If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Parameters ========== new_dv : list of boolean Each element True to change that dimension to dimension variance, False to change to not dimension variance. (Unspecified to simply check variance.) Returns ======= out : list of boolean True if that dimension has variance, else false. """ ndims = self._n_dims() if new_dv != None: if len(new_dv) != ndims: raise ValueError('Must specify variance for ' + str(ndims) + 'dimensions.') varies = (ctypes.c_long * ndims)( *[const.VARY if dv else const.NOVARY for dv in new_dv]) self._call(const.PUT_, const.zVAR_DIMVARYS_, varies) if ndims == 0: return [] varies = (ctypes.c_long * const.CDF_MAX_DIMS)() self._call(const.GET_, const.zVAR_DIMVARYS_, varies) return [dv != const.NOVARY.value for dv in varies[0:ndims]] def _call(self, *args, **kwargs): """Select this CDF and variable and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. @param args: Passed directly to the CDF library interface. Useful constants are defined in the :py:mod:`pycdf.const` module of this package. @type args: various, see :py:mod:`ctypes`. 
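A quick sketch of the variance checks defined above (``flux`` is a hypothetical [2, 3] record-varying variable):

>>> flux.rv()               # True if record-varying
>>> flux.dv()               # e.g. [True, True], one flag per dimension
>>> flux.dv([True, False])  # set per-dimension variance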
@return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self.cdf_file._call( const.SELECT_, const.zVAR_, ctypes.c_long(self.cdf_file.var_num(self._name)), *args, **kwargs) def _np_type(self): """Returns the numpy type of this variable This is the numpy type that will come directly out of the CDF; see :meth:`dtype` for the representation post-conversion. Raises ====== CDFError : for library-reported error or failure to find numpy type Returns ======= out : dtype numpy dtype that will hold value from this variable """ cdftype = self.type() if cdftype == const.CDF_CHAR.value or cdftype == const.CDF_UCHAR.value: return numpy.dtype('S' + str(self._nelems())) try: return lib.numpytypedict[cdftype] except KeyError: raise CDFError(const.BAD_DATA_TYPE) def type(self, new_type=None): """ Returns or sets the CDF type of this variable Parameters ========== new_type : ctypes.c_long the new type from :mod:`~pycdf.const` Returns ======= out : int CDF type """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) n_elements = ctypes.c_long(self._nelems()) self._call(const.PUT_, const.zVAR_DATASPEC_, new_type, n_elements) self._type = None if self._type is None: cdftype = ctypes.c_long(0) self._call(const.GET_, const.zVAR_DATATYPE_, ctypes.byref(cdftype)) self._type = cdftype.value return self._type def _nelems(self): """Number of elements for each value in this variable This is the length of strings for CHAR and UCHAR, should be 1 otherwise. @return: length of strings @rtype: int """ nelems = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMELEMS_, ctypes.byref(nelems)) return nelems.value def name(self): """ Returns the name of this variable Returns ======= out : str variable's name """ if isinstance(self._name, str): return self._name elif isinstance(self._name, bytes): return self._name.decode() def compress(self, comptype=None, param=None): """ Set or check the compression of this variable Compression may not be changeable on variables with data already written; even deleting the data may not permit the change. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
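For example, to gzip-compress a single variable (a sketch; set compression before writing data, and note ``flux`` is hypothetical):

>>> flux.compress(pycdf.const.GZIP_COMPRESSION, 9)
>>> flux.compress()   # check only: returns the (comptype, param) in effect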
Returns ======= out : tuple the (comptype, param) currently in effect """ return _compress(self, comptype, param) def copy(self): """ Copies all data and attributes from this variable Returns ======= out : :class:`VarCopy` list of all data in record order """ return VarCopy(self) def rename(self, new_name): """ Renames this variable Parameters ========== new_name : str the new name for this variable """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_VAR_NAME_LEN256: raise CDFError(const.BAD_VAR_NAME) self._call(const.PUT_, const.zVAR_NAME_, enc_name) self.cdf_file.add_to_cache( enc_name, self.cdf_file.var_num(self._name)) #Still in cache del self.cdf_file._var_nums[self._name] self._name = enc_name @property def shape(self): """ Provides the numpy array-like shape of this variable. Returns a tuple; first element is number of records (RV variable only) And the rest provide the dimensionality of the variable. .. note:: Assigning to this attribute will not change the shape. """ if self.rv(): return tuple([len(self)] + self._dim_sizes()) else: return tuple(self._dim_sizes()) @property def dtype(self): """ Provide the numpy dtype equivalent to the CDF type of this variable. Data from this variable will be returned in numpy arrays of this type. See Also -------- type """ cdftype = self.type() if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str is not bytes and not self._raw: return numpy.dtype('U' + str(self._nelems())) if cdftype in (const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value) and not self._raw: return numpy.dtype('O') return self._np_type() def _get_attrs(self): """Get attribute list Provide access to the zVar's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = zAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. """) class _Hyperslice(object): """Represents a CDF 'slice' used for the hyper CDF functions For internal module use only. @ivar dims: number of dimensions to this slice, usually number of dimensions to the variable plus one for the record, which represents the 0th (least rapidly varying) dimension. @type dims: int @ivar dimsizes: size of each dimension (0th is number of records) @type dimsizes: list of int @ivar starts: index of the start value for each dimension ('dimension indices' in CDF speak) @type starts: list of int @ivar counts: number of values to get from each dimension. Final result will be the product of everything in counts. ('dimension counts' in CDF speak) @type counts: numpy.array @ivar intervals: interval between successive indices to use for each dimension. ('dimension invervals' in CDF speak) @type intervals: list of int @ivar degen: is this dimension degenerate, i.e. should be removed in the returned dataset. A 3D array with one dimension degenerate will be returned as a 2D array (i.e. list-of-lists.) @type degen: numpy.array @ivar rev: should this dimension be returned in reverse order? 
@type rev: numpy.array @ivar column: is this slice in column-major mode (if false, row-major) @type column: boolean @ivar zvar: what CDF variable this object slices on @type zvar: :py:class:`pycdf.Var` @ivar expanded_key: fully-expanded version of the key passed to the constructor (all dimensions filled in) @type expanded_key: tuple @note: All dimension-related variables are stored row-major (Python order) """ def __init__(self, zvar, key): """Create a Hyperslice @param zvar: zVariable that this slices @type zvar: :py:class:`pycdf.Var` @param key: Python multi-dimensional slice as passed to __getitem__ @type key: tuple of slice and/or int @raise IndexError: if slice is out of range, mismatches dimensions, or otherwise unparsable. @raise ValueError: if slice has invalid values """ self.zvar = zvar self.rv = self.zvar.rv() #dim of records, + 1 record dim (NRV always is record 0) self.dims = zvar._n_dims() + 1 self.dimsizes = [len(zvar)] + \ zvar._dim_sizes() self.starts = [0] * self.dims self.counts = numpy.empty((self.dims,), dtype=numpy.int32) self.counts.fill(1) self.intervals = [1] * self.dims self.degen = numpy.zeros(self.dims, dtype=numpy.bool) self.rev = numpy.zeros(self.dims, dtype=numpy.bool) #key is: #1. a single value (integer or slice object) if called 1D #2. a tuple (of integers and/or slice objects) if called nD #3. Each item is either a single value (degenerate dim) # or a slice object. if not hasattr(key, '__len__'): #Not a container object, pack in tuple key = (key, ) if not self.rv: key = (0, ) + key #NRV, so always get 0th record (degenerate) key = self.expand_ellipsis(key, self.dims) if self.rv: #special-cases for RV variables if len(key) == 1: #get all data for this record(s) key = self.expand_ellipsis(key + (Ellipsis, ), self.dims) elif len(key) == self.dims - 1: #get same slice from each record key = (slice(None, None, None), ) + key if len(key) == self.dims: self.expanded_key = key for i in range(self.dims): idx = key[i] if hasattr(idx, 'start'): #slice (self.starts[i], self.counts[i], self.intervals[i], self.rev[i]) = \ self.convert_range(idx.start, idx.stop, idx.step, self.dimsizes[i]) else: #Single degenerate value if idx < 0: idx += self.dimsizes[i] if idx != 0 and (idx >= self.dimsizes[i] or idx < 0): raise IndexError('list index out of range') self.starts[i] = idx self.degen[i] = True else: raise IndexError('Slice does not match dimensions for zVar ' + str(zvar._name)) self.column = zvar.cdf_file.col_major() def expected_dims(self, data=None): """Calculate size of non-degenerate dimensions Figures out size, in each dimension, of expected input data @return: size of each dimension for this slice, excluding degenerate @rtype: list of int """ return [self.counts[i] for i in range(self.dims) if not self.degen[i]] def expand(self, data): """Expands the record dimension of this slice to hold a set of data If the length of data (outermost dimension) is larger than the record count (counts[0]) for this slice, expand the slice to hold all the data. This requires that the record dimension of the slice not be degenerate, and also that it not have been completely specified when the hyperslice was created (i.e. record dimension either ellipsis or no specified stop.) Does *not* expand any other dimension, since that's Very Hard in CDF. 
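As an illustration of the start/count/interval/degen decomposition described for this class (values assume a hypothetical record-varying variable with 100 records and dimensions [2, 3]), a key such as ``var[10:20:2, 1, :]`` parses roughly to:

    starts    = [10, 1, 0]
    counts    = [5, 1, 3]
    intervals = [2, 1, 1]
    degen     = [False, True, False]   (the single index on dimension 1 is dropped on return)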
@param data: the data which are intended to be stored in this slice @type data: list """ rec_slice = self.expanded_key[0] if not self.rv or isinstance(data, str_classes) or self.degen[0] or \ not hasattr(rec_slice, 'stop'): return if len(data) < self.counts[0]: #Truncate to fit data if rec_slice.stop is None and rec_slice.step in (None, 1): self.counts[0] = len(data) elif len(data) > self.counts[0]: #Expand to fit data if rec_slice.step in (None, 1): self.counts[0] = len(data) def create_array(self): """Creates a numpy array to hold the data from this slice Returns ======= out : numpy.array array sized, typed, and dimensioned to hold data from this slice """ counts = self.counts degen = self.degen if self.column: counts = self.reorder(counts) degen = self.reorder(degen) #TODO: Forcing C order for now, revert to using self.column later array = numpy.empty( [counts[i] for i in range(len(counts)) if not degen[i]], self.zvar._np_type(), order='C') return numpy.require(array, requirements=('C', 'A', 'W')) def convert_input_array(self, buffer): """Converts a buffer of raw data from this slice EPOCH(16) variables always need to be converted. CHAR need converted to Unicode if py3k Parameters ========== buffer : numpy.array data as read from the CDF file Returns ======= out : numpy.array converted data """ result = self._flip_array(buffer) #Convert to derived types cdftype = self.zvar.type() if not self.zvar._raw: if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str != bytes: dt = numpy.dtype('U{0}'.format(result.dtype.itemsize)) result = numpy.require(numpy.char.array(result).decode(), dtype=dt) elif cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(result) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(result) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(result) return result def convert_output_array(self, buffer): """Convert a buffer of data that will go into this slice Parameters ========== buffer : numpy.array data to go into the CDF file Returns ======= out : numpy.array input with majority flipped and dimensions reversed to be suitable to pass directly to CDF library. """ buffer = self._flip_array(buffer) return numpy.require(buffer, requirements=('C', 'A', 'W')) def _flip_array(self, data): """ Operations for majority, etc. common between convert_input and _output """ cdftype = self.zvar.type() #Flip majority if any non-degenerate dimensions exist if self.column and not min(self.degen): #Record-number dim degen, swap whole thing if self.degen[0]: if cdftype == const.CDF_EPOCH16.value: #Maintain last dimension data = data.transpose( list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose() #Record-number dimension is not degenerate, so keep it first else: if cdftype == const.CDF_EPOCH16.value: data = data.transpose( [0] + list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose( [0] + list(range(len(data.shape) - 1, 0, -1))) #Reverse non-degenerate dimensions in rev #Remember that the degenerate indices are already gone! 
if self.rev.any(): sliced = [(slice(None, None, -1) if self.rev[i] else slice(None)) for i in range(self.dims) if not self.degen[i]] if cdftype == const.CDF_EPOCH16.value: #don't reverse last dim sliced.extend(slice(None)) data = operator.getitem(data, tuple(sliced)) return data def select(self): """Selects this hyperslice in the CDF Calls the CDF library to select the CDF, variable, records, and array elements corresponding to this slice. """ args = (const.SELECT_, const.zVAR_RECNUMBER_, ctypes.c_long(self.starts[0]), const.SELECT_, const.zVAR_RECCOUNT_, ctypes.c_long(self.counts[0]), const.SELECT_, const.zVAR_RECINTERVAL_, ctypes.c_long(self.intervals[0])) if self.dims > 1: dims = self.dims - 1 args += (const.SELECT_, const.zVAR_DIMINDICES_, (ctypes.c_long * dims)(*self.starts[1:]), const.SELECT_, const.zVAR_DIMCOUNTS_, (ctypes.c_long * dims)(*self.counts[1:]), const.SELECT_, const.zVAR_DIMINTERVALS_, (ctypes.c_long * dims)(*self.intervals[1:])) self.zvar._call(*args) @staticmethod def expand_ellipsis(slices, n_dims): """Expands any ellipses into correct number of full-size slices @param slices: tuple of slices, integers, or ellipse objects @type slices: tuple @param n_dims: number of dimensions this slice is over @type n_dims: int @return: L{slices} with ellipses replaced by appropriate number of full-dimension slices @rtype: tuple @raise IndexError: if ellipses specified when already have enough dimensions """ if slices is Ellipsis: return tuple([slice(None, None, None) for i in range(n_dims)]) #Elements might be numpy arrays, so can't use in/index idx = [i for i, v in enumerate(slices) if v is Ellipsis] if not idx: #no ellipsis return slices if len(idx) > 1: #multiples! raise IndexError('Ellipses can only be used once per slice.') idx = idx[0] #how many dims to expand ellipsis to #remember the ellipsis is in len(slices) and must be replaced! extra = n_dims - len(slices) + 1 if extra < 0: raise IndexError('too many indices') result = slices[0:idx] + (slice(None), ) * extra + slices[idx+1:] return result @staticmethod def check_well_formed(data): """Checks if input data is well-formed, regular array""" d = numpy.asanyarray(data) if d.dtype == numpy.object: #this is probably going to be bad try: len(d.flat[0]) except TypeError: #at least it's not a list pass else: raise ValueError( 'Data must be well-formed, regular array of number, ' 'string, or datetime') @staticmethod def dimensions(data): """Finds the dimensions of a nested list-of-lists @param data: data of which dimensions are desired @type data: list (of lists) @return: dimensions of L{data}, in order outside-in @rtype: list of int @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) _Hyperslice.check_well_formed(d) return d.shape @staticmethod def types(data, backward=False): """Find dimensions and valid types of a nested list-of-lists Any given data may be representable by a range of CDF types; infer the CDF types which can represent this data. This breaks down to: 1. Proper kind (numerical, string, time) 2. Proper range (stores highest and lowest number) 3. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: 1. Type that matches precision of data first, then 2. integer type before float type, then 3. Smallest type first, then 4. signed type first, then 5. specifically-named (CDF_BYTE) vs. 
generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if L{data} specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: 1. absolute values between 0 and 3e-39 2. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. @param data: data for which dimensions and CDF types are desired @type data: list (of lists) @param backward: limit to pre-CDF3 types @type backward: bool @return: dimensions of L{data}, in order outside-in; CDF types which can represent this data; number of elements required (i.e. length of longest string) @rtype: 3-tuple of lists ([int], [ctypes.c_long], [int]) @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) dims = d.shape elements = 1 types = [] _Hyperslice.check_well_formed(d) if d.dtype.kind in ('S', 'U'): #it's a string types = [const.CDF_CHAR, const.CDF_UCHAR] elements = d.dtype.itemsize if d.dtype.kind == 'U': #UTF-8 uses 4 bytes per elements //= 4 elif d.size and hasattr(numpy.ma.getdata(d).flat[0], 'microsecond'): if max((dt.microsecond % 1000 for dt in d.flat)) > 0: types = [const.CDF_EPOCH16, const.CDF_EPOCH, const.CDF_TIME_TT2000] else: types = [const.CDF_EPOCH, const.CDF_EPOCH16, const.CDF_TIME_TT2000] if backward: del types[types.index(const.CDF_EPOCH16)] del types[-1] elif not lib.supports_int8: del types[-1] elif d is data or isinstance(data, numpy.generic): #numpy array came in, use its type (or byte-swapped) types = [k for k in lib.numpytypedict if (lib.numpytypedict[k] == d.dtype or lib.numpytypedict[k] == d.dtype.newbyteorder()) and not k in lib.timetypes] if (not lib.supports_int8 or backward) \ and const.CDF_INT8.value in types: del types[types.index(const.CDF_INT8.value)] #Maintain priority to match the ordered lists below: #float/double (44, 45) before real (21/22), and #byte (41) before int (1) before char (51). So hack. #Consider making typedict an ordered dict once 2.6 is dead. types.sort(key=lambda x: x % 50, reverse=True) if not types: #not a numpy array, or can't parse its type if d.dtype.kind == 'O': #Object. 
Try to make it numeric #Can't do safe casting from Object, so try and compare #Basically try most restrictive to least restrictive trytypes = (numpy.uint64, numpy.int64, numpy.float64) for t in trytypes: try: newd = d.astype(dtype=t) except: #Failure to cast, try next type continue if (newd == d).all(): #Values preserved, use this type d = newd #Continue with normal guessing, as if a list break else: #fell through without a match raise ValueError( 'Cannot convert generic objects to CDF type.') if d.dtype.kind in ('i', 'u'): #integer minval = numpy.min(d) maxval = numpy.max(d) if minval < 0: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_INT2, const.CDF_INT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 15, 2 ** 31, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] else: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_UINT1, const.CDF_INT2, const.CDF_UINT2, const.CDF_INT4, const.CDF_UINT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 8, 2 ** 15, 2 ** 16, 2 ** 31, 2 ** 32, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] types = [t for (t, c) in zip(types, cutoffs) if c > maxval and (minval >= 0 or minval >= -c)] if (not lib.supports_int8 or backward) \ and const.CDF_INT8 in types: del types[types.index(const.CDF_INT8)] else: #float if dims is (): if d != 0 and (abs(d) > 1.7e38 or abs(d) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] else: absolutes = numpy.abs(d[d != 0]) if len(absolutes) > 0 and \ (numpy.max(absolutes) > 1.7e38 or numpy.min(absolutes) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] types = [t.value if hasattr(t, 'value') else t for t in types] return (dims, types, elements) @staticmethod def reorder(seq): """Reorders seq to switch array majority Used to take an array of subscripts between row and column majority. First element is not touched, being the record number. @param seq: a sequence of *subscripts* @type seq: sequence of integers @return: seq with all but element 0 reversed in order @rtype: sequence of integers """ return numpy.concatenate((seq[0:1], numpy.flipud(seq)[:-1])) @staticmethod def convert_range(start, stop, step, size): """Converts a start/stop/step range to start/count/interval (i.e. changes from Python-style slice to CDF-style) @param start: index to start a slice at, may be none or negative @type start: int @param stop: index at end of slice (one-past, standard Python), may be none or negative @type stop: int @param step: interval for stepping through stlice @type step: int @param size: size of list to slice @type size: int @return: (start, count, interval, rev) where: 1. start is the start index, normalized to be within the size of the list and negatives handled 2. count is the number of records in the slice, guaranteed to stop before the end 3. interval is the skip between records 4. rev indicates whether the sequence should be reversed @rtype: (int, int, int, boolean) """ (start, stop, step) = slice(start, stop, step).indices(size) if step < 0: step *= -1 count = int((start - stop + step - 1) / step) start = start - (count - 1) * step rev = True else: count = int((stop - start + step - 1) / step) rev = False if count < 0: count = 0 start = 0 return (start, count, step, rev) class Attr(MutableSequence): """An attribute, g or z, for a CDF .. 
warning:: This class should not be used directly, but only in its subclasses, :class:`gAttr` and :class:`zAttr`. The methods listed here are safe to use in the subclasses. Represents a CDF attribute, providing access to the Entries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. An introduction to CDF attributes can be found in section 2.4 of the CDF user's guide. Each element of the list is a single Entry of the appropriate type. The index to the elements is the Entry number. Multi-dimensional slicing is *not* supported; an Entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry .. autosummary:: ~Attr.append ~Attr.has_entry ~Attr.insert ~Attr.max_idx ~Attr.new ~Attr.number ~Attr.rename ~Attr.type .. automethod:: append .. automethod:: has_entry .. automethod:: insert .. automethod:: max_idx .. automethod:: new .. automethod:: number .. automethod:: rename .. automethod:: type """ def __init__(self, cdf_file, attr_name, create=False): """Initialize this attribute @param cdf_file: CDF file containing this attribute @type cdf_file: :py:class:`pycdf.CDF` @param attr_name: Name of this attribute @type attr_name: str @param create: True to create attribute, False to look up existing. @type create: bool """ self._cdf_file = cdf_file self._raw = False if isinstance(attr_name, str_classes): try: self._name = attr_name.encode('ascii') except AttributeError: self._name = attr_name attrno = ctypes.c_long() if create: self._cdf_file._call(const.CREATE_, const.ATTR_, self._name, self.SCOPE, ctypes.byref(attrno)) self._cdf_file.add_attr_to_cache( self._name, attrno.value, self.SCOPE == const.GLOBAL_SCOPE) else: #Ensure exists, and populate cache. See scope note below attrno, scope = self._cdf_file.attr_num(self._name) else: name = ctypes.create_string_buffer(const.CDF_ATTR_NAME_LEN256 + 1) scope = ctypes.c_long(0) self._cdf_file._call(const.SELECT_, const.ATTR_, ctypes.c_long(attr_name)) #Because it's possible to create a gAttr Python objecting #referencing an Attribute with variable scope, and vice-versa, #do NOT assume the scope matches #(Higher level code checks for that being a bad thing.) self._cdf_file._call( const.GET_, const.ATTR_NAME_, name, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) self._name = name.value.rstrip() if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) self._cdf_file.add_attr_to_cache(self._name, attr_name, scope) def __getitem__(self, key): """Return a slice of Entries. Because Attributes may be sparse, a multi-element slice will return None for those elements which do not have associated Entries. @param key: index or range of Entry number to return @type key: slice or int @return: a list of entries, appropriate type. @raise IndexError: if L{key} is an int and that Entry number does not exist. 
""" if key is Ellipsis: key = slice(None, None, None) if hasattr(key, 'indices'): idx = range(*key.indices(self.max_idx() + 1)) return [self._get_entry(i) if self.has_entry(i) else None for i in idx] else: if self.has_entry(key): return self._get_entry(key) else: raise IndexError('list index ' + str(key) + ' out of range.') def _check_other_entries(self, types): """Try to get the type of this entry from others in the Attribute For zAttrs, checks if all other Entries are the same type, and at least one doesn't match its zVar, i.e. Entry type dominates (otherwise assumption is the Var type dominates). For gAttrs, checks all other Entries, and gives priority to the one that's earliest in the possible type list and exists in other Entries. This is only one component of Entry type guessing! :param list types: CDF types that are candidates (match the data) :return: The type discerned from other Entries, or None """ if self.ENTRY_ == const.zENTRY_: #If everything else is the same entry type, #and one is not the same as its var, probably #all entries should be of that type cand_et = None #The Entry type that might work one_var_diff = False #One Var has a type different from Entry for num in range(self.max_idx() + 1): if not self.has_entry(num): continue vartype = self._cdf_file[num].type() entrytype = self.type(num) if vartype != entrytype: one_var_diff = True if cand_et is None: if not entrytype in types: return None #One var has Entry with "impossible" type cand_et = entrytype elif cand_et != entrytype: return None #Two vars have Entries with different types if one_var_diff and cand_et is not None: return cand_et else: # Of those types which exist in other entries, # find the one which is earliest # in types, i.e. the preferred type entrytypes = [self.type(num) for num in range(self.max_idx() + 1) if self.has_entry(num)] entrytypes = [et for et in entrytypes if et in types] if entrytypes: return types[ min([types.index(et) for et in entrytypes])] return None def __setitem__(self, key, data): """Set a slice of Entries. @param key: index or range of Entry numbers to set @type key: slice or int @param data: the data to set these entries to. Normally each entry should be a sequence; if a scalar is provided, it is treated as a single-element list. @type data: scalar or list @raise ValueError: if size of {data} does not match size of L{key} @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. 
""" if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): #Single value, promote everything a dimension idx = (key, key + 1, 1) data = [data] else: idx = key.indices(self.max_idx() + 1) if key.step is None or key.step > 0: #Iterating forward, extend slice to match data if len(data) > len(range(*idx)): idx = (idx[0], idx[0] + idx[2] * len(data), idx[2]) #get, and check, types and sizes for all data #checks first so don't have error after changing half the Entries data_idx = -1 typelist = [] for i in range(*idx): data_idx += 1 if data_idx >= len(data): continue datum = data[data_idx] if datum is None: typelist[i] = (None, None, None) continue (dims, types, elements) = _Hyperslice.types( datum, backward=self._cdf_file.backward) if len(types) <= 0: raise ValueError('Cannot find a matching CDF type.') if len(dims) > 1: raise ValueError('Entries must be scalar or 1D.') elif len(dims) == 1 and isinstance(datum[0], str_classes): raise ValueError('Entry strings must be scalar.') entry_type = None if self.has_entry(i): #If the entry already exists, match its type entry_type = self.type(i) if not entry_type in types: entry_type = None if entry_type is None: #Check other entries for this attribute entry_type = self._check_other_entries(types) if entry_type is None and self.ENTRY_ == const.zENTRY_: #Fall back to zVar type vartype = self._cdf_file[i].type() if vartype in types: entry_type = vartype else: entry_type = types[0] elif entry_type is None: entry_type = types[0] if not entry_type in lib.numpytypedict: raise ValueError('Cannot find a matching numpy type.') typelist.append((dims, entry_type, elements)) data_idx = -1 for i in range(*idx): data_idx += 1 if data_idx >= len(data) or data[data_idx] is None: if self.has_entry(i): del self[i] continue datum = data[data_idx] (dims, entry_type, elements) = typelist[data_idx] self._write_entry(i, datum, entry_type, dims, elements) def __delitem__(self, key): """Delete a slice of Entries. @param key: index or range of Entry numbers to delete @type key: slice or int @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. """ if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): idx = (key, key + 1, 1) else: idx = key.indices(self.max_idx() + 1) for i in range(*idx): self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(i), const.DELETE_, self.ENTRY_) def __iter__(self, current=0): """Iterates over all entries in this Attribute Returns data from one entry at a time until reaches the end. @note: Returned in entry-number order. """ while current <= self.max_idx(): if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current += 1 def __reversed__(self, current=None): """Iterates over all entries in this Attribute Returns data from one entry at a time, starting at end and going to beginning. @note: Returned in entry-number order. """ if current is None: current = self.max_idx() while current >= 0: if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current -= 1 def __len__(self): """Number of Entries for this Attr. NOT same as max Entry number. 
@return: Number of Entries @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_NUMENTRIES_, ctypes.byref(count)) return count.value def __repr__(self): """Returns representation of an attribute Cannot return anything that can be eval'd to create a copy of the attribtute, so just wrap the informal representation in angle brackets. @return: all the data in this attribute @rtype: str """ return '<\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute This is an 'informal' representation in that it cannot be evaluated directly to create an L{Attr}. @return: all the data in this attribute @rtype: str """ if self._cdf_file._opened: return '\n'.join([str(item) for item in self]) else: if isinstance(self._name, str): return 'Attribute "{0}" in closed CDF {1}'.format( self._name, self._cdf_file.pathname) else: return 'Attribute "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self._cdf_file.pathname.decode('ascii')) def insert(self, index, data): """Insert an entry at a particular number Inserts entry at particular number while moving all subsequent entries to one entry number later. Does not close gaps. Parameters ========== index : int index where to put the new entry data : data for the new entry """ max_entry = self.max_idx() if index > max_entry: #Easy case self[index] = data return for i in range(max_entry, index - 1, -1): if self.has_entry(i+1): self.__delitem__(i+1) if self.has_entry(i): self.new(self.__getitem__(i), type=self.type(i), number=i+1) self[index] = data def append(self, data): """Add an entry to end of attribute Puts entry after last defined entry (does not fill gaps) Parameters ========== data : data for the new entry """ self[self.max_idx() + 1] = data def _call(self, *args, **kwargs): """Select this CDF and Attr and call the CDF internal interface @param args: Passed directly to the CDF library interface. @type args: various, see :py:mod:`ctypes`. @return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self._cdf_file._call( const.SELECT_, const.ATTR_, ctypes.c_long(self._cdf_file.attr_num(self._name)[0]), *args, **kwargs) def _entry_len(self, number): """Number of elements in an Entry @param number: number of Entry @type number: int @return: number of elements @rtype: int """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') count = ctypes.c_long(0) self._call( const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_NUMELEMS_, ctypes.byref(count)) return count.value def type(self, number, new_type=None): """Find or change the CDF type of a particular Entry number Parameters ========== number : int number of Entry to check or change Other Parameters ================ new_type type to change this Entry to, from :mod:`~pycdf.const`. Omit to only check type. Returns ======= out : int CDF variable type, see :mod:`~pycdf.const` Notes ===== If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) size = ctypes.c_long(self._entry_len(number)) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATASPEC_, new_type, size, ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') cdftype = ctypes.c_long(0) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATATYPE_, ctypes.byref(cdftype), ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') return cdftype.value def has_entry(self, number): """Check if this attribute has a particular Entry number Parameters ========== number : int number of Entry to check or change Returns ======= out : bool True if ``number`` is a valid entry number; False if not """ status = self._call(const.CONFIRM_, self.ENTRY_EXISTENCE_, ctypes.c_long(number), ignore=(const.NO_SUCH_ENTRY, )) return not status == const.NO_SUCH_ENTRY def max_idx(self): """Maximum index of Entries for this Attr Returns ======= out : int maximum Entry number """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_MAXENTRY_, ctypes.byref(count)) return count.value def new(self, data, type=None, number=None): """Create a new Entry in this Attribute .. note:: If ``number`` is provided and an Entry with that number already exists, it will be overwritten. Parameters ========== data data to put in the Entry Other Parameters ================ type : int type of the new Entry, from :mod:`~pycdf.const` (otherwise guessed from ``data``) number : int Entry number to write, default is lowest available number. """ if number is None: number = 0 while self.has_entry(number): number += 1 (dims, types, elements) = _Hyperslice.types( data, backward=self._cdf_file.backward) if type is None: #Guess based on other entries type = self._check_other_entries(types) if type is None and self.ENTRY_ == const.zENTRY_: #Try to match variable type vartype = self._cdf_file[number].type() if vartype in types: type = vartype if type is None: type = types[0] elif hasattr(type, 'value'): type = type.value self._write_entry(number, data, type, dims, elements) def number(self): """Find the attribute number for this attribute Returns ======= out : int attribute number """ no = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.ATTR_NUMBER_, self._name, ctypes.byref(no)) return no.value def global_scope(self): """Determine scope of this attribute. Returns ======= out : bool True if global (i.e. gAttr), False if zAttr """ return self._cdf_file.attr_num(self._name)[1] def rename(self, new_name): """Rename this attribute Renaming a zAttribute renames it for *all* zVariables in this CDF! 
Parameters ========== new_name : str the new name of the attribute """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_ATTR_NAME_LEN256: raise CDFError(const.BAD_ATTR_NAME) self._call(const.PUT_, const.ATTR_NAME_, enc_name) self._cdf_file.add_attr_to_cache( enc_name, *self._cdf_file.attr_num(self._name)) #still in cache del self._cdf_file._attr_info[self._name] self._name = enc_name def _get_entry(self, number): """Read an Entry associated with this L{Attr} @param number: number of Entry to return @type number: int @return: data from entry numbered L{number} @rtype: list or str """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') #Make a big enough buffer length = self._entry_len(number) cdftype = self.type(number) if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): buff = numpy.empty((), 'S{0}'.format(length), order='C') else: if not cdftype in lib.numpytypedict: raise CDFError(const.BAD_DATA_TYPE) buff = numpy.empty((length,), lib.numpytypedict[cdftype], order='C') buff = numpy.require(buff, requirements=('C', 'A', 'W')) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATA_, buff.ctypes.data_as(ctypes.c_void_p)) #decode if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): if str == bytes or self._raw: #Py2k, leave as bytes result = bytes(buff) else: #Py3k, make unicode result = str(numpy.char.array(buff).decode()) else: if not self._raw: if cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(buff) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(buff) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(buff) else: result = buff else: result = buff if length == 1: result = result[0] return result def _write_entry(self, number, data, cdf_type, dims, elements): """Write an Entry to this Attr. @param number: number of Entry to write @type number: int @param data: data to write @param cdf_type: the CDF type to write, from :py:mod:`pycdf.const` @param dims: dimensions of L{data} @type dims: list @param elements: number of elements in L{data}, 1 unless it is a string @type elements: int """ if len(dims) == 0: n_write = 1 else: n_write = dims[0] if cdf_type in (const.CDF_CHAR.value, const.CDF_UCHAR.value): data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.dtype('S' + str(elements))) n_write = elements elif cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data), except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) elif cdf_type in lib.numpytypedict: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=lib.numpytypedict[cdf_type]) else: raise CDFError(const.BAD_DATA_TYPE) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATA_, ctypes.c_long(cdf_type), ctypes.c_long(n_write), data.ctypes.data_as(ctypes.c_void_p)) def _delete(self): """Delete this Attribute Also deletes all Entries associated with it. 
""" self._call(const.DELETE_, const.ATTR_) self._cdf_file.clear_attr_from_cache(self._name) self._name = None class zAttr(Attr): """zAttribute for zVariables within a CDF. .. warning:: Because zAttributes are shared across all variables in a CDF, directly manipulating them may have unexpected consequences. It is safest to operate on zEntries via :class:`zAttrList`. .. note:: When accessing a zAttr, pyCDF exposes only the zEntry corresponding to the associated zVariable. See Also ======== :class:`Attr` """ ENTRY_ = const.zENTRY_ ENTRY_DATA_ = const.zENTRY_DATA_ SCOPE = const.VARIABLE_SCOPE ENTRY_EXISTENCE_ = const.zENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMzENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXzENTRY_ ENTRY_NUMELEMS_ = const.zENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.zENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.zENTRY_DATASPEC_ def insert(self, index, data): """Insert entry at particular index number Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError def append(self, index, data): """Add entry to end of attribute list Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError class gAttr(Attr): """Global Attribute for a CDF Represents a CDF attribute, providing access to the gEntries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. Normally accessed by providing a key to a :class:`gAttrList`: >>> attribute = cdffile.attrs['attribute_name'] >>> first_gentry = attribute[0] Each element of the list is a single gEntry of the appropriate type. The index to the elements is the gEntry number. A gEntry may be either a single string or a 1D array of numerical type. Entries of numerical type (everything but CDF_CHAR and CDF_UCHAR) with a single element are returned as scalars; multiple-element entries are returned as a list. No provision is made for accessing below the entry level; the whole list is returned at once (but Python's slicing syntax can be used to extract individual items from that list.) Multi-dimensional slicing is *not* supported; an entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry gEntries are *not* necessarily contiguous; a gAttribute may have an entry 0 and entry 2 without an entry 1. :meth:`~Attr.len` will return the *number* of gEntries; use :meth:`~Attr.max_idx` to find the highest defined gEntry number and :meth:`~Attr.has_entry` to determine if a particular gEntry number exists. Iterating over all entries is also supported:: >>> entrylist = [entry for entry in attribute] Deleting gEntries will leave a "hole": >>> attribute[0:3] = [1, 2, 3] >>> del attribute[1] >>> attribute.has_entry(1) False >>> attribute.has_entry(2) True >>> print attribute[0:3] [1, None, 3] Multi-element slices over nonexistent gEntries will return ``None`` where no entry exists. Single-element indices for nonexistent gEntries will raise ``IndexError``. Assigning ``None`` to a gEntry will delete it. 
When assigning to a gEntry, the type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing gEntry of the same number in this gAttribute #. other gEntries in this gAttribute #. data-matching constraints described in :meth:`CDF.new`. See Also ======== :class:`Attr` """ ENTRY_ = const.gENTRY_ ENTRY_DATA_ = const.gENTRY_DATA_ SCOPE = const.GLOBAL_SCOPE ENTRY_EXISTENCE_ = const.gENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMgENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXgENTRY_ ENTRY_NUMELEMS_ = const.gENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.gENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.gENTRY_DATASPEC_ class AttrList(MutableMapping): """Object representing a list of attributes. .. warning:: This class should not be used directly, but only via its subclasses, :class:`gAttrList` and :class:`zAttrList`. Methods listed here are safe to use from the subclasses. .. autosummary:: ~AttrList.clone ~AttrList.copy ~AttrList.from_dict ~AttrList.new ~AttrList.rename .. automethod:: clone .. automethod:: copy .. automethod:: from_dict .. automethod:: new .. automethod:: rename """ def __init__(self, cdf_file, special_entry=None): """Initialize the attribute collection @param cdf_file: CDF these attributes are in @type cdf_file: :py:class:`pycdf.CDF` @param special_entry: callable which returns a "special" entry number, used to limit results for zAttrs to those which match the zVar (i.e. the var number) @type special_entry: callable """ self._cdf_file = cdf_file self.special_entry = special_entry def __getitem__(self, name): """Find an Attribute by name @param name: name of the Attribute to return @type name: str @return: attribute named L{name} @rtype: L{Attr} @raise KeyError: if there is no attribute named L{name} @raise CDFError: other errors in CDF library """ try: attrib = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attrib.global_scope() != self.global_scope: raise KeyError(name + ': no ' + self.attr_name + ' by that name.') return attrib def __setitem__(self, name, data): """Create an Attribute or change its entries @param name: name of Attribute to change @type name: str @param data: Entries to populate this Attribute with. Any existing Entries will be deleted! Another C{Attr} may be specified, in which case all its entries are copied. @type data: scalar, list, or L{Attr} """ if isinstance(data, AttrList): if name in self: del self[name] attr = self._get_or_create(name) for entryno in range(data.max_idx()): if data.has_entry(entryno): attr.new(data[entryno], data.type(entryno), entryno) else: attr = self._get_or_create(name) if isinstance(data, str_classes): data = [data] else: try: junk = len(data) except TypeError: data = [data] attr[:] = data del attr[len(data):] def __delitem__(self, name): """Delete an Attribute (and all its entries) @param name: name of Attribute to delete @type name: str """ try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) attr._delete() def __iter__(self, current=0): """Iterates over all Attr in this CDF or variable Returns name of one L{Attr} at a time until reaches the end. @note: Returned in number order. 
""" count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) while current < count.value: candidate = self.AttrType(self._cdf_file, current) if candidate.global_scope() == self.global_scope: if self.special_entry is None or \ candidate.has_entry(self.special_entry()): if str == bytes: value = yield(candidate._name) else: value = yield(candidate._name.decode()) if value != None: current = self[value].number() current += 1 def __repr__(self): """Returns representation of attribute list Cannot return anything that can be eval'd to create a copy of the list, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<' + self.__class__.__name__ + ':\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute list This is an 'informal' representation in that it cannot be evaluated directly to create an L{AttrList}. @return: all the data in this list of attributes @rtype: str """ if self._cdf_file._opened: return '\n'.join([key + ': ' + ( ('\n' + ' ' * (len(key) + 2)).join( [str(value[i]) + ' [' + lib.cdftypenames[value.type(i)] + ']' for i in range(value.max_idx() + 1) if value.has_entry(i)]) if isinstance(value, Attr) else str(value) + ' [' + lib.cdftypenames[self.type(key)] + ']' ) for (key, value) in sorted(self.items())]) else: if isinstance(self._cdf_file.pathname, str): return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname) else: return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname.decode('ascii')) def clone(self, master, name=None, new_name=None): """ Clones another attribute list, or one attribute from it, into this list. Parameters ========== master : AttrList the attribute list to copy from. This can be any dict-like object. Other Parameters ================ name : str (optional) name of attribute to clone (default: clone entire list) new_name : str (optional) name of the new attribute, default ``name`` """ if name is None: self._clone_list(master) else: self._clone_attr(master, name, new_name) def copy(self): """ Create a copy of this attribute list Returns ======= out : dict copy of the entries for all attributes in this list """ return dict((key, value[:] if isinstance(value, Attr) else value) for (key, value) in self.items()) def new(self, name, data=None, type=None): """ Create a new Attr in this AttrList Parameters ========== name : str name of the new Attribute Other Parameters ================ data data to put into the first entry in the new Attribute type CDF type of the first entry from :mod:`~pycdf.const`. Only used if data are specified. Raises ====== KeyError : if the name already exists in this list """ if name in self: raise KeyError(name + ' already exists.') #A zAttr without an Entry in this zVar will be a "get" not "create" attr = self._get_or_create(name) if data is not None: if self.special_entry is None: attr.new(data, type) else: attr.new(data, type, self.special_entry()) def rename(self, old_name, new_name): """ Rename an attribute in this list Renaming a zAttribute renames it for *all* zVariables in this CDF! Parameters ========== old_name : str the current name of the attribute new_name : str the new name of the attribute """ AttrList.__getitem__(self, old_name).rename(new_name) def from_dict(self, in_dict): """ Fill this list of attributes from a dictionary .. deprecated:: 0.1.5 Use :meth:`~pycdf.AttrList.clone` instead; it supports cloning from dictionaries. 
Parameters ========== in_dict : dict Attribute list is populated entirely from this dictionary; all existing attributes are deleted. """ warnings.warn("from_dict is deprecated and will be removed. Use clone.", DeprecationWarning) for k in in_dict: self[k] = in_dict[k] for k in list(self): if not k in in_dict: del self[k] def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{AttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name self[new_name] = master[name] def _clone_list(self, master): """Clones this attribute list from another @param master: the attribute list to copy from @type master: L{AttrList} """ for name in master: self._clone_attr(master, name) for name in list(self): #Can't iterate over a list we're changing if not name in master: del self[name] def _get_or_create(self, name): """Retrieve L{Attr} or create it if it doesn't exist @param name: name of the attribute to look up or create @type name: str @return: attribute with this name @rtype: L{Attr} """ attr = None try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status != const.NO_SUCH_ATTR: raise if attr is None: attr = self.AttrType(self._cdf_file, name, True) elif attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) return attr class gAttrList(AttrList): """ Object representing *all* the gAttributes in a CDF. Normally accessed as an attribute of an open :class:`CDF`: >>> global_attribs = cdffile.attrs Appears as a dictionary: keys are attribute names; each value is an attribute represented by a :class:`gAttr` object. To access the global attribute TEXT: >>> text_attr = cdffile.attrs['TEXT'] See Also ======== :class:`AttrList` """ AttrType = gAttr attr_name = 'gAttribute' global_scope = True def __len__(self): """ Number of gAttributes in this CDF Returns ======= out : int number of gAttributes in the CDF """ count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMgATTRS_, ctypes.byref(count)) return count.value class zAttrList(AttrList): """Object representing *all* the zAttributes in a zVariable. Normally accessed as an attribute of a :class:`Var` in an open CDF: >>> epoch_attribs = cdffile['Epoch'].attrs Appears as a dictionary: keys are attribute names, values are the value of the zEntry associated with the appropriate zVariable. Each vAttribute in a CDF may only have a *single* entry associated with each variable. The entry may be a string, a single numerical value, or a series of numerical values. Entries with multiple values are returned as an entire list; direct access to the individual elements is not possible. Example: finding the first dependency of (ISTP-compliant) variable ``Flux``: >>> print cdffile['Flux'].attrs['DEPEND_0'] zAttributes are shared among zVariables, one zEntry allowed per zVariable. (pyCDF hides this detail.) Deleting the last zEntry for a zAttribute will delete the underlying zAttribute. zEntries are created and destroyed by the usual dict methods on the zAttrlist: >>> epoch_attribs['new_entry'] = [1, 2, 4] #assign a list to new zEntry >>> del epoch_attribs['new_entry'] #delete the zEntry The type of the zEntry is guessed from data provided. 
The type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing zEntry corresponding to this zVar #. other zEntries in this zAttribute #. the type of this zVar #. data-matching constraints described in :py:meth:`CDF.new` See Also ======== :class:`AttrList` """ AttrType = zAttr attr_name = 'zAttribute' global_scope = False def __init__(self, zvar): """Initialize the attribute collection @param zvar: zVariable these attributes are in @param zvar: :py:class:`pycdf.Var` """ super(zAttrList, self).__init__(zvar.cdf_file, zvar._num) self._zvar = zvar def __getitem__(self, name): """Find an zEntry by name @param name: name of the zAttribute to return @type name: str @return: attribute named L{name} @rtype: L{zAttr} @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if attrib.has_entry(zvar_num): attrib._raw = self._zvar._raw return attrib[zvar_num] else: raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) def __delitem__(self, name): """Delete an zEntry by name @param name: name of the zEntry to delete @type name: str @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library @note: If this is the only remaining entry, the Attribute will be deleted. """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(str(name) + ': no such attribute for variable ' + str(self._zvar._name)) del attrib[zvar_num] if len(attrib) == 0: attrib._delete() def __setitem__(self, name, data): """Sets a zEntry by name The type of the zEntry is guessed from L{data}. The type is chosen to match the data; subject to that constraint, it will try to match (in order): 1. existing zEntry corresponding to this zVar 2. other zEntries in this zAttribute 3. the type of this zVar 4. data-matching constraints described in L{_Hyperslice.types} @param name: name of zAttribute; zEntry for this zVariable will be set in zAttribute by this name @type name: str @raise CDFError: errors in CDF library @raise ValueError: if unable to find a valid CDF type matching L{data}, or if L{data} is the wrong dimensions. """ try: attr = super(zAttrList, self).__getitem__(name) except KeyError: attr = zAttr(self._cdf_file, name, True) attr._raw = self._zvar._raw attr[self._zvar._num()] = data def __len__(self): """Number of zAttributes in this variable @return: number of zAttributes in the CDF which have entries for this variable. @rtype: int """ length = 0 count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) current = 0 while current < count.value: candidate = zAttr(self._cdf_file, current) if not candidate.global_scope(): if candidate.has_entry(self._zvar._num()): length += 1 current += 1 return length def type(self, name, new_type=None): """Find or change the CDF type of a zEntry in this zVar @param name: name of the zAttr to check or change @type name: str @param new_type: type to change it to, see :py:mod:`pycdf.const` @type new_type: ctypes.c_long @return: CDF variable type, see :py:mod:`pycdf.const` @rtype: int @note: If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) return attrib.type(zvar_num, new_type) def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{zAttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name if new_name in self: del self[new_name] self.new(new_name, master[name], master.type(name) if hasattr(master, 'type') else None)
def _compress(obj, comptype=None, param=None): """Set or check the compression of a :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param obj: object on which to set or check compression @type obj: :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param comptype: type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :py:mod:`pycdf.const`. If not specified, will not change compression. @type comptype: ctypes.c_long @param param: Compression parameter, see CDF CRM 4.10 and :py:mod:`pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) @type param: ctypes.c_long @return: (comptype, param) currently in effect @rtype: tuple """ if isinstance(obj, CDF): COMPRESSION_ = const.CDF_COMPRESSION_ elif isinstance(obj, Var): COMPRESSION_ = const.zVAR_COMPRESSION_ else: raise ValueError('Must specify a CDF or Var type.') validparams = {const.NO_COMPRESSION.value: [ctypes.c_long(0)], const.RLE_COMPRESSION.value: [const.RLE_OF_ZEROs], const.HUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.AHUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.GZIP_COMPRESSION.value: [ctypes.c_long(5), ctypes.c_long(1), ctypes.c_long(2), ctypes.c_long(3), ctypes.c_long(4), ctypes.c_long(6), ctypes.c_long(7), ctypes.c_long(8), ctypes.c_long(9), ], } comptypes = [const.NO_COMPRESSION, const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION, const.GZIP_COMPRESSION] comptypevalues = [i.value for i in comptypes] if comptype != None: if not hasattr(comptype, 'value'): comptype = ctypes.c_long(comptype) if param is None: if not comptype.value in validparams: raise CDFError(const.BAD_COMPRESSION) param = validparams[comptype.value][0] paramlist = (ctypes.c_long * 1)(param) obj._call(const.PUT_, COMPRESSION_, comptype, paramlist) params = (ctypes.c_long * const.CDF_MAX_PARMS)(*([0] * const.CDF_MAX_PARMS)) comptype = ctypes.c_long(0) percent = ctypes.c_long(0) obj._call(const.GET_, COMPRESSION_, ctypes.byref(comptype), ctypes.byref(params), ctypes.byref(percent)) param = params[0] if not comptype.value in comptypevalues: raise CDFError(const.BAD_COMPRESSION) validparamvalues = [i.value for i in validparams[comptype.value]] if not param in validparamvalues: raise CDFError(const.BAD_COMPRESSION_PARM) comptype = comptypes[comptypevalues.index(comptype.value)] if comptype in (const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION): param = validparams[comptype.value][validparamvalues.index(param)] return (comptype, param)
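#Illustrative sketch (not part of the original module): how the _compress
#helper above is typically used. 'example.cdf' and the variable name 'Flux'
#are assumptions; calling with no comptype only queries the current setting.
# >>> cdf = CDF('example.cdf')
# >>> _compress(cdf)                      #-> (comptype, param) for the whole file
# >>> _compress(cdf['Flux'])              #-> (comptype, param) for one zVariable
# >>> #On a writable CDF, e.g. _compress(cdf, const.GZIP_COMPRESSION, 9) would
# >>> #switch to gzip level 9 (see CDF C reference manual section 4.10).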
1283
1356
#!/usr/bin/env python # -*- coding: utf-8 -*- """ das developers note: This a is modification of the original SpacePy pycdf package. All refereneces to the greater spacepy package have been removed to create a small standalone module. --cwp 2018-10-18 The libcdf.so location code has been changed to find the version installed in anaconda. --cwp 2020-04-06 This package provides a Python interface to the Common Data Format (CDF) library used for many NASA missions, available at http://cdf.gsfc.nasa.gov/. It is targeted at Python 2.6+ and should work without change on either Python 2 or Python 3. The interface is intended to be 'pythonic' rather than reproducing the C interface. To open or close a CDF and access its variables, see the :class:`CDF` class. Accessing data within the variables is via the :class:`Var` class. The :data:`lib` object provides access to some routines that affect the functionality of the library in general. The :mod:`~pycdf.const` module contains constants useful for accessing the underlying library. Authors: Jon Niehof Institution: University of New Hampshire Contact: [email protected] Copyright 2010-2015 Los Alamos National Security, LLC. """ __contact__ = 'Jon Niehof, [email protected]' try: from collections.abc import MutableMapping, MutableSequence except ImportError: from collections import MutableMapping, MutableSequence import ctypes import ctypes.util import datetime import operator import os import os.path import shutil import sys import tempfile import warnings import weakref import numpy import numpy.ma #Import const AFTER library loaded, so failed load doesn't leave half-imported #from . import const try: str_classes = (str, bytes, unicode) except NameError: str_classes = (str, bytes) class Library(object): """ Abstraction of the base CDF C library and its state. Not normally intended for end-user use. An instance of this class is created at package load time as the :data:`~pycdf.lib` variable, providing access to the underlying C library if necessary. The CDF library itself is described in section 2.1 of the CDF user's guide, as well as the CDF C reference manual. Calling the C library directly requires knowledge of :mod:`ctypes`. Instantiating this object loads the C library, see :doc:`/pycdf` docs for details. .. autosummary:: ~Library.call ~Library.check_status ~Library.datetime_to_epoch ~Library.datetime_to_epoch16 ~Library.datetime_to_tt2000 ~Library.epoch_to_datetime ~Library.epoch_to_epoch16 ~Library.epoch_to_num ~Library.epoch_to_tt2000 ~Library.epoch16_to_datetime ~Library.epoch16_to_epoch ~Library.epoch16_to_tt2000 ~Library.set_backward supports_int8 ~Library.tt2000_to_datetime ~Library.tt2000_to_epoch ~Library.tt2000_to_epoch16 v_datetime_to_epoch v_datetime_to_epoch16 v_datetime_to_tt2000 v_epoch_to_datetime v_epoch_to_tt2000 v_epoch16_to_datetime v_epoch16_to_tt2000 v_tt2000_to_datetime v_tt2000_to_epoch v_tt2000_to_epoch16 libpath version .. automethod:: call .. automethod:: check_status .. automethod:: datetime_to_epoch .. automethod:: datetime_to_epoch16 .. automethod:: datetime_to_tt2000 .. automethod:: epoch_to_datetime .. automethod:: epoch_to_epoch16 .. automethod:: epoch_to_num .. automethod:: epoch_to_tt2000 .. automethod:: epoch16_to_datetime .. automethod:: epoch16_to_epoch .. automethod:: epoch16_to_tt2000 .. automethod:: set_backward .. attribute:: supports_int8 True if this library supports INT8 and TIME_TT2000 types; else False. .. automethod:: tt2000_to_datetime .. automethod:: tt2000_to_epoch .. 
automethod:: tt2000_to_epoch16 .. method:: v_datetime_to_epoch(datetime) A vectorized version of :meth:`datetime_to_epoch` which takes a numpy array of datetimes as input and returns an array of epochs. .. method:: v_datetime_to_epoch16(datetime) A vectorized version of :meth:`datetime_to_epoch16` which takes a numpy array of datetimes as input and returns an array of epoch16. .. method:: v_datetime_to_tt2000(datetime) A vectorized version of :meth:`datetime_to_tt2000` which takes a numpy array of datetimes as input and returns an array of TT2000. .. method:: v_epoch_to_datetime(epoch) A vectorized version of :meth:`epoch_to_datetime` which takes a numpy array of epochs as input and returns an array of datetimes. .. method:: v_epoch_to_tt2000(epoch) A vectorized version of :meth:`epoch_to_tt2000` which takes a numpy array of epochs as input and returns an array of tt2000s. .. method:: v_epoch16_to_datetime(epoch0, epoch1) A vectorized version of :meth:`epoch16_to_datetime` which takes a numpy array of epoch16 as input and returns an array of datetimes. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_epoch16_to_tt2000(epoch16) A vectorized version of :meth:`epoch16_to_tt2000` which takes a numpy array of epoch16 as input and returns an array of tt2000s. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_tt2000_to_datetime(tt2000) A vectorized version of :meth:`tt2000_to_datetime` which takes a numpy array of tt2000 as input and returns an array of datetimes. .. method:: v_tt2000_to_epoch(tt2000) A vectorized version of :meth:`tt2000_to_epoch` which takes a numpy array of tt2000 as input and returns an array of epochs. .. method:: v_tt2000_to_epoch16(tt2000) A vectorized version of :meth:`tt2000_to_epoch16` which takes a numpy array of tt2000 as input and returns an array of epoch16. .. attribute:: libpath The path where pycdf found the CDF C library, potentially useful in debugging. If this contains just the name of a file (with no path information), then the system linker found the library for pycdf. On Linux, ``ldconfig -p`` may be useful for displaying the system's library resolution. .. attribute:: version Version of the CDF library, (version, release, increment, subincrement) """ def __init__(self, libpath=None, library=None): """Load the CDF C library. Searches for the library in the order: 1. Appropriately-named file in CDF_LIB 2. Appropriately-named file in CDF_BASE 3. Standard library search path @raise CDFError: BAD_DATA_TYPE if can't map types properly """ if not 'CDF_TMP' in os.environ: os.environ['CDF_TMP'] = tempfile.gettempdir() if not library: if not libpath: self.libpath, self._library = self._find_lib() if self._library is None: raise Exception(( 'Cannot load CDF C library; checked {0}. 
' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(self.libpath))) else: self._library = ctypes.CDLL(libpath) self.libpath = libpath else: self._library = library self.libpath = libpath self._library.CDFlib.restype = ctypes.c_long #commonly used, so set it up here self._library.EPOCHbreakdown.restype = ctypes.c_long self._library.computeEPOCH.restype = ctypes.c_double self._library.computeEPOCH.argtypes = [ctypes.c_long] * 7 self._library.computeEPOCH16.restype = ctypes.c_double self._library.computeEPOCH16.argtypes = [ctypes.c_long] * 10 + \ [ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDFsetFileBackward'): self._library.CDFsetFileBackward.restype = None self._library.CDFsetFileBackward.argtypes = [ctypes.c_long] #Map old name to the 3.7.1+ name if not hasattr(self._library, 'computeTT2000') \ and hasattr(self._library, 'CDF_TT2000_from_UTC_parts'): self._library.computeTT2000 \ = self._library.CDF_TT2000_from_UTC_parts if hasattr(self._library, 'computeTT2000'): self._library.computeTT2000.restype = ctypes.c_longlong self._library.computeTT2000.argtypes = \ [ctypes.c_double] *9 #Map old name to the 3.7.1+ name if not hasattr(self._library, 'breakdownTT2000') \ and hasattr(self._library, 'CDF_TT2000_to_UTC_parts'): self._library.breakdownTT2000 \ = self._library.CDF_TT2000_to_UTC_parts if hasattr(self._library, 'breakdownTT2000'): self._library.breakdownTT2000.restype = None self._library.breakdownTT2000.argtypes = \ [ctypes.c_longlong] + [ctypes.POINTER(ctypes.c_double)] * 9 if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH'): self._library.CDF_TT2000_to_UTC_EPOCH.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH.argtypes = [ctypes.c_longlong] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH'): self._library.CDF_TT2000_from_UTC_EPOCH.restype = ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH.argtypes = [ctypes.c_double] if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH16'): self._library.CDF_TT2000_to_UTC_EPOCH16.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH16.argtypes = \ [ctypes.c_longlong, ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH16'): self._library.CDF_TT2000_from_UTC_EPOCH16.restype = \ ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH16.argtypes = \ [ctypes.POINTER(ctypes.c_double * 2)] #Get CDF version information ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) sub = ctypes.c_char(b' ') self.call(const.GET_, const.LIB_VERSION_, ctypes.byref(ver), const.GET_, const.LIB_RELEASE_, ctypes.byref(rel), const.GET_, const.LIB_INCREMENT_, ctypes.byref(inc), const.GET_, const.LIB_subINCREMENT_, ctypes.byref(sub)) ver = ver.value rel = rel.value inc = inc.value sub = sub.value self.version = (ver, rel, inc, sub) self._del_middle_rec_bug = ver < 3 or (ver == 3 and (rel < 4 or (rel == 4 and inc < 1))) self.supports_int8 = (ver > 3 or (ver == 3 and rel >=4)) self.cdftypenames = {const.CDF_BYTE.value: 'CDF_BYTE', const.CDF_CHAR.value: 'CDF_CHAR', const.CDF_INT1.value: 'CDF_INT1', const.CDF_UCHAR.value: 'CDF_UCHAR', const.CDF_UINT1.value: 'CDF_UINT1', const.CDF_INT2.value: 'CDF_INT2', const.CDF_UINT2.value: 'CDF_UINT2', const.CDF_INT4.value: 'CDF_INT4', const.CDF_UINT4.value: 'CDF_UINT4', const.CDF_INT8.value: 'CDF_INT8', const.CDF_FLOAT.value: 'CDF_FLOAT', const.CDF_REAL4.value: 'CDF_REAL4', const.CDF_DOUBLE.value: 'CDF_DOUBLE', const.CDF_REAL8.value: 'CDF_REAL8', const.CDF_EPOCH.value: 'CDF_EPOCH', 
const.CDF_EPOCH16.value: 'CDF_EPOCH16', const.CDF_TIME_TT2000.value: 'CDF_TIME_TT2000', } self.numpytypedict = {const.CDF_BYTE.value: numpy.int8, const.CDF_CHAR.value: numpy.int8, const.CDF_INT1.value: numpy.int8, const.CDF_UCHAR.value: numpy.uint8, const.CDF_UINT1.value: numpy.uint8, const.CDF_INT2.value: numpy.int16, const.CDF_UINT2.value: numpy.uint16, const.CDF_INT4.value: numpy.int32, const.CDF_UINT4.value: numpy.uint32, const.CDF_INT8.value: numpy.int64, const.CDF_FLOAT.value: numpy.float32, const.CDF_REAL4.value: numpy.float32, const.CDF_DOUBLE.value: numpy.float64, const.CDF_REAL8.value: numpy.float64, const.CDF_EPOCH.value: numpy.float64, const.CDF_EPOCH16.value: numpy.dtype((numpy.float64, 2)), const.CDF_TIME_TT2000.value: numpy.int64, } self.timetypes = [const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value] if not self.supports_int8: del self.cdftypenames[const.CDF_INT8.value] del self.numpytypedict[const.CDF_INT8.value] del self.cdftypenames[const.CDF_TIME_TT2000.value] del self.numpytypedict[const.CDF_TIME_TT2000.value] elif sys.platform.startswith('linux') \ and os.uname()[4].startswith('arm') \ and hasattr(self._library, 'computeTT2000') \ and self._library.computeTT2000( 2010, 1, 1, 0, 0, 0, 0, 0, 0) != 315576066184000000: #TT2000 call failed, so probably need to type-pun #double arguments to variadic functions. #Calling convention for non-variadic functions with floats #is unique, but convention for ints is same as variadic. #So type-pun arguments to integers to force that calling #convention. if ctypes.sizeof(ctypes.c_longlong) != \ ctypes.sizeof(ctypes.c_double): warnings.warn('ARM with unknown type sizes; ' 'TT2000 functions will not work.') else: self._library.computeTT2000.argtypes = \ [ctypes.c_longlong] * 9 c_ll_p = ctypes.POINTER(ctypes.c_longlong) if self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( 2010)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) != 315576066184000000: warnings.warn('ARM with unknown calling convention; ' 'TT2000 functions will not work.') self.datetime_to_tt2000 = self._datetime_to_tt2000_typepunned v_epoch16_to_datetime = numpy.frompyfunc( self.epoch16_to_datetime, 2, 1) self.v_epoch16_to_datetime = \ lambda x: v_epoch16_to_datetime(x[..., 0], x[..., 1]) self.v_epoch_to_datetime = numpy.frompyfunc( self.epoch_to_datetime, 1, 1) self.v_tt2000_to_datetime = numpy.frompyfunc( self.tt2000_to_datetime, 1, 1) self.v_datetime_to_epoch = numpy.vectorize( self.datetime_to_epoch, otypes=[numpy.float64]) v_datetime_to_epoch16 = numpy.frompyfunc( self.datetime_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. 
We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_datetime_to_epoch16(x): retval = numpy.require(v_datetime_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_datetime_to_epoch16 = _v_datetime_to_epoch16 self.v_datetime_to_tt2000 = numpy.vectorize( self.datetime_to_tt2000, otypes=[numpy.int64]) self.v_epoch_to_tt2000 = numpy.vectorize( self.epoch_to_tt2000, otypes=[numpy.int64]) self.v_tt2000_to_epoch = numpy.vectorize( self.tt2000_to_epoch, otypes=[numpy.float64]) v_epoch16_to_tt2000 = numpy.frompyfunc( self.epoch16_to_tt2000, 2, 1) self.v_epoch16_to_tt2000 = \ lambda x: v_epoch16_to_tt2000(x[..., 0], x[..., 1]) v_tt2000_to_epoch16 = numpy.frompyfunc( self.tt2000_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_tt2000_to_epoch16(x): retval = numpy.require(v_tt2000_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_tt2000_to_epoch16 = _v_tt2000_to_epoch16 if not self.supports_int8: self.datetime_to_tt2000 = self._bad_tt2000 self.tt2000_to_datetime = self._bad_tt2000 self.v_datetime_to_tt2000 = self._bad_tt2000 self.v_tt2000_to_datetime = self._bad_tt2000 self.epoch_to_tt2000 = self._bad_tt2000 self.v_epoch_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch = self._bad_tt2000 self.v_tt2000_to_epoch = self._bad_tt2000 self.epoch_16_to_tt2000 = self._bad_tt2000 self.v_epoch16_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch16 = self._bad_tt2000 self.v_tt2000_to_epoch16 = self._bad_tt2000 #Default to V2 CDF self.set_backward(True) @staticmethod def _find_lib(): """ Search for the CDF library Searches in likely locations for CDF libraries and attempts to load them. Stops at first successful load and, if fails, reports all the files that were tried as libraries. Returns ======= out : tuple This is either (path to library, loaded library) or, in the event of failure, (None, list of libraries tried) """ failed = [] for libpath in Library._lib_paths(): try: lib = ctypes.CDLL(libpath) except: failed.append(libpath) else: return libpath, lib return (failed, None) @staticmethod def _lib_paths(): """Find candidate paths for the CDF library Does not check that the library is actually in any particular directory, just returns a list of possible locations, in priority order. Returns ======= out : generator of str paths that look like the CDF library """ #What the library might be named names = { 'win32': ['cdf.dll'], 'darwin': ['libcdf.dylib', 'cdf.dylib', 'libcdf.so'], 'linux2': ['libcdf.so'], 'linux': ['libcdf.so'], } names = names.get(sys.platform, ['libcdf.so']) #All existing CDF-library-like paths within a directory search_dir = lambda x: \ [os.path.join(x, fname) for fname in names if os.path.exists(os.path.join(x, fname))] # Only use anaconda locations... # Defined during builds ... if 'PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['PREFIX'], 'lib')): yield p # defined when conda is activated ... 
if 'CONDA_PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'lib')): yield p # Special subdirectory for anaconda unix packages on windows if 'LIBRARY_BIN' in os.environ: for p in search_dir(os.environ['LIBRARY_BIN']): yield p ctypespath = ctypes.util.find_library( 'cdf.dll' if sys.platform == 'win32' else 'cdf') if ctypespath: yield ctypespath def check_status(self, status, ignore=()): """ Raise exception or warning based on return status of CDF call Parameters ========== status : int status returned by the C library Other Parameters ================ ignore : sequence of ctypes.c_long CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. (Default none). Raises ====== CDFError : if status < CDF_WARN, indicating an error Warns ===== CDFWarning : if CDF_WARN <= status < CDF_OK, indicating a warning. Returns ======= out : int status (unchanged) """ if status == const.CDF_OK or status in ignore: return status if status < const.CDF_WARN: raise CDFError(status) else: warning = CDFWarning(status) warning.warn() return status def call(self, *args, **kwargs): """ Call the CDF internal interface Passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with :meth:`check_status`. Terminal NULL is automatically added to args. Parameters ========== args : various, see :mod:`ctypes` Passed directly to the CDF library interface. Useful constants are defined in the :mod:`~pycdf.const` module. Other Parameters ================ ignore : sequence of CDF statuses sequence of CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. Returns ======= out : int CDF status from the library Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ if 'ignore' in kwargs: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) ), kwargs['ignore']) else: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) )) def set_backward(self, backward=True): """ Set backward compatibility mode for new CDFs Unless backward compatible mode is set, CDF files created by the version 3 library can not be read by V2. Parameters ========== backward : boolean Set backward compatible mode if True; clear it if False. Raises ====== ValueError : if backward=False and underlying CDF library is V2 """ if self.version[0] < 3: if not backward: raise ValueError( 'Cannot disable backward-compatible mode for CDF version 2.') else: return self._library.CDFsetFileBackward(const.BACKWARDFILEon if backward else const.BACKWARDFILEoff) def epoch_to_datetime(self, epoch): """ Converts a CDF epoch value to a datetime Parameters ========== epoch : float epoch value from CDF Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
See Also ======== v_epoch_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) self._library.EPOCHbreakdown(ctypes.c_double(epoch), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999000) else: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, msec.value * 1000) def datetime_to_epoch(self, dt): """ Converts a Python datetime to a CDF Epoch value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : float epoch corresponding to dt See Also ======== v_datetime_to_epoch """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) micro = dt.microsecond % 1000 if micro >= 500 and dt.year < 9999: dt += datetime.timedelta(0, 0, 1000) return self._library.computeEPOCH(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000)) def epoch16_to_datetime(self, epoch0, epoch1): """ Converts a CDF epoch16 value to a datetime .. note:: The call signature has changed since SpacePy 0.1.2. Formerly this method took a single argument with two values; now it requires two arguments (one for each value). To convert existing code, replace ``epoch16_to_datetime(epoch)`` with ``epoch16_to_datetime(*epoch)``. Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. See Also ======== v_epoch16_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) usec = ctypes.c_long(0) nsec = ctypes.c_long(0) psec = ctypes.c_long(0) self._library.EPOCH16breakdown((ctypes.c_double * 2)(epoch0, epoch1), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec), ctypes.byref(psec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) micro = int(float(msec.value) * 1000 + float(usec.value) + float(nsec.value) / 1000 + float(psec.value) / 1e6 + 0.5) if micro < 1000000: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_epoch16(self, dt): """ Converts a Python datetime to a CDF Epoch16 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : list of float epoch16 corresponding to dt See Also ======== v_datetime_to_epoch16 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) #Default to "illegal epoch" epoch16 = (ctypes.c_double * 2)(-1., -1.) 
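        # computeEPOCH16 fills the two-element buffer in place; per the check
        # below, a nonzero return status is treated as failure and the
        # "illegal epoch" sentinel (-1., -1.) is returned instead.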
if self._library.computeEPOCH16(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0, 0, epoch16): return (-1., -1.) #Failure, so illegal epoch return (epoch16[0], epoch16[1]) def epoch_to_epoch16(self, epoch): """ Converts a CDF EPOCH to a CDF EPOCH16 value Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : (double, double) EPOCH16 corresponding to epoch """ e = numpy.require(epoch, numpy.float64) s = numpy.trunc(e / 1000.0) #ugly numpy stuff, probably a better way.... res = numpy.hstack((s, (e - s * 1000.0) * 1e9)) if len(res) <= 2: return res newshape = list(res.shape[0:-2]) newshape.append(res.shape[-1] // 2) newshape.append(2) return numpy.rollaxis(res.reshape(newshape), -1, -2) def epoch_to_num(self, epoch): """ Convert CDF EPOCH to matplotlib number. Same output as :func:`~matplotlib.dates.date2num` and useful for plotting large data sets without converting the times through datetime. Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : double Floating point number representing days since 0001-01-01. """ #date2num day 1 is 1/1/1 00UT #epoch 1/1/1 00UT is 31622400000.0 (millisecond) return (epoch - 31622400000.0) / (24 * 60 * 60 * 1000.0) + 1.0 def epoch16_to_epoch(self, epoch16): """ Converts a CDF EPOCH16 to a CDF EPOCH value Parameters ========== epoch16 : (double, double) EPOCH16 to convert. Lists and numpy arrays are acceptable. LAST dimension should be 2: the two pairs of EPOCH16 Returns ======= out : double EPOCH corresponding to epoch16 """ e = numpy.require(epoch16, numpy.float64) return e[..., 0] * 1000.0 + numpy.round(e[..., 1] / 1e9) def tt2000_to_datetime(self, tt2000): """ Converts a CDF TT2000 value to a datetime .. note:: Although TT2000 values support leapseconds, Python's datetime object does not. Any times after 23:59:59.999999 will be truncated to 23:59:59.999999. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
See Also ======== v_tt2000_to_datetime """ yyyy = ctypes.c_double(0) mm = ctypes.c_double(0) dd = ctypes.c_double(0) hh = ctypes.c_double(0) min = ctypes.c_double(0) sec = ctypes.c_double(0) msec = ctypes.c_double(0) usec = ctypes.c_double(0) nsec = ctypes.c_double(0) self._library.breakdownTT2000( ctypes.c_longlong(tt2000), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) sec = int(sec.value) if sec >= 60: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), 59, 999999) micro = int(msec.value * 1000 + usec.value + nsec.value / 1000 + 0.5) if micro < 1000000: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_tt2000(self, dt): """ Converts a Python datetime to a CDF TT2000 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0) def _datetime_to_tt2000_typepunned(self, dt): """ Converts a Python datetime to a CDF TT2000 value Typepunned version that passes doubles as longlongs, to get around ARM calling convention oddness. Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ c_ll_p = ctypes.POINTER(ctypes.c_longlong) if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( dt.year)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.month)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.day)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.hour)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.minute)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.second)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond // 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond % 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) def epoch_to_tt2000(self, epoch): """ Converts a CDF EPOCH to a CDF TT2000 value Parameters ========== epoch : double EPOCH to convert Returns ======= out : int tt2000 corresponding to epoch See Also ======== v_epoch_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH(epoch) def tt2000_to_epoch(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH .. note:: Although TT2000 values support leapseconds, CDF EPOCH values do not. 
Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double EPOCH corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch """ return self._library.CDF_TT2000_to_UTC_EPOCH(tt2000) def epoch16_to_tt2000(self, epoch0, epoch1): """ Converts a CDF epoch16 value to TT2000 .. note:: Because TT2000 does not support picoseconds, the picoseconds value in epoch is ignored (i.e., truncated.) Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : long TT2000 corresponding to epoch. See Also ======== v_epoch16_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH16( (ctypes.c_double * 2)(epoch0, epoch1)) def tt2000_to_epoch16(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH16 .. note:: Although TT2000 values support leapseconds, CDF EPOCH16 values do not. Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double, double EPOCH16 corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch16 """ #Default to "illegal epoch" if isn't populated epoch16 = (ctypes.c_double * 2)(-1., -1.) if self._library.CDF_TT2000_to_UTC_EPOCH16(tt2000, epoch16): return (-1., -1.) #Failure; illegal epoch return (epoch16[0], epoch16[1]) def _bad_tt2000(*args, **kwargs): """Convenience function for complaining that TT2000 not supported""" raise NotImplementedError( 'TT2000 functions require CDF library 3.4.0 or later') def download_library(): """Download and install the CDF library""" if sys.platform != 'win32': raise NotImplementedError( 'CDF library install only supported on Windows') try: import html.parser as HTMLParser except ImportError: import HTMLParser #https://stackoverflow.com/questions/1713038/super-fails-with-error-typeerror-argument-1-must-be-type-not-classobj class LinkParser(HTMLParser.HTMLParser, object): def __init__(self, *args, **kwargs): self.links_found = [] super(LinkParser, self).__init__(*args, **kwargs) def handle_starttag(self, tag, attrs): if tag != 'a' or attrs[0][0] != 'href': return self.links_found.append(attrs[0][1]) import re import subprocess try: import urllib.request as u except ImportError: import urllib as u # Removed reference to spacepy #import spacepy #if spacepy.config.get('user_agent', None): # class AppURLopener(u.FancyURLopener): # version = spacepy.config['user_agent'] # u._urlopener = AppURLopener() baseurl = 'https://spdf.sci.gsfc.nasa.gov/pub/software/cdf/dist/' url = u.urlopen(baseurl) listing = url.read() url.close() p = LinkParser() p.feed(listing) cdfdist = [l for l in p.links_found if re.match('^cdf3\d_\d(?:_\d)?/$', l)] if not cdfdist: raise RuntimeError( "Couldn't find CDF distribution directory to download") cdfdist.sort(key=lambda x: x.rstrip('/').split('_')) cdfverbase = cdfdist[-1].rstrip('/') instfname = cdfverbase + ('_0' if cdfverbase.count('_') == 1 else '') + \ '-setup-{0}.exe'.format(len('%x' % sys.maxsize)*4) insturl = baseurl + cdfverbase + '/windows/' + instfname tmpdir = tempfile.mkdtemp() try: fname, status = u.urlretrieve(insturl, os.path.join(tmpdir, instfname)) subprocess.check_call([fname, '/install', '/q1'], shell=False) finally: shutil.rmtree(tmpdir) _libpath, _library = Library._find_lib() 
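# _find_lib() returns (path, loaded library) on success or
# (list of paths tried, None) on failure; in the failure case the tried
# paths are reported in the exception below so the user can point
# os.environ["CDF_LIB"] at the right directory and re-import.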
if _library is None: raise Exception(('Cannot load CDF C library; checked {0}. ' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(_libpath))) from . import const lib = Library(_libpath, _library) """Module global library object. Initalized at module load time so all classes have ready access to the CDF library and a common state. E.g: >>> import pycdf >>> pycdf.lib.version (3, 3, 0, ' ') """ class CDFException(Exception): """ Base class for errors or warnings in the CDF library. Not normally used directly, but in subclasses :class:`CDFError` and :class:`CDFWarning`. Error messages provided by this class are looked up from the underlying C library. """ def __init__(self, status): """ Create a CDF Exception Uses CDF C library to look up an appropriate error message. Parameters ========== status : ctypes.c_long CDF status """ self.status = status self.string = 'CDF error ' + repr(status) + ', unable to get details.' message = ctypes.create_string_buffer(const.CDF_STATUSTEXT_LEN + 1) try: retval = lib._library.CDFlib(const.SELECT_, const.CDF_STATUS_, ctypes.c_long(status), const.GET_, const.STATUS_TEXT_, message, const.NULL_) if retval == const.CDF_OK: if isinstance(message.value, str): self.string = message.value elif isinstance(message.value, bytes): self.string = message.value.decode() except: pass def __str__(self): """ Error string associated with the library error. Returns ======= out : str Error message from the CDF library. """ return self.string class CDFError(CDFException): """Raised for an error in the CDF library.""" pass class CDFWarning(CDFException, UserWarning): """Used for a warning in the CDF library.""" def warn(self, level=4): """ Issues a warning based on the information stored in my exception Intended for use in check_status or similar wrapper function. Other Parameters ================ level : int optional (default 3), how far up the stack the warning should be reported. Passed directly to :class:`warnings.warn`. """ warnings.warn(self, self.__class__, level) class EpochError(Exception): """Used for errors in epoch routines""" pass def _compress(obj, comptype=None, param=None): """Set or check the compression of a :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param obj: object on which to set or check compression @type obj: :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param comptype: type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :py:mod:`pycdf.const`. If not specified, will not change compression. @type comptype: ctypes.c_long @param param: Compression parameter, see CDF CRM 4.10 and :py:mod:`pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
@type param: ctypes.c_long @return: (comptype, param) currently in effect @rtype: tuple """ if isinstance(obj, CDF): COMPRESSION_ = const.CDF_COMPRESSION_ elif isinstance(obj, Var): COMPRESSION_ = const.zVAR_COMPRESSION_ else: raise ValueError('Must specify a CDF or Var type.') validparams = {const.NO_COMPRESSION.value: [ctypes.c_long(0)], const.RLE_COMPRESSION.value: [const.RLE_OF_ZEROs], const.HUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.AHUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.GZIP_COMPRESSION.value: [ctypes.c_long(5), ctypes.c_long(1), ctypes.c_long(2), ctypes.c_long(3), ctypes.c_long(4), ctypes.c_long(6), ctypes.c_long(7), ctypes.c_long(8), ctypes.c_long(9), ], } comptypes = [const.NO_COMPRESSION, const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION, const.GZIP_COMPRESSION] comptypevalues = [i.value for i in comptypes] if comptype != None: if not hasattr(comptype, 'value'): comptype = ctypes.c_long(comptype) if param is None: if not comptype.value in validparams: raise CDFError(const.BAD_COMPRESSION) param = validparams[comptype.value][0] paramlist = (ctypes.c_long * 1)(param) obj._call(const.PUT_, COMPRESSION_, comptype, paramlist) params = (ctypes.c_long * const.CDF_MAX_PARMS)(*([0] * const.CDF_MAX_PARMS)) comptype = ctypes.c_long(0) percent = ctypes.c_long(0) obj._call(const.GET_, COMPRESSION_, ctypes.byref(comptype), ctypes.byref(params), ctypes.byref(percent)) param = params[0] if not comptype.value in comptypevalues: raise CDFError(const.BAD_COMPRESSION) validparamvalues = [i.value for i in validparams[comptype.value]] if not param in validparamvalues: raise CDFError(const.BAD_COMPRESSION_PARM) comptype = comptypes[comptypevalues.index(comptype.value)] if comptype in (const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION): param = validparams[comptype.value][validparamvalues.index(param)] return (comptype, param) class CDF(MutableMapping): """ Python object representing a CDF file. Open or create a CDF file by creating an object of this class. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. A readonly CDF with many variables may be slow to close. See :meth:`readonly`. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :meth:`close` or :meth:`save` when done. .. note:: Existing CDF files are opened read-only by default, see :meth:`readonly` to change. CDF supports the `with <http://docs.python.org/tutorial/inputoutput.html#methods-of-file-objects>`_ keyword, like other file objects, so: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... #do brilliant things with the CDF will open the CDF, execute the indented statements, and close the CDF when finished or when an error occurs. The `python docs <http://docs.python.org/reference/compound_stmts.html#with>`_ include more detail on this 'context manager' ability. 
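    The equivalent without ``with`` (a minimal sketch) is:

    >>> cdffile = pycdf.CDF('cdf_filename.cdf')
    >>> try:
    ...     pass  # do brilliant things with the CDF
    ... finally:
    ...     cdffile.close()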
CDF objects behave like a python `dictionary <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_, where the keys are names of variables in the CDF, and the values, :class:`Var` objects. As a dictionary, they are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_ and it is easy to loop over all of the variables in a file. Some examples: #. List the names of all variables in the open CDF ``cdffile``: >>> cdffile.keys() >>> for k in cdffile: #Alternate ... print(k) #. Get a :class:`Var` object for the variable named ``Epoch``: >>> epoch = cdffile['Epoch'] #. Determine if a CDF contains a variable named ``B_GSE``: >>> if 'B_GSE' in cdffile: ... print('B_GSE is in the file') ... else: ... print('B_GSE is not in the file') #. Find how many variables are in the file: >>> print(len(cdffile)) #. Delete the variable ``Epoch`` from the open CDF file ``cdffile``: >>> del cdffile['Epoch'] #. Display a summary of variables and types in open CDF file ``cdffile``: >>> print(cdffile) #. Open the CDF named ``cdf_filename.cdf``, read *all* the data from all variables into dictionary ``data``, and close it when done or if an error occurs: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... data = cdffile.copy() This last example can be very inefficient as it reads the entire CDF. Normally it's better to treat the CDF as a dictionary and access only the data needed, which will be pulled transparently from disc. See :class:`Var` for more subtle examples. Potentially useful dictionary methods and related functions: - `in <http://docs.python.org/reference/expressions.html#in>`_ - `keys <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_ - :py:func:`len` - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - :py:func:`sorted` - :py:func:`~spacepy.toolbox.dictree` The CDF user's guide section 2.2 has more background information on CDF files. The :attr:`~CDF.attrs` Python attribute acts as a dictionary referencing CDF attributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`gAttrList` for more on the dictionary of global attributes. Creating a new CDF from a master (skeleton) CDF has similar syntax to opening one: >>> cdffile = pycdf.CDF('cdf_filename.cdf', 'master_cdf_filename.cdf') This creates and opens ``cdf_filename.cdf`` as a copy of ``master_cdf_filename.cdf``. Using a skeleton CDF is recommended over making a CDF entirely from scratch, but this is possible by specifying a blank master: >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') When CDFs are created in this way, they are opened read-write, see :py:meth:`readonly` to change. By default, new CDFs (without a master) are created in version 2 (backward-compatible) format. To create a version 3 CDF, use :meth:`Library.set_backward`: >>> pycdf.lib.set_backward(False) >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') Add variables by direct assignment, which will automatically set type and dimension based on the data provided: >>> cdffile['new_variable_name'] = [1, 2, 3, 4] or, if more control is needed over the type and dimensions, use :py:meth:`new`. Although it is supported to assign Var objects to Python variables for convenience, there are some minor pitfalls that can arise when changing a CDF that will not affect most users. 
This is only a concern when assigning a zVar object to a Python variable, changing the CDF through some other variable, and then trying to use the zVar object via the originally assigned variable. Deleting a variable: >>> var = cdffile['Var1'] >>> del cdffile['Var1'] >>> var[0] #fail, no such variable Renaming a variable: >>> var = cdffile['Var1'] >>> cdffile['Var1'].rename('Var2') >>> var[0] #fail, no such variable Renaming via the same variable works: >>> var = cdffile['Var1'] >>> var.rename('Var2') >>> var[0] #succeeds, aware of new name Deleting a variable and then creating another variable with the same name may lead to some surprises: >>> var = cdffile['Var1'] >>> var[...] = [1, 2, 3, 4] >>> del cdffile['Var1'] >>> cdffile.new('Var1', data=[5, 6, 7, 8] >>> var[...] [5, 6, 7, 8] .. autosummary:: ~CDF.attr_num ~CDF.attrs ~CDF.add_attr_to_cache ~CDF.add_to_cache ~CDF.backward ~CDF.checksum ~CDF.clear_attr_from_cache ~CDF.clear_from_cache ~CDF.clone ~CDF.close ~CDF.col_major ~CDF.compress ~CDF.copy ~CDF.from_data ~CDF.new ~CDF.raw_var ~CDF.readonly ~CDF.save ~CDF.var_num ~CDF.version .. attribute:: CDF.attrs Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. .. attribute:: CDF.backward True if this CDF was created in backward-compatible mode (for opening with CDF library before 3.x) .. automethod:: add_to_cache .. automethod:: add_attr_to_cache .. automethod:: attr_num .. automethod:: checksum .. automethod:: clear_from_cache .. automethod:: clear_attr_from_cache .. automethod:: clone .. automethod:: close .. automethod:: col_major .. automethod:: compress .. automethod:: copy .. automethod:: from_data .. automethod:: new .. automethod:: raw_var .. automethod:: readonly .. automethod:: save .. automethod:: var_num .. automethod:: version """ def __init__(self, pathname, masterpath=None, create=None, readonly=None): """Open or create a CDF file. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. Raises ====== CDFError if CDF library reports an error CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save` when done. 
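        To create a new, empty CDF rather than open an existing one
        (a minimal sketch; the filename is illustrative):

        >>> newfile = pycdf.CDF('new_filename.cdf', '')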
""" if masterpath is not None: #Looks like we want to create if create is False: raise ValueError('Cannot specify a master CDF without creating a CDF') if readonly is True: raise ValueError('Cannot create a CDF in readonly mode') if create and readonly: raise ValueError('Cannot create a CDF in readonly mode') try: self.pathname = pathname.encode() except AttributeError: raise ValueError( 'pathname must be string-like: {0}'.format(pathname)) self._handle = ctypes.c_void_p(None) self._opened = False if masterpath is None and not create: self._open(True if readonly is None else readonly) elif masterpath: self._from_master(masterpath.encode()) else: self._create() lib.call(const.SELECT_, const.CDF_zMODE_, ctypes.c_long(2)) self._attrlistref = weakref.ref(gAttrList(self)) self.backward = self.version()[0] < 3 self._var_nums = {} """Cache of name-to-number mappings for variables in this CDF""" self._attr_info = {} """Cache of name-to-(number, global) mappings for attributes in this CDF""" def __del__(self): """Destructor; called when CDF object is destroyed. Close CDF file if there is still a valid handle. .. note:: To avoid data loss, explicitly call :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save`. """ if self._opened: self.close() def __delitem__(self, name): """Delete a zVariable in this CDF, by name or number Parameters ========== name : string or int Name or number of the CDF variable .. note: Variable numbers may change if variables are added or removed. Examples ======== Delete the variable ``Epoch`` from the open CDF file ``cdffile``. >>> del cdffile['Epoch'] """ self[name]._delete() def __enter__(self): """Context manager entrance function.""" return self def __exit__(self, type, value, traceback): """Context manager exit function. Close CDF file. """ self.close() def __getitem__(self, name): """Gets a zVariable in this CDF, by name or number The CDF acts like a dict @param name: Name or number of the CDF variable @type name: string or int @return: CDF variable named or numbered L{name} @rtype: :py:class:`pycdf.Var` @raise KeyError: for pretty much any problem in lookup @note: variable numbers may change if variables are added or removed. """ try: return Var(self, name) except CDFException as e: raise KeyError('{0}: {1}'.format(name, e)) def __setitem__(self, name, data): """Writes data to a zVariable in this CDF If the zVariable does not exist, will create one matching L{data}. If it does exist, will attempt to write L{data} to it without changing the type or dimensions. @param name: name or number of the variable to write @type name: str or int @param data: data to write, or a :py:class:`pycdf.Var` to copy """ if isinstance(data, Var): self.clone(data, name) elif name in self: self[name][...] 
= data if hasattr(data, 'attrs'): self[name].attrs.clone(data.attrs) else: self.new(name, data) def __iter__(self, current = 0): """Iterates over zVars in CDF Iterators for dicts return keys @note: Returned in variable-number order """ while current < self.__len__(): name = self[current].name() value = (yield name) if value is None: current += 1 else: current = self[value]._num() current += 1 def __len__(self): """Implements 'length' of CDF (number of zVars) @return: number of zVars in the CDF @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, const.CDF_NUMzVARS_, ctypes.byref(count)) return count.value def __contains__(self, key): """Determines whether a particular variable name is in the CDF @note: Essentially an efficiency function; L{__iter__} is called if this isn't defined @param key: key/variable name to check @type key: string @return: True if L{key} is the name of a variable in CDF, else False @rtype: Boolean """ try: foo = self[key] return True except KeyError as e: expected = str(key) + \ ": NO_SUCH_VAR: Named variable not found in this CDF." if expected in e.args: return False raise def __repr__(self): """Returns representation of CDF Cannot return anything that can be eval'd to create a copy of the CDF, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<CDF:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the CDF This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.CDF`, just the names, types, and sizes of all variables. (Attributes are not listed.) @return: description of the variables in the CDF @rtype: str """ if self._opened: return '\n'.join([key + ': ' + str(value) for (key, value) in sorted(self.items())]) #can get away with this sort because second value in tuple isn't #compared unless first are different, and variable name is unique. else: if isinstance(self.pathname, str): return 'Closed CDF {0}'.format(self.pathname) else: return 'Closed CDF {0}'.format(self.pathname.decode('ascii')) def _open(self, readonly=True): """Opens the CDF file (called on init) Will open an existing CDF file read/write. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.OPEN_, const.CDF_, self.pathname, ctypes.byref(self._handle)) self._opened = True if readonly: #Default is RW self.readonly(readonly) def _create(self): """Creates (and opens) a new CDF file Created at ``pathname``. Assumes zero-dimension r variables Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.CREATE_, const.CDF_, self.pathname, ctypes.c_long(0), (ctypes.c_long * 1)(0), ctypes.byref(self._handle)) self._opened = True def _from_master(self, master_path): """Creates a new CDF from a master CDF file ``master_path`` is copied to ``pathname`` and opened. Parameters ========== master_path : string location of the master CDF file Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. 
note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ if os.path.exists(self.pathname): raise CDFError(const.CDF_EXISTS) shutil.copy2(master_path, self.pathname) self._open(False) def _call(self, *args, **kwargs): """Select this CDF as current and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. Parameters ========== args : various, see :py:mod:`ctypes`. Passed directly to the CDF library interface. Useful constants are defined in the :doc:`const <pycdf_const>` module of this package. Returns ======= out : ctypes.c_long CDF status from the library .. note: Terminal NULL_ is automatically added to ``args``. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. """ return lib.call(const.SELECT_, const.CDF_, self._handle, *args, **kwargs) def clone(self, zVar, name=None, data=True): """ Clone a zVariable (from another CDF or this) into this CDF Parameters ========== zVar : :py:class:`Var` variable to clone Other Parameters ================ name : str Name of the new variable (default: name of the original) data : boolean (optional) Copy data, or only type, dimensions, variance, attributes? (default: True, copy data as well) Returns ======= out : :py:class:`Var` The newly-created zVar in this CDF """ if name is None: name = zVar.name() if name in self: del self[name] self.new(name, type=zVar.type(), recVary=zVar.rv(), dimVarys=zVar.dv(), dims=zVar._dim_sizes(), n_elements=zVar._nelems()) self[name].compress(*zVar.compress()) self[name].attrs.clone(zVar.attrs) if data: r = zVar._raw zVar._raw = True self.raw_var(name)[...] = zVar[...] zVar._raw = r return zVar def col_major(self, new_col=None): """ Finds the majority of this CDF file Other Parameters ================ new_col : boolean Specify True to change to column-major, False to change to row major, or do not specify to check the majority rather than changing it. (default is check only) Returns ======= out : boolean True if column-major, false if row-major """ if new_col != None: new_maj = const.COLUMN_MAJOR if new_col else const.ROW_MAJOR self._call(const.PUT_, const.CDF_MAJORITY_, new_maj) maj = ctypes.c_long(0) self._call(const.GET_, const.CDF_MAJORITY_, ctypes.byref(maj)) if not maj.value in (const.ROW_MAJOR.value, const.COLUMN_MAJOR.value): raise CDFError(const.BAD_MAJORITY) return maj.value == const.COLUMN_MAJOR.value def readonly(self, ro=None): """ Sets or check the readonly status of this CDF If the CDF has been changed since opening, setting readonly mode will have no effect. .. note:: Closing a CDF that has been opened readonly, or setting readonly False, may take a substantial amount of time if there are many variables in the CDF, as a (potentially large) cache needs to be cleared. Consider specifying ``readonly=False`` when opening the file if this is an issue. However, this may make some reading operations slower. Other Parameters ================ ro : Boolean True to set the CDF readonly, False to set it read/write, or leave out to check only. 
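            For example (a hedged sketch, assuming ``cdffile`` is open and
            unchanged since opening), ``cdffile.readonly(False)`` switches it
            to read/write, while ``cdffile.readonly()`` only reports the
            current mode.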
Returns ======= out : Boolean True if CDF is read-only, else False Raises ====== CDFError : if bad mode is set """ if ro == True: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYon) elif ro == False: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYoff) mode = ctypes.c_long(0) self._call(const.CONFIRM_, const.CDF_READONLY_MODE_, ctypes.byref(mode)) if mode.value == const.READONLYon.value: return True elif mode.value == const.READONLYoff.value: return False else: raise CDFError(const.BAD_READONLY_MODE.value) def checksum(self, new_val=None): """ Set or check the checksum status of this CDF. If checksums are enabled, the checksum will be verified every time the file is opened. Other Parameters ================ new_val : boolean True to enable checksum, False to disable, or leave out to simply check. Returns ======= out : boolean True if the checksum is enabled or False if disabled """ if new_val != None: self._call(const.PUT_, const.CDF_CHECKSUM_, const.MD5_CHECKSUM if new_val else const.NO_CHECKSUM) chk = ctypes.c_long(0) self._call(const.GET_, const.CDF_CHECKSUM_, ctypes.byref(chk)) if not chk.value in (const.MD5_CHECKSUM.value, const.NO_CHECKSUM.value): raise CDFError(const.BAD_CHECKSUM) return chk.value == const.MD5_CHECKSUM.value def close(self): """ Closes the CDF file Although called on object destruction (:meth:`~CDF.__del__`), to ensure all data are saved, the user should explicitly call :meth:`~CDF.close` or :meth:`~CDF.save`. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.CLOSE_, const.CDF_) self._opened = False def compress(self, comptype=None, param=None): """ Set or check the compression of this CDF Sets compression on entire *file*, not per-variable. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) Returns ======= out : tuple (comptype, param) currently in effect See Also ======== :meth:`Var.compress` Examples ======== Set file ``cdffile`` to gzip compression, compression level 9: >>> cdffile.compress(pycdf.const.GZIP_COMPRESSION, 9) """ return _compress(self, comptype, param) def new(self, name, data=None, type=None, recVary=True, dimVarys=None, dims=None, n_elements=None, compress=None, compress_param=None): """ Create a new zVariable in this CDF .. note:: Either ``data`` or ``type`` must be specified. If type is not specified, it is guessed from ``data``. Parameters ========== name : str name of the new variable Other Parameters ================ data data to store in the new variable. If this has a an ``attrs`` attribute (e.g., :class:`~spacepy.datamodel.dmarray`), it will be used to populate attributes of the new variable. type : ctypes.c_long CDF type of the variable, from :mod:`~pycdf.const`. See section 2.5 of the CDF user's guide for more information on CDF data types. recVary : boolean record variance of the variable (default True) dimVarys : list of boolean dimension variance of each dimension, default True for all dimensions. 
dims : list of int size of each dimension of this variable, default zero-dimensional. Note this is the dimensionality as defined by CDF, i.e., for record-varying variables it excludes the leading record dimension. See :py:class:`Var`. n_elements : int number of elements, should be 1 except for CDF_CHAR, for which it's the length of the string. compress : ctypes.c_long Compression to apply to this variable, default None. See :py:meth:`Var.compress`. compress_param : ctypes.c_long Compression parameter if compression used; reasonable default is chosen. See :py:meth:`Var.compress`. Returns ======= out : :py:class:`Var` the newly-created zVariable Raises ====== ValueError : if neither data nor sufficient typing information is provided. Notes ===== Any given data may be representable by a range of CDF types; if the type is not specified, pycdf will guess which the CDF types which can represent this data. This breaks down to: #. If input data is a numpy array, match the type of that array #. Proper kind (numerical, string, time) #. Proper range (stores highest and lowest number provided) #. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: #. Type that matches precision of data first, then #. integer type before float type, then #. Smallest type first, then #. signed type first, then #. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if ``data`` specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: #. absolute values between 0 and 3e-39 #. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. """ if type in (const.CDF_EPOCH16, const.CDF_INT8, const.CDF_TIME_TT2000) \ and self.backward: raise ValueError('Cannot use EPOCH16, INT8, or TIME_TT2000 ' 'in backward-compatible CDF') if not lib.supports_int8 and \ type in (const.CDF_INT8, const.CDF_TIME_TT2000): raise ValueError('INT8 and TIME_TT2000 require CDF library 3.4.0') if data is None: if type is None: raise ValueError('Must provide either data or a CDF type.') if dims is None: dims = [] if n_elements is None: n_elements = 1 else: (guess_dims, guess_types, guess_elements) = _Hyperslice.types(data) if dims is None: if recVary: if guess_dims == (): raise ValueError( 'Record-varying data cannot be scalar. 
' 'Specify NRV with CDF.new() or put data in array.') dims = guess_dims[1:] else: dims = guess_dims if type is None: type = guess_types[0] if type == const.CDF_EPOCH16.value and self.backward: type = const.CDF_EPOCH if n_elements is None: n_elements = guess_elements if dimVarys is None: dimVarys = [True for i in dims] recVary = const.VARY if recVary else const.NOVARY dimVarys = [const.VARY if dimVary else const.NOVARY for dimVary in dimVarys] if not hasattr(type, 'value'): type = ctypes.c_long(type) if type.value == const.CDF_INT8.value and not lib.supports_int8: raise ValueError( '64-bit integer support require CDF library 3.4.0') if type.value in (const.CDF_EPOCH16.value, const.CDF_INT8.value, const.CDF_TIME_TT2000.value) \ and self.backward: raise ValueError('Data requires EPOCH16, INT8, or TIME_TT2000; ' 'incompatible with backward-compatible CDF') new_var = Var(self, name, type, n_elements, dims, recVary, dimVarys) if compress != None: new_var.compress(compress, compress_param) if data is not None: new_var[...] = data if hasattr(data, 'attrs'): new_var.attrs.clone(data.attrs) return new_var def raw_var(self, name): """ Get a "raw" :class:`Var` object. Normally a :class:`Var` will perform translation of values for certain types (to/from Unicode for CHAR variables on Py3k, and to/from datetime for all time types). A "raw" object does not perform this translation, on read or write. This does *not* affect the data on disk, and in fact it is possible to maintain multiple Python objects with access to the same zVariable. Parameters ========== name : str name or number of the zVariable """ v = self[name] v._raw = True return v def save(self): """ Saves the CDF file but leaves it open. If closing the CDF, :meth:`close` is sufficient; there is no need to call :meth:`save` before :meth:`close`. .. note:: Relies on an undocumented call of the CDF C library, which is also used in the Java interface. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.SAVE_, const.CDF_) def copy(self): """ Make a copy of all data and attributes in this CDF Returns ======= out : :py:class:`CDFCopy` :class:`~spacepy.datamodel.SpaceData`-like object of all data """ return CDFCopy(self) def version(self): """ Get version of library that created this CDF Returns ======= out : tuple version of CDF library, in form (version, release, increment) """ ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) self._call(const.GET_, const.CDF_VERSION_, ctypes.byref(ver), const.GET_, const.CDF_RELEASE_, ctypes.byref(rel), const.GET_, const.CDF_INCREMENT_, ctypes.byref(inc)) return (ver.value, rel.value, inc.value) def _get_attrs(self): """Get attribute list Provide access to the CDF's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = gAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. 
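    A dict-like usage sketch (the attribute names are illustrative):

    >>> cdffile.attrs['Project']
    >>> cdffile.attrs['new_attr'] = 'some value'
    >>> 'Project' in cdffile.attrs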
""") def var_num(self, varname): """Get the variable number of a particular variable name This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : int Variable number of this zvariable. """ num = self._var_nums.get(varname, None) if num is None: #Copied from Var._get, which can hopefully be thinned varNum = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMBER_, varname, ctypes.byref(varNum)) num = varNum.value self._var_nums[varname] = num return num def attr_num(self, attrname): """Get the attribute number and scope by attribute name This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : tuple attribute number, scope (True for global) of this attribute """ res = self._attr_info.get(attrname, None) if res is None: #Copied from Var._get, which can hopefully be thinned attrNum = ctypes.c_long(0) self._call(const.GET_, const.ATTR_NUMBER_, attrname, ctypes.byref(attrNum)) scope = ctypes.c_long(0) self._call(const.SELECT_, const.ATTR_, attrNum, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) res = (attrNum.value, scope) self._attr_info[attrname] = res return res def clear_attr_from_cache(self, attrname): """Mark an attribute deleted in the name-to-number cache Will remove an attribute, and all attributes with higher numbers, from the attribute cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the attribute. Not this is NOT a string in Python 3! """ num, scope = self.attr_num(attrname) #All numbers higher than this are renumbered for a, n in list(self._attr_info.items()): if n[0] >= num: del self._attr_info[a] def clear_from_cache(self, varname): """Mark a variable deleted in the name-to-number cache Will remove a variable, and all variables with higher numbers, from the variable cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! """ num = self.var_num(varname) #All numbers higher than this are renumbered for v, n in list(self._var_nums.items()): if n >= num: del self._var_nums[v] def add_attr_to_cache(self, attrname, num, scope): """Add an attribute to the name-to-number cache This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable scope : bool True if global scope; False if variable scope. 
""" self._attr_info[attrname] = (num, scope) def add_to_cache(self, varname, num): """Add a variable to the name-to-number cache This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable """ self._var_nums[varname] = num #Note there is no function for delete, currently handled in Var.rename #and Attr.rename by just deleting from the dict directly. Maybe this #should be differen (maybe should be possible to follow a variable across #a rename...) class Var(MutableSequence): """ A CDF variable. This object does not directly store the data from the CDF; rather, it provides access to the data in a format that much like a Python list or numpy :class:`~numpy.ndarray`. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. The CDF user's guide, section 2.3, provides background on variables. .. note:: Not intended to be created directly; use methods of :class:`CDF` to gain access to a variable. A record-varying variable's data are viewed as a hypercube of dimensions n_dims+1 (the extra dimension is the record number). They are indexed in row-major fashion, i.e. the last index changes most frequently / is contiguous in memory. If the CDF is column-major, the data are transformed to row-major before return. Non record-varying variables are similar, but do not have the extra dimension of record number. Variables can be subscripted by a multidimensional index to return the data. Indices are in row-major order with the first dimension representing the record number. If the CDF is column major, the data are reordered to row major. Each dimension is specified by standard Python `slice <http://docs.python.org/tutorial/introduction.html#strings>`_ notation, with dimensions separated by commas. The ellipsis fills in any missing dimensions with full slices. The returned data are lists; Python represents multidimensional arrays as nested lists. The innermost set of lists represents contiguous data. .. note:: numpy 'fancy indexing' is *not* supported. Degenerate dimensions are 'collapsed', i.e. no list of only one element will be returned if a single subscript is specified instead of a range. (To avoid this, specify a slice like 1:2, which starts with 1 and ends before 2). Two special cases: 1. requesting a single-dimension slice for a record-varying variable will return all data for that record number (or those record numbers) for that variable. 2. Requests for multi-dimensional variables may skip the record-number dimension and simply specify the slice on the array itself. In that case, the slice of the array will be returned for all records. In the event of ambiguity (e.g., single-dimension slice on a one-dimensional variable), case 1 takes priority. Otherwise, mismatch between the number of dimensions specified in the slice and the number of dimensions in the variable will cause an :exc:`~exceptions.IndexError` to be thrown. This all sounds very complicated but it is essentially attempting to do the 'right thing' for a range of slices. An unusual case is scalar (zero-dimensional) non-record-varying variables. Clearly they cannot be subscripted normally. 
In this case, use the ``[...]`` syntax meaning 'access all data.': >>> import pycdf >>> testcdf = pycdf.CDF('test.cdf', '') >>> variable = testcdf.new('variable', recVary=False, ... type=pycdf.const.CDF_INT4) >>> variable[...] = 10 >>> variable <Var: CDF_INT4 [] NRV > >>> variable[...] 10 Reading any empty non-record-varying variable will return an empty with the same *number* of dimensions, but all dimensions will be of zero length. The scalar is, again, a special case: due to the inability to have a numpy array which is both zero-dimensional and empty, reading an NRV scalar variable with no data will return an empty one-dimensional array. This is really not recommended. As a list type, variables are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_; iterating over a variable returns a single complete record at a time. This is all clearer with examples. Consider a variable ``B_GSM``, with three elements per record (x, y, z components) and fifty records in the CDF. Then: 1. ``B_GSM[0, 1]`` is the y component of the first record. 2. ``B_GSM[10, :]`` is a three-element list, containing x, y, and z components of the 11th record. As a shortcut, if only one dimension is specified, it is assumed to be the record number, so this could also be written ``B_GSM[10]``. 3. ``B_GSM[...]`` reads all data for ``B_GSM`` and returns it as a fifty-element list, each element itself being a three-element list of x, y, z components. Multidimensional example: consider fluxes stored as a function of pitch angle and energy. Such a variable may be called Flux and stored as a two-dimensional array, with the first dimension representing (say) ten energy steps and the second, eighteen pitch angle bins (ten degrees wide, centered from 5 to 175 degrees). Assume 100 records stored in the CDF (i.e. 100 different times). 1. ``Flux[4]`` is a list of ten elements, one per energy step, each element being a list of 18 fluxes, one per pitch bin. All are taken from the fifth record in the CDF. 2. ``Flux[4, :, 0:4]`` is the same record, all energies, but only the first four pitch bins (roughly, field-aligned). 3. ``Flux[..., 0:4]`` is a 100-element list (one per record), each element being a ten-element list (one per energy step), each containing fluxes for the first four pitch bins. This slicing notation is very flexible and allows reading specifically the desired data from the CDF. All data are, on read, converted to appropriate Python data types; EPOCH, EPOCH16, and TIME_TT2000 types are converted to :class:`~datetime.datetime`. Data are returned in numpy arrays. .. note:: Although pycdf supports TIME_TT2000 variables, the Python :class:`~datetime.datetime` object does not support leap seconds. Thus, on read, any seconds past 59 are truncated to 59.999999 (59 seconds, 999 milliseconds, 999 microseconds). Potentially useful list methods and related functions: - `count <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `in <http://docs.python.org/reference/expressions.html#in>`_ - `index <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `len <http://docs.python.org/library/functions.html#len>`_ - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - `sorted <http://docs.python.org/library/functions.html#sorted>`_ The topic of array majority can be very confusing; good background material is available at `IDL Array Storage and Indexing <http://www.idlcoyote.com/misc_tips/colrow_major.html>`_. 
In brief, *regardless of the majority stored in the CDF*, pycdf will always present the data in the native Python majority, row-major order, also known as C order. This is the default order in `NumPy <http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html #internal-memory-layout-of-an-ndarray>`_. However, packages that render image data may expect it in column-major order. If the axes seem 'swapped' this is likely the reason. The :attr:`~Var.attrs` Python attribute acts as a dictionary referencing zAttributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`zAttrList` for more on the dictionary of attributes. With writing, as with reading, every attempt has been made to match the behavior of Python lists. You can write one record, many records, or even certain elements of all records. There is one restriction: only the record dimension (i.e. dimension 0) can be resized by write, as all records in a variable must have the same dimensions. Similarly, only whole records can be deleted. .. note:: Unusual error messages on writing data usually mean that pycdf is unable to interpret the data as a regular array of a single type matching the type and shape of the variable being written. A 5x4 array is supported; an irregular array where one row has five columns and a different row has six columns is not. Error messages of this type include: - ``Data must be well-formed, regular array of number, string, or datetime`` - ``setting an array element with a sequence.`` - ``shape mismatch: objects cannot be broadcast to a single shape`` For these examples, assume Flux has 100 records and dimensions [2, 3]. Rewrite the first record without changing the rest: >>> Flux[0] = [[1, 2, 3], [4, 5, 6]] Writes a new first record and delete all the rest: >>> Flux[...] = [[1, 2, 3], [4, 5, 6]] Write a new record in the last position and add a new record after: >>> Flux[99:] = [[[1, 2, 3], [4, 5, 6]], ... [[11, 12, 13], [14, 15, 16]]] Insert two new records between the current number 5 and 6: >>> Flux[5:6] = [[[1, 2, 3], [4, 5, 6]], [[11, 12, 13], ... [14, 15, 16]]] This operation can be quite slow, as it requires reading and rewriting the entire variable. (CDF does not directly support record insertion.) Change the first element of the first two records but leave other elements alone: >>> Flux[0:2, 0, 0] = [1, 2] Remove the first record: >>> del Flux[0] Removes record 5 (the sixth): >>> del Flux[5] Due to the need to work around a bug in the CDF library, this operation can be quite slow. Delete *all data* from ``Flux``, but leave the variable definition intact: >>> del Flux[...] .. note:: Although this interface only directly supports zVariables, zMode is set on opening the CDF so rVars appear as zVars. See p.24 of the CDF user's guide; pyCDF uses zMode 2. .. autosummary:: ~Var.attrs ~Var.compress ~Var.copy ~Var.dtype ~Var.dv ~Var.insert ~Var.name ~Var.rename ~Var.rv ~Var.shape ~Var.type .. attribute:: Var.attrs zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. .. automethod:: compress .. automethod:: copy .. autoattribute:: dtype .. automethod:: dv .. automethod:: insert .. automethod:: name .. automethod:: rename .. automethod:: rv .. autoattribute:: shape .. 
automethod:: type """ def __init__(self, cdf_file, var_name, *args): """Create or locate a variable Parameters ========== cdf_file : :py:class:`pycdf.CDF` CDF file containing this variable var_name : string name of this variable Other Parameters ================ args additional arguments passed to :py:meth:`_create`. If none, opens an existing variable. If provided, creates a new one. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning """ self.cdf_file = cdf_file #This is the definitive "identify" of variable self._name = None self._type = None #CDF type (long) self._raw = False #Raw access (skip all conversions) if len(args) == 0: self._get(var_name) else: self._create(var_name, *args) #Weak reference to attribute list (use attrs instead) #This avoids a reference loop self._attrlistref = weakref.ref(zAttrList(self)) def __getitem__(self, key): """Returns a slice from the data array. Details under :py:class:`pycdf.Var`. @return: The data from this variable @rtype: list-of-lists of appropriate type. @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) #Hyperslice mostly catches this sort of thing, but #an empty variable is a special case, since we might want to #WRITE to 0th record (which Hyperslice also supports) but #can't READ from it, and iterating over tries to read from it. if hslice.rv: if hslice.dimsizes[0] == 0 and hslice.degen[0] and \ hslice.starts[0] == 0: raise IndexError('record index out of range') #For NRV, again hslice will assume 0th record exists since we might #want to write. So ANY degenerate dim other than the glued-on 0th #suggests an explicit index that should fail. None degenerate suggests #make an empty array. #Note this is pulling a lot of hyperslice stuff into getitem! elif hslice.dimsizes[0] == 0: if len(hslice.degen) > 1 and max(hslice.degen[1:]): raise IndexError('record index out of range') else: #The zero-length dimension is degenerate so it gets chopped, #and you can't have a zero-length numpy array that still #maintains the size of all other dimensions. So just force #a zero-dim array and the rest will follow hslice.counts[...] = 0 #If this is a scalar, need to make a single non-degenerate #dimension so it can be empty. if len(hslice.counts) == 1: hslice.degen[0] = False result = hslice.create_array() if hslice.counts[0] != 0: hslice.select() lib.call(const.GET_, const.zVAR_HYPERDATA_, result.ctypes.data_as(ctypes.c_void_p)) return hslice.convert_input_array(result) def __delitem__(self, key): """Removes a record (or set of records) from the CDF Only whole records can be deleted, so the del call must either specify only one dimension or it must specify all elements of the non-record dimensions. This is *not* a way to resize a variable! Deleting records from the middle of a variable may be very slow in some circumstances. To work around a bug in CDF library versions 3.4.0 and before, all the data must be read in, the requested deletions done, and then all written back out. 
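A hedged sketch (``Flux`` is the hypothetical 100-record variable from the
class docstring; only whole records can be removed):

>>> del Flux[5:10]   # removes records 5 through 9
>>> del Flux[::2]    # removes every other record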
@param key: index or slice to delete @type key: int or slice @raise TypeError: if an attempt is made to delete from a non record-varying variable, or to delete below the record level """ if not self.rv(): raise TypeError('Cannot delete records from non-record-varying ' 'variable.') hslice = _Hyperslice(self, key) if hslice.dims > 1 and (hslice.counts[1:] != hslice.dimsizes[1:]).any(): raise TypeError('Can only delete entire records.') if hslice.counts[0] == 0: return start = hslice.starts[0] count = hslice.counts[0] interval = hslice.intervals[0] dimsize = hslice.dimsizes[0] self._call() dangerous_delete = False if lib._del_middle_rec_bug and \ (interval != 1 or (start != 0 and start + count < dimsize)): #delete from middle is dangerous if only have one index entry entries = ctypes.c_long(0) lib.call(const.GET_, const.zVAR_nINDEXENTRIES_, ctypes.byref(entries)) dangerous_delete = (entries.value == 1) if dangerous_delete: data = self[...] data = numpy.delete( data, numpy.arange(start, start + count * interval, interval), 0) self[0:dimsize - count] = data first_rec = dimsize - count last_rec = dimsize - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif interval == 1: first_rec = ctypes.c_long(start) last_rec = ctypes.c_long(start + count - 1) lib.call(const.DELETE_, const.zVAR_RECORDS_, first_rec, last_rec) else: self._call() #delete from end to avoid renumbering of records for recno in range(start + (count - 1) * interval, start - 1, -1 * interval): lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(recno), ctypes.c_long(recno)) def __setitem__(self, key, data): """Puts a slice into the data array. Details under :py:class:`pycdf.Var`. @param key: index or slice to store @type key: int or slice @param data: data to store @type data: numpy.array @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. 
IndexError will @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) n_recs = hslice.counts[0] hslice.expand(data) cdf_type = self.type() if cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) else: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=self._np_type()) if cdf_type == const.CDF_EPOCH16.value: datashape = data.shape[:-1] else: datashape = data.shape #Check data sizes if datashape != tuple(hslice.expected_dims()): raise ValueError('attempt to assign data of dimensions ' + str(datashape) + ' to slice of dimensions ' + str(tuple(hslice.expected_dims()))) #Flip majority and reversed dimensions, see convert_input_array data = hslice.convert_output_array(data) #Handle insertions and similar weirdness if hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Specified slice ends before last record, so insert in middle saved_data = self[hslice.starts[0] + n_recs:] if hslice.counts[0] > 0: hslice.select() lib.call(const.PUT_, const.zVAR_HYPERDATA_, data.ctypes.data_as(ctypes.c_void_p)) if hslice.counts[0] < n_recs: first_rec = hslice.starts[0] + hslice.counts[0] last_rec = hslice.dimsizes[0] - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Put saved data in after inserted data self[hslice.starts[0] + hslice.counts[0]:] = saved_data def extend(self, data): """ Append multiple values to the end of this variable This is an efficiency function which overrides the base implementation in MutableSequence. Parameters ---------- data : the data to append """ self[len(self):] = data def insert(self, index, data): """ Inserts a *single* record before an index Parameters ---------- index : int index before which to insert the new record data : the record to insert """ self[index:index] = [data] def _create(self, var_name, datatype, n_elements = 1, dims = (), recVary = const.VARY, dimVarys = None): """Creates a new zVariable @param var_name: name of this variable @type var_name: string @param datatype: CDF data type @type datatype: ctypes.c_long @param n_elements: number of elements (should be 1 except for CDF_CHAR variables). @type n_elements: long @param dims: size of each dimension for multi-dimensional variable, or empty for a zero-dimensional @type dims: sequence of long @param recVary: record variance for this variable (VARY/NOVARY) @type recVary: long @param dimVarys: array of VARY or NOVARY, variance for each dimension @type dimVarys: sequence of long @return: new variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.new}. 
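A minimal sketch of that public path (``testcdf`` is assumed to be an open,
writable CDF as in the class docstring; the variable name is hypothetical):

>>> newvar = testcdf.new('newvar', recVary=False, type=pycdf.const.CDF_INT4)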
""" dim_array = (ctypes.c_long * len(dims))(*dims) enc_name = var_name.encode('ascii') if dimVarys is None: dim_vary_array = (ctypes.c_long * (len(dims) if len(dims) > 0 else 1))(const.VARY) else: dim_vary_array = (ctypes.c_long * len(dims))(*dimVarys) varNum = ctypes.c_long(0) self.cdf_file._call(const.CREATE_, const.zVAR_, enc_name, datatype, ctypes.c_long(n_elements), ctypes.c_long(len(dims)), dim_array, recVary, dim_vary_array, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) def _delete(self): """Removes this zVariable from the CDF @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ self._call(const.DELETE_, const.zVAR_) self.cdf_file.clear_from_cache(self._name) self._name = None def _get(self, var_name): """Gets an existing zVariable @param var_name: name of this variable @type var_name: string @return: variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.__getitem__}. """ if isinstance(var_name, str_classes): try: enc_name = var_name.encode('ascii').rstrip() except AttributeError: enc_name = var_name.rstrip() #already in ASCII #'touch' CDF to cause an error if the name isn't there; get number varNum = ctypes.c_long(0) self.cdf_file._call(const.GET_, const.zVAR_NUMBER_, enc_name, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) else: #Looking up by number name = ctypes.create_string_buffer(const.CDF_VAR_NAME_LEN256+1) self.cdf_file._call(const.SELECT_, const.zVAR_, ctypes.c_long(var_name), const.GET_, const.zVAR_NAME_, name) self._name = name.value.rstrip() self.cdf_file.add_to_cache(self._name, var_name) def _num(self): """Returns the zVar number for this variable @return: number of this zVar @rtype: int """ return self.cdf_file.var_num(self._name) def __len__(self): """Get number of records for this variable in this file @return: Number of records @rtype: long @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ count = ctypes.c_long(0) self._call(const.GET_, const.zVAR_MAXREC_, ctypes.byref(count)) return (count.value + 1) def __repr__(self): """Returns representation of the variable Cannot return anything that can be eval'd to create a copy, so just wrap the informal representation in angle brackets. @return: info on this zVar @rtype: str """ return '<Var:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the variable This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.Var`. 
@return: info on this zVar, CDFTYPE [dimensions] NRV (if not record-varying) @rtype: str """ if self.cdf_file._opened: cdftype = self.type() chartypes = (const.CDF_CHAR.value, const.CDF_UCHAR.value) rv = self.rv() typestr = lib.cdftypenames[cdftype] + \ ('*' + str(self._nelems()) if cdftype in chartypes else '' ) if rv: sizestr = str([len(self)] + self._dim_sizes()) else: sizestr = str(self._dim_sizes()) return typestr + ' ' + sizestr + ('' if rv else ' NRV') else: if isinstance(self._name, str): return 'zVar "{0}" in closed CDF {1}'.format( self._name, self.cdf_file.pathname) else: return 'zVar "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self.cdf_file.pathname.decode('ascii')) def _n_dims(self): """Get number of dimensions for this variable @return: the number of dimensions @rtype: long """ n_dims = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMDIMS_, ctypes.byref(n_dims)) return n_dims.value def _dim_sizes(self): """Get the dimension sizes for this variable @return: sequence of sizes @rtype: sequence of long @note: This will always be in Python order (i.e. row major, last index iterates most quickly), *regardless* of the majority of the CDF. """ sizes = (ctypes.c_long * const.CDF_MAX_DIMS)(0) self._call(const.GET_, const.zVAR_DIMSIZES_, sizes) sizes = sizes[0:self._n_dims()] return sizes def rv(self, new_rv=None): """ Gets or sets whether this variable has record variance If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Other Parameters ================ new_rv : boolean True to change to record variance, False to change to NRV, unspecified to simply check variance. Returns ======= out : Boolean True if record varying, False if NRV """ if new_rv != None: self._call(const.PUT_, const.zVAR_RECVARY_, const.VARY if new_rv else const.NOVARY) vary = ctypes.c_long(0) self._call(const.GET_, const.zVAR_RECVARY_, ctypes.byref(vary)) return vary.value != const.NOVARY.value def dv(self, new_dv=None): """ Gets or sets dimension variance of each dimension of variable. If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Parameters ========== new_dv : list of boolean Each element True to change that dimension to dimension variance, False to change to not dimension variance. (Unspecified to simply check variance.) Returns ======= out : list of boolean True if that dimension has variance, else false. """ ndims = self._n_dims() if new_dv != None: if len(new_dv) != ndims: raise ValueError('Must specify variance for ' + str(ndims) + 'dimensions.') varies = (ctypes.c_long * ndims)( *[const.VARY if dv else const.NOVARY for dv in new_dv]) self._call(const.PUT_, const.zVAR_DIMVARYS_, varies) if ndims == 0: return [] varies = (ctypes.c_long * const.CDF_MAX_DIMS)() self._call(const.GET_, const.zVAR_DIMVARYS_, varies) return [dv != const.NOVARY.value for dv in varies[0:ndims]] def _call(self, *args, **kwargs): """Select this CDF and variable and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. @param args: Passed directly to the CDF library interface. Useful constants are defined in the :py:mod:`pycdf.const` module of this package. @type args: various, see :py:mod:`ctypes`. 
@return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self.cdf_file._call( const.SELECT_, const.zVAR_, ctypes.c_long(self.cdf_file.var_num(self._name)), *args, **kwargs) def _np_type(self): """Returns the numpy type of this variable This is the numpy type that will come directly out of the CDF; see :meth:`dtype` for the representation post-conversion. Raises ====== CDFError : for library-reported error or failure to find numpy type Returns ======= out : dtype numpy dtype that will hold value from this variable """ cdftype = self.type() if cdftype == const.CDF_CHAR.value or cdftype == const.CDF_UCHAR.value: return numpy.dtype('S' + str(self._nelems())) try: return lib.numpytypedict[cdftype] except KeyError: raise CDFError(const.BAD_DATA_TYPE) def type(self, new_type=None): """ Returns or sets the CDF type of this variable Parameters ========== new_type : ctypes.c_long the new type from :mod:`~pycdf.const` Returns ======= out : int CDF type """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) n_elements = ctypes.c_long(self._nelems()) self._call(const.PUT_, const.zVAR_DATASPEC_, new_type, n_elements) self._type = None if self._type is None: cdftype = ctypes.c_long(0) self._call(const.GET_, const.zVAR_DATATYPE_, ctypes.byref(cdftype)) self._type = cdftype.value return self._type def _nelems(self): """Number of elements for each value in this variable This is the length of strings for CHAR and UCHAR, should be 1 otherwise. @return: length of strings @rtype: int """ nelems = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMELEMS_, ctypes.byref(nelems)) return nelems.value def name(self): """ Returns the name of this variable Returns ======= out : str variable's name """ if isinstance(self._name, str): return self._name elif isinstance(self._name, bytes): return self._name.decode() def compress(self, comptype=None, param=None): """ Set or check the compression of this variable Compression may not be changeable on variables with data already written; even deleting the data may not permit the change. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
Returns ======= out : tuple the (comptype, param) currently in effect """ return _compress(self, comptype, param) def copy(self): """ Copies all data and attributes from this variable Returns ======= out : :class:`VarCopy` list of all data in record order """ return VarCopy(self) def rename(self, new_name): """ Renames this variable Parameters ========== new_name : str the new name for this variable """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_VAR_NAME_LEN256: raise CDFError(const.BAD_VAR_NAME) self._call(const.PUT_, const.zVAR_NAME_, enc_name) self.cdf_file.add_to_cache( enc_name, self.cdf_file.var_num(self._name)) #Still in cache del self.cdf_file._var_nums[self._name] self._name = enc_name @property def shape(self): """ Provides the numpy array-like shape of this variable. Returns a tuple; first element is number of records (RV variable only) And the rest provide the dimensionality of the variable. .. note:: Assigning to this attribute will not change the shape. """ if self.rv(): return tuple([len(self)] + self._dim_sizes()) else: return tuple(self._dim_sizes()) @property def dtype(self): """ Provide the numpy dtype equivalent to the CDF type of this variable. Data from this variable will be returned in numpy arrays of this type. See Also -------- type """ cdftype = self.type() if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str is not bytes and not self._raw: return numpy.dtype('U' + str(self._nelems())) if cdftype in (const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value) and not self._raw: return numpy.dtype('O') return self._np_type() def _get_attrs(self): """Get attribute list Provide access to the zVar's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = zAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. """) class _Hyperslice(object): """Represents a CDF 'slice' used for the hyper CDF functions For internal module use only. @ivar dims: number of dimensions to this slice, usually number of dimensions to the variable plus one for the record, which represents the 0th (least rapidly varying) dimension. @type dims: int @ivar dimsizes: size of each dimension (0th is number of records) @type dimsizes: list of int @ivar starts: index of the start value for each dimension ('dimension indices' in CDF speak) @type starts: list of int @ivar counts: number of values to get from each dimension. Final result will be the product of everything in counts. ('dimension counts' in CDF speak) @type counts: numpy.array @ivar intervals: interval between successive indices to use for each dimension. ('dimension invervals' in CDF speak) @type intervals: list of int @ivar degen: is this dimension degenerate, i.e. should be removed in the returned dataset. A 3D array with one dimension degenerate will be returned as a 2D array (i.e. list-of-lists.) @type degen: numpy.array @ivar rev: should this dimension be returned in reverse order? 
@type rev: numpy.array @ivar column: is this slice in column-major mode (if false, row-major) @type column: boolean @ivar zvar: what CDF variable this object slices on @type zvar: :py:class:`pycdf.Var` @ivar expanded_key: fully-expanded version of the key passed to the constructor (all dimensions filled in) @type expanded_key: tuple @note: All dimension-related variables are stored row-major (Python order) """ def __init__(self, zvar, key): """Create a Hyperslice @param zvar: zVariable that this slices @type zvar: :py:class:`pycdf.Var` @param key: Python multi-dimensional slice as passed to __getitem__ @type key: tuple of slice and/or int @raise IndexError: if slice is out of range, mismatches dimensions, or otherwise unparsable. @raise ValueError: if slice has invalid values """ self.zvar = zvar self.rv = self.zvar.rv() #dim of records, + 1 record dim (NRV always is record 0) self.dims = zvar._n_dims() + 1 self.dimsizes = [len(zvar)] + \ zvar._dim_sizes() self.starts = [0] * self.dims self.counts = numpy.empty((self.dims,), dtype=numpy.int32) self.counts.fill(1) self.intervals = [1] * self.dims self.degen = numpy.zeros(self.dims, dtype=numpy.bool) self.rev = numpy.zeros(self.dims, dtype=numpy.bool) #key is: #1. a single value (integer or slice object) if called 1D #2. a tuple (of integers and/or slice objects) if called nD #3. Each item is either a single value (degenerate dim) # or a slice object. if not hasattr(key, '__len__'): #Not a container object, pack in tuple key = (key, ) if not self.rv: key = (0, ) + key #NRV, so always get 0th record (degenerate) key = self.expand_ellipsis(key, self.dims) if self.rv: #special-cases for RV variables if len(key) == 1: #get all data for this record(s) key = self.expand_ellipsis(key + (Ellipsis, ), self.dims) elif len(key) == self.dims - 1: #get same slice from each record key = (slice(None, None, None), ) + key if len(key) == self.dims: self.expanded_key = key for i in range(self.dims): idx = key[i] if hasattr(idx, 'start'): #slice (self.starts[i], self.counts[i], self.intervals[i], self.rev[i]) = \ self.convert_range(idx.start, idx.stop, idx.step, self.dimsizes[i]) else: #Single degenerate value if idx < 0: idx += self.dimsizes[i] if idx != 0 and (idx >= self.dimsizes[i] or idx < 0): raise IndexError('list index out of range') self.starts[i] = idx self.degen[i] = True else: raise IndexError('Slice does not match dimensions for zVar ' + str(zvar._name)) self.column = zvar.cdf_file.col_major() def expected_dims(self, data=None): """Calculate size of non-degenerate dimensions Figures out size, in each dimension, of expected input data @return: size of each dimension for this slice, excluding degenerate @rtype: list of int """ return [self.counts[i] for i in range(self.dims) if not self.degen[i]] def expand(self, data): """Expands the record dimension of this slice to hold a set of data If the length of data (outermost dimension) is larger than the record count (counts[0]) for this slice, expand the slice to hold all the data. This requires that the record dimension of the slice not be degenerate, and also that it not have been completely specified when the hyperslice was created (i.e. record dimension either ellipsis or no specified stop.) Does *not* expand any other dimension, since that's Very Hard in CDF. 
@param data: the data which are intended to be stored in this slice @type data: list """ rec_slice = self.expanded_key[0] if not self.rv or isinstance(data, str_classes) or self.degen[0] or \ not hasattr(rec_slice, 'stop'): return if len(data) < self.counts[0]: #Truncate to fit data if rec_slice.stop is None and rec_slice.step in (None, 1): self.counts[0] = len(data) elif len(data) > self.counts[0]: #Expand to fit data if rec_slice.step in (None, 1): self.counts[0] = len(data) def create_array(self): """Creates a numpy array to hold the data from this slice Returns ======= out : numpy.array array sized, typed, and dimensioned to hold data from this slice """ counts = self.counts degen = self.degen if self.column: counts = self.reorder(counts) degen = self.reorder(degen) #TODO: Forcing C order for now, revert to using self.column later array = numpy.empty( [counts[i] for i in range(len(counts)) if not degen[i]], self.zvar._np_type(), order='C') return numpy.require(array, requirements=('C', 'A', 'W')) def convert_input_array(self, buffer): """Converts a buffer of raw data from this slice EPOCH(16) variables always need to be converted. CHAR need converted to Unicode if py3k Parameters ========== buffer : numpy.array data as read from the CDF file Returns ======= out : numpy.array converted data """ result = self._flip_array(buffer) #Convert to derived types cdftype = self.zvar.type() if not self.zvar._raw: if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str != bytes: dt = numpy.dtype('U{0}'.format(result.dtype.itemsize)) result = numpy.require(numpy.char.array(result).decode(), dtype=dt) elif cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(result) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(result) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(result) return result def convert_output_array(self, buffer): """Convert a buffer of data that will go into this slice Parameters ========== buffer : numpy.array data to go into the CDF file Returns ======= out : numpy.array input with majority flipped and dimensions reversed to be suitable to pass directly to CDF library. """ buffer = self._flip_array(buffer) return numpy.require(buffer, requirements=('C', 'A', 'W')) def _flip_array(self, data): """ Operations for majority, etc. common between convert_input and _output """ cdftype = self.zvar.type() #Flip majority if any non-degenerate dimensions exist if self.column and not min(self.degen): #Record-number dim degen, swap whole thing if self.degen[0]: if cdftype == const.CDF_EPOCH16.value: #Maintain last dimension data = data.transpose( list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose() #Record-number dimension is not degenerate, so keep it first else: if cdftype == const.CDF_EPOCH16.value: data = data.transpose( [0] + list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose( [0] + list(range(len(data.shape) - 1, 0, -1))) #Reverse non-degenerate dimensions in rev #Remember that the degenerate indices are already gone! 
if self.rev.any(): sliced = [(slice(None, None, -1) if self.rev[i] else slice(None)) for i in range(self.dims) if not self.degen[i]] if cdftype == const.CDF_EPOCH16.value: #don't reverse last dim sliced.extend(slice(None)) data = operator.getitem(data, tuple(sliced)) return data def select(self): """Selects this hyperslice in the CDF Calls the CDF library to select the CDF, variable, records, and array elements corresponding to this slice. """ args = (const.SELECT_, const.zVAR_RECNUMBER_, ctypes.c_long(self.starts[0]), const.SELECT_, const.zVAR_RECCOUNT_, ctypes.c_long(self.counts[0]), const.SELECT_, const.zVAR_RECINTERVAL_, ctypes.c_long(self.intervals[0])) if self.dims > 1: dims = self.dims - 1 args += (const.SELECT_, const.zVAR_DIMINDICES_, (ctypes.c_long * dims)(*self.starts[1:]), const.SELECT_, const.zVAR_DIMCOUNTS_, (ctypes.c_long * dims)(*self.counts[1:]), const.SELECT_, const.zVAR_DIMINTERVALS_, (ctypes.c_long * dims)(*self.intervals[1:])) self.zvar._call(*args) @staticmethod def expand_ellipsis(slices, n_dims): """Expands any ellipses into correct number of full-size slices @param slices: tuple of slices, integers, or ellipse objects @type slices: tuple @param n_dims: number of dimensions this slice is over @type n_dims: int @return: L{slices} with ellipses replaced by appropriate number of full-dimension slices @rtype: tuple @raise IndexError: if ellipses specified when already have enough dimensions """ if slices is Ellipsis: return tuple([slice(None, None, None) for i in range(n_dims)]) #Elements might be numpy arrays, so can't use in/index idx = [i for i, v in enumerate(slices) if v is Ellipsis] if not idx: #no ellipsis return slices if len(idx) > 1: #multiples! raise IndexError('Ellipses can only be used once per slice.') idx = idx[0] #how many dims to expand ellipsis to #remember the ellipsis is in len(slices) and must be replaced! extra = n_dims - len(slices) + 1 if extra < 0: raise IndexError('too many indices') result = slices[0:idx] + (slice(None), ) * extra + slices[idx+1:] return result @staticmethod def check_well_formed(data): """Checks if input data is well-formed, regular array""" d = numpy.asanyarray(data) if d.dtype == numpy.object: #this is probably going to be bad try: len(d.flat[0]) except TypeError: #at least it's not a list pass else: raise ValueError( 'Data must be well-formed, regular array of number, ' 'string, or datetime') @staticmethod def dimensions(data): """Finds the dimensions of a nested list-of-lists @param data: data of which dimensions are desired @type data: list (of lists) @return: dimensions of L{data}, in order outside-in @rtype: list of int @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) _Hyperslice.check_well_formed(d) return d.shape @staticmethod def types(data, backward=False): """Find dimensions and valid types of a nested list-of-lists Any given data may be representable by a range of CDF types; infer the CDF types which can represent this data. This breaks down to: 1. Proper kind (numerical, string, time) 2. Proper range (stores highest and lowest number) 3. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: 1. Type that matches precision of data first, then 2. integer type before float type, then 3. Smallest type first, then 4. signed type first, then 5. specifically-named (CDF_BYTE) vs. 
generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if L{data} specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: 1. absolute values between 0 and 3e-39 2. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. @param data: data for which dimensions and CDF types are desired @type data: list (of lists) @param backward: limit to pre-CDF3 types @type backward: bool @return: dimensions of L{data}, in order outside-in; CDF types which can represent this data; number of elements required (i.e. length of longest string) @rtype: 3-tuple of lists ([int], [ctypes.c_long], [int]) @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) dims = d.shape elements = 1 types = [] _Hyperslice.check_well_formed(d) if d.dtype.kind in ('S', 'U'): #it's a string types = [const.CDF_CHAR, const.CDF_UCHAR] elements = d.dtype.itemsize if d.dtype.kind == 'U': #UTF-8 uses 4 bytes per elements //= 4 elif d.size and hasattr(numpy.ma.getdata(d).flat[0], 'microsecond'): if max((dt.microsecond % 1000 for dt in d.flat)) > 0: types = [const.CDF_EPOCH16, const.CDF_EPOCH, const.CDF_TIME_TT2000] else: types = [const.CDF_EPOCH, const.CDF_EPOCH16, const.CDF_TIME_TT2000] if backward: del types[types.index(const.CDF_EPOCH16)] del types[-1] elif not lib.supports_int8: del types[-1] elif d is data or isinstance(data, numpy.generic): #numpy array came in, use its type (or byte-swapped) types = [k for k in lib.numpytypedict if (lib.numpytypedict[k] == d.dtype or lib.numpytypedict[k] == d.dtype.newbyteorder()) and not k in lib.timetypes] if (not lib.supports_int8 or backward) \ and const.CDF_INT8.value in types: del types[types.index(const.CDF_INT8.value)] #Maintain priority to match the ordered lists below: #float/double (44, 45) before real (21/22), and #byte (41) before int (1) before char (51). So hack. #Consider making typedict an ordered dict once 2.6 is dead. types.sort(key=lambda x: x % 50, reverse=True) if not types: #not a numpy array, or can't parse its type if d.dtype.kind == 'O': #Object. 
Try to make it numeric #Can't do safe casting from Object, so try and compare #Basically try most restrictive to least restrictive trytypes = (numpy.uint64, numpy.int64, numpy.float64) for t in trytypes: try: newd = d.astype(dtype=t) except: #Failure to cast, try next type continue if (newd == d).all(): #Values preserved, use this type d = newd #Continue with normal guessing, as if a list break else: #fell through without a match raise ValueError( 'Cannot convert generic objects to CDF type.') if d.dtype.kind in ('i', 'u'): #integer minval = numpy.min(d) maxval = numpy.max(d) if minval < 0: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_INT2, const.CDF_INT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 15, 2 ** 31, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] else: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_UINT1, const.CDF_INT2, const.CDF_UINT2, const.CDF_INT4, const.CDF_UINT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 8, 2 ** 15, 2 ** 16, 2 ** 31, 2 ** 32, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] types = [t for (t, c) in zip(types, cutoffs) if c > maxval and (minval >= 0 or minval >= -c)] if (not lib.supports_int8 or backward) \ and const.CDF_INT8 in types: del types[types.index(const.CDF_INT8)] else: #float if dims is (): if d != 0 and (abs(d) > 1.7e38 or abs(d) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] else: absolutes = numpy.abs(d[d != 0]) if len(absolutes) > 0 and \ (numpy.max(absolutes) > 1.7e38 or numpy.min(absolutes) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] types = [t.value if hasattr(t, 'value') else t for t in types] return (dims, types, elements) @staticmethod def reorder(seq): """Reorders seq to switch array majority Used to take an array of subscripts between row and column majority. First element is not touched, being the record number. @param seq: a sequence of *subscripts* @type seq: sequence of integers @return: seq with all but element 0 reversed in order @rtype: sequence of integers """ return numpy.concatenate((seq[0:1], numpy.flipud(seq)[:-1])) @staticmethod def convert_range(start, stop, step, size): """Converts a start/stop/step range to start/count/interval (i.e. changes from Python-style slice to CDF-style) @param start: index to start a slice at, may be none or negative @type start: int @param stop: index at end of slice (one-past, standard Python), may be none or negative @type stop: int @param step: interval for stepping through stlice @type step: int @param size: size of list to slice @type size: int @return: (start, count, interval, rev) where: 1. start is the start index, normalized to be within the size of the list and negatives handled 2. count is the number of records in the slice, guaranteed to stop before the end 3. interval is the skip between records 4. rev indicates whether the sequence should be reversed @rtype: (int, int, int, boolean) """ (start, stop, step) = slice(start, stop, step).indices(size) if step < 0: step *= -1 count = int((start - stop + step - 1) / step) start = start - (count - 1) * step rev = True else: count = int((stop - start + step - 1) / step) rev = False if count < 0: count = 0 start = 0 return (start, count, step, rev) class Attr(MutableSequence): """An attribute, g or z, for a CDF .. 
warning:: This class should not be used directly, but only in its subclasses, :class:`gAttr` and :class:`zAttr`. The methods listed here are safe to use in the subclasses. Represents a CDF attribute, providing access to the Entries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. An introduction to CDF attributes can be found in section 2.4 of the CDF user's guide. Each element of the list is a single Entry of the appropriate type. The index to the elements is the Entry number. Multi-dimensional slicing is *not* supported; an Entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry .. autosummary:: ~Attr.append ~Attr.has_entry ~Attr.insert ~Attr.max_idx ~Attr.new ~Attr.number ~Attr.rename ~Attr.type .. automethod:: append .. automethod:: has_entry .. automethod:: insert .. automethod:: max_idx .. automethod:: new .. automethod:: number .. automethod:: rename .. automethod:: type """ def __init__(self, cdf_file, attr_name, create=False): """Initialize this attribute @param cdf_file: CDF file containing this attribute @type cdf_file: :py:class:`pycdf.CDF` @param attr_name: Name of this attribute @type attr_name: str @param create: True to create attribute, False to look up existing. @type create: bool """ self._cdf_file = cdf_file self._raw = False if isinstance(attr_name, str_classes): try: self._name = attr_name.encode('ascii') except AttributeError: self._name = attr_name attrno = ctypes.c_long() if create: self._cdf_file._call(const.CREATE_, const.ATTR_, self._name, self.SCOPE, ctypes.byref(attrno)) self._cdf_file.add_attr_to_cache( self._name, attrno.value, self.SCOPE == const.GLOBAL_SCOPE) else: #Ensure exists, and populate cache. See scope note below attrno, scope = self._cdf_file.attr_num(self._name) else: name = ctypes.create_string_buffer(const.CDF_ATTR_NAME_LEN256 + 1) scope = ctypes.c_long(0) self._cdf_file._call(const.SELECT_, const.ATTR_, ctypes.c_long(attr_name)) #Because it's possible to create a gAttr Python objecting #referencing an Attribute with variable scope, and vice-versa, #do NOT assume the scope matches #(Higher level code checks for that being a bad thing.) self._cdf_file._call( const.GET_, const.ATTR_NAME_, name, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) self._name = name.value.rstrip() if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) self._cdf_file.add_attr_to_cache(self._name, attr_name, scope) def __getitem__(self, key): """Return a slice of Entries. Because Attributes may be sparse, a multi-element slice will return None for those elements which do not have associated Entries. @param key: index or range of Entry number to return @type key: slice or int @return: a list of entries, appropriate type. @raise IndexError: if L{key} is an int and that Entry number does not exist. 
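A hedged sketch of the sparse behavior (``attribute`` is hypothetical, with
Entries 0 and 2 defined but no Entry 1):

>>> attribute[0:3]
[1, None, 3]
>>> attribute[1]
Traceback (most recent call last):
    ...
IndexError: list index 1 out of range.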
""" if key is Ellipsis: key = slice(None, None, None) if hasattr(key, 'indices'): idx = range(*key.indices(self.max_idx() + 1)) return [self._get_entry(i) if self.has_entry(i) else None for i in idx] else: if self.has_entry(key): return self._get_entry(key) else: raise IndexError('list index ' + str(key) + ' out of range.') def _check_other_entries(self, types): """Try to get the type of this entry from others in the Attribute For zAttrs, checks if all other Entries are the same type, and at least one doesn't match its zVar, i.e. Entry type dominates (otherwise assumption is the Var type dominates). For gAttrs, checks all other Entries, and gives priority to the one that's earliest in the possible type list and exists in other Entries. This is only one component of Entry type guessing! :param list types: CDF types that are candidates (match the data) :return: The type discerned from other Entries, or None """ if self.ENTRY_ == const.zENTRY_: #If everything else is the same entry type, #and one is not the same as its var, probably #all entries should be of that type cand_et = None #The Entry type that might work one_var_diff = False #One Var has a type different from Entry for num in range(self.max_idx() + 1): if not self.has_entry(num): continue vartype = self._cdf_file[num].type() entrytype = self.type(num) if vartype != entrytype: one_var_diff = True if cand_et is None: if not entrytype in types: return None #One var has Entry with "impossible" type cand_et = entrytype elif cand_et != entrytype: return None #Two vars have Entries with different types if one_var_diff and cand_et is not None: return cand_et else: # Of those types which exist in other entries, # find the one which is earliest # in types, i.e. the preferred type entrytypes = [self.type(num) for num in range(self.max_idx() + 1) if self.has_entry(num)] entrytypes = [et for et in entrytypes if et in types] if entrytypes: return types[ min([types.index(et) for et in entrytypes])] return None def __setitem__(self, key, data): """Set a slice of Entries. @param key: index or range of Entry numbers to set @type key: slice or int @param data: the data to set these entries to. Normally each entry should be a sequence; if a scalar is provided, it is treated as a single-element list. @type data: scalar or list @raise ValueError: if size of {data} does not match size of L{key} @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. 
""" if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): #Single value, promote everything a dimension idx = (key, key + 1, 1) data = [data] else: idx = key.indices(self.max_idx() + 1) if key.step is None or key.step > 0: #Iterating forward, extend slice to match data if len(data) > len(range(*idx)): idx = (idx[0], idx[0] + idx[2] * len(data), idx[2]) #get, and check, types and sizes for all data #checks first so don't have error after changing half the Entries data_idx = -1 typelist = [] for i in range(*idx): data_idx += 1 if data_idx >= len(data): continue datum = data[data_idx] if datum is None: typelist[i] = (None, None, None) continue (dims, types, elements) = _Hyperslice.types( datum, backward=self._cdf_file.backward) if len(types) <= 0: raise ValueError('Cannot find a matching CDF type.') if len(dims) > 1: raise ValueError('Entries must be scalar or 1D.') elif len(dims) == 1 and isinstance(datum[0], str_classes): raise ValueError('Entry strings must be scalar.') entry_type = None if self.has_entry(i): #If the entry already exists, match its type entry_type = self.type(i) if not entry_type in types: entry_type = None if entry_type is None: #Check other entries for this attribute entry_type = self._check_other_entries(types) if entry_type is None and self.ENTRY_ == const.zENTRY_: #Fall back to zVar type vartype = self._cdf_file[i].type() if vartype in types: entry_type = vartype else: entry_type = types[0] elif entry_type is None: entry_type = types[0] if not entry_type in lib.numpytypedict: raise ValueError('Cannot find a matching numpy type.') typelist.append((dims, entry_type, elements)) data_idx = -1 for i in range(*idx): data_idx += 1 if data_idx >= len(data) or data[data_idx] is None: if self.has_entry(i): del self[i] continue datum = data[data_idx] (dims, entry_type, elements) = typelist[data_idx] self._write_entry(i, datum, entry_type, dims, elements) def __delitem__(self, key): """Delete a slice of Entries. @param key: index or range of Entry numbers to delete @type key: slice or int @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. """ if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): idx = (key, key + 1, 1) else: idx = key.indices(self.max_idx() + 1) for i in range(*idx): self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(i), const.DELETE_, self.ENTRY_) def __iter__(self, current=0): """Iterates over all entries in this Attribute Returns data from one entry at a time until reaches the end. @note: Returned in entry-number order. """ while current <= self.max_idx(): if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current += 1 def __reversed__(self, current=None): """Iterates over all entries in this Attribute Returns data from one entry at a time, starting at end and going to beginning. @note: Returned in entry-number order. """ if current is None: current = self.max_idx() while current >= 0: if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current -= 1 def __len__(self): """Number of Entries for this Attr. NOT same as max Entry number. 
@return: Number of Entries @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_NUMENTRIES_, ctypes.byref(count)) return count.value def __repr__(self): """Returns representation of an attribute Cannot return anything that can be eval'd to create a copy of the attribtute, so just wrap the informal representation in angle brackets. @return: all the data in this attribute @rtype: str """ return '<\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute This is an 'informal' representation in that it cannot be evaluated directly to create an L{Attr}. @return: all the data in this attribute @rtype: str """ if self._cdf_file._opened: return '\n'.join([str(item) for item in self]) else: if isinstance(self._name, str): return 'Attribute "{0}" in closed CDF {1}'.format( self._name, self._cdf_file.pathname) else: return 'Attribute "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self._cdf_file.pathname.decode('ascii')) def insert(self, index, data): """Insert an entry at a particular number Inserts entry at particular number while moving all subsequent entries to one entry number later. Does not close gaps. Parameters ========== index : int index where to put the new entry data : data for the new entry """ max_entry = self.max_idx() if index > max_entry: #Easy case self[index] = data return for i in range(max_entry, index - 1, -1): if self.has_entry(i+1): self.__delitem__(i+1) if self.has_entry(i): self.new(self.__getitem__(i), type=self.type(i), number=i+1) self[index] = data def append(self, data): """Add an entry to end of attribute Puts entry after last defined entry (does not fill gaps) Parameters ========== data : data for the new entry """ self[self.max_idx() + 1] = data def _call(self, *args, **kwargs): """Select this CDF and Attr and call the CDF internal interface @param args: Passed directly to the CDF library interface. @type args: various, see :py:mod:`ctypes`. @return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self._cdf_file._call( const.SELECT_, const.ATTR_, ctypes.c_long(self._cdf_file.attr_num(self._name)[0]), *args, **kwargs) def _entry_len(self, number): """Number of elements in an Entry @param number: number of Entry @type number: int @return: number of elements @rtype: int """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') count = ctypes.c_long(0) self._call( const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_NUMELEMS_, ctypes.byref(count)) return count.value def type(self, number, new_type=None): """Find or change the CDF type of a particular Entry number Parameters ========== number : int number of Entry to check or change Other Parameters ================ new_type type to change this Entry to, from :mod:`~pycdf.const`. Omit to only check type. Returns ======= out : int CDF variable type, see :mod:`~pycdf.const` Notes ===== If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) size = ctypes.c_long(self._entry_len(number)) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATASPEC_, new_type, size, ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') cdftype = ctypes.c_long(0) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATATYPE_, ctypes.byref(cdftype), ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') return cdftype.value def has_entry(self, number): """Check if this attribute has a particular Entry number Parameters ========== number : int number of Entry to check or change Returns ======= out : bool True if ``number`` is a valid entry number; False if not """ status = self._call(const.CONFIRM_, self.ENTRY_EXISTENCE_, ctypes.c_long(number), ignore=(const.NO_SUCH_ENTRY, )) return not status == const.NO_SUCH_ENTRY def max_idx(self): """Maximum index of Entries for this Attr Returns ======= out : int maximum Entry number """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_MAXENTRY_, ctypes.byref(count)) return count.value def new(self, data, type=None, number=None): """Create a new Entry in this Attribute .. note:: If ``number`` is provided and an Entry with that number already exists, it will be overwritten. Parameters ========== data data to put in the Entry Other Parameters ================ type : int type of the new Entry, from :mod:`~pycdf.const` (otherwise guessed from ``data``) number : int Entry number to write, default is lowest available number. """ if number is None: number = 0 while self.has_entry(number): number += 1 (dims, types, elements) = _Hyperslice.types( data, backward=self._cdf_file.backward) if type is None: #Guess based on other entries type = self._check_other_entries(types) if type is None and self.ENTRY_ == const.zENTRY_: #Try to match variable type vartype = self._cdf_file[number].type() if vartype in types: type = vartype if type is None: type = types[0] elif hasattr(type, 'value'): type = type.value self._write_entry(number, data, type, dims, elements) def number(self): """Find the attribute number for this attribute Returns ======= out : int attribute number """ no = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.ATTR_NUMBER_, self._name, ctypes.byref(no)) return no.value def global_scope(self): """Determine scope of this attribute. Returns ======= out : bool True if global (i.e. gAttr), False if zAttr """ return self._cdf_file.attr_num(self._name)[1] def rename(self, new_name): """Rename this attribute Renaming a zAttribute renames it for *all* zVariables in this CDF! 
Parameters ========== new_name : str the new name of the attribute """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_ATTR_NAME_LEN256: raise CDFError(const.BAD_ATTR_NAME) self._call(const.PUT_, const.ATTR_NAME_, enc_name) self._cdf_file.add_attr_to_cache( enc_name, *self._cdf_file.attr_num(self._name)) #still in cache del self._cdf_file._attr_info[self._name] self._name = enc_name def _get_entry(self, number): """Read an Entry associated with this L{Attr} @param number: number of Entry to return @type number: int @return: data from entry numbered L{number} @rtype: list or str """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') #Make a big enough buffer length = self._entry_len(number) cdftype = self.type(number) if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): buff = numpy.empty((), 'S{0}'.format(length), order='C') else: if not cdftype in lib.numpytypedict: raise CDFError(const.BAD_DATA_TYPE) buff = numpy.empty((length,), lib.numpytypedict[cdftype], order='C') buff = numpy.require(buff, requirements=('C', 'A', 'W')) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATA_, buff.ctypes.data_as(ctypes.c_void_p)) #decode if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): if str == bytes or self._raw: #Py2k, leave as bytes result = bytes(buff) else: #Py3k, make unicode result = str(numpy.char.array(buff).decode()) else: if not self._raw: if cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(buff) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(buff) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(buff) else: result = buff else: result = buff if length == 1: result = result[0] return result def _write_entry(self, number, data, cdf_type, dims, elements): """Write an Entry to this Attr. @param number: number of Entry to write @type number: int @param data: data to write @param cdf_type: the CDF type to write, from :py:mod:`pycdf.const` @param dims: dimensions of L{data} @type dims: list @param elements: number of elements in L{data}, 1 unless it is a string @type elements: int """ if len(dims) == 0: n_write = 1 else: n_write = dims[0] if cdf_type in (const.CDF_CHAR.value, const.CDF_UCHAR.value): data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.dtype('S' + str(elements))) n_write = elements elif cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data), except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) elif cdf_type in lib.numpytypedict: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=lib.numpytypedict[cdf_type]) else: raise CDFError(const.BAD_DATA_TYPE) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATA_, ctypes.c_long(cdf_type), ctypes.c_long(n_write), data.ctypes.data_as(ctypes.c_void_p)) def _delete(self): """Delete this Attribute Also deletes all Entries associated with it. 
""" self._call(const.DELETE_, const.ATTR_) self._cdf_file.clear_attr_from_cache(self._name) self._name = None class zAttr(Attr): """zAttribute for zVariables within a CDF. .. warning:: Because zAttributes are shared across all variables in a CDF, directly manipulating them may have unexpected consequences. It is safest to operate on zEntries via :class:`zAttrList`. .. note:: When accessing a zAttr, pyCDF exposes only the zEntry corresponding to the associated zVariable. See Also ======== :class:`Attr` """ ENTRY_ = const.zENTRY_ ENTRY_DATA_ = const.zENTRY_DATA_ SCOPE = const.VARIABLE_SCOPE ENTRY_EXISTENCE_ = const.zENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMzENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXzENTRY_ ENTRY_NUMELEMS_ = const.zENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.zENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.zENTRY_DATASPEC_ def insert(self, index, data): """Insert entry at particular index number Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError def append(self, index, data): """Add entry to end of attribute list Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError class gAttr(Attr): """Global Attribute for a CDF Represents a CDF attribute, providing access to the gEntries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. Normally accessed by providing a key to a :class:`gAttrList`: >>> attribute = cdffile.attrs['attribute_name'] >>> first_gentry = attribute[0] Each element of the list is a single gEntry of the appropriate type. The index to the elements is the gEntry number. A gEntry may be either a single string or a 1D array of numerical type. Entries of numerical type (everything but CDF_CHAR and CDF_UCHAR) with a single element are returned as scalars; multiple-element entries are returned as a list. No provision is made for accessing below the entry level; the whole list is returned at once (but Python's slicing syntax can be used to extract individual items from that list.) Multi-dimensional slicing is *not* supported; an entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry gEntries are *not* necessarily contiguous; a gAttribute may have an entry 0 and entry 2 without an entry 1. :meth:`~Attr.len` will return the *number* of gEntries; use :meth:`~Attr.max_idx` to find the highest defined gEntry number and :meth:`~Attr.has_entry` to determine if a particular gEntry number exists. Iterating over all entries is also supported:: >>> entrylist = [entry for entry in attribute] Deleting gEntries will leave a "hole": >>> attribute[0:3] = [1, 2, 3] >>> del attribute[1] >>> attribute.has_entry(1) False >>> attribute.has_entry(2) True >>> print attribute[0:3] [1, None, 3] Multi-element slices over nonexistent gEntries will return ``None`` where no entry exists. Single-element indices for nonexistent gEntries will raise ``IndexError``. Assigning ``None`` to a gEntry will delete it. 
When assigning to a gEntry, the type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing gEntry of the same number in this gAttribute #. other gEntries in this gAttribute #. data-matching constraints described in :meth:`CDF.new`. See Also ======== :class:`Attr` """ ENTRY_ = const.gENTRY_ ENTRY_DATA_ = const.gENTRY_DATA_ SCOPE = const.GLOBAL_SCOPE ENTRY_EXISTENCE_ = const.gENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMgENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXgENTRY_ ENTRY_NUMELEMS_ = const.gENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.gENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.gENTRY_DATASPEC_ class AttrList(MutableMapping): """Object representing a list of attributes. .. warning:: This class should not be used directly, but only via its subclasses, :class:`gAttrList` and :class:`zAttrList`. Methods listed here are safe to use from the subclasses. .. autosummary:: ~AttrList.clone ~AttrList.copy ~AttrList.from_dict ~AttrList.new ~AttrList.rename .. automethod:: clone .. automethod:: copy .. automethod:: from_dict .. automethod:: new .. automethod:: rename """ def __init__(self, cdf_file, special_entry=None): """Initialize the attribute collection @param cdf_file: CDF these attributes are in @type cdf_file: :py:class:`pycdf.CDF` @param special_entry: callable which returns a "special" entry number, used to limit results for zAttrs to those which match the zVar (i.e. the var number) @type special_entry: callable """ self._cdf_file = cdf_file self.special_entry = special_entry def __getitem__(self, name): """Find an Attribute by name @param name: name of the Attribute to return @type name: str @return: attribute named L{name} @rtype: L{Attr} @raise KeyError: if there is no attribute named L{name} @raise CDFError: other errors in CDF library """ try: attrib = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attrib.global_scope() != self.global_scope: raise KeyError(name + ': no ' + self.attr_name + ' by that name.') return attrib def __setitem__(self, name, data): """Create an Attribute or change its entries @param name: name of Attribute to change @type name: str @param data: Entries to populate this Attribute with. Any existing Entries will be deleted! Another C{Attr} may be specified, in which case all its entries are copied. @type data: scalar, list, or L{Attr} """ if isinstance(data, AttrList): if name in self: del self[name] attr = self._get_or_create(name) for entryno in range(data.max_idx()): if data.has_entry(entryno): attr.new(data[entryno], data.type(entryno), entryno) else: attr = self._get_or_create(name) if isinstance(data, str_classes): data = [data] else: try: junk = len(data) except TypeError: data = [data] attr[:] = data del attr[len(data):] def __delitem__(self, name): """Delete an Attribute (and all its entries) @param name: name of Attribute to delete @type name: str """ try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) attr._delete() def __iter__(self, current=0): """Iterates over all Attr in this CDF or variable Returns name of one L{Attr} at a time until reaches the end. @note: Returned in number order. 
""" count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) while current < count.value: candidate = self.AttrType(self._cdf_file, current) if candidate.global_scope() == self.global_scope: if self.special_entry is None or \ candidate.has_entry(self.special_entry()): if str == bytes: value = yield(candidate._name) else: value = yield(candidate._name.decode()) if value != None: current = self[value].number() current += 1 def __repr__(self): """Returns representation of attribute list Cannot return anything that can be eval'd to create a copy of the list, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<' + self.__class__.__name__ + ':\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute list This is an 'informal' representation in that it cannot be evaluated directly to create an L{AttrList}. @return: all the data in this list of attributes @rtype: str """ if self._cdf_file._opened: return '\n'.join([key + ': ' + ( ('\n' + ' ' * (len(key) + 2)).join( [str(value[i]) + ' [' + lib.cdftypenames[value.type(i)] + ']' for i in range(value.max_idx() + 1) if value.has_entry(i)]) if isinstance(value, Attr) else str(value) + ' [' + lib.cdftypenames[self.type(key)] + ']' ) for (key, value) in sorted(self.items())]) else: if isinstance(self._cdf_file.pathname, str): return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname) else: return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname.decode('ascii')) def clone(self, master, name=None, new_name=None): """ Clones another attribute list, or one attribute from it, into this list. Parameters ========== master : AttrList the attribute list to copy from. This can be any dict-like object. Other Parameters ================ name : str (optional) name of attribute to clone (default: clone entire list) new_name : str (optional) name of the new attribute, default ``name`` """ if name is None: self._clone_list(master) else: self._clone_attr(master, name, new_name) def copy(self): """ Create a copy of this attribute list Returns ======= out : dict copy of the entries for all attributes in this list """ return dict((key, value[:] if isinstance(value, Attr) else value) for (key, value) in self.items()) def new(self, name, data=None, type=None): """ Create a new Attr in this AttrList Parameters ========== name : str name of the new Attribute Other Parameters ================ data data to put into the first entry in the new Attribute type CDF type of the first entry from :mod:`~pycdf.const`. Only used if data are specified. Raises ====== KeyError : if the name already exists in this list """ if name in self: raise KeyError(name + ' already exists.') #A zAttr without an Entry in this zVar will be a "get" not "create" attr = self._get_or_create(name) if data is not None: if self.special_entry is None: attr.new(data, type) else: attr.new(data, type, self.special_entry()) def rename(self, old_name, new_name): """ Rename an attribute in this list Renaming a zAttribute renames it for *all* zVariables in this CDF! Parameters ========== old_name : str the current name of the attribute new_name : str the new name of the attribute """ AttrList.__getitem__(self, old_name).rename(new_name) def from_dict(self, in_dict): """ Fill this list of attributes from a dictionary .. deprecated:: 0.1.5 Use :meth:`~pycdf.AttrList.clone` instead; it supports cloning from dictionaries. 
Parameters ========== in_dict : dict Attribute list is populated entirely from this dictionary; all existing attributes are deleted. """ warnings.warn("from_dict is deprecated and will be removed. Use clone.", DeprecationWarning) for k in in_dict: self[k] = in_dict[k] for k in list(self): if not k in in_dict: del self[k] def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{AttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name self[new_name] = master[name] def _clone_list(self, master): """Clones this attribute list from another @param master: the attribute list to copy from @type master: L{AttrList} """ for name in master: self._clone_attr(master, name) for name in list(self): #Can't iterate over a list we're changing if not name in master: del self[name] def _get_or_create(self, name): """Retrieve L{Attr} or create it if it doesn't exist @param name: name of the attribute to look up or create @type name: str @return: attribute with this name @rtype: L{Attr} """ attr = None try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status != const.NO_SUCH_ATTR: raise if attr is None: attr = self.AttrType(self._cdf_file, name, True) elif attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) return attr class gAttrList(AttrList): """ Object representing *all* the gAttributes in a CDF. Normally accessed as an attribute of an open :class:`CDF`: >>> global_attribs = cdffile.attrs Appears as a dictionary: keys are attribute names; each value is an attribute represented by a :class:`gAttr` object. To access the global attribute TEXT: >>> text_attr = cdffile.attrs['TEXT'] See Also ======== :class:`AttrList` """ AttrType = gAttr attr_name = 'gAttribute' global_scope = True def __len__(self): """ Number of gAttributes in this CDF Returns ======= out : int number of gAttributes in the CDF """ count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMgATTRS_, ctypes.byref(count)) return count.value class zAttrList(AttrList): """Object representing *all* the zAttributes in a zVariable. Normally accessed as an attribute of a :class:`Var` in an open CDF: >>> epoch_attribs = cdffile['Epoch'].attrs Appears as a dictionary: keys are attribute names, values are the value of the zEntry associated with the appropriate zVariable. Each vAttribute in a CDF may only have a *single* entry associated with each variable. The entry may be a string, a single numerical value, or a series of numerical values. Entries with multiple values are returned as an entire list; direct access to the individual elements is not possible. Example: finding the first dependency of (ISTP-compliant) variable ``Flux``: >>> print cdffile['Flux'].attrs['DEPEND_0'] zAttributes are shared among zVariables, one zEntry allowed per zVariable. (pyCDF hides this detail.) Deleting the last zEntry for a zAttribute will delete the underlying zAttribute. zEntries are created and destroyed by the usual dict methods on the zAttrlist: >>> epoch_attribs['new_entry'] = [1, 2, 4] #assign a list to new zEntry >>> del epoch_attribs['new_entry'] #delete the zEntry The type of the zEntry is guessed from data provided. 
The type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing zEntry corresponding to this zVar #. other zEntries in this zAttribute #. the type of this zVar #. data-matching constraints described in :py:meth:`CDF.new` See Also ======== :class:`AttrList` """ AttrType = zAttr attr_name = 'zAttribute' global_scope = False def __init__(self, zvar): """Initialize the attribute collection @param zvar: zVariable these attributes are in @param zvar: :py:class:`pycdf.Var` """ super(zAttrList, self).__init__(zvar.cdf_file, zvar._num) self._zvar = zvar def __getitem__(self, name): """Find an zEntry by name @param name: name of the zAttribute to return @type name: str @return: attribute named L{name} @rtype: L{zAttr} @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if attrib.has_entry(zvar_num): attrib._raw = self._zvar._raw return attrib[zvar_num] else: raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) def __delitem__(self, name): """Delete an zEntry by name @param name: name of the zEntry to delete @type name: str @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library @note: If this is the only remaining entry, the Attribute will be deleted. """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(str(name) + ': no such attribute for variable ' + str(self._zvar._name)) del attrib[zvar_num] if len(attrib) == 0: attrib._delete() def __setitem__(self, name, data): """Sets a zEntry by name The type of the zEntry is guessed from L{data}. The type is chosen to match the data; subject to that constraint, it will try to match (in order): 1. existing zEntry corresponding to this zVar 2. other zEntries in this zAttribute 3. the type of this zVar 4. data-matching constraints described in L{_Hyperslice.types} @param name: name of zAttribute; zEntry for this zVariable will be set in zAttribute by this name @type name: str @raise CDFError: errors in CDF library @raise ValueError: if unable to find a valid CDF type matching L{data}, or if L{data} is the wrong dimensions. """ try: attr = super(zAttrList, self).__getitem__(name) except KeyError: attr = zAttr(self._cdf_file, name, True) attr._raw = self._zvar._raw attr[self._zvar._num()] = data def __len__(self): """Number of zAttributes in this variable @return: number of zAttributes in the CDF which have entries for this variable. @rtype: int """ length = 0 count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) current = 0 while current < count.value: candidate = zAttr(self._cdf_file, current) if not candidate.global_scope(): if candidate.has_entry(self._zvar._num()): length += 1 current += 1 return length def type(self, name, new_type=None): """Find or change the CDF type of a zEntry in this zVar @param name: name of the zAttr to check or change @type name: str @param new_type: type to change it to, see :py:mod:`pycdf.const` @type new_type: ctypes.c_long @return: CDF variable type, see :py:mod:`pycdf.const` @rtype: int @note: If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) return attrib.type(zvar_num, new_type) def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{zAttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name if new_name in self: del self[new_name] self.new(new_name, master[name], master.type(name) if hasattr(master, 'type') else None)
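Before moving on, a short usage illustration of the attribute interfaces described above may help. Every name in it (the file, the variable, and the attribute names) is a placeholder, and it assumes the module is importable as ``pycdf``; it is a sketch, not taken from the original package documentation.

import pycdf  # assumed import name for this standalone module

# Hypothetical names throughout; a new CDF is created from a blank master so it is writable.
with pycdf.CDF('example.cdf', '') as cdffile:
    cdffile['Flux'] = [1.0, 2.0, 3.0]        # new zVariable, type inferred from the data
    attrs = cdffile['Flux'].attrs            # zAttrList: one zEntry per zVariable
    attrs['UNITS'] = 'counts'                # string zEntry
    attrs['VALIDMIN'] = 0.0                  # numeric zEntry, type guessed from the data
    print(attrs['UNITS'])                    # reads the zEntry back
    del attrs['VALIDMIN']                    # deleting the last zEntry deletes the zAttribute
    cdffile.attrs['TEXT'] = 'example file'   # gAttr: gEntry 0 set on a global attribute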
_find_lib
Search for the CDF library. Searches likely locations for the CDF C library and attempts to load each candidate, stopping at the first successful load; if every attempt fails, it reports all the files that were tried. Returns ======= out : tuple Either (path to library, loaded library) on success or, on failure, (list of library paths tried, None)
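The body of ``_find_lib`` is masked in the source listing that follows (see the ``# MASKED:`` marker). A minimal sketch consistent with this docstring and with the call sites near the end of the module, where ``_libpath, _library = Library._find_lib()`` is unpacked and checked against ``None``, might look like the code below; it is an illustration written for this document, not necessarily the original implementation.

# Sketch only: assumes it sits inside the Library class alongside _lib_paths().
@staticmethod
def _find_lib():
    """Search for the CDF library, returning the first candidate that loads."""
    failed = []                            # candidate paths that did not load
    for libpath in Library._lib_paths():   # candidates in priority order
        try:
            lib = ctypes.CDLL(libpath)     # try to load this shared library
        except OSError:
            failed.append(libpath)         # keep it for the error message
        else:
            return libpath, lib            # stop at the first successful load
    return failed, None                    # nothing loaded: (paths tried, None)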
#!/usr/bin/env python # -*- coding: utf-8 -*- """ das developers note: This a is modification of the original SpacePy pycdf package. All refereneces to the greater spacepy package have been removed to create a small standalone module. --cwp 2018-10-18 The libcdf.so location code has been changed to find the version installed in anaconda. --cwp 2020-04-06 This package provides a Python interface to the Common Data Format (CDF) library used for many NASA missions, available at http://cdf.gsfc.nasa.gov/. It is targeted at Python 2.6+ and should work without change on either Python 2 or Python 3. The interface is intended to be 'pythonic' rather than reproducing the C interface. To open or close a CDF and access its variables, see the :class:`CDF` class. Accessing data within the variables is via the :class:`Var` class. The :data:`lib` object provides access to some routines that affect the functionality of the library in general. The :mod:`~pycdf.const` module contains constants useful for accessing the underlying library. Authors: Jon Niehof Institution: University of New Hampshire Contact: [email protected] Copyright 2010-2015 Los Alamos National Security, LLC. """ __contact__ = 'Jon Niehof, [email protected]' try: from collections.abc import MutableMapping, MutableSequence except ImportError: from collections import MutableMapping, MutableSequence import ctypes import ctypes.util import datetime import operator import os import os.path import shutil import sys import tempfile import warnings import weakref import numpy import numpy.ma #Import const AFTER library loaded, so failed load doesn't leave half-imported #from . import const try: str_classes = (str, bytes, unicode) except NameError: str_classes = (str, bytes) class Library(object): """ Abstraction of the base CDF C library and its state. Not normally intended for end-user use. An instance of this class is created at package load time as the :data:`~pycdf.lib` variable, providing access to the underlying C library if necessary. The CDF library itself is described in section 2.1 of the CDF user's guide, as well as the CDF C reference manual. Calling the C library directly requires knowledge of :mod:`ctypes`. Instantiating this object loads the C library, see :doc:`/pycdf` docs for details. .. autosummary:: ~Library.call ~Library.check_status ~Library.datetime_to_epoch ~Library.datetime_to_epoch16 ~Library.datetime_to_tt2000 ~Library.epoch_to_datetime ~Library.epoch_to_epoch16 ~Library.epoch_to_num ~Library.epoch_to_tt2000 ~Library.epoch16_to_datetime ~Library.epoch16_to_epoch ~Library.epoch16_to_tt2000 ~Library.set_backward supports_int8 ~Library.tt2000_to_datetime ~Library.tt2000_to_epoch ~Library.tt2000_to_epoch16 v_datetime_to_epoch v_datetime_to_epoch16 v_datetime_to_tt2000 v_epoch_to_datetime v_epoch_to_tt2000 v_epoch16_to_datetime v_epoch16_to_tt2000 v_tt2000_to_datetime v_tt2000_to_epoch v_tt2000_to_epoch16 libpath version .. automethod:: call .. automethod:: check_status .. automethod:: datetime_to_epoch .. automethod:: datetime_to_epoch16 .. automethod:: datetime_to_tt2000 .. automethod:: epoch_to_datetime .. automethod:: epoch_to_epoch16 .. automethod:: epoch_to_num .. automethod:: epoch_to_tt2000 .. automethod:: epoch16_to_datetime .. automethod:: epoch16_to_epoch .. automethod:: epoch16_to_tt2000 .. automethod:: set_backward .. attribute:: supports_int8 True if this library supports INT8 and TIME_TT2000 types; else False. .. automethod:: tt2000_to_datetime .. automethod:: tt2000_to_epoch .. 
automethod:: tt2000_to_epoch16 .. method:: v_datetime_to_epoch(datetime) A vectorized version of :meth:`datetime_to_epoch` which takes a numpy array of datetimes as input and returns an array of epochs. .. method:: v_datetime_to_epoch16(datetime) A vectorized version of :meth:`datetime_to_epoch16` which takes a numpy array of datetimes as input and returns an array of epoch16. .. method:: v_datetime_to_tt2000(datetime) A vectorized version of :meth:`datetime_to_tt2000` which takes a numpy array of datetimes as input and returns an array of TT2000. .. method:: v_epoch_to_datetime(epoch) A vectorized version of :meth:`epoch_to_datetime` which takes a numpy array of epochs as input and returns an array of datetimes. .. method:: v_epoch_to_tt2000(epoch) A vectorized version of :meth:`epoch_to_tt2000` which takes a numpy array of epochs as input and returns an array of tt2000s. .. method:: v_epoch16_to_datetime(epoch0, epoch1) A vectorized version of :meth:`epoch16_to_datetime` which takes a numpy array of epoch16 as input and returns an array of datetimes. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_epoch16_to_tt2000(epoch16) A vectorized version of :meth:`epoch16_to_tt2000` which takes a numpy array of epoch16 as input and returns an array of tt2000s. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_tt2000_to_datetime(tt2000) A vectorized version of :meth:`tt2000_to_datetime` which takes a numpy array of tt2000 as input and returns an array of datetimes. .. method:: v_tt2000_to_epoch(tt2000) A vectorized version of :meth:`tt2000_to_epoch` which takes a numpy array of tt2000 as input and returns an array of epochs. .. method:: v_tt2000_to_epoch16(tt2000) A vectorized version of :meth:`tt2000_to_epoch16` which takes a numpy array of tt2000 as input and returns an array of epoch16. .. attribute:: libpath The path where pycdf found the CDF C library, potentially useful in debugging. If this contains just the name of a file (with no path information), then the system linker found the library for pycdf. On Linux, ``ldconfig -p`` may be useful for displaying the system's library resolution. .. attribute:: version Version of the CDF library, (version, release, increment, subincrement) """ def __init__(self, libpath=None, library=None): """Load the CDF C library. Searches for the library in the order: 1. Appropriately-named file in CDF_LIB 2. Appropriately-named file in CDF_BASE 3. Standard library search path @raise CDFError: BAD_DATA_TYPE if can't map types properly """ if not 'CDF_TMP' in os.environ: os.environ['CDF_TMP'] = tempfile.gettempdir() if not library: if not libpath: self.libpath, self._library = self._find_lib() if self._library is None: raise Exception(( 'Cannot load CDF C library; checked {0}. 
' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(self.libpath))) else: self._library = ctypes.CDLL(libpath) self.libpath = libpath else: self._library = library self.libpath = libpath self._library.CDFlib.restype = ctypes.c_long #commonly used, so set it up here self._library.EPOCHbreakdown.restype = ctypes.c_long self._library.computeEPOCH.restype = ctypes.c_double self._library.computeEPOCH.argtypes = [ctypes.c_long] * 7 self._library.computeEPOCH16.restype = ctypes.c_double self._library.computeEPOCH16.argtypes = [ctypes.c_long] * 10 + \ [ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDFsetFileBackward'): self._library.CDFsetFileBackward.restype = None self._library.CDFsetFileBackward.argtypes = [ctypes.c_long] #Map old name to the 3.7.1+ name if not hasattr(self._library, 'computeTT2000') \ and hasattr(self._library, 'CDF_TT2000_from_UTC_parts'): self._library.computeTT2000 \ = self._library.CDF_TT2000_from_UTC_parts if hasattr(self._library, 'computeTT2000'): self._library.computeTT2000.restype = ctypes.c_longlong self._library.computeTT2000.argtypes = \ [ctypes.c_double] *9 #Map old name to the 3.7.1+ name if not hasattr(self._library, 'breakdownTT2000') \ and hasattr(self._library, 'CDF_TT2000_to_UTC_parts'): self._library.breakdownTT2000 \ = self._library.CDF_TT2000_to_UTC_parts if hasattr(self._library, 'breakdownTT2000'): self._library.breakdownTT2000.restype = None self._library.breakdownTT2000.argtypes = \ [ctypes.c_longlong] + [ctypes.POINTER(ctypes.c_double)] * 9 if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH'): self._library.CDF_TT2000_to_UTC_EPOCH.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH.argtypes = [ctypes.c_longlong] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH'): self._library.CDF_TT2000_from_UTC_EPOCH.restype = ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH.argtypes = [ctypes.c_double] if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH16'): self._library.CDF_TT2000_to_UTC_EPOCH16.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH16.argtypes = \ [ctypes.c_longlong, ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH16'): self._library.CDF_TT2000_from_UTC_EPOCH16.restype = \ ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH16.argtypes = \ [ctypes.POINTER(ctypes.c_double * 2)] #Get CDF version information ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) sub = ctypes.c_char(b' ') self.call(const.GET_, const.LIB_VERSION_, ctypes.byref(ver), const.GET_, const.LIB_RELEASE_, ctypes.byref(rel), const.GET_, const.LIB_INCREMENT_, ctypes.byref(inc), const.GET_, const.LIB_subINCREMENT_, ctypes.byref(sub)) ver = ver.value rel = rel.value inc = inc.value sub = sub.value self.version = (ver, rel, inc, sub) self._del_middle_rec_bug = ver < 3 or (ver == 3 and (rel < 4 or (rel == 4 and inc < 1))) self.supports_int8 = (ver > 3 or (ver == 3 and rel >=4)) self.cdftypenames = {const.CDF_BYTE.value: 'CDF_BYTE', const.CDF_CHAR.value: 'CDF_CHAR', const.CDF_INT1.value: 'CDF_INT1', const.CDF_UCHAR.value: 'CDF_UCHAR', const.CDF_UINT1.value: 'CDF_UINT1', const.CDF_INT2.value: 'CDF_INT2', const.CDF_UINT2.value: 'CDF_UINT2', const.CDF_INT4.value: 'CDF_INT4', const.CDF_UINT4.value: 'CDF_UINT4', const.CDF_INT8.value: 'CDF_INT8', const.CDF_FLOAT.value: 'CDF_FLOAT', const.CDF_REAL4.value: 'CDF_REAL4', const.CDF_DOUBLE.value: 'CDF_DOUBLE', const.CDF_REAL8.value: 'CDF_REAL8', const.CDF_EPOCH.value: 'CDF_EPOCH', 
const.CDF_EPOCH16.value: 'CDF_EPOCH16', const.CDF_TIME_TT2000.value: 'CDF_TIME_TT2000', } self.numpytypedict = {const.CDF_BYTE.value: numpy.int8, const.CDF_CHAR.value: numpy.int8, const.CDF_INT1.value: numpy.int8, const.CDF_UCHAR.value: numpy.uint8, const.CDF_UINT1.value: numpy.uint8, const.CDF_INT2.value: numpy.int16, const.CDF_UINT2.value: numpy.uint16, const.CDF_INT4.value: numpy.int32, const.CDF_UINT4.value: numpy.uint32, const.CDF_INT8.value: numpy.int64, const.CDF_FLOAT.value: numpy.float32, const.CDF_REAL4.value: numpy.float32, const.CDF_DOUBLE.value: numpy.float64, const.CDF_REAL8.value: numpy.float64, const.CDF_EPOCH.value: numpy.float64, const.CDF_EPOCH16.value: numpy.dtype((numpy.float64, 2)), const.CDF_TIME_TT2000.value: numpy.int64, } self.timetypes = [const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value] if not self.supports_int8: del self.cdftypenames[const.CDF_INT8.value] del self.numpytypedict[const.CDF_INT8.value] del self.cdftypenames[const.CDF_TIME_TT2000.value] del self.numpytypedict[const.CDF_TIME_TT2000.value] elif sys.platform.startswith('linux') \ and os.uname()[4].startswith('arm') \ and hasattr(self._library, 'computeTT2000') \ and self._library.computeTT2000( 2010, 1, 1, 0, 0, 0, 0, 0, 0) != 315576066184000000: #TT2000 call failed, so probably need to type-pun #double arguments to variadic functions. #Calling convention for non-variadic functions with floats #is unique, but convention for ints is same as variadic. #So type-pun arguments to integers to force that calling #convention. if ctypes.sizeof(ctypes.c_longlong) != \ ctypes.sizeof(ctypes.c_double): warnings.warn('ARM with unknown type sizes; ' 'TT2000 functions will not work.') else: self._library.computeTT2000.argtypes = \ [ctypes.c_longlong] * 9 c_ll_p = ctypes.POINTER(ctypes.c_longlong) if self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( 2010)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) != 315576066184000000: warnings.warn('ARM with unknown calling convention; ' 'TT2000 functions will not work.') self.datetime_to_tt2000 = self._datetime_to_tt2000_typepunned v_epoch16_to_datetime = numpy.frompyfunc( self.epoch16_to_datetime, 2, 1) self.v_epoch16_to_datetime = \ lambda x: v_epoch16_to_datetime(x[..., 0], x[..., 1]) self.v_epoch_to_datetime = numpy.frompyfunc( self.epoch_to_datetime, 1, 1) self.v_tt2000_to_datetime = numpy.frompyfunc( self.tt2000_to_datetime, 1, 1) self.v_datetime_to_epoch = numpy.vectorize( self.datetime_to_epoch, otypes=[numpy.float64]) v_datetime_to_epoch16 = numpy.frompyfunc( self.datetime_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. 
We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_datetime_to_epoch16(x): retval = numpy.require(v_datetime_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_datetime_to_epoch16 = _v_datetime_to_epoch16 self.v_datetime_to_tt2000 = numpy.vectorize( self.datetime_to_tt2000, otypes=[numpy.int64]) self.v_epoch_to_tt2000 = numpy.vectorize( self.epoch_to_tt2000, otypes=[numpy.int64]) self.v_tt2000_to_epoch = numpy.vectorize( self.tt2000_to_epoch, otypes=[numpy.float64]) v_epoch16_to_tt2000 = numpy.frompyfunc( self.epoch16_to_tt2000, 2, 1) self.v_epoch16_to_tt2000 = \ lambda x: v_epoch16_to_tt2000(x[..., 0], x[..., 1]) v_tt2000_to_epoch16 = numpy.frompyfunc( self.tt2000_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_tt2000_to_epoch16(x): retval = numpy.require(v_tt2000_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_tt2000_to_epoch16 = _v_tt2000_to_epoch16 if not self.supports_int8: self.datetime_to_tt2000 = self._bad_tt2000 self.tt2000_to_datetime = self._bad_tt2000 self.v_datetime_to_tt2000 = self._bad_tt2000 self.v_tt2000_to_datetime = self._bad_tt2000 self.epoch_to_tt2000 = self._bad_tt2000 self.v_epoch_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch = self._bad_tt2000 self.v_tt2000_to_epoch = self._bad_tt2000 self.epoch_16_to_tt2000 = self._bad_tt2000 self.v_epoch16_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch16 = self._bad_tt2000 self.v_tt2000_to_epoch16 = self._bad_tt2000 #Default to V2 CDF self.set_backward(True) # MASKED: _find_lib function (lines 455-478) @staticmethod def _lib_paths(): """Find candidate paths for the CDF library Does not check that the library is actually in any particular directory, just returns a list of possible locations, in priority order. Returns ======= out : generator of str paths that look like the CDF library """ #What the library might be named names = { 'win32': ['cdf.dll'], 'darwin': ['libcdf.dylib', 'cdf.dylib', 'libcdf.so'], 'linux2': ['libcdf.so'], 'linux': ['libcdf.so'], } names = names.get(sys.platform, ['libcdf.so']) #All existing CDF-library-like paths within a directory search_dir = lambda x: \ [os.path.join(x, fname) for fname in names if os.path.exists(os.path.join(x, fname))] # Only use anaconda locations... # Defined during builds ... if 'PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['PREFIX'], 'lib')): yield p # defined when conda is activated ... 
if 'CONDA_PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'lib')): yield p # Special subdirectory for anaconda unix packages on windows if 'LIBRARY_BIN' in os.environ: for p in search_dir(os.environ['LIBRARY_BIN']): yield p ctypespath = ctypes.util.find_library( 'cdf.dll' if sys.platform == 'win32' else 'cdf') if ctypespath: yield ctypespath def check_status(self, status, ignore=()): """ Raise exception or warning based on return status of CDF call Parameters ========== status : int status returned by the C library Other Parameters ================ ignore : sequence of ctypes.c_long CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. (Default none). Raises ====== CDFError : if status < CDF_WARN, indicating an error Warns ===== CDFWarning : if CDF_WARN <= status < CDF_OK, indicating a warning. Returns ======= out : int status (unchanged) """ if status == const.CDF_OK or status in ignore: return status if status < const.CDF_WARN: raise CDFError(status) else: warning = CDFWarning(status) warning.warn() return status def call(self, *args, **kwargs): """ Call the CDF internal interface Passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with :meth:`check_status`. Terminal NULL is automatically added to args. Parameters ========== args : various, see :mod:`ctypes` Passed directly to the CDF library interface. Useful constants are defined in the :mod:`~pycdf.const` module. Other Parameters ================ ignore : sequence of CDF statuses sequence of CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. Returns ======= out : int CDF status from the library Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ if 'ignore' in kwargs: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) ), kwargs['ignore']) else: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) )) def set_backward(self, backward=True): """ Set backward compatibility mode for new CDFs Unless backward compatible mode is set, CDF files created by the version 3 library can not be read by V2. Parameters ========== backward : boolean Set backward compatible mode if True; clear it if False. Raises ====== ValueError : if backward=False and underlying CDF library is V2 """ if self.version[0] < 3: if not backward: raise ValueError( 'Cannot disable backward-compatible mode for CDF version 2.') else: return self._library.CDFsetFileBackward(const.BACKWARDFILEon if backward else const.BACKWARDFILEoff) def epoch_to_datetime(self, epoch): """ Converts a CDF epoch value to a datetime Parameters ========== epoch : float epoch value from CDF Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
See Also ======== v_epoch_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) self._library.EPOCHbreakdown(ctypes.c_double(epoch), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999000) else: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, msec.value * 1000) def datetime_to_epoch(self, dt): """ Converts a Python datetime to a CDF Epoch value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : float epoch corresponding to dt See Also ======== v_datetime_to_epoch """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) micro = dt.microsecond % 1000 if micro >= 500 and dt.year < 9999: dt += datetime.timedelta(0, 0, 1000) return self._library.computeEPOCH(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000)) def epoch16_to_datetime(self, epoch0, epoch1): """ Converts a CDF epoch16 value to a datetime .. note:: The call signature has changed since SpacePy 0.1.2. Formerly this method took a single argument with two values; now it requires two arguments (one for each value). To convert existing code, replace ``epoch16_to_datetime(epoch)`` with ``epoch16_to_datetime(*epoch)``. Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. See Also ======== v_epoch16_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) usec = ctypes.c_long(0) nsec = ctypes.c_long(0) psec = ctypes.c_long(0) self._library.EPOCH16breakdown((ctypes.c_double * 2)(epoch0, epoch1), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec), ctypes.byref(psec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) micro = int(float(msec.value) * 1000 + float(usec.value) + float(nsec.value) / 1000 + float(psec.value) / 1e6 + 0.5) if micro < 1000000: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_epoch16(self, dt): """ Converts a Python datetime to a CDF Epoch16 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : list of float epoch16 corresponding to dt See Also ======== v_datetime_to_epoch16 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) #Default to "illegal epoch" epoch16 = (ctypes.c_double * 2)(-1., -1.) 
if self._library.computeEPOCH16(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0, 0, epoch16): return (-1., -1.) #Failure, so illegal epoch return (epoch16[0], epoch16[1]) def epoch_to_epoch16(self, epoch): """ Converts a CDF EPOCH to a CDF EPOCH16 value Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : (double, double) EPOCH16 corresponding to epoch """ e = numpy.require(epoch, numpy.float64) s = numpy.trunc(e / 1000.0) #ugly numpy stuff, probably a better way.... res = numpy.hstack((s, (e - s * 1000.0) * 1e9)) if len(res) <= 2: return res newshape = list(res.shape[0:-2]) newshape.append(res.shape[-1] // 2) newshape.append(2) return numpy.rollaxis(res.reshape(newshape), -1, -2) def epoch_to_num(self, epoch): """ Convert CDF EPOCH to matplotlib number. Same output as :func:`~matplotlib.dates.date2num` and useful for plotting large data sets without converting the times through datetime. Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : double Floating point number representing days since 0001-01-01. """ #date2num day 1 is 1/1/1 00UT #epoch 1/1/1 00UT is 31622400000.0 (millisecond) return (epoch - 31622400000.0) / (24 * 60 * 60 * 1000.0) + 1.0 def epoch16_to_epoch(self, epoch16): """ Converts a CDF EPOCH16 to a CDF EPOCH value Parameters ========== epoch16 : (double, double) EPOCH16 to convert. Lists and numpy arrays are acceptable. LAST dimension should be 2: the two pairs of EPOCH16 Returns ======= out : double EPOCH corresponding to epoch16 """ e = numpy.require(epoch16, numpy.float64) return e[..., 0] * 1000.0 + numpy.round(e[..., 1] / 1e9) def tt2000_to_datetime(self, tt2000): """ Converts a CDF TT2000 value to a datetime .. note:: Although TT2000 values support leapseconds, Python's datetime object does not. Any times after 23:59:59.999999 will be truncated to 23:59:59.999999. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
See Also ======== v_tt2000_to_datetime """ yyyy = ctypes.c_double(0) mm = ctypes.c_double(0) dd = ctypes.c_double(0) hh = ctypes.c_double(0) min = ctypes.c_double(0) sec = ctypes.c_double(0) msec = ctypes.c_double(0) usec = ctypes.c_double(0) nsec = ctypes.c_double(0) self._library.breakdownTT2000( ctypes.c_longlong(tt2000), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) sec = int(sec.value) if sec >= 60: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), 59, 999999) micro = int(msec.value * 1000 + usec.value + nsec.value / 1000 + 0.5) if micro < 1000000: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_tt2000(self, dt): """ Converts a Python datetime to a CDF TT2000 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0) def _datetime_to_tt2000_typepunned(self, dt): """ Converts a Python datetime to a CDF TT2000 value Typepunned version that passes doubles as longlongs, to get around ARM calling convention oddness. Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ c_ll_p = ctypes.POINTER(ctypes.c_longlong) if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( dt.year)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.month)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.day)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.hour)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.minute)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.second)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond // 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond % 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) def epoch_to_tt2000(self, epoch): """ Converts a CDF EPOCH to a CDF TT2000 value Parameters ========== epoch : double EPOCH to convert Returns ======= out : int tt2000 corresponding to epoch See Also ======== v_epoch_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH(epoch) def tt2000_to_epoch(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH .. note:: Although TT2000 values support leapseconds, CDF EPOCH values do not. 
Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double EPOCH corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch """ return self._library.CDF_TT2000_to_UTC_EPOCH(tt2000) def epoch16_to_tt2000(self, epoch0, epoch1): """ Converts a CDF epoch16 value to TT2000 .. note:: Because TT2000 does not support picoseconds, the picoseconds value in epoch is ignored (i.e., truncated.) Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : long TT2000 corresponding to epoch. See Also ======== v_epoch16_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH16( (ctypes.c_double * 2)(epoch0, epoch1)) def tt2000_to_epoch16(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH16 .. note:: Although TT2000 values support leapseconds, CDF EPOCH16 values do not. Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double, double EPOCH16 corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch16 """ #Default to "illegal epoch" if isn't populated epoch16 = (ctypes.c_double * 2)(-1., -1.) if self._library.CDF_TT2000_to_UTC_EPOCH16(tt2000, epoch16): return (-1., -1.) #Failure; illegal epoch return (epoch16[0], epoch16[1]) def _bad_tt2000(*args, **kwargs): """Convenience function for complaining that TT2000 not supported""" raise NotImplementedError( 'TT2000 functions require CDF library 3.4.0 or later') def download_library(): """Download and install the CDF library""" if sys.platform != 'win32': raise NotImplementedError( 'CDF library install only supported on Windows') try: import html.parser as HTMLParser except ImportError: import HTMLParser #https://stackoverflow.com/questions/1713038/super-fails-with-error-typeerror-argument-1-must-be-type-not-classobj class LinkParser(HTMLParser.HTMLParser, object): def __init__(self, *args, **kwargs): self.links_found = [] super(LinkParser, self).__init__(*args, **kwargs) def handle_starttag(self, tag, attrs): if tag != 'a' or attrs[0][0] != 'href': return self.links_found.append(attrs[0][1]) import re import subprocess try: import urllib.request as u except ImportError: import urllib as u # Removed reference to spacepy #import spacepy #if spacepy.config.get('user_agent', None): # class AppURLopener(u.FancyURLopener): # version = spacepy.config['user_agent'] # u._urlopener = AppURLopener() baseurl = 'https://spdf.sci.gsfc.nasa.gov/pub/software/cdf/dist/' url = u.urlopen(baseurl) listing = url.read() url.close() p = LinkParser() p.feed(listing) cdfdist = [l for l in p.links_found if re.match('^cdf3\d_\d(?:_\d)?/$', l)] if not cdfdist: raise RuntimeError( "Couldn't find CDF distribution directory to download") cdfdist.sort(key=lambda x: x.rstrip('/').split('_')) cdfverbase = cdfdist[-1].rstrip('/') instfname = cdfverbase + ('_0' if cdfverbase.count('_') == 1 else '') + \ '-setup-{0}.exe'.format(len('%x' % sys.maxsize)*4) insturl = baseurl + cdfverbase + '/windows/' + instfname tmpdir = tempfile.mkdtemp() try: fname, status = u.urlretrieve(insturl, os.path.join(tmpdir, instfname)) subprocess.check_call([fname, '/install', '/q1'], shell=False) finally: shutil.rmtree(tmpdir) _libpath, _library = Library._find_lib() 
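# If no candidate path from Library._lib_paths() could be loaded above, fail at import time below and list every path that was tried.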
if _library is None: raise Exception(('Cannot load CDF C library; checked {0}. ' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(_libpath))) from . import const lib = Library(_libpath, _library) """Module global library object. Initalized at module load time so all classes have ready access to the CDF library and a common state. E.g: >>> import pycdf >>> pycdf.lib.version (3, 3, 0, ' ') """ class CDFException(Exception): """ Base class for errors or warnings in the CDF library. Not normally used directly, but in subclasses :class:`CDFError` and :class:`CDFWarning`. Error messages provided by this class are looked up from the underlying C library. """ def __init__(self, status): """ Create a CDF Exception Uses CDF C library to look up an appropriate error message. Parameters ========== status : ctypes.c_long CDF status """ self.status = status self.string = 'CDF error ' + repr(status) + ', unable to get details.' message = ctypes.create_string_buffer(const.CDF_STATUSTEXT_LEN + 1) try: retval = lib._library.CDFlib(const.SELECT_, const.CDF_STATUS_, ctypes.c_long(status), const.GET_, const.STATUS_TEXT_, message, const.NULL_) if retval == const.CDF_OK: if isinstance(message.value, str): self.string = message.value elif isinstance(message.value, bytes): self.string = message.value.decode() except: pass def __str__(self): """ Error string associated with the library error. Returns ======= out : str Error message from the CDF library. """ return self.string class CDFError(CDFException): """Raised for an error in the CDF library.""" pass class CDFWarning(CDFException, UserWarning): """Used for a warning in the CDF library.""" def warn(self, level=4): """ Issues a warning based on the information stored in my exception Intended for use in check_status or similar wrapper function. Other Parameters ================ level : int optional (default 3), how far up the stack the warning should be reported. Passed directly to :class:`warnings.warn`. """ warnings.warn(self, self.__class__, level) class EpochError(Exception): """Used for errors in epoch routines""" pass def _compress(obj, comptype=None, param=None): """Set or check the compression of a :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param obj: object on which to set or check compression @type obj: :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param comptype: type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :py:mod:`pycdf.const`. If not specified, will not change compression. @type comptype: ctypes.c_long @param param: Compression parameter, see CDF CRM 4.10 and :py:mod:`pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
@type param: ctypes.c_long @return: (comptype, param) currently in effect @rtype: tuple """ if isinstance(obj, CDF): COMPRESSION_ = const.CDF_COMPRESSION_ elif isinstance(obj, Var): COMPRESSION_ = const.zVAR_COMPRESSION_ else: raise ValueError('Must specify a CDF or Var type.') validparams = {const.NO_COMPRESSION.value: [ctypes.c_long(0)], const.RLE_COMPRESSION.value: [const.RLE_OF_ZEROs], const.HUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.AHUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.GZIP_COMPRESSION.value: [ctypes.c_long(5), ctypes.c_long(1), ctypes.c_long(2), ctypes.c_long(3), ctypes.c_long(4), ctypes.c_long(6), ctypes.c_long(7), ctypes.c_long(8), ctypes.c_long(9), ], } comptypes = [const.NO_COMPRESSION, const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION, const.GZIP_COMPRESSION] comptypevalues = [i.value for i in comptypes] if comptype != None: if not hasattr(comptype, 'value'): comptype = ctypes.c_long(comptype) if param is None: if not comptype.value in validparams: raise CDFError(const.BAD_COMPRESSION) param = validparams[comptype.value][0] paramlist = (ctypes.c_long * 1)(param) obj._call(const.PUT_, COMPRESSION_, comptype, paramlist) params = (ctypes.c_long * const.CDF_MAX_PARMS)(*([0] * const.CDF_MAX_PARMS)) comptype = ctypes.c_long(0) percent = ctypes.c_long(0) obj._call(const.GET_, COMPRESSION_, ctypes.byref(comptype), ctypes.byref(params), ctypes.byref(percent)) param = params[0] if not comptype.value in comptypevalues: raise CDFError(const.BAD_COMPRESSION) validparamvalues = [i.value for i in validparams[comptype.value]] if not param in validparamvalues: raise CDFError(const.BAD_COMPRESSION_PARM) comptype = comptypes[comptypevalues.index(comptype.value)] if comptype in (const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION): param = validparams[comptype.value][validparamvalues.index(param)] return (comptype, param) class CDF(MutableMapping): """ Python object representing a CDF file. Open or create a CDF file by creating an object of this class. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. A readonly CDF with many variables may be slow to close. See :meth:`readonly`. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :meth:`close` or :meth:`save` when done. .. note:: Existing CDF files are opened read-only by default, see :meth:`readonly` to change. CDF supports the `with <http://docs.python.org/tutorial/inputoutput.html#methods-of-file-objects>`_ keyword, like other file objects, so: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... #do brilliant things with the CDF will open the CDF, execute the indented statements, and close the CDF when finished or when an error occurs. The `python docs <http://docs.python.org/reference/compound_stmts.html#with>`_ include more detail on this 'context manager' ability. 
CDF objects behave like a python `dictionary <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_, where the keys are names of variables in the CDF, and the values, :class:`Var` objects. As a dictionary, they are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_ and it is easy to loop over all of the variables in a file. Some examples: #. List the names of all variables in the open CDF ``cdffile``: >>> cdffile.keys() >>> for k in cdffile: #Alternate ... print(k) #. Get a :class:`Var` object for the variable named ``Epoch``: >>> epoch = cdffile['Epoch'] #. Determine if a CDF contains a variable named ``B_GSE``: >>> if 'B_GSE' in cdffile: ... print('B_GSE is in the file') ... else: ... print('B_GSE is not in the file') #. Find how many variables are in the file: >>> print(len(cdffile)) #. Delete the variable ``Epoch`` from the open CDF file ``cdffile``: >>> del cdffile['Epoch'] #. Display a summary of variables and types in open CDF file ``cdffile``: >>> print(cdffile) #. Open the CDF named ``cdf_filename.cdf``, read *all* the data from all variables into dictionary ``data``, and close it when done or if an error occurs: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... data = cdffile.copy() This last example can be very inefficient as it reads the entire CDF. Normally it's better to treat the CDF as a dictionary and access only the data needed, which will be pulled transparently from disc. See :class:`Var` for more subtle examples. Potentially useful dictionary methods and related functions: - `in <http://docs.python.org/reference/expressions.html#in>`_ - `keys <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_ - :py:func:`len` - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - :py:func:`sorted` - :py:func:`~spacepy.toolbox.dictree` The CDF user's guide section 2.2 has more background information on CDF files. The :attr:`~CDF.attrs` Python attribute acts as a dictionary referencing CDF attributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`gAttrList` for more on the dictionary of global attributes. Creating a new CDF from a master (skeleton) CDF has similar syntax to opening one: >>> cdffile = pycdf.CDF('cdf_filename.cdf', 'master_cdf_filename.cdf') This creates and opens ``cdf_filename.cdf`` as a copy of ``master_cdf_filename.cdf``. Using a skeleton CDF is recommended over making a CDF entirely from scratch, but this is possible by specifying a blank master: >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') When CDFs are created in this way, they are opened read-write, see :py:meth:`readonly` to change. By default, new CDFs (without a master) are created in version 2 (backward-compatible) format. To create a version 3 CDF, use :meth:`Library.set_backward`: >>> pycdf.lib.set_backward(False) >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') Add variables by direct assignment, which will automatically set type and dimension based on the data provided: >>> cdffile['new_variable_name'] = [1, 2, 3, 4] or, if more control is needed over the type and dimensions, use :py:meth:`new`. Although it is supported to assign Var objects to Python variables for convenience, there are some minor pitfalls that can arise when changing a CDF that will not affect most users. 
This is only a concern when assigning a zVar object to a Python variable, changing the CDF through some other variable, and then trying to use the zVar object via the originally assigned variable. Deleting a variable: >>> var = cdffile['Var1'] >>> del cdffile['Var1'] >>> var[0] #fail, no such variable Renaming a variable: >>> var = cdffile['Var1'] >>> cdffile['Var1'].rename('Var2') >>> var[0] #fail, no such variable Renaming via the same variable works: >>> var = cdffile['Var1'] >>> var.rename('Var2') >>> var[0] #succeeds, aware of new name Deleting a variable and then creating another variable with the same name may lead to some surprises: >>> var = cdffile['Var1'] >>> var[...] = [1, 2, 3, 4] >>> del cdffile['Var1'] >>> cdffile.new('Var1', data=[5, 6, 7, 8]) >>> var[...] [5, 6, 7, 8] .. autosummary:: ~CDF.attr_num ~CDF.attrs ~CDF.add_attr_to_cache ~CDF.add_to_cache ~CDF.backward ~CDF.checksum ~CDF.clear_attr_from_cache ~CDF.clear_from_cache ~CDF.clone ~CDF.close ~CDF.col_major ~CDF.compress ~CDF.copy ~CDF.from_data ~CDF.new ~CDF.raw_var ~CDF.readonly ~CDF.save ~CDF.var_num ~CDF.version .. attribute:: CDF.attrs Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. .. attribute:: CDF.backward True if this CDF was created in backward-compatible mode (for opening with CDF library before 3.x) .. automethod:: add_to_cache .. automethod:: add_attr_to_cache .. automethod:: attr_num .. automethod:: checksum .. automethod:: clear_from_cache .. automethod:: clear_attr_from_cache .. automethod:: clone .. automethod:: close .. automethod:: col_major .. automethod:: compress .. automethod:: copy .. automethod:: from_data .. automethod:: new .. automethod:: raw_var .. automethod:: readonly .. automethod:: save .. automethod:: var_num .. automethod:: version """ def __init__(self, pathname, masterpath=None, create=None, readonly=None): """Open or create a CDF file. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. Raises ====== CDFError if CDF library reports an error CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save` when done.
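An empty CDF can also be created without naming a master file by passing ``create=True``; such a file is opened read/write (the filename here is only an example):

>>> newcdf = pycdf.CDF('brand_new.cdf', create=True)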
""" if masterpath is not None: #Looks like we want to create if create is False: raise ValueError('Cannot specify a master CDF without creating a CDF') if readonly is True: raise ValueError('Cannot create a CDF in readonly mode') if create and readonly: raise ValueError('Cannot create a CDF in readonly mode') try: self.pathname = pathname.encode() except AttributeError: raise ValueError( 'pathname must be string-like: {0}'.format(pathname)) self._handle = ctypes.c_void_p(None) self._opened = False if masterpath is None and not create: self._open(True if readonly is None else readonly) elif masterpath: self._from_master(masterpath.encode()) else: self._create() lib.call(const.SELECT_, const.CDF_zMODE_, ctypes.c_long(2)) self._attrlistref = weakref.ref(gAttrList(self)) self.backward = self.version()[0] < 3 self._var_nums = {} """Cache of name-to-number mappings for variables in this CDF""" self._attr_info = {} """Cache of name-to-(number, global) mappings for attributes in this CDF""" def __del__(self): """Destructor; called when CDF object is destroyed. Close CDF file if there is still a valid handle. .. note:: To avoid data loss, explicitly call :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save`. """ if self._opened: self.close() def __delitem__(self, name): """Delete a zVariable in this CDF, by name or number Parameters ========== name : string or int Name or number of the CDF variable .. note: Variable numbers may change if variables are added or removed. Examples ======== Delete the variable ``Epoch`` from the open CDF file ``cdffile``. >>> del cdffile['Epoch'] """ self[name]._delete() def __enter__(self): """Context manager entrance function.""" return self def __exit__(self, type, value, traceback): """Context manager exit function. Close CDF file. """ self.close() def __getitem__(self, name): """Gets a zVariable in this CDF, by name or number The CDF acts like a dict @param name: Name or number of the CDF variable @type name: string or int @return: CDF variable named or numbered L{name} @rtype: :py:class:`pycdf.Var` @raise KeyError: for pretty much any problem in lookup @note: variable numbers may change if variables are added or removed. """ try: return Var(self, name) except CDFException as e: raise KeyError('{0}: {1}'.format(name, e)) def __setitem__(self, name, data): """Writes data to a zVariable in this CDF If the zVariable does not exist, will create one matching L{data}. If it does exist, will attempt to write L{data} to it without changing the type or dimensions. @param name: name or number of the variable to write @type name: str or int @param data: data to write, or a :py:class:`pycdf.Var` to copy """ if isinstance(data, Var): self.clone(data, name) elif name in self: self[name][...] 
= data if hasattr(data, 'attrs'): self[name].attrs.clone(data.attrs) else: self.new(name, data) def __iter__(self, current = 0): """Iterates over zVars in CDF Iterators for dicts return keys @note: Returned in variable-number order """ while current < self.__len__(): name = self[current].name() value = (yield name) if value is None: current += 1 else: current = self[value]._num() current += 1 def __len__(self): """Implements 'length' of CDF (number of zVars) @return: number of zVars in the CDF @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, const.CDF_NUMzVARS_, ctypes.byref(count)) return count.value def __contains__(self, key): """Determines whether a particular variable name is in the CDF @note: Essentially an efficiency function; L{__iter__} is called if this isn't defined @param key: key/variable name to check @type key: string @return: True if L{key} is the name of a variable in CDF, else False @rtype: Boolean """ try: foo = self[key] return True except KeyError as e: expected = str(key) + \ ": NO_SUCH_VAR: Named variable not found in this CDF." if expected in e.args: return False raise def __repr__(self): """Returns representation of CDF Cannot return anything that can be eval'd to create a copy of the CDF, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<CDF:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the CDF This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.CDF`, just the names, types, and sizes of all variables. (Attributes are not listed.) @return: description of the variables in the CDF @rtype: str """ if self._opened: return '\n'.join([key + ': ' + str(value) for (key, value) in sorted(self.items())]) #can get away with this sort because second value in tuple isn't #compared unless first are different, and variable name is unique. else: if isinstance(self.pathname, str): return 'Closed CDF {0}'.format(self.pathname) else: return 'Closed CDF {0}'.format(self.pathname.decode('ascii')) def _open(self, readonly=True): """Opens the CDF file (called on init) Will open an existing CDF file read/write. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.OPEN_, const.CDF_, self.pathname, ctypes.byref(self._handle)) self._opened = True if readonly: #Default is RW self.readonly(readonly) def _create(self): """Creates (and opens) a new CDF file Created at ``pathname``. Assumes zero-dimension r variables Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.CREATE_, const.CDF_, self.pathname, ctypes.c_long(0), (ctypes.c_long * 1)(0), ctypes.byref(self._handle)) self._opened = True def _from_master(self, master_path): """Creates a new CDF from a master CDF file ``master_path`` is copied to ``pathname`` and opened. Parameters ========== master_path : string location of the master CDF file Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. 
note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ if os.path.exists(self.pathname): raise CDFError(const.CDF_EXISTS) shutil.copy2(master_path, self.pathname) self._open(False) def _call(self, *args, **kwargs): """Select this CDF as current and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. Parameters ========== args : various, see :py:mod:`ctypes`. Passed directly to the CDF library interface. Useful constants are defined in the :doc:`const <pycdf_const>` module of this package. Returns ======= out : ctypes.c_long CDF status from the library .. note: Terminal NULL_ is automatically added to ``args``. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. """ return lib.call(const.SELECT_, const.CDF_, self._handle, *args, **kwargs) def clone(self, zVar, name=None, data=True): """ Clone a zVariable (from another CDF or this) into this CDF Parameters ========== zVar : :py:class:`Var` variable to clone Other Parameters ================ name : str Name of the new variable (default: name of the original) data : boolean (optional) Copy data, or only type, dimensions, variance, attributes? (default: True, copy data as well) Returns ======= out : :py:class:`Var` The newly-created zVar in this CDF """ if name is None: name = zVar.name() if name in self: del self[name] self.new(name, type=zVar.type(), recVary=zVar.rv(), dimVarys=zVar.dv(), dims=zVar._dim_sizes(), n_elements=zVar._nelems()) self[name].compress(*zVar.compress()) self[name].attrs.clone(zVar.attrs) if data: r = zVar._raw zVar._raw = True self.raw_var(name)[...] = zVar[...] zVar._raw = r return zVar def col_major(self, new_col=None): """ Finds the majority of this CDF file Other Parameters ================ new_col : boolean Specify True to change to column-major, False to change to row major, or do not specify to check the majority rather than changing it. (default is check only) Returns ======= out : boolean True if column-major, false if row-major """ if new_col != None: new_maj = const.COLUMN_MAJOR if new_col else const.ROW_MAJOR self._call(const.PUT_, const.CDF_MAJORITY_, new_maj) maj = ctypes.c_long(0) self._call(const.GET_, const.CDF_MAJORITY_, ctypes.byref(maj)) if not maj.value in (const.ROW_MAJOR.value, const.COLUMN_MAJOR.value): raise CDFError(const.BAD_MAJORITY) return maj.value == const.COLUMN_MAJOR.value def readonly(self, ro=None): """ Sets or check the readonly status of this CDF If the CDF has been changed since opening, setting readonly mode will have no effect. .. note:: Closing a CDF that has been opened readonly, or setting readonly False, may take a substantial amount of time if there are many variables in the CDF, as a (potentially large) cache needs to be cleared. Consider specifying ``readonly=False`` when opening the file if this is an issue. However, this may make some reading operations slower. Other Parameters ================ ro : Boolean True to set the CDF readonly, False to set it read/write, or leave out to check only. 
Returns ======= out : Boolean True if CDF is read-only, else False Raises ====== CDFError : if bad mode is set """ if ro == True: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYon) elif ro == False: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYoff) mode = ctypes.c_long(0) self._call(const.CONFIRM_, const.CDF_READONLY_MODE_, ctypes.byref(mode)) if mode.value == const.READONLYon.value: return True elif mode.value == const.READONLYoff.value: return False else: raise CDFError(const.BAD_READONLY_MODE.value) def checksum(self, new_val=None): """ Set or check the checksum status of this CDF. If checksums are enabled, the checksum will be verified every time the file is opened. Other Parameters ================ new_val : boolean True to enable checksum, False to disable, or leave out to simply check. Returns ======= out : boolean True if the checksum is enabled or False if disabled """ if new_val != None: self._call(const.PUT_, const.CDF_CHECKSUM_, const.MD5_CHECKSUM if new_val else const.NO_CHECKSUM) chk = ctypes.c_long(0) self._call(const.GET_, const.CDF_CHECKSUM_, ctypes.byref(chk)) if not chk.value in (const.MD5_CHECKSUM.value, const.NO_CHECKSUM.value): raise CDFError(const.BAD_CHECKSUM) return chk.value == const.MD5_CHECKSUM.value def close(self): """ Closes the CDF file Although called on object destruction (:meth:`~CDF.__del__`), to ensure all data are saved, the user should explicitly call :meth:`~CDF.close` or :meth:`~CDF.save`. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.CLOSE_, const.CDF_) self._opened = False def compress(self, comptype=None, param=None): """ Set or check the compression of this CDF Sets compression on entire *file*, not per-variable. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) Returns ======= out : tuple (comptype, param) currently in effect See Also ======== :meth:`Var.compress` Examples ======== Set file ``cdffile`` to gzip compression, compression level 9: >>> cdffile.compress(pycdf.const.GZIP_COMPRESSION, 9) """ return _compress(self, comptype, param) def new(self, name, data=None, type=None, recVary=True, dimVarys=None, dims=None, n_elements=None, compress=None, compress_param=None): """ Create a new zVariable in this CDF .. note:: Either ``data`` or ``type`` must be specified. If type is not specified, it is guessed from ``data``. Parameters ========== name : str name of the new variable Other Parameters ================ data data to store in the new variable. If this has a an ``attrs`` attribute (e.g., :class:`~spacepy.datamodel.dmarray`), it will be used to populate attributes of the new variable. type : ctypes.c_long CDF type of the variable, from :mod:`~pycdf.const`. See section 2.5 of the CDF user's guide for more information on CDF data types. recVary : boolean record variance of the variable (default True) dimVarys : list of boolean dimension variance of each dimension, default True for all dimensions. 
dims : list of int size of each dimension of this variable, default zero-dimensional. Note this is the dimensionality as defined by CDF, i.e., for record-varying variables it excludes the leading record dimension. See :py:class:`Var`. n_elements : int number of elements, should be 1 except for CDF_CHAR, for which it's the length of the string. compress : ctypes.c_long Compression to apply to this variable, default None. See :py:meth:`Var.compress`. compress_param : ctypes.c_long Compression parameter if compression used; reasonable default is chosen. See :py:meth:`Var.compress`. Returns ======= out : :py:class:`Var` the newly-created zVariable Raises ====== ValueError : if neither data nor sufficient typing information is provided. Notes ===== Any given data may be representable by a range of CDF types; if the type is not specified, pycdf will guess which the CDF types which can represent this data. This breaks down to: #. If input data is a numpy array, match the type of that array #. Proper kind (numerical, string, time) #. Proper range (stores highest and lowest number provided) #. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: #. Type that matches precision of data first, then #. integer type before float type, then #. Smallest type first, then #. signed type first, then #. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if ``data`` specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: #. absolute values between 0 and 3e-39 #. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. """ if type in (const.CDF_EPOCH16, const.CDF_INT8, const.CDF_TIME_TT2000) \ and self.backward: raise ValueError('Cannot use EPOCH16, INT8, or TIME_TT2000 ' 'in backward-compatible CDF') if not lib.supports_int8 and \ type in (const.CDF_INT8, const.CDF_TIME_TT2000): raise ValueError('INT8 and TIME_TT2000 require CDF library 3.4.0') if data is None: if type is None: raise ValueError('Must provide either data or a CDF type.') if dims is None: dims = [] if n_elements is None: n_elements = 1 else: (guess_dims, guess_types, guess_elements) = _Hyperslice.types(data) if dims is None: if recVary: if guess_dims == (): raise ValueError( 'Record-varying data cannot be scalar. 
' 'Specify NRV with CDF.new() or put data in array.') dims = guess_dims[1:] else: dims = guess_dims if type is None: type = guess_types[0] if type == const.CDF_EPOCH16.value and self.backward: type = const.CDF_EPOCH if n_elements is None: n_elements = guess_elements if dimVarys is None: dimVarys = [True for i in dims] recVary = const.VARY if recVary else const.NOVARY dimVarys = [const.VARY if dimVary else const.NOVARY for dimVary in dimVarys] if not hasattr(type, 'value'): type = ctypes.c_long(type) if type.value == const.CDF_INT8.value and not lib.supports_int8: raise ValueError( '64-bit integer support require CDF library 3.4.0') if type.value in (const.CDF_EPOCH16.value, const.CDF_INT8.value, const.CDF_TIME_TT2000.value) \ and self.backward: raise ValueError('Data requires EPOCH16, INT8, or TIME_TT2000; ' 'incompatible with backward-compatible CDF') new_var = Var(self, name, type, n_elements, dims, recVary, dimVarys) if compress != None: new_var.compress(compress, compress_param) if data is not None: new_var[...] = data if hasattr(data, 'attrs'): new_var.attrs.clone(data.attrs) return new_var def raw_var(self, name): """ Get a "raw" :class:`Var` object. Normally a :class:`Var` will perform translation of values for certain types (to/from Unicode for CHAR variables on Py3k, and to/from datetime for all time types). A "raw" object does not perform this translation, on read or write. This does *not* affect the data on disk, and in fact it is possible to maintain multiple Python objects with access to the same zVariable. Parameters ========== name : str name or number of the zVariable """ v = self[name] v._raw = True return v def save(self): """ Saves the CDF file but leaves it open. If closing the CDF, :meth:`close` is sufficient; there is no need to call :meth:`save` before :meth:`close`. .. note:: Relies on an undocumented call of the CDF C library, which is also used in the Java interface. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.SAVE_, const.CDF_) def copy(self): """ Make a copy of all data and attributes in this CDF Returns ======= out : :py:class:`CDFCopy` :class:`~spacepy.datamodel.SpaceData`-like object of all data """ return CDFCopy(self) def version(self): """ Get version of library that created this CDF Returns ======= out : tuple version of CDF library, in form (version, release, increment) """ ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) self._call(const.GET_, const.CDF_VERSION_, ctypes.byref(ver), const.GET_, const.CDF_RELEASE_, ctypes.byref(rel), const.GET_, const.CDF_INCREMENT_, ctypes.byref(inc)) return (ver.value, rel.value, inc.value) def _get_attrs(self): """Get attribute list Provide access to the CDF's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = gAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. 
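Like the variables, the global attributes can be examined with ordinary dictionary operations, for example listing their names (the output depends on the file):

>>> for attrname in cdffile.attrs:
...     print(attrname)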
""") def var_num(self, varname): """Get the variable number of a particular variable name This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : int Variable number of this zvariable. """ num = self._var_nums.get(varname, None) if num is None: #Copied from Var._get, which can hopefully be thinned varNum = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMBER_, varname, ctypes.byref(varNum)) num = varNum.value self._var_nums[varname] = num return num def attr_num(self, attrname): """Get the attribute number and scope by attribute name This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : tuple attribute number, scope (True for global) of this attribute """ res = self._attr_info.get(attrname, None) if res is None: #Copied from Var._get, which can hopefully be thinned attrNum = ctypes.c_long(0) self._call(const.GET_, const.ATTR_NUMBER_, attrname, ctypes.byref(attrNum)) scope = ctypes.c_long(0) self._call(const.SELECT_, const.ATTR_, attrNum, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) res = (attrNum.value, scope) self._attr_info[attrname] = res return res def clear_attr_from_cache(self, attrname): """Mark an attribute deleted in the name-to-number cache Will remove an attribute, and all attributes with higher numbers, from the attribute cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the attribute. Not this is NOT a string in Python 3! """ num, scope = self.attr_num(attrname) #All numbers higher than this are renumbered for a, n in list(self._attr_info.items()): if n[0] >= num: del self._attr_info[a] def clear_from_cache(self, varname): """Mark a variable deleted in the name-to-number cache Will remove a variable, and all variables with higher numbers, from the variable cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! """ num = self.var_num(varname) #All numbers higher than this are renumbered for v, n in list(self._var_nums.items()): if n >= num: del self._var_nums[v] def add_attr_to_cache(self, attrname, num, scope): """Add an attribute to the name-to-number cache This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable scope : bool True if global scope; False if variable scope. 
""" self._attr_info[attrname] = (num, scope) def add_to_cache(self, varname, num): """Add a variable to the name-to-number cache This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable """ self._var_nums[varname] = num #Note there is no function for delete, currently handled in Var.rename #and Attr.rename by just deleting from the dict directly. Maybe this #should be differen (maybe should be possible to follow a variable across #a rename...) class Var(MutableSequence): """ A CDF variable. This object does not directly store the data from the CDF; rather, it provides access to the data in a format that much like a Python list or numpy :class:`~numpy.ndarray`. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. The CDF user's guide, section 2.3, provides background on variables. .. note:: Not intended to be created directly; use methods of :class:`CDF` to gain access to a variable. A record-varying variable's data are viewed as a hypercube of dimensions n_dims+1 (the extra dimension is the record number). They are indexed in row-major fashion, i.e. the last index changes most frequently / is contiguous in memory. If the CDF is column-major, the data are transformed to row-major before return. Non record-varying variables are similar, but do not have the extra dimension of record number. Variables can be subscripted by a multidimensional index to return the data. Indices are in row-major order with the first dimension representing the record number. If the CDF is column major, the data are reordered to row major. Each dimension is specified by standard Python `slice <http://docs.python.org/tutorial/introduction.html#strings>`_ notation, with dimensions separated by commas. The ellipsis fills in any missing dimensions with full slices. The returned data are lists; Python represents multidimensional arrays as nested lists. The innermost set of lists represents contiguous data. .. note:: numpy 'fancy indexing' is *not* supported. Degenerate dimensions are 'collapsed', i.e. no list of only one element will be returned if a single subscript is specified instead of a range. (To avoid this, specify a slice like 1:2, which starts with 1 and ends before 2). Two special cases: 1. requesting a single-dimension slice for a record-varying variable will return all data for that record number (or those record numbers) for that variable. 2. Requests for multi-dimensional variables may skip the record-number dimension and simply specify the slice on the array itself. In that case, the slice of the array will be returned for all records. In the event of ambiguity (e.g., single-dimension slice on a one-dimensional variable), case 1 takes priority. Otherwise, mismatch between the number of dimensions specified in the slice and the number of dimensions in the variable will cause an :exc:`~exceptions.IndexError` to be thrown. This all sounds very complicated but it is essentially attempting to do the 'right thing' for a range of slices. An unusual case is scalar (zero-dimensional) non-record-varying variables. Clearly they cannot be subscripted normally. 
In this case, use the ``[...]`` syntax meaning 'access all data.': >>> import pycdf >>> testcdf = pycdf.CDF('test.cdf', '') >>> variable = testcdf.new('variable', recVary=False, ... type=pycdf.const.CDF_INT4) >>> variable[...] = 10 >>> variable <Var: CDF_INT4 [] NRV > >>> variable[...] 10 Reading any empty non-record-varying variable will return an empty with the same *number* of dimensions, but all dimensions will be of zero length. The scalar is, again, a special case: due to the inability to have a numpy array which is both zero-dimensional and empty, reading an NRV scalar variable with no data will return an empty one-dimensional array. This is really not recommended. As a list type, variables are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_; iterating over a variable returns a single complete record at a time. This is all clearer with examples. Consider a variable ``B_GSM``, with three elements per record (x, y, z components) and fifty records in the CDF. Then: 1. ``B_GSM[0, 1]`` is the y component of the first record. 2. ``B_GSM[10, :]`` is a three-element list, containing x, y, and z components of the 11th record. As a shortcut, if only one dimension is specified, it is assumed to be the record number, so this could also be written ``B_GSM[10]``. 3. ``B_GSM[...]`` reads all data for ``B_GSM`` and returns it as a fifty-element list, each element itself being a three-element list of x, y, z components. Multidimensional example: consider fluxes stored as a function of pitch angle and energy. Such a variable may be called Flux and stored as a two-dimensional array, with the first dimension representing (say) ten energy steps and the second, eighteen pitch angle bins (ten degrees wide, centered from 5 to 175 degrees). Assume 100 records stored in the CDF (i.e. 100 different times). 1. ``Flux[4]`` is a list of ten elements, one per energy step, each element being a list of 18 fluxes, one per pitch bin. All are taken from the fifth record in the CDF. 2. ``Flux[4, :, 0:4]`` is the same record, all energies, but only the first four pitch bins (roughly, field-aligned). 3. ``Flux[..., 0:4]`` is a 100-element list (one per record), each element being a ten-element list (one per energy step), each containing fluxes for the first four pitch bins. This slicing notation is very flexible and allows reading specifically the desired data from the CDF. All data are, on read, converted to appropriate Python data types; EPOCH, EPOCH16, and TIME_TT2000 types are converted to :class:`~datetime.datetime`. Data are returned in numpy arrays. .. note:: Although pycdf supports TIME_TT2000 variables, the Python :class:`~datetime.datetime` object does not support leap seconds. Thus, on read, any seconds past 59 are truncated to 59.999999 (59 seconds, 999 milliseconds, 999 microseconds). Potentially useful list methods and related functions: - `count <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `in <http://docs.python.org/reference/expressions.html#in>`_ - `index <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `len <http://docs.python.org/library/functions.html#len>`_ - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - `sorted <http://docs.python.org/library/functions.html#sorted>`_ The topic of array majority can be very confusing; good background material is available at `IDL Array Storage and Indexing <http://www.idlcoyote.com/misc_tips/colrow_major.html>`_. 
In brief, *regardless of the majority stored in the CDF*, pycdf will always present the data in the native Python majority, row-major order, also known as C order. This is the default order in `NumPy <http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html #internal-memory-layout-of-an-ndarray>`_. However, packages that render image data may expect it in column-major order. If the axes seem 'swapped' this is likely the reason. The :attr:`~Var.attrs` Python attribute acts as a dictionary referencing zAttributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`zAttrList` for more on the dictionary of attributes. With writing, as with reading, every attempt has been made to match the behavior of Python lists. You can write one record, many records, or even certain elements of all records. There is one restriction: only the record dimension (i.e. dimension 0) can be resized by write, as all records in a variable must have the same dimensions. Similarly, only whole records can be deleted. .. note:: Unusual error messages on writing data usually mean that pycdf is unable to interpret the data as a regular array of a single type matching the type and shape of the variable being written. A 5x4 array is supported; an irregular array where one row has five columns and a different row has six columns is not. Error messages of this type include: - ``Data must be well-formed, regular array of number, string, or datetime`` - ``setting an array element with a sequence.`` - ``shape mismatch: objects cannot be broadcast to a single shape`` For these examples, assume Flux has 100 records and dimensions [2, 3]. Rewrite the first record without changing the rest: >>> Flux[0] = [[1, 2, 3], [4, 5, 6]] Writes a new first record and delete all the rest: >>> Flux[...] = [[1, 2, 3], [4, 5, 6]] Write a new record in the last position and add a new record after: >>> Flux[99:] = [[[1, 2, 3], [4, 5, 6]], ... [[11, 12, 13], [14, 15, 16]]] Insert two new records between the current number 5 and 6: >>> Flux[5:6] = [[[1, 2, 3], [4, 5, 6]], [[11, 12, 13], ... [14, 15, 16]]] This operation can be quite slow, as it requires reading and rewriting the entire variable. (CDF does not directly support record insertion.) Change the first element of the first two records but leave other elements alone: >>> Flux[0:2, 0, 0] = [1, 2] Remove the first record: >>> del Flux[0] Removes record 5 (the sixth): >>> del Flux[5] Due to the need to work around a bug in the CDF library, this operation can be quite slow. Delete *all data* from ``Flux``, but leave the variable definition intact: >>> del Flux[...] .. note:: Although this interface only directly supports zVariables, zMode is set on opening the CDF so rVars appear as zVars. See p.24 of the CDF user's guide; pyCDF uses zMode 2. .. autosummary:: ~Var.attrs ~Var.compress ~Var.copy ~Var.dtype ~Var.dv ~Var.insert ~Var.name ~Var.rename ~Var.rv ~Var.shape ~Var.type .. attribute:: Var.attrs zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. .. automethod:: compress .. automethod:: copy .. autoattribute:: dtype .. automethod:: dv .. automethod:: insert .. automethod:: name .. automethod:: rename .. automethod:: rv .. autoattribute:: shape .. 
automethod:: type """ def __init__(self, cdf_file, var_name, *args): """Create or locate a variable Parameters ========== cdf_file : :py:class:`pycdf.CDF` CDF file containing this variable var_name : string name of this variable Other Parameters ================ args additional arguments passed to :py:meth:`_create`. If none, opens an existing variable. If provided, creates a new one. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning """ self.cdf_file = cdf_file #This is the definitive "identify" of variable self._name = None self._type = None #CDF type (long) self._raw = False #Raw access (skip all conversions) if len(args) == 0: self._get(var_name) else: self._create(var_name, *args) #Weak reference to attribute list (use attrs instead) #This avoids a reference loop self._attrlistref = weakref.ref(zAttrList(self)) def __getitem__(self, key): """Returns a slice from the data array. Details under :py:class:`pycdf.Var`. @return: The data from this variable @rtype: list-of-lists of appropriate type. @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) #Hyperslice mostly catches this sort of thing, but #an empty variable is a special case, since we might want to #WRITE to 0th record (which Hyperslice also supports) but #can't READ from it, and iterating over tries to read from it. if hslice.rv: if hslice.dimsizes[0] == 0 and hslice.degen[0] and \ hslice.starts[0] == 0: raise IndexError('record index out of range') #For NRV, again hslice will assume 0th record exists since we might #want to write. So ANY degenerate dim other than the glued-on 0th #suggests an explicit index that should fail. None degenerate suggests #make an empty array. #Note this is pulling a lot of hyperslice stuff into getitem! elif hslice.dimsizes[0] == 0: if len(hslice.degen) > 1 and max(hslice.degen[1:]): raise IndexError('record index out of range') else: #The zero-length dimension is degenerate so it gets chopped, #and you can't have a zero-length numpy array that still #maintains the size of all other dimensions. So just force #a zero-dim array and the rest will follow hslice.counts[...] = 0 #If this is a scalar, need to make a single non-degenerate #dimension so it can be empty. if len(hslice.counts) == 1: hslice.degen[0] = False result = hslice.create_array() if hslice.counts[0] != 0: hslice.select() lib.call(const.GET_, const.zVAR_HYPERDATA_, result.ctypes.data_as(ctypes.c_void_p)) return hslice.convert_input_array(result) def __delitem__(self, key): """Removes a record (or set of records) from the CDF Only whole records can be deleted, so the del call must either specify only one dimension or it must specify all elements of the non-record dimensions. This is *not* a way to resize a variable! Deleting records from the middle of a variable may be very slow in some circumstances. To work around a bug in CDF library versions 3.4.0 and before, all the data must be read in, the requested deletions done, and then all written back out. 
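For example, remove records 5 through 9 (zero-based) of the record-varying variable ``Flux`` used in the examples above:

>>> del Flux[5:10]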
@param key: index or slice to delete @type key: int or slice @raise TypeError: if an attempt is made to delete from a non record-varying variable, or to delete below the record level """ if not self.rv(): raise TypeError('Cannot delete records from non-record-varying ' 'variable.') hslice = _Hyperslice(self, key) if hslice.dims > 1 and (hslice.counts[1:] != hslice.dimsizes[1:]).any(): raise TypeError('Can only delete entire records.') if hslice.counts[0] == 0: return start = hslice.starts[0] count = hslice.counts[0] interval = hslice.intervals[0] dimsize = hslice.dimsizes[0] self._call() dangerous_delete = False if lib._del_middle_rec_bug and \ (interval != 1 or (start != 0 and start + count < dimsize)): #delete from middle is dangerous if only have one index entry entries = ctypes.c_long(0) lib.call(const.GET_, const.zVAR_nINDEXENTRIES_, ctypes.byref(entries)) dangerous_delete = (entries.value == 1) if dangerous_delete: data = self[...] data = numpy.delete( data, numpy.arange(start, start + count * interval, interval), 0) self[0:dimsize - count] = data first_rec = dimsize - count last_rec = dimsize - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif interval == 1: first_rec = ctypes.c_long(start) last_rec = ctypes.c_long(start + count - 1) lib.call(const.DELETE_, const.zVAR_RECORDS_, first_rec, last_rec) else: self._call() #delete from end to avoid renumbering of records for recno in range(start + (count - 1) * interval, start - 1, -1 * interval): lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(recno), ctypes.c_long(recno)) def __setitem__(self, key, data): """Puts a slice into the data array. Details under :py:class:`pycdf.Var`. @param key: index or slice to store @type key: int or slice @param data: data to store @type data: numpy.array @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. 
IndexError will @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) n_recs = hslice.counts[0] hslice.expand(data) cdf_type = self.type() if cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) else: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=self._np_type()) if cdf_type == const.CDF_EPOCH16.value: datashape = data.shape[:-1] else: datashape = data.shape #Check data sizes if datashape != tuple(hslice.expected_dims()): raise ValueError('attempt to assign data of dimensions ' + str(datashape) + ' to slice of dimensions ' + str(tuple(hslice.expected_dims()))) #Flip majority and reversed dimensions, see convert_input_array data = hslice.convert_output_array(data) #Handle insertions and similar weirdness if hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Specified slice ends before last record, so insert in middle saved_data = self[hslice.starts[0] + n_recs:] if hslice.counts[0] > 0: hslice.select() lib.call(const.PUT_, const.zVAR_HYPERDATA_, data.ctypes.data_as(ctypes.c_void_p)) if hslice.counts[0] < n_recs: first_rec = hslice.starts[0] + hslice.counts[0] last_rec = hslice.dimsizes[0] - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Put saved data in after inserted data self[hslice.starts[0] + hslice.counts[0]:] = saved_data def extend(self, data): """ Append multiple values to the end of this variable This is an efficiency function which overrides the base implementation in MutableSequence. Parameters ---------- data : the data to append """ self[len(self):] = data def insert(self, index, data): """ Inserts a *single* record before an index Parameters ---------- index : int index before which to insert the new record data : the record to insert """ self[index:index] = [data] def _create(self, var_name, datatype, n_elements = 1, dims = (), recVary = const.VARY, dimVarys = None): """Creates a new zVariable @param var_name: name of this variable @type var_name: string @param datatype: CDF data type @type datatype: ctypes.c_long @param n_elements: number of elements (should be 1 except for CDF_CHAR variables). @type n_elements: long @param dims: size of each dimension for multi-dimensional variable, or empty for a zero-dimensional @type dims: sequence of long @param recVary: record variance for this variable (VARY/NOVARY) @type recVary: long @param dimVarys: array of VARY or NOVARY, variance for each dimension @type dimVarys: sequence of long @return: new variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.new}. 
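A sketch of the equivalent public call (variable name and type are illustrative):

>>> newvar = cdffile.new('Var1', type=pycdf.const.CDF_INT4, dims=[3])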
""" dim_array = (ctypes.c_long * len(dims))(*dims) enc_name = var_name.encode('ascii') if dimVarys is None: dim_vary_array = (ctypes.c_long * (len(dims) if len(dims) > 0 else 1))(const.VARY) else: dim_vary_array = (ctypes.c_long * len(dims))(*dimVarys) varNum = ctypes.c_long(0) self.cdf_file._call(const.CREATE_, const.zVAR_, enc_name, datatype, ctypes.c_long(n_elements), ctypes.c_long(len(dims)), dim_array, recVary, dim_vary_array, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) def _delete(self): """Removes this zVariable from the CDF @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ self._call(const.DELETE_, const.zVAR_) self.cdf_file.clear_from_cache(self._name) self._name = None def _get(self, var_name): """Gets an existing zVariable @param var_name: name of this variable @type var_name: string @return: variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.__getitem__}. """ if isinstance(var_name, str_classes): try: enc_name = var_name.encode('ascii').rstrip() except AttributeError: enc_name = var_name.rstrip() #already in ASCII #'touch' CDF to cause an error if the name isn't there; get number varNum = ctypes.c_long(0) self.cdf_file._call(const.GET_, const.zVAR_NUMBER_, enc_name, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) else: #Looking up by number name = ctypes.create_string_buffer(const.CDF_VAR_NAME_LEN256+1) self.cdf_file._call(const.SELECT_, const.zVAR_, ctypes.c_long(var_name), const.GET_, const.zVAR_NAME_, name) self._name = name.value.rstrip() self.cdf_file.add_to_cache(self._name, var_name) def _num(self): """Returns the zVar number for this variable @return: number of this zVar @rtype: int """ return self.cdf_file.var_num(self._name) def __len__(self): """Get number of records for this variable in this file @return: Number of records @rtype: long @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ count = ctypes.c_long(0) self._call(const.GET_, const.zVAR_MAXREC_, ctypes.byref(count)) return (count.value + 1) def __repr__(self): """Returns representation of the variable Cannot return anything that can be eval'd to create a copy, so just wrap the informal representation in angle brackets. @return: info on this zVar @rtype: str """ return '<Var:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the variable This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.Var`. 
@return: info on this zVar, CDFTYPE [dimensions] NRV (if not record-varying) @rtype: str """ if self.cdf_file._opened: cdftype = self.type() chartypes = (const.CDF_CHAR.value, const.CDF_UCHAR.value) rv = self.rv() typestr = lib.cdftypenames[cdftype] + \ ('*' + str(self._nelems()) if cdftype in chartypes else '' ) if rv: sizestr = str([len(self)] + self._dim_sizes()) else: sizestr = str(self._dim_sizes()) return typestr + ' ' + sizestr + ('' if rv else ' NRV') else: if isinstance(self._name, str): return 'zVar "{0}" in closed CDF {1}'.format( self._name, self.cdf_file.pathname) else: return 'zVar "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self.cdf_file.pathname.decode('ascii')) def _n_dims(self): """Get number of dimensions for this variable @return: the number of dimensions @rtype: long """ n_dims = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMDIMS_, ctypes.byref(n_dims)) return n_dims.value def _dim_sizes(self): """Get the dimension sizes for this variable @return: sequence of sizes @rtype: sequence of long @note: This will always be in Python order (i.e. row major, last index iterates most quickly), *regardless* of the majority of the CDF. """ sizes = (ctypes.c_long * const.CDF_MAX_DIMS)(0) self._call(const.GET_, const.zVAR_DIMSIZES_, sizes) sizes = sizes[0:self._n_dims()] return sizes def rv(self, new_rv=None): """ Gets or sets whether this variable has record variance If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Other Parameters ================ new_rv : boolean True to change to record variance, False to change to NRV, unspecified to simply check variance. Returns ======= out : Boolean True if record varying, False if NRV """ if new_rv != None: self._call(const.PUT_, const.zVAR_RECVARY_, const.VARY if new_rv else const.NOVARY) vary = ctypes.c_long(0) self._call(const.GET_, const.zVAR_RECVARY_, ctypes.byref(vary)) return vary.value != const.NOVARY.value def dv(self, new_dv=None): """ Gets or sets dimension variance of each dimension of variable. If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Parameters ========== new_dv : list of boolean Each element True to change that dimension to dimension variance, False to change to not dimension variance. (Unspecified to simply check variance.) Returns ======= out : list of boolean True if that dimension has variance, else false. """ ndims = self._n_dims() if new_dv != None: if len(new_dv) != ndims: raise ValueError('Must specify variance for ' + str(ndims) + 'dimensions.') varies = (ctypes.c_long * ndims)( *[const.VARY if dv else const.NOVARY for dv in new_dv]) self._call(const.PUT_, const.zVAR_DIMVARYS_, varies) if ndims == 0: return [] varies = (ctypes.c_long * const.CDF_MAX_DIMS)() self._call(const.GET_, const.zVAR_DIMVARYS_, varies) return [dv != const.NOVARY.value for dv in varies[0:ndims]] def _call(self, *args, **kwargs): """Select this CDF and variable and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. @param args: Passed directly to the CDF library interface. Useful constants are defined in the :py:mod:`pycdf.const` module of this package. @type args: various, see :py:mod:`ctypes`. 
@return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self.cdf_file._call( const.SELECT_, const.zVAR_, ctypes.c_long(self.cdf_file.var_num(self._name)), *args, **kwargs) def _np_type(self): """Returns the numpy type of this variable This is the numpy type that will come directly out of the CDF; see :meth:`dtype` for the representation post-conversion. Raises ====== CDFError : for library-reported error or failure to find numpy type Returns ======= out : dtype numpy dtype that will hold value from this variable """ cdftype = self.type() if cdftype == const.CDF_CHAR.value or cdftype == const.CDF_UCHAR.value: return numpy.dtype('S' + str(self._nelems())) try: return lib.numpytypedict[cdftype] except KeyError: raise CDFError(const.BAD_DATA_TYPE) def type(self, new_type=None): """ Returns or sets the CDF type of this variable Parameters ========== new_type : ctypes.c_long the new type from :mod:`~pycdf.const` Returns ======= out : int CDF type """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) n_elements = ctypes.c_long(self._nelems()) self._call(const.PUT_, const.zVAR_DATASPEC_, new_type, n_elements) self._type = None if self._type is None: cdftype = ctypes.c_long(0) self._call(const.GET_, const.zVAR_DATATYPE_, ctypes.byref(cdftype)) self._type = cdftype.value return self._type def _nelems(self): """Number of elements for each value in this variable This is the length of strings for CHAR and UCHAR, should be 1 otherwise. @return: length of strings @rtype: int """ nelems = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMELEMS_, ctypes.byref(nelems)) return nelems.value def name(self): """ Returns the name of this variable Returns ======= out : str variable's name """ if isinstance(self._name, str): return self._name elif isinstance(self._name, bytes): return self._name.decode() def compress(self, comptype=None, param=None): """ Set or check the compression of this variable Compression may not be changeable on variables with data already written; even deleting the data may not permit the change. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
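For example, to set gzip compression, level 9, on the variable ``Flux`` (mirroring the file-level example in :meth:`CDF.compress`):

>>> cdffile['Flux'].compress(pycdf.const.GZIP_COMPRESSION, 9)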
Returns ======= out : tuple the (comptype, param) currently in effect """ return _compress(self, comptype, param) def copy(self): """ Copies all data and attributes from this variable Returns ======= out : :class:`VarCopy` list of all data in record order """ return VarCopy(self) def rename(self, new_name): """ Renames this variable Parameters ========== new_name : str the new name for this variable """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_VAR_NAME_LEN256: raise CDFError(const.BAD_VAR_NAME) self._call(const.PUT_, const.zVAR_NAME_, enc_name) self.cdf_file.add_to_cache( enc_name, self.cdf_file.var_num(self._name)) #Still in cache del self.cdf_file._var_nums[self._name] self._name = enc_name @property def shape(self): """ Provides the numpy array-like shape of this variable. Returns a tuple; first element is number of records (RV variable only) And the rest provide the dimensionality of the variable. .. note:: Assigning to this attribute will not change the shape. """ if self.rv(): return tuple([len(self)] + self._dim_sizes()) else: return tuple(self._dim_sizes()) @property def dtype(self): """ Provide the numpy dtype equivalent to the CDF type of this variable. Data from this variable will be returned in numpy arrays of this type. See Also -------- type """ cdftype = self.type() if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str is not bytes and not self._raw: return numpy.dtype('U' + str(self._nelems())) if cdftype in (const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value) and not self._raw: return numpy.dtype('O') return self._np_type() def _get_attrs(self): """Get attribute list Provide access to the zVar's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = zAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. """) class _Hyperslice(object): """Represents a CDF 'slice' used for the hyper CDF functions For internal module use only. @ivar dims: number of dimensions to this slice, usually number of dimensions to the variable plus one for the record, which represents the 0th (least rapidly varying) dimension. @type dims: int @ivar dimsizes: size of each dimension (0th is number of records) @type dimsizes: list of int @ivar starts: index of the start value for each dimension ('dimension indices' in CDF speak) @type starts: list of int @ivar counts: number of values to get from each dimension. Final result will be the product of everything in counts. ('dimension counts' in CDF speak) @type counts: numpy.array @ivar intervals: interval between successive indices to use for each dimension. ('dimension invervals' in CDF speak) @type intervals: list of int @ivar degen: is this dimension degenerate, i.e. should be removed in the returned dataset. A 3D array with one dimension degenerate will be returned as a 2D array (i.e. list-of-lists.) @type degen: numpy.array @ivar rev: should this dimension be returned in reverse order? 
@type rev: numpy.array @ivar column: is this slice in column-major mode (if false, row-major) @type column: boolean @ivar zvar: what CDF variable this object slices on @type zvar: :py:class:`pycdf.Var` @ivar expanded_key: fully-expanded version of the key passed to the constructor (all dimensions filled in) @type expanded_key: tuple @note: All dimension-related variables are stored row-major (Python order) """ def __init__(self, zvar, key): """Create a Hyperslice @param zvar: zVariable that this slices @type zvar: :py:class:`pycdf.Var` @param key: Python multi-dimensional slice as passed to __getitem__ @type key: tuple of slice and/or int @raise IndexError: if slice is out of range, mismatches dimensions, or otherwise unparsable. @raise ValueError: if slice has invalid values """ self.zvar = zvar self.rv = self.zvar.rv() #dim of records, + 1 record dim (NRV always is record 0) self.dims = zvar._n_dims() + 1 self.dimsizes = [len(zvar)] + \ zvar._dim_sizes() self.starts = [0] * self.dims self.counts = numpy.empty((self.dims,), dtype=numpy.int32) self.counts.fill(1) self.intervals = [1] * self.dims self.degen = numpy.zeros(self.dims, dtype=numpy.bool) self.rev = numpy.zeros(self.dims, dtype=numpy.bool) #key is: #1. a single value (integer or slice object) if called 1D #2. a tuple (of integers and/or slice objects) if called nD #3. Each item is either a single value (degenerate dim) # or a slice object. if not hasattr(key, '__len__'): #Not a container object, pack in tuple key = (key, ) if not self.rv: key = (0, ) + key #NRV, so always get 0th record (degenerate) key = self.expand_ellipsis(key, self.dims) if self.rv: #special-cases for RV variables if len(key) == 1: #get all data for this record(s) key = self.expand_ellipsis(key + (Ellipsis, ), self.dims) elif len(key) == self.dims - 1: #get same slice from each record key = (slice(None, None, None), ) + key if len(key) == self.dims: self.expanded_key = key for i in range(self.dims): idx = key[i] if hasattr(idx, 'start'): #slice (self.starts[i], self.counts[i], self.intervals[i], self.rev[i]) = \ self.convert_range(idx.start, idx.stop, idx.step, self.dimsizes[i]) else: #Single degenerate value if idx < 0: idx += self.dimsizes[i] if idx != 0 and (idx >= self.dimsizes[i] or idx < 0): raise IndexError('list index out of range') self.starts[i] = idx self.degen[i] = True else: raise IndexError('Slice does not match dimensions for zVar ' + str(zvar._name)) self.column = zvar.cdf_file.col_major() def expected_dims(self, data=None): """Calculate size of non-degenerate dimensions Figures out size, in each dimension, of expected input data @return: size of each dimension for this slice, excluding degenerate @rtype: list of int """ return [self.counts[i] for i in range(self.dims) if not self.degen[i]] def expand(self, data): """Expands the record dimension of this slice to hold a set of data If the length of data (outermost dimension) is larger than the record count (counts[0]) for this slice, expand the slice to hold all the data. This requires that the record dimension of the slice not be degenerate, and also that it not have been completely specified when the hyperslice was created (i.e. record dimension either ellipsis or no specified stop.) Does *not* expand any other dimension, since that's Very Hard in CDF. 
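For example (illustrative): if a variable currently holds 5 records, a slice created as ``var[0:]`` and then expanded with a 10-record array will have its record count raised to 10 so that all of the data can be written.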
@param data: the data which are intended to be stored in this slice @type data: list """ rec_slice = self.expanded_key[0] if not self.rv or isinstance(data, str_classes) or self.degen[0] or \ not hasattr(rec_slice, 'stop'): return if len(data) < self.counts[0]: #Truncate to fit data if rec_slice.stop is None and rec_slice.step in (None, 1): self.counts[0] = len(data) elif len(data) > self.counts[0]: #Expand to fit data if rec_slice.step in (None, 1): self.counts[0] = len(data) def create_array(self): """Creates a numpy array to hold the data from this slice Returns ======= out : numpy.array array sized, typed, and dimensioned to hold data from this slice """ counts = self.counts degen = self.degen if self.column: counts = self.reorder(counts) degen = self.reorder(degen) #TODO: Forcing C order for now, revert to using self.column later array = numpy.empty( [counts[i] for i in range(len(counts)) if not degen[i]], self.zvar._np_type(), order='C') return numpy.require(array, requirements=('C', 'A', 'W')) def convert_input_array(self, buffer): """Converts a buffer of raw data from this slice EPOCH(16) variables always need to be converted. CHAR need converted to Unicode if py3k Parameters ========== buffer : numpy.array data as read from the CDF file Returns ======= out : numpy.array converted data """ result = self._flip_array(buffer) #Convert to derived types cdftype = self.zvar.type() if not self.zvar._raw: if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str != bytes: dt = numpy.dtype('U{0}'.format(result.dtype.itemsize)) result = numpy.require(numpy.char.array(result).decode(), dtype=dt) elif cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(result) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(result) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(result) return result def convert_output_array(self, buffer): """Convert a buffer of data that will go into this slice Parameters ========== buffer : numpy.array data to go into the CDF file Returns ======= out : numpy.array input with majority flipped and dimensions reversed to be suitable to pass directly to CDF library. """ buffer = self._flip_array(buffer) return numpy.require(buffer, requirements=('C', 'A', 'W')) def _flip_array(self, data): """ Operations for majority, etc. common between convert_input and _output """ cdftype = self.zvar.type() #Flip majority if any non-degenerate dimensions exist if self.column and not min(self.degen): #Record-number dim degen, swap whole thing if self.degen[0]: if cdftype == const.CDF_EPOCH16.value: #Maintain last dimension data = data.transpose( list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose() #Record-number dimension is not degenerate, so keep it first else: if cdftype == const.CDF_EPOCH16.value: data = data.transpose( [0] + list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose( [0] + list(range(len(data.shape) - 1, 0, -1))) #Reverse non-degenerate dimensions in rev #Remember that the degenerate indices are already gone! 
if self.rev.any(): sliced = [(slice(None, None, -1) if self.rev[i] else slice(None)) for i in range(self.dims) if not self.degen[i]] if cdftype == const.CDF_EPOCH16.value: #don't reverse last dim sliced.extend(slice(None)) data = operator.getitem(data, tuple(sliced)) return data def select(self): """Selects this hyperslice in the CDF Calls the CDF library to select the CDF, variable, records, and array elements corresponding to this slice. """ args = (const.SELECT_, const.zVAR_RECNUMBER_, ctypes.c_long(self.starts[0]), const.SELECT_, const.zVAR_RECCOUNT_, ctypes.c_long(self.counts[0]), const.SELECT_, const.zVAR_RECINTERVAL_, ctypes.c_long(self.intervals[0])) if self.dims > 1: dims = self.dims - 1 args += (const.SELECT_, const.zVAR_DIMINDICES_, (ctypes.c_long * dims)(*self.starts[1:]), const.SELECT_, const.zVAR_DIMCOUNTS_, (ctypes.c_long * dims)(*self.counts[1:]), const.SELECT_, const.zVAR_DIMINTERVALS_, (ctypes.c_long * dims)(*self.intervals[1:])) self.zvar._call(*args) @staticmethod def expand_ellipsis(slices, n_dims): """Expands any ellipses into correct number of full-size slices @param slices: tuple of slices, integers, or ellipse objects @type slices: tuple @param n_dims: number of dimensions this slice is over @type n_dims: int @return: L{slices} with ellipses replaced by appropriate number of full-dimension slices @rtype: tuple @raise IndexError: if ellipses specified when already have enough dimensions """ if slices is Ellipsis: return tuple([slice(None, None, None) for i in range(n_dims)]) #Elements might be numpy arrays, so can't use in/index idx = [i for i, v in enumerate(slices) if v is Ellipsis] if not idx: #no ellipsis return slices if len(idx) > 1: #multiples! raise IndexError('Ellipses can only be used once per slice.') idx = idx[0] #how many dims to expand ellipsis to #remember the ellipsis is in len(slices) and must be replaced! extra = n_dims - len(slices) + 1 if extra < 0: raise IndexError('too many indices') result = slices[0:idx] + (slice(None), ) * extra + slices[idx+1:] return result @staticmethod def check_well_formed(data): """Checks if input data is well-formed, regular array""" d = numpy.asanyarray(data) if d.dtype == numpy.object: #this is probably going to be bad try: len(d.flat[0]) except TypeError: #at least it's not a list pass else: raise ValueError( 'Data must be well-formed, regular array of number, ' 'string, or datetime') @staticmethod def dimensions(data): """Finds the dimensions of a nested list-of-lists @param data: data of which dimensions are desired @type data: list (of lists) @return: dimensions of L{data}, in order outside-in @rtype: list of int @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) _Hyperslice.check_well_formed(d) return d.shape @staticmethod def types(data, backward=False): """Find dimensions and valid types of a nested list-of-lists Any given data may be representable by a range of CDF types; infer the CDF types which can represent this data. This breaks down to: 1. Proper kind (numerical, string, time) 2. Proper range (stores highest and lowest number) 3. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: 1. Type that matches precision of data first, then 2. integer type before float type, then 3. Smallest type first, then 4. signed type first, then 5. specifically-named (CDF_BYTE) vs. 
generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if L{data} specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: 1. absolute values between 0 and 3e-39 2. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. @param data: data for which dimensions and CDF types are desired @type data: list (of lists) @param backward: limit to pre-CDF3 types @type backward: bool @return: dimensions of L{data}, in order outside-in; CDF types which can represent this data; number of elements required (i.e. length of longest string) @rtype: 3-tuple of lists ([int], [ctypes.c_long], [int]) @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) dims = d.shape elements = 1 types = [] _Hyperslice.check_well_formed(d) if d.dtype.kind in ('S', 'U'): #it's a string types = [const.CDF_CHAR, const.CDF_UCHAR] elements = d.dtype.itemsize if d.dtype.kind == 'U': #UTF-8 uses 4 bytes per elements //= 4 elif d.size and hasattr(numpy.ma.getdata(d).flat[0], 'microsecond'): if max((dt.microsecond % 1000 for dt in d.flat)) > 0: types = [const.CDF_EPOCH16, const.CDF_EPOCH, const.CDF_TIME_TT2000] else: types = [const.CDF_EPOCH, const.CDF_EPOCH16, const.CDF_TIME_TT2000] if backward: del types[types.index(const.CDF_EPOCH16)] del types[-1] elif not lib.supports_int8: del types[-1] elif d is data or isinstance(data, numpy.generic): #numpy array came in, use its type (or byte-swapped) types = [k for k in lib.numpytypedict if (lib.numpytypedict[k] == d.dtype or lib.numpytypedict[k] == d.dtype.newbyteorder()) and not k in lib.timetypes] if (not lib.supports_int8 or backward) \ and const.CDF_INT8.value in types: del types[types.index(const.CDF_INT8.value)] #Maintain priority to match the ordered lists below: #float/double (44, 45) before real (21/22), and #byte (41) before int (1) before char (51). So hack. #Consider making typedict an ordered dict once 2.6 is dead. types.sort(key=lambda x: x % 50, reverse=True) if not types: #not a numpy array, or can't parse its type if d.dtype.kind == 'O': #Object. 
Try to make it numeric #Can't do safe casting from Object, so try and compare #Basically try most restrictive to least restrictive trytypes = (numpy.uint64, numpy.int64, numpy.float64) for t in trytypes: try: newd = d.astype(dtype=t) except: #Failure to cast, try next type continue if (newd == d).all(): #Values preserved, use this type d = newd #Continue with normal guessing, as if a list break else: #fell through without a match raise ValueError( 'Cannot convert generic objects to CDF type.') if d.dtype.kind in ('i', 'u'): #integer minval = numpy.min(d) maxval = numpy.max(d) if minval < 0: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_INT2, const.CDF_INT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 15, 2 ** 31, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] else: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_UINT1, const.CDF_INT2, const.CDF_UINT2, const.CDF_INT4, const.CDF_UINT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 8, 2 ** 15, 2 ** 16, 2 ** 31, 2 ** 32, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] types = [t for (t, c) in zip(types, cutoffs) if c > maxval and (minval >= 0 or minval >= -c)] if (not lib.supports_int8 or backward) \ and const.CDF_INT8 in types: del types[types.index(const.CDF_INT8)] else: #float if dims is (): if d != 0 and (abs(d) > 1.7e38 or abs(d) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] else: absolutes = numpy.abs(d[d != 0]) if len(absolutes) > 0 and \ (numpy.max(absolutes) > 1.7e38 or numpy.min(absolutes) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] types = [t.value if hasattr(t, 'value') else t for t in types] return (dims, types, elements) @staticmethod def reorder(seq): """Reorders seq to switch array majority Used to take an array of subscripts between row and column majority. First element is not touched, being the record number. @param seq: a sequence of *subscripts* @type seq: sequence of integers @return: seq with all but element 0 reversed in order @rtype: sequence of integers """ return numpy.concatenate((seq[0:1], numpy.flipud(seq)[:-1])) @staticmethod def convert_range(start, stop, step, size): """Converts a start/stop/step range to start/count/interval (i.e. changes from Python-style slice to CDF-style) @param start: index to start a slice at, may be none or negative @type start: int @param stop: index at end of slice (one-past, standard Python), may be none or negative @type stop: int @param step: interval for stepping through stlice @type step: int @param size: size of list to slice @type size: int @return: (start, count, interval, rev) where: 1. start is the start index, normalized to be within the size of the list and negatives handled 2. count is the number of records in the slice, guaranteed to stop before the end 3. interval is the skip between records 4. rev indicates whether the sequence should be reversed @rtype: (int, int, int, boolean) """ (start, stop, step) = slice(start, stop, step).indices(size) if step < 0: step *= -1 count = int((start - stop + step - 1) / step) start = start - (count - 1) * step rev = True else: count = int((stop - start + step - 1) / step) rev = False if count < 0: count = 0 start = 0 return (start, count, step, rev) class Attr(MutableSequence): """An attribute, g or z, for a CDF .. 
warning:: This class should not be used directly, but only in its subclasses, :class:`gAttr` and :class:`zAttr`. The methods listed here are safe to use in the subclasses. Represents a CDF attribute, providing access to the Entries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. An introduction to CDF attributes can be found in section 2.4 of the CDF user's guide. Each element of the list is a single Entry of the appropriate type. The index to the elements is the Entry number. Multi-dimensional slicing is *not* supported; an Entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry .. autosummary:: ~Attr.append ~Attr.has_entry ~Attr.insert ~Attr.max_idx ~Attr.new ~Attr.number ~Attr.rename ~Attr.type .. automethod:: append .. automethod:: has_entry .. automethod:: insert .. automethod:: max_idx .. automethod:: new .. automethod:: number .. automethod:: rename .. automethod:: type """ def __init__(self, cdf_file, attr_name, create=False): """Initialize this attribute @param cdf_file: CDF file containing this attribute @type cdf_file: :py:class:`pycdf.CDF` @param attr_name: Name of this attribute @type attr_name: str @param create: True to create attribute, False to look up existing. @type create: bool """ self._cdf_file = cdf_file self._raw = False if isinstance(attr_name, str_classes): try: self._name = attr_name.encode('ascii') except AttributeError: self._name = attr_name attrno = ctypes.c_long() if create: self._cdf_file._call(const.CREATE_, const.ATTR_, self._name, self.SCOPE, ctypes.byref(attrno)) self._cdf_file.add_attr_to_cache( self._name, attrno.value, self.SCOPE == const.GLOBAL_SCOPE) else: #Ensure exists, and populate cache. See scope note below attrno, scope = self._cdf_file.attr_num(self._name) else: name = ctypes.create_string_buffer(const.CDF_ATTR_NAME_LEN256 + 1) scope = ctypes.c_long(0) self._cdf_file._call(const.SELECT_, const.ATTR_, ctypes.c_long(attr_name)) #Because it's possible to create a gAttr Python objecting #referencing an Attribute with variable scope, and vice-versa, #do NOT assume the scope matches #(Higher level code checks for that being a bad thing.) self._cdf_file._call( const.GET_, const.ATTR_NAME_, name, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) self._name = name.value.rstrip() if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) self._cdf_file.add_attr_to_cache(self._name, attr_name, scope) def __getitem__(self, key): """Return a slice of Entries. Because Attributes may be sparse, a multi-element slice will return None for those elements which do not have associated Entries. @param key: index or range of Entry number to return @type key: slice or int @return: a list of entries, appropriate type. @raise IndexError: if L{key} is an int and that Entry number does not exist. 
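Example (illustrative; ``attribute`` is an existing :class:`gAttr` holding entries 0 and 2 but not 1):

>>> attribute[0:3]    #missing entries come back as None
[1, None, 3]
>>> attribute[1]      #a single missing entry raises IndexError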
""" if key is Ellipsis: key = slice(None, None, None) if hasattr(key, 'indices'): idx = range(*key.indices(self.max_idx() + 1)) return [self._get_entry(i) if self.has_entry(i) else None for i in idx] else: if self.has_entry(key): return self._get_entry(key) else: raise IndexError('list index ' + str(key) + ' out of range.') def _check_other_entries(self, types): """Try to get the type of this entry from others in the Attribute For zAttrs, checks if all other Entries are the same type, and at least one doesn't match its zVar, i.e. Entry type dominates (otherwise assumption is the Var type dominates). For gAttrs, checks all other Entries, and gives priority to the one that's earliest in the possible type list and exists in other Entries. This is only one component of Entry type guessing! :param list types: CDF types that are candidates (match the data) :return: The type discerned from other Entries, or None """ if self.ENTRY_ == const.zENTRY_: #If everything else is the same entry type, #and one is not the same as its var, probably #all entries should be of that type cand_et = None #The Entry type that might work one_var_diff = False #One Var has a type different from Entry for num in range(self.max_idx() + 1): if not self.has_entry(num): continue vartype = self._cdf_file[num].type() entrytype = self.type(num) if vartype != entrytype: one_var_diff = True if cand_et is None: if not entrytype in types: return None #One var has Entry with "impossible" type cand_et = entrytype elif cand_et != entrytype: return None #Two vars have Entries with different types if one_var_diff and cand_et is not None: return cand_et else: # Of those types which exist in other entries, # find the one which is earliest # in types, i.e. the preferred type entrytypes = [self.type(num) for num in range(self.max_idx() + 1) if self.has_entry(num)] entrytypes = [et for et in entrytypes if et in types] if entrytypes: return types[ min([types.index(et) for et in entrytypes])] return None def __setitem__(self, key, data): """Set a slice of Entries. @param key: index or range of Entry numbers to set @type key: slice or int @param data: the data to set these entries to. Normally each entry should be a sequence; if a scalar is provided, it is treated as a single-element list. @type data: scalar or list @raise ValueError: if size of {data} does not match size of L{key} @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. 
""" if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): #Single value, promote everything a dimension idx = (key, key + 1, 1) data = [data] else: idx = key.indices(self.max_idx() + 1) if key.step is None or key.step > 0: #Iterating forward, extend slice to match data if len(data) > len(range(*idx)): idx = (idx[0], idx[0] + idx[2] * len(data), idx[2]) #get, and check, types and sizes for all data #checks first so don't have error after changing half the Entries data_idx = -1 typelist = [] for i in range(*idx): data_idx += 1 if data_idx >= len(data): continue datum = data[data_idx] if datum is None: typelist[i] = (None, None, None) continue (dims, types, elements) = _Hyperslice.types( datum, backward=self._cdf_file.backward) if len(types) <= 0: raise ValueError('Cannot find a matching CDF type.') if len(dims) > 1: raise ValueError('Entries must be scalar or 1D.') elif len(dims) == 1 and isinstance(datum[0], str_classes): raise ValueError('Entry strings must be scalar.') entry_type = None if self.has_entry(i): #If the entry already exists, match its type entry_type = self.type(i) if not entry_type in types: entry_type = None if entry_type is None: #Check other entries for this attribute entry_type = self._check_other_entries(types) if entry_type is None and self.ENTRY_ == const.zENTRY_: #Fall back to zVar type vartype = self._cdf_file[i].type() if vartype in types: entry_type = vartype else: entry_type = types[0] elif entry_type is None: entry_type = types[0] if not entry_type in lib.numpytypedict: raise ValueError('Cannot find a matching numpy type.') typelist.append((dims, entry_type, elements)) data_idx = -1 for i in range(*idx): data_idx += 1 if data_idx >= len(data) or data[data_idx] is None: if self.has_entry(i): del self[i] continue datum = data[data_idx] (dims, entry_type, elements) = typelist[data_idx] self._write_entry(i, datum, entry_type, dims, elements) def __delitem__(self, key): """Delete a slice of Entries. @param key: index or range of Entry numbers to delete @type key: slice or int @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. """ if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): idx = (key, key + 1, 1) else: idx = key.indices(self.max_idx() + 1) for i in range(*idx): self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(i), const.DELETE_, self.ENTRY_) def __iter__(self, current=0): """Iterates over all entries in this Attribute Returns data from one entry at a time until reaches the end. @note: Returned in entry-number order. """ while current <= self.max_idx(): if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current += 1 def __reversed__(self, current=None): """Iterates over all entries in this Attribute Returns data from one entry at a time, starting at end and going to beginning. @note: Returned in entry-number order. """ if current is None: current = self.max_idx() while current >= 0: if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current -= 1 def __len__(self): """Number of Entries for this Attr. NOT same as max Entry number. 
@return: Number of Entries @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_NUMENTRIES_, ctypes.byref(count)) return count.value def __repr__(self): """Returns representation of an attribute Cannot return anything that can be eval'd to create a copy of the attribtute, so just wrap the informal representation in angle brackets. @return: all the data in this attribute @rtype: str """ return '<\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute This is an 'informal' representation in that it cannot be evaluated directly to create an L{Attr}. @return: all the data in this attribute @rtype: str """ if self._cdf_file._opened: return '\n'.join([str(item) for item in self]) else: if isinstance(self._name, str): return 'Attribute "{0}" in closed CDF {1}'.format( self._name, self._cdf_file.pathname) else: return 'Attribute "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self._cdf_file.pathname.decode('ascii')) def insert(self, index, data): """Insert an entry at a particular number Inserts entry at particular number while moving all subsequent entries to one entry number later. Does not close gaps. Parameters ========== index : int index where to put the new entry data : data for the new entry """ max_entry = self.max_idx() if index > max_entry: #Easy case self[index] = data return for i in range(max_entry, index - 1, -1): if self.has_entry(i+1): self.__delitem__(i+1) if self.has_entry(i): self.new(self.__getitem__(i), type=self.type(i), number=i+1) self[index] = data def append(self, data): """Add an entry to end of attribute Puts entry after last defined entry (does not fill gaps) Parameters ========== data : data for the new entry """ self[self.max_idx() + 1] = data def _call(self, *args, **kwargs): """Select this CDF and Attr and call the CDF internal interface @param args: Passed directly to the CDF library interface. @type args: various, see :py:mod:`ctypes`. @return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self._cdf_file._call( const.SELECT_, const.ATTR_, ctypes.c_long(self._cdf_file.attr_num(self._name)[0]), *args, **kwargs) def _entry_len(self, number): """Number of elements in an Entry @param number: number of Entry @type number: int @return: number of elements @rtype: int """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') count = ctypes.c_long(0) self._call( const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_NUMELEMS_, ctypes.byref(count)) return count.value def type(self, number, new_type=None): """Find or change the CDF type of a particular Entry number Parameters ========== number : int number of Entry to check or change Other Parameters ================ new_type type to change this Entry to, from :mod:`~pycdf.const`. Omit to only check type. Returns ======= out : int CDF variable type, see :mod:`~pycdf.const` Notes ===== If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) size = ctypes.c_long(self._entry_len(number)) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATASPEC_, new_type, size, ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') cdftype = ctypes.c_long(0) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATATYPE_, ctypes.byref(cdftype), ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') return cdftype.value def has_entry(self, number): """Check if this attribute has a particular Entry number Parameters ========== number : int number of Entry to check or change Returns ======= out : bool True if ``number`` is a valid entry number; False if not """ status = self._call(const.CONFIRM_, self.ENTRY_EXISTENCE_, ctypes.c_long(number), ignore=(const.NO_SUCH_ENTRY, )) return not status == const.NO_SUCH_ENTRY def max_idx(self): """Maximum index of Entries for this Attr Returns ======= out : int maximum Entry number """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_MAXENTRY_, ctypes.byref(count)) return count.value def new(self, data, type=None, number=None): """Create a new Entry in this Attribute .. note:: If ``number`` is provided and an Entry with that number already exists, it will be overwritten. Parameters ========== data data to put in the Entry Other Parameters ================ type : int type of the new Entry, from :mod:`~pycdf.const` (otherwise guessed from ``data``) number : int Entry number to write, default is lowest available number. """ if number is None: number = 0 while self.has_entry(number): number += 1 (dims, types, elements) = _Hyperslice.types( data, backward=self._cdf_file.backward) if type is None: #Guess based on other entries type = self._check_other_entries(types) if type is None and self.ENTRY_ == const.zENTRY_: #Try to match variable type vartype = self._cdf_file[number].type() if vartype in types: type = vartype if type is None: type = types[0] elif hasattr(type, 'value'): type = type.value self._write_entry(number, data, type, dims, elements) def number(self): """Find the attribute number for this attribute Returns ======= out : int attribute number """ no = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.ATTR_NUMBER_, self._name, ctypes.byref(no)) return no.value def global_scope(self): """Determine scope of this attribute. Returns ======= out : bool True if global (i.e. gAttr), False if zAttr """ return self._cdf_file.attr_num(self._name)[1] def rename(self, new_name): """Rename this attribute Renaming a zAttribute renames it for *all* zVariables in this CDF! 
Parameters ========== new_name : str the new name of the attribute """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_ATTR_NAME_LEN256: raise CDFError(const.BAD_ATTR_NAME) self._call(const.PUT_, const.ATTR_NAME_, enc_name) self._cdf_file.add_attr_to_cache( enc_name, *self._cdf_file.attr_num(self._name)) #still in cache del self._cdf_file._attr_info[self._name] self._name = enc_name def _get_entry(self, number): """Read an Entry associated with this L{Attr} @param number: number of Entry to return @type number: int @return: data from entry numbered L{number} @rtype: list or str """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') #Make a big enough buffer length = self._entry_len(number) cdftype = self.type(number) if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): buff = numpy.empty((), 'S{0}'.format(length), order='C') else: if not cdftype in lib.numpytypedict: raise CDFError(const.BAD_DATA_TYPE) buff = numpy.empty((length,), lib.numpytypedict[cdftype], order='C') buff = numpy.require(buff, requirements=('C', 'A', 'W')) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATA_, buff.ctypes.data_as(ctypes.c_void_p)) #decode if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): if str == bytes or self._raw: #Py2k, leave as bytes result = bytes(buff) else: #Py3k, make unicode result = str(numpy.char.array(buff).decode()) else: if not self._raw: if cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(buff) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(buff) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(buff) else: result = buff else: result = buff if length == 1: result = result[0] return result def _write_entry(self, number, data, cdf_type, dims, elements): """Write an Entry to this Attr. @param number: number of Entry to write @type number: int @param data: data to write @param cdf_type: the CDF type to write, from :py:mod:`pycdf.const` @param dims: dimensions of L{data} @type dims: list @param elements: number of elements in L{data}, 1 unless it is a string @type elements: int """ if len(dims) == 0: n_write = 1 else: n_write = dims[0] if cdf_type in (const.CDF_CHAR.value, const.CDF_UCHAR.value): data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.dtype('S' + str(elements))) n_write = elements elif cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data), except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) elif cdf_type in lib.numpytypedict: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=lib.numpytypedict[cdf_type]) else: raise CDFError(const.BAD_DATA_TYPE) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATA_, ctypes.c_long(cdf_type), ctypes.c_long(n_write), data.ctypes.data_as(ctypes.c_void_p)) def _delete(self): """Delete this Attribute Also deletes all Entries associated with it. 
""" self._call(const.DELETE_, const.ATTR_) self._cdf_file.clear_attr_from_cache(self._name) self._name = None class zAttr(Attr): """zAttribute for zVariables within a CDF. .. warning:: Because zAttributes are shared across all variables in a CDF, directly manipulating them may have unexpected consequences. It is safest to operate on zEntries via :class:`zAttrList`. .. note:: When accessing a zAttr, pyCDF exposes only the zEntry corresponding to the associated zVariable. See Also ======== :class:`Attr` """ ENTRY_ = const.zENTRY_ ENTRY_DATA_ = const.zENTRY_DATA_ SCOPE = const.VARIABLE_SCOPE ENTRY_EXISTENCE_ = const.zENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMzENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXzENTRY_ ENTRY_NUMELEMS_ = const.zENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.zENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.zENTRY_DATASPEC_ def insert(self, index, data): """Insert entry at particular index number Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError def append(self, index, data): """Add entry to end of attribute list Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError class gAttr(Attr): """Global Attribute for a CDF Represents a CDF attribute, providing access to the gEntries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. Normally accessed by providing a key to a :class:`gAttrList`: >>> attribute = cdffile.attrs['attribute_name'] >>> first_gentry = attribute[0] Each element of the list is a single gEntry of the appropriate type. The index to the elements is the gEntry number. A gEntry may be either a single string or a 1D array of numerical type. Entries of numerical type (everything but CDF_CHAR and CDF_UCHAR) with a single element are returned as scalars; multiple-element entries are returned as a list. No provision is made for accessing below the entry level; the whole list is returned at once (but Python's slicing syntax can be used to extract individual items from that list.) Multi-dimensional slicing is *not* supported; an entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry gEntries are *not* necessarily contiguous; a gAttribute may have an entry 0 and entry 2 without an entry 1. :meth:`~Attr.len` will return the *number* of gEntries; use :meth:`~Attr.max_idx` to find the highest defined gEntry number and :meth:`~Attr.has_entry` to determine if a particular gEntry number exists. Iterating over all entries is also supported:: >>> entrylist = [entry for entry in attribute] Deleting gEntries will leave a "hole": >>> attribute[0:3] = [1, 2, 3] >>> del attribute[1] >>> attribute.has_entry(1) False >>> attribute.has_entry(2) True >>> print attribute[0:3] [1, None, 3] Multi-element slices over nonexistent gEntries will return ``None`` where no entry exists. Single-element indices for nonexistent gEntries will raise ``IndexError``. Assigning ``None`` to a gEntry will delete it. 
When assigning to a gEntry, the type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing gEntry of the same number in this gAttribute #. other gEntries in this gAttribute #. data-matching constraints described in :meth:`CDF.new`. See Also ======== :class:`Attr` """ ENTRY_ = const.gENTRY_ ENTRY_DATA_ = const.gENTRY_DATA_ SCOPE = const.GLOBAL_SCOPE ENTRY_EXISTENCE_ = const.gENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMgENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXgENTRY_ ENTRY_NUMELEMS_ = const.gENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.gENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.gENTRY_DATASPEC_ class AttrList(MutableMapping): """Object representing a list of attributes. .. warning:: This class should not be used directly, but only via its subclasses, :class:`gAttrList` and :class:`zAttrList`. Methods listed here are safe to use from the subclasses. .. autosummary:: ~AttrList.clone ~AttrList.copy ~AttrList.from_dict ~AttrList.new ~AttrList.rename .. automethod:: clone .. automethod:: copy .. automethod:: from_dict .. automethod:: new .. automethod:: rename """ def __init__(self, cdf_file, special_entry=None): """Initialize the attribute collection @param cdf_file: CDF these attributes are in @type cdf_file: :py:class:`pycdf.CDF` @param special_entry: callable which returns a "special" entry number, used to limit results for zAttrs to those which match the zVar (i.e. the var number) @type special_entry: callable """ self._cdf_file = cdf_file self.special_entry = special_entry def __getitem__(self, name): """Find an Attribute by name @param name: name of the Attribute to return @type name: str @return: attribute named L{name} @rtype: L{Attr} @raise KeyError: if there is no attribute named L{name} @raise CDFError: other errors in CDF library """ try: attrib = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attrib.global_scope() != self.global_scope: raise KeyError(name + ': no ' + self.attr_name + ' by that name.') return attrib def __setitem__(self, name, data): """Create an Attribute or change its entries @param name: name of Attribute to change @type name: str @param data: Entries to populate this Attribute with. Any existing Entries will be deleted! Another C{Attr} may be specified, in which case all its entries are copied. @type data: scalar, list, or L{Attr} """ if isinstance(data, AttrList): if name in self: del self[name] attr = self._get_or_create(name) for entryno in range(data.max_idx()): if data.has_entry(entryno): attr.new(data[entryno], data.type(entryno), entryno) else: attr = self._get_or_create(name) if isinstance(data, str_classes): data = [data] else: try: junk = len(data) except TypeError: data = [data] attr[:] = data del attr[len(data):] def __delitem__(self, name): """Delete an Attribute (and all its entries) @param name: name of Attribute to delete @type name: str """ try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) attr._delete() def __iter__(self, current=0): """Iterates over all Attr in this CDF or variable Returns name of one L{Attr} at a time until reaches the end. @note: Returned in number order. 
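Example (illustrative; ``cdffile`` is an open :class:`CDF`):

>>> names = [name for name in cdffile.attrs]    #all attribute names, in number order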
""" count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) while current < count.value: candidate = self.AttrType(self._cdf_file, current) if candidate.global_scope() == self.global_scope: if self.special_entry is None or \ candidate.has_entry(self.special_entry()): if str == bytes: value = yield(candidate._name) else: value = yield(candidate._name.decode()) if value != None: current = self[value].number() current += 1 def __repr__(self): """Returns representation of attribute list Cannot return anything that can be eval'd to create a copy of the list, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<' + self.__class__.__name__ + ':\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute list This is an 'informal' representation in that it cannot be evaluated directly to create an L{AttrList}. @return: all the data in this list of attributes @rtype: str """ if self._cdf_file._opened: return '\n'.join([key + ': ' + ( ('\n' + ' ' * (len(key) + 2)).join( [str(value[i]) + ' [' + lib.cdftypenames[value.type(i)] + ']' for i in range(value.max_idx() + 1) if value.has_entry(i)]) if isinstance(value, Attr) else str(value) + ' [' + lib.cdftypenames[self.type(key)] + ']' ) for (key, value) in sorted(self.items())]) else: if isinstance(self._cdf_file.pathname, str): return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname) else: return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname.decode('ascii')) def clone(self, master, name=None, new_name=None): """ Clones another attribute list, or one attribute from it, into this list. Parameters ========== master : AttrList the attribute list to copy from. This can be any dict-like object. Other Parameters ================ name : str (optional) name of attribute to clone (default: clone entire list) new_name : str (optional) name of the new attribute, default ``name`` """ if name is None: self._clone_list(master) else: self._clone_attr(master, name, new_name) def copy(self): """ Create a copy of this attribute list Returns ======= out : dict copy of the entries for all attributes in this list """ return dict((key, value[:] if isinstance(value, Attr) else value) for (key, value) in self.items()) def new(self, name, data=None, type=None): """ Create a new Attr in this AttrList Parameters ========== name : str name of the new Attribute Other Parameters ================ data data to put into the first entry in the new Attribute type CDF type of the first entry from :mod:`~pycdf.const`. Only used if data are specified. Raises ====== KeyError : if the name already exists in this list """ if name in self: raise KeyError(name + ' already exists.') #A zAttr without an Entry in this zVar will be a "get" not "create" attr = self._get_or_create(name) if data is not None: if self.special_entry is None: attr.new(data, type) else: attr.new(data, type, self.special_entry()) def rename(self, old_name, new_name): """ Rename an attribute in this list Renaming a zAttribute renames it for *all* zVariables in this CDF! Parameters ========== old_name : str the current name of the attribute new_name : str the new name of the attribute """ AttrList.__getitem__(self, old_name).rename(new_name) def from_dict(self, in_dict): """ Fill this list of attributes from a dictionary .. deprecated:: 0.1.5 Use :meth:`~pycdf.AttrList.clone` instead; it supports cloning from dictionaries. 
Parameters ========== in_dict : dict Attribute list is populated entirely from this dictionary; all existing attributes are deleted. """ warnings.warn("from_dict is deprecated and will be removed. Use clone.", DeprecationWarning) for k in in_dict: self[k] = in_dict[k] for k in list(self): if not k in in_dict: del self[k] def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{AttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name self[new_name] = master[name] def _clone_list(self, master): """Clones this attribute list from another @param master: the attribute list to copy from @type master: L{AttrList} """ for name in master: self._clone_attr(master, name) for name in list(self): #Can't iterate over a list we're changing if not name in master: del self[name] def _get_or_create(self, name): """Retrieve L{Attr} or create it if it doesn't exist @param name: name of the attribute to look up or create @type name: str @return: attribute with this name @rtype: L{Attr} """ attr = None try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status != const.NO_SUCH_ATTR: raise if attr is None: attr = self.AttrType(self._cdf_file, name, True) elif attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) return attr class gAttrList(AttrList): """ Object representing *all* the gAttributes in a CDF. Normally accessed as an attribute of an open :class:`CDF`: >>> global_attribs = cdffile.attrs Appears as a dictionary: keys are attribute names; each value is an attribute represented by a :class:`gAttr` object. To access the global attribute TEXT: >>> text_attr = cdffile.attrs['TEXT'] See Also ======== :class:`AttrList` """ AttrType = gAttr attr_name = 'gAttribute' global_scope = True def __len__(self): """ Number of gAttributes in this CDF Returns ======= out : int number of gAttributes in the CDF """ count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMgATTRS_, ctypes.byref(count)) return count.value class zAttrList(AttrList): """Object representing *all* the zAttributes in a zVariable. Normally accessed as an attribute of a :class:`Var` in an open CDF: >>> epoch_attribs = cdffile['Epoch'].attrs Appears as a dictionary: keys are attribute names, values are the value of the zEntry associated with the appropriate zVariable. Each vAttribute in a CDF may only have a *single* entry associated with each variable. The entry may be a string, a single numerical value, or a series of numerical values. Entries with multiple values are returned as an entire list; direct access to the individual elements is not possible. Example: finding the first dependency of (ISTP-compliant) variable ``Flux``: >>> print cdffile['Flux'].attrs['DEPEND_0'] zAttributes are shared among zVariables, one zEntry allowed per zVariable. (pyCDF hides this detail.) Deleting the last zEntry for a zAttribute will delete the underlying zAttribute. zEntries are created and destroyed by the usual dict methods on the zAttrlist: >>> epoch_attribs['new_entry'] = [1, 2, 4] #assign a list to new zEntry >>> del epoch_attribs['new_entry'] #delete the zEntry The type of the zEntry is guessed from data provided. 
The type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing zEntry corresponding to this zVar #. other zEntries in this zAttribute #. the type of this zVar #. data-matching constraints described in :py:meth:`CDF.new` See Also ======== :class:`AttrList` """ AttrType = zAttr attr_name = 'zAttribute' global_scope = False def __init__(self, zvar): """Initialize the attribute collection @param zvar: zVariable these attributes are in @param zvar: :py:class:`pycdf.Var` """ super(zAttrList, self).__init__(zvar.cdf_file, zvar._num) self._zvar = zvar def __getitem__(self, name): """Find an zEntry by name @param name: name of the zAttribute to return @type name: str @return: attribute named L{name} @rtype: L{zAttr} @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if attrib.has_entry(zvar_num): attrib._raw = self._zvar._raw return attrib[zvar_num] else: raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) def __delitem__(self, name): """Delete an zEntry by name @param name: name of the zEntry to delete @type name: str @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library @note: If this is the only remaining entry, the Attribute will be deleted. """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(str(name) + ': no such attribute for variable ' + str(self._zvar._name)) del attrib[zvar_num] if len(attrib) == 0: attrib._delete() def __setitem__(self, name, data): """Sets a zEntry by name The type of the zEntry is guessed from L{data}. The type is chosen to match the data; subject to that constraint, it will try to match (in order): 1. existing zEntry corresponding to this zVar 2. other zEntries in this zAttribute 3. the type of this zVar 4. data-matching constraints described in L{_Hyperslice.types} @param name: name of zAttribute; zEntry for this zVariable will be set in zAttribute by this name @type name: str @raise CDFError: errors in CDF library @raise ValueError: if unable to find a valid CDF type matching L{data}, or if L{data} is the wrong dimensions. """ try: attr = super(zAttrList, self).__getitem__(name) except KeyError: attr = zAttr(self._cdf_file, name, True) attr._raw = self._zvar._raw attr[self._zvar._num()] = data def __len__(self): """Number of zAttributes in this variable @return: number of zAttributes in the CDF which have entries for this variable. @rtype: int """ length = 0 count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) current = 0 while current < count.value: candidate = zAttr(self._cdf_file, current) if not candidate.global_scope(): if candidate.has_entry(self._zvar._num()): length += 1 current += 1 return length def type(self, name, new_type=None): """Find or change the CDF type of a zEntry in this zVar @param name: name of the zAttr to check or change @type name: str @param new_type: type to change it to, see :py:mod:`pycdf.const` @type new_type: ctypes.c_long @return: CDF variable type, see :py:mod:`pycdf.const` @rtype: int @note: If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) return attrib.type(zvar_num, new_type) def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{zAttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name if new_name in self: del self[new_name] self.new(new_name, master[name], master.type(name) if hasattr(master, 'type') else None)
    @staticmethod
    def _find_lib():
        """
        Search for the CDF library

        Searches in likely locations for CDF libraries and attempts to load
        them. Stops at the first successful load; on failure, reports all
        the files that were tried as libraries.

        Returns
        =======
        out : tuple
            This is either (path to library, loaded library) or, in the
            event of failure, (list of libraries tried, None).
        """
        failed = []
        for libpath in Library._lib_paths():
            try:
                lib = ctypes.CDLL(libpath)
            except:
                failed.append(libpath)
            else:
                return libpath, lib
        return (failed, None)
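A short sketch of how this return convention is consumed; it mirrors the check in ``Library.__init__`` shown further below and is illustrative only:

libpath, lib = Library._find_lib()
if lib is None:
    # on failure, libpath is the list of candidate paths that could not be loaded
    raise Exception('Cannot load CDF C library; checked ' + ', '.join(libpath))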
455
478
#!/usr/bin/env python # -*- coding: utf-8 -*- """ das developers note: This a is modification of the original SpacePy pycdf package. All refereneces to the greater spacepy package have been removed to create a small standalone module. --cwp 2018-10-18 The libcdf.so location code has been changed to find the version installed in anaconda. --cwp 2020-04-06 This package provides a Python interface to the Common Data Format (CDF) library used for many NASA missions, available at http://cdf.gsfc.nasa.gov/. It is targeted at Python 2.6+ and should work without change on either Python 2 or Python 3. The interface is intended to be 'pythonic' rather than reproducing the C interface. To open or close a CDF and access its variables, see the :class:`CDF` class. Accessing data within the variables is via the :class:`Var` class. The :data:`lib` object provides access to some routines that affect the functionality of the library in general. The :mod:`~pycdf.const` module contains constants useful for accessing the underlying library. Authors: Jon Niehof Institution: University of New Hampshire Contact: [email protected] Copyright 2010-2015 Los Alamos National Security, LLC. """ __contact__ = 'Jon Niehof, [email protected]' try: from collections.abc import MutableMapping, MutableSequence except ImportError: from collections import MutableMapping, MutableSequence import ctypes import ctypes.util import datetime import operator import os import os.path import shutil import sys import tempfile import warnings import weakref import numpy import numpy.ma #Import const AFTER library loaded, so failed load doesn't leave half-imported #from . import const try: str_classes = (str, bytes, unicode) except NameError: str_classes = (str, bytes) class Library(object): """ Abstraction of the base CDF C library and its state. Not normally intended for end-user use. An instance of this class is created at package load time as the :data:`~pycdf.lib` variable, providing access to the underlying C library if necessary. The CDF library itself is described in section 2.1 of the CDF user's guide, as well as the CDF C reference manual. Calling the C library directly requires knowledge of :mod:`ctypes`. Instantiating this object loads the C library, see :doc:`/pycdf` docs for details. .. autosummary:: ~Library.call ~Library.check_status ~Library.datetime_to_epoch ~Library.datetime_to_epoch16 ~Library.datetime_to_tt2000 ~Library.epoch_to_datetime ~Library.epoch_to_epoch16 ~Library.epoch_to_num ~Library.epoch_to_tt2000 ~Library.epoch16_to_datetime ~Library.epoch16_to_epoch ~Library.epoch16_to_tt2000 ~Library.set_backward supports_int8 ~Library.tt2000_to_datetime ~Library.tt2000_to_epoch ~Library.tt2000_to_epoch16 v_datetime_to_epoch v_datetime_to_epoch16 v_datetime_to_tt2000 v_epoch_to_datetime v_epoch_to_tt2000 v_epoch16_to_datetime v_epoch16_to_tt2000 v_tt2000_to_datetime v_tt2000_to_epoch v_tt2000_to_epoch16 libpath version .. automethod:: call .. automethod:: check_status .. automethod:: datetime_to_epoch .. automethod:: datetime_to_epoch16 .. automethod:: datetime_to_tt2000 .. automethod:: epoch_to_datetime .. automethod:: epoch_to_epoch16 .. automethod:: epoch_to_num .. automethod:: epoch_to_tt2000 .. automethod:: epoch16_to_datetime .. automethod:: epoch16_to_epoch .. automethod:: epoch16_to_tt2000 .. automethod:: set_backward .. attribute:: supports_int8 True if this library supports INT8 and TIME_TT2000 types; else False. .. automethod:: tt2000_to_datetime .. automethod:: tt2000_to_epoch .. 
automethod:: tt2000_to_epoch16 .. method:: v_datetime_to_epoch(datetime) A vectorized version of :meth:`datetime_to_epoch` which takes a numpy array of datetimes as input and returns an array of epochs. .. method:: v_datetime_to_epoch16(datetime) A vectorized version of :meth:`datetime_to_epoch16` which takes a numpy array of datetimes as input and returns an array of epoch16. .. method:: v_datetime_to_tt2000(datetime) A vectorized version of :meth:`datetime_to_tt2000` which takes a numpy array of datetimes as input and returns an array of TT2000. .. method:: v_epoch_to_datetime(epoch) A vectorized version of :meth:`epoch_to_datetime` which takes a numpy array of epochs as input and returns an array of datetimes. .. method:: v_epoch_to_tt2000(epoch) A vectorized version of :meth:`epoch_to_tt2000` which takes a numpy array of epochs as input and returns an array of tt2000s. .. method:: v_epoch16_to_datetime(epoch0, epoch1) A vectorized version of :meth:`epoch16_to_datetime` which takes a numpy array of epoch16 as input and returns an array of datetimes. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_epoch16_to_tt2000(epoch16) A vectorized version of :meth:`epoch16_to_tt2000` which takes a numpy array of epoch16 as input and returns an array of tt2000s. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_tt2000_to_datetime(tt2000) A vectorized version of :meth:`tt2000_to_datetime` which takes a numpy array of tt2000 as input and returns an array of datetimes. .. method:: v_tt2000_to_epoch(tt2000) A vectorized version of :meth:`tt2000_to_epoch` which takes a numpy array of tt2000 as input and returns an array of epochs. .. method:: v_tt2000_to_epoch16(tt2000) A vectorized version of :meth:`tt2000_to_epoch16` which takes a numpy array of tt2000 as input and returns an array of epoch16. .. attribute:: libpath The path where pycdf found the CDF C library, potentially useful in debugging. If this contains just the name of a file (with no path information), then the system linker found the library for pycdf. On Linux, ``ldconfig -p`` may be useful for displaying the system's library resolution. .. attribute:: version Version of the CDF library, (version, release, increment, subincrement) """ def __init__(self, libpath=None, library=None): """Load the CDF C library. Searches for the library in the order: 1. Appropriately-named file in CDF_LIB 2. Appropriately-named file in CDF_BASE 3. Standard library search path @raise CDFError: BAD_DATA_TYPE if can't map types properly """ if not 'CDF_TMP' in os.environ: os.environ['CDF_TMP'] = tempfile.gettempdir() if not library: if not libpath: self.libpath, self._library = self._find_lib() if self._library is None: raise Exception(( 'Cannot load CDF C library; checked {0}. 
' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(self.libpath))) else: self._library = ctypes.CDLL(libpath) self.libpath = libpath else: self._library = library self.libpath = libpath self._library.CDFlib.restype = ctypes.c_long #commonly used, so set it up here self._library.EPOCHbreakdown.restype = ctypes.c_long self._library.computeEPOCH.restype = ctypes.c_double self._library.computeEPOCH.argtypes = [ctypes.c_long] * 7 self._library.computeEPOCH16.restype = ctypes.c_double self._library.computeEPOCH16.argtypes = [ctypes.c_long] * 10 + \ [ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDFsetFileBackward'): self._library.CDFsetFileBackward.restype = None self._library.CDFsetFileBackward.argtypes = [ctypes.c_long] #Map old name to the 3.7.1+ name if not hasattr(self._library, 'computeTT2000') \ and hasattr(self._library, 'CDF_TT2000_from_UTC_parts'): self._library.computeTT2000 \ = self._library.CDF_TT2000_from_UTC_parts if hasattr(self._library, 'computeTT2000'): self._library.computeTT2000.restype = ctypes.c_longlong self._library.computeTT2000.argtypes = \ [ctypes.c_double] *9 #Map old name to the 3.7.1+ name if not hasattr(self._library, 'breakdownTT2000') \ and hasattr(self._library, 'CDF_TT2000_to_UTC_parts'): self._library.breakdownTT2000 \ = self._library.CDF_TT2000_to_UTC_parts if hasattr(self._library, 'breakdownTT2000'): self._library.breakdownTT2000.restype = None self._library.breakdownTT2000.argtypes = \ [ctypes.c_longlong] + [ctypes.POINTER(ctypes.c_double)] * 9 if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH'): self._library.CDF_TT2000_to_UTC_EPOCH.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH.argtypes = [ctypes.c_longlong] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH'): self._library.CDF_TT2000_from_UTC_EPOCH.restype = ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH.argtypes = [ctypes.c_double] if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH16'): self._library.CDF_TT2000_to_UTC_EPOCH16.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH16.argtypes = \ [ctypes.c_longlong, ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH16'): self._library.CDF_TT2000_from_UTC_EPOCH16.restype = \ ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH16.argtypes = \ [ctypes.POINTER(ctypes.c_double * 2)] #Get CDF version information ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) sub = ctypes.c_char(b' ') self.call(const.GET_, const.LIB_VERSION_, ctypes.byref(ver), const.GET_, const.LIB_RELEASE_, ctypes.byref(rel), const.GET_, const.LIB_INCREMENT_, ctypes.byref(inc), const.GET_, const.LIB_subINCREMENT_, ctypes.byref(sub)) ver = ver.value rel = rel.value inc = inc.value sub = sub.value self.version = (ver, rel, inc, sub) self._del_middle_rec_bug = ver < 3 or (ver == 3 and (rel < 4 or (rel == 4 and inc < 1))) self.supports_int8 = (ver > 3 or (ver == 3 and rel >=4)) self.cdftypenames = {const.CDF_BYTE.value: 'CDF_BYTE', const.CDF_CHAR.value: 'CDF_CHAR', const.CDF_INT1.value: 'CDF_INT1', const.CDF_UCHAR.value: 'CDF_UCHAR', const.CDF_UINT1.value: 'CDF_UINT1', const.CDF_INT2.value: 'CDF_INT2', const.CDF_UINT2.value: 'CDF_UINT2', const.CDF_INT4.value: 'CDF_INT4', const.CDF_UINT4.value: 'CDF_UINT4', const.CDF_INT8.value: 'CDF_INT8', const.CDF_FLOAT.value: 'CDF_FLOAT', const.CDF_REAL4.value: 'CDF_REAL4', const.CDF_DOUBLE.value: 'CDF_DOUBLE', const.CDF_REAL8.value: 'CDF_REAL8', const.CDF_EPOCH.value: 'CDF_EPOCH', 
const.CDF_EPOCH16.value: 'CDF_EPOCH16', const.CDF_TIME_TT2000.value: 'CDF_TIME_TT2000', } self.numpytypedict = {const.CDF_BYTE.value: numpy.int8, const.CDF_CHAR.value: numpy.int8, const.CDF_INT1.value: numpy.int8, const.CDF_UCHAR.value: numpy.uint8, const.CDF_UINT1.value: numpy.uint8, const.CDF_INT2.value: numpy.int16, const.CDF_UINT2.value: numpy.uint16, const.CDF_INT4.value: numpy.int32, const.CDF_UINT4.value: numpy.uint32, const.CDF_INT8.value: numpy.int64, const.CDF_FLOAT.value: numpy.float32, const.CDF_REAL4.value: numpy.float32, const.CDF_DOUBLE.value: numpy.float64, const.CDF_REAL8.value: numpy.float64, const.CDF_EPOCH.value: numpy.float64, const.CDF_EPOCH16.value: numpy.dtype((numpy.float64, 2)), const.CDF_TIME_TT2000.value: numpy.int64, } self.timetypes = [const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value] if not self.supports_int8: del self.cdftypenames[const.CDF_INT8.value] del self.numpytypedict[const.CDF_INT8.value] del self.cdftypenames[const.CDF_TIME_TT2000.value] del self.numpytypedict[const.CDF_TIME_TT2000.value] elif sys.platform.startswith('linux') \ and os.uname()[4].startswith('arm') \ and hasattr(self._library, 'computeTT2000') \ and self._library.computeTT2000( 2010, 1, 1, 0, 0, 0, 0, 0, 0) != 315576066184000000: #TT2000 call failed, so probably need to type-pun #double arguments to variadic functions. #Calling convention for non-variadic functions with floats #is unique, but convention for ints is same as variadic. #So type-pun arguments to integers to force that calling #convention. if ctypes.sizeof(ctypes.c_longlong) != \ ctypes.sizeof(ctypes.c_double): warnings.warn('ARM with unknown type sizes; ' 'TT2000 functions will not work.') else: self._library.computeTT2000.argtypes = \ [ctypes.c_longlong] * 9 c_ll_p = ctypes.POINTER(ctypes.c_longlong) if self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( 2010)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) != 315576066184000000: warnings.warn('ARM with unknown calling convention; ' 'TT2000 functions will not work.') self.datetime_to_tt2000 = self._datetime_to_tt2000_typepunned v_epoch16_to_datetime = numpy.frompyfunc( self.epoch16_to_datetime, 2, 1) self.v_epoch16_to_datetime = \ lambda x: v_epoch16_to_datetime(x[..., 0], x[..., 1]) self.v_epoch_to_datetime = numpy.frompyfunc( self.epoch_to_datetime, 1, 1) self.v_tt2000_to_datetime = numpy.frompyfunc( self.tt2000_to_datetime, 1, 1) self.v_datetime_to_epoch = numpy.vectorize( self.datetime_to_epoch, otypes=[numpy.float64]) v_datetime_to_epoch16 = numpy.frompyfunc( self.datetime_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. 
We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_datetime_to_epoch16(x): retval = numpy.require(v_datetime_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_datetime_to_epoch16 = _v_datetime_to_epoch16 self.v_datetime_to_tt2000 = numpy.vectorize( self.datetime_to_tt2000, otypes=[numpy.int64]) self.v_epoch_to_tt2000 = numpy.vectorize( self.epoch_to_tt2000, otypes=[numpy.int64]) self.v_tt2000_to_epoch = numpy.vectorize( self.tt2000_to_epoch, otypes=[numpy.float64]) v_epoch16_to_tt2000 = numpy.frompyfunc( self.epoch16_to_tt2000, 2, 1) self.v_epoch16_to_tt2000 = \ lambda x: v_epoch16_to_tt2000(x[..., 0], x[..., 1]) v_tt2000_to_epoch16 = numpy.frompyfunc( self.tt2000_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_tt2000_to_epoch16(x): retval = numpy.require(v_tt2000_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_tt2000_to_epoch16 = _v_tt2000_to_epoch16 if not self.supports_int8: self.datetime_to_tt2000 = self._bad_tt2000 self.tt2000_to_datetime = self._bad_tt2000 self.v_datetime_to_tt2000 = self._bad_tt2000 self.v_tt2000_to_datetime = self._bad_tt2000 self.epoch_to_tt2000 = self._bad_tt2000 self.v_epoch_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch = self._bad_tt2000 self.v_tt2000_to_epoch = self._bad_tt2000 self.epoch_16_to_tt2000 = self._bad_tt2000 self.v_epoch16_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch16 = self._bad_tt2000 self.v_tt2000_to_epoch16 = self._bad_tt2000 #Default to V2 CDF self.set_backward(True) @staticmethod def _find_lib(): """ Search for the CDF library Searches in likely locations for CDF libraries and attempts to load them. Stops at first successful load and, if fails, reports all the files that were tried as libraries. Returns ======= out : tuple This is either (path to library, loaded library) or, in the event of failure, (None, list of libraries tried) """ failed = [] for libpath in Library._lib_paths(): try: lib = ctypes.CDLL(libpath) except: failed.append(libpath) else: return libpath, lib return (failed, None) @staticmethod def _lib_paths(): """Find candidate paths for the CDF library Does not check that the library is actually in any particular directory, just returns a list of possible locations, in priority order. Returns ======= out : generator of str paths that look like the CDF library """ #What the library might be named names = { 'win32': ['cdf.dll'], 'darwin': ['libcdf.dylib', 'cdf.dylib', 'libcdf.so'], 'linux2': ['libcdf.so'], 'linux': ['libcdf.so'], } names = names.get(sys.platform, ['libcdf.so']) #All existing CDF-library-like paths within a directory search_dir = lambda x: \ [os.path.join(x, fname) for fname in names if os.path.exists(os.path.join(x, fname))] # Only use anaconda locations... # Defined during builds ... if 'PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['PREFIX'], 'lib')): yield p # defined when conda is activated ... 
if 'CONDA_PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'lib')): yield p # Special subdirectory for anaconda unix packages on windows if 'LIBRARY_BIN' in os.environ: for p in search_dir(os.environ['LIBRARY_BIN']): yield p ctypespath = ctypes.util.find_library( 'cdf.dll' if sys.platform == 'win32' else 'cdf') if ctypespath: yield ctypespath def check_status(self, status, ignore=()): """ Raise exception or warning based on return status of CDF call Parameters ========== status : int status returned by the C library Other Parameters ================ ignore : sequence of ctypes.c_long CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. (Default none). Raises ====== CDFError : if status < CDF_WARN, indicating an error Warns ===== CDFWarning : if CDF_WARN <= status < CDF_OK, indicating a warning. Returns ======= out : int status (unchanged) """ if status == const.CDF_OK or status in ignore: return status if status < const.CDF_WARN: raise CDFError(status) else: warning = CDFWarning(status) warning.warn() return status def call(self, *args, **kwargs): """ Call the CDF internal interface Passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with :meth:`check_status`. Terminal NULL is automatically added to args. Parameters ========== args : various, see :mod:`ctypes` Passed directly to the CDF library interface. Useful constants are defined in the :mod:`~pycdf.const` module. Other Parameters ================ ignore : sequence of CDF statuses sequence of CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. Returns ======= out : int CDF status from the library Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ if 'ignore' in kwargs: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) ), kwargs['ignore']) else: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) )) def set_backward(self, backward=True): """ Set backward compatibility mode for new CDFs Unless backward compatible mode is set, CDF files created by the version 3 library can not be read by V2. Parameters ========== backward : boolean Set backward compatible mode if True; clear it if False. Raises ====== ValueError : if backward=False and underlying CDF library is V2 """ if self.version[0] < 3: if not backward: raise ValueError( 'Cannot disable backward-compatible mode for CDF version 2.') else: return self._library.CDFsetFileBackward(const.BACKWARDFILEon if backward else const.BACKWARDFILEoff) def epoch_to_datetime(self, epoch): """ Converts a CDF epoch value to a datetime Parameters ========== epoch : float epoch value from CDF Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
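Examples
========
A minimal illustrative conversion using the module-level ``lib`` instance;
the epoch value is the 0001-01-01 00:00 UT offset (in milliseconds) noted
in :meth:`epoch_to_num`:

>>> lib.epoch_to_datetime(31622400000.0)
datetime.datetime(1, 1, 1, 0, 0)
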
See Also ======== v_epoch_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) self._library.EPOCHbreakdown(ctypes.c_double(epoch), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999000) else: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, msec.value * 1000) def datetime_to_epoch(self, dt): """ Converts a Python datetime to a CDF Epoch value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : float epoch corresponding to dt See Also ======== v_datetime_to_epoch """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) micro = dt.microsecond % 1000 if micro >= 500 and dt.year < 9999: dt += datetime.timedelta(0, 0, 1000) return self._library.computeEPOCH(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000)) def epoch16_to_datetime(self, epoch0, epoch1): """ Converts a CDF epoch16 value to a datetime .. note:: The call signature has changed since SpacePy 0.1.2. Formerly this method took a single argument with two values; now it requires two arguments (one for each value). To convert existing code, replace ``epoch16_to_datetime(epoch)`` with ``epoch16_to_datetime(*epoch)``. Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. See Also ======== v_epoch16_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) usec = ctypes.c_long(0) nsec = ctypes.c_long(0) psec = ctypes.c_long(0) self._library.EPOCH16breakdown((ctypes.c_double * 2)(epoch0, epoch1), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec), ctypes.byref(psec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) micro = int(float(msec.value) * 1000 + float(usec.value) + float(nsec.value) / 1000 + float(psec.value) / 1e6 + 0.5) if micro < 1000000: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_epoch16(self, dt): """ Converts a Python datetime to a CDF Epoch16 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : list of float epoch16 corresponding to dt See Also ======== v_datetime_to_epoch16 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) #Default to "illegal epoch" epoch16 = (ctypes.c_double * 2)(-1., -1.) 
if self._library.computeEPOCH16(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0, 0, epoch16): return (-1., -1.) #Failure, so illegal epoch return (epoch16[0], epoch16[1]) def epoch_to_epoch16(self, epoch): """ Converts a CDF EPOCH to a CDF EPOCH16 value Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : (double, double) EPOCH16 corresponding to epoch """ e = numpy.require(epoch, numpy.float64) s = numpy.trunc(e / 1000.0) #ugly numpy stuff, probably a better way.... res = numpy.hstack((s, (e - s * 1000.0) * 1e9)) if len(res) <= 2: return res newshape = list(res.shape[0:-2]) newshape.append(res.shape[-1] // 2) newshape.append(2) return numpy.rollaxis(res.reshape(newshape), -1, -2) def epoch_to_num(self, epoch): """ Convert CDF EPOCH to matplotlib number. Same output as :func:`~matplotlib.dates.date2num` and useful for plotting large data sets without converting the times through datetime. Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : double Floating point number representing days since 0001-01-01. """ #date2num day 1 is 1/1/1 00UT #epoch 1/1/1 00UT is 31622400000.0 (millisecond) return (epoch - 31622400000.0) / (24 * 60 * 60 * 1000.0) + 1.0 def epoch16_to_epoch(self, epoch16): """ Converts a CDF EPOCH16 to a CDF EPOCH value Parameters ========== epoch16 : (double, double) EPOCH16 to convert. Lists and numpy arrays are acceptable. LAST dimension should be 2: the two pairs of EPOCH16 Returns ======= out : double EPOCH corresponding to epoch16 """ e = numpy.require(epoch16, numpy.float64) return e[..., 0] * 1000.0 + numpy.round(e[..., 1] / 1e9) def tt2000_to_datetime(self, tt2000): """ Converts a CDF TT2000 value to a datetime .. note:: Although TT2000 values support leapseconds, Python's datetime object does not. Any times after 23:59:59.999999 will be truncated to 23:59:59.999999. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
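Examples
========
An illustrative conversion using the module-level ``lib`` instance; the
TT2000 value is the 2010-01-01 00:00 UT constant this module checks at
load time, so the exact result assumes a CDF library with up-to-date
leap-second tables:

>>> lib.tt2000_to_datetime(315576066184000000)
datetime.datetime(2010, 1, 1, 0, 0)
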
See Also ======== v_tt2000_to_datetime """ yyyy = ctypes.c_double(0) mm = ctypes.c_double(0) dd = ctypes.c_double(0) hh = ctypes.c_double(0) min = ctypes.c_double(0) sec = ctypes.c_double(0) msec = ctypes.c_double(0) usec = ctypes.c_double(0) nsec = ctypes.c_double(0) self._library.breakdownTT2000( ctypes.c_longlong(tt2000), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) sec = int(sec.value) if sec >= 60: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), 59, 999999) micro = int(msec.value * 1000 + usec.value + nsec.value / 1000 + 0.5) if micro < 1000000: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_tt2000(self, dt): """ Converts a Python datetime to a CDF TT2000 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0) def _datetime_to_tt2000_typepunned(self, dt): """ Converts a Python datetime to a CDF TT2000 value Typepunned version that passes doubles as longlongs, to get around ARM calling convention oddness. Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ c_ll_p = ctypes.POINTER(ctypes.c_longlong) if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( dt.year)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.month)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.day)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.hour)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.minute)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.second)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond // 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond % 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) def epoch_to_tt2000(self, epoch): """ Converts a CDF EPOCH to a CDF TT2000 value Parameters ========== epoch : double EPOCH to convert Returns ======= out : int tt2000 corresponding to epoch See Also ======== v_epoch_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH(epoch) def tt2000_to_epoch(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH .. note:: Although TT2000 values support leapseconds, CDF EPOCH values do not. 
Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double EPOCH corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch """ return self._library.CDF_TT2000_to_UTC_EPOCH(tt2000) def epoch16_to_tt2000(self, epoch0, epoch1): """ Converts a CDF epoch16 value to TT2000 .. note:: Because TT2000 does not support picoseconds, the picoseconds value in epoch is ignored (i.e., truncated.) Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : long TT2000 corresponding to epoch. See Also ======== v_epoch16_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH16( (ctypes.c_double * 2)(epoch0, epoch1)) def tt2000_to_epoch16(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH16 .. note:: Although TT2000 values support leapseconds, CDF EPOCH16 values do not. Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double, double EPOCH16 corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch16 """ #Default to "illegal epoch" if isn't populated epoch16 = (ctypes.c_double * 2)(-1., -1.) if self._library.CDF_TT2000_to_UTC_EPOCH16(tt2000, epoch16): return (-1., -1.) #Failure; illegal epoch return (epoch16[0], epoch16[1]) def _bad_tt2000(*args, **kwargs): """Convenience function for complaining that TT2000 not supported""" raise NotImplementedError( 'TT2000 functions require CDF library 3.4.0 or later') def download_library(): """Download and install the CDF library""" if sys.platform != 'win32': raise NotImplementedError( 'CDF library install only supported on Windows') try: import html.parser as HTMLParser except ImportError: import HTMLParser #https://stackoverflow.com/questions/1713038/super-fails-with-error-typeerror-argument-1-must-be-type-not-classobj class LinkParser(HTMLParser.HTMLParser, object): def __init__(self, *args, **kwargs): self.links_found = [] super(LinkParser, self).__init__(*args, **kwargs) def handle_starttag(self, tag, attrs): if tag != 'a' or attrs[0][0] != 'href': return self.links_found.append(attrs[0][1]) import re import subprocess try: import urllib.request as u except ImportError: import urllib as u # Removed reference to spacepy #import spacepy #if spacepy.config.get('user_agent', None): # class AppURLopener(u.FancyURLopener): # version = spacepy.config['user_agent'] # u._urlopener = AppURLopener() baseurl = 'https://spdf.sci.gsfc.nasa.gov/pub/software/cdf/dist/' url = u.urlopen(baseurl) listing = url.read() url.close() p = LinkParser() p.feed(listing) cdfdist = [l for l in p.links_found if re.match('^cdf3\d_\d(?:_\d)?/$', l)] if not cdfdist: raise RuntimeError( "Couldn't find CDF distribution directory to download") cdfdist.sort(key=lambda x: x.rstrip('/').split('_')) cdfverbase = cdfdist[-1].rstrip('/') instfname = cdfverbase + ('_0' if cdfverbase.count('_') == 1 else '') + \ '-setup-{0}.exe'.format(len('%x' % sys.maxsize)*4) insturl = baseurl + cdfverbase + '/windows/' + instfname tmpdir = tempfile.mkdtemp() try: fname, status = u.urlretrieve(insturl, os.path.join(tmpdir, instfname)) subprocess.check_call([fname, '/install', '/q1'], shell=False) finally: shutil.rmtree(tmpdir) _libpath, _library = Library._find_lib() 
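#_find_lib() returns (library path, loaded library) on success; on failure
#the first element is the list of candidate paths that were tried and the
#second is None, which the check below reports to the user.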
if _library is None: raise Exception(('Cannot load CDF C library; checked {0}. ' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(_libpath))) from . import const lib = Library(_libpath, _library) """Module global library object. Initalized at module load time so all classes have ready access to the CDF library and a common state. E.g: >>> import pycdf >>> pycdf.lib.version (3, 3, 0, ' ') """ class CDFException(Exception): """ Base class for errors or warnings in the CDF library. Not normally used directly, but in subclasses :class:`CDFError` and :class:`CDFWarning`. Error messages provided by this class are looked up from the underlying C library. """ def __init__(self, status): """ Create a CDF Exception Uses CDF C library to look up an appropriate error message. Parameters ========== status : ctypes.c_long CDF status """ self.status = status self.string = 'CDF error ' + repr(status) + ', unable to get details.' message = ctypes.create_string_buffer(const.CDF_STATUSTEXT_LEN + 1) try: retval = lib._library.CDFlib(const.SELECT_, const.CDF_STATUS_, ctypes.c_long(status), const.GET_, const.STATUS_TEXT_, message, const.NULL_) if retval == const.CDF_OK: if isinstance(message.value, str): self.string = message.value elif isinstance(message.value, bytes): self.string = message.value.decode() except: pass def __str__(self): """ Error string associated with the library error. Returns ======= out : str Error message from the CDF library. """ return self.string class CDFError(CDFException): """Raised for an error in the CDF library.""" pass class CDFWarning(CDFException, UserWarning): """Used for a warning in the CDF library.""" def warn(self, level=4): """ Issues a warning based on the information stored in my exception Intended for use in check_status or similar wrapper function. Other Parameters ================ level : int optional (default 3), how far up the stack the warning should be reported. Passed directly to :class:`warnings.warn`. """ warnings.warn(self, self.__class__, level) class EpochError(Exception): """Used for errors in epoch routines""" pass def _compress(obj, comptype=None, param=None): """Set or check the compression of a :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param obj: object on which to set or check compression @type obj: :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param comptype: type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :py:mod:`pycdf.const`. If not specified, will not change compression. @type comptype: ctypes.c_long @param param: Compression parameter, see CDF CRM 4.10 and :py:mod:`pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
@type param: ctypes.c_long @return: (comptype, param) currently in effect @rtype: tuple """ if isinstance(obj, CDF): COMPRESSION_ = const.CDF_COMPRESSION_ elif isinstance(obj, Var): COMPRESSION_ = const.zVAR_COMPRESSION_ else: raise ValueError('Must specify a CDF or Var type.') validparams = {const.NO_COMPRESSION.value: [ctypes.c_long(0)], const.RLE_COMPRESSION.value: [const.RLE_OF_ZEROs], const.HUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.AHUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.GZIP_COMPRESSION.value: [ctypes.c_long(5), ctypes.c_long(1), ctypes.c_long(2), ctypes.c_long(3), ctypes.c_long(4), ctypes.c_long(6), ctypes.c_long(7), ctypes.c_long(8), ctypes.c_long(9), ], } comptypes = [const.NO_COMPRESSION, const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION, const.GZIP_COMPRESSION] comptypevalues = [i.value for i in comptypes] if comptype != None: if not hasattr(comptype, 'value'): comptype = ctypes.c_long(comptype) if param is None: if not comptype.value in validparams: raise CDFError(const.BAD_COMPRESSION) param = validparams[comptype.value][0] paramlist = (ctypes.c_long * 1)(param) obj._call(const.PUT_, COMPRESSION_, comptype, paramlist) params = (ctypes.c_long * const.CDF_MAX_PARMS)(*([0] * const.CDF_MAX_PARMS)) comptype = ctypes.c_long(0) percent = ctypes.c_long(0) obj._call(const.GET_, COMPRESSION_, ctypes.byref(comptype), ctypes.byref(params), ctypes.byref(percent)) param = params[0] if not comptype.value in comptypevalues: raise CDFError(const.BAD_COMPRESSION) validparamvalues = [i.value for i in validparams[comptype.value]] if not param in validparamvalues: raise CDFError(const.BAD_COMPRESSION_PARM) comptype = comptypes[comptypevalues.index(comptype.value)] if comptype in (const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION): param = validparams[comptype.value][validparamvalues.index(param)] return (comptype, param) class CDF(MutableMapping): """ Python object representing a CDF file. Open or create a CDF file by creating an object of this class. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. A readonly CDF with many variables may be slow to close. See :meth:`readonly`. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :meth:`close` or :meth:`save` when done. .. note:: Existing CDF files are opened read-only by default, see :meth:`readonly` to change. CDF supports the `with <http://docs.python.org/tutorial/inputoutput.html#methods-of-file-objects>`_ keyword, like other file objects, so: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... #do brilliant things with the CDF will open the CDF, execute the indented statements, and close the CDF when finished or when an error occurs. The `python docs <http://docs.python.org/reference/compound_stmts.html#with>`_ include more detail on this 'context manager' ability. 
CDF objects behave like a python `dictionary <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_, where the keys are names of variables in the CDF, and the values, :class:`Var` objects. As a dictionary, they are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_ and it is easy to loop over all of the variables in a file. Some examples: #. List the names of all variables in the open CDF ``cdffile``: >>> cdffile.keys() >>> for k in cdffile: #Alternate ... print(k) #. Get a :class:`Var` object for the variable named ``Epoch``: >>> epoch = cdffile['Epoch'] #. Determine if a CDF contains a variable named ``B_GSE``: >>> if 'B_GSE' in cdffile: ... print('B_GSE is in the file') ... else: ... print('B_GSE is not in the file') #. Find how many variables are in the file: >>> print(len(cdffile)) #. Delete the variable ``Epoch`` from the open CDF file ``cdffile``: >>> del cdffile['Epoch'] #. Display a summary of variables and types in open CDF file ``cdffile``: >>> print(cdffile) #. Open the CDF named ``cdf_filename.cdf``, read *all* the data from all variables into dictionary ``data``, and close it when done or if an error occurs: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... data = cdffile.copy() This last example can be very inefficient as it reads the entire CDF. Normally it's better to treat the CDF as a dictionary and access only the data needed, which will be pulled transparently from disc. See :class:`Var` for more subtle examples. Potentially useful dictionary methods and related functions: - `in <http://docs.python.org/reference/expressions.html#in>`_ - `keys <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_ - :py:func:`len` - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - :py:func:`sorted` - :py:func:`~spacepy.toolbox.dictree` The CDF user's guide section 2.2 has more background information on CDF files. The :attr:`~CDF.attrs` Python attribute acts as a dictionary referencing CDF attributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`gAttrList` for more on the dictionary of global attributes. Creating a new CDF from a master (skeleton) CDF has similar syntax to opening one: >>> cdffile = pycdf.CDF('cdf_filename.cdf', 'master_cdf_filename.cdf') This creates and opens ``cdf_filename.cdf`` as a copy of ``master_cdf_filename.cdf``. Using a skeleton CDF is recommended over making a CDF entirely from scratch, but this is possible by specifying a blank master: >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') When CDFs are created in this way, they are opened read-write, see :py:meth:`readonly` to change. By default, new CDFs (without a master) are created in version 2 (backward-compatible) format. To create a version 3 CDF, use :meth:`Library.set_backward`: >>> pycdf.lib.set_backward(False) >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') Add variables by direct assignment, which will automatically set type and dimension based on the data provided: >>> cdffile['new_variable_name'] = [1, 2, 3, 4] or, if more control is needed over the type and dimensions, use :py:meth:`new`. Although it is supported to assign Var objects to Python variables for convenience, there are some minor pitfalls that can arise when changing a CDF that will not affect most users. 
This is only a concern when assigning a zVar object to a Python variable, changing the CDF through some other variable, and then trying to use the zVar object via the originally assigned variable. Deleting a variable: >>> var = cdffile['Var1'] >>> del cdffile['Var1'] >>> var[0] #fail, no such variable Renaming a variable: >>> var = cdffile['Var1'] >>> cdffile['Var1'].rename('Var2') >>> var[0] #fail, no such variable Renaming via the same variable works: >>> var = cdffile['Var1'] >>> var.rename('Var2') >>> var[0] #succeeds, aware of new name Deleting a variable and then creating another variable with the same name may lead to some surprises: >>> var = cdffile['Var1'] >>> var[...] = [1, 2, 3, 4] >>> del cdffile['Var1'] >>> cdffile.new('Var1', data=[5, 6, 7, 8] >>> var[...] [5, 6, 7, 8] .. autosummary:: ~CDF.attr_num ~CDF.attrs ~CDF.add_attr_to_cache ~CDF.add_to_cache ~CDF.backward ~CDF.checksum ~CDF.clear_attr_from_cache ~CDF.clear_from_cache ~CDF.clone ~CDF.close ~CDF.col_major ~CDF.compress ~CDF.copy ~CDF.from_data ~CDF.new ~CDF.raw_var ~CDF.readonly ~CDF.save ~CDF.var_num ~CDF.version .. attribute:: CDF.attrs Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. .. attribute:: CDF.backward True if this CDF was created in backward-compatible mode (for opening with CDF library before 3.x) .. automethod:: add_to_cache .. automethod:: add_attr_to_cache .. automethod:: attr_num .. automethod:: checksum .. automethod:: clear_from_cache .. automethod:: clear_attr_from_cache .. automethod:: clone .. automethod:: close .. automethod:: col_major .. automethod:: compress .. automethod:: copy .. automethod:: from_data .. automethod:: new .. automethod:: raw_var .. automethod:: readonly .. automethod:: save .. automethod:: var_num .. automethod:: version """ def __init__(self, pathname, masterpath=None, create=None, readonly=None): """Open or create a CDF file. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. Raises ====== CDFError if CDF library reports an error CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save` when done. 
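A new, empty CDF (the filename is only illustrative) can be created by
passing an empty string as the master:

>>> cdffile = pycdf.CDF('cdf_filename.cdf', '')
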
""" if masterpath is not None: #Looks like we want to create if create is False: raise ValueError('Cannot specify a master CDF without creating a CDF') if readonly is True: raise ValueError('Cannot create a CDF in readonly mode') if create and readonly: raise ValueError('Cannot create a CDF in readonly mode') try: self.pathname = pathname.encode() except AttributeError: raise ValueError( 'pathname must be string-like: {0}'.format(pathname)) self._handle = ctypes.c_void_p(None) self._opened = False if masterpath is None and not create: self._open(True if readonly is None else readonly) elif masterpath: self._from_master(masterpath.encode()) else: self._create() lib.call(const.SELECT_, const.CDF_zMODE_, ctypes.c_long(2)) self._attrlistref = weakref.ref(gAttrList(self)) self.backward = self.version()[0] < 3 self._var_nums = {} """Cache of name-to-number mappings for variables in this CDF""" self._attr_info = {} """Cache of name-to-(number, global) mappings for attributes in this CDF""" def __del__(self): """Destructor; called when CDF object is destroyed. Close CDF file if there is still a valid handle. .. note:: To avoid data loss, explicitly call :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save`. """ if self._opened: self.close() def __delitem__(self, name): """Delete a zVariable in this CDF, by name or number Parameters ========== name : string or int Name or number of the CDF variable .. note: Variable numbers may change if variables are added or removed. Examples ======== Delete the variable ``Epoch`` from the open CDF file ``cdffile``. >>> del cdffile['Epoch'] """ self[name]._delete() def __enter__(self): """Context manager entrance function.""" return self def __exit__(self, type, value, traceback): """Context manager exit function. Close CDF file. """ self.close() def __getitem__(self, name): """Gets a zVariable in this CDF, by name or number The CDF acts like a dict @param name: Name or number of the CDF variable @type name: string or int @return: CDF variable named or numbered L{name} @rtype: :py:class:`pycdf.Var` @raise KeyError: for pretty much any problem in lookup @note: variable numbers may change if variables are added or removed. """ try: return Var(self, name) except CDFException as e: raise KeyError('{0}: {1}'.format(name, e)) def __setitem__(self, name, data): """Writes data to a zVariable in this CDF If the zVariable does not exist, will create one matching L{data}. If it does exist, will attempt to write L{data} to it without changing the type or dimensions. @param name: name or number of the variable to write @type name: str or int @param data: data to write, or a :py:class:`pycdf.Var` to copy """ if isinstance(data, Var): self.clone(data, name) elif name in self: self[name][...] 
= data if hasattr(data, 'attrs'): self[name].attrs.clone(data.attrs) else: self.new(name, data) def __iter__(self, current = 0): """Iterates over zVars in CDF Iterators for dicts return keys @note: Returned in variable-number order """ while current < self.__len__(): name = self[current].name() value = (yield name) if value is None: current += 1 else: current = self[value]._num() current += 1 def __len__(self): """Implements 'length' of CDF (number of zVars) @return: number of zVars in the CDF @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, const.CDF_NUMzVARS_, ctypes.byref(count)) return count.value def __contains__(self, key): """Determines whether a particular variable name is in the CDF @note: Essentially an efficiency function; L{__iter__} is called if this isn't defined @param key: key/variable name to check @type key: string @return: True if L{key} is the name of a variable in CDF, else False @rtype: Boolean """ try: foo = self[key] return True except KeyError as e: expected = str(key) + \ ": NO_SUCH_VAR: Named variable not found in this CDF." if expected in e.args: return False raise def __repr__(self): """Returns representation of CDF Cannot return anything that can be eval'd to create a copy of the CDF, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<CDF:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the CDF This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.CDF`, just the names, types, and sizes of all variables. (Attributes are not listed.) @return: description of the variables in the CDF @rtype: str """ if self._opened: return '\n'.join([key + ': ' + str(value) for (key, value) in sorted(self.items())]) #can get away with this sort because second value in tuple isn't #compared unless first are different, and variable name is unique. else: if isinstance(self.pathname, str): return 'Closed CDF {0}'.format(self.pathname) else: return 'Closed CDF {0}'.format(self.pathname.decode('ascii')) def _open(self, readonly=True): """Opens the CDF file (called on init) Will open an existing CDF file read/write. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.OPEN_, const.CDF_, self.pathname, ctypes.byref(self._handle)) self._opened = True if readonly: #Default is RW self.readonly(readonly) def _create(self): """Creates (and opens) a new CDF file Created at ``pathname``. Assumes zero-dimension r variables Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.CREATE_, const.CDF_, self.pathname, ctypes.c_long(0), (ctypes.c_long * 1)(0), ctypes.byref(self._handle)) self._opened = True def _from_master(self, master_path): """Creates a new CDF from a master CDF file ``master_path`` is copied to ``pathname`` and opened. Parameters ========== master_path : string location of the master CDF file Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. 
note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ if os.path.exists(self.pathname): raise CDFError(const.CDF_EXISTS) shutil.copy2(master_path, self.pathname) self._open(False) def _call(self, *args, **kwargs): """Select this CDF as current and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. Parameters ========== args : various, see :py:mod:`ctypes`. Passed directly to the CDF library interface. Useful constants are defined in the :doc:`const <pycdf_const>` module of this package. Returns ======= out : ctypes.c_long CDF status from the library .. note: Terminal NULL_ is automatically added to ``args``. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. """ return lib.call(const.SELECT_, const.CDF_, self._handle, *args, **kwargs) def clone(self, zVar, name=None, data=True): """ Clone a zVariable (from another CDF or this) into this CDF Parameters ========== zVar : :py:class:`Var` variable to clone Other Parameters ================ name : str Name of the new variable (default: name of the original) data : boolean (optional) Copy data, or only type, dimensions, variance, attributes? (default: True, copy data as well) Returns ======= out : :py:class:`Var` The newly-created zVar in this CDF """ if name is None: name = zVar.name() if name in self: del self[name] self.new(name, type=zVar.type(), recVary=zVar.rv(), dimVarys=zVar.dv(), dims=zVar._dim_sizes(), n_elements=zVar._nelems()) self[name].compress(*zVar.compress()) self[name].attrs.clone(zVar.attrs) if data: r = zVar._raw zVar._raw = True self.raw_var(name)[...] = zVar[...] zVar._raw = r return zVar def col_major(self, new_col=None): """ Finds the majority of this CDF file Other Parameters ================ new_col : boolean Specify True to change to column-major, False to change to row major, or do not specify to check the majority rather than changing it. (default is check only) Returns ======= out : boolean True if column-major, false if row-major """ if new_col != None: new_maj = const.COLUMN_MAJOR if new_col else const.ROW_MAJOR self._call(const.PUT_, const.CDF_MAJORITY_, new_maj) maj = ctypes.c_long(0) self._call(const.GET_, const.CDF_MAJORITY_, ctypes.byref(maj)) if not maj.value in (const.ROW_MAJOR.value, const.COLUMN_MAJOR.value): raise CDFError(const.BAD_MAJORITY) return maj.value == const.COLUMN_MAJOR.value def readonly(self, ro=None): """ Sets or check the readonly status of this CDF If the CDF has been changed since opening, setting readonly mode will have no effect. .. note:: Closing a CDF that has been opened readonly, or setting readonly False, may take a substantial amount of time if there are many variables in the CDF, as a (potentially large) cache needs to be cleared. Consider specifying ``readonly=False`` when opening the file if this is an issue. However, this may make some reading operations slower. Other Parameters ================ ro : Boolean True to set the CDF readonly, False to set it read/write, or leave out to check only. 
Returns ======= out : Boolean True if CDF is read-only, else False Raises ====== CDFError : if bad mode is set """ if ro == True: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYon) elif ro == False: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYoff) mode = ctypes.c_long(0) self._call(const.CONFIRM_, const.CDF_READONLY_MODE_, ctypes.byref(mode)) if mode.value == const.READONLYon.value: return True elif mode.value == const.READONLYoff.value: return False else: raise CDFError(const.BAD_READONLY_MODE.value) def checksum(self, new_val=None): """ Set or check the checksum status of this CDF. If checksums are enabled, the checksum will be verified every time the file is opened. Other Parameters ================ new_val : boolean True to enable checksum, False to disable, or leave out to simply check. Returns ======= out : boolean True if the checksum is enabled or False if disabled """ if new_val != None: self._call(const.PUT_, const.CDF_CHECKSUM_, const.MD5_CHECKSUM if new_val else const.NO_CHECKSUM) chk = ctypes.c_long(0) self._call(const.GET_, const.CDF_CHECKSUM_, ctypes.byref(chk)) if not chk.value in (const.MD5_CHECKSUM.value, const.NO_CHECKSUM.value): raise CDFError(const.BAD_CHECKSUM) return chk.value == const.MD5_CHECKSUM.value def close(self): """ Closes the CDF file Although called on object destruction (:meth:`~CDF.__del__`), to ensure all data are saved, the user should explicitly call :meth:`~CDF.close` or :meth:`~CDF.save`. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.CLOSE_, const.CDF_) self._opened = False def compress(self, comptype=None, param=None): """ Set or check the compression of this CDF Sets compression on entire *file*, not per-variable. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) Returns ======= out : tuple (comptype, param) currently in effect See Also ======== :meth:`Var.compress` Examples ======== Set file ``cdffile`` to gzip compression, compression level 9: >>> cdffile.compress(pycdf.const.GZIP_COMPRESSION, 9) """ return _compress(self, comptype, param) def new(self, name, data=None, type=None, recVary=True, dimVarys=None, dims=None, n_elements=None, compress=None, compress_param=None): """ Create a new zVariable in this CDF .. note:: Either ``data`` or ``type`` must be specified. If type is not specified, it is guessed from ``data``. Parameters ========== name : str name of the new variable Other Parameters ================ data data to store in the new variable. If this has a an ``attrs`` attribute (e.g., :class:`~spacepy.datamodel.dmarray`), it will be used to populate attributes of the new variable. type : ctypes.c_long CDF type of the variable, from :mod:`~pycdf.const`. See section 2.5 of the CDF user's guide for more information on CDF data types. recVary : boolean record variance of the variable (default True) dimVarys : list of boolean dimension variance of each dimension, default True for all dimensions. 
dims : list of int size of each dimension of this variable, default zero-dimensional. Note this is the dimensionality as defined by CDF, i.e., for record-varying variables it excludes the leading record dimension. See :py:class:`Var`. n_elements : int number of elements, should be 1 except for CDF_CHAR, for which it's the length of the string. compress : ctypes.c_long Compression to apply to this variable, default None. See :py:meth:`Var.compress`. compress_param : ctypes.c_long Compression parameter if compression used; reasonable default is chosen. See :py:meth:`Var.compress`. Returns ======= out : :py:class:`Var` the newly-created zVariable Raises ====== ValueError : if neither data nor sufficient typing information is provided. Notes ===== Any given data may be representable by a range of CDF types; if the type is not specified, pycdf will guess which the CDF types which can represent this data. This breaks down to: #. If input data is a numpy array, match the type of that array #. Proper kind (numerical, string, time) #. Proper range (stores highest and lowest number provided) #. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: #. Type that matches precision of data first, then #. integer type before float type, then #. Smallest type first, then #. signed type first, then #. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if ``data`` specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: #. absolute values between 0 and 3e-39 #. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. """ if type in (const.CDF_EPOCH16, const.CDF_INT8, const.CDF_TIME_TT2000) \ and self.backward: raise ValueError('Cannot use EPOCH16, INT8, or TIME_TT2000 ' 'in backward-compatible CDF') if not lib.supports_int8 and \ type in (const.CDF_INT8, const.CDF_TIME_TT2000): raise ValueError('INT8 and TIME_TT2000 require CDF library 3.4.0') if data is None: if type is None: raise ValueError('Must provide either data or a CDF type.') if dims is None: dims = [] if n_elements is None: n_elements = 1 else: (guess_dims, guess_types, guess_elements) = _Hyperslice.types(data) if dims is None: if recVary: if guess_dims == (): raise ValueError( 'Record-varying data cannot be scalar. 
' 'Specify NRV with CDF.new() or put data in array.') dims = guess_dims[1:] else: dims = guess_dims if type is None: type = guess_types[0] if type == const.CDF_EPOCH16.value and self.backward: type = const.CDF_EPOCH if n_elements is None: n_elements = guess_elements if dimVarys is None: dimVarys = [True for i in dims] recVary = const.VARY if recVary else const.NOVARY dimVarys = [const.VARY if dimVary else const.NOVARY for dimVary in dimVarys] if not hasattr(type, 'value'): type = ctypes.c_long(type) if type.value == const.CDF_INT8.value and not lib.supports_int8: raise ValueError( '64-bit integer support require CDF library 3.4.0') if type.value in (const.CDF_EPOCH16.value, const.CDF_INT8.value, const.CDF_TIME_TT2000.value) \ and self.backward: raise ValueError('Data requires EPOCH16, INT8, or TIME_TT2000; ' 'incompatible with backward-compatible CDF') new_var = Var(self, name, type, n_elements, dims, recVary, dimVarys) if compress != None: new_var.compress(compress, compress_param) if data is not None: new_var[...] = data if hasattr(data, 'attrs'): new_var.attrs.clone(data.attrs) return new_var def raw_var(self, name): """ Get a "raw" :class:`Var` object. Normally a :class:`Var` will perform translation of values for certain types (to/from Unicode for CHAR variables on Py3k, and to/from datetime for all time types). A "raw" object does not perform this translation, on read or write. This does *not* affect the data on disk, and in fact it is possible to maintain multiple Python objects with access to the same zVariable. Parameters ========== name : str name or number of the zVariable """ v = self[name] v._raw = True return v def save(self): """ Saves the CDF file but leaves it open. If closing the CDF, :meth:`close` is sufficient; there is no need to call :meth:`save` before :meth:`close`. .. note:: Relies on an undocumented call of the CDF C library, which is also used in the Java interface. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.SAVE_, const.CDF_) def copy(self): """ Make a copy of all data and attributes in this CDF Returns ======= out : :py:class:`CDFCopy` :class:`~spacepy.datamodel.SpaceData`-like object of all data """ return CDFCopy(self) def version(self): """ Get version of library that created this CDF Returns ======= out : tuple version of CDF library, in form (version, release, increment) """ ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) self._call(const.GET_, const.CDF_VERSION_, ctypes.byref(ver), const.GET_, const.CDF_RELEASE_, ctypes.byref(rel), const.GET_, const.CDF_INCREMENT_, ctypes.byref(inc)) return (ver.value, rel.value, inc.value) def _get_attrs(self): """Get attribute list Provide access to the CDF's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = gAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. 
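Because this is dict-like, global attributes can be read and written by
name; the attribute name below is purely hypothetical:

>>> cdffile.attrs['TITLE'] = 'my mission data'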
""") def var_num(self, varname): """Get the variable number of a particular variable name This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : int Variable number of this zvariable. """ num = self._var_nums.get(varname, None) if num is None: #Copied from Var._get, which can hopefully be thinned varNum = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMBER_, varname, ctypes.byref(varNum)) num = varNum.value self._var_nums[varname] = num return num def attr_num(self, attrname): """Get the attribute number and scope by attribute name This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : tuple attribute number, scope (True for global) of this attribute """ res = self._attr_info.get(attrname, None) if res is None: #Copied from Var._get, which can hopefully be thinned attrNum = ctypes.c_long(0) self._call(const.GET_, const.ATTR_NUMBER_, attrname, ctypes.byref(attrNum)) scope = ctypes.c_long(0) self._call(const.SELECT_, const.ATTR_, attrNum, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) res = (attrNum.value, scope) self._attr_info[attrname] = res return res def clear_attr_from_cache(self, attrname): """Mark an attribute deleted in the name-to-number cache Will remove an attribute, and all attributes with higher numbers, from the attribute cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the attribute. Not this is NOT a string in Python 3! """ num, scope = self.attr_num(attrname) #All numbers higher than this are renumbered for a, n in list(self._attr_info.items()): if n[0] >= num: del self._attr_info[a] def clear_from_cache(self, varname): """Mark a variable deleted in the name-to-number cache Will remove a variable, and all variables with higher numbers, from the variable cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! """ num = self.var_num(varname) #All numbers higher than this are renumbered for v, n in list(self._var_nums.items()): if n >= num: del self._var_nums[v] def add_attr_to_cache(self, attrname, num, scope): """Add an attribute to the name-to-number cache This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable scope : bool True if global scope; False if variable scope. 
""" self._attr_info[attrname] = (num, scope) def add_to_cache(self, varname, num): """Add a variable to the name-to-number cache This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable """ self._var_nums[varname] = num #Note there is no function for delete, currently handled in Var.rename #and Attr.rename by just deleting from the dict directly. Maybe this #should be differen (maybe should be possible to follow a variable across #a rename...) class Var(MutableSequence): """ A CDF variable. This object does not directly store the data from the CDF; rather, it provides access to the data in a format that much like a Python list or numpy :class:`~numpy.ndarray`. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. The CDF user's guide, section 2.3, provides background on variables. .. note:: Not intended to be created directly; use methods of :class:`CDF` to gain access to a variable. A record-varying variable's data are viewed as a hypercube of dimensions n_dims+1 (the extra dimension is the record number). They are indexed in row-major fashion, i.e. the last index changes most frequently / is contiguous in memory. If the CDF is column-major, the data are transformed to row-major before return. Non record-varying variables are similar, but do not have the extra dimension of record number. Variables can be subscripted by a multidimensional index to return the data. Indices are in row-major order with the first dimension representing the record number. If the CDF is column major, the data are reordered to row major. Each dimension is specified by standard Python `slice <http://docs.python.org/tutorial/introduction.html#strings>`_ notation, with dimensions separated by commas. The ellipsis fills in any missing dimensions with full slices. The returned data are lists; Python represents multidimensional arrays as nested lists. The innermost set of lists represents contiguous data. .. note:: numpy 'fancy indexing' is *not* supported. Degenerate dimensions are 'collapsed', i.e. no list of only one element will be returned if a single subscript is specified instead of a range. (To avoid this, specify a slice like 1:2, which starts with 1 and ends before 2). Two special cases: 1. requesting a single-dimension slice for a record-varying variable will return all data for that record number (or those record numbers) for that variable. 2. Requests for multi-dimensional variables may skip the record-number dimension and simply specify the slice on the array itself. In that case, the slice of the array will be returned for all records. In the event of ambiguity (e.g., single-dimension slice on a one-dimensional variable), case 1 takes priority. Otherwise, mismatch between the number of dimensions specified in the slice and the number of dimensions in the variable will cause an :exc:`~exceptions.IndexError` to be thrown. This all sounds very complicated but it is essentially attempting to do the 'right thing' for a range of slices. An unusual case is scalar (zero-dimensional) non-record-varying variables. Clearly they cannot be subscripted normally. 
In this case, use the ``[...]`` syntax meaning 'access all data.': >>> import pycdf >>> testcdf = pycdf.CDF('test.cdf', '') >>> variable = testcdf.new('variable', recVary=False, ... type=pycdf.const.CDF_INT4) >>> variable[...] = 10 >>> variable <Var: CDF_INT4 [] NRV > >>> variable[...] 10 Reading any empty non-record-varying variable will return an empty with the same *number* of dimensions, but all dimensions will be of zero length. The scalar is, again, a special case: due to the inability to have a numpy array which is both zero-dimensional and empty, reading an NRV scalar variable with no data will return an empty one-dimensional array. This is really not recommended. As a list type, variables are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_; iterating over a variable returns a single complete record at a time. This is all clearer with examples. Consider a variable ``B_GSM``, with three elements per record (x, y, z components) and fifty records in the CDF. Then: 1. ``B_GSM[0, 1]`` is the y component of the first record. 2. ``B_GSM[10, :]`` is a three-element list, containing x, y, and z components of the 11th record. As a shortcut, if only one dimension is specified, it is assumed to be the record number, so this could also be written ``B_GSM[10]``. 3. ``B_GSM[...]`` reads all data for ``B_GSM`` and returns it as a fifty-element list, each element itself being a three-element list of x, y, z components. Multidimensional example: consider fluxes stored as a function of pitch angle and energy. Such a variable may be called Flux and stored as a two-dimensional array, with the first dimension representing (say) ten energy steps and the second, eighteen pitch angle bins (ten degrees wide, centered from 5 to 175 degrees). Assume 100 records stored in the CDF (i.e. 100 different times). 1. ``Flux[4]`` is a list of ten elements, one per energy step, each element being a list of 18 fluxes, one per pitch bin. All are taken from the fifth record in the CDF. 2. ``Flux[4, :, 0:4]`` is the same record, all energies, but only the first four pitch bins (roughly, field-aligned). 3. ``Flux[..., 0:4]`` is a 100-element list (one per record), each element being a ten-element list (one per energy step), each containing fluxes for the first four pitch bins. This slicing notation is very flexible and allows reading specifically the desired data from the CDF. All data are, on read, converted to appropriate Python data types; EPOCH, EPOCH16, and TIME_TT2000 types are converted to :class:`~datetime.datetime`. Data are returned in numpy arrays. .. note:: Although pycdf supports TIME_TT2000 variables, the Python :class:`~datetime.datetime` object does not support leap seconds. Thus, on read, any seconds past 59 are truncated to 59.999999 (59 seconds, 999 milliseconds, 999 microseconds). Potentially useful list methods and related functions: - `count <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `in <http://docs.python.org/reference/expressions.html#in>`_ - `index <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `len <http://docs.python.org/library/functions.html#len>`_ - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - `sorted <http://docs.python.org/library/functions.html#sorted>`_ The topic of array majority can be very confusing; good background material is available at `IDL Array Storage and Indexing <http://www.idlcoyote.com/misc_tips/colrow_major.html>`_. 
In brief, *regardless of the majority stored in the CDF*, pycdf will always present the data in the native Python majority, row-major order, also known as C order. This is the default order in `NumPy <http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html #internal-memory-layout-of-an-ndarray>`_. However, packages that render image data may expect it in column-major order. If the axes seem 'swapped' this is likely the reason. The :attr:`~Var.attrs` Python attribute acts as a dictionary referencing zAttributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`zAttrList` for more on the dictionary of attributes. With writing, as with reading, every attempt has been made to match the behavior of Python lists. You can write one record, many records, or even certain elements of all records. There is one restriction: only the record dimension (i.e. dimension 0) can be resized by write, as all records in a variable must have the same dimensions. Similarly, only whole records can be deleted. .. note:: Unusual error messages on writing data usually mean that pycdf is unable to interpret the data as a regular array of a single type matching the type and shape of the variable being written. A 5x4 array is supported; an irregular array where one row has five columns and a different row has six columns is not. Error messages of this type include: - ``Data must be well-formed, regular array of number, string, or datetime`` - ``setting an array element with a sequence.`` - ``shape mismatch: objects cannot be broadcast to a single shape`` For these examples, assume Flux has 100 records and dimensions [2, 3]. Rewrite the first record without changing the rest: >>> Flux[0] = [[1, 2, 3], [4, 5, 6]] Writes a new first record and delete all the rest: >>> Flux[...] = [[1, 2, 3], [4, 5, 6]] Write a new record in the last position and add a new record after: >>> Flux[99:] = [[[1, 2, 3], [4, 5, 6]], ... [[11, 12, 13], [14, 15, 16]]] Insert two new records between the current number 5 and 6: >>> Flux[5:6] = [[[1, 2, 3], [4, 5, 6]], [[11, 12, 13], ... [14, 15, 16]]] This operation can be quite slow, as it requires reading and rewriting the entire variable. (CDF does not directly support record insertion.) Change the first element of the first two records but leave other elements alone: >>> Flux[0:2, 0, 0] = [1, 2] Remove the first record: >>> del Flux[0] Removes record 5 (the sixth): >>> del Flux[5] Due to the need to work around a bug in the CDF library, this operation can be quite slow. Delete *all data* from ``Flux``, but leave the variable definition intact: >>> del Flux[...] .. note:: Although this interface only directly supports zVariables, zMode is set on opening the CDF so rVars appear as zVars. See p.24 of the CDF user's guide; pyCDF uses zMode 2. .. autosummary:: ~Var.attrs ~Var.compress ~Var.copy ~Var.dtype ~Var.dv ~Var.insert ~Var.name ~Var.rename ~Var.rv ~Var.shape ~Var.type .. attribute:: Var.attrs zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. .. automethod:: compress .. automethod:: copy .. autoattribute:: dtype .. automethod:: dv .. automethod:: insert .. automethod:: name .. automethod:: rename .. automethod:: rv .. autoattribute:: shape .. 
automethod:: type """ def __init__(self, cdf_file, var_name, *args): """Create or locate a variable Parameters ========== cdf_file : :py:class:`pycdf.CDF` CDF file containing this variable var_name : string name of this variable Other Parameters ================ args additional arguments passed to :py:meth:`_create`. If none, opens an existing variable. If provided, creates a new one. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning """ self.cdf_file = cdf_file #This is the definitive "identify" of variable self._name = None self._type = None #CDF type (long) self._raw = False #Raw access (skip all conversions) if len(args) == 0: self._get(var_name) else: self._create(var_name, *args) #Weak reference to attribute list (use attrs instead) #This avoids a reference loop self._attrlistref = weakref.ref(zAttrList(self)) def __getitem__(self, key): """Returns a slice from the data array. Details under :py:class:`pycdf.Var`. @return: The data from this variable @rtype: list-of-lists of appropriate type. @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) #Hyperslice mostly catches this sort of thing, but #an empty variable is a special case, since we might want to #WRITE to 0th record (which Hyperslice also supports) but #can't READ from it, and iterating over tries to read from it. if hslice.rv: if hslice.dimsizes[0] == 0 and hslice.degen[0] and \ hslice.starts[0] == 0: raise IndexError('record index out of range') #For NRV, again hslice will assume 0th record exists since we might #want to write. So ANY degenerate dim other than the glued-on 0th #suggests an explicit index that should fail. None degenerate suggests #make an empty array. #Note this is pulling a lot of hyperslice stuff into getitem! elif hslice.dimsizes[0] == 0: if len(hslice.degen) > 1 and max(hslice.degen[1:]): raise IndexError('record index out of range') else: #The zero-length dimension is degenerate so it gets chopped, #and you can't have a zero-length numpy array that still #maintains the size of all other dimensions. So just force #a zero-dim array and the rest will follow hslice.counts[...] = 0 #If this is a scalar, need to make a single non-degenerate #dimension so it can be empty. if len(hslice.counts) == 1: hslice.degen[0] = False result = hslice.create_array() if hslice.counts[0] != 0: hslice.select() lib.call(const.GET_, const.zVAR_HYPERDATA_, result.ctypes.data_as(ctypes.c_void_p)) return hslice.convert_input_array(result) def __delitem__(self, key): """Removes a record (or set of records) from the CDF Only whole records can be deleted, so the del call must either specify only one dimension or it must specify all elements of the non-record dimensions. This is *not* a way to resize a variable! Deleting records from the middle of a variable may be very slow in some circumstances. To work around a bug in CDF library versions 3.4.0 and before, all the data must be read in, the requested deletions done, and then all written back out. 
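        For example, C{del variable[0]} removes the first record (later
        records are renumbered down by one), and C{del variable[...]}
        removes all data while leaving the variable itself defined.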
@param key: index or slice to delete @type key: int or slice @raise TypeError: if an attempt is made to delete from a non record-varying variable, or to delete below the record level """ if not self.rv(): raise TypeError('Cannot delete records from non-record-varying ' 'variable.') hslice = _Hyperslice(self, key) if hslice.dims > 1 and (hslice.counts[1:] != hslice.dimsizes[1:]).any(): raise TypeError('Can only delete entire records.') if hslice.counts[0] == 0: return start = hslice.starts[0] count = hslice.counts[0] interval = hslice.intervals[0] dimsize = hslice.dimsizes[0] self._call() dangerous_delete = False if lib._del_middle_rec_bug and \ (interval != 1 or (start != 0 and start + count < dimsize)): #delete from middle is dangerous if only have one index entry entries = ctypes.c_long(0) lib.call(const.GET_, const.zVAR_nINDEXENTRIES_, ctypes.byref(entries)) dangerous_delete = (entries.value == 1) if dangerous_delete: data = self[...] data = numpy.delete( data, numpy.arange(start, start + count * interval, interval), 0) self[0:dimsize - count] = data first_rec = dimsize - count last_rec = dimsize - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif interval == 1: first_rec = ctypes.c_long(start) last_rec = ctypes.c_long(start + count - 1) lib.call(const.DELETE_, const.zVAR_RECORDS_, first_rec, last_rec) else: self._call() #delete from end to avoid renumbering of records for recno in range(start + (count - 1) * interval, start - 1, -1 * interval): lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(recno), ctypes.c_long(recno)) def __setitem__(self, key, data): """Puts a slice into the data array. Details under :py:class:`pycdf.Var`. @param key: index or slice to store @type key: int or slice @param data: data to store @type data: numpy.array @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. 
IndexError will @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) n_recs = hslice.counts[0] hslice.expand(data) cdf_type = self.type() if cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) else: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=self._np_type()) if cdf_type == const.CDF_EPOCH16.value: datashape = data.shape[:-1] else: datashape = data.shape #Check data sizes if datashape != tuple(hslice.expected_dims()): raise ValueError('attempt to assign data of dimensions ' + str(datashape) + ' to slice of dimensions ' + str(tuple(hslice.expected_dims()))) #Flip majority and reversed dimensions, see convert_input_array data = hslice.convert_output_array(data) #Handle insertions and similar weirdness if hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Specified slice ends before last record, so insert in middle saved_data = self[hslice.starts[0] + n_recs:] if hslice.counts[0] > 0: hslice.select() lib.call(const.PUT_, const.zVAR_HYPERDATA_, data.ctypes.data_as(ctypes.c_void_p)) if hslice.counts[0] < n_recs: first_rec = hslice.starts[0] + hslice.counts[0] last_rec = hslice.dimsizes[0] - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Put saved data in after inserted data self[hslice.starts[0] + hslice.counts[0]:] = saved_data def extend(self, data): """ Append multiple values to the end of this variable This is an efficiency function which overrides the base implementation in MutableSequence. Parameters ---------- data : the data to append """ self[len(self):] = data def insert(self, index, data): """ Inserts a *single* record before an index Parameters ---------- index : int index before which to insert the new record data : the record to insert """ self[index:index] = [data] def _create(self, var_name, datatype, n_elements = 1, dims = (), recVary = const.VARY, dimVarys = None): """Creates a new zVariable @param var_name: name of this variable @type var_name: string @param datatype: CDF data type @type datatype: ctypes.c_long @param n_elements: number of elements (should be 1 except for CDF_CHAR variables). @type n_elements: long @param dims: size of each dimension for multi-dimensional variable, or empty for a zero-dimensional @type dims: sequence of long @param recVary: record variance for this variable (VARY/NOVARY) @type recVary: long @param dimVarys: array of VARY or NOVARY, variance for each dimension @type dimVarys: sequence of long @return: new variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.new}. 
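        For example (variable name and sizes illustrative),
        C{cdf.new('Flux', type=const.CDF_FLOAT, dims=[2, 3])} ultimately
        calls this as C{self._create('Flux', const.CDF_FLOAT, 1, [2, 3],
        const.VARY, [const.VARY, const.VARY])}.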
""" dim_array = (ctypes.c_long * len(dims))(*dims) enc_name = var_name.encode('ascii') if dimVarys is None: dim_vary_array = (ctypes.c_long * (len(dims) if len(dims) > 0 else 1))(const.VARY) else: dim_vary_array = (ctypes.c_long * len(dims))(*dimVarys) varNum = ctypes.c_long(0) self.cdf_file._call(const.CREATE_, const.zVAR_, enc_name, datatype, ctypes.c_long(n_elements), ctypes.c_long(len(dims)), dim_array, recVary, dim_vary_array, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) def _delete(self): """Removes this zVariable from the CDF @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ self._call(const.DELETE_, const.zVAR_) self.cdf_file.clear_from_cache(self._name) self._name = None def _get(self, var_name): """Gets an existing zVariable @param var_name: name of this variable @type var_name: string @return: variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.__getitem__}. """ if isinstance(var_name, str_classes): try: enc_name = var_name.encode('ascii').rstrip() except AttributeError: enc_name = var_name.rstrip() #already in ASCII #'touch' CDF to cause an error if the name isn't there; get number varNum = ctypes.c_long(0) self.cdf_file._call(const.GET_, const.zVAR_NUMBER_, enc_name, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) else: #Looking up by number name = ctypes.create_string_buffer(const.CDF_VAR_NAME_LEN256+1) self.cdf_file._call(const.SELECT_, const.zVAR_, ctypes.c_long(var_name), const.GET_, const.zVAR_NAME_, name) self._name = name.value.rstrip() self.cdf_file.add_to_cache(self._name, var_name) def _num(self): """Returns the zVar number for this variable @return: number of this zVar @rtype: int """ return self.cdf_file.var_num(self._name) def __len__(self): """Get number of records for this variable in this file @return: Number of records @rtype: long @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ count = ctypes.c_long(0) self._call(const.GET_, const.zVAR_MAXREC_, ctypes.byref(count)) return (count.value + 1) def __repr__(self): """Returns representation of the variable Cannot return anything that can be eval'd to create a copy, so just wrap the informal representation in angle brackets. @return: info on this zVar @rtype: str """ return '<Var:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the variable This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.Var`. 
@return: info on this zVar, CDFTYPE [dimensions] NRV (if not record-varying) @rtype: str """ if self.cdf_file._opened: cdftype = self.type() chartypes = (const.CDF_CHAR.value, const.CDF_UCHAR.value) rv = self.rv() typestr = lib.cdftypenames[cdftype] + \ ('*' + str(self._nelems()) if cdftype in chartypes else '' ) if rv: sizestr = str([len(self)] + self._dim_sizes()) else: sizestr = str(self._dim_sizes()) return typestr + ' ' + sizestr + ('' if rv else ' NRV') else: if isinstance(self._name, str): return 'zVar "{0}" in closed CDF {1}'.format( self._name, self.cdf_file.pathname) else: return 'zVar "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self.cdf_file.pathname.decode('ascii')) def _n_dims(self): """Get number of dimensions for this variable @return: the number of dimensions @rtype: long """ n_dims = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMDIMS_, ctypes.byref(n_dims)) return n_dims.value def _dim_sizes(self): """Get the dimension sizes for this variable @return: sequence of sizes @rtype: sequence of long @note: This will always be in Python order (i.e. row major, last index iterates most quickly), *regardless* of the majority of the CDF. """ sizes = (ctypes.c_long * const.CDF_MAX_DIMS)(0) self._call(const.GET_, const.zVAR_DIMSIZES_, sizes) sizes = sizes[0:self._n_dims()] return sizes def rv(self, new_rv=None): """ Gets or sets whether this variable has record variance If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Other Parameters ================ new_rv : boolean True to change to record variance, False to change to NRV, unspecified to simply check variance. Returns ======= out : Boolean True if record varying, False if NRV """ if new_rv != None: self._call(const.PUT_, const.zVAR_RECVARY_, const.VARY if new_rv else const.NOVARY) vary = ctypes.c_long(0) self._call(const.GET_, const.zVAR_RECVARY_, ctypes.byref(vary)) return vary.value != const.NOVARY.value def dv(self, new_dv=None): """ Gets or sets dimension variance of each dimension of variable. If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Parameters ========== new_dv : list of boolean Each element True to change that dimension to dimension variance, False to change to not dimension variance. (Unspecified to simply check variance.) Returns ======= out : list of boolean True if that dimension has variance, else false. """ ndims = self._n_dims() if new_dv != None: if len(new_dv) != ndims: raise ValueError('Must specify variance for ' + str(ndims) + 'dimensions.') varies = (ctypes.c_long * ndims)( *[const.VARY if dv else const.NOVARY for dv in new_dv]) self._call(const.PUT_, const.zVAR_DIMVARYS_, varies) if ndims == 0: return [] varies = (ctypes.c_long * const.CDF_MAX_DIMS)() self._call(const.GET_, const.zVAR_DIMVARYS_, varies) return [dv != const.NOVARY.value for dv in varies[0:ndims]] def _call(self, *args, **kwargs): """Select this CDF and variable and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. @param args: Passed directly to the CDF library interface. Useful constants are defined in the :py:mod:`pycdf.const` module of this package. @type args: various, see :py:mod:`ctypes`. 
@return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self.cdf_file._call( const.SELECT_, const.zVAR_, ctypes.c_long(self.cdf_file.var_num(self._name)), *args, **kwargs) def _np_type(self): """Returns the numpy type of this variable This is the numpy type that will come directly out of the CDF; see :meth:`dtype` for the representation post-conversion. Raises ====== CDFError : for library-reported error or failure to find numpy type Returns ======= out : dtype numpy dtype that will hold value from this variable """ cdftype = self.type() if cdftype == const.CDF_CHAR.value or cdftype == const.CDF_UCHAR.value: return numpy.dtype('S' + str(self._nelems())) try: return lib.numpytypedict[cdftype] except KeyError: raise CDFError(const.BAD_DATA_TYPE) def type(self, new_type=None): """ Returns or sets the CDF type of this variable Parameters ========== new_type : ctypes.c_long the new type from :mod:`~pycdf.const` Returns ======= out : int CDF type """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) n_elements = ctypes.c_long(self._nelems()) self._call(const.PUT_, const.zVAR_DATASPEC_, new_type, n_elements) self._type = None if self._type is None: cdftype = ctypes.c_long(0) self._call(const.GET_, const.zVAR_DATATYPE_, ctypes.byref(cdftype)) self._type = cdftype.value return self._type def _nelems(self): """Number of elements for each value in this variable This is the length of strings for CHAR and UCHAR, should be 1 otherwise. @return: length of strings @rtype: int """ nelems = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMELEMS_, ctypes.byref(nelems)) return nelems.value def name(self): """ Returns the name of this variable Returns ======= out : str variable's name """ if isinstance(self._name, str): return self._name elif isinstance(self._name, bytes): return self._name.decode() def compress(self, comptype=None, param=None): """ Set or check the compression of this variable Compression may not be changeable on variables with data already written; even deleting the data may not permit the change. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
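            For example, ``zvar.compress(const.GZIP_COMPRESSION)`` switches
            this variable to gzip compression at the default level
            (``zvar`` and the choice of gzip are illustrative).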
Returns ======= out : tuple the (comptype, param) currently in effect """ return _compress(self, comptype, param) def copy(self): """ Copies all data and attributes from this variable Returns ======= out : :class:`VarCopy` list of all data in record order """ return VarCopy(self) def rename(self, new_name): """ Renames this variable Parameters ========== new_name : str the new name for this variable """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_VAR_NAME_LEN256: raise CDFError(const.BAD_VAR_NAME) self._call(const.PUT_, const.zVAR_NAME_, enc_name) self.cdf_file.add_to_cache( enc_name, self.cdf_file.var_num(self._name)) #Still in cache del self.cdf_file._var_nums[self._name] self._name = enc_name @property def shape(self): """ Provides the numpy array-like shape of this variable. Returns a tuple; first element is number of records (RV variable only) And the rest provide the dimensionality of the variable. .. note:: Assigning to this attribute will not change the shape. """ if self.rv(): return tuple([len(self)] + self._dim_sizes()) else: return tuple(self._dim_sizes()) @property def dtype(self): """ Provide the numpy dtype equivalent to the CDF type of this variable. Data from this variable will be returned in numpy arrays of this type. See Also -------- type """ cdftype = self.type() if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str is not bytes and not self._raw: return numpy.dtype('U' + str(self._nelems())) if cdftype in (const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value) and not self._raw: return numpy.dtype('O') return self._np_type() def _get_attrs(self): """Get attribute list Provide access to the zVar's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = zAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. """) class _Hyperslice(object): """Represents a CDF 'slice' used for the hyper CDF functions For internal module use only. @ivar dims: number of dimensions to this slice, usually number of dimensions to the variable plus one for the record, which represents the 0th (least rapidly varying) dimension. @type dims: int @ivar dimsizes: size of each dimension (0th is number of records) @type dimsizes: list of int @ivar starts: index of the start value for each dimension ('dimension indices' in CDF speak) @type starts: list of int @ivar counts: number of values to get from each dimension. Final result will be the product of everything in counts. ('dimension counts' in CDF speak) @type counts: numpy.array @ivar intervals: interval between successive indices to use for each dimension. ('dimension invervals' in CDF speak) @type intervals: list of int @ivar degen: is this dimension degenerate, i.e. should be removed in the returned dataset. A 3D array with one dimension degenerate will be returned as a 2D array (i.e. list-of-lists.) @type degen: numpy.array @ivar rev: should this dimension be returned in reverse order? 
@type rev: numpy.array @ivar column: is this slice in column-major mode (if false, row-major) @type column: boolean @ivar zvar: what CDF variable this object slices on @type zvar: :py:class:`pycdf.Var` @ivar expanded_key: fully-expanded version of the key passed to the constructor (all dimensions filled in) @type expanded_key: tuple @note: All dimension-related variables are stored row-major (Python order) """ def __init__(self, zvar, key): """Create a Hyperslice @param zvar: zVariable that this slices @type zvar: :py:class:`pycdf.Var` @param key: Python multi-dimensional slice as passed to __getitem__ @type key: tuple of slice and/or int @raise IndexError: if slice is out of range, mismatches dimensions, or otherwise unparsable. @raise ValueError: if slice has invalid values """ self.zvar = zvar self.rv = self.zvar.rv() #dim of records, + 1 record dim (NRV always is record 0) self.dims = zvar._n_dims() + 1 self.dimsizes = [len(zvar)] + \ zvar._dim_sizes() self.starts = [0] * self.dims self.counts = numpy.empty((self.dims,), dtype=numpy.int32) self.counts.fill(1) self.intervals = [1] * self.dims self.degen = numpy.zeros(self.dims, dtype=numpy.bool) self.rev = numpy.zeros(self.dims, dtype=numpy.bool) #key is: #1. a single value (integer or slice object) if called 1D #2. a tuple (of integers and/or slice objects) if called nD #3. Each item is either a single value (degenerate dim) # or a slice object. if not hasattr(key, '__len__'): #Not a container object, pack in tuple key = (key, ) if not self.rv: key = (0, ) + key #NRV, so always get 0th record (degenerate) key = self.expand_ellipsis(key, self.dims) if self.rv: #special-cases for RV variables if len(key) == 1: #get all data for this record(s) key = self.expand_ellipsis(key + (Ellipsis, ), self.dims) elif len(key) == self.dims - 1: #get same slice from each record key = (slice(None, None, None), ) + key if len(key) == self.dims: self.expanded_key = key for i in range(self.dims): idx = key[i] if hasattr(idx, 'start'): #slice (self.starts[i], self.counts[i], self.intervals[i], self.rev[i]) = \ self.convert_range(idx.start, idx.stop, idx.step, self.dimsizes[i]) else: #Single degenerate value if idx < 0: idx += self.dimsizes[i] if idx != 0 and (idx >= self.dimsizes[i] or idx < 0): raise IndexError('list index out of range') self.starts[i] = idx self.degen[i] = True else: raise IndexError('Slice does not match dimensions for zVar ' + str(zvar._name)) self.column = zvar.cdf_file.col_major() def expected_dims(self, data=None): """Calculate size of non-degenerate dimensions Figures out size, in each dimension, of expected input data @return: size of each dimension for this slice, excluding degenerate @rtype: list of int """ return [self.counts[i] for i in range(self.dims) if not self.degen[i]] def expand(self, data): """Expands the record dimension of this slice to hold a set of data If the length of data (outermost dimension) is larger than the record count (counts[0]) for this slice, expand the slice to hold all the data. This requires that the record dimension of the slice not be degenerate, and also that it not have been completely specified when the hyperslice was created (i.e. record dimension either ellipsis or no specified stop.) Does *not* expand any other dimension, since that's Very Hard in CDF. 
@param data: the data which are intended to be stored in this slice @type data: list """ rec_slice = self.expanded_key[0] if not self.rv or isinstance(data, str_classes) or self.degen[0] or \ not hasattr(rec_slice, 'stop'): return if len(data) < self.counts[0]: #Truncate to fit data if rec_slice.stop is None and rec_slice.step in (None, 1): self.counts[0] = len(data) elif len(data) > self.counts[0]: #Expand to fit data if rec_slice.step in (None, 1): self.counts[0] = len(data) def create_array(self): """Creates a numpy array to hold the data from this slice Returns ======= out : numpy.array array sized, typed, and dimensioned to hold data from this slice """ counts = self.counts degen = self.degen if self.column: counts = self.reorder(counts) degen = self.reorder(degen) #TODO: Forcing C order for now, revert to using self.column later array = numpy.empty( [counts[i] for i in range(len(counts)) if not degen[i]], self.zvar._np_type(), order='C') return numpy.require(array, requirements=('C', 'A', 'W')) def convert_input_array(self, buffer): """Converts a buffer of raw data from this slice EPOCH(16) variables always need to be converted. CHAR need converted to Unicode if py3k Parameters ========== buffer : numpy.array data as read from the CDF file Returns ======= out : numpy.array converted data """ result = self._flip_array(buffer) #Convert to derived types cdftype = self.zvar.type() if not self.zvar._raw: if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str != bytes: dt = numpy.dtype('U{0}'.format(result.dtype.itemsize)) result = numpy.require(numpy.char.array(result).decode(), dtype=dt) elif cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(result) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(result) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(result) return result def convert_output_array(self, buffer): """Convert a buffer of data that will go into this slice Parameters ========== buffer : numpy.array data to go into the CDF file Returns ======= out : numpy.array input with majority flipped and dimensions reversed to be suitable to pass directly to CDF library. """ buffer = self._flip_array(buffer) return numpy.require(buffer, requirements=('C', 'A', 'W')) def _flip_array(self, data): """ Operations for majority, etc. common between convert_input and _output """ cdftype = self.zvar.type() #Flip majority if any non-degenerate dimensions exist if self.column and not min(self.degen): #Record-number dim degen, swap whole thing if self.degen[0]: if cdftype == const.CDF_EPOCH16.value: #Maintain last dimension data = data.transpose( list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose() #Record-number dimension is not degenerate, so keep it first else: if cdftype == const.CDF_EPOCH16.value: data = data.transpose( [0] + list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose( [0] + list(range(len(data.shape) - 1, 0, -1))) #Reverse non-degenerate dimensions in rev #Remember that the degenerate indices are already gone! 
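        #rev is set by convert_range for any dimension that was sliced with
        #a negative step: the data are read from the CDF in forward order
        #and the requested reversal is applied here instead.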
if self.rev.any(): sliced = [(slice(None, None, -1) if self.rev[i] else slice(None)) for i in range(self.dims) if not self.degen[i]] if cdftype == const.CDF_EPOCH16.value: #don't reverse last dim sliced.extend(slice(None)) data = operator.getitem(data, tuple(sliced)) return data def select(self): """Selects this hyperslice in the CDF Calls the CDF library to select the CDF, variable, records, and array elements corresponding to this slice. """ args = (const.SELECT_, const.zVAR_RECNUMBER_, ctypes.c_long(self.starts[0]), const.SELECT_, const.zVAR_RECCOUNT_, ctypes.c_long(self.counts[0]), const.SELECT_, const.zVAR_RECINTERVAL_, ctypes.c_long(self.intervals[0])) if self.dims > 1: dims = self.dims - 1 args += (const.SELECT_, const.zVAR_DIMINDICES_, (ctypes.c_long * dims)(*self.starts[1:]), const.SELECT_, const.zVAR_DIMCOUNTS_, (ctypes.c_long * dims)(*self.counts[1:]), const.SELECT_, const.zVAR_DIMINTERVALS_, (ctypes.c_long * dims)(*self.intervals[1:])) self.zvar._call(*args) @staticmethod def expand_ellipsis(slices, n_dims): """Expands any ellipses into correct number of full-size slices @param slices: tuple of slices, integers, or ellipse objects @type slices: tuple @param n_dims: number of dimensions this slice is over @type n_dims: int @return: L{slices} with ellipses replaced by appropriate number of full-dimension slices @rtype: tuple @raise IndexError: if ellipses specified when already have enough dimensions """ if slices is Ellipsis: return tuple([slice(None, None, None) for i in range(n_dims)]) #Elements might be numpy arrays, so can't use in/index idx = [i for i, v in enumerate(slices) if v is Ellipsis] if not idx: #no ellipsis return slices if len(idx) > 1: #multiples! raise IndexError('Ellipses can only be used once per slice.') idx = idx[0] #how many dims to expand ellipsis to #remember the ellipsis is in len(slices) and must be replaced! extra = n_dims - len(slices) + 1 if extra < 0: raise IndexError('too many indices') result = slices[0:idx] + (slice(None), ) * extra + slices[idx+1:] return result @staticmethod def check_well_formed(data): """Checks if input data is well-formed, regular array""" d = numpy.asanyarray(data) if d.dtype == numpy.object: #this is probably going to be bad try: len(d.flat[0]) except TypeError: #at least it's not a list pass else: raise ValueError( 'Data must be well-formed, regular array of number, ' 'string, or datetime') @staticmethod def dimensions(data): """Finds the dimensions of a nested list-of-lists @param data: data of which dimensions are desired @type data: list (of lists) @return: dimensions of L{data}, in order outside-in @rtype: list of int @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) _Hyperslice.check_well_formed(d) return d.shape @staticmethod def types(data, backward=False): """Find dimensions and valid types of a nested list-of-lists Any given data may be representable by a range of CDF types; infer the CDF types which can represent this data. This breaks down to: 1. Proper kind (numerical, string, time) 2. Proper range (stores highest and lowest number) 3. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: 1. Type that matches precision of data first, then 2. integer type before float type, then 3. Smallest type first, then 4. signed type first, then 5. specifically-named (CDF_BYTE) vs. 
generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if L{data} specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: 1. absolute values between 0 and 3e-39 2. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. @param data: data for which dimensions and CDF types are desired @type data: list (of lists) @param backward: limit to pre-CDF3 types @type backward: bool @return: dimensions of L{data}, in order outside-in; CDF types which can represent this data; number of elements required (i.e. length of longest string) @rtype: 3-tuple of lists ([int], [ctypes.c_long], [int]) @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) dims = d.shape elements = 1 types = [] _Hyperslice.check_well_formed(d) if d.dtype.kind in ('S', 'U'): #it's a string types = [const.CDF_CHAR, const.CDF_UCHAR] elements = d.dtype.itemsize if d.dtype.kind == 'U': #UTF-8 uses 4 bytes per elements //= 4 elif d.size and hasattr(numpy.ma.getdata(d).flat[0], 'microsecond'): if max((dt.microsecond % 1000 for dt in d.flat)) > 0: types = [const.CDF_EPOCH16, const.CDF_EPOCH, const.CDF_TIME_TT2000] else: types = [const.CDF_EPOCH, const.CDF_EPOCH16, const.CDF_TIME_TT2000] if backward: del types[types.index(const.CDF_EPOCH16)] del types[-1] elif not lib.supports_int8: del types[-1] elif d is data or isinstance(data, numpy.generic): #numpy array came in, use its type (or byte-swapped) types = [k for k in lib.numpytypedict if (lib.numpytypedict[k] == d.dtype or lib.numpytypedict[k] == d.dtype.newbyteorder()) and not k in lib.timetypes] if (not lib.supports_int8 or backward) \ and const.CDF_INT8.value in types: del types[types.index(const.CDF_INT8.value)] #Maintain priority to match the ordered lists below: #float/double (44, 45) before real (21/22), and #byte (41) before int (1) before char (51). So hack. #Consider making typedict an ordered dict once 2.6 is dead. types.sort(key=lambda x: x % 50, reverse=True) if not types: #not a numpy array, or can't parse its type if d.dtype.kind == 'O': #Object. 
Try to make it numeric #Can't do safe casting from Object, so try and compare #Basically try most restrictive to least restrictive trytypes = (numpy.uint64, numpy.int64, numpy.float64) for t in trytypes: try: newd = d.astype(dtype=t) except: #Failure to cast, try next type continue if (newd == d).all(): #Values preserved, use this type d = newd #Continue with normal guessing, as if a list break else: #fell through without a match raise ValueError( 'Cannot convert generic objects to CDF type.') if d.dtype.kind in ('i', 'u'): #integer minval = numpy.min(d) maxval = numpy.max(d) if minval < 0: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_INT2, const.CDF_INT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 15, 2 ** 31, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] else: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_UINT1, const.CDF_INT2, const.CDF_UINT2, const.CDF_INT4, const.CDF_UINT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 8, 2 ** 15, 2 ** 16, 2 ** 31, 2 ** 32, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] types = [t for (t, c) in zip(types, cutoffs) if c > maxval and (minval >= 0 or minval >= -c)] if (not lib.supports_int8 or backward) \ and const.CDF_INT8 in types: del types[types.index(const.CDF_INT8)] else: #float if dims is (): if d != 0 and (abs(d) > 1.7e38 or abs(d) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] else: absolutes = numpy.abs(d[d != 0]) if len(absolutes) > 0 and \ (numpy.max(absolutes) > 1.7e38 or numpy.min(absolutes) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] types = [t.value if hasattr(t, 'value') else t for t in types] return (dims, types, elements) @staticmethod def reorder(seq): """Reorders seq to switch array majority Used to take an array of subscripts between row and column majority. First element is not touched, being the record number. @param seq: a sequence of *subscripts* @type seq: sequence of integers @return: seq with all but element 0 reversed in order @rtype: sequence of integers """ return numpy.concatenate((seq[0:1], numpy.flipud(seq)[:-1])) @staticmethod def convert_range(start, stop, step, size): """Converts a start/stop/step range to start/count/interval (i.e. changes from Python-style slice to CDF-style) @param start: index to start a slice at, may be none or negative @type start: int @param stop: index at end of slice (one-past, standard Python), may be none or negative @type stop: int @param step: interval for stepping through stlice @type step: int @param size: size of list to slice @type size: int @return: (start, count, interval, rev) where: 1. start is the start index, normalized to be within the size of the list and negatives handled 2. count is the number of records in the slice, guaranteed to stop before the end 3. interval is the skip between records 4. rev indicates whether the sequence should be reversed @rtype: (int, int, int, boolean) """ (start, stop, step) = slice(start, stop, step).indices(size) if step < 0: step *= -1 count = int((start - stop + step - 1) / step) start = start - (count - 1) * step rev = True else: count = int((stop - start + step - 1) / step) rev = False if count < 0: count = 0 start = 0 return (start, count, step, rev) class Attr(MutableSequence): """An attribute, g or z, for a CDF .. 
warning:: This class should not be used directly, but only in its subclasses, :class:`gAttr` and :class:`zAttr`. The methods listed here are safe to use in the subclasses. Represents a CDF attribute, providing access to the Entries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. An introduction to CDF attributes can be found in section 2.4 of the CDF user's guide. Each element of the list is a single Entry of the appropriate type. The index to the elements is the Entry number. Multi-dimensional slicing is *not* supported; an Entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry .. autosummary:: ~Attr.append ~Attr.has_entry ~Attr.insert ~Attr.max_idx ~Attr.new ~Attr.number ~Attr.rename ~Attr.type .. automethod:: append .. automethod:: has_entry .. automethod:: insert .. automethod:: max_idx .. automethod:: new .. automethod:: number .. automethod:: rename .. automethod:: type """ def __init__(self, cdf_file, attr_name, create=False): """Initialize this attribute @param cdf_file: CDF file containing this attribute @type cdf_file: :py:class:`pycdf.CDF` @param attr_name: Name of this attribute @type attr_name: str @param create: True to create attribute, False to look up existing. @type create: bool """ self._cdf_file = cdf_file self._raw = False if isinstance(attr_name, str_classes): try: self._name = attr_name.encode('ascii') except AttributeError: self._name = attr_name attrno = ctypes.c_long() if create: self._cdf_file._call(const.CREATE_, const.ATTR_, self._name, self.SCOPE, ctypes.byref(attrno)) self._cdf_file.add_attr_to_cache( self._name, attrno.value, self.SCOPE == const.GLOBAL_SCOPE) else: #Ensure exists, and populate cache. See scope note below attrno, scope = self._cdf_file.attr_num(self._name) else: name = ctypes.create_string_buffer(const.CDF_ATTR_NAME_LEN256 + 1) scope = ctypes.c_long(0) self._cdf_file._call(const.SELECT_, const.ATTR_, ctypes.c_long(attr_name)) #Because it's possible to create a gAttr Python objecting #referencing an Attribute with variable scope, and vice-versa, #do NOT assume the scope matches #(Higher level code checks for that being a bad thing.) self._cdf_file._call( const.GET_, const.ATTR_NAME_, name, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) self._name = name.value.rstrip() if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) self._cdf_file.add_attr_to_cache(self._name, attr_name, scope) def __getitem__(self, key): """Return a slice of Entries. Because Attributes may be sparse, a multi-element slice will return None for those elements which do not have associated Entries. @param key: index or range of Entry number to return @type key: slice or int @return: a list of entries, appropriate type. @raise IndexError: if L{key} is an int and that Entry number does not exist. 
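        @note: For example, C{attribute[:]} returns one element per possible
               Entry number, with C{None} in the positions of undefined
               Entries, while C{attribute[3]} raises IndexError if Entry 3
               does not exist.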
""" if key is Ellipsis: key = slice(None, None, None) if hasattr(key, 'indices'): idx = range(*key.indices(self.max_idx() + 1)) return [self._get_entry(i) if self.has_entry(i) else None for i in idx] else: if self.has_entry(key): return self._get_entry(key) else: raise IndexError('list index ' + str(key) + ' out of range.') def _check_other_entries(self, types): """Try to get the type of this entry from others in the Attribute For zAttrs, checks if all other Entries are the same type, and at least one doesn't match its zVar, i.e. Entry type dominates (otherwise assumption is the Var type dominates). For gAttrs, checks all other Entries, and gives priority to the one that's earliest in the possible type list and exists in other Entries. This is only one component of Entry type guessing! :param list types: CDF types that are candidates (match the data) :return: The type discerned from other Entries, or None """ if self.ENTRY_ == const.zENTRY_: #If everything else is the same entry type, #and one is not the same as its var, probably #all entries should be of that type cand_et = None #The Entry type that might work one_var_diff = False #One Var has a type different from Entry for num in range(self.max_idx() + 1): if not self.has_entry(num): continue vartype = self._cdf_file[num].type() entrytype = self.type(num) if vartype != entrytype: one_var_diff = True if cand_et is None: if not entrytype in types: return None #One var has Entry with "impossible" type cand_et = entrytype elif cand_et != entrytype: return None #Two vars have Entries with different types if one_var_diff and cand_et is not None: return cand_et else: # Of those types which exist in other entries, # find the one which is earliest # in types, i.e. the preferred type entrytypes = [self.type(num) for num in range(self.max_idx() + 1) if self.has_entry(num)] entrytypes = [et for et in entrytypes if et in types] if entrytypes: return types[ min([types.index(et) for et in entrytypes])] return None def __setitem__(self, key, data): """Set a slice of Entries. @param key: index or range of Entry numbers to set @type key: slice or int @param data: the data to set these entries to. Normally each entry should be a sequence; if a scalar is provided, it is treated as a single-element list. @type data: scalar or list @raise ValueError: if size of {data} does not match size of L{key} @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. 
""" if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): #Single value, promote everything a dimension idx = (key, key + 1, 1) data = [data] else: idx = key.indices(self.max_idx() + 1) if key.step is None or key.step > 0: #Iterating forward, extend slice to match data if len(data) > len(range(*idx)): idx = (idx[0], idx[0] + idx[2] * len(data), idx[2]) #get, and check, types and sizes for all data #checks first so don't have error after changing half the Entries data_idx = -1 typelist = [] for i in range(*idx): data_idx += 1 if data_idx >= len(data): continue datum = data[data_idx] if datum is None: typelist[i] = (None, None, None) continue (dims, types, elements) = _Hyperslice.types( datum, backward=self._cdf_file.backward) if len(types) <= 0: raise ValueError('Cannot find a matching CDF type.') if len(dims) > 1: raise ValueError('Entries must be scalar or 1D.') elif len(dims) == 1 and isinstance(datum[0], str_classes): raise ValueError('Entry strings must be scalar.') entry_type = None if self.has_entry(i): #If the entry already exists, match its type entry_type = self.type(i) if not entry_type in types: entry_type = None if entry_type is None: #Check other entries for this attribute entry_type = self._check_other_entries(types) if entry_type is None and self.ENTRY_ == const.zENTRY_: #Fall back to zVar type vartype = self._cdf_file[i].type() if vartype in types: entry_type = vartype else: entry_type = types[0] elif entry_type is None: entry_type = types[0] if not entry_type in lib.numpytypedict: raise ValueError('Cannot find a matching numpy type.') typelist.append((dims, entry_type, elements)) data_idx = -1 for i in range(*idx): data_idx += 1 if data_idx >= len(data) or data[data_idx] is None: if self.has_entry(i): del self[i] continue datum = data[data_idx] (dims, entry_type, elements) = typelist[data_idx] self._write_entry(i, datum, entry_type, dims, elements) def __delitem__(self, key): """Delete a slice of Entries. @param key: index or range of Entry numbers to delete @type key: slice or int @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. """ if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): idx = (key, key + 1, 1) else: idx = key.indices(self.max_idx() + 1) for i in range(*idx): self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(i), const.DELETE_, self.ENTRY_) def __iter__(self, current=0): """Iterates over all entries in this Attribute Returns data from one entry at a time until reaches the end. @note: Returned in entry-number order. """ while current <= self.max_idx(): if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current += 1 def __reversed__(self, current=None): """Iterates over all entries in this Attribute Returns data from one entry at a time, starting at end and going to beginning. @note: Returned in entry-number order. """ if current is None: current = self.max_idx() while current >= 0: if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current -= 1 def __len__(self): """Number of Entries for this Attr. NOT same as max Entry number. 
@return: Number of Entries @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_NUMENTRIES_, ctypes.byref(count)) return count.value def __repr__(self): """Returns representation of an attribute Cannot return anything that can be eval'd to create a copy of the attribtute, so just wrap the informal representation in angle brackets. @return: all the data in this attribute @rtype: str """ return '<\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute This is an 'informal' representation in that it cannot be evaluated directly to create an L{Attr}. @return: all the data in this attribute @rtype: str """ if self._cdf_file._opened: return '\n'.join([str(item) for item in self]) else: if isinstance(self._name, str): return 'Attribute "{0}" in closed CDF {1}'.format( self._name, self._cdf_file.pathname) else: return 'Attribute "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self._cdf_file.pathname.decode('ascii')) def insert(self, index, data): """Insert an entry at a particular number Inserts entry at particular number while moving all subsequent entries to one entry number later. Does not close gaps. Parameters ========== index : int index where to put the new entry data : data for the new entry """ max_entry = self.max_idx() if index > max_entry: #Easy case self[index] = data return for i in range(max_entry, index - 1, -1): if self.has_entry(i+1): self.__delitem__(i+1) if self.has_entry(i): self.new(self.__getitem__(i), type=self.type(i), number=i+1) self[index] = data def append(self, data): """Add an entry to end of attribute Puts entry after last defined entry (does not fill gaps) Parameters ========== data : data for the new entry """ self[self.max_idx() + 1] = data def _call(self, *args, **kwargs): """Select this CDF and Attr and call the CDF internal interface @param args: Passed directly to the CDF library interface. @type args: various, see :py:mod:`ctypes`. @return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self._cdf_file._call( const.SELECT_, const.ATTR_, ctypes.c_long(self._cdf_file.attr_num(self._name)[0]), *args, **kwargs) def _entry_len(self, number): """Number of elements in an Entry @param number: number of Entry @type number: int @return: number of elements @rtype: int """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') count = ctypes.c_long(0) self._call( const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_NUMELEMS_, ctypes.byref(count)) return count.value def type(self, number, new_type=None): """Find or change the CDF type of a particular Entry number Parameters ========== number : int number of Entry to check or change Other Parameters ================ new_type type to change this Entry to, from :mod:`~pycdf.const`. Omit to only check type. Returns ======= out : int CDF variable type, see :mod:`~pycdf.const` Notes ===== If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) size = ctypes.c_long(self._entry_len(number)) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATASPEC_, new_type, size, ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') cdftype = ctypes.c_long(0) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATATYPE_, ctypes.byref(cdftype), ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') return cdftype.value def has_entry(self, number): """Check if this attribute has a particular Entry number Parameters ========== number : int number of Entry to check or change Returns ======= out : bool True if ``number`` is a valid entry number; False if not """ status = self._call(const.CONFIRM_, self.ENTRY_EXISTENCE_, ctypes.c_long(number), ignore=(const.NO_SUCH_ENTRY, )) return not status == const.NO_SUCH_ENTRY def max_idx(self): """Maximum index of Entries for this Attr Returns ======= out : int maximum Entry number """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_MAXENTRY_, ctypes.byref(count)) return count.value def new(self, data, type=None, number=None): """Create a new Entry in this Attribute .. note:: If ``number`` is provided and an Entry with that number already exists, it will be overwritten. Parameters ========== data data to put in the Entry Other Parameters ================ type : int type of the new Entry, from :mod:`~pycdf.const` (otherwise guessed from ``data``) number : int Entry number to write, default is lowest available number. """ if number is None: number = 0 while self.has_entry(number): number += 1 (dims, types, elements) = _Hyperslice.types( data, backward=self._cdf_file.backward) if type is None: #Guess based on other entries type = self._check_other_entries(types) if type is None and self.ENTRY_ == const.zENTRY_: #Try to match variable type vartype = self._cdf_file[number].type() if vartype in types: type = vartype if type is None: type = types[0] elif hasattr(type, 'value'): type = type.value self._write_entry(number, data, type, dims, elements) def number(self): """Find the attribute number for this attribute Returns ======= out : int attribute number """ no = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.ATTR_NUMBER_, self._name, ctypes.byref(no)) return no.value def global_scope(self): """Determine scope of this attribute. Returns ======= out : bool True if global (i.e. gAttr), False if zAttr """ return self._cdf_file.attr_num(self._name)[1] def rename(self, new_name): """Rename this attribute Renaming a zAttribute renames it for *all* zVariables in this CDF! 
        Parameters
        ==========
        new_name : str
            the new name of the attribute
        """
        try:
            enc_name = new_name.encode('ascii')
        except AttributeError:
            enc_name = new_name
        if len(enc_name) > const.CDF_ATTR_NAME_LEN256:
            raise CDFError(const.BAD_ATTR_NAME)
        self._call(const.PUT_, const.ATTR_NAME_, enc_name)
        self._cdf_file.add_attr_to_cache(
            enc_name,
            *self._cdf_file.attr_num(self._name)) #still in cache
        del self._cdf_file._attr_info[self._name]
        self._name = enc_name

    def _get_entry(self, number):
        """Read an Entry associated with this L{Attr}

        @param number: number of Entry to return
        @type number: int
        @return: data from entry numbered L{number}
        @rtype: list or str
        """
        if not self.has_entry(number):
            raise IndexError('list index ' + str(number) + ' out of range.')
        #Make a big enough buffer
        length = self._entry_len(number)
        cdftype = self.type(number)
        if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value):
            buff = numpy.empty((), 'S{0}'.format(length), order='C')
        else:
            if not cdftype in lib.numpytypedict:
                raise CDFError(const.BAD_DATA_TYPE)
            buff = numpy.empty((length,), lib.numpytypedict[cdftype],
                               order='C')
        buff = numpy.require(buff, requirements=('C', 'A', 'W'))
        self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number),
                   const.GET_, self.ENTRY_DATA_,
                   buff.ctypes.data_as(ctypes.c_void_p))
        #decode
        if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value):
            if str == bytes or self._raw: #Py2k, leave as bytes
                result = bytes(buff)
            else: #Py3k, make unicode
                result = str(numpy.char.array(buff).decode())
        else:
            if not self._raw:
                if cdftype == const.CDF_EPOCH.value:
                    result = lib.v_epoch_to_datetime(buff)
                elif cdftype == const.CDF_EPOCH16.value:
                    result = lib.v_epoch16_to_datetime(buff)
                elif cdftype == const.CDF_TIME_TT2000.value:
                    result = lib.v_tt2000_to_datetime(buff)
                else:
                    result = buff
            else:
                result = buff
            if length == 1:
                result = result[0]
        return result

    def _write_entry(self, number, data, cdf_type, dims, elements):
        """Write an Entry to this Attr.

        @param number: number of Entry to write
        @type number: int
        @param data: data to write
        @param cdf_type: the CDF type to write, from :py:mod:`pycdf.const`
        @param dims: dimensions of L{data}
        @type dims: list
        @param elements: number of elements in L{data}, 1 unless it is a string
        @type elements: int
        """
        if len(dims) == 0:
            n_write = 1
        else:
            n_write = dims[0]
        if cdf_type in (const.CDF_CHAR.value, const.CDF_UCHAR.value):
            data = numpy.require(data, requirements=('C', 'A', 'W'),
                                 dtype=numpy.dtype('S' + str(elements)))
            n_write = elements
        elif cdf_type == const.CDF_EPOCH16.value:
            if not self._raw:
                try:
                    data = lib.v_datetime_to_epoch16(data)
                except AttributeError:
                    pass
            data = numpy.require(data, requirements=('C', 'A', 'W'),
                                 dtype=numpy.float64)
        elif cdf_type == const.CDF_EPOCH.value:
            if not self._raw:
                try:
                    data = lib.v_datetime_to_epoch(data)
                except AttributeError:
                    pass
            data = numpy.require(data, requirements=('C', 'A', 'W'),
                                 dtype=numpy.float64)
        elif cdf_type == const.CDF_TIME_TT2000.value:
            if not self._raw:
                try:
                    data = lib.v_datetime_to_tt2000(data)
                except AttributeError:
                    pass
            data = numpy.require(data, requirements=('C', 'A', 'W'),
                                 dtype=numpy.int64)
        elif cdf_type in lib.numpytypedict:
            data = numpy.require(data, requirements=('C', 'A', 'W'),
                                 dtype=lib.numpytypedict[cdf_type])
        else:
            raise CDFError(const.BAD_DATA_TYPE)
        self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number),
                   const.PUT_, self.ENTRY_DATA_, ctypes.c_long(cdf_type),
                   ctypes.c_long(n_write),
                   data.ctypes.data_as(ctypes.c_void_p))

    def _delete(self):
        """Delete this Attribute

        Also deletes all Entries associated with it.
""" self._call(const.DELETE_, const.ATTR_) self._cdf_file.clear_attr_from_cache(self._name) self._name = None class zAttr(Attr): """zAttribute for zVariables within a CDF. .. warning:: Because zAttributes are shared across all variables in a CDF, directly manipulating them may have unexpected consequences. It is safest to operate on zEntries via :class:`zAttrList`. .. note:: When accessing a zAttr, pyCDF exposes only the zEntry corresponding to the associated zVariable. See Also ======== :class:`Attr` """ ENTRY_ = const.zENTRY_ ENTRY_DATA_ = const.zENTRY_DATA_ SCOPE = const.VARIABLE_SCOPE ENTRY_EXISTENCE_ = const.zENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMzENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXzENTRY_ ENTRY_NUMELEMS_ = const.zENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.zENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.zENTRY_DATASPEC_ def insert(self, index, data): """Insert entry at particular index number Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError def append(self, index, data): """Add entry to end of attribute list Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError class gAttr(Attr): """Global Attribute for a CDF Represents a CDF attribute, providing access to the gEntries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. Normally accessed by providing a key to a :class:`gAttrList`: >>> attribute = cdffile.attrs['attribute_name'] >>> first_gentry = attribute[0] Each element of the list is a single gEntry of the appropriate type. The index to the elements is the gEntry number. A gEntry may be either a single string or a 1D array of numerical type. Entries of numerical type (everything but CDF_CHAR and CDF_UCHAR) with a single element are returned as scalars; multiple-element entries are returned as a list. No provision is made for accessing below the entry level; the whole list is returned at once (but Python's slicing syntax can be used to extract individual items from that list.) Multi-dimensional slicing is *not* supported; an entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry gEntries are *not* necessarily contiguous; a gAttribute may have an entry 0 and entry 2 without an entry 1. :meth:`~Attr.len` will return the *number* of gEntries; use :meth:`~Attr.max_idx` to find the highest defined gEntry number and :meth:`~Attr.has_entry` to determine if a particular gEntry number exists. Iterating over all entries is also supported:: >>> entrylist = [entry for entry in attribute] Deleting gEntries will leave a "hole": >>> attribute[0:3] = [1, 2, 3] >>> del attribute[1] >>> attribute.has_entry(1) False >>> attribute.has_entry(2) True >>> print attribute[0:3] [1, None, 3] Multi-element slices over nonexistent gEntries will return ``None`` where no entry exists. Single-element indices for nonexistent gEntries will raise ``IndexError``. Assigning ``None`` to a gEntry will delete it. 
When assigning to a gEntry, the type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing gEntry of the same number in this gAttribute #. other gEntries in this gAttribute #. data-matching constraints described in :meth:`CDF.new`. See Also ======== :class:`Attr` """ ENTRY_ = const.gENTRY_ ENTRY_DATA_ = const.gENTRY_DATA_ SCOPE = const.GLOBAL_SCOPE ENTRY_EXISTENCE_ = const.gENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMgENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXgENTRY_ ENTRY_NUMELEMS_ = const.gENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.gENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.gENTRY_DATASPEC_ class AttrList(MutableMapping): """Object representing a list of attributes. .. warning:: This class should not be used directly, but only via its subclasses, :class:`gAttrList` and :class:`zAttrList`. Methods listed here are safe to use from the subclasses. .. autosummary:: ~AttrList.clone ~AttrList.copy ~AttrList.from_dict ~AttrList.new ~AttrList.rename .. automethod:: clone .. automethod:: copy .. automethod:: from_dict .. automethod:: new .. automethod:: rename """ def __init__(self, cdf_file, special_entry=None): """Initialize the attribute collection @param cdf_file: CDF these attributes are in @type cdf_file: :py:class:`pycdf.CDF` @param special_entry: callable which returns a "special" entry number, used to limit results for zAttrs to those which match the zVar (i.e. the var number) @type special_entry: callable """ self._cdf_file = cdf_file self.special_entry = special_entry def __getitem__(self, name): """Find an Attribute by name @param name: name of the Attribute to return @type name: str @return: attribute named L{name} @rtype: L{Attr} @raise KeyError: if there is no attribute named L{name} @raise CDFError: other errors in CDF library """ try: attrib = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attrib.global_scope() != self.global_scope: raise KeyError(name + ': no ' + self.attr_name + ' by that name.') return attrib def __setitem__(self, name, data): """Create an Attribute or change its entries @param name: name of Attribute to change @type name: str @param data: Entries to populate this Attribute with. Any existing Entries will be deleted! Another C{Attr} may be specified, in which case all its entries are copied. @type data: scalar, list, or L{Attr} """ if isinstance(data, AttrList): if name in self: del self[name] attr = self._get_or_create(name) for entryno in range(data.max_idx()): if data.has_entry(entryno): attr.new(data[entryno], data.type(entryno), entryno) else: attr = self._get_or_create(name) if isinstance(data, str_classes): data = [data] else: try: junk = len(data) except TypeError: data = [data] attr[:] = data del attr[len(data):] def __delitem__(self, name): """Delete an Attribute (and all its entries) @param name: name of Attribute to delete @type name: str """ try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) attr._delete() def __iter__(self, current=0): """Iterates over all Attr in this CDF or variable Returns name of one L{Attr} at a time until reaches the end. @note: Returned in number order. 
""" count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) while current < count.value: candidate = self.AttrType(self._cdf_file, current) if candidate.global_scope() == self.global_scope: if self.special_entry is None or \ candidate.has_entry(self.special_entry()): if str == bytes: value = yield(candidate._name) else: value = yield(candidate._name.decode()) if value != None: current = self[value].number() current += 1 def __repr__(self): """Returns representation of attribute list Cannot return anything that can be eval'd to create a copy of the list, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<' + self.__class__.__name__ + ':\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute list This is an 'informal' representation in that it cannot be evaluated directly to create an L{AttrList}. @return: all the data in this list of attributes @rtype: str """ if self._cdf_file._opened: return '\n'.join([key + ': ' + ( ('\n' + ' ' * (len(key) + 2)).join( [str(value[i]) + ' [' + lib.cdftypenames[value.type(i)] + ']' for i in range(value.max_idx() + 1) if value.has_entry(i)]) if isinstance(value, Attr) else str(value) + ' [' + lib.cdftypenames[self.type(key)] + ']' ) for (key, value) in sorted(self.items())]) else: if isinstance(self._cdf_file.pathname, str): return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname) else: return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname.decode('ascii')) def clone(self, master, name=None, new_name=None): """ Clones another attribute list, or one attribute from it, into this list. Parameters ========== master : AttrList the attribute list to copy from. This can be any dict-like object. Other Parameters ================ name : str (optional) name of attribute to clone (default: clone entire list) new_name : str (optional) name of the new attribute, default ``name`` """ if name is None: self._clone_list(master) else: self._clone_attr(master, name, new_name) def copy(self): """ Create a copy of this attribute list Returns ======= out : dict copy of the entries for all attributes in this list """ return dict((key, value[:] if isinstance(value, Attr) else value) for (key, value) in self.items()) def new(self, name, data=None, type=None): """ Create a new Attr in this AttrList Parameters ========== name : str name of the new Attribute Other Parameters ================ data data to put into the first entry in the new Attribute type CDF type of the first entry from :mod:`~pycdf.const`. Only used if data are specified. Raises ====== KeyError : if the name already exists in this list """ if name in self: raise KeyError(name + ' already exists.') #A zAttr without an Entry in this zVar will be a "get" not "create" attr = self._get_or_create(name) if data is not None: if self.special_entry is None: attr.new(data, type) else: attr.new(data, type, self.special_entry()) def rename(self, old_name, new_name): """ Rename an attribute in this list Renaming a zAttribute renames it for *all* zVariables in this CDF! Parameters ========== old_name : str the current name of the attribute new_name : str the new name of the attribute """ AttrList.__getitem__(self, old_name).rename(new_name) def from_dict(self, in_dict): """ Fill this list of attributes from a dictionary .. deprecated:: 0.1.5 Use :meth:`~pycdf.AttrList.clone` instead; it supports cloning from dictionaries. 
Parameters ========== in_dict : dict Attribute list is populated entirely from this dictionary; all existing attributes are deleted. """ warnings.warn("from_dict is deprecated and will be removed. Use clone.", DeprecationWarning) for k in in_dict: self[k] = in_dict[k] for k in list(self): if not k in in_dict: del self[k] def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{AttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name self[new_name] = master[name] def _clone_list(self, master): """Clones this attribute list from another @param master: the attribute list to copy from @type master: L{AttrList} """ for name in master: self._clone_attr(master, name) for name in list(self): #Can't iterate over a list we're changing if not name in master: del self[name] def _get_or_create(self, name): """Retrieve L{Attr} or create it if it doesn't exist @param name: name of the attribute to look up or create @type name: str @return: attribute with this name @rtype: L{Attr} """ attr = None try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status != const.NO_SUCH_ATTR: raise if attr is None: attr = self.AttrType(self._cdf_file, name, True) elif attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) return attr class gAttrList(AttrList): """ Object representing *all* the gAttributes in a CDF. Normally accessed as an attribute of an open :class:`CDF`: >>> global_attribs = cdffile.attrs Appears as a dictionary: keys are attribute names; each value is an attribute represented by a :class:`gAttr` object. To access the global attribute TEXT: >>> text_attr = cdffile.attrs['TEXT'] See Also ======== :class:`AttrList` """ AttrType = gAttr attr_name = 'gAttribute' global_scope = True def __len__(self): """ Number of gAttributes in this CDF Returns ======= out : int number of gAttributes in the CDF """ count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMgATTRS_, ctypes.byref(count)) return count.value class zAttrList(AttrList): """Object representing *all* the zAttributes in a zVariable. Normally accessed as an attribute of a :class:`Var` in an open CDF: >>> epoch_attribs = cdffile['Epoch'].attrs Appears as a dictionary: keys are attribute names, values are the value of the zEntry associated with the appropriate zVariable. Each vAttribute in a CDF may only have a *single* entry associated with each variable. The entry may be a string, a single numerical value, or a series of numerical values. Entries with multiple values are returned as an entire list; direct access to the individual elements is not possible. Example: finding the first dependency of (ISTP-compliant) variable ``Flux``: >>> print cdffile['Flux'].attrs['DEPEND_0'] zAttributes are shared among zVariables, one zEntry allowed per zVariable. (pyCDF hides this detail.) Deleting the last zEntry for a zAttribute will delete the underlying zAttribute. zEntries are created and destroyed by the usual dict methods on the zAttrlist: >>> epoch_attribs['new_entry'] = [1, 2, 4] #assign a list to new zEntry >>> del epoch_attribs['new_entry'] #delete the zEntry The type of the zEntry is guessed from data provided. 
The type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing zEntry corresponding to this zVar #. other zEntries in this zAttribute #. the type of this zVar #. data-matching constraints described in :py:meth:`CDF.new` See Also ======== :class:`AttrList` """ AttrType = zAttr attr_name = 'zAttribute' global_scope = False def __init__(self, zvar): """Initialize the attribute collection @param zvar: zVariable these attributes are in @param zvar: :py:class:`pycdf.Var` """ super(zAttrList, self).__init__(zvar.cdf_file, zvar._num) self._zvar = zvar def __getitem__(self, name): """Find an zEntry by name @param name: name of the zAttribute to return @type name: str @return: attribute named L{name} @rtype: L{zAttr} @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if attrib.has_entry(zvar_num): attrib._raw = self._zvar._raw return attrib[zvar_num] else: raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) def __delitem__(self, name): """Delete an zEntry by name @param name: name of the zEntry to delete @type name: str @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library @note: If this is the only remaining entry, the Attribute will be deleted. """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(str(name) + ': no such attribute for variable ' + str(self._zvar._name)) del attrib[zvar_num] if len(attrib) == 0: attrib._delete() def __setitem__(self, name, data): """Sets a zEntry by name The type of the zEntry is guessed from L{data}. The type is chosen to match the data; subject to that constraint, it will try to match (in order): 1. existing zEntry corresponding to this zVar 2. other zEntries in this zAttribute 3. the type of this zVar 4. data-matching constraints described in L{_Hyperslice.types} @param name: name of zAttribute; zEntry for this zVariable will be set in zAttribute by this name @type name: str @raise CDFError: errors in CDF library @raise ValueError: if unable to find a valid CDF type matching L{data}, or if L{data} is the wrong dimensions. """ try: attr = super(zAttrList, self).__getitem__(name) except KeyError: attr = zAttr(self._cdf_file, name, True) attr._raw = self._zvar._raw attr[self._zvar._num()] = data def __len__(self): """Number of zAttributes in this variable @return: number of zAttributes in the CDF which have entries for this variable. @rtype: int """ length = 0 count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) current = 0 while current < count.value: candidate = zAttr(self._cdf_file, current) if not candidate.global_scope(): if candidate.has_entry(self._zvar._num()): length += 1 current += 1 return length def type(self, name, new_type=None): """Find or change the CDF type of a zEntry in this zVar @param name: name of the zAttr to check or change @type name: str @param new_type: type to change it to, see :py:mod:`pycdf.const` @type new_type: ctypes.c_long @return: CDF variable type, see :py:mod:`pycdf.const` @rtype: int @note: If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) return attrib.type(zvar_num, new_type) def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{zAttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name if new_name in self: del self[new_name] self.new(new_name, master[name], master.type(name) if hasattr(master, 'type') else None)
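A minimal usage sketch of the attribute interfaces defined above (gAttrList for file-level attributes, zAttrList for per-variable attributes). This snippet is illustrative only and not part of the pycdf source; the file name 'example.cdf' and the names 'Epoch' and 'UNITS' are assumptions made for the example.

import pycdf  # assumes this module is importable under the name 'pycdf'

with pycdf.CDF('example.cdf') as cdf:        # open an existing CDF read-only
    print(list(cdf.attrs))                   # names of all gAttributes
    for name, attr in cdf.attrs.items():     # each value is a gAttr
        print(name, attr[:])                 # all gEntries; gaps read as None
    # Per-variable attributes: one zEntry per zVariable, accessed like a dict
    units = cdf['Epoch'].attrs.get('UNITS')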
check_status
Raise exception or warning based on return status of CDF call Parameters ========== status : int status returned by the C library Other Parameters ================ ignore : sequence of ctypes.c_long CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. (Default none). Raises ====== CDFError : if status < CDF_WARN, indicating an error Warns ===== CDFWarning : if CDF_WARN <= status < CDF_OK, indicating a warning. Returns ======= out : int status (unchanged)
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
das developers note: This is a modification of the original SpacePy pycdf
package. All references to the greater spacepy package have been removed to
create a small standalone module. --cwp 2018-10-18

The libcdf.so location code has been changed to find the version installed
in anaconda. --cwp 2020-04-06

This package provides a Python interface to the Common Data Format (CDF)
library used for many NASA missions, available at http://cdf.gsfc.nasa.gov/.
It is targeted at Python 2.6+ and should work without change on either
Python 2 or Python 3.

The interface is intended to be 'pythonic' rather than reproducing the
C interface. To open or close a CDF and access its variables, see the
:class:`CDF` class. Accessing data within the variables is via the
:class:`Var` class. The :data:`lib` object provides access to some routines
that affect the functionality of the library in general. The
:mod:`~pycdf.const` module contains constants useful for accessing the
underlying library.

Authors: Jon Niehof

Institution: University of New Hampshire

Contact: [email protected]

Copyright 2010-2015 Los Alamos National Security, LLC.
"""

__contact__ = 'Jon Niehof, [email protected]'

try:
    from collections.abc import MutableMapping, MutableSequence
except ImportError:
    from collections import MutableMapping, MutableSequence
import ctypes
import ctypes.util
import datetime
import operator
import os
import os.path
import shutil
import sys
import tempfile
import warnings
import weakref

import numpy
import numpy.ma

#Import const AFTER library loaded, so failed load doesn't leave half-imported
#from . import const

try:
    str_classes = (str, bytes, unicode)
except NameError:
    str_classes = (str, bytes)


class Library(object):
    """
    Abstraction of the base CDF C library and its state.

    Not normally intended for end-user use. An instance of this class
    is created at package load time as the :data:`~pycdf.lib` variable,
    providing access to the underlying C library if necessary. The CDF
    library itself is described in section 2.1 of the CDF user's guide,
    as well as the CDF C reference manual.

    Calling the C library directly requires knowledge of :mod:`ctypes`.

    Instantiating this object loads the C library, see :doc:`/pycdf` docs
    for details.

    .. autosummary::

        ~Library.call
        ~Library.check_status
        ~Library.datetime_to_epoch
        ~Library.datetime_to_epoch16
        ~Library.datetime_to_tt2000
        ~Library.epoch_to_datetime
        ~Library.epoch_to_epoch16
        ~Library.epoch_to_num
        ~Library.epoch_to_tt2000
        ~Library.epoch16_to_datetime
        ~Library.epoch16_to_epoch
        ~Library.epoch16_to_tt2000
        ~Library.set_backward
        supports_int8
        ~Library.tt2000_to_datetime
        ~Library.tt2000_to_epoch
        ~Library.tt2000_to_epoch16
        v_datetime_to_epoch
        v_datetime_to_epoch16
        v_datetime_to_tt2000
        v_epoch_to_datetime
        v_epoch_to_tt2000
        v_epoch16_to_datetime
        v_epoch16_to_tt2000
        v_tt2000_to_datetime
        v_tt2000_to_epoch
        v_tt2000_to_epoch16
        libpath
        version

    .. automethod:: call
    .. automethod:: check_status
    .. automethod:: datetime_to_epoch
    .. automethod:: datetime_to_epoch16
    .. automethod:: datetime_to_tt2000
    .. automethod:: epoch_to_datetime
    .. automethod:: epoch_to_epoch16
    .. automethod:: epoch_to_num
    .. automethod:: epoch_to_tt2000
    .. automethod:: epoch16_to_datetime
    .. automethod:: epoch16_to_epoch
    .. automethod:: epoch16_to_tt2000
    .. automethod:: set_backward

    .. attribute:: supports_int8

       True if this library supports INT8 and TIME_TT2000 types; else False.

    .. automethod:: tt2000_to_datetime
    .. automethod:: tt2000_to_epoch
    ..
automethod:: tt2000_to_epoch16 .. method:: v_datetime_to_epoch(datetime) A vectorized version of :meth:`datetime_to_epoch` which takes a numpy array of datetimes as input and returns an array of epochs. .. method:: v_datetime_to_epoch16(datetime) A vectorized version of :meth:`datetime_to_epoch16` which takes a numpy array of datetimes as input and returns an array of epoch16. .. method:: v_datetime_to_tt2000(datetime) A vectorized version of :meth:`datetime_to_tt2000` which takes a numpy array of datetimes as input and returns an array of TT2000. .. method:: v_epoch_to_datetime(epoch) A vectorized version of :meth:`epoch_to_datetime` which takes a numpy array of epochs as input and returns an array of datetimes. .. method:: v_epoch_to_tt2000(epoch) A vectorized version of :meth:`epoch_to_tt2000` which takes a numpy array of epochs as input and returns an array of tt2000s. .. method:: v_epoch16_to_datetime(epoch0, epoch1) A vectorized version of :meth:`epoch16_to_datetime` which takes a numpy array of epoch16 as input and returns an array of datetimes. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_epoch16_to_tt2000(epoch16) A vectorized version of :meth:`epoch16_to_tt2000` which takes a numpy array of epoch16 as input and returns an array of tt2000s. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_tt2000_to_datetime(tt2000) A vectorized version of :meth:`tt2000_to_datetime` which takes a numpy array of tt2000 as input and returns an array of datetimes. .. method:: v_tt2000_to_epoch(tt2000) A vectorized version of :meth:`tt2000_to_epoch` which takes a numpy array of tt2000 as input and returns an array of epochs. .. method:: v_tt2000_to_epoch16(tt2000) A vectorized version of :meth:`tt2000_to_epoch16` which takes a numpy array of tt2000 as input and returns an array of epoch16. .. attribute:: libpath The path where pycdf found the CDF C library, potentially useful in debugging. If this contains just the name of a file (with no path information), then the system linker found the library for pycdf. On Linux, ``ldconfig -p`` may be useful for displaying the system's library resolution. .. attribute:: version Version of the CDF library, (version, release, increment, subincrement) """ def __init__(self, libpath=None, library=None): """Load the CDF C library. Searches for the library in the order: 1. Appropriately-named file in CDF_LIB 2. Appropriately-named file in CDF_BASE 3. Standard library search path @raise CDFError: BAD_DATA_TYPE if can't map types properly """ if not 'CDF_TMP' in os.environ: os.environ['CDF_TMP'] = tempfile.gettempdir() if not library: if not libpath: self.libpath, self._library = self._find_lib() if self._library is None: raise Exception(( 'Cannot load CDF C library; checked {0}. 
' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(self.libpath))) else: self._library = ctypes.CDLL(libpath) self.libpath = libpath else: self._library = library self.libpath = libpath self._library.CDFlib.restype = ctypes.c_long #commonly used, so set it up here self._library.EPOCHbreakdown.restype = ctypes.c_long self._library.computeEPOCH.restype = ctypes.c_double self._library.computeEPOCH.argtypes = [ctypes.c_long] * 7 self._library.computeEPOCH16.restype = ctypes.c_double self._library.computeEPOCH16.argtypes = [ctypes.c_long] * 10 + \ [ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDFsetFileBackward'): self._library.CDFsetFileBackward.restype = None self._library.CDFsetFileBackward.argtypes = [ctypes.c_long] #Map old name to the 3.7.1+ name if not hasattr(self._library, 'computeTT2000') \ and hasattr(self._library, 'CDF_TT2000_from_UTC_parts'): self._library.computeTT2000 \ = self._library.CDF_TT2000_from_UTC_parts if hasattr(self._library, 'computeTT2000'): self._library.computeTT2000.restype = ctypes.c_longlong self._library.computeTT2000.argtypes = \ [ctypes.c_double] *9 #Map old name to the 3.7.1+ name if not hasattr(self._library, 'breakdownTT2000') \ and hasattr(self._library, 'CDF_TT2000_to_UTC_parts'): self._library.breakdownTT2000 \ = self._library.CDF_TT2000_to_UTC_parts if hasattr(self._library, 'breakdownTT2000'): self._library.breakdownTT2000.restype = None self._library.breakdownTT2000.argtypes = \ [ctypes.c_longlong] + [ctypes.POINTER(ctypes.c_double)] * 9 if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH'): self._library.CDF_TT2000_to_UTC_EPOCH.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH.argtypes = [ctypes.c_longlong] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH'): self._library.CDF_TT2000_from_UTC_EPOCH.restype = ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH.argtypes = [ctypes.c_double] if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH16'): self._library.CDF_TT2000_to_UTC_EPOCH16.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH16.argtypes = \ [ctypes.c_longlong, ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH16'): self._library.CDF_TT2000_from_UTC_EPOCH16.restype = \ ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH16.argtypes = \ [ctypes.POINTER(ctypes.c_double * 2)] #Get CDF version information ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) sub = ctypes.c_char(b' ') self.call(const.GET_, const.LIB_VERSION_, ctypes.byref(ver), const.GET_, const.LIB_RELEASE_, ctypes.byref(rel), const.GET_, const.LIB_INCREMENT_, ctypes.byref(inc), const.GET_, const.LIB_subINCREMENT_, ctypes.byref(sub)) ver = ver.value rel = rel.value inc = inc.value sub = sub.value self.version = (ver, rel, inc, sub) self._del_middle_rec_bug = ver < 3 or (ver == 3 and (rel < 4 or (rel == 4 and inc < 1))) self.supports_int8 = (ver > 3 or (ver == 3 and rel >=4)) self.cdftypenames = {const.CDF_BYTE.value: 'CDF_BYTE', const.CDF_CHAR.value: 'CDF_CHAR', const.CDF_INT1.value: 'CDF_INT1', const.CDF_UCHAR.value: 'CDF_UCHAR', const.CDF_UINT1.value: 'CDF_UINT1', const.CDF_INT2.value: 'CDF_INT2', const.CDF_UINT2.value: 'CDF_UINT2', const.CDF_INT4.value: 'CDF_INT4', const.CDF_UINT4.value: 'CDF_UINT4', const.CDF_INT8.value: 'CDF_INT8', const.CDF_FLOAT.value: 'CDF_FLOAT', const.CDF_REAL4.value: 'CDF_REAL4', const.CDF_DOUBLE.value: 'CDF_DOUBLE', const.CDF_REAL8.value: 'CDF_REAL8', const.CDF_EPOCH.value: 'CDF_EPOCH', 
const.CDF_EPOCH16.value: 'CDF_EPOCH16', const.CDF_TIME_TT2000.value: 'CDF_TIME_TT2000', } self.numpytypedict = {const.CDF_BYTE.value: numpy.int8, const.CDF_CHAR.value: numpy.int8, const.CDF_INT1.value: numpy.int8, const.CDF_UCHAR.value: numpy.uint8, const.CDF_UINT1.value: numpy.uint8, const.CDF_INT2.value: numpy.int16, const.CDF_UINT2.value: numpy.uint16, const.CDF_INT4.value: numpy.int32, const.CDF_UINT4.value: numpy.uint32, const.CDF_INT8.value: numpy.int64, const.CDF_FLOAT.value: numpy.float32, const.CDF_REAL4.value: numpy.float32, const.CDF_DOUBLE.value: numpy.float64, const.CDF_REAL8.value: numpy.float64, const.CDF_EPOCH.value: numpy.float64, const.CDF_EPOCH16.value: numpy.dtype((numpy.float64, 2)), const.CDF_TIME_TT2000.value: numpy.int64, } self.timetypes = [const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value] if not self.supports_int8: del self.cdftypenames[const.CDF_INT8.value] del self.numpytypedict[const.CDF_INT8.value] del self.cdftypenames[const.CDF_TIME_TT2000.value] del self.numpytypedict[const.CDF_TIME_TT2000.value] elif sys.platform.startswith('linux') \ and os.uname()[4].startswith('arm') \ and hasattr(self._library, 'computeTT2000') \ and self._library.computeTT2000( 2010, 1, 1, 0, 0, 0, 0, 0, 0) != 315576066184000000: #TT2000 call failed, so probably need to type-pun #double arguments to variadic functions. #Calling convention for non-variadic functions with floats #is unique, but convention for ints is same as variadic. #So type-pun arguments to integers to force that calling #convention. if ctypes.sizeof(ctypes.c_longlong) != \ ctypes.sizeof(ctypes.c_double): warnings.warn('ARM with unknown type sizes; ' 'TT2000 functions will not work.') else: self._library.computeTT2000.argtypes = \ [ctypes.c_longlong] * 9 c_ll_p = ctypes.POINTER(ctypes.c_longlong) if self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( 2010)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) != 315576066184000000: warnings.warn('ARM with unknown calling convention; ' 'TT2000 functions will not work.') self.datetime_to_tt2000 = self._datetime_to_tt2000_typepunned v_epoch16_to_datetime = numpy.frompyfunc( self.epoch16_to_datetime, 2, 1) self.v_epoch16_to_datetime = \ lambda x: v_epoch16_to_datetime(x[..., 0], x[..., 1]) self.v_epoch_to_datetime = numpy.frompyfunc( self.epoch_to_datetime, 1, 1) self.v_tt2000_to_datetime = numpy.frompyfunc( self.tt2000_to_datetime, 1, 1) self.v_datetime_to_epoch = numpy.vectorize( self.datetime_to_epoch, otypes=[numpy.float64]) v_datetime_to_epoch16 = numpy.frompyfunc( self.datetime_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. 
We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_datetime_to_epoch16(x): retval = numpy.require(v_datetime_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_datetime_to_epoch16 = _v_datetime_to_epoch16 self.v_datetime_to_tt2000 = numpy.vectorize( self.datetime_to_tt2000, otypes=[numpy.int64]) self.v_epoch_to_tt2000 = numpy.vectorize( self.epoch_to_tt2000, otypes=[numpy.int64]) self.v_tt2000_to_epoch = numpy.vectorize( self.tt2000_to_epoch, otypes=[numpy.float64]) v_epoch16_to_tt2000 = numpy.frompyfunc( self.epoch16_to_tt2000, 2, 1) self.v_epoch16_to_tt2000 = \ lambda x: v_epoch16_to_tt2000(x[..., 0], x[..., 1]) v_tt2000_to_epoch16 = numpy.frompyfunc( self.tt2000_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_tt2000_to_epoch16(x): retval = numpy.require(v_tt2000_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_tt2000_to_epoch16 = _v_tt2000_to_epoch16 if not self.supports_int8: self.datetime_to_tt2000 = self._bad_tt2000 self.tt2000_to_datetime = self._bad_tt2000 self.v_datetime_to_tt2000 = self._bad_tt2000 self.v_tt2000_to_datetime = self._bad_tt2000 self.epoch_to_tt2000 = self._bad_tt2000 self.v_epoch_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch = self._bad_tt2000 self.v_tt2000_to_epoch = self._bad_tt2000 self.epoch_16_to_tt2000 = self._bad_tt2000 self.v_epoch16_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch16 = self._bad_tt2000 self.v_tt2000_to_epoch16 = self._bad_tt2000 #Default to V2 CDF self.set_backward(True) @staticmethod def _find_lib(): """ Search for the CDF library Searches in likely locations for CDF libraries and attempts to load them. Stops at first successful load and, if fails, reports all the files that were tried as libraries. Returns ======= out : tuple This is either (path to library, loaded library) or, in the event of failure, (None, list of libraries tried) """ failed = [] for libpath in Library._lib_paths(): try: lib = ctypes.CDLL(libpath) except: failed.append(libpath) else: return libpath, lib return (failed, None) @staticmethod def _lib_paths(): """Find candidate paths for the CDF library Does not check that the library is actually in any particular directory, just returns a list of possible locations, in priority order. Returns ======= out : generator of str paths that look like the CDF library """ #What the library might be named names = { 'win32': ['cdf.dll'], 'darwin': ['libcdf.dylib', 'cdf.dylib', 'libcdf.so'], 'linux2': ['libcdf.so'], 'linux': ['libcdf.so'], } names = names.get(sys.platform, ['libcdf.so']) #All existing CDF-library-like paths within a directory search_dir = lambda x: \ [os.path.join(x, fname) for fname in names if os.path.exists(os.path.join(x, fname))] # Only use anaconda locations... # Defined during builds ... if 'PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['PREFIX'], 'lib')): yield p # defined when conda is activated ... 
if 'CONDA_PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'lib')): yield p # Special subdirectory for anaconda unix packages on windows if 'LIBRARY_BIN' in os.environ: for p in search_dir(os.environ['LIBRARY_BIN']): yield p ctypespath = ctypes.util.find_library( 'cdf.dll' if sys.platform == 'win32' else 'cdf') if ctypespath: yield ctypespath # MASKED: check_status function (lines 535-571) def call(self, *args, **kwargs): """ Call the CDF internal interface Passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with :meth:`check_status`. Terminal NULL is automatically added to args. Parameters ========== args : various, see :mod:`ctypes` Passed directly to the CDF library interface. Useful constants are defined in the :mod:`~pycdf.const` module. Other Parameters ================ ignore : sequence of CDF statuses sequence of CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. Returns ======= out : int CDF status from the library Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ if 'ignore' in kwargs: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) ), kwargs['ignore']) else: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) )) def set_backward(self, backward=True): """ Set backward compatibility mode for new CDFs Unless backward compatible mode is set, CDF files created by the version 3 library can not be read by V2. Parameters ========== backward : boolean Set backward compatible mode if True; clear it if False. Raises ====== ValueError : if backward=False and underlying CDF library is V2 """ if self.version[0] < 3: if not backward: raise ValueError( 'Cannot disable backward-compatible mode for CDF version 2.') else: return self._library.CDFsetFileBackward(const.BACKWARDFILEon if backward else const.BACKWARDFILEoff) def epoch_to_datetime(self, epoch): """ Converts a CDF epoch value to a datetime Parameters ========== epoch : float epoch value from CDF Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
See Also ======== v_epoch_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) self._library.EPOCHbreakdown(ctypes.c_double(epoch), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999000) else: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, msec.value * 1000) def datetime_to_epoch(self, dt): """ Converts a Python datetime to a CDF Epoch value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : float epoch corresponding to dt See Also ======== v_datetime_to_epoch """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) micro = dt.microsecond % 1000 if micro >= 500 and dt.year < 9999: dt += datetime.timedelta(0, 0, 1000) return self._library.computeEPOCH(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000)) def epoch16_to_datetime(self, epoch0, epoch1): """ Converts a CDF epoch16 value to a datetime .. note:: The call signature has changed since SpacePy 0.1.2. Formerly this method took a single argument with two values; now it requires two arguments (one for each value). To convert existing code, replace ``epoch16_to_datetime(epoch)`` with ``epoch16_to_datetime(*epoch)``. Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. See Also ======== v_epoch16_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) usec = ctypes.c_long(0) nsec = ctypes.c_long(0) psec = ctypes.c_long(0) self._library.EPOCH16breakdown((ctypes.c_double * 2)(epoch0, epoch1), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec), ctypes.byref(psec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) micro = int(float(msec.value) * 1000 + float(usec.value) + float(nsec.value) / 1000 + float(psec.value) / 1e6 + 0.5) if micro < 1000000: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_epoch16(self, dt): """ Converts a Python datetime to a CDF Epoch16 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : list of float epoch16 corresponding to dt See Also ======== v_datetime_to_epoch16 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) #Default to "illegal epoch" epoch16 = (ctypes.c_double * 2)(-1., -1.) 
if self._library.computeEPOCH16(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0, 0, epoch16): return (-1., -1.) #Failure, so illegal epoch return (epoch16[0], epoch16[1]) def epoch_to_epoch16(self, epoch): """ Converts a CDF EPOCH to a CDF EPOCH16 value Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : (double, double) EPOCH16 corresponding to epoch """ e = numpy.require(epoch, numpy.float64) s = numpy.trunc(e / 1000.0) #ugly numpy stuff, probably a better way.... res = numpy.hstack((s, (e - s * 1000.0) * 1e9)) if len(res) <= 2: return res newshape = list(res.shape[0:-2]) newshape.append(res.shape[-1] // 2) newshape.append(2) return numpy.rollaxis(res.reshape(newshape), -1, -2) def epoch_to_num(self, epoch): """ Convert CDF EPOCH to matplotlib number. Same output as :func:`~matplotlib.dates.date2num` and useful for plotting large data sets without converting the times through datetime. Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : double Floating point number representing days since 0001-01-01. """ #date2num day 1 is 1/1/1 00UT #epoch 1/1/1 00UT is 31622400000.0 (millisecond) return (epoch - 31622400000.0) / (24 * 60 * 60 * 1000.0) + 1.0 def epoch16_to_epoch(self, epoch16): """ Converts a CDF EPOCH16 to a CDF EPOCH value Parameters ========== epoch16 : (double, double) EPOCH16 to convert. Lists and numpy arrays are acceptable. LAST dimension should be 2: the two pairs of EPOCH16 Returns ======= out : double EPOCH corresponding to epoch16 """ e = numpy.require(epoch16, numpy.float64) return e[..., 0] * 1000.0 + numpy.round(e[..., 1] / 1e9) def tt2000_to_datetime(self, tt2000): """ Converts a CDF TT2000 value to a datetime .. note:: Although TT2000 values support leapseconds, Python's datetime object does not. Any times after 23:59:59.999999 will be truncated to 23:59:59.999999. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
See Also ======== v_tt2000_to_datetime """ yyyy = ctypes.c_double(0) mm = ctypes.c_double(0) dd = ctypes.c_double(0) hh = ctypes.c_double(0) min = ctypes.c_double(0) sec = ctypes.c_double(0) msec = ctypes.c_double(0) usec = ctypes.c_double(0) nsec = ctypes.c_double(0) self._library.breakdownTT2000( ctypes.c_longlong(tt2000), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) sec = int(sec.value) if sec >= 60: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), 59, 999999) micro = int(msec.value * 1000 + usec.value + nsec.value / 1000 + 0.5) if micro < 1000000: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_tt2000(self, dt): """ Converts a Python datetime to a CDF TT2000 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0) def _datetime_to_tt2000_typepunned(self, dt): """ Converts a Python datetime to a CDF TT2000 value Typepunned version that passes doubles as longlongs, to get around ARM calling convention oddness. Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ c_ll_p = ctypes.POINTER(ctypes.c_longlong) if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( dt.year)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.month)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.day)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.hour)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.minute)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.second)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond // 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond % 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) def epoch_to_tt2000(self, epoch): """ Converts a CDF EPOCH to a CDF TT2000 value Parameters ========== epoch : double EPOCH to convert Returns ======= out : int tt2000 corresponding to epoch See Also ======== v_epoch_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH(epoch) def tt2000_to_epoch(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH .. note:: Although TT2000 values support leapseconds, CDF EPOCH values do not. 
Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double EPOCH corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch """ return self._library.CDF_TT2000_to_UTC_EPOCH(tt2000) def epoch16_to_tt2000(self, epoch0, epoch1): """ Converts a CDF epoch16 value to TT2000 .. note:: Because TT2000 does not support picoseconds, the picoseconds value in epoch is ignored (i.e., truncated.) Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : long TT2000 corresponding to epoch. See Also ======== v_epoch16_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH16( (ctypes.c_double * 2)(epoch0, epoch1)) def tt2000_to_epoch16(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH16 .. note:: Although TT2000 values support leapseconds, CDF EPOCH16 values do not. Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double, double EPOCH16 corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch16 """ #Default to "illegal epoch" if isn't populated epoch16 = (ctypes.c_double * 2)(-1., -1.) if self._library.CDF_TT2000_to_UTC_EPOCH16(tt2000, epoch16): return (-1., -1.) #Failure; illegal epoch return (epoch16[0], epoch16[1]) def _bad_tt2000(*args, **kwargs): """Convenience function for complaining that TT2000 not supported""" raise NotImplementedError( 'TT2000 functions require CDF library 3.4.0 or later') def download_library(): """Download and install the CDF library""" if sys.platform != 'win32': raise NotImplementedError( 'CDF library install only supported on Windows') try: import html.parser as HTMLParser except ImportError: import HTMLParser #https://stackoverflow.com/questions/1713038/super-fails-with-error-typeerror-argument-1-must-be-type-not-classobj class LinkParser(HTMLParser.HTMLParser, object): def __init__(self, *args, **kwargs): self.links_found = [] super(LinkParser, self).__init__(*args, **kwargs) def handle_starttag(self, tag, attrs): if tag != 'a' or attrs[0][0] != 'href': return self.links_found.append(attrs[0][1]) import re import subprocess try: import urllib.request as u except ImportError: import urllib as u # Removed reference to spacepy #import spacepy #if spacepy.config.get('user_agent', None): # class AppURLopener(u.FancyURLopener): # version = spacepy.config['user_agent'] # u._urlopener = AppURLopener() baseurl = 'https://spdf.sci.gsfc.nasa.gov/pub/software/cdf/dist/' url = u.urlopen(baseurl) listing = url.read() url.close() p = LinkParser() p.feed(listing) cdfdist = [l for l in p.links_found if re.match('^cdf3\d_\d(?:_\d)?/$', l)] if not cdfdist: raise RuntimeError( "Couldn't find CDF distribution directory to download") cdfdist.sort(key=lambda x: x.rstrip('/').split('_')) cdfverbase = cdfdist[-1].rstrip('/') instfname = cdfverbase + ('_0' if cdfverbase.count('_') == 1 else '') + \ '-setup-{0}.exe'.format(len('%x' % sys.maxsize)*4) insturl = baseurl + cdfverbase + '/windows/' + instfname tmpdir = tempfile.mkdtemp() try: fname, status = u.urlretrieve(insturl, os.path.join(tmpdir, instfname)) subprocess.check_call([fname, '/install', '/q1'], shell=False) finally: shutil.rmtree(tmpdir) _libpath, _library = Library._find_lib() 
if _library is None: raise Exception(('Cannot load CDF C library; checked {0}. ' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(_libpath))) from . import const lib = Library(_libpath, _library) """Module global library object. Initalized at module load time so all classes have ready access to the CDF library and a common state. E.g: >>> import pycdf >>> pycdf.lib.version (3, 3, 0, ' ') """ class CDFException(Exception): """ Base class for errors or warnings in the CDF library. Not normally used directly, but in subclasses :class:`CDFError` and :class:`CDFWarning`. Error messages provided by this class are looked up from the underlying C library. """ def __init__(self, status): """ Create a CDF Exception Uses CDF C library to look up an appropriate error message. Parameters ========== status : ctypes.c_long CDF status """ self.status = status self.string = 'CDF error ' + repr(status) + ', unable to get details.' message = ctypes.create_string_buffer(const.CDF_STATUSTEXT_LEN + 1) try: retval = lib._library.CDFlib(const.SELECT_, const.CDF_STATUS_, ctypes.c_long(status), const.GET_, const.STATUS_TEXT_, message, const.NULL_) if retval == const.CDF_OK: if isinstance(message.value, str): self.string = message.value elif isinstance(message.value, bytes): self.string = message.value.decode() except: pass def __str__(self): """ Error string associated with the library error. Returns ======= out : str Error message from the CDF library. """ return self.string class CDFError(CDFException): """Raised for an error in the CDF library.""" pass class CDFWarning(CDFException, UserWarning): """Used for a warning in the CDF library.""" def warn(self, level=4): """ Issues a warning based on the information stored in my exception Intended for use in check_status or similar wrapper function. Other Parameters ================ level : int optional (default 3), how far up the stack the warning should be reported. Passed directly to :class:`warnings.warn`. """ warnings.warn(self, self.__class__, level) class EpochError(Exception): """Used for errors in epoch routines""" pass def _compress(obj, comptype=None, param=None): """Set or check the compression of a :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param obj: object on which to set or check compression @type obj: :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param comptype: type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :py:mod:`pycdf.const`. If not specified, will not change compression. @type comptype: ctypes.c_long @param param: Compression parameter, see CDF CRM 4.10 and :py:mod:`pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
@type param: ctypes.c_long @return: (comptype, param) currently in effect @rtype: tuple """ if isinstance(obj, CDF): COMPRESSION_ = const.CDF_COMPRESSION_ elif isinstance(obj, Var): COMPRESSION_ = const.zVAR_COMPRESSION_ else: raise ValueError('Must specify a CDF or Var type.') validparams = {const.NO_COMPRESSION.value: [ctypes.c_long(0)], const.RLE_COMPRESSION.value: [const.RLE_OF_ZEROs], const.HUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.AHUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.GZIP_COMPRESSION.value: [ctypes.c_long(5), ctypes.c_long(1), ctypes.c_long(2), ctypes.c_long(3), ctypes.c_long(4), ctypes.c_long(6), ctypes.c_long(7), ctypes.c_long(8), ctypes.c_long(9), ], } comptypes = [const.NO_COMPRESSION, const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION, const.GZIP_COMPRESSION] comptypevalues = [i.value for i in comptypes] if comptype != None: if not hasattr(comptype, 'value'): comptype = ctypes.c_long(comptype) if param is None: if not comptype.value in validparams: raise CDFError(const.BAD_COMPRESSION) param = validparams[comptype.value][0] paramlist = (ctypes.c_long * 1)(param) obj._call(const.PUT_, COMPRESSION_, comptype, paramlist) params = (ctypes.c_long * const.CDF_MAX_PARMS)(*([0] * const.CDF_MAX_PARMS)) comptype = ctypes.c_long(0) percent = ctypes.c_long(0) obj._call(const.GET_, COMPRESSION_, ctypes.byref(comptype), ctypes.byref(params), ctypes.byref(percent)) param = params[0] if not comptype.value in comptypevalues: raise CDFError(const.BAD_COMPRESSION) validparamvalues = [i.value for i in validparams[comptype.value]] if not param in validparamvalues: raise CDFError(const.BAD_COMPRESSION_PARM) comptype = comptypes[comptypevalues.index(comptype.value)] if comptype in (const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION): param = validparams[comptype.value][validparamvalues.index(param)] return (comptype, param) class CDF(MutableMapping): """ Python object representing a CDF file. Open or create a CDF file by creating an object of this class. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. A readonly CDF with many variables may be slow to close. See :meth:`readonly`. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :meth:`close` or :meth:`save` when done. .. note:: Existing CDF files are opened read-only by default, see :meth:`readonly` to change. CDF supports the `with <http://docs.python.org/tutorial/inputoutput.html#methods-of-file-objects>`_ keyword, like other file objects, so: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... #do brilliant things with the CDF will open the CDF, execute the indented statements, and close the CDF when finished or when an error occurs. The `python docs <http://docs.python.org/reference/compound_stmts.html#with>`_ include more detail on this 'context manager' ability. 
CDF objects behave like a python `dictionary <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_, where the keys are names of variables in the CDF, and the values, :class:`Var` objects. As a dictionary, they are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_ and it is easy to loop over all of the variables in a file. Some examples: #. List the names of all variables in the open CDF ``cdffile``: >>> cdffile.keys() >>> for k in cdffile: #Alternate ... print(k) #. Get a :class:`Var` object for the variable named ``Epoch``: >>> epoch = cdffile['Epoch'] #. Determine if a CDF contains a variable named ``B_GSE``: >>> if 'B_GSE' in cdffile: ... print('B_GSE is in the file') ... else: ... print('B_GSE is not in the file') #. Find how many variables are in the file: >>> print(len(cdffile)) #. Delete the variable ``Epoch`` from the open CDF file ``cdffile``: >>> del cdffile['Epoch'] #. Display a summary of variables and types in open CDF file ``cdffile``: >>> print(cdffile) #. Open the CDF named ``cdf_filename.cdf``, read *all* the data from all variables into dictionary ``data``, and close it when done or if an error occurs: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... data = cdffile.copy() This last example can be very inefficient as it reads the entire CDF. Normally it's better to treat the CDF as a dictionary and access only the data needed, which will be pulled transparently from disc. See :class:`Var` for more subtle examples. Potentially useful dictionary methods and related functions: - `in <http://docs.python.org/reference/expressions.html#in>`_ - `keys <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_ - :py:func:`len` - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - :py:func:`sorted` - :py:func:`~spacepy.toolbox.dictree` The CDF user's guide section 2.2 has more background information on CDF files. The :attr:`~CDF.attrs` Python attribute acts as a dictionary referencing CDF attributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`gAttrList` for more on the dictionary of global attributes. Creating a new CDF from a master (skeleton) CDF has similar syntax to opening one: >>> cdffile = pycdf.CDF('cdf_filename.cdf', 'master_cdf_filename.cdf') This creates and opens ``cdf_filename.cdf`` as a copy of ``master_cdf_filename.cdf``. Using a skeleton CDF is recommended over making a CDF entirely from scratch, but this is possible by specifying a blank master: >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') When CDFs are created in this way, they are opened read-write, see :py:meth:`readonly` to change. By default, new CDFs (without a master) are created in version 2 (backward-compatible) format. To create a version 3 CDF, use :meth:`Library.set_backward`: >>> pycdf.lib.set_backward(False) >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') Add variables by direct assignment, which will automatically set type and dimension based on the data provided: >>> cdffile['new_variable_name'] = [1, 2, 3, 4] or, if more control is needed over the type and dimensions, use :py:meth:`new`. Although it is supported to assign Var objects to Python variables for convenience, there are some minor pitfalls that can arise when changing a CDF that will not affect most users. 
This is only a concern when assigning a zVar object to a Python variable, changing the CDF through some other variable, and then trying to use the zVar object via the originally assigned variable. Deleting a variable: >>> var = cdffile['Var1'] >>> del cdffile['Var1'] >>> var[0] #fail, no such variable Renaming a variable: >>> var = cdffile['Var1'] >>> cdffile['Var1'].rename('Var2') >>> var[0] #fail, no such variable Renaming via the same variable works: >>> var = cdffile['Var1'] >>> var.rename('Var2') >>> var[0] #succeeds, aware of new name Deleting a variable and then creating another variable with the same name may lead to some surprises: >>> var = cdffile['Var1'] >>> var[...] = [1, 2, 3, 4] >>> del cdffile['Var1'] >>> cdffile.new('Var1', data=[5, 6, 7, 8] >>> var[...] [5, 6, 7, 8] .. autosummary:: ~CDF.attr_num ~CDF.attrs ~CDF.add_attr_to_cache ~CDF.add_to_cache ~CDF.backward ~CDF.checksum ~CDF.clear_attr_from_cache ~CDF.clear_from_cache ~CDF.clone ~CDF.close ~CDF.col_major ~CDF.compress ~CDF.copy ~CDF.from_data ~CDF.new ~CDF.raw_var ~CDF.readonly ~CDF.save ~CDF.var_num ~CDF.version .. attribute:: CDF.attrs Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. .. attribute:: CDF.backward True if this CDF was created in backward-compatible mode (for opening with CDF library before 3.x) .. automethod:: add_to_cache .. automethod:: add_attr_to_cache .. automethod:: attr_num .. automethod:: checksum .. automethod:: clear_from_cache .. automethod:: clear_attr_from_cache .. automethod:: clone .. automethod:: close .. automethod:: col_major .. automethod:: compress .. automethod:: copy .. automethod:: from_data .. automethod:: new .. automethod:: raw_var .. automethod:: readonly .. automethod:: save .. automethod:: var_num .. automethod:: version """ def __init__(self, pathname, masterpath=None, create=None, readonly=None): """Open or create a CDF file. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. Raises ====== CDFError if CDF library reports an error CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save` when done. 
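To create a new, empty CDF instead of opening an existing file, pass
``create=True`` (or an empty master path). A minimal sketch, using a
hypothetical filename:

>>> newfile = pycdf.CDF('new_file.cdf', create=True)

A CDF created this way is opened read/write; see
:py:meth:`pycdf.CDF.readonly` to change that later.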
""" if masterpath is not None: #Looks like we want to create if create is False: raise ValueError('Cannot specify a master CDF without creating a CDF') if readonly is True: raise ValueError('Cannot create a CDF in readonly mode') if create and readonly: raise ValueError('Cannot create a CDF in readonly mode') try: self.pathname = pathname.encode() except AttributeError: raise ValueError( 'pathname must be string-like: {0}'.format(pathname)) self._handle = ctypes.c_void_p(None) self._opened = False if masterpath is None and not create: self._open(True if readonly is None else readonly) elif masterpath: self._from_master(masterpath.encode()) else: self._create() lib.call(const.SELECT_, const.CDF_zMODE_, ctypes.c_long(2)) self._attrlistref = weakref.ref(gAttrList(self)) self.backward = self.version()[0] < 3 self._var_nums = {} """Cache of name-to-number mappings for variables in this CDF""" self._attr_info = {} """Cache of name-to-(number, global) mappings for attributes in this CDF""" def __del__(self): """Destructor; called when CDF object is destroyed. Close CDF file if there is still a valid handle. .. note:: To avoid data loss, explicitly call :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save`. """ if self._opened: self.close() def __delitem__(self, name): """Delete a zVariable in this CDF, by name or number Parameters ========== name : string or int Name or number of the CDF variable .. note: Variable numbers may change if variables are added or removed. Examples ======== Delete the variable ``Epoch`` from the open CDF file ``cdffile``. >>> del cdffile['Epoch'] """ self[name]._delete() def __enter__(self): """Context manager entrance function.""" return self def __exit__(self, type, value, traceback): """Context manager exit function. Close CDF file. """ self.close() def __getitem__(self, name): """Gets a zVariable in this CDF, by name or number The CDF acts like a dict @param name: Name or number of the CDF variable @type name: string or int @return: CDF variable named or numbered L{name} @rtype: :py:class:`pycdf.Var` @raise KeyError: for pretty much any problem in lookup @note: variable numbers may change if variables are added or removed. """ try: return Var(self, name) except CDFException as e: raise KeyError('{0}: {1}'.format(name, e)) def __setitem__(self, name, data): """Writes data to a zVariable in this CDF If the zVariable does not exist, will create one matching L{data}. If it does exist, will attempt to write L{data} to it without changing the type or dimensions. @param name: name or number of the variable to write @type name: str or int @param data: data to write, or a :py:class:`pycdf.Var` to copy """ if isinstance(data, Var): self.clone(data, name) elif name in self: self[name][...] 
= data if hasattr(data, 'attrs'): self[name].attrs.clone(data.attrs) else: self.new(name, data) def __iter__(self, current = 0): """Iterates over zVars in CDF Iterators for dicts return keys @note: Returned in variable-number order """ while current < self.__len__(): name = self[current].name() value = (yield name) if value is None: current += 1 else: current = self[value]._num() current += 1 def __len__(self): """Implements 'length' of CDF (number of zVars) @return: number of zVars in the CDF @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, const.CDF_NUMzVARS_, ctypes.byref(count)) return count.value def __contains__(self, key): """Determines whether a particular variable name is in the CDF @note: Essentially an efficiency function; L{__iter__} is called if this isn't defined @param key: key/variable name to check @type key: string @return: True if L{key} is the name of a variable in CDF, else False @rtype: Boolean """ try: foo = self[key] return True except KeyError as e: expected = str(key) + \ ": NO_SUCH_VAR: Named variable not found in this CDF." if expected in e.args: return False raise def __repr__(self): """Returns representation of CDF Cannot return anything that can be eval'd to create a copy of the CDF, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<CDF:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the CDF This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.CDF`, just the names, types, and sizes of all variables. (Attributes are not listed.) @return: description of the variables in the CDF @rtype: str """ if self._opened: return '\n'.join([key + ': ' + str(value) for (key, value) in sorted(self.items())]) #can get away with this sort because second value in tuple isn't #compared unless first are different, and variable name is unique. else: if isinstance(self.pathname, str): return 'Closed CDF {0}'.format(self.pathname) else: return 'Closed CDF {0}'.format(self.pathname.decode('ascii')) def _open(self, readonly=True): """Opens the CDF file (called on init) Will open an existing CDF file read/write. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.OPEN_, const.CDF_, self.pathname, ctypes.byref(self._handle)) self._opened = True if readonly: #Default is RW self.readonly(readonly) def _create(self): """Creates (and opens) a new CDF file Created at ``pathname``. Assumes zero-dimension r variables Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.CREATE_, const.CDF_, self.pathname, ctypes.c_long(0), (ctypes.c_long * 1)(0), ctypes.byref(self._handle)) self._opened = True def _from_master(self, master_path): """Creates a new CDF from a master CDF file ``master_path`` is copied to ``pathname`` and opened. Parameters ========== master_path : string location of the master CDF file Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. 
note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ if os.path.exists(self.pathname): raise CDFError(const.CDF_EXISTS) shutil.copy2(master_path, self.pathname) self._open(False) def _call(self, *args, **kwargs): """Select this CDF as current and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. Parameters ========== args : various, see :py:mod:`ctypes`. Passed directly to the CDF library interface. Useful constants are defined in the :doc:`const <pycdf_const>` module of this package. Returns ======= out : ctypes.c_long CDF status from the library .. note: Terminal NULL_ is automatically added to ``args``. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. """ return lib.call(const.SELECT_, const.CDF_, self._handle, *args, **kwargs) def clone(self, zVar, name=None, data=True): """ Clone a zVariable (from another CDF or this) into this CDF Parameters ========== zVar : :py:class:`Var` variable to clone Other Parameters ================ name : str Name of the new variable (default: name of the original) data : boolean (optional) Copy data, or only type, dimensions, variance, attributes? (default: True, copy data as well) Returns ======= out : :py:class:`Var` The newly-created zVar in this CDF """ if name is None: name = zVar.name() if name in self: del self[name] self.new(name, type=zVar.type(), recVary=zVar.rv(), dimVarys=zVar.dv(), dims=zVar._dim_sizes(), n_elements=zVar._nelems()) self[name].compress(*zVar.compress()) self[name].attrs.clone(zVar.attrs) if data: r = zVar._raw zVar._raw = True self.raw_var(name)[...] = zVar[...] zVar._raw = r return zVar def col_major(self, new_col=None): """ Finds the majority of this CDF file Other Parameters ================ new_col : boolean Specify True to change to column-major, False to change to row major, or do not specify to check the majority rather than changing it. (default is check only) Returns ======= out : boolean True if column-major, false if row-major """ if new_col != None: new_maj = const.COLUMN_MAJOR if new_col else const.ROW_MAJOR self._call(const.PUT_, const.CDF_MAJORITY_, new_maj) maj = ctypes.c_long(0) self._call(const.GET_, const.CDF_MAJORITY_, ctypes.byref(maj)) if not maj.value in (const.ROW_MAJOR.value, const.COLUMN_MAJOR.value): raise CDFError(const.BAD_MAJORITY) return maj.value == const.COLUMN_MAJOR.value def readonly(self, ro=None): """ Sets or check the readonly status of this CDF If the CDF has been changed since opening, setting readonly mode will have no effect. .. note:: Closing a CDF that has been opened readonly, or setting readonly False, may take a substantial amount of time if there are many variables in the CDF, as a (potentially large) cache needs to be cleared. Consider specifying ``readonly=False`` when opening the file if this is an issue. However, this may make some reading operations slower. Other Parameters ================ ro : Boolean True to set the CDF readonly, False to set it read/write, or leave out to check only. 
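For instance, assuming ``cdffile`` is an open CDF object,
``cdffile.readonly(False)`` re-enables writing and returns the new
status (``False``), while ``cdffile.readonly()`` only reports the
current status without changing it.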
Returns ======= out : Boolean True if CDF is read-only, else False Raises ====== CDFError : if bad mode is set """ if ro == True: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYon) elif ro == False: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYoff) mode = ctypes.c_long(0) self._call(const.CONFIRM_, const.CDF_READONLY_MODE_, ctypes.byref(mode)) if mode.value == const.READONLYon.value: return True elif mode.value == const.READONLYoff.value: return False else: raise CDFError(const.BAD_READONLY_MODE.value) def checksum(self, new_val=None): """ Set or check the checksum status of this CDF. If checksums are enabled, the checksum will be verified every time the file is opened. Other Parameters ================ new_val : boolean True to enable checksum, False to disable, or leave out to simply check. Returns ======= out : boolean True if the checksum is enabled or False if disabled """ if new_val != None: self._call(const.PUT_, const.CDF_CHECKSUM_, const.MD5_CHECKSUM if new_val else const.NO_CHECKSUM) chk = ctypes.c_long(0) self._call(const.GET_, const.CDF_CHECKSUM_, ctypes.byref(chk)) if not chk.value in (const.MD5_CHECKSUM.value, const.NO_CHECKSUM.value): raise CDFError(const.BAD_CHECKSUM) return chk.value == const.MD5_CHECKSUM.value def close(self): """ Closes the CDF file Although called on object destruction (:meth:`~CDF.__del__`), to ensure all data are saved, the user should explicitly call :meth:`~CDF.close` or :meth:`~CDF.save`. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.CLOSE_, const.CDF_) self._opened = False def compress(self, comptype=None, param=None): """ Set or check the compression of this CDF Sets compression on entire *file*, not per-variable. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) Returns ======= out : tuple (comptype, param) currently in effect See Also ======== :meth:`Var.compress` Examples ======== Set file ``cdffile`` to gzip compression, compression level 9: >>> cdffile.compress(pycdf.const.GZIP_COMPRESSION, 9) """ return _compress(self, comptype, param) def new(self, name, data=None, type=None, recVary=True, dimVarys=None, dims=None, n_elements=None, compress=None, compress_param=None): """ Create a new zVariable in this CDF .. note:: Either ``data`` or ``type`` must be specified. If type is not specified, it is guessed from ``data``. Parameters ========== name : str name of the new variable Other Parameters ================ data data to store in the new variable. If this has a an ``attrs`` attribute (e.g., :class:`~spacepy.datamodel.dmarray`), it will be used to populate attributes of the new variable. type : ctypes.c_long CDF type of the variable, from :mod:`~pycdf.const`. See section 2.5 of the CDF user's guide for more information on CDF data types. recVary : boolean record variance of the variable (default True) dimVarys : list of boolean dimension variance of each dimension, default True for all dimensions. 
dims : list of int size of each dimension of this variable, default zero-dimensional. Note this is the dimensionality as defined by CDF, i.e., for record-varying variables it excludes the leading record dimension. See :py:class:`Var`. n_elements : int number of elements, should be 1 except for CDF_CHAR, for which it's the length of the string. compress : ctypes.c_long Compression to apply to this variable, default None. See :py:meth:`Var.compress`. compress_param : ctypes.c_long Compression parameter if compression used; reasonable default is chosen. See :py:meth:`Var.compress`. Returns ======= out : :py:class:`Var` the newly-created zVariable Raises ====== ValueError : if neither data nor sufficient typing information is provided. Notes ===== Any given data may be representable by a range of CDF types; if the type is not specified, pycdf will guess which the CDF types which can represent this data. This breaks down to: #. If input data is a numpy array, match the type of that array #. Proper kind (numerical, string, time) #. Proper range (stores highest and lowest number provided) #. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: #. Type that matches precision of data first, then #. integer type before float type, then #. Smallest type first, then #. signed type first, then #. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if ``data`` specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: #. absolute values between 0 and 3e-39 #. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. """ if type in (const.CDF_EPOCH16, const.CDF_INT8, const.CDF_TIME_TT2000) \ and self.backward: raise ValueError('Cannot use EPOCH16, INT8, or TIME_TT2000 ' 'in backward-compatible CDF') if not lib.supports_int8 and \ type in (const.CDF_INT8, const.CDF_TIME_TT2000): raise ValueError('INT8 and TIME_TT2000 require CDF library 3.4.0') if data is None: if type is None: raise ValueError('Must provide either data or a CDF type.') if dims is None: dims = [] if n_elements is None: n_elements = 1 else: (guess_dims, guess_types, guess_elements) = _Hyperslice.types(data) if dims is None: if recVary: if guess_dims == (): raise ValueError( 'Record-varying data cannot be scalar. 
' 'Specify NRV with CDF.new() or put data in array.') dims = guess_dims[1:] else: dims = guess_dims if type is None: type = guess_types[0] if type == const.CDF_EPOCH16.value and self.backward: type = const.CDF_EPOCH if n_elements is None: n_elements = guess_elements if dimVarys is None: dimVarys = [True for i in dims] recVary = const.VARY if recVary else const.NOVARY dimVarys = [const.VARY if dimVary else const.NOVARY for dimVary in dimVarys] if not hasattr(type, 'value'): type = ctypes.c_long(type) if type.value == const.CDF_INT8.value and not lib.supports_int8: raise ValueError( '64-bit integer support require CDF library 3.4.0') if type.value in (const.CDF_EPOCH16.value, const.CDF_INT8.value, const.CDF_TIME_TT2000.value) \ and self.backward: raise ValueError('Data requires EPOCH16, INT8, or TIME_TT2000; ' 'incompatible with backward-compatible CDF') new_var = Var(self, name, type, n_elements, dims, recVary, dimVarys) if compress != None: new_var.compress(compress, compress_param) if data is not None: new_var[...] = data if hasattr(data, 'attrs'): new_var.attrs.clone(data.attrs) return new_var def raw_var(self, name): """ Get a "raw" :class:`Var` object. Normally a :class:`Var` will perform translation of values for certain types (to/from Unicode for CHAR variables on Py3k, and to/from datetime for all time types). A "raw" object does not perform this translation, on read or write. This does *not* affect the data on disk, and in fact it is possible to maintain multiple Python objects with access to the same zVariable. Parameters ========== name : str name or number of the zVariable """ v = self[name] v._raw = True return v def save(self): """ Saves the CDF file but leaves it open. If closing the CDF, :meth:`close` is sufficient; there is no need to call :meth:`save` before :meth:`close`. .. note:: Relies on an undocumented call of the CDF C library, which is also used in the Java interface. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.SAVE_, const.CDF_) def copy(self): """ Make a copy of all data and attributes in this CDF Returns ======= out : :py:class:`CDFCopy` :class:`~spacepy.datamodel.SpaceData`-like object of all data """ return CDFCopy(self) def version(self): """ Get version of library that created this CDF Returns ======= out : tuple version of CDF library, in form (version, release, increment) """ ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) self._call(const.GET_, const.CDF_VERSION_, ctypes.byref(ver), const.GET_, const.CDF_RELEASE_, ctypes.byref(rel), const.GET_, const.CDF_INCREMENT_, ctypes.byref(inc)) return (ver.value, rel.value, inc.value) def _get_attrs(self): """Get attribute list Provide access to the CDF's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = gAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. 
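A short usage sketch, assuming the CDF is open read/write and using a
hypothetical attribute name:

>>> cdffile.attrs['Project'] = 'My project'
>>> print(cdffile.attrs['Project'])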
""") def var_num(self, varname): """Get the variable number of a particular variable name This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : int Variable number of this zvariable. """ num = self._var_nums.get(varname, None) if num is None: #Copied from Var._get, which can hopefully be thinned varNum = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMBER_, varname, ctypes.byref(varNum)) num = varNum.value self._var_nums[varname] = num return num def attr_num(self, attrname): """Get the attribute number and scope by attribute name This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : tuple attribute number, scope (True for global) of this attribute """ res = self._attr_info.get(attrname, None) if res is None: #Copied from Var._get, which can hopefully be thinned attrNum = ctypes.c_long(0) self._call(const.GET_, const.ATTR_NUMBER_, attrname, ctypes.byref(attrNum)) scope = ctypes.c_long(0) self._call(const.SELECT_, const.ATTR_, attrNum, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) res = (attrNum.value, scope) self._attr_info[attrname] = res return res def clear_attr_from_cache(self, attrname): """Mark an attribute deleted in the name-to-number cache Will remove an attribute, and all attributes with higher numbers, from the attribute cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the attribute. Not this is NOT a string in Python 3! """ num, scope = self.attr_num(attrname) #All numbers higher than this are renumbered for a, n in list(self._attr_info.items()): if n[0] >= num: del self._attr_info[a] def clear_from_cache(self, varname): """Mark a variable deleted in the name-to-number cache Will remove a variable, and all variables with higher numbers, from the variable cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! """ num = self.var_num(varname) #All numbers higher than this are renumbered for v, n in list(self._var_nums.items()): if n >= num: del self._var_nums[v] def add_attr_to_cache(self, attrname, num, scope): """Add an attribute to the name-to-number cache This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable scope : bool True if global scope; False if variable scope. 
""" self._attr_info[attrname] = (num, scope) def add_to_cache(self, varname, num): """Add a variable to the name-to-number cache This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable """ self._var_nums[varname] = num #Note there is no function for delete, currently handled in Var.rename #and Attr.rename by just deleting from the dict directly. Maybe this #should be differen (maybe should be possible to follow a variable across #a rename...) class Var(MutableSequence): """ A CDF variable. This object does not directly store the data from the CDF; rather, it provides access to the data in a format that much like a Python list or numpy :class:`~numpy.ndarray`. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. The CDF user's guide, section 2.3, provides background on variables. .. note:: Not intended to be created directly; use methods of :class:`CDF` to gain access to a variable. A record-varying variable's data are viewed as a hypercube of dimensions n_dims+1 (the extra dimension is the record number). They are indexed in row-major fashion, i.e. the last index changes most frequently / is contiguous in memory. If the CDF is column-major, the data are transformed to row-major before return. Non record-varying variables are similar, but do not have the extra dimension of record number. Variables can be subscripted by a multidimensional index to return the data. Indices are in row-major order with the first dimension representing the record number. If the CDF is column major, the data are reordered to row major. Each dimension is specified by standard Python `slice <http://docs.python.org/tutorial/introduction.html#strings>`_ notation, with dimensions separated by commas. The ellipsis fills in any missing dimensions with full slices. The returned data are lists; Python represents multidimensional arrays as nested lists. The innermost set of lists represents contiguous data. .. note:: numpy 'fancy indexing' is *not* supported. Degenerate dimensions are 'collapsed', i.e. no list of only one element will be returned if a single subscript is specified instead of a range. (To avoid this, specify a slice like 1:2, which starts with 1 and ends before 2). Two special cases: 1. requesting a single-dimension slice for a record-varying variable will return all data for that record number (or those record numbers) for that variable. 2. Requests for multi-dimensional variables may skip the record-number dimension and simply specify the slice on the array itself. In that case, the slice of the array will be returned for all records. In the event of ambiguity (e.g., single-dimension slice on a one-dimensional variable), case 1 takes priority. Otherwise, mismatch between the number of dimensions specified in the slice and the number of dimensions in the variable will cause an :exc:`~exceptions.IndexError` to be thrown. This all sounds very complicated but it is essentially attempting to do the 'right thing' for a range of slices. An unusual case is scalar (zero-dimensional) non-record-varying variables. Clearly they cannot be subscripted normally. 
In this case, use the ``[...]`` syntax meaning 'access all data.': >>> import pycdf >>> testcdf = pycdf.CDF('test.cdf', '') >>> variable = testcdf.new('variable', recVary=False, ... type=pycdf.const.CDF_INT4) >>> variable[...] = 10 >>> variable <Var: CDF_INT4 [] NRV > >>> variable[...] 10 Reading any empty non-record-varying variable will return an empty with the same *number* of dimensions, but all dimensions will be of zero length. The scalar is, again, a special case: due to the inability to have a numpy array which is both zero-dimensional and empty, reading an NRV scalar variable with no data will return an empty one-dimensional array. This is really not recommended. As a list type, variables are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_; iterating over a variable returns a single complete record at a time. This is all clearer with examples. Consider a variable ``B_GSM``, with three elements per record (x, y, z components) and fifty records in the CDF. Then: 1. ``B_GSM[0, 1]`` is the y component of the first record. 2. ``B_GSM[10, :]`` is a three-element list, containing x, y, and z components of the 11th record. As a shortcut, if only one dimension is specified, it is assumed to be the record number, so this could also be written ``B_GSM[10]``. 3. ``B_GSM[...]`` reads all data for ``B_GSM`` and returns it as a fifty-element list, each element itself being a three-element list of x, y, z components. Multidimensional example: consider fluxes stored as a function of pitch angle and energy. Such a variable may be called Flux and stored as a two-dimensional array, with the first dimension representing (say) ten energy steps and the second, eighteen pitch angle bins (ten degrees wide, centered from 5 to 175 degrees). Assume 100 records stored in the CDF (i.e. 100 different times). 1. ``Flux[4]`` is a list of ten elements, one per energy step, each element being a list of 18 fluxes, one per pitch bin. All are taken from the fifth record in the CDF. 2. ``Flux[4, :, 0:4]`` is the same record, all energies, but only the first four pitch bins (roughly, field-aligned). 3. ``Flux[..., 0:4]`` is a 100-element list (one per record), each element being a ten-element list (one per energy step), each containing fluxes for the first four pitch bins. This slicing notation is very flexible and allows reading specifically the desired data from the CDF. All data are, on read, converted to appropriate Python data types; EPOCH, EPOCH16, and TIME_TT2000 types are converted to :class:`~datetime.datetime`. Data are returned in numpy arrays. .. note:: Although pycdf supports TIME_TT2000 variables, the Python :class:`~datetime.datetime` object does not support leap seconds. Thus, on read, any seconds past 59 are truncated to 59.999999 (59 seconds, 999 milliseconds, 999 microseconds). Potentially useful list methods and related functions: - `count <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `in <http://docs.python.org/reference/expressions.html#in>`_ - `index <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `len <http://docs.python.org/library/functions.html#len>`_ - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - `sorted <http://docs.python.org/library/functions.html#sorted>`_ The topic of array majority can be very confusing; good background material is available at `IDL Array Storage and Indexing <http://www.idlcoyote.com/misc_tips/colrow_major.html>`_. 
In brief, *regardless of the majority stored in the CDF*, pycdf will always present the data in the native Python majority, row-major order, also known as C order. This is the default order in `NumPy <http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html #internal-memory-layout-of-an-ndarray>`_. However, packages that render image data may expect it in column-major order. If the axes seem 'swapped' this is likely the reason. The :attr:`~Var.attrs` Python attribute acts as a dictionary referencing zAttributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`zAttrList` for more on the dictionary of attributes. With writing, as with reading, every attempt has been made to match the behavior of Python lists. You can write one record, many records, or even certain elements of all records. There is one restriction: only the record dimension (i.e. dimension 0) can be resized by write, as all records in a variable must have the same dimensions. Similarly, only whole records can be deleted. .. note:: Unusual error messages on writing data usually mean that pycdf is unable to interpret the data as a regular array of a single type matching the type and shape of the variable being written. A 5x4 array is supported; an irregular array where one row has five columns and a different row has six columns is not. Error messages of this type include: - ``Data must be well-formed, regular array of number, string, or datetime`` - ``setting an array element with a sequence.`` - ``shape mismatch: objects cannot be broadcast to a single shape`` For these examples, assume Flux has 100 records and dimensions [2, 3]. Rewrite the first record without changing the rest: >>> Flux[0] = [[1, 2, 3], [4, 5, 6]] Writes a new first record and delete all the rest: >>> Flux[...] = [[1, 2, 3], [4, 5, 6]] Write a new record in the last position and add a new record after: >>> Flux[99:] = [[[1, 2, 3], [4, 5, 6]], ... [[11, 12, 13], [14, 15, 16]]] Insert two new records between the current number 5 and 6: >>> Flux[5:6] = [[[1, 2, 3], [4, 5, 6]], [[11, 12, 13], ... [14, 15, 16]]] This operation can be quite slow, as it requires reading and rewriting the entire variable. (CDF does not directly support record insertion.) Change the first element of the first two records but leave other elements alone: >>> Flux[0:2, 0, 0] = [1, 2] Remove the first record: >>> del Flux[0] Removes record 5 (the sixth): >>> del Flux[5] Due to the need to work around a bug in the CDF library, this operation can be quite slow. Delete *all data* from ``Flux``, but leave the variable definition intact: >>> del Flux[...] .. note:: Although this interface only directly supports zVariables, zMode is set on opening the CDF so rVars appear as zVars. See p.24 of the CDF user's guide; pyCDF uses zMode 2. .. autosummary:: ~Var.attrs ~Var.compress ~Var.copy ~Var.dtype ~Var.dv ~Var.insert ~Var.name ~Var.rename ~Var.rv ~Var.shape ~Var.type .. attribute:: Var.attrs zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. .. automethod:: compress .. automethod:: copy .. autoattribute:: dtype .. automethod:: dv .. automethod:: insert .. automethod:: name .. automethod:: rename .. automethod:: rv .. autoattribute:: shape .. 
automethod:: type """ def __init__(self, cdf_file, var_name, *args): """Create or locate a variable Parameters ========== cdf_file : :py:class:`pycdf.CDF` CDF file containing this variable var_name : string name of this variable Other Parameters ================ args additional arguments passed to :py:meth:`_create`. If none, opens an existing variable. If provided, creates a new one. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning """ self.cdf_file = cdf_file #This is the definitive "identify" of variable self._name = None self._type = None #CDF type (long) self._raw = False #Raw access (skip all conversions) if len(args) == 0: self._get(var_name) else: self._create(var_name, *args) #Weak reference to attribute list (use attrs instead) #This avoids a reference loop self._attrlistref = weakref.ref(zAttrList(self)) def __getitem__(self, key): """Returns a slice from the data array. Details under :py:class:`pycdf.Var`. @return: The data from this variable @rtype: list-of-lists of appropriate type. @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) #Hyperslice mostly catches this sort of thing, but #an empty variable is a special case, since we might want to #WRITE to 0th record (which Hyperslice also supports) but #can't READ from it, and iterating over tries to read from it. if hslice.rv: if hslice.dimsizes[0] == 0 and hslice.degen[0] and \ hslice.starts[0] == 0: raise IndexError('record index out of range') #For NRV, again hslice will assume 0th record exists since we might #want to write. So ANY degenerate dim other than the glued-on 0th #suggests an explicit index that should fail. None degenerate suggests #make an empty array. #Note this is pulling a lot of hyperslice stuff into getitem! elif hslice.dimsizes[0] == 0: if len(hslice.degen) > 1 and max(hslice.degen[1:]): raise IndexError('record index out of range') else: #The zero-length dimension is degenerate so it gets chopped, #and you can't have a zero-length numpy array that still #maintains the size of all other dimensions. So just force #a zero-dim array and the rest will follow hslice.counts[...] = 0 #If this is a scalar, need to make a single non-degenerate #dimension so it can be empty. if len(hslice.counts) == 1: hslice.degen[0] = False result = hslice.create_array() if hslice.counts[0] != 0: hslice.select() lib.call(const.GET_, const.zVAR_HYPERDATA_, result.ctypes.data_as(ctypes.c_void_p)) return hslice.convert_input_array(result) def __delitem__(self, key): """Removes a record (or set of records) from the CDF Only whole records can be deleted, so the del call must either specify only one dimension or it must specify all elements of the non-record dimensions. This is *not* a way to resize a variable! Deleting records from the middle of a variable may be very slow in some circumstances. To work around a bug in CDF library versions 3.4.0 and before, all the data must be read in, the requested deletions done, and then all written back out. 
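For example (variable name hypothetical), for a record-varying variable
``Flux`` with dimensions [2, 3], either of the following deletes records
0 through 4:

>>> del Flux[0:5]
>>> del Flux[0:5, :, :]

whereas ``del Flux[0:5, 0, :]`` raises TypeError because it does not
cover whole records.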
@param key: index or slice to delete @type key: int or slice @raise TypeError: if an attempt is made to delete from a non record-varying variable, or to delete below the record level """ if not self.rv(): raise TypeError('Cannot delete records from non-record-varying ' 'variable.') hslice = _Hyperslice(self, key) if hslice.dims > 1 and (hslice.counts[1:] != hslice.dimsizes[1:]).any(): raise TypeError('Can only delete entire records.') if hslice.counts[0] == 0: return start = hslice.starts[0] count = hslice.counts[0] interval = hslice.intervals[0] dimsize = hslice.dimsizes[0] self._call() dangerous_delete = False if lib._del_middle_rec_bug and \ (interval != 1 or (start != 0 and start + count < dimsize)): #delete from middle is dangerous if only have one index entry entries = ctypes.c_long(0) lib.call(const.GET_, const.zVAR_nINDEXENTRIES_, ctypes.byref(entries)) dangerous_delete = (entries.value == 1) if dangerous_delete: data = self[...] data = numpy.delete( data, numpy.arange(start, start + count * interval, interval), 0) self[0:dimsize - count] = data first_rec = dimsize - count last_rec = dimsize - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif interval == 1: first_rec = ctypes.c_long(start) last_rec = ctypes.c_long(start + count - 1) lib.call(const.DELETE_, const.zVAR_RECORDS_, first_rec, last_rec) else: self._call() #delete from end to avoid renumbering of records for recno in range(start + (count - 1) * interval, start - 1, -1 * interval): lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(recno), ctypes.c_long(recno)) def __setitem__(self, key, data): """Puts a slice into the data array. Details under :py:class:`pycdf.Var`. @param key: index or slice to store @type key: int or slice @param data: data to store @type data: numpy.array @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. 
IndexError will @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) n_recs = hslice.counts[0] hslice.expand(data) cdf_type = self.type() if cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) else: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=self._np_type()) if cdf_type == const.CDF_EPOCH16.value: datashape = data.shape[:-1] else: datashape = data.shape #Check data sizes if datashape != tuple(hslice.expected_dims()): raise ValueError('attempt to assign data of dimensions ' + str(datashape) + ' to slice of dimensions ' + str(tuple(hslice.expected_dims()))) #Flip majority and reversed dimensions, see convert_input_array data = hslice.convert_output_array(data) #Handle insertions and similar weirdness if hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Specified slice ends before last record, so insert in middle saved_data = self[hslice.starts[0] + n_recs:] if hslice.counts[0] > 0: hslice.select() lib.call(const.PUT_, const.zVAR_HYPERDATA_, data.ctypes.data_as(ctypes.c_void_p)) if hslice.counts[0] < n_recs: first_rec = hslice.starts[0] + hslice.counts[0] last_rec = hslice.dimsizes[0] - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Put saved data in after inserted data self[hslice.starts[0] + hslice.counts[0]:] = saved_data def extend(self, data): """ Append multiple values to the end of this variable This is an efficiency function which overrides the base implementation in MutableSequence. Parameters ---------- data : the data to append """ self[len(self):] = data def insert(self, index, data): """ Inserts a *single* record before an index Parameters ---------- index : int index before which to insert the new record data : the record to insert """ self[index:index] = [data] def _create(self, var_name, datatype, n_elements = 1, dims = (), recVary = const.VARY, dimVarys = None): """Creates a new zVariable @param var_name: name of this variable @type var_name: string @param datatype: CDF data type @type datatype: ctypes.c_long @param n_elements: number of elements (should be 1 except for CDF_CHAR variables). @type n_elements: long @param dims: size of each dimension for multi-dimensional variable, or empty for a zero-dimensional @type dims: sequence of long @param recVary: record variance for this variable (VARY/NOVARY) @type recVary: long @param dimVarys: array of VARY or NOVARY, variance for each dimension @type dimVarys: sequence of long @return: new variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.new}. 
""" dim_array = (ctypes.c_long * len(dims))(*dims) enc_name = var_name.encode('ascii') if dimVarys is None: dim_vary_array = (ctypes.c_long * (len(dims) if len(dims) > 0 else 1))(const.VARY) else: dim_vary_array = (ctypes.c_long * len(dims))(*dimVarys) varNum = ctypes.c_long(0) self.cdf_file._call(const.CREATE_, const.zVAR_, enc_name, datatype, ctypes.c_long(n_elements), ctypes.c_long(len(dims)), dim_array, recVary, dim_vary_array, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) def _delete(self): """Removes this zVariable from the CDF @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ self._call(const.DELETE_, const.zVAR_) self.cdf_file.clear_from_cache(self._name) self._name = None def _get(self, var_name): """Gets an existing zVariable @param var_name: name of this variable @type var_name: string @return: variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.__getitem__}. """ if isinstance(var_name, str_classes): try: enc_name = var_name.encode('ascii').rstrip() except AttributeError: enc_name = var_name.rstrip() #already in ASCII #'touch' CDF to cause an error if the name isn't there; get number varNum = ctypes.c_long(0) self.cdf_file._call(const.GET_, const.zVAR_NUMBER_, enc_name, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) else: #Looking up by number name = ctypes.create_string_buffer(const.CDF_VAR_NAME_LEN256+1) self.cdf_file._call(const.SELECT_, const.zVAR_, ctypes.c_long(var_name), const.GET_, const.zVAR_NAME_, name) self._name = name.value.rstrip() self.cdf_file.add_to_cache(self._name, var_name) def _num(self): """Returns the zVar number for this variable @return: number of this zVar @rtype: int """ return self.cdf_file.var_num(self._name) def __len__(self): """Get number of records for this variable in this file @return: Number of records @rtype: long @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ count = ctypes.c_long(0) self._call(const.GET_, const.zVAR_MAXREC_, ctypes.byref(count)) return (count.value + 1) def __repr__(self): """Returns representation of the variable Cannot return anything that can be eval'd to create a copy, so just wrap the informal representation in angle brackets. @return: info on this zVar @rtype: str """ return '<Var:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the variable This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.Var`. 
@return: info on this zVar, CDFTYPE [dimensions] NRV (if not record-varying) @rtype: str """ if self.cdf_file._opened: cdftype = self.type() chartypes = (const.CDF_CHAR.value, const.CDF_UCHAR.value) rv = self.rv() typestr = lib.cdftypenames[cdftype] + \ ('*' + str(self._nelems()) if cdftype in chartypes else '' ) if rv: sizestr = str([len(self)] + self._dim_sizes()) else: sizestr = str(self._dim_sizes()) return typestr + ' ' + sizestr + ('' if rv else ' NRV') else: if isinstance(self._name, str): return 'zVar "{0}" in closed CDF {1}'.format( self._name, self.cdf_file.pathname) else: return 'zVar "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self.cdf_file.pathname.decode('ascii')) def _n_dims(self): """Get number of dimensions for this variable @return: the number of dimensions @rtype: long """ n_dims = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMDIMS_, ctypes.byref(n_dims)) return n_dims.value def _dim_sizes(self): """Get the dimension sizes for this variable @return: sequence of sizes @rtype: sequence of long @note: This will always be in Python order (i.e. row major, last index iterates most quickly), *regardless* of the majority of the CDF. """ sizes = (ctypes.c_long * const.CDF_MAX_DIMS)(0) self._call(const.GET_, const.zVAR_DIMSIZES_, sizes) sizes = sizes[0:self._n_dims()] return sizes def rv(self, new_rv=None): """ Gets or sets whether this variable has record variance If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Other Parameters ================ new_rv : boolean True to change to record variance, False to change to NRV, unspecified to simply check variance. Returns ======= out : Boolean True if record varying, False if NRV """ if new_rv != None: self._call(const.PUT_, const.zVAR_RECVARY_, const.VARY if new_rv else const.NOVARY) vary = ctypes.c_long(0) self._call(const.GET_, const.zVAR_RECVARY_, ctypes.byref(vary)) return vary.value != const.NOVARY.value def dv(self, new_dv=None): """ Gets or sets dimension variance of each dimension of variable. If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Parameters ========== new_dv : list of boolean Each element True to change that dimension to dimension variance, False to change to not dimension variance. (Unspecified to simply check variance.) Returns ======= out : list of boolean True if that dimension has variance, else false. """ ndims = self._n_dims() if new_dv != None: if len(new_dv) != ndims: raise ValueError('Must specify variance for ' + str(ndims) + 'dimensions.') varies = (ctypes.c_long * ndims)( *[const.VARY if dv else const.NOVARY for dv in new_dv]) self._call(const.PUT_, const.zVAR_DIMVARYS_, varies) if ndims == 0: return [] varies = (ctypes.c_long * const.CDF_MAX_DIMS)() self._call(const.GET_, const.zVAR_DIMVARYS_, varies) return [dv != const.NOVARY.value for dv in varies[0:ndims]] def _call(self, *args, **kwargs): """Select this CDF and variable and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. @param args: Passed directly to the CDF library interface. Useful constants are defined in the :py:mod:`pycdf.const` module of this package. @type args: various, see :py:mod:`ctypes`. 
@return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self.cdf_file._call( const.SELECT_, const.zVAR_, ctypes.c_long(self.cdf_file.var_num(self._name)), *args, **kwargs) def _np_type(self): """Returns the numpy type of this variable This is the numpy type that will come directly out of the CDF; see :meth:`dtype` for the representation post-conversion. Raises ====== CDFError : for library-reported error or failure to find numpy type Returns ======= out : dtype numpy dtype that will hold value from this variable """ cdftype = self.type() if cdftype == const.CDF_CHAR.value or cdftype == const.CDF_UCHAR.value: return numpy.dtype('S' + str(self._nelems())) try: return lib.numpytypedict[cdftype] except KeyError: raise CDFError(const.BAD_DATA_TYPE) def type(self, new_type=None): """ Returns or sets the CDF type of this variable Parameters ========== new_type : ctypes.c_long the new type from :mod:`~pycdf.const` Returns ======= out : int CDF type """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) n_elements = ctypes.c_long(self._nelems()) self._call(const.PUT_, const.zVAR_DATASPEC_, new_type, n_elements) self._type = None if self._type is None: cdftype = ctypes.c_long(0) self._call(const.GET_, const.zVAR_DATATYPE_, ctypes.byref(cdftype)) self._type = cdftype.value return self._type def _nelems(self): """Number of elements for each value in this variable This is the length of strings for CHAR and UCHAR, should be 1 otherwise. @return: length of strings @rtype: int """ nelems = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMELEMS_, ctypes.byref(nelems)) return nelems.value def name(self): """ Returns the name of this variable Returns ======= out : str variable's name """ if isinstance(self._name, str): return self._name elif isinstance(self._name, bytes): return self._name.decode() def compress(self, comptype=None, param=None): """ Set or check the compression of this variable Compression may not be changeable on variables with data already written; even deleting the data may not permit the change. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
Returns ======= out : tuple the (comptype, param) currently in effect """ return _compress(self, comptype, param) def copy(self): """ Copies all data and attributes from this variable Returns ======= out : :class:`VarCopy` list of all data in record order """ return VarCopy(self) def rename(self, new_name): """ Renames this variable Parameters ========== new_name : str the new name for this variable """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_VAR_NAME_LEN256: raise CDFError(const.BAD_VAR_NAME) self._call(const.PUT_, const.zVAR_NAME_, enc_name) self.cdf_file.add_to_cache( enc_name, self.cdf_file.var_num(self._name)) #Still in cache del self.cdf_file._var_nums[self._name] self._name = enc_name @property def shape(self): """ Provides the numpy array-like shape of this variable. Returns a tuple; first element is number of records (RV variable only) And the rest provide the dimensionality of the variable. .. note:: Assigning to this attribute will not change the shape. """ if self.rv(): return tuple([len(self)] + self._dim_sizes()) else: return tuple(self._dim_sizes()) @property def dtype(self): """ Provide the numpy dtype equivalent to the CDF type of this variable. Data from this variable will be returned in numpy arrays of this type. See Also -------- type """ cdftype = self.type() if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str is not bytes and not self._raw: return numpy.dtype('U' + str(self._nelems())) if cdftype in (const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value) and not self._raw: return numpy.dtype('O') return self._np_type() def _get_attrs(self): """Get attribute list Provide access to the zVar's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = zAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. """) class _Hyperslice(object): """Represents a CDF 'slice' used for the hyper CDF functions For internal module use only. @ivar dims: number of dimensions to this slice, usually number of dimensions to the variable plus one for the record, which represents the 0th (least rapidly varying) dimension. @type dims: int @ivar dimsizes: size of each dimension (0th is number of records) @type dimsizes: list of int @ivar starts: index of the start value for each dimension ('dimension indices' in CDF speak) @type starts: list of int @ivar counts: number of values to get from each dimension. Final result will be the product of everything in counts. ('dimension counts' in CDF speak) @type counts: numpy.array @ivar intervals: interval between successive indices to use for each dimension. ('dimension invervals' in CDF speak) @type intervals: list of int @ivar degen: is this dimension degenerate, i.e. should be removed in the returned dataset. A 3D array with one dimension degenerate will be returned as a 2D array (i.e. list-of-lists.) @type degen: numpy.array @ivar rev: should this dimension be returned in reverse order? 
@type rev: numpy.array @ivar column: is this slice in column-major mode (if false, row-major) @type column: boolean @ivar zvar: what CDF variable this object slices on @type zvar: :py:class:`pycdf.Var` @ivar expanded_key: fully-expanded version of the key passed to the constructor (all dimensions filled in) @type expanded_key: tuple @note: All dimension-related variables are stored row-major (Python order) """ def __init__(self, zvar, key): """Create a Hyperslice @param zvar: zVariable that this slices @type zvar: :py:class:`pycdf.Var` @param key: Python multi-dimensional slice as passed to __getitem__ @type key: tuple of slice and/or int @raise IndexError: if slice is out of range, mismatches dimensions, or otherwise unparsable. @raise ValueError: if slice has invalid values """ self.zvar = zvar self.rv = self.zvar.rv() #dim of records, + 1 record dim (NRV always is record 0) self.dims = zvar._n_dims() + 1 self.dimsizes = [len(zvar)] + \ zvar._dim_sizes() self.starts = [0] * self.dims self.counts = numpy.empty((self.dims,), dtype=numpy.int32) self.counts.fill(1) self.intervals = [1] * self.dims self.degen = numpy.zeros(self.dims, dtype=numpy.bool) self.rev = numpy.zeros(self.dims, dtype=numpy.bool) #key is: #1. a single value (integer or slice object) if called 1D #2. a tuple (of integers and/or slice objects) if called nD #3. Each item is either a single value (degenerate dim) # or a slice object. if not hasattr(key, '__len__'): #Not a container object, pack in tuple key = (key, ) if not self.rv: key = (0, ) + key #NRV, so always get 0th record (degenerate) key = self.expand_ellipsis(key, self.dims) if self.rv: #special-cases for RV variables if len(key) == 1: #get all data for this record(s) key = self.expand_ellipsis(key + (Ellipsis, ), self.dims) elif len(key) == self.dims - 1: #get same slice from each record key = (slice(None, None, None), ) + key if len(key) == self.dims: self.expanded_key = key for i in range(self.dims): idx = key[i] if hasattr(idx, 'start'): #slice (self.starts[i], self.counts[i], self.intervals[i], self.rev[i]) = \ self.convert_range(idx.start, idx.stop, idx.step, self.dimsizes[i]) else: #Single degenerate value if idx < 0: idx += self.dimsizes[i] if idx != 0 and (idx >= self.dimsizes[i] or idx < 0): raise IndexError('list index out of range') self.starts[i] = idx self.degen[i] = True else: raise IndexError('Slice does not match dimensions for zVar ' + str(zvar._name)) self.column = zvar.cdf_file.col_major() def expected_dims(self, data=None): """Calculate size of non-degenerate dimensions Figures out size, in each dimension, of expected input data @return: size of each dimension for this slice, excluding degenerate @rtype: list of int """ return [self.counts[i] for i in range(self.dims) if not self.degen[i]] def expand(self, data): """Expands the record dimension of this slice to hold a set of data If the length of data (outermost dimension) is larger than the record count (counts[0]) for this slice, expand the slice to hold all the data. This requires that the record dimension of the slice not be degenerate, and also that it not have been completely specified when the hyperslice was created (i.e. record dimension either ellipsis or no specified stop.) Does *not* expand any other dimension, since that's Very Hard in CDF. 
@param data: the data which are intended to be stored in this slice @type data: list """ rec_slice = self.expanded_key[0] if not self.rv or isinstance(data, str_classes) or self.degen[0] or \ not hasattr(rec_slice, 'stop'): return if len(data) < self.counts[0]: #Truncate to fit data if rec_slice.stop is None and rec_slice.step in (None, 1): self.counts[0] = len(data) elif len(data) > self.counts[0]: #Expand to fit data if rec_slice.step in (None, 1): self.counts[0] = len(data) def create_array(self): """Creates a numpy array to hold the data from this slice Returns ======= out : numpy.array array sized, typed, and dimensioned to hold data from this slice """ counts = self.counts degen = self.degen if self.column: counts = self.reorder(counts) degen = self.reorder(degen) #TODO: Forcing C order for now, revert to using self.column later array = numpy.empty( [counts[i] for i in range(len(counts)) if not degen[i]], self.zvar._np_type(), order='C') return numpy.require(array, requirements=('C', 'A', 'W')) def convert_input_array(self, buffer): """Converts a buffer of raw data from this slice EPOCH(16) variables always need to be converted. CHAR need converted to Unicode if py3k Parameters ========== buffer : numpy.array data as read from the CDF file Returns ======= out : numpy.array converted data """ result = self._flip_array(buffer) #Convert to derived types cdftype = self.zvar.type() if not self.zvar._raw: if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str != bytes: dt = numpy.dtype('U{0}'.format(result.dtype.itemsize)) result = numpy.require(numpy.char.array(result).decode(), dtype=dt) elif cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(result) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(result) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(result) return result def convert_output_array(self, buffer): """Convert a buffer of data that will go into this slice Parameters ========== buffer : numpy.array data to go into the CDF file Returns ======= out : numpy.array input with majority flipped and dimensions reversed to be suitable to pass directly to CDF library. """ buffer = self._flip_array(buffer) return numpy.require(buffer, requirements=('C', 'A', 'W')) def _flip_array(self, data): """ Operations for majority, etc. common between convert_input and _output """ cdftype = self.zvar.type() #Flip majority if any non-degenerate dimensions exist if self.column and not min(self.degen): #Record-number dim degen, swap whole thing if self.degen[0]: if cdftype == const.CDF_EPOCH16.value: #Maintain last dimension data = data.transpose( list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose() #Record-number dimension is not degenerate, so keep it first else: if cdftype == const.CDF_EPOCH16.value: data = data.transpose( [0] + list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose( [0] + list(range(len(data.shape) - 1, 0, -1))) #Reverse non-degenerate dimensions in rev #Remember that the degenerate indices are already gone! 
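        #rev[i] was set True by convert_range for any slice with a negative
        #step; the degenerate dimensions are already absent from the data at
        #this point, so only the surviving axes are index-reversed below.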
if self.rev.any(): sliced = [(slice(None, None, -1) if self.rev[i] else slice(None)) for i in range(self.dims) if not self.degen[i]] if cdftype == const.CDF_EPOCH16.value: #don't reverse last dim sliced.extend(slice(None)) data = operator.getitem(data, tuple(sliced)) return data def select(self): """Selects this hyperslice in the CDF Calls the CDF library to select the CDF, variable, records, and array elements corresponding to this slice. """ args = (const.SELECT_, const.zVAR_RECNUMBER_, ctypes.c_long(self.starts[0]), const.SELECT_, const.zVAR_RECCOUNT_, ctypes.c_long(self.counts[0]), const.SELECT_, const.zVAR_RECINTERVAL_, ctypes.c_long(self.intervals[0])) if self.dims > 1: dims = self.dims - 1 args += (const.SELECT_, const.zVAR_DIMINDICES_, (ctypes.c_long * dims)(*self.starts[1:]), const.SELECT_, const.zVAR_DIMCOUNTS_, (ctypes.c_long * dims)(*self.counts[1:]), const.SELECT_, const.zVAR_DIMINTERVALS_, (ctypes.c_long * dims)(*self.intervals[1:])) self.zvar._call(*args) @staticmethod def expand_ellipsis(slices, n_dims): """Expands any ellipses into correct number of full-size slices @param slices: tuple of slices, integers, or ellipse objects @type slices: tuple @param n_dims: number of dimensions this slice is over @type n_dims: int @return: L{slices} with ellipses replaced by appropriate number of full-dimension slices @rtype: tuple @raise IndexError: if ellipses specified when already have enough dimensions """ if slices is Ellipsis: return tuple([slice(None, None, None) for i in range(n_dims)]) #Elements might be numpy arrays, so can't use in/index idx = [i for i, v in enumerate(slices) if v is Ellipsis] if not idx: #no ellipsis return slices if len(idx) > 1: #multiples! raise IndexError('Ellipses can only be used once per slice.') idx = idx[0] #how many dims to expand ellipsis to #remember the ellipsis is in len(slices) and must be replaced! extra = n_dims - len(slices) + 1 if extra < 0: raise IndexError('too many indices') result = slices[0:idx] + (slice(None), ) * extra + slices[idx+1:] return result @staticmethod def check_well_formed(data): """Checks if input data is well-formed, regular array""" d = numpy.asanyarray(data) if d.dtype == numpy.object: #this is probably going to be bad try: len(d.flat[0]) except TypeError: #at least it's not a list pass else: raise ValueError( 'Data must be well-formed, regular array of number, ' 'string, or datetime') @staticmethod def dimensions(data): """Finds the dimensions of a nested list-of-lists @param data: data of which dimensions are desired @type data: list (of lists) @return: dimensions of L{data}, in order outside-in @rtype: list of int @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) _Hyperslice.check_well_formed(d) return d.shape @staticmethod def types(data, backward=False): """Find dimensions and valid types of a nested list-of-lists Any given data may be representable by a range of CDF types; infer the CDF types which can represent this data. This breaks down to: 1. Proper kind (numerical, string, time) 2. Proper range (stores highest and lowest number) 3. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: 1. Type that matches precision of data first, then 2. integer type before float type, then 3. Smallest type first, then 4. signed type first, then 5. specifically-named (CDF_BYTE) vs. 
generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if L{data} specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: 1. absolute values between 0 and 3e-39 2. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. @param data: data for which dimensions and CDF types are desired @type data: list (of lists) @param backward: limit to pre-CDF3 types @type backward: bool @return: dimensions of L{data}, in order outside-in; CDF types which can represent this data; number of elements required (i.e. length of longest string) @rtype: 3-tuple of lists ([int], [ctypes.c_long], [int]) @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) dims = d.shape elements = 1 types = [] _Hyperslice.check_well_formed(d) if d.dtype.kind in ('S', 'U'): #it's a string types = [const.CDF_CHAR, const.CDF_UCHAR] elements = d.dtype.itemsize if d.dtype.kind == 'U': #UTF-8 uses 4 bytes per elements //= 4 elif d.size and hasattr(numpy.ma.getdata(d).flat[0], 'microsecond'): if max((dt.microsecond % 1000 for dt in d.flat)) > 0: types = [const.CDF_EPOCH16, const.CDF_EPOCH, const.CDF_TIME_TT2000] else: types = [const.CDF_EPOCH, const.CDF_EPOCH16, const.CDF_TIME_TT2000] if backward: del types[types.index(const.CDF_EPOCH16)] del types[-1] elif not lib.supports_int8: del types[-1] elif d is data or isinstance(data, numpy.generic): #numpy array came in, use its type (or byte-swapped) types = [k for k in lib.numpytypedict if (lib.numpytypedict[k] == d.dtype or lib.numpytypedict[k] == d.dtype.newbyteorder()) and not k in lib.timetypes] if (not lib.supports_int8 or backward) \ and const.CDF_INT8.value in types: del types[types.index(const.CDF_INT8.value)] #Maintain priority to match the ordered lists below: #float/double (44, 45) before real (21/22), and #byte (41) before int (1) before char (51). So hack. #Consider making typedict an ordered dict once 2.6 is dead. types.sort(key=lambda x: x % 50, reverse=True) if not types: #not a numpy array, or can't parse its type if d.dtype.kind == 'O': #Object. 
Try to make it numeric #Can't do safe casting from Object, so try and compare #Basically try most restrictive to least restrictive trytypes = (numpy.uint64, numpy.int64, numpy.float64) for t in trytypes: try: newd = d.astype(dtype=t) except: #Failure to cast, try next type continue if (newd == d).all(): #Values preserved, use this type d = newd #Continue with normal guessing, as if a list break else: #fell through without a match raise ValueError( 'Cannot convert generic objects to CDF type.') if d.dtype.kind in ('i', 'u'): #integer minval = numpy.min(d) maxval = numpy.max(d) if minval < 0: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_INT2, const.CDF_INT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 15, 2 ** 31, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] else: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_UINT1, const.CDF_INT2, const.CDF_UINT2, const.CDF_INT4, const.CDF_UINT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 8, 2 ** 15, 2 ** 16, 2 ** 31, 2 ** 32, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] types = [t for (t, c) in zip(types, cutoffs) if c > maxval and (minval >= 0 or minval >= -c)] if (not lib.supports_int8 or backward) \ and const.CDF_INT8 in types: del types[types.index(const.CDF_INT8)] else: #float if dims is (): if d != 0 and (abs(d) > 1.7e38 or abs(d) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] else: absolutes = numpy.abs(d[d != 0]) if len(absolutes) > 0 and \ (numpy.max(absolutes) > 1.7e38 or numpy.min(absolutes) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] types = [t.value if hasattr(t, 'value') else t for t in types] return (dims, types, elements) @staticmethod def reorder(seq): """Reorders seq to switch array majority Used to take an array of subscripts between row and column majority. First element is not touched, being the record number. @param seq: a sequence of *subscripts* @type seq: sequence of integers @return: seq with all but element 0 reversed in order @rtype: sequence of integers """ return numpy.concatenate((seq[0:1], numpy.flipud(seq)[:-1])) @staticmethod def convert_range(start, stop, step, size): """Converts a start/stop/step range to start/count/interval (i.e. changes from Python-style slice to CDF-style) @param start: index to start a slice at, may be none or negative @type start: int @param stop: index at end of slice (one-past, standard Python), may be none or negative @type stop: int @param step: interval for stepping through stlice @type step: int @param size: size of list to slice @type size: int @return: (start, count, interval, rev) where: 1. start is the start index, normalized to be within the size of the list and negatives handled 2. count is the number of records in the slice, guaranteed to stop before the end 3. interval is the skip between records 4. rev indicates whether the sequence should be reversed @rtype: (int, int, int, boolean) """ (start, stop, step) = slice(start, stop, step).indices(size) if step < 0: step *= -1 count = int((start - stop + step - 1) / step) start = start - (count - 1) * step rev = True else: count = int((stop - start + step - 1) / step) rev = False if count < 0: count = 0 start = 0 return (start, count, step, rev) class Attr(MutableSequence): """An attribute, g or z, for a CDF .. 
warning:: This class should not be used directly, but only in its subclasses, :class:`gAttr` and :class:`zAttr`. The methods listed here are safe to use in the subclasses. Represents a CDF attribute, providing access to the Entries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. An introduction to CDF attributes can be found in section 2.4 of the CDF user's guide. Each element of the list is a single Entry of the appropriate type. The index to the elements is the Entry number. Multi-dimensional slicing is *not* supported; an Entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry .. autosummary:: ~Attr.append ~Attr.has_entry ~Attr.insert ~Attr.max_idx ~Attr.new ~Attr.number ~Attr.rename ~Attr.type .. automethod:: append .. automethod:: has_entry .. automethod:: insert .. automethod:: max_idx .. automethod:: new .. automethod:: number .. automethod:: rename .. automethod:: type """ def __init__(self, cdf_file, attr_name, create=False): """Initialize this attribute @param cdf_file: CDF file containing this attribute @type cdf_file: :py:class:`pycdf.CDF` @param attr_name: Name of this attribute @type attr_name: str @param create: True to create attribute, False to look up existing. @type create: bool """ self._cdf_file = cdf_file self._raw = False if isinstance(attr_name, str_classes): try: self._name = attr_name.encode('ascii') except AttributeError: self._name = attr_name attrno = ctypes.c_long() if create: self._cdf_file._call(const.CREATE_, const.ATTR_, self._name, self.SCOPE, ctypes.byref(attrno)) self._cdf_file.add_attr_to_cache( self._name, attrno.value, self.SCOPE == const.GLOBAL_SCOPE) else: #Ensure exists, and populate cache. See scope note below attrno, scope = self._cdf_file.attr_num(self._name) else: name = ctypes.create_string_buffer(const.CDF_ATTR_NAME_LEN256 + 1) scope = ctypes.c_long(0) self._cdf_file._call(const.SELECT_, const.ATTR_, ctypes.c_long(attr_name)) #Because it's possible to create a gAttr Python objecting #referencing an Attribute with variable scope, and vice-versa, #do NOT assume the scope matches #(Higher level code checks for that being a bad thing.) self._cdf_file._call( const.GET_, const.ATTR_NAME_, name, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) self._name = name.value.rstrip() if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) self._cdf_file.add_attr_to_cache(self._name, attr_name, scope) def __getitem__(self, key): """Return a slice of Entries. Because Attributes may be sparse, a multi-element slice will return None for those elements which do not have associated Entries. @param key: index or range of Entry number to return @type key: slice or int @return: a list of entries, appropriate type. @raise IndexError: if L{key} is an int and that Entry number does not exist. 
""" if key is Ellipsis: key = slice(None, None, None) if hasattr(key, 'indices'): idx = range(*key.indices(self.max_idx() + 1)) return [self._get_entry(i) if self.has_entry(i) else None for i in idx] else: if self.has_entry(key): return self._get_entry(key) else: raise IndexError('list index ' + str(key) + ' out of range.') def _check_other_entries(self, types): """Try to get the type of this entry from others in the Attribute For zAttrs, checks if all other Entries are the same type, and at least one doesn't match its zVar, i.e. Entry type dominates (otherwise assumption is the Var type dominates). For gAttrs, checks all other Entries, and gives priority to the one that's earliest in the possible type list and exists in other Entries. This is only one component of Entry type guessing! :param list types: CDF types that are candidates (match the data) :return: The type discerned from other Entries, or None """ if self.ENTRY_ == const.zENTRY_: #If everything else is the same entry type, #and one is not the same as its var, probably #all entries should be of that type cand_et = None #The Entry type that might work one_var_diff = False #One Var has a type different from Entry for num in range(self.max_idx() + 1): if not self.has_entry(num): continue vartype = self._cdf_file[num].type() entrytype = self.type(num) if vartype != entrytype: one_var_diff = True if cand_et is None: if not entrytype in types: return None #One var has Entry with "impossible" type cand_et = entrytype elif cand_et != entrytype: return None #Two vars have Entries with different types if one_var_diff and cand_et is not None: return cand_et else: # Of those types which exist in other entries, # find the one which is earliest # in types, i.e. the preferred type entrytypes = [self.type(num) for num in range(self.max_idx() + 1) if self.has_entry(num)] entrytypes = [et for et in entrytypes if et in types] if entrytypes: return types[ min([types.index(et) for et in entrytypes])] return None def __setitem__(self, key, data): """Set a slice of Entries. @param key: index or range of Entry numbers to set @type key: slice or int @param data: the data to set these entries to. Normally each entry should be a sequence; if a scalar is provided, it is treated as a single-element list. @type data: scalar or list @raise ValueError: if size of {data} does not match size of L{key} @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. 
""" if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): #Single value, promote everything a dimension idx = (key, key + 1, 1) data = [data] else: idx = key.indices(self.max_idx() + 1) if key.step is None or key.step > 0: #Iterating forward, extend slice to match data if len(data) > len(range(*idx)): idx = (idx[0], idx[0] + idx[2] * len(data), idx[2]) #get, and check, types and sizes for all data #checks first so don't have error after changing half the Entries data_idx = -1 typelist = [] for i in range(*idx): data_idx += 1 if data_idx >= len(data): continue datum = data[data_idx] if datum is None: typelist[i] = (None, None, None) continue (dims, types, elements) = _Hyperslice.types( datum, backward=self._cdf_file.backward) if len(types) <= 0: raise ValueError('Cannot find a matching CDF type.') if len(dims) > 1: raise ValueError('Entries must be scalar or 1D.') elif len(dims) == 1 and isinstance(datum[0], str_classes): raise ValueError('Entry strings must be scalar.') entry_type = None if self.has_entry(i): #If the entry already exists, match its type entry_type = self.type(i) if not entry_type in types: entry_type = None if entry_type is None: #Check other entries for this attribute entry_type = self._check_other_entries(types) if entry_type is None and self.ENTRY_ == const.zENTRY_: #Fall back to zVar type vartype = self._cdf_file[i].type() if vartype in types: entry_type = vartype else: entry_type = types[0] elif entry_type is None: entry_type = types[0] if not entry_type in lib.numpytypedict: raise ValueError('Cannot find a matching numpy type.') typelist.append((dims, entry_type, elements)) data_idx = -1 for i in range(*idx): data_idx += 1 if data_idx >= len(data) or data[data_idx] is None: if self.has_entry(i): del self[i] continue datum = data[data_idx] (dims, entry_type, elements) = typelist[data_idx] self._write_entry(i, datum, entry_type, dims, elements) def __delitem__(self, key): """Delete a slice of Entries. @param key: index or range of Entry numbers to delete @type key: slice or int @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. """ if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): idx = (key, key + 1, 1) else: idx = key.indices(self.max_idx() + 1) for i in range(*idx): self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(i), const.DELETE_, self.ENTRY_) def __iter__(self, current=0): """Iterates over all entries in this Attribute Returns data from one entry at a time until reaches the end. @note: Returned in entry-number order. """ while current <= self.max_idx(): if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current += 1 def __reversed__(self, current=None): """Iterates over all entries in this Attribute Returns data from one entry at a time, starting at end and going to beginning. @note: Returned in entry-number order. """ if current is None: current = self.max_idx() while current >= 0: if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current -= 1 def __len__(self): """Number of Entries for this Attr. NOT same as max Entry number. 
@return: Number of Entries @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_NUMENTRIES_, ctypes.byref(count)) return count.value def __repr__(self): """Returns representation of an attribute Cannot return anything that can be eval'd to create a copy of the attribtute, so just wrap the informal representation in angle brackets. @return: all the data in this attribute @rtype: str """ return '<\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute This is an 'informal' representation in that it cannot be evaluated directly to create an L{Attr}. @return: all the data in this attribute @rtype: str """ if self._cdf_file._opened: return '\n'.join([str(item) for item in self]) else: if isinstance(self._name, str): return 'Attribute "{0}" in closed CDF {1}'.format( self._name, self._cdf_file.pathname) else: return 'Attribute "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self._cdf_file.pathname.decode('ascii')) def insert(self, index, data): """Insert an entry at a particular number Inserts entry at particular number while moving all subsequent entries to one entry number later. Does not close gaps. Parameters ========== index : int index where to put the new entry data : data for the new entry """ max_entry = self.max_idx() if index > max_entry: #Easy case self[index] = data return for i in range(max_entry, index - 1, -1): if self.has_entry(i+1): self.__delitem__(i+1) if self.has_entry(i): self.new(self.__getitem__(i), type=self.type(i), number=i+1) self[index] = data def append(self, data): """Add an entry to end of attribute Puts entry after last defined entry (does not fill gaps) Parameters ========== data : data for the new entry """ self[self.max_idx() + 1] = data def _call(self, *args, **kwargs): """Select this CDF and Attr and call the CDF internal interface @param args: Passed directly to the CDF library interface. @type args: various, see :py:mod:`ctypes`. @return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self._cdf_file._call( const.SELECT_, const.ATTR_, ctypes.c_long(self._cdf_file.attr_num(self._name)[0]), *args, **kwargs) def _entry_len(self, number): """Number of elements in an Entry @param number: number of Entry @type number: int @return: number of elements @rtype: int """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') count = ctypes.c_long(0) self._call( const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_NUMELEMS_, ctypes.byref(count)) return count.value def type(self, number, new_type=None): """Find or change the CDF type of a particular Entry number Parameters ========== number : int number of Entry to check or change Other Parameters ================ new_type type to change this Entry to, from :mod:`~pycdf.const`. Omit to only check type. Returns ======= out : int CDF variable type, see :mod:`~pycdf.const` Notes ===== If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) size = ctypes.c_long(self._entry_len(number)) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATASPEC_, new_type, size, ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') cdftype = ctypes.c_long(0) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATATYPE_, ctypes.byref(cdftype), ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') return cdftype.value def has_entry(self, number): """Check if this attribute has a particular Entry number Parameters ========== number : int number of Entry to check or change Returns ======= out : bool True if ``number`` is a valid entry number; False if not """ status = self._call(const.CONFIRM_, self.ENTRY_EXISTENCE_, ctypes.c_long(number), ignore=(const.NO_SUCH_ENTRY, )) return not status == const.NO_SUCH_ENTRY def max_idx(self): """Maximum index of Entries for this Attr Returns ======= out : int maximum Entry number """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_MAXENTRY_, ctypes.byref(count)) return count.value def new(self, data, type=None, number=None): """Create a new Entry in this Attribute .. note:: If ``number`` is provided and an Entry with that number already exists, it will be overwritten. Parameters ========== data data to put in the Entry Other Parameters ================ type : int type of the new Entry, from :mod:`~pycdf.const` (otherwise guessed from ``data``) number : int Entry number to write, default is lowest available number. """ if number is None: number = 0 while self.has_entry(number): number += 1 (dims, types, elements) = _Hyperslice.types( data, backward=self._cdf_file.backward) if type is None: #Guess based on other entries type = self._check_other_entries(types) if type is None and self.ENTRY_ == const.zENTRY_: #Try to match variable type vartype = self._cdf_file[number].type() if vartype in types: type = vartype if type is None: type = types[0] elif hasattr(type, 'value'): type = type.value self._write_entry(number, data, type, dims, elements) def number(self): """Find the attribute number for this attribute Returns ======= out : int attribute number """ no = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.ATTR_NUMBER_, self._name, ctypes.byref(no)) return no.value def global_scope(self): """Determine scope of this attribute. Returns ======= out : bool True if global (i.e. gAttr), False if zAttr """ return self._cdf_file.attr_num(self._name)[1] def rename(self, new_name): """Rename this attribute Renaming a zAttribute renames it for *all* zVariables in this CDF! 
Parameters ========== new_name : str the new name of the attribute """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_ATTR_NAME_LEN256: raise CDFError(const.BAD_ATTR_NAME) self._call(const.PUT_, const.ATTR_NAME_, enc_name) self._cdf_file.add_attr_to_cache( enc_name, *self._cdf_file.attr_num(self._name)) #still in cache del self._cdf_file._attr_info[self._name] self._name = enc_name def _get_entry(self, number): """Read an Entry associated with this L{Attr} @param number: number of Entry to return @type number: int @return: data from entry numbered L{number} @rtype: list or str """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') #Make a big enough buffer length = self._entry_len(number) cdftype = self.type(number) if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): buff = numpy.empty((), 'S{0}'.format(length), order='C') else: if not cdftype in lib.numpytypedict: raise CDFError(const.BAD_DATA_TYPE) buff = numpy.empty((length,), lib.numpytypedict[cdftype], order='C') buff = numpy.require(buff, requirements=('C', 'A', 'W')) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATA_, buff.ctypes.data_as(ctypes.c_void_p)) #decode if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): if str == bytes or self._raw: #Py2k, leave as bytes result = bytes(buff) else: #Py3k, make unicode result = str(numpy.char.array(buff).decode()) else: if not self._raw: if cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(buff) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(buff) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(buff) else: result = buff else: result = buff if length == 1: result = result[0] return result def _write_entry(self, number, data, cdf_type, dims, elements): """Write an Entry to this Attr. @param number: number of Entry to write @type number: int @param data: data to write @param cdf_type: the CDF type to write, from :py:mod:`pycdf.const` @param dims: dimensions of L{data} @type dims: list @param elements: number of elements in L{data}, 1 unless it is a string @type elements: int """ if len(dims) == 0: n_write = 1 else: n_write = dims[0] if cdf_type in (const.CDF_CHAR.value, const.CDF_UCHAR.value): data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.dtype('S' + str(elements))) n_write = elements elif cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data), except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) elif cdf_type in lib.numpytypedict: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=lib.numpytypedict[cdf_type]) else: raise CDFError(const.BAD_DATA_TYPE) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATA_, ctypes.c_long(cdf_type), ctypes.c_long(n_write), data.ctypes.data_as(ctypes.c_void_p)) def _delete(self): """Delete this Attribute Also deletes all Entries associated with it. 
""" self._call(const.DELETE_, const.ATTR_) self._cdf_file.clear_attr_from_cache(self._name) self._name = None class zAttr(Attr): """zAttribute for zVariables within a CDF. .. warning:: Because zAttributes are shared across all variables in a CDF, directly manipulating them may have unexpected consequences. It is safest to operate on zEntries via :class:`zAttrList`. .. note:: When accessing a zAttr, pyCDF exposes only the zEntry corresponding to the associated zVariable. See Also ======== :class:`Attr` """ ENTRY_ = const.zENTRY_ ENTRY_DATA_ = const.zENTRY_DATA_ SCOPE = const.VARIABLE_SCOPE ENTRY_EXISTENCE_ = const.zENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMzENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXzENTRY_ ENTRY_NUMELEMS_ = const.zENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.zENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.zENTRY_DATASPEC_ def insert(self, index, data): """Insert entry at particular index number Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError def append(self, index, data): """Add entry to end of attribute list Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError class gAttr(Attr): """Global Attribute for a CDF Represents a CDF attribute, providing access to the gEntries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. Normally accessed by providing a key to a :class:`gAttrList`: >>> attribute = cdffile.attrs['attribute_name'] >>> first_gentry = attribute[0] Each element of the list is a single gEntry of the appropriate type. The index to the elements is the gEntry number. A gEntry may be either a single string or a 1D array of numerical type. Entries of numerical type (everything but CDF_CHAR and CDF_UCHAR) with a single element are returned as scalars; multiple-element entries are returned as a list. No provision is made for accessing below the entry level; the whole list is returned at once (but Python's slicing syntax can be used to extract individual items from that list.) Multi-dimensional slicing is *not* supported; an entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry gEntries are *not* necessarily contiguous; a gAttribute may have an entry 0 and entry 2 without an entry 1. :meth:`~Attr.len` will return the *number* of gEntries; use :meth:`~Attr.max_idx` to find the highest defined gEntry number and :meth:`~Attr.has_entry` to determine if a particular gEntry number exists. Iterating over all entries is also supported:: >>> entrylist = [entry for entry in attribute] Deleting gEntries will leave a "hole": >>> attribute[0:3] = [1, 2, 3] >>> del attribute[1] >>> attribute.has_entry(1) False >>> attribute.has_entry(2) True >>> print attribute[0:3] [1, None, 3] Multi-element slices over nonexistent gEntries will return ``None`` where no entry exists. Single-element indices for nonexistent gEntries will raise ``IndexError``. Assigning ``None`` to a gEntry will delete it. 
When assigning to a gEntry, the type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing gEntry of the same number in this gAttribute #. other gEntries in this gAttribute #. data-matching constraints described in :meth:`CDF.new`. See Also ======== :class:`Attr` """ ENTRY_ = const.gENTRY_ ENTRY_DATA_ = const.gENTRY_DATA_ SCOPE = const.GLOBAL_SCOPE ENTRY_EXISTENCE_ = const.gENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMgENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXgENTRY_ ENTRY_NUMELEMS_ = const.gENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.gENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.gENTRY_DATASPEC_ class AttrList(MutableMapping): """Object representing a list of attributes. .. warning:: This class should not be used directly, but only via its subclasses, :class:`gAttrList` and :class:`zAttrList`. Methods listed here are safe to use from the subclasses. .. autosummary:: ~AttrList.clone ~AttrList.copy ~AttrList.from_dict ~AttrList.new ~AttrList.rename .. automethod:: clone .. automethod:: copy .. automethod:: from_dict .. automethod:: new .. automethod:: rename """ def __init__(self, cdf_file, special_entry=None): """Initialize the attribute collection @param cdf_file: CDF these attributes are in @type cdf_file: :py:class:`pycdf.CDF` @param special_entry: callable which returns a "special" entry number, used to limit results for zAttrs to those which match the zVar (i.e. the var number) @type special_entry: callable """ self._cdf_file = cdf_file self.special_entry = special_entry def __getitem__(self, name): """Find an Attribute by name @param name: name of the Attribute to return @type name: str @return: attribute named L{name} @rtype: L{Attr} @raise KeyError: if there is no attribute named L{name} @raise CDFError: other errors in CDF library """ try: attrib = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attrib.global_scope() != self.global_scope: raise KeyError(name + ': no ' + self.attr_name + ' by that name.') return attrib def __setitem__(self, name, data): """Create an Attribute or change its entries @param name: name of Attribute to change @type name: str @param data: Entries to populate this Attribute with. Any existing Entries will be deleted! Another C{Attr} may be specified, in which case all its entries are copied. @type data: scalar, list, or L{Attr} """ if isinstance(data, AttrList): if name in self: del self[name] attr = self._get_or_create(name) for entryno in range(data.max_idx()): if data.has_entry(entryno): attr.new(data[entryno], data.type(entryno), entryno) else: attr = self._get_or_create(name) if isinstance(data, str_classes): data = [data] else: try: junk = len(data) except TypeError: data = [data] attr[:] = data del attr[len(data):] def __delitem__(self, name): """Delete an Attribute (and all its entries) @param name: name of Attribute to delete @type name: str """ try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) attr._delete() def __iter__(self, current=0): """Iterates over all Attr in this CDF or variable Returns name of one L{Attr} at a time until reaches the end. @note: Returned in number order. 
""" count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) while current < count.value: candidate = self.AttrType(self._cdf_file, current) if candidate.global_scope() == self.global_scope: if self.special_entry is None or \ candidate.has_entry(self.special_entry()): if str == bytes: value = yield(candidate._name) else: value = yield(candidate._name.decode()) if value != None: current = self[value].number() current += 1 def __repr__(self): """Returns representation of attribute list Cannot return anything that can be eval'd to create a copy of the list, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<' + self.__class__.__name__ + ':\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute list This is an 'informal' representation in that it cannot be evaluated directly to create an L{AttrList}. @return: all the data in this list of attributes @rtype: str """ if self._cdf_file._opened: return '\n'.join([key + ': ' + ( ('\n' + ' ' * (len(key) + 2)).join( [str(value[i]) + ' [' + lib.cdftypenames[value.type(i)] + ']' for i in range(value.max_idx() + 1) if value.has_entry(i)]) if isinstance(value, Attr) else str(value) + ' [' + lib.cdftypenames[self.type(key)] + ']' ) for (key, value) in sorted(self.items())]) else: if isinstance(self._cdf_file.pathname, str): return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname) else: return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname.decode('ascii')) def clone(self, master, name=None, new_name=None): """ Clones another attribute list, or one attribute from it, into this list. Parameters ========== master : AttrList the attribute list to copy from. This can be any dict-like object. Other Parameters ================ name : str (optional) name of attribute to clone (default: clone entire list) new_name : str (optional) name of the new attribute, default ``name`` """ if name is None: self._clone_list(master) else: self._clone_attr(master, name, new_name) def copy(self): """ Create a copy of this attribute list Returns ======= out : dict copy of the entries for all attributes in this list """ return dict((key, value[:] if isinstance(value, Attr) else value) for (key, value) in self.items()) def new(self, name, data=None, type=None): """ Create a new Attr in this AttrList Parameters ========== name : str name of the new Attribute Other Parameters ================ data data to put into the first entry in the new Attribute type CDF type of the first entry from :mod:`~pycdf.const`. Only used if data are specified. Raises ====== KeyError : if the name already exists in this list """ if name in self: raise KeyError(name + ' already exists.') #A zAttr without an Entry in this zVar will be a "get" not "create" attr = self._get_or_create(name) if data is not None: if self.special_entry is None: attr.new(data, type) else: attr.new(data, type, self.special_entry()) def rename(self, old_name, new_name): """ Rename an attribute in this list Renaming a zAttribute renames it for *all* zVariables in this CDF! Parameters ========== old_name : str the current name of the attribute new_name : str the new name of the attribute """ AttrList.__getitem__(self, old_name).rename(new_name) def from_dict(self, in_dict): """ Fill this list of attributes from a dictionary .. deprecated:: 0.1.5 Use :meth:`~pycdf.AttrList.clone` instead; it supports cloning from dictionaries. 
Parameters ========== in_dict : dict Attribute list is populated entirely from this dictionary; all existing attributes are deleted. """ warnings.warn("from_dict is deprecated and will be removed. Use clone.", DeprecationWarning) for k in in_dict: self[k] = in_dict[k] for k in list(self): if not k in in_dict: del self[k] def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{AttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name self[new_name] = master[name] def _clone_list(self, master): """Clones this attribute list from another @param master: the attribute list to copy from @type master: L{AttrList} """ for name in master: self._clone_attr(master, name) for name in list(self): #Can't iterate over a list we're changing if not name in master: del self[name] def _get_or_create(self, name): """Retrieve L{Attr} or create it if it doesn't exist @param name: name of the attribute to look up or create @type name: str @return: attribute with this name @rtype: L{Attr} """ attr = None try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status != const.NO_SUCH_ATTR: raise if attr is None: attr = self.AttrType(self._cdf_file, name, True) elif attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) return attr class gAttrList(AttrList): """ Object representing *all* the gAttributes in a CDF. Normally accessed as an attribute of an open :class:`CDF`: >>> global_attribs = cdffile.attrs Appears as a dictionary: keys are attribute names; each value is an attribute represented by a :class:`gAttr` object. To access the global attribute TEXT: >>> text_attr = cdffile.attrs['TEXT'] See Also ======== :class:`AttrList` """ AttrType = gAttr attr_name = 'gAttribute' global_scope = True def __len__(self): """ Number of gAttributes in this CDF Returns ======= out : int number of gAttributes in the CDF """ count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMgATTRS_, ctypes.byref(count)) return count.value class zAttrList(AttrList): """Object representing *all* the zAttributes in a zVariable. Normally accessed as an attribute of a :class:`Var` in an open CDF: >>> epoch_attribs = cdffile['Epoch'].attrs Appears as a dictionary: keys are attribute names, values are the value of the zEntry associated with the appropriate zVariable. Each vAttribute in a CDF may only have a *single* entry associated with each variable. The entry may be a string, a single numerical value, or a series of numerical values. Entries with multiple values are returned as an entire list; direct access to the individual elements is not possible. Example: finding the first dependency of (ISTP-compliant) variable ``Flux``: >>> print cdffile['Flux'].attrs['DEPEND_0'] zAttributes are shared among zVariables, one zEntry allowed per zVariable. (pyCDF hides this detail.) Deleting the last zEntry for a zAttribute will delete the underlying zAttribute. zEntries are created and destroyed by the usual dict methods on the zAttrlist: >>> epoch_attribs['new_entry'] = [1, 2, 4] #assign a list to new zEntry >>> del epoch_attribs['new_entry'] #delete the zEntry The type of the zEntry is guessed from data provided. 
The type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing zEntry corresponding to this zVar #. other zEntries in this zAttribute #. the type of this zVar #. data-matching constraints described in :py:meth:`CDF.new` See Also ======== :class:`AttrList` """ AttrType = zAttr attr_name = 'zAttribute' global_scope = False def __init__(self, zvar): """Initialize the attribute collection @param zvar: zVariable these attributes are in @param zvar: :py:class:`pycdf.Var` """ super(zAttrList, self).__init__(zvar.cdf_file, zvar._num) self._zvar = zvar def __getitem__(self, name): """Find an zEntry by name @param name: name of the zAttribute to return @type name: str @return: attribute named L{name} @rtype: L{zAttr} @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if attrib.has_entry(zvar_num): attrib._raw = self._zvar._raw return attrib[zvar_num] else: raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) def __delitem__(self, name): """Delete an zEntry by name @param name: name of the zEntry to delete @type name: str @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library @note: If this is the only remaining entry, the Attribute will be deleted. """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(str(name) + ': no such attribute for variable ' + str(self._zvar._name)) del attrib[zvar_num] if len(attrib) == 0: attrib._delete() def __setitem__(self, name, data): """Sets a zEntry by name The type of the zEntry is guessed from L{data}. The type is chosen to match the data; subject to that constraint, it will try to match (in order): 1. existing zEntry corresponding to this zVar 2. other zEntries in this zAttribute 3. the type of this zVar 4. data-matching constraints described in L{_Hyperslice.types} @param name: name of zAttribute; zEntry for this zVariable will be set in zAttribute by this name @type name: str @raise CDFError: errors in CDF library @raise ValueError: if unable to find a valid CDF type matching L{data}, or if L{data} is the wrong dimensions. """ try: attr = super(zAttrList, self).__getitem__(name) except KeyError: attr = zAttr(self._cdf_file, name, True) attr._raw = self._zvar._raw attr[self._zvar._num()] = data def __len__(self): """Number of zAttributes in this variable @return: number of zAttributes in the CDF which have entries for this variable. @rtype: int """ length = 0 count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) current = 0 while current < count.value: candidate = zAttr(self._cdf_file, current) if not candidate.global_scope(): if candidate.has_entry(self._zvar._num()): length += 1 current += 1 return length def type(self, name, new_type=None): """Find or change the CDF type of a zEntry in this zVar @param name: name of the zAttr to check or change @type name: str @param new_type: type to change it to, see :py:mod:`pycdf.const` @type new_type: ctypes.c_long @return: CDF variable type, see :py:mod:`pycdf.const` @rtype: int @note: If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) return attrib.type(zvar_num, new_type) def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{zAttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name if new_name in self: del self[new_name] self.new(new_name, master[name], master.type(name) if hasattr(master, 'type') else None)
def check_status(self, status, ignore=()):
        """
        Raise exception or warning based on return status of CDF call

        Parameters
        ==========
        status : int
            status returned by the C library

        Other Parameters
        ================
        ignore : sequence of ctypes.c_long
            CDF statuses to ignore. If any of these is returned by CDF library,
            any related warnings or exceptions will *not* be raised.
            (Default none).

        Raises
        ======
        CDFError : if status < CDF_WARN, indicating an error

        Warns
        =====
        CDFWarning : if CDF_WARN <= status < CDF_OK, indicating a warning.

        Returns
        =======
        out : int
            status (unchanged)
        """
        if status == const.CDF_OK or status in ignore:
            return status
        if status < const.CDF_WARN:
            raise CDFError(status)
        else:
            warning = CDFWarning(status)
            warning.warn()
            return status
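For context (a hedged sketch, not part of the extracted implementation): every raw CDFlib return code is meant to pass through check_status, which is exactly what Library.call does further down in this file. Assuming an open CDF object `cdf` and the module-level `lib`:

# Sketch of the pattern wrapped by Library.call / CDF._call below.
count = ctypes.c_long(0)
status = lib._library.CDFlib(const.SELECT_, const.CDF_, cdf._handle,
                             const.GET_, const.CDF_NUMzVARS_,
                             ctypes.byref(count), const.NULL_)
lib.check_status(status)   # CDFError raised if status < CDF_WARN,
                           # CDFWarning issued if CDF_WARN <= status < CDF_OK,
                           # otherwise the status is returned unchanged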
535
571
#!/usr/bin/env python # -*- coding: utf-8 -*- """ das developers note: This a is modification of the original SpacePy pycdf package. All refereneces to the greater spacepy package have been removed to create a small standalone module. --cwp 2018-10-18 The libcdf.so location code has been changed to find the version installed in anaconda. --cwp 2020-04-06 This package provides a Python interface to the Common Data Format (CDF) library used for many NASA missions, available at http://cdf.gsfc.nasa.gov/. It is targeted at Python 2.6+ and should work without change on either Python 2 or Python 3. The interface is intended to be 'pythonic' rather than reproducing the C interface. To open or close a CDF and access its variables, see the :class:`CDF` class. Accessing data within the variables is via the :class:`Var` class. The :data:`lib` object provides access to some routines that affect the functionality of the library in general. The :mod:`~pycdf.const` module contains constants useful for accessing the underlying library. Authors: Jon Niehof Institution: University of New Hampshire Contact: [email protected] Copyright 2010-2015 Los Alamos National Security, LLC. """ __contact__ = 'Jon Niehof, [email protected]' try: from collections.abc import MutableMapping, MutableSequence except ImportError: from collections import MutableMapping, MutableSequence import ctypes import ctypes.util import datetime import operator import os import os.path import shutil import sys import tempfile import warnings import weakref import numpy import numpy.ma #Import const AFTER library loaded, so failed load doesn't leave half-imported #from . import const try: str_classes = (str, bytes, unicode) except NameError: str_classes = (str, bytes) class Library(object): """ Abstraction of the base CDF C library and its state. Not normally intended for end-user use. An instance of this class is created at package load time as the :data:`~pycdf.lib` variable, providing access to the underlying C library if necessary. The CDF library itself is described in section 2.1 of the CDF user's guide, as well as the CDF C reference manual. Calling the C library directly requires knowledge of :mod:`ctypes`. Instantiating this object loads the C library, see :doc:`/pycdf` docs for details. .. autosummary:: ~Library.call ~Library.check_status ~Library.datetime_to_epoch ~Library.datetime_to_epoch16 ~Library.datetime_to_tt2000 ~Library.epoch_to_datetime ~Library.epoch_to_epoch16 ~Library.epoch_to_num ~Library.epoch_to_tt2000 ~Library.epoch16_to_datetime ~Library.epoch16_to_epoch ~Library.epoch16_to_tt2000 ~Library.set_backward supports_int8 ~Library.tt2000_to_datetime ~Library.tt2000_to_epoch ~Library.tt2000_to_epoch16 v_datetime_to_epoch v_datetime_to_epoch16 v_datetime_to_tt2000 v_epoch_to_datetime v_epoch_to_tt2000 v_epoch16_to_datetime v_epoch16_to_tt2000 v_tt2000_to_datetime v_tt2000_to_epoch v_tt2000_to_epoch16 libpath version .. automethod:: call .. automethod:: check_status .. automethod:: datetime_to_epoch .. automethod:: datetime_to_epoch16 .. automethod:: datetime_to_tt2000 .. automethod:: epoch_to_datetime .. automethod:: epoch_to_epoch16 .. automethod:: epoch_to_num .. automethod:: epoch_to_tt2000 .. automethod:: epoch16_to_datetime .. automethod:: epoch16_to_epoch .. automethod:: epoch16_to_tt2000 .. automethod:: set_backward .. attribute:: supports_int8 True if this library supports INT8 and TIME_TT2000 types; else False. .. automethod:: tt2000_to_datetime .. automethod:: tt2000_to_epoch .. 
automethod:: tt2000_to_epoch16 .. method:: v_datetime_to_epoch(datetime) A vectorized version of :meth:`datetime_to_epoch` which takes a numpy array of datetimes as input and returns an array of epochs. .. method:: v_datetime_to_epoch16(datetime) A vectorized version of :meth:`datetime_to_epoch16` which takes a numpy array of datetimes as input and returns an array of epoch16. .. method:: v_datetime_to_tt2000(datetime) A vectorized version of :meth:`datetime_to_tt2000` which takes a numpy array of datetimes as input and returns an array of TT2000. .. method:: v_epoch_to_datetime(epoch) A vectorized version of :meth:`epoch_to_datetime` which takes a numpy array of epochs as input and returns an array of datetimes. .. method:: v_epoch_to_tt2000(epoch) A vectorized version of :meth:`epoch_to_tt2000` which takes a numpy array of epochs as input and returns an array of tt2000s. .. method:: v_epoch16_to_datetime(epoch0, epoch1) A vectorized version of :meth:`epoch16_to_datetime` which takes a numpy array of epoch16 as input and returns an array of datetimes. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_epoch16_to_tt2000(epoch16) A vectorized version of :meth:`epoch16_to_tt2000` which takes a numpy array of epoch16 as input and returns an array of tt2000s. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_tt2000_to_datetime(tt2000) A vectorized version of :meth:`tt2000_to_datetime` which takes a numpy array of tt2000 as input and returns an array of datetimes. .. method:: v_tt2000_to_epoch(tt2000) A vectorized version of :meth:`tt2000_to_epoch` which takes a numpy array of tt2000 as input and returns an array of epochs. .. method:: v_tt2000_to_epoch16(tt2000) A vectorized version of :meth:`tt2000_to_epoch16` which takes a numpy array of tt2000 as input and returns an array of epoch16. .. attribute:: libpath The path where pycdf found the CDF C library, potentially useful in debugging. If this contains just the name of a file (with no path information), then the system linker found the library for pycdf. On Linux, ``ldconfig -p`` may be useful for displaying the system's library resolution. .. attribute:: version Version of the CDF library, (version, release, increment, subincrement) """ def __init__(self, libpath=None, library=None): """Load the CDF C library. Searches for the library in the order: 1. Appropriately-named file in CDF_LIB 2. Appropriately-named file in CDF_BASE 3. Standard library search path @raise CDFError: BAD_DATA_TYPE if can't map types properly """ if not 'CDF_TMP' in os.environ: os.environ['CDF_TMP'] = tempfile.gettempdir() if not library: if not libpath: self.libpath, self._library = self._find_lib() if self._library is None: raise Exception(( 'Cannot load CDF C library; checked {0}. 
' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(self.libpath))) else: self._library = ctypes.CDLL(libpath) self.libpath = libpath else: self._library = library self.libpath = libpath self._library.CDFlib.restype = ctypes.c_long #commonly used, so set it up here self._library.EPOCHbreakdown.restype = ctypes.c_long self._library.computeEPOCH.restype = ctypes.c_double self._library.computeEPOCH.argtypes = [ctypes.c_long] * 7 self._library.computeEPOCH16.restype = ctypes.c_double self._library.computeEPOCH16.argtypes = [ctypes.c_long] * 10 + \ [ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDFsetFileBackward'): self._library.CDFsetFileBackward.restype = None self._library.CDFsetFileBackward.argtypes = [ctypes.c_long] #Map old name to the 3.7.1+ name if not hasattr(self._library, 'computeTT2000') \ and hasattr(self._library, 'CDF_TT2000_from_UTC_parts'): self._library.computeTT2000 \ = self._library.CDF_TT2000_from_UTC_parts if hasattr(self._library, 'computeTT2000'): self._library.computeTT2000.restype = ctypes.c_longlong self._library.computeTT2000.argtypes = \ [ctypes.c_double] *9 #Map old name to the 3.7.1+ name if not hasattr(self._library, 'breakdownTT2000') \ and hasattr(self._library, 'CDF_TT2000_to_UTC_parts'): self._library.breakdownTT2000 \ = self._library.CDF_TT2000_to_UTC_parts if hasattr(self._library, 'breakdownTT2000'): self._library.breakdownTT2000.restype = None self._library.breakdownTT2000.argtypes = \ [ctypes.c_longlong] + [ctypes.POINTER(ctypes.c_double)] * 9 if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH'): self._library.CDF_TT2000_to_UTC_EPOCH.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH.argtypes = [ctypes.c_longlong] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH'): self._library.CDF_TT2000_from_UTC_EPOCH.restype = ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH.argtypes = [ctypes.c_double] if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH16'): self._library.CDF_TT2000_to_UTC_EPOCH16.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH16.argtypes = \ [ctypes.c_longlong, ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH16'): self._library.CDF_TT2000_from_UTC_EPOCH16.restype = \ ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH16.argtypes = \ [ctypes.POINTER(ctypes.c_double * 2)] #Get CDF version information ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) sub = ctypes.c_char(b' ') self.call(const.GET_, const.LIB_VERSION_, ctypes.byref(ver), const.GET_, const.LIB_RELEASE_, ctypes.byref(rel), const.GET_, const.LIB_INCREMENT_, ctypes.byref(inc), const.GET_, const.LIB_subINCREMENT_, ctypes.byref(sub)) ver = ver.value rel = rel.value inc = inc.value sub = sub.value self.version = (ver, rel, inc, sub) self._del_middle_rec_bug = ver < 3 or (ver == 3 and (rel < 4 or (rel == 4 and inc < 1))) self.supports_int8 = (ver > 3 or (ver == 3 and rel >=4)) self.cdftypenames = {const.CDF_BYTE.value: 'CDF_BYTE', const.CDF_CHAR.value: 'CDF_CHAR', const.CDF_INT1.value: 'CDF_INT1', const.CDF_UCHAR.value: 'CDF_UCHAR', const.CDF_UINT1.value: 'CDF_UINT1', const.CDF_INT2.value: 'CDF_INT2', const.CDF_UINT2.value: 'CDF_UINT2', const.CDF_INT4.value: 'CDF_INT4', const.CDF_UINT4.value: 'CDF_UINT4', const.CDF_INT8.value: 'CDF_INT8', const.CDF_FLOAT.value: 'CDF_FLOAT', const.CDF_REAL4.value: 'CDF_REAL4', const.CDF_DOUBLE.value: 'CDF_DOUBLE', const.CDF_REAL8.value: 'CDF_REAL8', const.CDF_EPOCH.value: 'CDF_EPOCH', 
const.CDF_EPOCH16.value: 'CDF_EPOCH16', const.CDF_TIME_TT2000.value: 'CDF_TIME_TT2000', } self.numpytypedict = {const.CDF_BYTE.value: numpy.int8, const.CDF_CHAR.value: numpy.int8, const.CDF_INT1.value: numpy.int8, const.CDF_UCHAR.value: numpy.uint8, const.CDF_UINT1.value: numpy.uint8, const.CDF_INT2.value: numpy.int16, const.CDF_UINT2.value: numpy.uint16, const.CDF_INT4.value: numpy.int32, const.CDF_UINT4.value: numpy.uint32, const.CDF_INT8.value: numpy.int64, const.CDF_FLOAT.value: numpy.float32, const.CDF_REAL4.value: numpy.float32, const.CDF_DOUBLE.value: numpy.float64, const.CDF_REAL8.value: numpy.float64, const.CDF_EPOCH.value: numpy.float64, const.CDF_EPOCH16.value: numpy.dtype((numpy.float64, 2)), const.CDF_TIME_TT2000.value: numpy.int64, } self.timetypes = [const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value] if not self.supports_int8: del self.cdftypenames[const.CDF_INT8.value] del self.numpytypedict[const.CDF_INT8.value] del self.cdftypenames[const.CDF_TIME_TT2000.value] del self.numpytypedict[const.CDF_TIME_TT2000.value] elif sys.platform.startswith('linux') \ and os.uname()[4].startswith('arm') \ and hasattr(self._library, 'computeTT2000') \ and self._library.computeTT2000( 2010, 1, 1, 0, 0, 0, 0, 0, 0) != 315576066184000000: #TT2000 call failed, so probably need to type-pun #double arguments to variadic functions. #Calling convention for non-variadic functions with floats #is unique, but convention for ints is same as variadic. #So type-pun arguments to integers to force that calling #convention. if ctypes.sizeof(ctypes.c_longlong) != \ ctypes.sizeof(ctypes.c_double): warnings.warn('ARM with unknown type sizes; ' 'TT2000 functions will not work.') else: self._library.computeTT2000.argtypes = \ [ctypes.c_longlong] * 9 c_ll_p = ctypes.POINTER(ctypes.c_longlong) if self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( 2010)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) != 315576066184000000: warnings.warn('ARM with unknown calling convention; ' 'TT2000 functions will not work.') self.datetime_to_tt2000 = self._datetime_to_tt2000_typepunned v_epoch16_to_datetime = numpy.frompyfunc( self.epoch16_to_datetime, 2, 1) self.v_epoch16_to_datetime = \ lambda x: v_epoch16_to_datetime(x[..., 0], x[..., 1]) self.v_epoch_to_datetime = numpy.frompyfunc( self.epoch_to_datetime, 1, 1) self.v_tt2000_to_datetime = numpy.frompyfunc( self.tt2000_to_datetime, 1, 1) self.v_datetime_to_epoch = numpy.vectorize( self.datetime_to_epoch, otypes=[numpy.float64]) v_datetime_to_epoch16 = numpy.frompyfunc( self.datetime_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. 
We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_datetime_to_epoch16(x): retval = numpy.require(v_datetime_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_datetime_to_epoch16 = _v_datetime_to_epoch16 self.v_datetime_to_tt2000 = numpy.vectorize( self.datetime_to_tt2000, otypes=[numpy.int64]) self.v_epoch_to_tt2000 = numpy.vectorize( self.epoch_to_tt2000, otypes=[numpy.int64]) self.v_tt2000_to_epoch = numpy.vectorize( self.tt2000_to_epoch, otypes=[numpy.float64]) v_epoch16_to_tt2000 = numpy.frompyfunc( self.epoch16_to_tt2000, 2, 1) self.v_epoch16_to_tt2000 = \ lambda x: v_epoch16_to_tt2000(x[..., 0], x[..., 1]) v_tt2000_to_epoch16 = numpy.frompyfunc( self.tt2000_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_tt2000_to_epoch16(x): retval = numpy.require(v_tt2000_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_tt2000_to_epoch16 = _v_tt2000_to_epoch16 if not self.supports_int8: self.datetime_to_tt2000 = self._bad_tt2000 self.tt2000_to_datetime = self._bad_tt2000 self.v_datetime_to_tt2000 = self._bad_tt2000 self.v_tt2000_to_datetime = self._bad_tt2000 self.epoch_to_tt2000 = self._bad_tt2000 self.v_epoch_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch = self._bad_tt2000 self.v_tt2000_to_epoch = self._bad_tt2000 self.epoch_16_to_tt2000 = self._bad_tt2000 self.v_epoch16_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch16 = self._bad_tt2000 self.v_tt2000_to_epoch16 = self._bad_tt2000 #Default to V2 CDF self.set_backward(True) @staticmethod def _find_lib(): """ Search for the CDF library Searches in likely locations for CDF libraries and attempts to load them. Stops at first successful load and, if fails, reports all the files that were tried as libraries. Returns ======= out : tuple This is either (path to library, loaded library) or, in the event of failure, (None, list of libraries tried) """ failed = [] for libpath in Library._lib_paths(): try: lib = ctypes.CDLL(libpath) except: failed.append(libpath) else: return libpath, lib return (failed, None) @staticmethod def _lib_paths(): """Find candidate paths for the CDF library Does not check that the library is actually in any particular directory, just returns a list of possible locations, in priority order. Returns ======= out : generator of str paths that look like the CDF library """ #What the library might be named names = { 'win32': ['cdf.dll'], 'darwin': ['libcdf.dylib', 'cdf.dylib', 'libcdf.so'], 'linux2': ['libcdf.so'], 'linux': ['libcdf.so'], } names = names.get(sys.platform, ['libcdf.so']) #All existing CDF-library-like paths within a directory search_dir = lambda x: \ [os.path.join(x, fname) for fname in names if os.path.exists(os.path.join(x, fname))] # Only use anaconda locations... # Defined during builds ... if 'PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['PREFIX'], 'lib')): yield p # defined when conda is activated ... 
if 'CONDA_PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'lib')): yield p # Special subdirectory for anaconda unix packages on windows if 'LIBRARY_BIN' in os.environ: for p in search_dir(os.environ['LIBRARY_BIN']): yield p ctypespath = ctypes.util.find_library( 'cdf.dll' if sys.platform == 'win32' else 'cdf') if ctypespath: yield ctypespath def check_status(self, status, ignore=()): """ Raise exception or warning based on return status of CDF call Parameters ========== status : int status returned by the C library Other Parameters ================ ignore : sequence of ctypes.c_long CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. (Default none). Raises ====== CDFError : if status < CDF_WARN, indicating an error Warns ===== CDFWarning : if CDF_WARN <= status < CDF_OK, indicating a warning. Returns ======= out : int status (unchanged) """ if status == const.CDF_OK or status in ignore: return status if status < const.CDF_WARN: raise CDFError(status) else: warning = CDFWarning(status) warning.warn() return status def call(self, *args, **kwargs): """ Call the CDF internal interface Passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with :meth:`check_status`. Terminal NULL is automatically added to args. Parameters ========== args : various, see :mod:`ctypes` Passed directly to the CDF library interface. Useful constants are defined in the :mod:`~pycdf.const` module. Other Parameters ================ ignore : sequence of CDF statuses sequence of CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. Returns ======= out : int CDF status from the library Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ if 'ignore' in kwargs: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) ), kwargs['ignore']) else: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) )) def set_backward(self, backward=True): """ Set backward compatibility mode for new CDFs Unless backward compatible mode is set, CDF files created by the version 3 library can not be read by V2. Parameters ========== backward : boolean Set backward compatible mode if True; clear it if False. Raises ====== ValueError : if backward=False and underlying CDF library is V2 """ if self.version[0] < 3: if not backward: raise ValueError( 'Cannot disable backward-compatible mode for CDF version 2.') else: return self._library.CDFsetFileBackward(const.BACKWARDFILEon if backward else const.BACKWARDFILEoff) def epoch_to_datetime(self, epoch): """ Converts a CDF epoch value to a datetime Parameters ========== epoch : float epoch value from CDF Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
See Also ======== v_epoch_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) self._library.EPOCHbreakdown(ctypes.c_double(epoch), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999000) else: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, msec.value * 1000) def datetime_to_epoch(self, dt): """ Converts a Python datetime to a CDF Epoch value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : float epoch corresponding to dt See Also ======== v_datetime_to_epoch """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) micro = dt.microsecond % 1000 if micro >= 500 and dt.year < 9999: dt += datetime.timedelta(0, 0, 1000) return self._library.computeEPOCH(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000)) def epoch16_to_datetime(self, epoch0, epoch1): """ Converts a CDF epoch16 value to a datetime .. note:: The call signature has changed since SpacePy 0.1.2. Formerly this method took a single argument with two values; now it requires two arguments (one for each value). To convert existing code, replace ``epoch16_to_datetime(epoch)`` with ``epoch16_to_datetime(*epoch)``. Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. See Also ======== v_epoch16_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) usec = ctypes.c_long(0) nsec = ctypes.c_long(0) psec = ctypes.c_long(0) self._library.EPOCH16breakdown((ctypes.c_double * 2)(epoch0, epoch1), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec), ctypes.byref(psec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) micro = int(float(msec.value) * 1000 + float(usec.value) + float(nsec.value) / 1000 + float(psec.value) / 1e6 + 0.5) if micro < 1000000: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_epoch16(self, dt): """ Converts a Python datetime to a CDF Epoch16 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : list of float epoch16 corresponding to dt See Also ======== v_datetime_to_epoch16 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) #Default to "illegal epoch" epoch16 = (ctypes.c_double * 2)(-1., -1.) 
if self._library.computeEPOCH16(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0, 0, epoch16): return (-1., -1.) #Failure, so illegal epoch return (epoch16[0], epoch16[1]) def epoch_to_epoch16(self, epoch): """ Converts a CDF EPOCH to a CDF EPOCH16 value Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : (double, double) EPOCH16 corresponding to epoch """ e = numpy.require(epoch, numpy.float64) s = numpy.trunc(e / 1000.0) #ugly numpy stuff, probably a better way.... res = numpy.hstack((s, (e - s * 1000.0) * 1e9)) if len(res) <= 2: return res newshape = list(res.shape[0:-2]) newshape.append(res.shape[-1] // 2) newshape.append(2) return numpy.rollaxis(res.reshape(newshape), -1, -2) def epoch_to_num(self, epoch): """ Convert CDF EPOCH to matplotlib number. Same output as :func:`~matplotlib.dates.date2num` and useful for plotting large data sets without converting the times through datetime. Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : double Floating point number representing days since 0001-01-01. """ #date2num day 1 is 1/1/1 00UT #epoch 1/1/1 00UT is 31622400000.0 (millisecond) return (epoch - 31622400000.0) / (24 * 60 * 60 * 1000.0) + 1.0 def epoch16_to_epoch(self, epoch16): """ Converts a CDF EPOCH16 to a CDF EPOCH value Parameters ========== epoch16 : (double, double) EPOCH16 to convert. Lists and numpy arrays are acceptable. LAST dimension should be 2: the two pairs of EPOCH16 Returns ======= out : double EPOCH corresponding to epoch16 """ e = numpy.require(epoch16, numpy.float64) return e[..., 0] * 1000.0 + numpy.round(e[..., 1] / 1e9) def tt2000_to_datetime(self, tt2000): """ Converts a CDF TT2000 value to a datetime .. note:: Although TT2000 values support leapseconds, Python's datetime object does not. Any times after 23:59:59.999999 will be truncated to 23:59:59.999999. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
See Also ======== v_tt2000_to_datetime """ yyyy = ctypes.c_double(0) mm = ctypes.c_double(0) dd = ctypes.c_double(0) hh = ctypes.c_double(0) min = ctypes.c_double(0) sec = ctypes.c_double(0) msec = ctypes.c_double(0) usec = ctypes.c_double(0) nsec = ctypes.c_double(0) self._library.breakdownTT2000( ctypes.c_longlong(tt2000), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) sec = int(sec.value) if sec >= 60: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), 59, 999999) micro = int(msec.value * 1000 + usec.value + nsec.value / 1000 + 0.5) if micro < 1000000: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_tt2000(self, dt): """ Converts a Python datetime to a CDF TT2000 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0) def _datetime_to_tt2000_typepunned(self, dt): """ Converts a Python datetime to a CDF TT2000 value Typepunned version that passes doubles as longlongs, to get around ARM calling convention oddness. Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ c_ll_p = ctypes.POINTER(ctypes.c_longlong) if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( dt.year)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.month)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.day)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.hour)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.minute)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.second)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond // 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond % 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) def epoch_to_tt2000(self, epoch): """ Converts a CDF EPOCH to a CDF TT2000 value Parameters ========== epoch : double EPOCH to convert Returns ======= out : int tt2000 corresponding to epoch See Also ======== v_epoch_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH(epoch) def tt2000_to_epoch(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH .. note:: Although TT2000 values support leapseconds, CDF EPOCH values do not. 
Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double EPOCH corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch """ return self._library.CDF_TT2000_to_UTC_EPOCH(tt2000) def epoch16_to_tt2000(self, epoch0, epoch1): """ Converts a CDF epoch16 value to TT2000 .. note:: Because TT2000 does not support picoseconds, the picoseconds value in epoch is ignored (i.e., truncated.) Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : long TT2000 corresponding to epoch. See Also ======== v_epoch16_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH16( (ctypes.c_double * 2)(epoch0, epoch1)) def tt2000_to_epoch16(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH16 .. note:: Although TT2000 values support leapseconds, CDF EPOCH16 values do not. Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double, double EPOCH16 corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch16 """ #Default to "illegal epoch" if isn't populated epoch16 = (ctypes.c_double * 2)(-1., -1.) if self._library.CDF_TT2000_to_UTC_EPOCH16(tt2000, epoch16): return (-1., -1.) #Failure; illegal epoch return (epoch16[0], epoch16[1]) def _bad_tt2000(*args, **kwargs): """Convenience function for complaining that TT2000 not supported""" raise NotImplementedError( 'TT2000 functions require CDF library 3.4.0 or later') def download_library(): """Download and install the CDF library""" if sys.platform != 'win32': raise NotImplementedError( 'CDF library install only supported on Windows') try: import html.parser as HTMLParser except ImportError: import HTMLParser #https://stackoverflow.com/questions/1713038/super-fails-with-error-typeerror-argument-1-must-be-type-not-classobj class LinkParser(HTMLParser.HTMLParser, object): def __init__(self, *args, **kwargs): self.links_found = [] super(LinkParser, self).__init__(*args, **kwargs) def handle_starttag(self, tag, attrs): if tag != 'a' or attrs[0][0] != 'href': return self.links_found.append(attrs[0][1]) import re import subprocess try: import urllib.request as u except ImportError: import urllib as u # Removed reference to spacepy #import spacepy #if spacepy.config.get('user_agent', None): # class AppURLopener(u.FancyURLopener): # version = spacepy.config['user_agent'] # u._urlopener = AppURLopener() baseurl = 'https://spdf.sci.gsfc.nasa.gov/pub/software/cdf/dist/' url = u.urlopen(baseurl) listing = url.read() url.close() p = LinkParser() p.feed(listing) cdfdist = [l for l in p.links_found if re.match('^cdf3\d_\d(?:_\d)?/$', l)] if not cdfdist: raise RuntimeError( "Couldn't find CDF distribution directory to download") cdfdist.sort(key=lambda x: x.rstrip('/').split('_')) cdfverbase = cdfdist[-1].rstrip('/') instfname = cdfverbase + ('_0' if cdfverbase.count('_') == 1 else '') + \ '-setup-{0}.exe'.format(len('%x' % sys.maxsize)*4) insturl = baseurl + cdfverbase + '/windows/' + instfname tmpdir = tempfile.mkdtemp() try: fname, status = u.urlretrieve(insturl, os.path.join(tmpdir, instfname)) subprocess.check_call([fname, '/install', '/q1'], shell=False) finally: shutil.rmtree(tmpdir) _libpath, _library = Library._find_lib() 
if _library is None: raise Exception(('Cannot load CDF C library; checked {0}. ' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(_libpath))) from . import const lib = Library(_libpath, _library) """Module global library object. Initalized at module load time so all classes have ready access to the CDF library and a common state. E.g: >>> import pycdf >>> pycdf.lib.version (3, 3, 0, ' ') """ class CDFException(Exception): """ Base class for errors or warnings in the CDF library. Not normally used directly, but in subclasses :class:`CDFError` and :class:`CDFWarning`. Error messages provided by this class are looked up from the underlying C library. """ def __init__(self, status): """ Create a CDF Exception Uses CDF C library to look up an appropriate error message. Parameters ========== status : ctypes.c_long CDF status """ self.status = status self.string = 'CDF error ' + repr(status) + ', unable to get details.' message = ctypes.create_string_buffer(const.CDF_STATUSTEXT_LEN + 1) try: retval = lib._library.CDFlib(const.SELECT_, const.CDF_STATUS_, ctypes.c_long(status), const.GET_, const.STATUS_TEXT_, message, const.NULL_) if retval == const.CDF_OK: if isinstance(message.value, str): self.string = message.value elif isinstance(message.value, bytes): self.string = message.value.decode() except: pass def __str__(self): """ Error string associated with the library error. Returns ======= out : str Error message from the CDF library. """ return self.string class CDFError(CDFException): """Raised for an error in the CDF library.""" pass class CDFWarning(CDFException, UserWarning): """Used for a warning in the CDF library.""" def warn(self, level=4): """ Issues a warning based on the information stored in my exception Intended for use in check_status or similar wrapper function. Other Parameters ================ level : int optional (default 3), how far up the stack the warning should be reported. Passed directly to :class:`warnings.warn`. """ warnings.warn(self, self.__class__, level) class EpochError(Exception): """Used for errors in epoch routines""" pass def _compress(obj, comptype=None, param=None): """Set or check the compression of a :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param obj: object on which to set or check compression @type obj: :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param comptype: type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :py:mod:`pycdf.const`. If not specified, will not change compression. @type comptype: ctypes.c_long @param param: Compression parameter, see CDF CRM 4.10 and :py:mod:`pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
@type param: ctypes.c_long @return: (comptype, param) currently in effect @rtype: tuple """ if isinstance(obj, CDF): COMPRESSION_ = const.CDF_COMPRESSION_ elif isinstance(obj, Var): COMPRESSION_ = const.zVAR_COMPRESSION_ else: raise ValueError('Must specify a CDF or Var type.') validparams = {const.NO_COMPRESSION.value: [ctypes.c_long(0)], const.RLE_COMPRESSION.value: [const.RLE_OF_ZEROs], const.HUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.AHUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.GZIP_COMPRESSION.value: [ctypes.c_long(5), ctypes.c_long(1), ctypes.c_long(2), ctypes.c_long(3), ctypes.c_long(4), ctypes.c_long(6), ctypes.c_long(7), ctypes.c_long(8), ctypes.c_long(9), ], } comptypes = [const.NO_COMPRESSION, const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION, const.GZIP_COMPRESSION] comptypevalues = [i.value for i in comptypes] if comptype != None: if not hasattr(comptype, 'value'): comptype = ctypes.c_long(comptype) if param is None: if not comptype.value in validparams: raise CDFError(const.BAD_COMPRESSION) param = validparams[comptype.value][0] paramlist = (ctypes.c_long * 1)(param) obj._call(const.PUT_, COMPRESSION_, comptype, paramlist) params = (ctypes.c_long * const.CDF_MAX_PARMS)(*([0] * const.CDF_MAX_PARMS)) comptype = ctypes.c_long(0) percent = ctypes.c_long(0) obj._call(const.GET_, COMPRESSION_, ctypes.byref(comptype), ctypes.byref(params), ctypes.byref(percent)) param = params[0] if not comptype.value in comptypevalues: raise CDFError(const.BAD_COMPRESSION) validparamvalues = [i.value for i in validparams[comptype.value]] if not param in validparamvalues: raise CDFError(const.BAD_COMPRESSION_PARM) comptype = comptypes[comptypevalues.index(comptype.value)] if comptype in (const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION): param = validparams[comptype.value][validparamvalues.index(param)] return (comptype, param) class CDF(MutableMapping): """ Python object representing a CDF file. Open or create a CDF file by creating an object of this class. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. A readonly CDF with many variables may be slow to close. See :meth:`readonly`. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :meth:`close` or :meth:`save` when done. .. note:: Existing CDF files are opened read-only by default, see :meth:`readonly` to change. CDF supports the `with <http://docs.python.org/tutorial/inputoutput.html#methods-of-file-objects>`_ keyword, like other file objects, so: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... #do brilliant things with the CDF will open the CDF, execute the indented statements, and close the CDF when finished or when an error occurs. The `python docs <http://docs.python.org/reference/compound_stmts.html#with>`_ include more detail on this 'context manager' ability. 
CDF objects behave like a python `dictionary <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_, where the keys are names of variables in the CDF, and the values, :class:`Var` objects. As a dictionary, they are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_ and it is easy to loop over all of the variables in a file. Some examples: #. List the names of all variables in the open CDF ``cdffile``: >>> cdffile.keys() >>> for k in cdffile: #Alternate ... print(k) #. Get a :class:`Var` object for the variable named ``Epoch``: >>> epoch = cdffile['Epoch'] #. Determine if a CDF contains a variable named ``B_GSE``: >>> if 'B_GSE' in cdffile: ... print('B_GSE is in the file') ... else: ... print('B_GSE is not in the file') #. Find how many variables are in the file: >>> print(len(cdffile)) #. Delete the variable ``Epoch`` from the open CDF file ``cdffile``: >>> del cdffile['Epoch'] #. Display a summary of variables and types in open CDF file ``cdffile``: >>> print(cdffile) #. Open the CDF named ``cdf_filename.cdf``, read *all* the data from all variables into dictionary ``data``, and close it when done or if an error occurs: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... data = cdffile.copy() This last example can be very inefficient as it reads the entire CDF. Normally it's better to treat the CDF as a dictionary and access only the data needed, which will be pulled transparently from disc. See :class:`Var` for more subtle examples. Potentially useful dictionary methods and related functions: - `in <http://docs.python.org/reference/expressions.html#in>`_ - `keys <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_ - :py:func:`len` - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - :py:func:`sorted` - :py:func:`~spacepy.toolbox.dictree` The CDF user's guide section 2.2 has more background information on CDF files. The :attr:`~CDF.attrs` Python attribute acts as a dictionary referencing CDF attributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`gAttrList` for more on the dictionary of global attributes. Creating a new CDF from a master (skeleton) CDF has similar syntax to opening one: >>> cdffile = pycdf.CDF('cdf_filename.cdf', 'master_cdf_filename.cdf') This creates and opens ``cdf_filename.cdf`` as a copy of ``master_cdf_filename.cdf``. Using a skeleton CDF is recommended over making a CDF entirely from scratch, but this is possible by specifying a blank master: >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') When CDFs are created in this way, they are opened read-write, see :py:meth:`readonly` to change. By default, new CDFs (without a master) are created in version 2 (backward-compatible) format. To create a version 3 CDF, use :meth:`Library.set_backward`: >>> pycdf.lib.set_backward(False) >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') Add variables by direct assignment, which will automatically set type and dimension based on the data provided: >>> cdffile['new_variable_name'] = [1, 2, 3, 4] or, if more control is needed over the type and dimensions, use :py:meth:`new`. Although it is supported to assign Var objects to Python variables for convenience, there are some minor pitfalls that can arise when changing a CDF that will not affect most users. 
This is only a concern when assigning a zVar object to a Python variable, changing the CDF through some other variable, and then trying to use the zVar object via the originally assigned variable. Deleting a variable: >>> var = cdffile['Var1'] >>> del cdffile['Var1'] >>> var[0] #fail, no such variable Renaming a variable: >>> var = cdffile['Var1'] >>> cdffile['Var1'].rename('Var2') >>> var[0] #fail, no such variable Renaming via the same variable works: >>> var = cdffile['Var1'] >>> var.rename('Var2') >>> var[0] #succeeds, aware of new name Deleting a variable and then creating another variable with the same name may lead to some surprises: >>> var = cdffile['Var1'] >>> var[...] = [1, 2, 3, 4] >>> del cdffile['Var1'] >>> cdffile.new('Var1', data=[5, 6, 7, 8] >>> var[...] [5, 6, 7, 8] .. autosummary:: ~CDF.attr_num ~CDF.attrs ~CDF.add_attr_to_cache ~CDF.add_to_cache ~CDF.backward ~CDF.checksum ~CDF.clear_attr_from_cache ~CDF.clear_from_cache ~CDF.clone ~CDF.close ~CDF.col_major ~CDF.compress ~CDF.copy ~CDF.from_data ~CDF.new ~CDF.raw_var ~CDF.readonly ~CDF.save ~CDF.var_num ~CDF.version .. attribute:: CDF.attrs Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. .. attribute:: CDF.backward True if this CDF was created in backward-compatible mode (for opening with CDF library before 3.x) .. automethod:: add_to_cache .. automethod:: add_attr_to_cache .. automethod:: attr_num .. automethod:: checksum .. automethod:: clear_from_cache .. automethod:: clear_attr_from_cache .. automethod:: clone .. automethod:: close .. automethod:: col_major .. automethod:: compress .. automethod:: copy .. automethod:: from_data .. automethod:: new .. automethod:: raw_var .. automethod:: readonly .. automethod:: save .. automethod:: var_num .. automethod:: version """ def __init__(self, pathname, masterpath=None, create=None, readonly=None): """Open or create a CDF file. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. Raises ====== CDFError if CDF library reports an error CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save` when done. 
""" if masterpath is not None: #Looks like we want to create if create is False: raise ValueError('Cannot specify a master CDF without creating a CDF') if readonly is True: raise ValueError('Cannot create a CDF in readonly mode') if create and readonly: raise ValueError('Cannot create a CDF in readonly mode') try: self.pathname = pathname.encode() except AttributeError: raise ValueError( 'pathname must be string-like: {0}'.format(pathname)) self._handle = ctypes.c_void_p(None) self._opened = False if masterpath is None and not create: self._open(True if readonly is None else readonly) elif masterpath: self._from_master(masterpath.encode()) else: self._create() lib.call(const.SELECT_, const.CDF_zMODE_, ctypes.c_long(2)) self._attrlistref = weakref.ref(gAttrList(self)) self.backward = self.version()[0] < 3 self._var_nums = {} """Cache of name-to-number mappings for variables in this CDF""" self._attr_info = {} """Cache of name-to-(number, global) mappings for attributes in this CDF""" def __del__(self): """Destructor; called when CDF object is destroyed. Close CDF file if there is still a valid handle. .. note:: To avoid data loss, explicitly call :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save`. """ if self._opened: self.close() def __delitem__(self, name): """Delete a zVariable in this CDF, by name or number Parameters ========== name : string or int Name or number of the CDF variable .. note: Variable numbers may change if variables are added or removed. Examples ======== Delete the variable ``Epoch`` from the open CDF file ``cdffile``. >>> del cdffile['Epoch'] """ self[name]._delete() def __enter__(self): """Context manager entrance function.""" return self def __exit__(self, type, value, traceback): """Context manager exit function. Close CDF file. """ self.close() def __getitem__(self, name): """Gets a zVariable in this CDF, by name or number The CDF acts like a dict @param name: Name or number of the CDF variable @type name: string or int @return: CDF variable named or numbered L{name} @rtype: :py:class:`pycdf.Var` @raise KeyError: for pretty much any problem in lookup @note: variable numbers may change if variables are added or removed. """ try: return Var(self, name) except CDFException as e: raise KeyError('{0}: {1}'.format(name, e)) def __setitem__(self, name, data): """Writes data to a zVariable in this CDF If the zVariable does not exist, will create one matching L{data}. If it does exist, will attempt to write L{data} to it without changing the type or dimensions. @param name: name or number of the variable to write @type name: str or int @param data: data to write, or a :py:class:`pycdf.Var` to copy """ if isinstance(data, Var): self.clone(data, name) elif name in self: self[name][...] 
= data if hasattr(data, 'attrs'): self[name].attrs.clone(data.attrs) else: self.new(name, data) def __iter__(self, current = 0): """Iterates over zVars in CDF Iterators for dicts return keys @note: Returned in variable-number order """ while current < self.__len__(): name = self[current].name() value = (yield name) if value is None: current += 1 else: current = self[value]._num() current += 1 def __len__(self): """Implements 'length' of CDF (number of zVars) @return: number of zVars in the CDF @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, const.CDF_NUMzVARS_, ctypes.byref(count)) return count.value def __contains__(self, key): """Determines whether a particular variable name is in the CDF @note: Essentially an efficiency function; L{__iter__} is called if this isn't defined @param key: key/variable name to check @type key: string @return: True if L{key} is the name of a variable in CDF, else False @rtype: Boolean """ try: foo = self[key] return True except KeyError as e: expected = str(key) + \ ": NO_SUCH_VAR: Named variable not found in this CDF." if expected in e.args: return False raise def __repr__(self): """Returns representation of CDF Cannot return anything that can be eval'd to create a copy of the CDF, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<CDF:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the CDF This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.CDF`, just the names, types, and sizes of all variables. (Attributes are not listed.) @return: description of the variables in the CDF @rtype: str """ if self._opened: return '\n'.join([key + ': ' + str(value) for (key, value) in sorted(self.items())]) #can get away with this sort because second value in tuple isn't #compared unless first are different, and variable name is unique. else: if isinstance(self.pathname, str): return 'Closed CDF {0}'.format(self.pathname) else: return 'Closed CDF {0}'.format(self.pathname.decode('ascii')) def _open(self, readonly=True): """Opens the CDF file (called on init) Will open an existing CDF file read/write. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.OPEN_, const.CDF_, self.pathname, ctypes.byref(self._handle)) self._opened = True if readonly: #Default is RW self.readonly(readonly) def _create(self): """Creates (and opens) a new CDF file Created at ``pathname``. Assumes zero-dimension r variables Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.CREATE_, const.CDF_, self.pathname, ctypes.c_long(0), (ctypes.c_long * 1)(0), ctypes.byref(self._handle)) self._opened = True def _from_master(self, master_path): """Creates a new CDF from a master CDF file ``master_path`` is copied to ``pathname`` and opened. Parameters ========== master_path : string location of the master CDF file Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. 
note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ if os.path.exists(self.pathname): raise CDFError(const.CDF_EXISTS) shutil.copy2(master_path, self.pathname) self._open(False) def _call(self, *args, **kwargs): """Select this CDF as current and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. Parameters ========== args : various, see :py:mod:`ctypes`. Passed directly to the CDF library interface. Useful constants are defined in the :doc:`const <pycdf_const>` module of this package. Returns ======= out : ctypes.c_long CDF status from the library .. note: Terminal NULL_ is automatically added to ``args``. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. """ return lib.call(const.SELECT_, const.CDF_, self._handle, *args, **kwargs) def clone(self, zVar, name=None, data=True): """ Clone a zVariable (from another CDF or this) into this CDF Parameters ========== zVar : :py:class:`Var` variable to clone Other Parameters ================ name : str Name of the new variable (default: name of the original) data : boolean (optional) Copy data, or only type, dimensions, variance, attributes? (default: True, copy data as well) Returns ======= out : :py:class:`Var` The newly-created zVar in this CDF """ if name is None: name = zVar.name() if name in self: del self[name] self.new(name, type=zVar.type(), recVary=zVar.rv(), dimVarys=zVar.dv(), dims=zVar._dim_sizes(), n_elements=zVar._nelems()) self[name].compress(*zVar.compress()) self[name].attrs.clone(zVar.attrs) if data: r = zVar._raw zVar._raw = True self.raw_var(name)[...] = zVar[...] zVar._raw = r return zVar def col_major(self, new_col=None): """ Finds the majority of this CDF file Other Parameters ================ new_col : boolean Specify True to change to column-major, False to change to row major, or do not specify to check the majority rather than changing it. (default is check only) Returns ======= out : boolean True if column-major, false if row-major """ if new_col != None: new_maj = const.COLUMN_MAJOR if new_col else const.ROW_MAJOR self._call(const.PUT_, const.CDF_MAJORITY_, new_maj) maj = ctypes.c_long(0) self._call(const.GET_, const.CDF_MAJORITY_, ctypes.byref(maj)) if not maj.value in (const.ROW_MAJOR.value, const.COLUMN_MAJOR.value): raise CDFError(const.BAD_MAJORITY) return maj.value == const.COLUMN_MAJOR.value def readonly(self, ro=None): """ Sets or check the readonly status of this CDF If the CDF has been changed since opening, setting readonly mode will have no effect. .. note:: Closing a CDF that has been opened readonly, or setting readonly False, may take a substantial amount of time if there are many variables in the CDF, as a (potentially large) cache needs to be cleared. Consider specifying ``readonly=False`` when opening the file if this is an issue. However, this may make some reading operations slower. Other Parameters ================ ro : Boolean True to set the CDF readonly, False to set it read/write, or leave out to check only. 
Returns ======= out : Boolean True if CDF is read-only, else False Raises ====== CDFError : if bad mode is set """ if ro == True: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYon) elif ro == False: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYoff) mode = ctypes.c_long(0) self._call(const.CONFIRM_, const.CDF_READONLY_MODE_, ctypes.byref(mode)) if mode.value == const.READONLYon.value: return True elif mode.value == const.READONLYoff.value: return False else: raise CDFError(const.BAD_READONLY_MODE.value) def checksum(self, new_val=None): """ Set or check the checksum status of this CDF. If checksums are enabled, the checksum will be verified every time the file is opened. Other Parameters ================ new_val : boolean True to enable checksum, False to disable, or leave out to simply check. Returns ======= out : boolean True if the checksum is enabled or False if disabled """ if new_val != None: self._call(const.PUT_, const.CDF_CHECKSUM_, const.MD5_CHECKSUM if new_val else const.NO_CHECKSUM) chk = ctypes.c_long(0) self._call(const.GET_, const.CDF_CHECKSUM_, ctypes.byref(chk)) if not chk.value in (const.MD5_CHECKSUM.value, const.NO_CHECKSUM.value): raise CDFError(const.BAD_CHECKSUM) return chk.value == const.MD5_CHECKSUM.value def close(self): """ Closes the CDF file Although called on object destruction (:meth:`~CDF.__del__`), to ensure all data are saved, the user should explicitly call :meth:`~CDF.close` or :meth:`~CDF.save`. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.CLOSE_, const.CDF_) self._opened = False def compress(self, comptype=None, param=None): """ Set or check the compression of this CDF Sets compression on entire *file*, not per-variable. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) Returns ======= out : tuple (comptype, param) currently in effect See Also ======== :meth:`Var.compress` Examples ======== Set file ``cdffile`` to gzip compression, compression level 9: >>> cdffile.compress(pycdf.const.GZIP_COMPRESSION, 9) """ return _compress(self, comptype, param) def new(self, name, data=None, type=None, recVary=True, dimVarys=None, dims=None, n_elements=None, compress=None, compress_param=None): """ Create a new zVariable in this CDF .. note:: Either ``data`` or ``type`` must be specified. If type is not specified, it is guessed from ``data``. Parameters ========== name : str name of the new variable Other Parameters ================ data data to store in the new variable. If this has a an ``attrs`` attribute (e.g., :class:`~spacepy.datamodel.dmarray`), it will be used to populate attributes of the new variable. type : ctypes.c_long CDF type of the variable, from :mod:`~pycdf.const`. See section 2.5 of the CDF user's guide for more information on CDF data types. recVary : boolean record variance of the variable (default True) dimVarys : list of boolean dimension variance of each dimension, default True for all dimensions. 
dims : list of int size of each dimension of this variable, default zero-dimensional. Note this is the dimensionality as defined by CDF, i.e., for record-varying variables it excludes the leading record dimension. See :py:class:`Var`. n_elements : int number of elements, should be 1 except for CDF_CHAR, for which it's the length of the string. compress : ctypes.c_long Compression to apply to this variable, default None. See :py:meth:`Var.compress`. compress_param : ctypes.c_long Compression parameter if compression used; reasonable default is chosen. See :py:meth:`Var.compress`. Returns ======= out : :py:class:`Var` the newly-created zVariable Raises ====== ValueError : if neither data nor sufficient typing information is provided. Notes ===== Any given data may be representable by a range of CDF types; if the type is not specified, pycdf will guess which the CDF types which can represent this data. This breaks down to: #. If input data is a numpy array, match the type of that array #. Proper kind (numerical, string, time) #. Proper range (stores highest and lowest number provided) #. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: #. Type that matches precision of data first, then #. integer type before float type, then #. Smallest type first, then #. signed type first, then #. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if ``data`` specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: #. absolute values between 0 and 3e-39 #. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. """ if type in (const.CDF_EPOCH16, const.CDF_INT8, const.CDF_TIME_TT2000) \ and self.backward: raise ValueError('Cannot use EPOCH16, INT8, or TIME_TT2000 ' 'in backward-compatible CDF') if not lib.supports_int8 and \ type in (const.CDF_INT8, const.CDF_TIME_TT2000): raise ValueError('INT8 and TIME_TT2000 require CDF library 3.4.0') if data is None: if type is None: raise ValueError('Must provide either data or a CDF type.') if dims is None: dims = [] if n_elements is None: n_elements = 1 else: (guess_dims, guess_types, guess_elements) = _Hyperslice.types(data) if dims is None: if recVary: if guess_dims == (): raise ValueError( 'Record-varying data cannot be scalar. 
' 'Specify NRV with CDF.new() or put data in array.') dims = guess_dims[1:] else: dims = guess_dims if type is None: type = guess_types[0] if type == const.CDF_EPOCH16.value and self.backward: type = const.CDF_EPOCH if n_elements is None: n_elements = guess_elements if dimVarys is None: dimVarys = [True for i in dims] recVary = const.VARY if recVary else const.NOVARY dimVarys = [const.VARY if dimVary else const.NOVARY for dimVary in dimVarys] if not hasattr(type, 'value'): type = ctypes.c_long(type) if type.value == const.CDF_INT8.value and not lib.supports_int8: raise ValueError( '64-bit integer support require CDF library 3.4.0') if type.value in (const.CDF_EPOCH16.value, const.CDF_INT8.value, const.CDF_TIME_TT2000.value) \ and self.backward: raise ValueError('Data requires EPOCH16, INT8, or TIME_TT2000; ' 'incompatible with backward-compatible CDF') new_var = Var(self, name, type, n_elements, dims, recVary, dimVarys) if compress != None: new_var.compress(compress, compress_param) if data is not None: new_var[...] = data if hasattr(data, 'attrs'): new_var.attrs.clone(data.attrs) return new_var def raw_var(self, name): """ Get a "raw" :class:`Var` object. Normally a :class:`Var` will perform translation of values for certain types (to/from Unicode for CHAR variables on Py3k, and to/from datetime for all time types). A "raw" object does not perform this translation, on read or write. This does *not* affect the data on disk, and in fact it is possible to maintain multiple Python objects with access to the same zVariable. Parameters ========== name : str name or number of the zVariable """ v = self[name] v._raw = True return v def save(self): """ Saves the CDF file but leaves it open. If closing the CDF, :meth:`close` is sufficient; there is no need to call :meth:`save` before :meth:`close`. .. note:: Relies on an undocumented call of the CDF C library, which is also used in the Java interface. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.SAVE_, const.CDF_) def copy(self): """ Make a copy of all data and attributes in this CDF Returns ======= out : :py:class:`CDFCopy` :class:`~spacepy.datamodel.SpaceData`-like object of all data """ return CDFCopy(self) def version(self): """ Get version of library that created this CDF Returns ======= out : tuple version of CDF library, in form (version, release, increment) """ ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) self._call(const.GET_, const.CDF_VERSION_, ctypes.byref(ver), const.GET_, const.CDF_RELEASE_, ctypes.byref(rel), const.GET_, const.CDF_INCREMENT_, ctypes.byref(inc)) return (ver.value, rel.value, inc.value) def _get_attrs(self): """Get attribute list Provide access to the CDF's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = gAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. 
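A minimal illustrative sketch (the attribute name ``Project`` and its
value are hypothetical, not anything pycdf requires):

>>> cdffile.attrs['Project'] = 'My mission'
>>> cdffile.attrs['Project'][0]
'My mission'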
""") def var_num(self, varname): """Get the variable number of a particular variable name This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : int Variable number of this zvariable. """ num = self._var_nums.get(varname, None) if num is None: #Copied from Var._get, which can hopefully be thinned varNum = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMBER_, varname, ctypes.byref(varNum)) num = varNum.value self._var_nums[varname] = num return num def attr_num(self, attrname): """Get the attribute number and scope by attribute name This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : tuple attribute number, scope (True for global) of this attribute """ res = self._attr_info.get(attrname, None) if res is None: #Copied from Var._get, which can hopefully be thinned attrNum = ctypes.c_long(0) self._call(const.GET_, const.ATTR_NUMBER_, attrname, ctypes.byref(attrNum)) scope = ctypes.c_long(0) self._call(const.SELECT_, const.ATTR_, attrNum, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) res = (attrNum.value, scope) self._attr_info[attrname] = res return res def clear_attr_from_cache(self, attrname): """Mark an attribute deleted in the name-to-number cache Will remove an attribute, and all attributes with higher numbers, from the attribute cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the attribute. Not this is NOT a string in Python 3! """ num, scope = self.attr_num(attrname) #All numbers higher than this are renumbered for a, n in list(self._attr_info.items()): if n[0] >= num: del self._attr_info[a] def clear_from_cache(self, varname): """Mark a variable deleted in the name-to-number cache Will remove a variable, and all variables with higher numbers, from the variable cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! """ num = self.var_num(varname) #All numbers higher than this are renumbered for v, n in list(self._var_nums.items()): if n >= num: del self._var_nums[v] def add_attr_to_cache(self, attrname, num, scope): """Add an attribute to the name-to-number cache This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable scope : bool True if global scope; False if variable scope. 
""" self._attr_info[attrname] = (num, scope) def add_to_cache(self, varname, num): """Add a variable to the name-to-number cache This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable """ self._var_nums[varname] = num #Note there is no function for delete, currently handled in Var.rename #and Attr.rename by just deleting from the dict directly. Maybe this #should be differen (maybe should be possible to follow a variable across #a rename...) class Var(MutableSequence): """ A CDF variable. This object does not directly store the data from the CDF; rather, it provides access to the data in a format that much like a Python list or numpy :class:`~numpy.ndarray`. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. The CDF user's guide, section 2.3, provides background on variables. .. note:: Not intended to be created directly; use methods of :class:`CDF` to gain access to a variable. A record-varying variable's data are viewed as a hypercube of dimensions n_dims+1 (the extra dimension is the record number). They are indexed in row-major fashion, i.e. the last index changes most frequently / is contiguous in memory. If the CDF is column-major, the data are transformed to row-major before return. Non record-varying variables are similar, but do not have the extra dimension of record number. Variables can be subscripted by a multidimensional index to return the data. Indices are in row-major order with the first dimension representing the record number. If the CDF is column major, the data are reordered to row major. Each dimension is specified by standard Python `slice <http://docs.python.org/tutorial/introduction.html#strings>`_ notation, with dimensions separated by commas. The ellipsis fills in any missing dimensions with full slices. The returned data are lists; Python represents multidimensional arrays as nested lists. The innermost set of lists represents contiguous data. .. note:: numpy 'fancy indexing' is *not* supported. Degenerate dimensions are 'collapsed', i.e. no list of only one element will be returned if a single subscript is specified instead of a range. (To avoid this, specify a slice like 1:2, which starts with 1 and ends before 2). Two special cases: 1. requesting a single-dimension slice for a record-varying variable will return all data for that record number (or those record numbers) for that variable. 2. Requests for multi-dimensional variables may skip the record-number dimension and simply specify the slice on the array itself. In that case, the slice of the array will be returned for all records. In the event of ambiguity (e.g., single-dimension slice on a one-dimensional variable), case 1 takes priority. Otherwise, mismatch between the number of dimensions specified in the slice and the number of dimensions in the variable will cause an :exc:`~exceptions.IndexError` to be thrown. This all sounds very complicated but it is essentially attempting to do the 'right thing' for a range of slices. An unusual case is scalar (zero-dimensional) non-record-varying variables. Clearly they cannot be subscripted normally. 
In this case, use the ``[...]`` syntax meaning 'access all data.': >>> import pycdf >>> testcdf = pycdf.CDF('test.cdf', '') >>> variable = testcdf.new('variable', recVary=False, ... type=pycdf.const.CDF_INT4) >>> variable[...] = 10 >>> variable <Var: CDF_INT4 [] NRV > >>> variable[...] 10 Reading any empty non-record-varying variable will return an empty with the same *number* of dimensions, but all dimensions will be of zero length. The scalar is, again, a special case: due to the inability to have a numpy array which is both zero-dimensional and empty, reading an NRV scalar variable with no data will return an empty one-dimensional array. This is really not recommended. As a list type, variables are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_; iterating over a variable returns a single complete record at a time. This is all clearer with examples. Consider a variable ``B_GSM``, with three elements per record (x, y, z components) and fifty records in the CDF. Then: 1. ``B_GSM[0, 1]`` is the y component of the first record. 2. ``B_GSM[10, :]`` is a three-element list, containing x, y, and z components of the 11th record. As a shortcut, if only one dimension is specified, it is assumed to be the record number, so this could also be written ``B_GSM[10]``. 3. ``B_GSM[...]`` reads all data for ``B_GSM`` and returns it as a fifty-element list, each element itself being a three-element list of x, y, z components. Multidimensional example: consider fluxes stored as a function of pitch angle and energy. Such a variable may be called Flux and stored as a two-dimensional array, with the first dimension representing (say) ten energy steps and the second, eighteen pitch angle bins (ten degrees wide, centered from 5 to 175 degrees). Assume 100 records stored in the CDF (i.e. 100 different times). 1. ``Flux[4]`` is a list of ten elements, one per energy step, each element being a list of 18 fluxes, one per pitch bin. All are taken from the fifth record in the CDF. 2. ``Flux[4, :, 0:4]`` is the same record, all energies, but only the first four pitch bins (roughly, field-aligned). 3. ``Flux[..., 0:4]`` is a 100-element list (one per record), each element being a ten-element list (one per energy step), each containing fluxes for the first four pitch bins. This slicing notation is very flexible and allows reading specifically the desired data from the CDF. All data are, on read, converted to appropriate Python data types; EPOCH, EPOCH16, and TIME_TT2000 types are converted to :class:`~datetime.datetime`. Data are returned in numpy arrays. .. note:: Although pycdf supports TIME_TT2000 variables, the Python :class:`~datetime.datetime` object does not support leap seconds. Thus, on read, any seconds past 59 are truncated to 59.999999 (59 seconds, 999 milliseconds, 999 microseconds). Potentially useful list methods and related functions: - `count <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `in <http://docs.python.org/reference/expressions.html#in>`_ - `index <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `len <http://docs.python.org/library/functions.html#len>`_ - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - `sorted <http://docs.python.org/library/functions.html#sorted>`_ The topic of array majority can be very confusing; good background material is available at `IDL Array Storage and Indexing <http://www.idlcoyote.com/misc_tips/colrow_major.html>`_. 
In brief, *regardless of the majority stored in the CDF*, pycdf will always present the data in the native Python majority, row-major order, also known as C order. This is the default order in `NumPy <http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html #internal-memory-layout-of-an-ndarray>`_. However, packages that render image data may expect it in column-major order. If the axes seem 'swapped' this is likely the reason. The :attr:`~Var.attrs` Python attribute acts as a dictionary referencing zAttributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`zAttrList` for more on the dictionary of attributes. With writing, as with reading, every attempt has been made to match the behavior of Python lists. You can write one record, many records, or even certain elements of all records. There is one restriction: only the record dimension (i.e. dimension 0) can be resized by write, as all records in a variable must have the same dimensions. Similarly, only whole records can be deleted. .. note:: Unusual error messages on writing data usually mean that pycdf is unable to interpret the data as a regular array of a single type matching the type and shape of the variable being written. A 5x4 array is supported; an irregular array where one row has five columns and a different row has six columns is not. Error messages of this type include: - ``Data must be well-formed, regular array of number, string, or datetime`` - ``setting an array element with a sequence.`` - ``shape mismatch: objects cannot be broadcast to a single shape`` For these examples, assume Flux has 100 records and dimensions [2, 3]. Rewrite the first record without changing the rest: >>> Flux[0] = [[1, 2, 3], [4, 5, 6]] Writes a new first record and delete all the rest: >>> Flux[...] = [[1, 2, 3], [4, 5, 6]] Write a new record in the last position and add a new record after: >>> Flux[99:] = [[[1, 2, 3], [4, 5, 6]], ... [[11, 12, 13], [14, 15, 16]]] Insert two new records between the current number 5 and 6: >>> Flux[5:6] = [[[1, 2, 3], [4, 5, 6]], [[11, 12, 13], ... [14, 15, 16]]] This operation can be quite slow, as it requires reading and rewriting the entire variable. (CDF does not directly support record insertion.) Change the first element of the first two records but leave other elements alone: >>> Flux[0:2, 0, 0] = [1, 2] Remove the first record: >>> del Flux[0] Removes record 5 (the sixth): >>> del Flux[5] Due to the need to work around a bug in the CDF library, this operation can be quite slow. Delete *all data* from ``Flux``, but leave the variable definition intact: >>> del Flux[...] .. note:: Although this interface only directly supports zVariables, zMode is set on opening the CDF so rVars appear as zVars. See p.24 of the CDF user's guide; pyCDF uses zMode 2. .. autosummary:: ~Var.attrs ~Var.compress ~Var.copy ~Var.dtype ~Var.dv ~Var.insert ~Var.name ~Var.rename ~Var.rv ~Var.shape ~Var.type .. attribute:: Var.attrs zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. .. automethod:: compress .. automethod:: copy .. autoattribute:: dtype .. automethod:: dv .. automethod:: insert .. automethod:: name .. automethod:: rename .. automethod:: rv .. autoattribute:: shape .. 
automethod:: type """ def __init__(self, cdf_file, var_name, *args): """Create or locate a variable Parameters ========== cdf_file : :py:class:`pycdf.CDF` CDF file containing this variable var_name : string name of this variable Other Parameters ================ args additional arguments passed to :py:meth:`_create`. If none, opens an existing variable. If provided, creates a new one. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning """ self.cdf_file = cdf_file #This is the definitive "identify" of variable self._name = None self._type = None #CDF type (long) self._raw = False #Raw access (skip all conversions) if len(args) == 0: self._get(var_name) else: self._create(var_name, *args) #Weak reference to attribute list (use attrs instead) #This avoids a reference loop self._attrlistref = weakref.ref(zAttrList(self)) def __getitem__(self, key): """Returns a slice from the data array. Details under :py:class:`pycdf.Var`. @return: The data from this variable @rtype: list-of-lists of appropriate type. @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) #Hyperslice mostly catches this sort of thing, but #an empty variable is a special case, since we might want to #WRITE to 0th record (which Hyperslice also supports) but #can't READ from it, and iterating over tries to read from it. if hslice.rv: if hslice.dimsizes[0] == 0 and hslice.degen[0] and \ hslice.starts[0] == 0: raise IndexError('record index out of range') #For NRV, again hslice will assume 0th record exists since we might #want to write. So ANY degenerate dim other than the glued-on 0th #suggests an explicit index that should fail. None degenerate suggests #make an empty array. #Note this is pulling a lot of hyperslice stuff into getitem! elif hslice.dimsizes[0] == 0: if len(hslice.degen) > 1 and max(hslice.degen[1:]): raise IndexError('record index out of range') else: #The zero-length dimension is degenerate so it gets chopped, #and you can't have a zero-length numpy array that still #maintains the size of all other dimensions. So just force #a zero-dim array and the rest will follow hslice.counts[...] = 0 #If this is a scalar, need to make a single non-degenerate #dimension so it can be empty. if len(hslice.counts) == 1: hslice.degen[0] = False result = hslice.create_array() if hslice.counts[0] != 0: hslice.select() lib.call(const.GET_, const.zVAR_HYPERDATA_, result.ctypes.data_as(ctypes.c_void_p)) return hslice.convert_input_array(result) def __delitem__(self, key): """Removes a record (or set of records) from the CDF Only whole records can be deleted, so the del call must either specify only one dimension or it must specify all elements of the non-record dimensions. This is *not* a way to resize a variable! Deleting records from the middle of a variable may be very slow in some circumstances. To work around a bug in CDF library versions 3.4.0 and before, all the data must be read in, the requested deletions done, and then all written back out. 
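For example (shapes illustrative), with ``Flux`` shaped [100, 2, 3],
``del Flux[5]`` or ``del Flux[5, ...]`` removes the sixth record, but
``del Flux[5, 0, :]`` raises TypeError because it names only part of
a record.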
@param key: index or slice to delete @type key: int or slice @raise TypeError: if an attempt is made to delete from a non record-varying variable, or to delete below the record level """ if not self.rv(): raise TypeError('Cannot delete records from non-record-varying ' 'variable.') hslice = _Hyperslice(self, key) if hslice.dims > 1 and (hslice.counts[1:] != hslice.dimsizes[1:]).any(): raise TypeError('Can only delete entire records.') if hslice.counts[0] == 0: return start = hslice.starts[0] count = hslice.counts[0] interval = hslice.intervals[0] dimsize = hslice.dimsizes[0] self._call() dangerous_delete = False if lib._del_middle_rec_bug and \ (interval != 1 or (start != 0 and start + count < dimsize)): #delete from middle is dangerous if only have one index entry entries = ctypes.c_long(0) lib.call(const.GET_, const.zVAR_nINDEXENTRIES_, ctypes.byref(entries)) dangerous_delete = (entries.value == 1) if dangerous_delete: data = self[...] data = numpy.delete( data, numpy.arange(start, start + count * interval, interval), 0) self[0:dimsize - count] = data first_rec = dimsize - count last_rec = dimsize - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif interval == 1: first_rec = ctypes.c_long(start) last_rec = ctypes.c_long(start + count - 1) lib.call(const.DELETE_, const.zVAR_RECORDS_, first_rec, last_rec) else: self._call() #delete from end to avoid renumbering of records for recno in range(start + (count - 1) * interval, start - 1, -1 * interval): lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(recno), ctypes.c_long(recno)) def __setitem__(self, key, data): """Puts a slice into the data array. Details under :py:class:`pycdf.Var`. @param key: index or slice to store @type key: int or slice @param data: data to store @type data: numpy.array @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. 
IndexError will @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) n_recs = hslice.counts[0] hslice.expand(data) cdf_type = self.type() if cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) else: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=self._np_type()) if cdf_type == const.CDF_EPOCH16.value: datashape = data.shape[:-1] else: datashape = data.shape #Check data sizes if datashape != tuple(hslice.expected_dims()): raise ValueError('attempt to assign data of dimensions ' + str(datashape) + ' to slice of dimensions ' + str(tuple(hslice.expected_dims()))) #Flip majority and reversed dimensions, see convert_input_array data = hslice.convert_output_array(data) #Handle insertions and similar weirdness if hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Specified slice ends before last record, so insert in middle saved_data = self[hslice.starts[0] + n_recs:] if hslice.counts[0] > 0: hslice.select() lib.call(const.PUT_, const.zVAR_HYPERDATA_, data.ctypes.data_as(ctypes.c_void_p)) if hslice.counts[0] < n_recs: first_rec = hslice.starts[0] + hslice.counts[0] last_rec = hslice.dimsizes[0] - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Put saved data in after inserted data self[hslice.starts[0] + hslice.counts[0]:] = saved_data def extend(self, data): """ Append multiple values to the end of this variable This is an efficiency function which overrides the base implementation in MutableSequence. Parameters ---------- data : the data to append """ self[len(self):] = data def insert(self, index, data): """ Inserts a *single* record before an index Parameters ---------- index : int index before which to insert the new record data : the record to insert """ self[index:index] = [data] def _create(self, var_name, datatype, n_elements = 1, dims = (), recVary = const.VARY, dimVarys = None): """Creates a new zVariable @param var_name: name of this variable @type var_name: string @param datatype: CDF data type @type datatype: ctypes.c_long @param n_elements: number of elements (should be 1 except for CDF_CHAR variables). @type n_elements: long @param dims: size of each dimension for multi-dimensional variable, or empty for a zero-dimensional @type dims: sequence of long @param recVary: record variance for this variable (VARY/NOVARY) @type recVary: long @param dimVarys: array of VARY or NOVARY, variance for each dimension @type dimVarys: sequence of long @return: new variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.new}. 
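@note: Illustrative sketch only (variable name and type hypothetical):
    a direct call would look like
    ``Var(cdf, 'B_GSM', const.CDF_REAL4, 1, (3,))``, which is roughly
    what ``cdf.new('B_GSM', type=const.CDF_REAL4, dims=(3,))`` does
    for you.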
""" dim_array = (ctypes.c_long * len(dims))(*dims) enc_name = var_name.encode('ascii') if dimVarys is None: dim_vary_array = (ctypes.c_long * (len(dims) if len(dims) > 0 else 1))(const.VARY) else: dim_vary_array = (ctypes.c_long * len(dims))(*dimVarys) varNum = ctypes.c_long(0) self.cdf_file._call(const.CREATE_, const.zVAR_, enc_name, datatype, ctypes.c_long(n_elements), ctypes.c_long(len(dims)), dim_array, recVary, dim_vary_array, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) def _delete(self): """Removes this zVariable from the CDF @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ self._call(const.DELETE_, const.zVAR_) self.cdf_file.clear_from_cache(self._name) self._name = None def _get(self, var_name): """Gets an existing zVariable @param var_name: name of this variable @type var_name: string @return: variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.__getitem__}. """ if isinstance(var_name, str_classes): try: enc_name = var_name.encode('ascii').rstrip() except AttributeError: enc_name = var_name.rstrip() #already in ASCII #'touch' CDF to cause an error if the name isn't there; get number varNum = ctypes.c_long(0) self.cdf_file._call(const.GET_, const.zVAR_NUMBER_, enc_name, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) else: #Looking up by number name = ctypes.create_string_buffer(const.CDF_VAR_NAME_LEN256+1) self.cdf_file._call(const.SELECT_, const.zVAR_, ctypes.c_long(var_name), const.GET_, const.zVAR_NAME_, name) self._name = name.value.rstrip() self.cdf_file.add_to_cache(self._name, var_name) def _num(self): """Returns the zVar number for this variable @return: number of this zVar @rtype: int """ return self.cdf_file.var_num(self._name) def __len__(self): """Get number of records for this variable in this file @return: Number of records @rtype: long @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ count = ctypes.c_long(0) self._call(const.GET_, const.zVAR_MAXREC_, ctypes.byref(count)) return (count.value + 1) def __repr__(self): """Returns representation of the variable Cannot return anything that can be eval'd to create a copy, so just wrap the informal representation in angle brackets. @return: info on this zVar @rtype: str """ return '<Var:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the variable This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.Var`. 
@return: info on this zVar, CDFTYPE [dimensions] NRV (if not record-varying) @rtype: str """ if self.cdf_file._opened: cdftype = self.type() chartypes = (const.CDF_CHAR.value, const.CDF_UCHAR.value) rv = self.rv() typestr = lib.cdftypenames[cdftype] + \ ('*' + str(self._nelems()) if cdftype in chartypes else '' ) if rv: sizestr = str([len(self)] + self._dim_sizes()) else: sizestr = str(self._dim_sizes()) return typestr + ' ' + sizestr + ('' if rv else ' NRV') else: if isinstance(self._name, str): return 'zVar "{0}" in closed CDF {1}'.format( self._name, self.cdf_file.pathname) else: return 'zVar "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self.cdf_file.pathname.decode('ascii')) def _n_dims(self): """Get number of dimensions for this variable @return: the number of dimensions @rtype: long """ n_dims = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMDIMS_, ctypes.byref(n_dims)) return n_dims.value def _dim_sizes(self): """Get the dimension sizes for this variable @return: sequence of sizes @rtype: sequence of long @note: This will always be in Python order (i.e. row major, last index iterates most quickly), *regardless* of the majority of the CDF. """ sizes = (ctypes.c_long * const.CDF_MAX_DIMS)(0) self._call(const.GET_, const.zVAR_DIMSIZES_, sizes) sizes = sizes[0:self._n_dims()] return sizes def rv(self, new_rv=None): """ Gets or sets whether this variable has record variance If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Other Parameters ================ new_rv : boolean True to change to record variance, False to change to NRV, unspecified to simply check variance. Returns ======= out : Boolean True if record varying, False if NRV """ if new_rv != None: self._call(const.PUT_, const.zVAR_RECVARY_, const.VARY if new_rv else const.NOVARY) vary = ctypes.c_long(0) self._call(const.GET_, const.zVAR_RECVARY_, ctypes.byref(vary)) return vary.value != const.NOVARY.value def dv(self, new_dv=None): """ Gets or sets dimension variance of each dimension of variable. If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Parameters ========== new_dv : list of boolean Each element True to change that dimension to dimension variance, False to change to not dimension variance. (Unspecified to simply check variance.) Returns ======= out : list of boolean True if that dimension has variance, else false. """ ndims = self._n_dims() if new_dv != None: if len(new_dv) != ndims: raise ValueError('Must specify variance for ' + str(ndims) + 'dimensions.') varies = (ctypes.c_long * ndims)( *[const.VARY if dv else const.NOVARY for dv in new_dv]) self._call(const.PUT_, const.zVAR_DIMVARYS_, varies) if ndims == 0: return [] varies = (ctypes.c_long * const.CDF_MAX_DIMS)() self._call(const.GET_, const.zVAR_DIMVARYS_, varies) return [dv != const.NOVARY.value for dv in varies[0:ndims]] def _call(self, *args, **kwargs): """Select this CDF and variable and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. @param args: Passed directly to the CDF library interface. Useful constants are defined in the :py:mod:`pycdf.const` module of this package. @type args: various, see :py:mod:`ctypes`. 
@return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self.cdf_file._call( const.SELECT_, const.zVAR_, ctypes.c_long(self.cdf_file.var_num(self._name)), *args, **kwargs) def _np_type(self): """Returns the numpy type of this variable This is the numpy type that will come directly out of the CDF; see :meth:`dtype` for the representation post-conversion. Raises ====== CDFError : for library-reported error or failure to find numpy type Returns ======= out : dtype numpy dtype that will hold value from this variable """ cdftype = self.type() if cdftype == const.CDF_CHAR.value or cdftype == const.CDF_UCHAR.value: return numpy.dtype('S' + str(self._nelems())) try: return lib.numpytypedict[cdftype] except KeyError: raise CDFError(const.BAD_DATA_TYPE) def type(self, new_type=None): """ Returns or sets the CDF type of this variable Parameters ========== new_type : ctypes.c_long the new type from :mod:`~pycdf.const` Returns ======= out : int CDF type """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) n_elements = ctypes.c_long(self._nelems()) self._call(const.PUT_, const.zVAR_DATASPEC_, new_type, n_elements) self._type = None if self._type is None: cdftype = ctypes.c_long(0) self._call(const.GET_, const.zVAR_DATATYPE_, ctypes.byref(cdftype)) self._type = cdftype.value return self._type def _nelems(self): """Number of elements for each value in this variable This is the length of strings for CHAR and UCHAR, should be 1 otherwise. @return: length of strings @rtype: int """ nelems = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMELEMS_, ctypes.byref(nelems)) return nelems.value def name(self): """ Returns the name of this variable Returns ======= out : str variable's name """ if isinstance(self._name, str): return self._name elif isinstance(self._name, bytes): return self._name.decode() def compress(self, comptype=None, param=None): """ Set or check the compression of this variable Compression may not be changeable on variables with data already written; even deleting the data may not permit the change. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
Returns ======= out : tuple the (comptype, param) currently in effect """ return _compress(self, comptype, param) def copy(self): """ Copies all data and attributes from this variable Returns ======= out : :class:`VarCopy` list of all data in record order """ return VarCopy(self) def rename(self, new_name): """ Renames this variable Parameters ========== new_name : str the new name for this variable """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_VAR_NAME_LEN256: raise CDFError(const.BAD_VAR_NAME) self._call(const.PUT_, const.zVAR_NAME_, enc_name) self.cdf_file.add_to_cache( enc_name, self.cdf_file.var_num(self._name)) #Still in cache del self.cdf_file._var_nums[self._name] self._name = enc_name @property def shape(self): """ Provides the numpy array-like shape of this variable. Returns a tuple; first element is number of records (RV variable only) And the rest provide the dimensionality of the variable. .. note:: Assigning to this attribute will not change the shape. """ if self.rv(): return tuple([len(self)] + self._dim_sizes()) else: return tuple(self._dim_sizes()) @property def dtype(self): """ Provide the numpy dtype equivalent to the CDF type of this variable. Data from this variable will be returned in numpy arrays of this type. See Also -------- type """ cdftype = self.type() if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str is not bytes and not self._raw: return numpy.dtype('U' + str(self._nelems())) if cdftype in (const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value) and not self._raw: return numpy.dtype('O') return self._np_type() def _get_attrs(self): """Get attribute list Provide access to the zVar's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = zAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. """) class _Hyperslice(object): """Represents a CDF 'slice' used for the hyper CDF functions For internal module use only. @ivar dims: number of dimensions to this slice, usually number of dimensions to the variable plus one for the record, which represents the 0th (least rapidly varying) dimension. @type dims: int @ivar dimsizes: size of each dimension (0th is number of records) @type dimsizes: list of int @ivar starts: index of the start value for each dimension ('dimension indices' in CDF speak) @type starts: list of int @ivar counts: number of values to get from each dimension. Final result will be the product of everything in counts. ('dimension counts' in CDF speak) @type counts: numpy.array @ivar intervals: interval between successive indices to use for each dimension. ('dimension invervals' in CDF speak) @type intervals: list of int @ivar degen: is this dimension degenerate, i.e. should be removed in the returned dataset. A 3D array with one dimension degenerate will be returned as a 2D array (i.e. list-of-lists.) @type degen: numpy.array @ivar rev: should this dimension be returned in reverse order? 
@type rev: numpy.array @ivar column: is this slice in column-major mode (if false, row-major) @type column: boolean @ivar zvar: what CDF variable this object slices on @type zvar: :py:class:`pycdf.Var` @ivar expanded_key: fully-expanded version of the key passed to the constructor (all dimensions filled in) @type expanded_key: tuple @note: All dimension-related variables are stored row-major (Python order) """ def __init__(self, zvar, key): """Create a Hyperslice @param zvar: zVariable that this slices @type zvar: :py:class:`pycdf.Var` @param key: Python multi-dimensional slice as passed to __getitem__ @type key: tuple of slice and/or int @raise IndexError: if slice is out of range, mismatches dimensions, or otherwise unparsable. @raise ValueError: if slice has invalid values """ self.zvar = zvar self.rv = self.zvar.rv() #dim of records, + 1 record dim (NRV always is record 0) self.dims = zvar._n_dims() + 1 self.dimsizes = [len(zvar)] + \ zvar._dim_sizes() self.starts = [0] * self.dims self.counts = numpy.empty((self.dims,), dtype=numpy.int32) self.counts.fill(1) self.intervals = [1] * self.dims self.degen = numpy.zeros(self.dims, dtype=numpy.bool) self.rev = numpy.zeros(self.dims, dtype=numpy.bool) #key is: #1. a single value (integer or slice object) if called 1D #2. a tuple (of integers and/or slice objects) if called nD #3. Each item is either a single value (degenerate dim) # or a slice object. if not hasattr(key, '__len__'): #Not a container object, pack in tuple key = (key, ) if not self.rv: key = (0, ) + key #NRV, so always get 0th record (degenerate) key = self.expand_ellipsis(key, self.dims) if self.rv: #special-cases for RV variables if len(key) == 1: #get all data for this record(s) key = self.expand_ellipsis(key + (Ellipsis, ), self.dims) elif len(key) == self.dims - 1: #get same slice from each record key = (slice(None, None, None), ) + key if len(key) == self.dims: self.expanded_key = key for i in range(self.dims): idx = key[i] if hasattr(idx, 'start'): #slice (self.starts[i], self.counts[i], self.intervals[i], self.rev[i]) = \ self.convert_range(idx.start, idx.stop, idx.step, self.dimsizes[i]) else: #Single degenerate value if idx < 0: idx += self.dimsizes[i] if idx != 0 and (idx >= self.dimsizes[i] or idx < 0): raise IndexError('list index out of range') self.starts[i] = idx self.degen[i] = True else: raise IndexError('Slice does not match dimensions for zVar ' + str(zvar._name)) self.column = zvar.cdf_file.col_major() def expected_dims(self, data=None): """Calculate size of non-degenerate dimensions Figures out size, in each dimension, of expected input data @return: size of each dimension for this slice, excluding degenerate @rtype: list of int """ return [self.counts[i] for i in range(self.dims) if not self.degen[i]] def expand(self, data): """Expands the record dimension of this slice to hold a set of data If the length of data (outermost dimension) is larger than the record count (counts[0]) for this slice, expand the slice to hold all the data. This requires that the record dimension of the slice not be degenerate, and also that it not have been completely specified when the hyperslice was created (i.e. record dimension either ellipsis or no specified stop.) Does *not* expand any other dimension, since that's Very Hard in CDF. 
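Illustrative case (sizes hypothetical): with a slice like ``var[0:]``
over a variable that currently has three records, assigning five
records grows counts[0] to 5, and assigning two records shrinks it
to 2.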
@param data: the data which are intended to be stored in this slice @type data: list """ rec_slice = self.expanded_key[0] if not self.rv or isinstance(data, str_classes) or self.degen[0] or \ not hasattr(rec_slice, 'stop'): return if len(data) < self.counts[0]: #Truncate to fit data if rec_slice.stop is None and rec_slice.step in (None, 1): self.counts[0] = len(data) elif len(data) > self.counts[0]: #Expand to fit data if rec_slice.step in (None, 1): self.counts[0] = len(data) def create_array(self): """Creates a numpy array to hold the data from this slice Returns ======= out : numpy.array array sized, typed, and dimensioned to hold data from this slice """ counts = self.counts degen = self.degen if self.column: counts = self.reorder(counts) degen = self.reorder(degen) #TODO: Forcing C order for now, revert to using self.column later array = numpy.empty( [counts[i] for i in range(len(counts)) if not degen[i]], self.zvar._np_type(), order='C') return numpy.require(array, requirements=('C', 'A', 'W')) def convert_input_array(self, buffer): """Converts a buffer of raw data from this slice EPOCH(16) variables always need to be converted. CHAR need converted to Unicode if py3k Parameters ========== buffer : numpy.array data as read from the CDF file Returns ======= out : numpy.array converted data """ result = self._flip_array(buffer) #Convert to derived types cdftype = self.zvar.type() if not self.zvar._raw: if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str != bytes: dt = numpy.dtype('U{0}'.format(result.dtype.itemsize)) result = numpy.require(numpy.char.array(result).decode(), dtype=dt) elif cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(result) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(result) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(result) return result def convert_output_array(self, buffer): """Convert a buffer of data that will go into this slice Parameters ========== buffer : numpy.array data to go into the CDF file Returns ======= out : numpy.array input with majority flipped and dimensions reversed to be suitable to pass directly to CDF library. """ buffer = self._flip_array(buffer) return numpy.require(buffer, requirements=('C', 'A', 'W')) def _flip_array(self, data): """ Operations for majority, etc. common between convert_input and _output """ cdftype = self.zvar.type() #Flip majority if any non-degenerate dimensions exist if self.column and not min(self.degen): #Record-number dim degen, swap whole thing if self.degen[0]: if cdftype == const.CDF_EPOCH16.value: #Maintain last dimension data = data.transpose( list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose() #Record-number dimension is not degenerate, so keep it first else: if cdftype == const.CDF_EPOCH16.value: data = data.transpose( [0] + list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose( [0] + list(range(len(data.shape) - 1, 0, -1))) #Reverse non-degenerate dimensions in rev #Remember that the degenerate indices are already gone! 
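#(The rev flags come from convert_range: a slice with a negative step
# is read forward from an adjusted start, and the affected dimensions
# are flipped here rather than asking the CDF library to step backward.)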
if self.rev.any(): sliced = [(slice(None, None, -1) if self.rev[i] else slice(None)) for i in range(self.dims) if not self.degen[i]] if cdftype == const.CDF_EPOCH16.value: #don't reverse last dim sliced.extend(slice(None)) data = operator.getitem(data, tuple(sliced)) return data def select(self): """Selects this hyperslice in the CDF Calls the CDF library to select the CDF, variable, records, and array elements corresponding to this slice. """ args = (const.SELECT_, const.zVAR_RECNUMBER_, ctypes.c_long(self.starts[0]), const.SELECT_, const.zVAR_RECCOUNT_, ctypes.c_long(self.counts[0]), const.SELECT_, const.zVAR_RECINTERVAL_, ctypes.c_long(self.intervals[0])) if self.dims > 1: dims = self.dims - 1 args += (const.SELECT_, const.zVAR_DIMINDICES_, (ctypes.c_long * dims)(*self.starts[1:]), const.SELECT_, const.zVAR_DIMCOUNTS_, (ctypes.c_long * dims)(*self.counts[1:]), const.SELECT_, const.zVAR_DIMINTERVALS_, (ctypes.c_long * dims)(*self.intervals[1:])) self.zvar._call(*args) @staticmethod def expand_ellipsis(slices, n_dims): """Expands any ellipses into correct number of full-size slices @param slices: tuple of slices, integers, or ellipse objects @type slices: tuple @param n_dims: number of dimensions this slice is over @type n_dims: int @return: L{slices} with ellipses replaced by appropriate number of full-dimension slices @rtype: tuple @raise IndexError: if ellipses specified when already have enough dimensions """ if slices is Ellipsis: return tuple([slice(None, None, None) for i in range(n_dims)]) #Elements might be numpy arrays, so can't use in/index idx = [i for i, v in enumerate(slices) if v is Ellipsis] if not idx: #no ellipsis return slices if len(idx) > 1: #multiples! raise IndexError('Ellipses can only be used once per slice.') idx = idx[0] #how many dims to expand ellipsis to #remember the ellipsis is in len(slices) and must be replaced! extra = n_dims - len(slices) + 1 if extra < 0: raise IndexError('too many indices') result = slices[0:idx] + (slice(None), ) * extra + slices[idx+1:] return result @staticmethod def check_well_formed(data): """Checks if input data is well-formed, regular array""" d = numpy.asanyarray(data) if d.dtype == numpy.object: #this is probably going to be bad try: len(d.flat[0]) except TypeError: #at least it's not a list pass else: raise ValueError( 'Data must be well-formed, regular array of number, ' 'string, or datetime') @staticmethod def dimensions(data): """Finds the dimensions of a nested list-of-lists @param data: data of which dimensions are desired @type data: list (of lists) @return: dimensions of L{data}, in order outside-in @rtype: list of int @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) _Hyperslice.check_well_formed(d) return d.shape @staticmethod def types(data, backward=False): """Find dimensions and valid types of a nested list-of-lists Any given data may be representable by a range of CDF types; infer the CDF types which can represent this data. This breaks down to: 1. Proper kind (numerical, string, time) 2. Proper range (stores highest and lowest number) 3. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: 1. Type that matches precision of data first, then 2. integer type before float type, then 3. Smallest type first, then 4. signed type first, then 5. specifically-named (CDF_BYTE) vs. 
generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if L{data} specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: 1. absolute values between 0 and 3e-39 2. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. @param data: data for which dimensions and CDF types are desired @type data: list (of lists) @param backward: limit to pre-CDF3 types @type backward: bool @return: dimensions of L{data}, in order outside-in; CDF types which can represent this data; number of elements required (i.e. length of longest string) @rtype: 3-tuple of lists ([int], [ctypes.c_long], [int]) @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) dims = d.shape elements = 1 types = [] _Hyperslice.check_well_formed(d) if d.dtype.kind in ('S', 'U'): #it's a string types = [const.CDF_CHAR, const.CDF_UCHAR] elements = d.dtype.itemsize if d.dtype.kind == 'U': #UTF-8 uses 4 bytes per elements //= 4 elif d.size and hasattr(numpy.ma.getdata(d).flat[0], 'microsecond'): if max((dt.microsecond % 1000 for dt in d.flat)) > 0: types = [const.CDF_EPOCH16, const.CDF_EPOCH, const.CDF_TIME_TT2000] else: types = [const.CDF_EPOCH, const.CDF_EPOCH16, const.CDF_TIME_TT2000] if backward: del types[types.index(const.CDF_EPOCH16)] del types[-1] elif not lib.supports_int8: del types[-1] elif d is data or isinstance(data, numpy.generic): #numpy array came in, use its type (or byte-swapped) types = [k for k in lib.numpytypedict if (lib.numpytypedict[k] == d.dtype or lib.numpytypedict[k] == d.dtype.newbyteorder()) and not k in lib.timetypes] if (not lib.supports_int8 or backward) \ and const.CDF_INT8.value in types: del types[types.index(const.CDF_INT8.value)] #Maintain priority to match the ordered lists below: #float/double (44, 45) before real (21/22), and #byte (41) before int (1) before char (51). So hack. #Consider making typedict an ordered dict once 2.6 is dead. types.sort(key=lambda x: x % 50, reverse=True) if not types: #not a numpy array, or can't parse its type if d.dtype.kind == 'O': #Object. 
Try to make it numeric #Can't do safe casting from Object, so try and compare #Basically try most restrictive to least restrictive trytypes = (numpy.uint64, numpy.int64, numpy.float64) for t in trytypes: try: newd = d.astype(dtype=t) except: #Failure to cast, try next type continue if (newd == d).all(): #Values preserved, use this type d = newd #Continue with normal guessing, as if a list break else: #fell through without a match raise ValueError( 'Cannot convert generic objects to CDF type.') if d.dtype.kind in ('i', 'u'): #integer minval = numpy.min(d) maxval = numpy.max(d) if minval < 0: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_INT2, const.CDF_INT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 15, 2 ** 31, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] else: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_UINT1, const.CDF_INT2, const.CDF_UINT2, const.CDF_INT4, const.CDF_UINT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 8, 2 ** 15, 2 ** 16, 2 ** 31, 2 ** 32, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] types = [t for (t, c) in zip(types, cutoffs) if c > maxval and (minval >= 0 or minval >= -c)] if (not lib.supports_int8 or backward) \ and const.CDF_INT8 in types: del types[types.index(const.CDF_INT8)] else: #float if dims is (): if d != 0 and (abs(d) > 1.7e38 or abs(d) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] else: absolutes = numpy.abs(d[d != 0]) if len(absolutes) > 0 and \ (numpy.max(absolutes) > 1.7e38 or numpy.min(absolutes) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] types = [t.value if hasattr(t, 'value') else t for t in types] return (dims, types, elements) @staticmethod def reorder(seq): """Reorders seq to switch array majority Used to take an array of subscripts between row and column majority. First element is not touched, being the record number. @param seq: a sequence of *subscripts* @type seq: sequence of integers @return: seq with all but element 0 reversed in order @rtype: sequence of integers """ return numpy.concatenate((seq[0:1], numpy.flipud(seq)[:-1])) @staticmethod def convert_range(start, stop, step, size): """Converts a start/stop/step range to start/count/interval (i.e. changes from Python-style slice to CDF-style) @param start: index to start a slice at, may be none or negative @type start: int @param stop: index at end of slice (one-past, standard Python), may be none or negative @type stop: int @param step: interval for stepping through stlice @type step: int @param size: size of list to slice @type size: int @return: (start, count, interval, rev) where: 1. start is the start index, normalized to be within the size of the list and negatives handled 2. count is the number of records in the slice, guaranteed to stop before the end 3. interval is the skip between records 4. rev indicates whether the sequence should be reversed @rtype: (int, int, int, boolean) """ (start, stop, step) = slice(start, stop, step).indices(size) if step < 0: step *= -1 count = int((start - stop + step - 1) / step) start = start - (count - 1) * step rev = True else: count = int((stop - start + step - 1) / step) rev = False if count < 0: count = 0 start = 0 return (start, count, step, rev) class Attr(MutableSequence): """An attribute, g or z, for a CDF .. 
warning:: This class should not be used directly, but only in its subclasses, :class:`gAttr` and :class:`zAttr`. The methods listed here are safe to use in the subclasses. Represents a CDF attribute, providing access to the Entries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. An introduction to CDF attributes can be found in section 2.4 of the CDF user's guide. Each element of the list is a single Entry of the appropriate type. The index to the elements is the Entry number. Multi-dimensional slicing is *not* supported; an Entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry .. autosummary:: ~Attr.append ~Attr.has_entry ~Attr.insert ~Attr.max_idx ~Attr.new ~Attr.number ~Attr.rename ~Attr.type .. automethod:: append .. automethod:: has_entry .. automethod:: insert .. automethod:: max_idx .. automethod:: new .. automethod:: number .. automethod:: rename .. automethod:: type """ def __init__(self, cdf_file, attr_name, create=False): """Initialize this attribute @param cdf_file: CDF file containing this attribute @type cdf_file: :py:class:`pycdf.CDF` @param attr_name: Name of this attribute @type attr_name: str @param create: True to create attribute, False to look up existing. @type create: bool """ self._cdf_file = cdf_file self._raw = False if isinstance(attr_name, str_classes): try: self._name = attr_name.encode('ascii') except AttributeError: self._name = attr_name attrno = ctypes.c_long() if create: self._cdf_file._call(const.CREATE_, const.ATTR_, self._name, self.SCOPE, ctypes.byref(attrno)) self._cdf_file.add_attr_to_cache( self._name, attrno.value, self.SCOPE == const.GLOBAL_SCOPE) else: #Ensure exists, and populate cache. See scope note below attrno, scope = self._cdf_file.attr_num(self._name) else: name = ctypes.create_string_buffer(const.CDF_ATTR_NAME_LEN256 + 1) scope = ctypes.c_long(0) self._cdf_file._call(const.SELECT_, const.ATTR_, ctypes.c_long(attr_name)) #Because it's possible to create a gAttr Python objecting #referencing an Attribute with variable scope, and vice-versa, #do NOT assume the scope matches #(Higher level code checks for that being a bad thing.) self._cdf_file._call( const.GET_, const.ATTR_NAME_, name, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) self._name = name.value.rstrip() if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) self._cdf_file.add_attr_to_cache(self._name, attr_name, scope) def __getitem__(self, key): """Return a slice of Entries. Because Attributes may be sparse, a multi-element slice will return None for those elements which do not have associated Entries. @param key: index or range of Entry number to return @type key: slice or int @return: a list of entries, appropriate type. @raise IndexError: if L{key} is an int and that Entry number does not exist. 
""" if key is Ellipsis: key = slice(None, None, None) if hasattr(key, 'indices'): idx = range(*key.indices(self.max_idx() + 1)) return [self._get_entry(i) if self.has_entry(i) else None for i in idx] else: if self.has_entry(key): return self._get_entry(key) else: raise IndexError('list index ' + str(key) + ' out of range.') def _check_other_entries(self, types): """Try to get the type of this entry from others in the Attribute For zAttrs, checks if all other Entries are the same type, and at least one doesn't match its zVar, i.e. Entry type dominates (otherwise assumption is the Var type dominates). For gAttrs, checks all other Entries, and gives priority to the one that's earliest in the possible type list and exists in other Entries. This is only one component of Entry type guessing! :param list types: CDF types that are candidates (match the data) :return: The type discerned from other Entries, or None """ if self.ENTRY_ == const.zENTRY_: #If everything else is the same entry type, #and one is not the same as its var, probably #all entries should be of that type cand_et = None #The Entry type that might work one_var_diff = False #One Var has a type different from Entry for num in range(self.max_idx() + 1): if not self.has_entry(num): continue vartype = self._cdf_file[num].type() entrytype = self.type(num) if vartype != entrytype: one_var_diff = True if cand_et is None: if not entrytype in types: return None #One var has Entry with "impossible" type cand_et = entrytype elif cand_et != entrytype: return None #Two vars have Entries with different types if one_var_diff and cand_et is not None: return cand_et else: # Of those types which exist in other entries, # find the one which is earliest # in types, i.e. the preferred type entrytypes = [self.type(num) for num in range(self.max_idx() + 1) if self.has_entry(num)] entrytypes = [et for et in entrytypes if et in types] if entrytypes: return types[ min([types.index(et) for et in entrytypes])] return None def __setitem__(self, key, data): """Set a slice of Entries. @param key: index or range of Entry numbers to set @type key: slice or int @param data: the data to set these entries to. Normally each entry should be a sequence; if a scalar is provided, it is treated as a single-element list. @type data: scalar or list @raise ValueError: if size of {data} does not match size of L{key} @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. 
""" if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): #Single value, promote everything a dimension idx = (key, key + 1, 1) data = [data] else: idx = key.indices(self.max_idx() + 1) if key.step is None or key.step > 0: #Iterating forward, extend slice to match data if len(data) > len(range(*idx)): idx = (idx[0], idx[0] + idx[2] * len(data), idx[2]) #get, and check, types and sizes for all data #checks first so don't have error after changing half the Entries data_idx = -1 typelist = [] for i in range(*idx): data_idx += 1 if data_idx >= len(data): continue datum = data[data_idx] if datum is None: typelist[i] = (None, None, None) continue (dims, types, elements) = _Hyperslice.types( datum, backward=self._cdf_file.backward) if len(types) <= 0: raise ValueError('Cannot find a matching CDF type.') if len(dims) > 1: raise ValueError('Entries must be scalar or 1D.') elif len(dims) == 1 and isinstance(datum[0], str_classes): raise ValueError('Entry strings must be scalar.') entry_type = None if self.has_entry(i): #If the entry already exists, match its type entry_type = self.type(i) if not entry_type in types: entry_type = None if entry_type is None: #Check other entries for this attribute entry_type = self._check_other_entries(types) if entry_type is None and self.ENTRY_ == const.zENTRY_: #Fall back to zVar type vartype = self._cdf_file[i].type() if vartype in types: entry_type = vartype else: entry_type = types[0] elif entry_type is None: entry_type = types[0] if not entry_type in lib.numpytypedict: raise ValueError('Cannot find a matching numpy type.') typelist.append((dims, entry_type, elements)) data_idx = -1 for i in range(*idx): data_idx += 1 if data_idx >= len(data) or data[data_idx] is None: if self.has_entry(i): del self[i] continue datum = data[data_idx] (dims, entry_type, elements) = typelist[data_idx] self._write_entry(i, datum, entry_type, dims, elements) def __delitem__(self, key): """Delete a slice of Entries. @param key: index or range of Entry numbers to delete @type key: slice or int @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. """ if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): idx = (key, key + 1, 1) else: idx = key.indices(self.max_idx() + 1) for i in range(*idx): self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(i), const.DELETE_, self.ENTRY_) def __iter__(self, current=0): """Iterates over all entries in this Attribute Returns data from one entry at a time until reaches the end. @note: Returned in entry-number order. """ while current <= self.max_idx(): if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current += 1 def __reversed__(self, current=None): """Iterates over all entries in this Attribute Returns data from one entry at a time, starting at end and going to beginning. @note: Returned in entry-number order. """ if current is None: current = self.max_idx() while current >= 0: if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current -= 1 def __len__(self): """Number of Entries for this Attr. NOT same as max Entry number. 
@return: Number of Entries @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_NUMENTRIES_, ctypes.byref(count)) return count.value def __repr__(self): """Returns representation of an attribute Cannot return anything that can be eval'd to create a copy of the attribtute, so just wrap the informal representation in angle brackets. @return: all the data in this attribute @rtype: str """ return '<\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute This is an 'informal' representation in that it cannot be evaluated directly to create an L{Attr}. @return: all the data in this attribute @rtype: str """ if self._cdf_file._opened: return '\n'.join([str(item) for item in self]) else: if isinstance(self._name, str): return 'Attribute "{0}" in closed CDF {1}'.format( self._name, self._cdf_file.pathname) else: return 'Attribute "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self._cdf_file.pathname.decode('ascii')) def insert(self, index, data): """Insert an entry at a particular number Inserts entry at particular number while moving all subsequent entries to one entry number later. Does not close gaps. Parameters ========== index : int index where to put the new entry data : data for the new entry """ max_entry = self.max_idx() if index > max_entry: #Easy case self[index] = data return for i in range(max_entry, index - 1, -1): if self.has_entry(i+1): self.__delitem__(i+1) if self.has_entry(i): self.new(self.__getitem__(i), type=self.type(i), number=i+1) self[index] = data def append(self, data): """Add an entry to end of attribute Puts entry after last defined entry (does not fill gaps) Parameters ========== data : data for the new entry """ self[self.max_idx() + 1] = data def _call(self, *args, **kwargs): """Select this CDF and Attr and call the CDF internal interface @param args: Passed directly to the CDF library interface. @type args: various, see :py:mod:`ctypes`. @return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self._cdf_file._call( const.SELECT_, const.ATTR_, ctypes.c_long(self._cdf_file.attr_num(self._name)[0]), *args, **kwargs) def _entry_len(self, number): """Number of elements in an Entry @param number: number of Entry @type number: int @return: number of elements @rtype: int """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') count = ctypes.c_long(0) self._call( const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_NUMELEMS_, ctypes.byref(count)) return count.value def type(self, number, new_type=None): """Find or change the CDF type of a particular Entry number Parameters ========== number : int number of Entry to check or change Other Parameters ================ new_type type to change this Entry to, from :mod:`~pycdf.const`. Omit to only check type. Returns ======= out : int CDF variable type, see :mod:`~pycdf.const` Notes ===== If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) size = ctypes.c_long(self._entry_len(number)) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATASPEC_, new_type, size, ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') cdftype = ctypes.c_long(0) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATATYPE_, ctypes.byref(cdftype), ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') return cdftype.value def has_entry(self, number): """Check if this attribute has a particular Entry number Parameters ========== number : int number of Entry to check or change Returns ======= out : bool True if ``number`` is a valid entry number; False if not """ status = self._call(const.CONFIRM_, self.ENTRY_EXISTENCE_, ctypes.c_long(number), ignore=(const.NO_SUCH_ENTRY, )) return not status == const.NO_SUCH_ENTRY def max_idx(self): """Maximum index of Entries for this Attr Returns ======= out : int maximum Entry number """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_MAXENTRY_, ctypes.byref(count)) return count.value def new(self, data, type=None, number=None): """Create a new Entry in this Attribute .. note:: If ``number`` is provided and an Entry with that number already exists, it will be overwritten. Parameters ========== data data to put in the Entry Other Parameters ================ type : int type of the new Entry, from :mod:`~pycdf.const` (otherwise guessed from ``data``) number : int Entry number to write, default is lowest available number. """ if number is None: number = 0 while self.has_entry(number): number += 1 (dims, types, elements) = _Hyperslice.types( data, backward=self._cdf_file.backward) if type is None: #Guess based on other entries type = self._check_other_entries(types) if type is None and self.ENTRY_ == const.zENTRY_: #Try to match variable type vartype = self._cdf_file[number].type() if vartype in types: type = vartype if type is None: type = types[0] elif hasattr(type, 'value'): type = type.value self._write_entry(number, data, type, dims, elements) def number(self): """Find the attribute number for this attribute Returns ======= out : int attribute number """ no = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.ATTR_NUMBER_, self._name, ctypes.byref(no)) return no.value def global_scope(self): """Determine scope of this attribute. Returns ======= out : bool True if global (i.e. gAttr), False if zAttr """ return self._cdf_file.attr_num(self._name)[1] def rename(self, new_name): """Rename this attribute Renaming a zAttribute renames it for *all* zVariables in this CDF! 
Parameters ========== new_name : str the new name of the attribute """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_ATTR_NAME_LEN256: raise CDFError(const.BAD_ATTR_NAME) self._call(const.PUT_, const.ATTR_NAME_, enc_name) self._cdf_file.add_attr_to_cache( enc_name, *self._cdf_file.attr_num(self._name)) #still in cache del self._cdf_file._attr_info[self._name] self._name = enc_name def _get_entry(self, number): """Read an Entry associated with this L{Attr} @param number: number of Entry to return @type number: int @return: data from entry numbered L{number} @rtype: list or str """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') #Make a big enough buffer length = self._entry_len(number) cdftype = self.type(number) if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): buff = numpy.empty((), 'S{0}'.format(length), order='C') else: if not cdftype in lib.numpytypedict: raise CDFError(const.BAD_DATA_TYPE) buff = numpy.empty((length,), lib.numpytypedict[cdftype], order='C') buff = numpy.require(buff, requirements=('C', 'A', 'W')) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATA_, buff.ctypes.data_as(ctypes.c_void_p)) #decode if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): if str == bytes or self._raw: #Py2k, leave as bytes result = bytes(buff) else: #Py3k, make unicode result = str(numpy.char.array(buff).decode()) else: if not self._raw: if cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(buff) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(buff) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(buff) else: result = buff else: result = buff if length == 1: result = result[0] return result def _write_entry(self, number, data, cdf_type, dims, elements): """Write an Entry to this Attr. @param number: number of Entry to write @type number: int @param data: data to write @param cdf_type: the CDF type to write, from :py:mod:`pycdf.const` @param dims: dimensions of L{data} @type dims: list @param elements: number of elements in L{data}, 1 unless it is a string @type elements: int """ if len(dims) == 0: n_write = 1 else: n_write = dims[0] if cdf_type in (const.CDF_CHAR.value, const.CDF_UCHAR.value): data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.dtype('S' + str(elements))) n_write = elements elif cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data), except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) elif cdf_type in lib.numpytypedict: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=lib.numpytypedict[cdf_type]) else: raise CDFError(const.BAD_DATA_TYPE) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATA_, ctypes.c_long(cdf_type), ctypes.c_long(n_write), data.ctypes.data_as(ctypes.c_void_p)) def _delete(self): """Delete this Attribute Also deletes all Entries associated with it. 
""" self._call(const.DELETE_, const.ATTR_) self._cdf_file.clear_attr_from_cache(self._name) self._name = None class zAttr(Attr): """zAttribute for zVariables within a CDF. .. warning:: Because zAttributes are shared across all variables in a CDF, directly manipulating them may have unexpected consequences. It is safest to operate on zEntries via :class:`zAttrList`. .. note:: When accessing a zAttr, pyCDF exposes only the zEntry corresponding to the associated zVariable. See Also ======== :class:`Attr` """ ENTRY_ = const.zENTRY_ ENTRY_DATA_ = const.zENTRY_DATA_ SCOPE = const.VARIABLE_SCOPE ENTRY_EXISTENCE_ = const.zENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMzENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXzENTRY_ ENTRY_NUMELEMS_ = const.zENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.zENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.zENTRY_DATASPEC_ def insert(self, index, data): """Insert entry at particular index number Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError def append(self, index, data): """Add entry to end of attribute list Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError class gAttr(Attr): """Global Attribute for a CDF Represents a CDF attribute, providing access to the gEntries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. Normally accessed by providing a key to a :class:`gAttrList`: >>> attribute = cdffile.attrs['attribute_name'] >>> first_gentry = attribute[0] Each element of the list is a single gEntry of the appropriate type. The index to the elements is the gEntry number. A gEntry may be either a single string or a 1D array of numerical type. Entries of numerical type (everything but CDF_CHAR and CDF_UCHAR) with a single element are returned as scalars; multiple-element entries are returned as a list. No provision is made for accessing below the entry level; the whole list is returned at once (but Python's slicing syntax can be used to extract individual items from that list.) Multi-dimensional slicing is *not* supported; an entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry gEntries are *not* necessarily contiguous; a gAttribute may have an entry 0 and entry 2 without an entry 1. :meth:`~Attr.len` will return the *number* of gEntries; use :meth:`~Attr.max_idx` to find the highest defined gEntry number and :meth:`~Attr.has_entry` to determine if a particular gEntry number exists. Iterating over all entries is also supported:: >>> entrylist = [entry for entry in attribute] Deleting gEntries will leave a "hole": >>> attribute[0:3] = [1, 2, 3] >>> del attribute[1] >>> attribute.has_entry(1) False >>> attribute.has_entry(2) True >>> print attribute[0:3] [1, None, 3] Multi-element slices over nonexistent gEntries will return ``None`` where no entry exists. Single-element indices for nonexistent gEntries will raise ``IndexError``. Assigning ``None`` to a gEntry will delete it. 
When assigning to a gEntry, the type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing gEntry of the same number in this gAttribute #. other gEntries in this gAttribute #. data-matching constraints described in :meth:`CDF.new`. See Also ======== :class:`Attr` """ ENTRY_ = const.gENTRY_ ENTRY_DATA_ = const.gENTRY_DATA_ SCOPE = const.GLOBAL_SCOPE ENTRY_EXISTENCE_ = const.gENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMgENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXgENTRY_ ENTRY_NUMELEMS_ = const.gENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.gENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.gENTRY_DATASPEC_ class AttrList(MutableMapping): """Object representing a list of attributes. .. warning:: This class should not be used directly, but only via its subclasses, :class:`gAttrList` and :class:`zAttrList`. Methods listed here are safe to use from the subclasses. .. autosummary:: ~AttrList.clone ~AttrList.copy ~AttrList.from_dict ~AttrList.new ~AttrList.rename .. automethod:: clone .. automethod:: copy .. automethod:: from_dict .. automethod:: new .. automethod:: rename """ def __init__(self, cdf_file, special_entry=None): """Initialize the attribute collection @param cdf_file: CDF these attributes are in @type cdf_file: :py:class:`pycdf.CDF` @param special_entry: callable which returns a "special" entry number, used to limit results for zAttrs to those which match the zVar (i.e. the var number) @type special_entry: callable """ self._cdf_file = cdf_file self.special_entry = special_entry def __getitem__(self, name): """Find an Attribute by name @param name: name of the Attribute to return @type name: str @return: attribute named L{name} @rtype: L{Attr} @raise KeyError: if there is no attribute named L{name} @raise CDFError: other errors in CDF library """ try: attrib = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attrib.global_scope() != self.global_scope: raise KeyError(name + ': no ' + self.attr_name + ' by that name.') return attrib def __setitem__(self, name, data): """Create an Attribute or change its entries @param name: name of Attribute to change @type name: str @param data: Entries to populate this Attribute with. Any existing Entries will be deleted! Another C{Attr} may be specified, in which case all its entries are copied. @type data: scalar, list, or L{Attr} """ if isinstance(data, AttrList): if name in self: del self[name] attr = self._get_or_create(name) for entryno in range(data.max_idx()): if data.has_entry(entryno): attr.new(data[entryno], data.type(entryno), entryno) else: attr = self._get_or_create(name) if isinstance(data, str_classes): data = [data] else: try: junk = len(data) except TypeError: data = [data] attr[:] = data del attr[len(data):] def __delitem__(self, name): """Delete an Attribute (and all its entries) @param name: name of Attribute to delete @type name: str """ try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) attr._delete() def __iter__(self, current=0): """Iterates over all Attr in this CDF or variable Returns name of one L{Attr} at a time until reaches the end. @note: Returned in number order. 
""" count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) while current < count.value: candidate = self.AttrType(self._cdf_file, current) if candidate.global_scope() == self.global_scope: if self.special_entry is None or \ candidate.has_entry(self.special_entry()): if str == bytes: value = yield(candidate._name) else: value = yield(candidate._name.decode()) if value != None: current = self[value].number() current += 1 def __repr__(self): """Returns representation of attribute list Cannot return anything that can be eval'd to create a copy of the list, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<' + self.__class__.__name__ + ':\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute list This is an 'informal' representation in that it cannot be evaluated directly to create an L{AttrList}. @return: all the data in this list of attributes @rtype: str """ if self._cdf_file._opened: return '\n'.join([key + ': ' + ( ('\n' + ' ' * (len(key) + 2)).join( [str(value[i]) + ' [' + lib.cdftypenames[value.type(i)] + ']' for i in range(value.max_idx() + 1) if value.has_entry(i)]) if isinstance(value, Attr) else str(value) + ' [' + lib.cdftypenames[self.type(key)] + ']' ) for (key, value) in sorted(self.items())]) else: if isinstance(self._cdf_file.pathname, str): return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname) else: return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname.decode('ascii')) def clone(self, master, name=None, new_name=None): """ Clones another attribute list, or one attribute from it, into this list. Parameters ========== master : AttrList the attribute list to copy from. This can be any dict-like object. Other Parameters ================ name : str (optional) name of attribute to clone (default: clone entire list) new_name : str (optional) name of the new attribute, default ``name`` """ if name is None: self._clone_list(master) else: self._clone_attr(master, name, new_name) def copy(self): """ Create a copy of this attribute list Returns ======= out : dict copy of the entries for all attributes in this list """ return dict((key, value[:] if isinstance(value, Attr) else value) for (key, value) in self.items()) def new(self, name, data=None, type=None): """ Create a new Attr in this AttrList Parameters ========== name : str name of the new Attribute Other Parameters ================ data data to put into the first entry in the new Attribute type CDF type of the first entry from :mod:`~pycdf.const`. Only used if data are specified. Raises ====== KeyError : if the name already exists in this list """ if name in self: raise KeyError(name + ' already exists.') #A zAttr without an Entry in this zVar will be a "get" not "create" attr = self._get_or_create(name) if data is not None: if self.special_entry is None: attr.new(data, type) else: attr.new(data, type, self.special_entry()) def rename(self, old_name, new_name): """ Rename an attribute in this list Renaming a zAttribute renames it for *all* zVariables in this CDF! Parameters ========== old_name : str the current name of the attribute new_name : str the new name of the attribute """ AttrList.__getitem__(self, old_name).rename(new_name) def from_dict(self, in_dict): """ Fill this list of attributes from a dictionary .. deprecated:: 0.1.5 Use :meth:`~pycdf.AttrList.clone` instead; it supports cloning from dictionaries. 
Parameters ========== in_dict : dict Attribute list is populated entirely from this dictionary; all existing attributes are deleted. """ warnings.warn("from_dict is deprecated and will be removed. Use clone.", DeprecationWarning) for k in in_dict: self[k] = in_dict[k] for k in list(self): if not k in in_dict: del self[k] def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{AttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name self[new_name] = master[name] def _clone_list(self, master): """Clones this attribute list from another @param master: the attribute list to copy from @type master: L{AttrList} """ for name in master: self._clone_attr(master, name) for name in list(self): #Can't iterate over a list we're changing if not name in master: del self[name] def _get_or_create(self, name): """Retrieve L{Attr} or create it if it doesn't exist @param name: name of the attribute to look up or create @type name: str @return: attribute with this name @rtype: L{Attr} """ attr = None try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status != const.NO_SUCH_ATTR: raise if attr is None: attr = self.AttrType(self._cdf_file, name, True) elif attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) return attr class gAttrList(AttrList): """ Object representing *all* the gAttributes in a CDF. Normally accessed as an attribute of an open :class:`CDF`: >>> global_attribs = cdffile.attrs Appears as a dictionary: keys are attribute names; each value is an attribute represented by a :class:`gAttr` object. To access the global attribute TEXT: >>> text_attr = cdffile.attrs['TEXT'] See Also ======== :class:`AttrList` """ AttrType = gAttr attr_name = 'gAttribute' global_scope = True def __len__(self): """ Number of gAttributes in this CDF Returns ======= out : int number of gAttributes in the CDF """ count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMgATTRS_, ctypes.byref(count)) return count.value class zAttrList(AttrList): """Object representing *all* the zAttributes in a zVariable. Normally accessed as an attribute of a :class:`Var` in an open CDF: >>> epoch_attribs = cdffile['Epoch'].attrs Appears as a dictionary: keys are attribute names, values are the value of the zEntry associated with the appropriate zVariable. Each vAttribute in a CDF may only have a *single* entry associated with each variable. The entry may be a string, a single numerical value, or a series of numerical values. Entries with multiple values are returned as an entire list; direct access to the individual elements is not possible. Example: finding the first dependency of (ISTP-compliant) variable ``Flux``: >>> print cdffile['Flux'].attrs['DEPEND_0'] zAttributes are shared among zVariables, one zEntry allowed per zVariable. (pyCDF hides this detail.) Deleting the last zEntry for a zAttribute will delete the underlying zAttribute. zEntries are created and destroyed by the usual dict methods on the zAttrlist: >>> epoch_attribs['new_entry'] = [1, 2, 4] #assign a list to new zEntry >>> del epoch_attribs['new_entry'] #delete the zEntry The type of the zEntry is guessed from data provided. 
The type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing zEntry corresponding to this zVar #. other zEntries in this zAttribute #. the type of this zVar #. data-matching constraints described in :py:meth:`CDF.new` See Also ======== :class:`AttrList` """ AttrType = zAttr attr_name = 'zAttribute' global_scope = False def __init__(self, zvar): """Initialize the attribute collection @param zvar: zVariable these attributes are in @param zvar: :py:class:`pycdf.Var` """ super(zAttrList, self).__init__(zvar.cdf_file, zvar._num) self._zvar = zvar def __getitem__(self, name): """Find an zEntry by name @param name: name of the zAttribute to return @type name: str @return: attribute named L{name} @rtype: L{zAttr} @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if attrib.has_entry(zvar_num): attrib._raw = self._zvar._raw return attrib[zvar_num] else: raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) def __delitem__(self, name): """Delete an zEntry by name @param name: name of the zEntry to delete @type name: str @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library @note: If this is the only remaining entry, the Attribute will be deleted. """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(str(name) + ': no such attribute for variable ' + str(self._zvar._name)) del attrib[zvar_num] if len(attrib) == 0: attrib._delete() def __setitem__(self, name, data): """Sets a zEntry by name The type of the zEntry is guessed from L{data}. The type is chosen to match the data; subject to that constraint, it will try to match (in order): 1. existing zEntry corresponding to this zVar 2. other zEntries in this zAttribute 3. the type of this zVar 4. data-matching constraints described in L{_Hyperslice.types} @param name: name of zAttribute; zEntry for this zVariable will be set in zAttribute by this name @type name: str @raise CDFError: errors in CDF library @raise ValueError: if unable to find a valid CDF type matching L{data}, or if L{data} is the wrong dimensions. """ try: attr = super(zAttrList, self).__getitem__(name) except KeyError: attr = zAttr(self._cdf_file, name, True) attr._raw = self._zvar._raw attr[self._zvar._num()] = data def __len__(self): """Number of zAttributes in this variable @return: number of zAttributes in the CDF which have entries for this variable. @rtype: int """ length = 0 count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) current = 0 while current < count.value: candidate = zAttr(self._cdf_file, current) if not candidate.global_scope(): if candidate.has_entry(self._zvar._num()): length += 1 current += 1 return length def type(self, name, new_type=None): """Find or change the CDF type of a zEntry in this zVar @param name: name of the zAttr to check or change @type name: str @param new_type: type to change it to, see :py:mod:`pycdf.const` @type new_type: ctypes.c_long @return: CDF variable type, see :py:mod:`pycdf.const` @rtype: int @note: If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) return attrib.type(zvar_num, new_type) def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{zAttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name if new_name in self: del self[new_name] self.new(new_name, master[name], master.type(name) if hasattr(master, 'type') else None)
set_backward
Set backward compatibility mode for new CDFs

Unless backward compatible mode is set, CDF files created by
the version 3 library cannot be read by the version 2 library.

Parameters
==========
backward : boolean
    Set backward compatible mode if True; clear it if False.

Raises
======
ValueError : if backward=False and underlying CDF library is V2
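Before the masked block below, here is a minimal sketch of what a method matching this docstring could look like. It is an assumption, not the masked implementation: it presumes the CDFsetFileBackward binding set up in the Library constructor and BACKWARDFILEon/BACKWARDFILEoff constants in the bundled const module.

def set_backward(self, backward=True):
    # Sketch only -- not the masked implementation from the source file.
    if self.version[0] < 3:
        if not backward:
            # Per the docstring: a V2 library cannot write V3 files.
            raise ValueError('Backward compatibility cannot be disabled '
                             'with a V2 CDF library.')
        return  # a V2 library already writes V2 files; nothing to do
    self._library.CDFsetFileBackward(
        const.BACKWARDFILEon if backward else const.BACKWARDFILEoff)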
#!/usr/bin/env python # -*- coding: utf-8 -*- """ das developers note: This a is modification of the original SpacePy pycdf package. All refereneces to the greater spacepy package have been removed to create a small standalone module. --cwp 2018-10-18 The libcdf.so location code has been changed to find the version installed in anaconda. --cwp 2020-04-06 This package provides a Python interface to the Common Data Format (CDF) library used for many NASA missions, available at http://cdf.gsfc.nasa.gov/. It is targeted at Python 2.6+ and should work without change on either Python 2 or Python 3. The interface is intended to be 'pythonic' rather than reproducing the C interface. To open or close a CDF and access its variables, see the :class:`CDF` class. Accessing data within the variables is via the :class:`Var` class. The :data:`lib` object provides access to some routines that affect the functionality of the library in general. The :mod:`~pycdf.const` module contains constants useful for accessing the underlying library. Authors: Jon Niehof Institution: University of New Hampshire Contact: [email protected] Copyright 2010-2015 Los Alamos National Security, LLC. """ __contact__ = 'Jon Niehof, [email protected]' try: from collections.abc import MutableMapping, MutableSequence except ImportError: from collections import MutableMapping, MutableSequence import ctypes import ctypes.util import datetime import operator import os import os.path import shutil import sys import tempfile import warnings import weakref import numpy import numpy.ma #Import const AFTER library loaded, so failed load doesn't leave half-imported #from . import const try: str_classes = (str, bytes, unicode) except NameError: str_classes = (str, bytes) class Library(object): """ Abstraction of the base CDF C library and its state. Not normally intended for end-user use. An instance of this class is created at package load time as the :data:`~pycdf.lib` variable, providing access to the underlying C library if necessary. The CDF library itself is described in section 2.1 of the CDF user's guide, as well as the CDF C reference manual. Calling the C library directly requires knowledge of :mod:`ctypes`. Instantiating this object loads the C library, see :doc:`/pycdf` docs for details. .. autosummary:: ~Library.call ~Library.check_status ~Library.datetime_to_epoch ~Library.datetime_to_epoch16 ~Library.datetime_to_tt2000 ~Library.epoch_to_datetime ~Library.epoch_to_epoch16 ~Library.epoch_to_num ~Library.epoch_to_tt2000 ~Library.epoch16_to_datetime ~Library.epoch16_to_epoch ~Library.epoch16_to_tt2000 ~Library.set_backward supports_int8 ~Library.tt2000_to_datetime ~Library.tt2000_to_epoch ~Library.tt2000_to_epoch16 v_datetime_to_epoch v_datetime_to_epoch16 v_datetime_to_tt2000 v_epoch_to_datetime v_epoch_to_tt2000 v_epoch16_to_datetime v_epoch16_to_tt2000 v_tt2000_to_datetime v_tt2000_to_epoch v_tt2000_to_epoch16 libpath version .. automethod:: call .. automethod:: check_status .. automethod:: datetime_to_epoch .. automethod:: datetime_to_epoch16 .. automethod:: datetime_to_tt2000 .. automethod:: epoch_to_datetime .. automethod:: epoch_to_epoch16 .. automethod:: epoch_to_num .. automethod:: epoch_to_tt2000 .. automethod:: epoch16_to_datetime .. automethod:: epoch16_to_epoch .. automethod:: epoch16_to_tt2000 .. automethod:: set_backward .. attribute:: supports_int8 True if this library supports INT8 and TIME_TT2000 types; else False. .. automethod:: tt2000_to_datetime .. automethod:: tt2000_to_epoch .. 
automethod:: tt2000_to_epoch16 .. method:: v_datetime_to_epoch(datetime) A vectorized version of :meth:`datetime_to_epoch` which takes a numpy array of datetimes as input and returns an array of epochs. .. method:: v_datetime_to_epoch16(datetime) A vectorized version of :meth:`datetime_to_epoch16` which takes a numpy array of datetimes as input and returns an array of epoch16. .. method:: v_datetime_to_tt2000(datetime) A vectorized version of :meth:`datetime_to_tt2000` which takes a numpy array of datetimes as input and returns an array of TT2000. .. method:: v_epoch_to_datetime(epoch) A vectorized version of :meth:`epoch_to_datetime` which takes a numpy array of epochs as input and returns an array of datetimes. .. method:: v_epoch_to_tt2000(epoch) A vectorized version of :meth:`epoch_to_tt2000` which takes a numpy array of epochs as input and returns an array of tt2000s. .. method:: v_epoch16_to_datetime(epoch0, epoch1) A vectorized version of :meth:`epoch16_to_datetime` which takes a numpy array of epoch16 as input and returns an array of datetimes. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_epoch16_to_tt2000(epoch16) A vectorized version of :meth:`epoch16_to_tt2000` which takes a numpy array of epoch16 as input and returns an array of tt2000s. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_tt2000_to_datetime(tt2000) A vectorized version of :meth:`tt2000_to_datetime` which takes a numpy array of tt2000 as input and returns an array of datetimes. .. method:: v_tt2000_to_epoch(tt2000) A vectorized version of :meth:`tt2000_to_epoch` which takes a numpy array of tt2000 as input and returns an array of epochs. .. method:: v_tt2000_to_epoch16(tt2000) A vectorized version of :meth:`tt2000_to_epoch16` which takes a numpy array of tt2000 as input and returns an array of epoch16. .. attribute:: libpath The path where pycdf found the CDF C library, potentially useful in debugging. If this contains just the name of a file (with no path information), then the system linker found the library for pycdf. On Linux, ``ldconfig -p`` may be useful for displaying the system's library resolution. .. attribute:: version Version of the CDF library, (version, release, increment, subincrement) """ def __init__(self, libpath=None, library=None): """Load the CDF C library. Searches for the library in the order: 1. Appropriately-named file in CDF_LIB 2. Appropriately-named file in CDF_BASE 3. Standard library search path @raise CDFError: BAD_DATA_TYPE if can't map types properly """ if not 'CDF_TMP' in os.environ: os.environ['CDF_TMP'] = tempfile.gettempdir() if not library: if not libpath: self.libpath, self._library = self._find_lib() if self._library is None: raise Exception(( 'Cannot load CDF C library; checked {0}. 
' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(self.libpath))) else: self._library = ctypes.CDLL(libpath) self.libpath = libpath else: self._library = library self.libpath = libpath self._library.CDFlib.restype = ctypes.c_long #commonly used, so set it up here self._library.EPOCHbreakdown.restype = ctypes.c_long self._library.computeEPOCH.restype = ctypes.c_double self._library.computeEPOCH.argtypes = [ctypes.c_long] * 7 self._library.computeEPOCH16.restype = ctypes.c_double self._library.computeEPOCH16.argtypes = [ctypes.c_long] * 10 + \ [ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDFsetFileBackward'): self._library.CDFsetFileBackward.restype = None self._library.CDFsetFileBackward.argtypes = [ctypes.c_long] #Map old name to the 3.7.1+ name if not hasattr(self._library, 'computeTT2000') \ and hasattr(self._library, 'CDF_TT2000_from_UTC_parts'): self._library.computeTT2000 \ = self._library.CDF_TT2000_from_UTC_parts if hasattr(self._library, 'computeTT2000'): self._library.computeTT2000.restype = ctypes.c_longlong self._library.computeTT2000.argtypes = \ [ctypes.c_double] *9 #Map old name to the 3.7.1+ name if not hasattr(self._library, 'breakdownTT2000') \ and hasattr(self._library, 'CDF_TT2000_to_UTC_parts'): self._library.breakdownTT2000 \ = self._library.CDF_TT2000_to_UTC_parts if hasattr(self._library, 'breakdownTT2000'): self._library.breakdownTT2000.restype = None self._library.breakdownTT2000.argtypes = \ [ctypes.c_longlong] + [ctypes.POINTER(ctypes.c_double)] * 9 if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH'): self._library.CDF_TT2000_to_UTC_EPOCH.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH.argtypes = [ctypes.c_longlong] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH'): self._library.CDF_TT2000_from_UTC_EPOCH.restype = ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH.argtypes = [ctypes.c_double] if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH16'): self._library.CDF_TT2000_to_UTC_EPOCH16.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH16.argtypes = \ [ctypes.c_longlong, ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH16'): self._library.CDF_TT2000_from_UTC_EPOCH16.restype = \ ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH16.argtypes = \ [ctypes.POINTER(ctypes.c_double * 2)] #Get CDF version information ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) sub = ctypes.c_char(b' ') self.call(const.GET_, const.LIB_VERSION_, ctypes.byref(ver), const.GET_, const.LIB_RELEASE_, ctypes.byref(rel), const.GET_, const.LIB_INCREMENT_, ctypes.byref(inc), const.GET_, const.LIB_subINCREMENT_, ctypes.byref(sub)) ver = ver.value rel = rel.value inc = inc.value sub = sub.value self.version = (ver, rel, inc, sub) self._del_middle_rec_bug = ver < 3 or (ver == 3 and (rel < 4 or (rel == 4 and inc < 1))) self.supports_int8 = (ver > 3 or (ver == 3 and rel >=4)) self.cdftypenames = {const.CDF_BYTE.value: 'CDF_BYTE', const.CDF_CHAR.value: 'CDF_CHAR', const.CDF_INT1.value: 'CDF_INT1', const.CDF_UCHAR.value: 'CDF_UCHAR', const.CDF_UINT1.value: 'CDF_UINT1', const.CDF_INT2.value: 'CDF_INT2', const.CDF_UINT2.value: 'CDF_UINT2', const.CDF_INT4.value: 'CDF_INT4', const.CDF_UINT4.value: 'CDF_UINT4', const.CDF_INT8.value: 'CDF_INT8', const.CDF_FLOAT.value: 'CDF_FLOAT', const.CDF_REAL4.value: 'CDF_REAL4', const.CDF_DOUBLE.value: 'CDF_DOUBLE', const.CDF_REAL8.value: 'CDF_REAL8', const.CDF_EPOCH.value: 'CDF_EPOCH', 
const.CDF_EPOCH16.value: 'CDF_EPOCH16', const.CDF_TIME_TT2000.value: 'CDF_TIME_TT2000', } self.numpytypedict = {const.CDF_BYTE.value: numpy.int8, const.CDF_CHAR.value: numpy.int8, const.CDF_INT1.value: numpy.int8, const.CDF_UCHAR.value: numpy.uint8, const.CDF_UINT1.value: numpy.uint8, const.CDF_INT2.value: numpy.int16, const.CDF_UINT2.value: numpy.uint16, const.CDF_INT4.value: numpy.int32, const.CDF_UINT4.value: numpy.uint32, const.CDF_INT8.value: numpy.int64, const.CDF_FLOAT.value: numpy.float32, const.CDF_REAL4.value: numpy.float32, const.CDF_DOUBLE.value: numpy.float64, const.CDF_REAL8.value: numpy.float64, const.CDF_EPOCH.value: numpy.float64, const.CDF_EPOCH16.value: numpy.dtype((numpy.float64, 2)), const.CDF_TIME_TT2000.value: numpy.int64, } self.timetypes = [const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value] if not self.supports_int8: del self.cdftypenames[const.CDF_INT8.value] del self.numpytypedict[const.CDF_INT8.value] del self.cdftypenames[const.CDF_TIME_TT2000.value] del self.numpytypedict[const.CDF_TIME_TT2000.value] elif sys.platform.startswith('linux') \ and os.uname()[4].startswith('arm') \ and hasattr(self._library, 'computeTT2000') \ and self._library.computeTT2000( 2010, 1, 1, 0, 0, 0, 0, 0, 0) != 315576066184000000: #TT2000 call failed, so probably need to type-pun #double arguments to variadic functions. #Calling convention for non-variadic functions with floats #is unique, but convention for ints is same as variadic. #So type-pun arguments to integers to force that calling #convention. if ctypes.sizeof(ctypes.c_longlong) != \ ctypes.sizeof(ctypes.c_double): warnings.warn('ARM with unknown type sizes; ' 'TT2000 functions will not work.') else: self._library.computeTT2000.argtypes = \ [ctypes.c_longlong] * 9 c_ll_p = ctypes.POINTER(ctypes.c_longlong) if self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( 2010)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) != 315576066184000000: warnings.warn('ARM with unknown calling convention; ' 'TT2000 functions will not work.') self.datetime_to_tt2000 = self._datetime_to_tt2000_typepunned v_epoch16_to_datetime = numpy.frompyfunc( self.epoch16_to_datetime, 2, 1) self.v_epoch16_to_datetime = \ lambda x: v_epoch16_to_datetime(x[..., 0], x[..., 1]) self.v_epoch_to_datetime = numpy.frompyfunc( self.epoch_to_datetime, 1, 1) self.v_tt2000_to_datetime = numpy.frompyfunc( self.tt2000_to_datetime, 1, 1) self.v_datetime_to_epoch = numpy.vectorize( self.datetime_to_epoch, otypes=[numpy.float64]) v_datetime_to_epoch16 = numpy.frompyfunc( self.datetime_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. 
We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_datetime_to_epoch16(x): retval = numpy.require(v_datetime_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_datetime_to_epoch16 = _v_datetime_to_epoch16 self.v_datetime_to_tt2000 = numpy.vectorize( self.datetime_to_tt2000, otypes=[numpy.int64]) self.v_epoch_to_tt2000 = numpy.vectorize( self.epoch_to_tt2000, otypes=[numpy.int64]) self.v_tt2000_to_epoch = numpy.vectorize( self.tt2000_to_epoch, otypes=[numpy.float64]) v_epoch16_to_tt2000 = numpy.frompyfunc( self.epoch16_to_tt2000, 2, 1) self.v_epoch16_to_tt2000 = \ lambda x: v_epoch16_to_tt2000(x[..., 0], x[..., 1]) v_tt2000_to_epoch16 = numpy.frompyfunc( self.tt2000_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_tt2000_to_epoch16(x): retval = numpy.require(v_tt2000_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_tt2000_to_epoch16 = _v_tt2000_to_epoch16 if not self.supports_int8: self.datetime_to_tt2000 = self._bad_tt2000 self.tt2000_to_datetime = self._bad_tt2000 self.v_datetime_to_tt2000 = self._bad_tt2000 self.v_tt2000_to_datetime = self._bad_tt2000 self.epoch_to_tt2000 = self._bad_tt2000 self.v_epoch_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch = self._bad_tt2000 self.v_tt2000_to_epoch = self._bad_tt2000 self.epoch_16_to_tt2000 = self._bad_tt2000 self.v_epoch16_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch16 = self._bad_tt2000 self.v_tt2000_to_epoch16 = self._bad_tt2000 #Default to V2 CDF self.set_backward(True) @staticmethod def _find_lib(): """ Search for the CDF library Searches in likely locations for CDF libraries and attempts to load them. Stops at first successful load and, if fails, reports all the files that were tried as libraries. Returns ======= out : tuple This is either (path to library, loaded library) or, in the event of failure, (None, list of libraries tried) """ failed = [] for libpath in Library._lib_paths(): try: lib = ctypes.CDLL(libpath) except: failed.append(libpath) else: return libpath, lib return (failed, None) @staticmethod def _lib_paths(): """Find candidate paths for the CDF library Does not check that the library is actually in any particular directory, just returns a list of possible locations, in priority order. Returns ======= out : generator of str paths that look like the CDF library """ #What the library might be named names = { 'win32': ['cdf.dll'], 'darwin': ['libcdf.dylib', 'cdf.dylib', 'libcdf.so'], 'linux2': ['libcdf.so'], 'linux': ['libcdf.so'], } names = names.get(sys.platform, ['libcdf.so']) #All existing CDF-library-like paths within a directory search_dir = lambda x: \ [os.path.join(x, fname) for fname in names if os.path.exists(os.path.join(x, fname))] # Only use anaconda locations... # Defined during builds ... if 'PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['PREFIX'], 'lib')): yield p # defined when conda is activated ... 
if 'CONDA_PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'lib')): yield p # Special subdirectory for anaconda unix packages on windows if 'LIBRARY_BIN' in os.environ: for p in search_dir(os.environ['LIBRARY_BIN']): yield p ctypespath = ctypes.util.find_library( 'cdf.dll' if sys.platform == 'win32' else 'cdf') if ctypespath: yield ctypespath def check_status(self, status, ignore=()): """ Raise exception or warning based on return status of CDF call Parameters ========== status : int status returned by the C library Other Parameters ================ ignore : sequence of ctypes.c_long CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. (Default none). Raises ====== CDFError : if status < CDF_WARN, indicating an error Warns ===== CDFWarning : if CDF_WARN <= status < CDF_OK, indicating a warning. Returns ======= out : int status (unchanged) """ if status == const.CDF_OK or status in ignore: return status if status < const.CDF_WARN: raise CDFError(status) else: warning = CDFWarning(status) warning.warn() return status def call(self, *args, **kwargs): """ Call the CDF internal interface Passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with :meth:`check_status`. Terminal NULL is automatically added to args. Parameters ========== args : various, see :mod:`ctypes` Passed directly to the CDF library interface. Useful constants are defined in the :mod:`~pycdf.const` module. Other Parameters ================ ignore : sequence of CDF statuses sequence of CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. Returns ======= out : int CDF status from the library Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ if 'ignore' in kwargs: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) ), kwargs['ignore']) else: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) )) # MASKED: set_backward function (lines 618-641) def epoch_to_datetime(self, epoch): """ Converts a CDF epoch value to a datetime Parameters ========== epoch : float epoch value from CDF Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
See Also ======== v_epoch_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) self._library.EPOCHbreakdown(ctypes.c_double(epoch), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999000) else: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, msec.value * 1000) def datetime_to_epoch(self, dt): """ Converts a Python datetime to a CDF Epoch value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : float epoch corresponding to dt See Also ======== v_datetime_to_epoch """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) micro = dt.microsecond % 1000 if micro >= 500 and dt.year < 9999: dt += datetime.timedelta(0, 0, 1000) return self._library.computeEPOCH(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000)) def epoch16_to_datetime(self, epoch0, epoch1): """ Converts a CDF epoch16 value to a datetime .. note:: The call signature has changed since SpacePy 0.1.2. Formerly this method took a single argument with two values; now it requires two arguments (one for each value). To convert existing code, replace ``epoch16_to_datetime(epoch)`` with ``epoch16_to_datetime(*epoch)``. Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. See Also ======== v_epoch16_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) usec = ctypes.c_long(0) nsec = ctypes.c_long(0) psec = ctypes.c_long(0) self._library.EPOCH16breakdown((ctypes.c_double * 2)(epoch0, epoch1), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec), ctypes.byref(psec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) micro = int(float(msec.value) * 1000 + float(usec.value) + float(nsec.value) / 1000 + float(psec.value) / 1e6 + 0.5) if micro < 1000000: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_epoch16(self, dt): """ Converts a Python datetime to a CDF Epoch16 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : list of float epoch16 corresponding to dt See Also ======== v_datetime_to_epoch16 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) #Default to "illegal epoch" epoch16 = (ctypes.c_double * 2)(-1., -1.) 
if self._library.computeEPOCH16(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0, 0, epoch16): return (-1., -1.) #Failure, so illegal epoch return (epoch16[0], epoch16[1]) def epoch_to_epoch16(self, epoch): """ Converts a CDF EPOCH to a CDF EPOCH16 value Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : (double, double) EPOCH16 corresponding to epoch """ e = numpy.require(epoch, numpy.float64) s = numpy.trunc(e / 1000.0) #ugly numpy stuff, probably a better way.... res = numpy.hstack((s, (e - s * 1000.0) * 1e9)) if len(res) <= 2: return res newshape = list(res.shape[0:-2]) newshape.append(res.shape[-1] // 2) newshape.append(2) return numpy.rollaxis(res.reshape(newshape), -1, -2) def epoch_to_num(self, epoch): """ Convert CDF EPOCH to matplotlib number. Same output as :func:`~matplotlib.dates.date2num` and useful for plotting large data sets without converting the times through datetime. Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : double Floating point number representing days since 0001-01-01. """ #date2num day 1 is 1/1/1 00UT #epoch 1/1/1 00UT is 31622400000.0 (millisecond) return (epoch - 31622400000.0) / (24 * 60 * 60 * 1000.0) + 1.0 def epoch16_to_epoch(self, epoch16): """ Converts a CDF EPOCH16 to a CDF EPOCH value Parameters ========== epoch16 : (double, double) EPOCH16 to convert. Lists and numpy arrays are acceptable. LAST dimension should be 2: the two pairs of EPOCH16 Returns ======= out : double EPOCH corresponding to epoch16 """ e = numpy.require(epoch16, numpy.float64) return e[..., 0] * 1000.0 + numpy.round(e[..., 1] / 1e9) def tt2000_to_datetime(self, tt2000): """ Converts a CDF TT2000 value to a datetime .. note:: Although TT2000 values support leapseconds, Python's datetime object does not. Any times after 23:59:59.999999 will be truncated to 23:59:59.999999. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
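        Examples
        ========
        A minimal round-trip sketch via :meth:`datetime_to_tt2000`; the
        intermediate integer depends on the library's leap-second table, so
        only the recovered datetime is shown:

        >>> import datetime
        >>> t = lib.datetime_to_tt2000(datetime.datetime(2010, 1, 1))
        >>> lib.tt2000_to_datetime(t)
        datetime.datetime(2010, 1, 1, 0, 0)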
See Also ======== v_tt2000_to_datetime """ yyyy = ctypes.c_double(0) mm = ctypes.c_double(0) dd = ctypes.c_double(0) hh = ctypes.c_double(0) min = ctypes.c_double(0) sec = ctypes.c_double(0) msec = ctypes.c_double(0) usec = ctypes.c_double(0) nsec = ctypes.c_double(0) self._library.breakdownTT2000( ctypes.c_longlong(tt2000), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) sec = int(sec.value) if sec >= 60: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), 59, 999999) micro = int(msec.value * 1000 + usec.value + nsec.value / 1000 + 0.5) if micro < 1000000: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_tt2000(self, dt): """ Converts a Python datetime to a CDF TT2000 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0) def _datetime_to_tt2000_typepunned(self, dt): """ Converts a Python datetime to a CDF TT2000 value Typepunned version that passes doubles as longlongs, to get around ARM calling convention oddness. Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ c_ll_p = ctypes.POINTER(ctypes.c_longlong) if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( dt.year)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.month)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.day)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.hour)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.minute)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.second)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond // 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond % 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) def epoch_to_tt2000(self, epoch): """ Converts a CDF EPOCH to a CDF TT2000 value Parameters ========== epoch : double EPOCH to convert Returns ======= out : int tt2000 corresponding to epoch See Also ======== v_epoch_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH(epoch) def tt2000_to_epoch(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH .. note:: Although TT2000 values support leapseconds, CDF EPOCH values do not. 
Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double EPOCH corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch """ return self._library.CDF_TT2000_to_UTC_EPOCH(tt2000) def epoch16_to_tt2000(self, epoch0, epoch1): """ Converts a CDF epoch16 value to TT2000 .. note:: Because TT2000 does not support picoseconds, the picoseconds value in epoch is ignored (i.e., truncated.) Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : long TT2000 corresponding to epoch. See Also ======== v_epoch16_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH16( (ctypes.c_double * 2)(epoch0, epoch1)) def tt2000_to_epoch16(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH16 .. note:: Although TT2000 values support leapseconds, CDF EPOCH16 values do not. Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double, double EPOCH16 corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch16 """ #Default to "illegal epoch" if isn't populated epoch16 = (ctypes.c_double * 2)(-1., -1.) if self._library.CDF_TT2000_to_UTC_EPOCH16(tt2000, epoch16): return (-1., -1.) #Failure; illegal epoch return (epoch16[0], epoch16[1]) def _bad_tt2000(*args, **kwargs): """Convenience function for complaining that TT2000 not supported""" raise NotImplementedError( 'TT2000 functions require CDF library 3.4.0 or later') def download_library(): """Download and install the CDF library""" if sys.platform != 'win32': raise NotImplementedError( 'CDF library install only supported on Windows') try: import html.parser as HTMLParser except ImportError: import HTMLParser #https://stackoverflow.com/questions/1713038/super-fails-with-error-typeerror-argument-1-must-be-type-not-classobj class LinkParser(HTMLParser.HTMLParser, object): def __init__(self, *args, **kwargs): self.links_found = [] super(LinkParser, self).__init__(*args, **kwargs) def handle_starttag(self, tag, attrs): if tag != 'a' or attrs[0][0] != 'href': return self.links_found.append(attrs[0][1]) import re import subprocess try: import urllib.request as u except ImportError: import urllib as u # Removed reference to spacepy #import spacepy #if spacepy.config.get('user_agent', None): # class AppURLopener(u.FancyURLopener): # version = spacepy.config['user_agent'] # u._urlopener = AppURLopener() baseurl = 'https://spdf.sci.gsfc.nasa.gov/pub/software/cdf/dist/' url = u.urlopen(baseurl) listing = url.read() url.close() p = LinkParser() p.feed(listing) cdfdist = [l for l in p.links_found if re.match('^cdf3\d_\d(?:_\d)?/$', l)] if not cdfdist: raise RuntimeError( "Couldn't find CDF distribution directory to download") cdfdist.sort(key=lambda x: x.rstrip('/').split('_')) cdfverbase = cdfdist[-1].rstrip('/') instfname = cdfverbase + ('_0' if cdfverbase.count('_') == 1 else '') + \ '-setup-{0}.exe'.format(len('%x' % sys.maxsize)*4) insturl = baseurl + cdfverbase + '/windows/' + instfname tmpdir = tempfile.mkdtemp() try: fname, status = u.urlretrieve(insturl, os.path.join(tmpdir, instfname)) subprocess.check_call([fname, '/install', '/q1'], shell=False) finally: shutil.rmtree(tmpdir) _libpath, _library = Library._find_lib() 
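# _find_lib() returns the sequence of candidate paths that were checked and
# the loaded ctypes library handle, which is None if no library could be found.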
if _library is None: raise Exception(('Cannot load CDF C library; checked {0}. ' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(_libpath))) from . import const lib = Library(_libpath, _library) """Module global library object. Initalized at module load time so all classes have ready access to the CDF library and a common state. E.g: >>> import pycdf >>> pycdf.lib.version (3, 3, 0, ' ') """ class CDFException(Exception): """ Base class for errors or warnings in the CDF library. Not normally used directly, but in subclasses :class:`CDFError` and :class:`CDFWarning`. Error messages provided by this class are looked up from the underlying C library. """ def __init__(self, status): """ Create a CDF Exception Uses CDF C library to look up an appropriate error message. Parameters ========== status : ctypes.c_long CDF status """ self.status = status self.string = 'CDF error ' + repr(status) + ', unable to get details.' message = ctypes.create_string_buffer(const.CDF_STATUSTEXT_LEN + 1) try: retval = lib._library.CDFlib(const.SELECT_, const.CDF_STATUS_, ctypes.c_long(status), const.GET_, const.STATUS_TEXT_, message, const.NULL_) if retval == const.CDF_OK: if isinstance(message.value, str): self.string = message.value elif isinstance(message.value, bytes): self.string = message.value.decode() except: pass def __str__(self): """ Error string associated with the library error. Returns ======= out : str Error message from the CDF library. """ return self.string class CDFError(CDFException): """Raised for an error in the CDF library.""" pass class CDFWarning(CDFException, UserWarning): """Used for a warning in the CDF library.""" def warn(self, level=4): """ Issues a warning based on the information stored in my exception Intended for use in check_status or similar wrapper function. Other Parameters ================ level : int optional (default 3), how far up the stack the warning should be reported. Passed directly to :class:`warnings.warn`. """ warnings.warn(self, self.__class__, level) class EpochError(Exception): """Used for errors in epoch routines""" pass def _compress(obj, comptype=None, param=None): """Set or check the compression of a :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param obj: object on which to set or check compression @type obj: :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param comptype: type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :py:mod:`pycdf.const`. If not specified, will not change compression. @type comptype: ctypes.c_long @param param: Compression parameter, see CDF CRM 4.10 and :py:mod:`pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
@type param: ctypes.c_long @return: (comptype, param) currently in effect @rtype: tuple """ if isinstance(obj, CDF): COMPRESSION_ = const.CDF_COMPRESSION_ elif isinstance(obj, Var): COMPRESSION_ = const.zVAR_COMPRESSION_ else: raise ValueError('Must specify a CDF or Var type.') validparams = {const.NO_COMPRESSION.value: [ctypes.c_long(0)], const.RLE_COMPRESSION.value: [const.RLE_OF_ZEROs], const.HUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.AHUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.GZIP_COMPRESSION.value: [ctypes.c_long(5), ctypes.c_long(1), ctypes.c_long(2), ctypes.c_long(3), ctypes.c_long(4), ctypes.c_long(6), ctypes.c_long(7), ctypes.c_long(8), ctypes.c_long(9), ], } comptypes = [const.NO_COMPRESSION, const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION, const.GZIP_COMPRESSION] comptypevalues = [i.value for i in comptypes] if comptype != None: if not hasattr(comptype, 'value'): comptype = ctypes.c_long(comptype) if param is None: if not comptype.value in validparams: raise CDFError(const.BAD_COMPRESSION) param = validparams[comptype.value][0] paramlist = (ctypes.c_long * 1)(param) obj._call(const.PUT_, COMPRESSION_, comptype, paramlist) params = (ctypes.c_long * const.CDF_MAX_PARMS)(*([0] * const.CDF_MAX_PARMS)) comptype = ctypes.c_long(0) percent = ctypes.c_long(0) obj._call(const.GET_, COMPRESSION_, ctypes.byref(comptype), ctypes.byref(params), ctypes.byref(percent)) param = params[0] if not comptype.value in comptypevalues: raise CDFError(const.BAD_COMPRESSION) validparamvalues = [i.value for i in validparams[comptype.value]] if not param in validparamvalues: raise CDFError(const.BAD_COMPRESSION_PARM) comptype = comptypes[comptypevalues.index(comptype.value)] if comptype in (const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION): param = validparams[comptype.value][validparamvalues.index(param)] return (comptype, param) class CDF(MutableMapping): """ Python object representing a CDF file. Open or create a CDF file by creating an object of this class. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. A readonly CDF with many variables may be slow to close. See :meth:`readonly`. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :meth:`close` or :meth:`save` when done. .. note:: Existing CDF files are opened read-only by default, see :meth:`readonly` to change. CDF supports the `with <http://docs.python.org/tutorial/inputoutput.html#methods-of-file-objects>`_ keyword, like other file objects, so: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... #do brilliant things with the CDF will open the CDF, execute the indented statements, and close the CDF when finished or when an error occurs. The `python docs <http://docs.python.org/reference/compound_stmts.html#with>`_ include more detail on this 'context manager' ability. 
CDF objects behave like a python `dictionary <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_, where the keys are names of variables in the CDF, and the values, :class:`Var` objects. As a dictionary, they are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_ and it is easy to loop over all of the variables in a file. Some examples: #. List the names of all variables in the open CDF ``cdffile``: >>> cdffile.keys() >>> for k in cdffile: #Alternate ... print(k) #. Get a :class:`Var` object for the variable named ``Epoch``: >>> epoch = cdffile['Epoch'] #. Determine if a CDF contains a variable named ``B_GSE``: >>> if 'B_GSE' in cdffile: ... print('B_GSE is in the file') ... else: ... print('B_GSE is not in the file') #. Find how many variables are in the file: >>> print(len(cdffile)) #. Delete the variable ``Epoch`` from the open CDF file ``cdffile``: >>> del cdffile['Epoch'] #. Display a summary of variables and types in open CDF file ``cdffile``: >>> print(cdffile) #. Open the CDF named ``cdf_filename.cdf``, read *all* the data from all variables into dictionary ``data``, and close it when done or if an error occurs: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... data = cdffile.copy() This last example can be very inefficient as it reads the entire CDF. Normally it's better to treat the CDF as a dictionary and access only the data needed, which will be pulled transparently from disc. See :class:`Var` for more subtle examples. Potentially useful dictionary methods and related functions: - `in <http://docs.python.org/reference/expressions.html#in>`_ - `keys <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_ - :py:func:`len` - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - :py:func:`sorted` - :py:func:`~spacepy.toolbox.dictree` The CDF user's guide section 2.2 has more background information on CDF files. The :attr:`~CDF.attrs` Python attribute acts as a dictionary referencing CDF attributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`gAttrList` for more on the dictionary of global attributes. Creating a new CDF from a master (skeleton) CDF has similar syntax to opening one: >>> cdffile = pycdf.CDF('cdf_filename.cdf', 'master_cdf_filename.cdf') This creates and opens ``cdf_filename.cdf`` as a copy of ``master_cdf_filename.cdf``. Using a skeleton CDF is recommended over making a CDF entirely from scratch, but this is possible by specifying a blank master: >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') When CDFs are created in this way, they are opened read-write, see :py:meth:`readonly` to change. By default, new CDFs (without a master) are created in version 2 (backward-compatible) format. To create a version 3 CDF, use :meth:`Library.set_backward`: >>> pycdf.lib.set_backward(False) >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') Add variables by direct assignment, which will automatically set type and dimension based on the data provided: >>> cdffile['new_variable_name'] = [1, 2, 3, 4] or, if more control is needed over the type and dimensions, use :py:meth:`new`. Although it is supported to assign Var objects to Python variables for convenience, there are some minor pitfalls that can arise when changing a CDF that will not affect most users. 
This is only a concern when assigning a zVar object to a Python variable, changing the CDF through some other variable, and then trying to use the zVar object via the originally assigned variable. Deleting a variable: >>> var = cdffile['Var1'] >>> del cdffile['Var1'] >>> var[0] #fail, no such variable Renaming a variable: >>> var = cdffile['Var1'] >>> cdffile['Var1'].rename('Var2') >>> var[0] #fail, no such variable Renaming via the same variable works: >>> var = cdffile['Var1'] >>> var.rename('Var2') >>> var[0] #succeeds, aware of new name Deleting a variable and then creating another variable with the same name may lead to some surprises: >>> var = cdffile['Var1'] >>> var[...] = [1, 2, 3, 4] >>> del cdffile['Var1'] >>> cdffile.new('Var1', data=[5, 6, 7, 8] >>> var[...] [5, 6, 7, 8] .. autosummary:: ~CDF.attr_num ~CDF.attrs ~CDF.add_attr_to_cache ~CDF.add_to_cache ~CDF.backward ~CDF.checksum ~CDF.clear_attr_from_cache ~CDF.clear_from_cache ~CDF.clone ~CDF.close ~CDF.col_major ~CDF.compress ~CDF.copy ~CDF.from_data ~CDF.new ~CDF.raw_var ~CDF.readonly ~CDF.save ~CDF.var_num ~CDF.version .. attribute:: CDF.attrs Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. .. attribute:: CDF.backward True if this CDF was created in backward-compatible mode (for opening with CDF library before 3.x) .. automethod:: add_to_cache .. automethod:: add_attr_to_cache .. automethod:: attr_num .. automethod:: checksum .. automethod:: clear_from_cache .. automethod:: clear_attr_from_cache .. automethod:: clone .. automethod:: close .. automethod:: col_major .. automethod:: compress .. automethod:: copy .. automethod:: from_data .. automethod:: new .. automethod:: raw_var .. automethod:: readonly .. automethod:: save .. automethod:: var_num .. automethod:: version """ def __init__(self, pathname, masterpath=None, create=None, readonly=None): """Open or create a CDF file. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. Raises ====== CDFError if CDF library reports an error CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save` when done. 
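        To create a new CDF from a master (skeleton) file rather than opening
        an existing one (as in the class-level example):

        >>> cdffile = pycdf.CDF('cdf_filename.cdf', 'master_cdf_filename.cdf')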
""" if masterpath is not None: #Looks like we want to create if create is False: raise ValueError('Cannot specify a master CDF without creating a CDF') if readonly is True: raise ValueError('Cannot create a CDF in readonly mode') if create and readonly: raise ValueError('Cannot create a CDF in readonly mode') try: self.pathname = pathname.encode() except AttributeError: raise ValueError( 'pathname must be string-like: {0}'.format(pathname)) self._handle = ctypes.c_void_p(None) self._opened = False if masterpath is None and not create: self._open(True if readonly is None else readonly) elif masterpath: self._from_master(masterpath.encode()) else: self._create() lib.call(const.SELECT_, const.CDF_zMODE_, ctypes.c_long(2)) self._attrlistref = weakref.ref(gAttrList(self)) self.backward = self.version()[0] < 3 self._var_nums = {} """Cache of name-to-number mappings for variables in this CDF""" self._attr_info = {} """Cache of name-to-(number, global) mappings for attributes in this CDF""" def __del__(self): """Destructor; called when CDF object is destroyed. Close CDF file if there is still a valid handle. .. note:: To avoid data loss, explicitly call :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save`. """ if self._opened: self.close() def __delitem__(self, name): """Delete a zVariable in this CDF, by name or number Parameters ========== name : string or int Name or number of the CDF variable .. note: Variable numbers may change if variables are added or removed. Examples ======== Delete the variable ``Epoch`` from the open CDF file ``cdffile``. >>> del cdffile['Epoch'] """ self[name]._delete() def __enter__(self): """Context manager entrance function.""" return self def __exit__(self, type, value, traceback): """Context manager exit function. Close CDF file. """ self.close() def __getitem__(self, name): """Gets a zVariable in this CDF, by name or number The CDF acts like a dict @param name: Name or number of the CDF variable @type name: string or int @return: CDF variable named or numbered L{name} @rtype: :py:class:`pycdf.Var` @raise KeyError: for pretty much any problem in lookup @note: variable numbers may change if variables are added or removed. """ try: return Var(self, name) except CDFException as e: raise KeyError('{0}: {1}'.format(name, e)) def __setitem__(self, name, data): """Writes data to a zVariable in this CDF If the zVariable does not exist, will create one matching L{data}. If it does exist, will attempt to write L{data} to it without changing the type or dimensions. @param name: name or number of the variable to write @type name: str or int @param data: data to write, or a :py:class:`pycdf.Var` to copy """ if isinstance(data, Var): self.clone(data, name) elif name in self: self[name][...] 
= data if hasattr(data, 'attrs'): self[name].attrs.clone(data.attrs) else: self.new(name, data) def __iter__(self, current = 0): """Iterates over zVars in CDF Iterators for dicts return keys @note: Returned in variable-number order """ while current < self.__len__(): name = self[current].name() value = (yield name) if value is None: current += 1 else: current = self[value]._num() current += 1 def __len__(self): """Implements 'length' of CDF (number of zVars) @return: number of zVars in the CDF @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, const.CDF_NUMzVARS_, ctypes.byref(count)) return count.value def __contains__(self, key): """Determines whether a particular variable name is in the CDF @note: Essentially an efficiency function; L{__iter__} is called if this isn't defined @param key: key/variable name to check @type key: string @return: True if L{key} is the name of a variable in CDF, else False @rtype: Boolean """ try: foo = self[key] return True except KeyError as e: expected = str(key) + \ ": NO_SUCH_VAR: Named variable not found in this CDF." if expected in e.args: return False raise def __repr__(self): """Returns representation of CDF Cannot return anything that can be eval'd to create a copy of the CDF, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<CDF:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the CDF This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.CDF`, just the names, types, and sizes of all variables. (Attributes are not listed.) @return: description of the variables in the CDF @rtype: str """ if self._opened: return '\n'.join([key + ': ' + str(value) for (key, value) in sorted(self.items())]) #can get away with this sort because second value in tuple isn't #compared unless first are different, and variable name is unique. else: if isinstance(self.pathname, str): return 'Closed CDF {0}'.format(self.pathname) else: return 'Closed CDF {0}'.format(self.pathname.decode('ascii')) def _open(self, readonly=True): """Opens the CDF file (called on init) Will open an existing CDF file read/write. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.OPEN_, const.CDF_, self.pathname, ctypes.byref(self._handle)) self._opened = True if readonly: #Default is RW self.readonly(readonly) def _create(self): """Creates (and opens) a new CDF file Created at ``pathname``. Assumes zero-dimension r variables Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.CREATE_, const.CDF_, self.pathname, ctypes.c_long(0), (ctypes.c_long * 1)(0), ctypes.byref(self._handle)) self._opened = True def _from_master(self, master_path): """Creates a new CDF from a master CDF file ``master_path`` is copied to ``pathname`` and opened. Parameters ========== master_path : string location of the master CDF file Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. 
note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ if os.path.exists(self.pathname): raise CDFError(const.CDF_EXISTS) shutil.copy2(master_path, self.pathname) self._open(False) def _call(self, *args, **kwargs): """Select this CDF as current and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. Parameters ========== args : various, see :py:mod:`ctypes`. Passed directly to the CDF library interface. Useful constants are defined in the :doc:`const <pycdf_const>` module of this package. Returns ======= out : ctypes.c_long CDF status from the library .. note: Terminal NULL_ is automatically added to ``args``. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. """ return lib.call(const.SELECT_, const.CDF_, self._handle, *args, **kwargs) def clone(self, zVar, name=None, data=True): """ Clone a zVariable (from another CDF or this) into this CDF Parameters ========== zVar : :py:class:`Var` variable to clone Other Parameters ================ name : str Name of the new variable (default: name of the original) data : boolean (optional) Copy data, or only type, dimensions, variance, attributes? (default: True, copy data as well) Returns ======= out : :py:class:`Var` The newly-created zVar in this CDF """ if name is None: name = zVar.name() if name in self: del self[name] self.new(name, type=zVar.type(), recVary=zVar.rv(), dimVarys=zVar.dv(), dims=zVar._dim_sizes(), n_elements=zVar._nelems()) self[name].compress(*zVar.compress()) self[name].attrs.clone(zVar.attrs) if data: r = zVar._raw zVar._raw = True self.raw_var(name)[...] = zVar[...] zVar._raw = r return zVar def col_major(self, new_col=None): """ Finds the majority of this CDF file Other Parameters ================ new_col : boolean Specify True to change to column-major, False to change to row major, or do not specify to check the majority rather than changing it. (default is check only) Returns ======= out : boolean True if column-major, false if row-major """ if new_col != None: new_maj = const.COLUMN_MAJOR if new_col else const.ROW_MAJOR self._call(const.PUT_, const.CDF_MAJORITY_, new_maj) maj = ctypes.c_long(0) self._call(const.GET_, const.CDF_MAJORITY_, ctypes.byref(maj)) if not maj.value in (const.ROW_MAJOR.value, const.COLUMN_MAJOR.value): raise CDFError(const.BAD_MAJORITY) return maj.value == const.COLUMN_MAJOR.value def readonly(self, ro=None): """ Sets or check the readonly status of this CDF If the CDF has been changed since opening, setting readonly mode will have no effect. .. note:: Closing a CDF that has been opened readonly, or setting readonly False, may take a substantial amount of time if there are many variables in the CDF, as a (potentially large) cache needs to be cleared. Consider specifying ``readonly=False`` when opening the file if this is an issue. However, this may make some reading operations slower. Other Parameters ================ ro : Boolean True to set the CDF readonly, False to set it read/write, or leave out to check only. 
Returns ======= out : Boolean True if CDF is read-only, else False Raises ====== CDFError : if bad mode is set """ if ro == True: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYon) elif ro == False: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYoff) mode = ctypes.c_long(0) self._call(const.CONFIRM_, const.CDF_READONLY_MODE_, ctypes.byref(mode)) if mode.value == const.READONLYon.value: return True elif mode.value == const.READONLYoff.value: return False else: raise CDFError(const.BAD_READONLY_MODE.value) def checksum(self, new_val=None): """ Set or check the checksum status of this CDF. If checksums are enabled, the checksum will be verified every time the file is opened. Other Parameters ================ new_val : boolean True to enable checksum, False to disable, or leave out to simply check. Returns ======= out : boolean True if the checksum is enabled or False if disabled """ if new_val != None: self._call(const.PUT_, const.CDF_CHECKSUM_, const.MD5_CHECKSUM if new_val else const.NO_CHECKSUM) chk = ctypes.c_long(0) self._call(const.GET_, const.CDF_CHECKSUM_, ctypes.byref(chk)) if not chk.value in (const.MD5_CHECKSUM.value, const.NO_CHECKSUM.value): raise CDFError(const.BAD_CHECKSUM) return chk.value == const.MD5_CHECKSUM.value def close(self): """ Closes the CDF file Although called on object destruction (:meth:`~CDF.__del__`), to ensure all data are saved, the user should explicitly call :meth:`~CDF.close` or :meth:`~CDF.save`. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.CLOSE_, const.CDF_) self._opened = False def compress(self, comptype=None, param=None): """ Set or check the compression of this CDF Sets compression on entire *file*, not per-variable. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) Returns ======= out : tuple (comptype, param) currently in effect See Also ======== :meth:`Var.compress` Examples ======== Set file ``cdffile`` to gzip compression, compression level 9: >>> cdffile.compress(pycdf.const.GZIP_COMPRESSION, 9) """ return _compress(self, comptype, param) def new(self, name, data=None, type=None, recVary=True, dimVarys=None, dims=None, n_elements=None, compress=None, compress_param=None): """ Create a new zVariable in this CDF .. note:: Either ``data`` or ``type`` must be specified. If type is not specified, it is guessed from ``data``. Parameters ========== name : str name of the new variable Other Parameters ================ data data to store in the new variable. If this has a an ``attrs`` attribute (e.g., :class:`~spacepy.datamodel.dmarray`), it will be used to populate attributes of the new variable. type : ctypes.c_long CDF type of the variable, from :mod:`~pycdf.const`. See section 2.5 of the CDF user's guide for more information on CDF data types. recVary : boolean record variance of the variable (default True) dimVarys : list of boolean dimension variance of each dimension, default True for all dimensions. 
dims : list of int size of each dimension of this variable, default zero-dimensional. Note this is the dimensionality as defined by CDF, i.e., for record-varying variables it excludes the leading record dimension. See :py:class:`Var`. n_elements : int number of elements, should be 1 except for CDF_CHAR, for which it's the length of the string. compress : ctypes.c_long Compression to apply to this variable, default None. See :py:meth:`Var.compress`. compress_param : ctypes.c_long Compression parameter if compression used; reasonable default is chosen. See :py:meth:`Var.compress`. Returns ======= out : :py:class:`Var` the newly-created zVariable Raises ====== ValueError : if neither data nor sufficient typing information is provided. Notes ===== Any given data may be representable by a range of CDF types; if the type is not specified, pycdf will guess which the CDF types which can represent this data. This breaks down to: #. If input data is a numpy array, match the type of that array #. Proper kind (numerical, string, time) #. Proper range (stores highest and lowest number provided) #. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: #. Type that matches precision of data first, then #. integer type before float type, then #. Smallest type first, then #. signed type first, then #. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if ``data`` specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: #. absolute values between 0 and 3e-39 #. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. """ if type in (const.CDF_EPOCH16, const.CDF_INT8, const.CDF_TIME_TT2000) \ and self.backward: raise ValueError('Cannot use EPOCH16, INT8, or TIME_TT2000 ' 'in backward-compatible CDF') if not lib.supports_int8 and \ type in (const.CDF_INT8, const.CDF_TIME_TT2000): raise ValueError('INT8 and TIME_TT2000 require CDF library 3.4.0') if data is None: if type is None: raise ValueError('Must provide either data or a CDF type.') if dims is None: dims = [] if n_elements is None: n_elements = 1 else: (guess_dims, guess_types, guess_elements) = _Hyperslice.types(data) if dims is None: if recVary: if guess_dims == (): raise ValueError( 'Record-varying data cannot be scalar. 
' 'Specify NRV with CDF.new() or put data in array.') dims = guess_dims[1:] else: dims = guess_dims if type is None: type = guess_types[0] if type == const.CDF_EPOCH16.value and self.backward: type = const.CDF_EPOCH if n_elements is None: n_elements = guess_elements if dimVarys is None: dimVarys = [True for i in dims] recVary = const.VARY if recVary else const.NOVARY dimVarys = [const.VARY if dimVary else const.NOVARY for dimVary in dimVarys] if not hasattr(type, 'value'): type = ctypes.c_long(type) if type.value == const.CDF_INT8.value and not lib.supports_int8: raise ValueError( '64-bit integer support require CDF library 3.4.0') if type.value in (const.CDF_EPOCH16.value, const.CDF_INT8.value, const.CDF_TIME_TT2000.value) \ and self.backward: raise ValueError('Data requires EPOCH16, INT8, or TIME_TT2000; ' 'incompatible with backward-compatible CDF') new_var = Var(self, name, type, n_elements, dims, recVary, dimVarys) if compress != None: new_var.compress(compress, compress_param) if data is not None: new_var[...] = data if hasattr(data, 'attrs'): new_var.attrs.clone(data.attrs) return new_var def raw_var(self, name): """ Get a "raw" :class:`Var` object. Normally a :class:`Var` will perform translation of values for certain types (to/from Unicode for CHAR variables on Py3k, and to/from datetime for all time types). A "raw" object does not perform this translation, on read or write. This does *not* affect the data on disk, and in fact it is possible to maintain multiple Python objects with access to the same zVariable. Parameters ========== name : str name or number of the zVariable """ v = self[name] v._raw = True return v def save(self): """ Saves the CDF file but leaves it open. If closing the CDF, :meth:`close` is sufficient; there is no need to call :meth:`save` before :meth:`close`. .. note:: Relies on an undocumented call of the CDF C library, which is also used in the Java interface. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.SAVE_, const.CDF_) def copy(self): """ Make a copy of all data and attributes in this CDF Returns ======= out : :py:class:`CDFCopy` :class:`~spacepy.datamodel.SpaceData`-like object of all data """ return CDFCopy(self) def version(self): """ Get version of library that created this CDF Returns ======= out : tuple version of CDF library, in form (version, release, increment) """ ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) self._call(const.GET_, const.CDF_VERSION_, ctypes.byref(ver), const.GET_, const.CDF_RELEASE_, ctypes.byref(rel), const.GET_, const.CDF_INCREMENT_, ctypes.byref(inc)) return (ver.value, rel.value, inc.value) def _get_attrs(self): """Get attribute list Provide access to the CDF's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = gAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. 
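        Assigning to this property clears the existing attribute list and
        clones from the assigned value; a minimal sketch, assuming
        ``other_cdf`` is another open CDF:

        >>> cdffile.attrs = other_cdf.attrs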
""") def var_num(self, varname): """Get the variable number of a particular variable name This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : int Variable number of this zvariable. """ num = self._var_nums.get(varname, None) if num is None: #Copied from Var._get, which can hopefully be thinned varNum = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMBER_, varname, ctypes.byref(varNum)) num = varNum.value self._var_nums[varname] = num return num def attr_num(self, attrname): """Get the attribute number and scope by attribute name This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : tuple attribute number, scope (True for global) of this attribute """ res = self._attr_info.get(attrname, None) if res is None: #Copied from Var._get, which can hopefully be thinned attrNum = ctypes.c_long(0) self._call(const.GET_, const.ATTR_NUMBER_, attrname, ctypes.byref(attrNum)) scope = ctypes.c_long(0) self._call(const.SELECT_, const.ATTR_, attrNum, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) res = (attrNum.value, scope) self._attr_info[attrname] = res return res def clear_attr_from_cache(self, attrname): """Mark an attribute deleted in the name-to-number cache Will remove an attribute, and all attributes with higher numbers, from the attribute cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the attribute. Not this is NOT a string in Python 3! """ num, scope = self.attr_num(attrname) #All numbers higher than this are renumbered for a, n in list(self._attr_info.items()): if n[0] >= num: del self._attr_info[a] def clear_from_cache(self, varname): """Mark a variable deleted in the name-to-number cache Will remove a variable, and all variables with higher numbers, from the variable cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! """ num = self.var_num(varname) #All numbers higher than this are renumbered for v, n in list(self._var_nums.items()): if n >= num: del self._var_nums[v] def add_attr_to_cache(self, attrname, num, scope): """Add an attribute to the name-to-number cache This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable scope : bool True if global scope; False if variable scope. 
""" self._attr_info[attrname] = (num, scope) def add_to_cache(self, varname, num): """Add a variable to the name-to-number cache This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable """ self._var_nums[varname] = num #Note there is no function for delete, currently handled in Var.rename #and Attr.rename by just deleting from the dict directly. Maybe this #should be differen (maybe should be possible to follow a variable across #a rename...) class Var(MutableSequence): """ A CDF variable. This object does not directly store the data from the CDF; rather, it provides access to the data in a format that much like a Python list or numpy :class:`~numpy.ndarray`. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. The CDF user's guide, section 2.3, provides background on variables. .. note:: Not intended to be created directly; use methods of :class:`CDF` to gain access to a variable. A record-varying variable's data are viewed as a hypercube of dimensions n_dims+1 (the extra dimension is the record number). They are indexed in row-major fashion, i.e. the last index changes most frequently / is contiguous in memory. If the CDF is column-major, the data are transformed to row-major before return. Non record-varying variables are similar, but do not have the extra dimension of record number. Variables can be subscripted by a multidimensional index to return the data. Indices are in row-major order with the first dimension representing the record number. If the CDF is column major, the data are reordered to row major. Each dimension is specified by standard Python `slice <http://docs.python.org/tutorial/introduction.html#strings>`_ notation, with dimensions separated by commas. The ellipsis fills in any missing dimensions with full slices. The returned data are lists; Python represents multidimensional arrays as nested lists. The innermost set of lists represents contiguous data. .. note:: numpy 'fancy indexing' is *not* supported. Degenerate dimensions are 'collapsed', i.e. no list of only one element will be returned if a single subscript is specified instead of a range. (To avoid this, specify a slice like 1:2, which starts with 1 and ends before 2). Two special cases: 1. requesting a single-dimension slice for a record-varying variable will return all data for that record number (or those record numbers) for that variable. 2. Requests for multi-dimensional variables may skip the record-number dimension and simply specify the slice on the array itself. In that case, the slice of the array will be returned for all records. In the event of ambiguity (e.g., single-dimension slice on a one-dimensional variable), case 1 takes priority. Otherwise, mismatch between the number of dimensions specified in the slice and the number of dimensions in the variable will cause an :exc:`~exceptions.IndexError` to be thrown. This all sounds very complicated but it is essentially attempting to do the 'right thing' for a range of slices. An unusual case is scalar (zero-dimensional) non-record-varying variables. Clearly they cannot be subscripted normally. 
In this case, use the ``[...]`` syntax meaning 'access all data.': >>> import pycdf >>> testcdf = pycdf.CDF('test.cdf', '') >>> variable = testcdf.new('variable', recVary=False, ... type=pycdf.const.CDF_INT4) >>> variable[...] = 10 >>> variable <Var: CDF_INT4 [] NRV > >>> variable[...] 10 Reading any empty non-record-varying variable will return an empty with the same *number* of dimensions, but all dimensions will be of zero length. The scalar is, again, a special case: due to the inability to have a numpy array which is both zero-dimensional and empty, reading an NRV scalar variable with no data will return an empty one-dimensional array. This is really not recommended. As a list type, variables are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_; iterating over a variable returns a single complete record at a time. This is all clearer with examples. Consider a variable ``B_GSM``, with three elements per record (x, y, z components) and fifty records in the CDF. Then: 1. ``B_GSM[0, 1]`` is the y component of the first record. 2. ``B_GSM[10, :]`` is a three-element list, containing x, y, and z components of the 11th record. As a shortcut, if only one dimension is specified, it is assumed to be the record number, so this could also be written ``B_GSM[10]``. 3. ``B_GSM[...]`` reads all data for ``B_GSM`` and returns it as a fifty-element list, each element itself being a three-element list of x, y, z components. Multidimensional example: consider fluxes stored as a function of pitch angle and energy. Such a variable may be called Flux and stored as a two-dimensional array, with the first dimension representing (say) ten energy steps and the second, eighteen pitch angle bins (ten degrees wide, centered from 5 to 175 degrees). Assume 100 records stored in the CDF (i.e. 100 different times). 1. ``Flux[4]`` is a list of ten elements, one per energy step, each element being a list of 18 fluxes, one per pitch bin. All are taken from the fifth record in the CDF. 2. ``Flux[4, :, 0:4]`` is the same record, all energies, but only the first four pitch bins (roughly, field-aligned). 3. ``Flux[..., 0:4]`` is a 100-element list (one per record), each element being a ten-element list (one per energy step), each containing fluxes for the first four pitch bins. This slicing notation is very flexible and allows reading specifically the desired data from the CDF. All data are, on read, converted to appropriate Python data types; EPOCH, EPOCH16, and TIME_TT2000 types are converted to :class:`~datetime.datetime`. Data are returned in numpy arrays. .. note:: Although pycdf supports TIME_TT2000 variables, the Python :class:`~datetime.datetime` object does not support leap seconds. Thus, on read, any seconds past 59 are truncated to 59.999999 (59 seconds, 999 milliseconds, 999 microseconds). Potentially useful list methods and related functions: - `count <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `in <http://docs.python.org/reference/expressions.html#in>`_ - `index <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `len <http://docs.python.org/library/functions.html#len>`_ - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - `sorted <http://docs.python.org/library/functions.html#sorted>`_ The topic of array majority can be very confusing; good background material is available at `IDL Array Storage and Indexing <http://www.idlcoyote.com/misc_tips/colrow_major.html>`_. 
In brief, *regardless of the majority stored in the CDF*, pycdf will always present the data in the native Python majority, row-major order, also known as C order. This is the default order in `NumPy <http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html #internal-memory-layout-of-an-ndarray>`_. However, packages that render image data may expect it in column-major order. If the axes seem 'swapped' this is likely the reason. The :attr:`~Var.attrs` Python attribute acts as a dictionary referencing zAttributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`zAttrList` for more on the dictionary of attributes. With writing, as with reading, every attempt has been made to match the behavior of Python lists. You can write one record, many records, or even certain elements of all records. There is one restriction: only the record dimension (i.e. dimension 0) can be resized by write, as all records in a variable must have the same dimensions. Similarly, only whole records can be deleted. .. note:: Unusual error messages on writing data usually mean that pycdf is unable to interpret the data as a regular array of a single type matching the type and shape of the variable being written. A 5x4 array is supported; an irregular array where one row has five columns and a different row has six columns is not. Error messages of this type include: - ``Data must be well-formed, regular array of number, string, or datetime`` - ``setting an array element with a sequence.`` - ``shape mismatch: objects cannot be broadcast to a single shape`` For these examples, assume Flux has 100 records and dimensions [2, 3]. Rewrite the first record without changing the rest: >>> Flux[0] = [[1, 2, 3], [4, 5, 6]] Writes a new first record and delete all the rest: >>> Flux[...] = [[1, 2, 3], [4, 5, 6]] Write a new record in the last position and add a new record after: >>> Flux[99:] = [[[1, 2, 3], [4, 5, 6]], ... [[11, 12, 13], [14, 15, 16]]] Insert two new records between the current number 5 and 6: >>> Flux[5:6] = [[[1, 2, 3], [4, 5, 6]], [[11, 12, 13], ... [14, 15, 16]]] This operation can be quite slow, as it requires reading and rewriting the entire variable. (CDF does not directly support record insertion.) Change the first element of the first two records but leave other elements alone: >>> Flux[0:2, 0, 0] = [1, 2] Remove the first record: >>> del Flux[0] Removes record 5 (the sixth): >>> del Flux[5] Due to the need to work around a bug in the CDF library, this operation can be quite slow. Delete *all data* from ``Flux``, but leave the variable definition intact: >>> del Flux[...] .. note:: Although this interface only directly supports zVariables, zMode is set on opening the CDF so rVars appear as zVars. See p.24 of the CDF user's guide; pyCDF uses zMode 2. .. autosummary:: ~Var.attrs ~Var.compress ~Var.copy ~Var.dtype ~Var.dv ~Var.insert ~Var.name ~Var.rename ~Var.rv ~Var.shape ~Var.type .. attribute:: Var.attrs zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. .. automethod:: compress .. automethod:: copy .. autoattribute:: dtype .. automethod:: dv .. automethod:: insert .. automethod:: name .. automethod:: rename .. automethod:: rv .. autoattribute:: shape .. 
automethod:: type """ def __init__(self, cdf_file, var_name, *args): """Create or locate a variable Parameters ========== cdf_file : :py:class:`pycdf.CDF` CDF file containing this variable var_name : string name of this variable Other Parameters ================ args additional arguments passed to :py:meth:`_create`. If none, opens an existing variable. If provided, creates a new one. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning """ self.cdf_file = cdf_file #This is the definitive "identify" of variable self._name = None self._type = None #CDF type (long) self._raw = False #Raw access (skip all conversions) if len(args) == 0: self._get(var_name) else: self._create(var_name, *args) #Weak reference to attribute list (use attrs instead) #This avoids a reference loop self._attrlistref = weakref.ref(zAttrList(self)) def __getitem__(self, key): """Returns a slice from the data array. Details under :py:class:`pycdf.Var`. @return: The data from this variable @rtype: list-of-lists of appropriate type. @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) #Hyperslice mostly catches this sort of thing, but #an empty variable is a special case, since we might want to #WRITE to 0th record (which Hyperslice also supports) but #can't READ from it, and iterating over tries to read from it. if hslice.rv: if hslice.dimsizes[0] == 0 and hslice.degen[0] and \ hslice.starts[0] == 0: raise IndexError('record index out of range') #For NRV, again hslice will assume 0th record exists since we might #want to write. So ANY degenerate dim other than the glued-on 0th #suggests an explicit index that should fail. None degenerate suggests #make an empty array. #Note this is pulling a lot of hyperslice stuff into getitem! elif hslice.dimsizes[0] == 0: if len(hslice.degen) > 1 and max(hslice.degen[1:]): raise IndexError('record index out of range') else: #The zero-length dimension is degenerate so it gets chopped, #and you can't have a zero-length numpy array that still #maintains the size of all other dimensions. So just force #a zero-dim array and the rest will follow hslice.counts[...] = 0 #If this is a scalar, need to make a single non-degenerate #dimension so it can be empty. if len(hslice.counts) == 1: hslice.degen[0] = False result = hslice.create_array() if hslice.counts[0] != 0: hslice.select() lib.call(const.GET_, const.zVAR_HYPERDATA_, result.ctypes.data_as(ctypes.c_void_p)) return hslice.convert_input_array(result) def __delitem__(self, key): """Removes a record (or set of records) from the CDF Only whole records can be deleted, so the del call must either specify only one dimension or it must specify all elements of the non-record dimensions. This is *not* a way to resize a variable! Deleting records from the middle of a variable may be very slow in some circumstances. To work around a bug in CDF library versions 3.4.0 and before, all the data must be read in, the requested deletions done, and then all written back out. 
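        A minimal sketch, assuming ``cdffile`` is an open, writable CDF
        containing a record-varying variable ``Flux``:

        >>> flux = cdffile['Flux']
        >>> del flux[0:2]    #delete the first two whole records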
@param key: index or slice to delete @type key: int or slice @raise TypeError: if an attempt is made to delete from a non record-varying variable, or to delete below the record level """ if not self.rv(): raise TypeError('Cannot delete records from non-record-varying ' 'variable.') hslice = _Hyperslice(self, key) if hslice.dims > 1 and (hslice.counts[1:] != hslice.dimsizes[1:]).any(): raise TypeError('Can only delete entire records.') if hslice.counts[0] == 0: return start = hslice.starts[0] count = hslice.counts[0] interval = hslice.intervals[0] dimsize = hslice.dimsizes[0] self._call() dangerous_delete = False if lib._del_middle_rec_bug and \ (interval != 1 or (start != 0 and start + count < dimsize)): #delete from middle is dangerous if only have one index entry entries = ctypes.c_long(0) lib.call(const.GET_, const.zVAR_nINDEXENTRIES_, ctypes.byref(entries)) dangerous_delete = (entries.value == 1) if dangerous_delete: data = self[...] data = numpy.delete( data, numpy.arange(start, start + count * interval, interval), 0) self[0:dimsize - count] = data first_rec = dimsize - count last_rec = dimsize - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif interval == 1: first_rec = ctypes.c_long(start) last_rec = ctypes.c_long(start + count - 1) lib.call(const.DELETE_, const.zVAR_RECORDS_, first_rec, last_rec) else: self._call() #delete from end to avoid renumbering of records for recno in range(start + (count - 1) * interval, start - 1, -1 * interval): lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(recno), ctypes.c_long(recno)) def __setitem__(self, key, data): """Puts a slice into the data array. Details under :py:class:`pycdf.Var`. @param key: index or slice to store @type key: int or slice @param data: data to store @type data: numpy.array @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. 
@raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) n_recs = hslice.counts[0] hslice.expand(data) cdf_type = self.type() if cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) else: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=self._np_type()) if cdf_type == const.CDF_EPOCH16.value: datashape = data.shape[:-1] else: datashape = data.shape #Check data sizes if datashape != tuple(hslice.expected_dims()): raise ValueError('attempt to assign data of dimensions ' + str(datashape) + ' to slice of dimensions ' + str(tuple(hslice.expected_dims()))) #Flip majority and reversed dimensions, see convert_input_array data = hslice.convert_output_array(data) #Handle insertions and similar weirdness if hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Specified slice ends before last record, so insert in middle saved_data = self[hslice.starts[0] + n_recs:] if hslice.counts[0] > 0: hslice.select() lib.call(const.PUT_, const.zVAR_HYPERDATA_, data.ctypes.data_as(ctypes.c_void_p)) if hslice.counts[0] < n_recs: first_rec = hslice.starts[0] + hslice.counts[0] last_rec = hslice.dimsizes[0] - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Put saved data in after inserted data self[hslice.starts[0] + hslice.counts[0]:] = saved_data def extend(self, data): """ Append multiple values to the end of this variable This is an efficiency function which overrides the base implementation in MutableSequence. Parameters ---------- data : the data to append """ self[len(self):] = data def insert(self, index, data): """ Inserts a *single* record before an index Parameters ---------- index : int index before which to insert the new record data : the record to insert """ self[index:index] = [data] def _create(self, var_name, datatype, n_elements = 1, dims = (), recVary = const.VARY, dimVarys = None): """Creates a new zVariable @param var_name: name of this variable @type var_name: string @param datatype: CDF data type @type datatype: ctypes.c_long @param n_elements: number of elements (should be 1 except for CDF_CHAR variables). @type n_elements: long @param dims: size of each dimension for multi-dimensional variable, or empty for a zero-dimensional @type dims: sequence of long @param recVary: record variance for this variable (VARY/NOVARY) @type recVary: long @param dimVarys: array of VARY or NOVARY, variance for each dimension @type dimVarys: sequence of long @return: new variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.new}. 
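A sketch of the intended route (``cdf`` is a hypothetical open, writable :py:class:`pycdf.CDF`; keyword names as in L{CDF.new}):
>>> newvar = cdf.new('Flux', type=const.CDF_FLOAT, dims=[2, 3])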
""" dim_array = (ctypes.c_long * len(dims))(*dims) enc_name = var_name.encode('ascii') if dimVarys is None: dim_vary_array = (ctypes.c_long * (len(dims) if len(dims) > 0 else 1))(const.VARY) else: dim_vary_array = (ctypes.c_long * len(dims))(*dimVarys) varNum = ctypes.c_long(0) self.cdf_file._call(const.CREATE_, const.zVAR_, enc_name, datatype, ctypes.c_long(n_elements), ctypes.c_long(len(dims)), dim_array, recVary, dim_vary_array, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) def _delete(self): """Removes this zVariable from the CDF @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ self._call(const.DELETE_, const.zVAR_) self.cdf_file.clear_from_cache(self._name) self._name = None def _get(self, var_name): """Gets an existing zVariable @param var_name: name of this variable @type var_name: string @return: variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.__getitem__}. """ if isinstance(var_name, str_classes): try: enc_name = var_name.encode('ascii').rstrip() except AttributeError: enc_name = var_name.rstrip() #already in ASCII #'touch' CDF to cause an error if the name isn't there; get number varNum = ctypes.c_long(0) self.cdf_file._call(const.GET_, const.zVAR_NUMBER_, enc_name, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) else: #Looking up by number name = ctypes.create_string_buffer(const.CDF_VAR_NAME_LEN256+1) self.cdf_file._call(const.SELECT_, const.zVAR_, ctypes.c_long(var_name), const.GET_, const.zVAR_NAME_, name) self._name = name.value.rstrip() self.cdf_file.add_to_cache(self._name, var_name) def _num(self): """Returns the zVar number for this variable @return: number of this zVar @rtype: int """ return self.cdf_file.var_num(self._name) def __len__(self): """Get number of records for this variable in this file @return: Number of records @rtype: long @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ count = ctypes.c_long(0) self._call(const.GET_, const.zVAR_MAXREC_, ctypes.byref(count)) return (count.value + 1) def __repr__(self): """Returns representation of the variable Cannot return anything that can be eval'd to create a copy, so just wrap the informal representation in angle brackets. @return: info on this zVar @rtype: str """ return '<Var:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the variable This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.Var`. 
@return: info on this zVar, CDFTYPE [dimensions] NRV (if not record-varying) @rtype: str """ if self.cdf_file._opened: cdftype = self.type() chartypes = (const.CDF_CHAR.value, const.CDF_UCHAR.value) rv = self.rv() typestr = lib.cdftypenames[cdftype] + \ ('*' + str(self._nelems()) if cdftype in chartypes else '' ) if rv: sizestr = str([len(self)] + self._dim_sizes()) else: sizestr = str(self._dim_sizes()) return typestr + ' ' + sizestr + ('' if rv else ' NRV') else: if isinstance(self._name, str): return 'zVar "{0}" in closed CDF {1}'.format( self._name, self.cdf_file.pathname) else: return 'zVar "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self.cdf_file.pathname.decode('ascii')) def _n_dims(self): """Get number of dimensions for this variable @return: the number of dimensions @rtype: long """ n_dims = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMDIMS_, ctypes.byref(n_dims)) return n_dims.value def _dim_sizes(self): """Get the dimension sizes for this variable @return: sequence of sizes @rtype: sequence of long @note: This will always be in Python order (i.e. row major, last index iterates most quickly), *regardless* of the majority of the CDF. """ sizes = (ctypes.c_long * const.CDF_MAX_DIMS)(0) self._call(const.GET_, const.zVAR_DIMSIZES_, sizes) sizes = sizes[0:self._n_dims()] return sizes def rv(self, new_rv=None): """ Gets or sets whether this variable has record variance If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Other Parameters ================ new_rv : boolean True to change to record variance, False to change to NRV, unspecified to simply check variance. Returns ======= out : Boolean True if record varying, False if NRV """ if new_rv != None: self._call(const.PUT_, const.zVAR_RECVARY_, const.VARY if new_rv else const.NOVARY) vary = ctypes.c_long(0) self._call(const.GET_, const.zVAR_RECVARY_, ctypes.byref(vary)) return vary.value != const.NOVARY.value def dv(self, new_dv=None): """ Gets or sets dimension variance of each dimension of variable. If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Parameters ========== new_dv : list of boolean Each element True to change that dimension to dimension variance, False to change to not dimension variance. (Unspecified to simply check variance.) Returns ======= out : list of boolean True if that dimension has variance, else false. """ ndims = self._n_dims() if new_dv != None: if len(new_dv) != ndims: raise ValueError('Must specify variance for ' + str(ndims) + 'dimensions.') varies = (ctypes.c_long * ndims)( *[const.VARY if dv else const.NOVARY for dv in new_dv]) self._call(const.PUT_, const.zVAR_DIMVARYS_, varies) if ndims == 0: return [] varies = (ctypes.c_long * const.CDF_MAX_DIMS)() self._call(const.GET_, const.zVAR_DIMVARYS_, varies) return [dv != const.NOVARY.value for dv in varies[0:ndims]] def _call(self, *args, **kwargs): """Select this CDF and variable and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. @param args: Passed directly to the CDF library interface. Useful constants are defined in the :py:mod:`pycdf.const` module of this package. @type args: various, see :py:mod:`ctypes`. 
@return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self.cdf_file._call( const.SELECT_, const.zVAR_, ctypes.c_long(self.cdf_file.var_num(self._name)), *args, **kwargs) def _np_type(self): """Returns the numpy type of this variable This is the numpy type that will come directly out of the CDF; see :meth:`dtype` for the representation post-conversion. Raises ====== CDFError : for library-reported error or failure to find numpy type Returns ======= out : dtype numpy dtype that will hold value from this variable """ cdftype = self.type() if cdftype == const.CDF_CHAR.value or cdftype == const.CDF_UCHAR.value: return numpy.dtype('S' + str(self._nelems())) try: return lib.numpytypedict[cdftype] except KeyError: raise CDFError(const.BAD_DATA_TYPE) def type(self, new_type=None): """ Returns or sets the CDF type of this variable Parameters ========== new_type : ctypes.c_long the new type from :mod:`~pycdf.const` Returns ======= out : int CDF type """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) n_elements = ctypes.c_long(self._nelems()) self._call(const.PUT_, const.zVAR_DATASPEC_, new_type, n_elements) self._type = None if self._type is None: cdftype = ctypes.c_long(0) self._call(const.GET_, const.zVAR_DATATYPE_, ctypes.byref(cdftype)) self._type = cdftype.value return self._type def _nelems(self): """Number of elements for each value in this variable This is the length of strings for CHAR and UCHAR, should be 1 otherwise. @return: length of strings @rtype: int """ nelems = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMELEMS_, ctypes.byref(nelems)) return nelems.value def name(self): """ Returns the name of this variable Returns ======= out : str variable's name """ if isinstance(self._name, str): return self._name elif isinstance(self._name, bytes): return self._name.decode() def compress(self, comptype=None, param=None): """ Set or check the compression of this variable Compression may not be changeable on variables with data already written; even deleting the data may not permit the change. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
Returns ======= out : tuple the (comptype, param) currently in effect """ return _compress(self, comptype, param) def copy(self): """ Copies all data and attributes from this variable Returns ======= out : :class:`VarCopy` list of all data in record order """ return VarCopy(self) def rename(self, new_name): """ Renames this variable Parameters ========== new_name : str the new name for this variable """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_VAR_NAME_LEN256: raise CDFError(const.BAD_VAR_NAME) self._call(const.PUT_, const.zVAR_NAME_, enc_name) self.cdf_file.add_to_cache( enc_name, self.cdf_file.var_num(self._name)) #Still in cache del self.cdf_file._var_nums[self._name] self._name = enc_name @property def shape(self): """ Provides the numpy array-like shape of this variable. Returns a tuple; first element is number of records (RV variable only) And the rest provide the dimensionality of the variable. .. note:: Assigning to this attribute will not change the shape. """ if self.rv(): return tuple([len(self)] + self._dim_sizes()) else: return tuple(self._dim_sizes()) @property def dtype(self): """ Provide the numpy dtype equivalent to the CDF type of this variable. Data from this variable will be returned in numpy arrays of this type. See Also -------- type """ cdftype = self.type() if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str is not bytes and not self._raw: return numpy.dtype('U' + str(self._nelems())) if cdftype in (const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value) and not self._raw: return numpy.dtype('O') return self._np_type() def _get_attrs(self): """Get attribute list Provide access to the zVar's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = zAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. """) class _Hyperslice(object): """Represents a CDF 'slice' used for the hyper CDF functions For internal module use only. @ivar dims: number of dimensions to this slice, usually number of dimensions to the variable plus one for the record, which represents the 0th (least rapidly varying) dimension. @type dims: int @ivar dimsizes: size of each dimension (0th is number of records) @type dimsizes: list of int @ivar starts: index of the start value for each dimension ('dimension indices' in CDF speak) @type starts: list of int @ivar counts: number of values to get from each dimension. Final result will be the product of everything in counts. ('dimension counts' in CDF speak) @type counts: numpy.array @ivar intervals: interval between successive indices to use for each dimension. ('dimension invervals' in CDF speak) @type intervals: list of int @ivar degen: is this dimension degenerate, i.e. should be removed in the returned dataset. A 3D array with one dimension degenerate will be returned as a 2D array (i.e. list-of-lists.) @type degen: numpy.array @ivar rev: should this dimension be returned in reverse order? 
@type rev: numpy.array @ivar column: is this slice in column-major mode (if false, row-major) @type column: boolean @ivar zvar: what CDF variable this object slices on @type zvar: :py:class:`pycdf.Var` @ivar expanded_key: fully-expanded version of the key passed to the constructor (all dimensions filled in) @type expanded_key: tuple @note: All dimension-related variables are stored row-major (Python order) """ def __init__(self, zvar, key): """Create a Hyperslice @param zvar: zVariable that this slices @type zvar: :py:class:`pycdf.Var` @param key: Python multi-dimensional slice as passed to __getitem__ @type key: tuple of slice and/or int @raise IndexError: if slice is out of range, mismatches dimensions, or otherwise unparsable. @raise ValueError: if slice has invalid values """ self.zvar = zvar self.rv = self.zvar.rv() #dim of records, + 1 record dim (NRV always is record 0) self.dims = zvar._n_dims() + 1 self.dimsizes = [len(zvar)] + \ zvar._dim_sizes() self.starts = [0] * self.dims self.counts = numpy.empty((self.dims,), dtype=numpy.int32) self.counts.fill(1) self.intervals = [1] * self.dims self.degen = numpy.zeros(self.dims, dtype=numpy.bool) self.rev = numpy.zeros(self.dims, dtype=numpy.bool) #key is: #1. a single value (integer or slice object) if called 1D #2. a tuple (of integers and/or slice objects) if called nD #3. Each item is either a single value (degenerate dim) # or a slice object. if not hasattr(key, '__len__'): #Not a container object, pack in tuple key = (key, ) if not self.rv: key = (0, ) + key #NRV, so always get 0th record (degenerate) key = self.expand_ellipsis(key, self.dims) if self.rv: #special-cases for RV variables if len(key) == 1: #get all data for this record(s) key = self.expand_ellipsis(key + (Ellipsis, ), self.dims) elif len(key) == self.dims - 1: #get same slice from each record key = (slice(None, None, None), ) + key if len(key) == self.dims: self.expanded_key = key for i in range(self.dims): idx = key[i] if hasattr(idx, 'start'): #slice (self.starts[i], self.counts[i], self.intervals[i], self.rev[i]) = \ self.convert_range(idx.start, idx.stop, idx.step, self.dimsizes[i]) else: #Single degenerate value if idx < 0: idx += self.dimsizes[i] if idx != 0 and (idx >= self.dimsizes[i] or idx < 0): raise IndexError('list index out of range') self.starts[i] = idx self.degen[i] = True else: raise IndexError('Slice does not match dimensions for zVar ' + str(zvar._name)) self.column = zvar.cdf_file.col_major() def expected_dims(self, data=None): """Calculate size of non-degenerate dimensions Figures out size, in each dimension, of expected input data @return: size of each dimension for this slice, excluding degenerate @rtype: list of int """ return [self.counts[i] for i in range(self.dims) if not self.degen[i]] def expand(self, data): """Expands the record dimension of this slice to hold a set of data If the length of data (outermost dimension) is larger than the record count (counts[0]) for this slice, expand the slice to hold all the data. This requires that the record dimension of the slice not be degenerate, and also that it not have been completely specified when the hyperslice was created (i.e. record dimension either ellipsis or no specified stop.) Does *not* expand any other dimension, since that's Very Hard in CDF. 
@param data: the data which are intended to be stored in this slice @type data: list """ rec_slice = self.expanded_key[0] if not self.rv or isinstance(data, str_classes) or self.degen[0] or \ not hasattr(rec_slice, 'stop'): return if len(data) < self.counts[0]: #Truncate to fit data if rec_slice.stop is None and rec_slice.step in (None, 1): self.counts[0] = len(data) elif len(data) > self.counts[0]: #Expand to fit data if rec_slice.step in (None, 1): self.counts[0] = len(data) def create_array(self): """Creates a numpy array to hold the data from this slice Returns ======= out : numpy.array array sized, typed, and dimensioned to hold data from this slice """ counts = self.counts degen = self.degen if self.column: counts = self.reorder(counts) degen = self.reorder(degen) #TODO: Forcing C order for now, revert to using self.column later array = numpy.empty( [counts[i] for i in range(len(counts)) if not degen[i]], self.zvar._np_type(), order='C') return numpy.require(array, requirements=('C', 'A', 'W')) def convert_input_array(self, buffer): """Converts a buffer of raw data from this slice EPOCH(16) variables always need to be converted. CHAR need converted to Unicode if py3k Parameters ========== buffer : numpy.array data as read from the CDF file Returns ======= out : numpy.array converted data """ result = self._flip_array(buffer) #Convert to derived types cdftype = self.zvar.type() if not self.zvar._raw: if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str != bytes: dt = numpy.dtype('U{0}'.format(result.dtype.itemsize)) result = numpy.require(numpy.char.array(result).decode(), dtype=dt) elif cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(result) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(result) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(result) return result def convert_output_array(self, buffer): """Convert a buffer of data that will go into this slice Parameters ========== buffer : numpy.array data to go into the CDF file Returns ======= out : numpy.array input with majority flipped and dimensions reversed to be suitable to pass directly to CDF library. """ buffer = self._flip_array(buffer) return numpy.require(buffer, requirements=('C', 'A', 'W')) def _flip_array(self, data): """ Operations for majority, etc. common between convert_input and _output """ cdftype = self.zvar.type() #Flip majority if any non-degenerate dimensions exist if self.column and not min(self.degen): #Record-number dim degen, swap whole thing if self.degen[0]: if cdftype == const.CDF_EPOCH16.value: #Maintain last dimension data = data.transpose( list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose() #Record-number dimension is not degenerate, so keep it first else: if cdftype == const.CDF_EPOCH16.value: data = data.transpose( [0] + list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose( [0] + list(range(len(data.shape) - 1, 0, -1))) #Reverse non-degenerate dimensions in rev #Remember that the degenerate indices are already gone! 
if self.rev.any(): sliced = [(slice(None, None, -1) if self.rev[i] else slice(None)) for i in range(self.dims) if not self.degen[i]] if cdftype == const.CDF_EPOCH16.value: #don't reverse last dim sliced.append(slice(None)) data = operator.getitem(data, tuple(sliced)) return data def select(self): """Selects this hyperslice in the CDF Calls the CDF library to select the CDF, variable, records, and array elements corresponding to this slice. """ args = (const.SELECT_, const.zVAR_RECNUMBER_, ctypes.c_long(self.starts[0]), const.SELECT_, const.zVAR_RECCOUNT_, ctypes.c_long(self.counts[0]), const.SELECT_, const.zVAR_RECINTERVAL_, ctypes.c_long(self.intervals[0])) if self.dims > 1: dims = self.dims - 1 args += (const.SELECT_, const.zVAR_DIMINDICES_, (ctypes.c_long * dims)(*self.starts[1:]), const.SELECT_, const.zVAR_DIMCOUNTS_, (ctypes.c_long * dims)(*self.counts[1:]), const.SELECT_, const.zVAR_DIMINTERVALS_, (ctypes.c_long * dims)(*self.intervals[1:])) self.zvar._call(*args) @staticmethod def expand_ellipsis(slices, n_dims): """Expands any ellipses into correct number of full-size slices @param slices: tuple of slices, integers, or ellipse objects @type slices: tuple @param n_dims: number of dimensions this slice is over @type n_dims: int @return: L{slices} with ellipses replaced by appropriate number of full-dimension slices @rtype: tuple @raise IndexError: if ellipses specified when already have enough dimensions """ if slices is Ellipsis: return tuple([slice(None, None, None) for i in range(n_dims)]) #Elements might be numpy arrays, so can't use in/index idx = [i for i, v in enumerate(slices) if v is Ellipsis] if not idx: #no ellipsis return slices if len(idx) > 1: #multiples! raise IndexError('Ellipses can only be used once per slice.') idx = idx[0] #how many dims to expand ellipsis to #remember the ellipsis is in len(slices) and must be replaced! extra = n_dims - len(slices) + 1 if extra < 0: raise IndexError('too many indices') result = slices[0:idx] + (slice(None), ) * extra + slices[idx+1:] return result @staticmethod def check_well_formed(data): """Checks if input data is well-formed, regular array""" d = numpy.asanyarray(data) if d.dtype == object: #this is probably going to be bad try: len(d.flat[0]) except TypeError: #at least it's not a list pass else: raise ValueError( 'Data must be well-formed, regular array of number, ' 'string, or datetime') @staticmethod def dimensions(data): """Finds the dimensions of a nested list-of-lists @param data: data of which dimensions are desired @type data: list (of lists) @return: dimensions of L{data}, in order outside-in @rtype: list of int @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) _Hyperslice.check_well_formed(d) return d.shape @staticmethod def types(data, backward=False): """Find dimensions and valid types of a nested list-of-lists Any given data may be representable by a range of CDF types; infer the CDF types which can represent this data. This breaks down to: 1. Proper kind (numerical, string, time) 2. Proper range (stores highest and lowest number) 3. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: 1. Type that matches precision of data first, then 2. integer type before float type, then 3. Smallest type first, then 4. signed type first, then 5. specifically-named (CDF_BYTE) vs. 
generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if L{data} specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: 1. absolute values between 0 and 3e-39 2. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. @param data: data for which dimensions and CDF types are desired @type data: list (of lists) @param backward: limit to pre-CDF3 types @type backward: bool @return: dimensions of L{data}, in order outside-in; CDF types which can represent this data; number of elements required (i.e. length of longest string) @rtype: 3-tuple of lists ([int], [ctypes.c_long], [int]) @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) dims = d.shape elements = 1 types = [] _Hyperslice.check_well_formed(d) if d.dtype.kind in ('S', 'U'): #it's a string types = [const.CDF_CHAR, const.CDF_UCHAR] elements = d.dtype.itemsize if d.dtype.kind == 'U': #UTF-8 uses 4 bytes per elements //= 4 elif d.size and hasattr(numpy.ma.getdata(d).flat[0], 'microsecond'): if max((dt.microsecond % 1000 for dt in d.flat)) > 0: types = [const.CDF_EPOCH16, const.CDF_EPOCH, const.CDF_TIME_TT2000] else: types = [const.CDF_EPOCH, const.CDF_EPOCH16, const.CDF_TIME_TT2000] if backward: del types[types.index(const.CDF_EPOCH16)] del types[-1] elif not lib.supports_int8: del types[-1] elif d is data or isinstance(data, numpy.generic): #numpy array came in, use its type (or byte-swapped) types = [k for k in lib.numpytypedict if (lib.numpytypedict[k] == d.dtype or lib.numpytypedict[k] == d.dtype.newbyteorder()) and not k in lib.timetypes] if (not lib.supports_int8 or backward) \ and const.CDF_INT8.value in types: del types[types.index(const.CDF_INT8.value)] #Maintain priority to match the ordered lists below: #float/double (44, 45) before real (21/22), and #byte (41) before int (1) before char (51). So hack. #Consider making typedict an ordered dict once 2.6 is dead. types.sort(key=lambda x: x % 50, reverse=True) if not types: #not a numpy array, or can't parse its type if d.dtype.kind == 'O': #Object. 
Try to make it numeric #Can't do safe casting from Object, so try and compare #Basically try most restrictive to least restrictive trytypes = (numpy.uint64, numpy.int64, numpy.float64) for t in trytypes: try: newd = d.astype(dtype=t) except: #Failure to cast, try next type continue if (newd == d).all(): #Values preserved, use this type d = newd #Continue with normal guessing, as if a list break else: #fell through without a match raise ValueError( 'Cannot convert generic objects to CDF type.') if d.dtype.kind in ('i', 'u'): #integer minval = numpy.min(d) maxval = numpy.max(d) if minval < 0: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_INT2, const.CDF_INT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 15, 2 ** 31, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] else: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_UINT1, const.CDF_INT2, const.CDF_UINT2, const.CDF_INT4, const.CDF_UINT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 8, 2 ** 15, 2 ** 16, 2 ** 31, 2 ** 32, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] types = [t for (t, c) in zip(types, cutoffs) if c > maxval and (minval >= 0 or minval >= -c)] if (not lib.supports_int8 or backward) \ and const.CDF_INT8 in types: del types[types.index(const.CDF_INT8)] else: #float if dims is (): if d != 0 and (abs(d) > 1.7e38 or abs(d) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] else: absolutes = numpy.abs(d[d != 0]) if len(absolutes) > 0 and \ (numpy.max(absolutes) > 1.7e38 or numpy.min(absolutes) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] types = [t.value if hasattr(t, 'value') else t for t in types] return (dims, types, elements) @staticmethod def reorder(seq): """Reorders seq to switch array majority Used to take an array of subscripts between row and column majority. First element is not touched, being the record number. @param seq: a sequence of *subscripts* @type seq: sequence of integers @return: seq with all but element 0 reversed in order @rtype: sequence of integers """ return numpy.concatenate((seq[0:1], numpy.flipud(seq)[:-1])) @staticmethod def convert_range(start, stop, step, size): """Converts a start/stop/step range to start/count/interval (i.e. changes from Python-style slice to CDF-style) @param start: index to start a slice at, may be none or negative @type start: int @param stop: index at end of slice (one-past, standard Python), may be none or negative @type stop: int @param step: interval for stepping through stlice @type step: int @param size: size of list to slice @type size: int @return: (start, count, interval, rev) where: 1. start is the start index, normalized to be within the size of the list and negatives handled 2. count is the number of records in the slice, guaranteed to stop before the end 3. interval is the skip between records 4. rev indicates whether the sequence should be reversed @rtype: (int, int, int, boolean) """ (start, stop, step) = slice(start, stop, step).indices(size) if step < 0: step *= -1 count = int((start - stop + step - 1) / step) start = start - (count - 1) * step rev = True else: count = int((stop - start + step - 1) / step) rev = False if count < 0: count = 0 start = 0 return (start, count, step, rev) class Attr(MutableSequence): """An attribute, g or z, for a CDF .. 
warning:: This class should not be used directly, but only in its subclasses, :class:`gAttr` and :class:`zAttr`. The methods listed here are safe to use in the subclasses. Represents a CDF attribute, providing access to the Entries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. An introduction to CDF attributes can be found in section 2.4 of the CDF user's guide. Each element of the list is a single Entry of the appropriate type. The index to the elements is the Entry number. Multi-dimensional slicing is *not* supported; an Entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry .. autosummary:: ~Attr.append ~Attr.has_entry ~Attr.insert ~Attr.max_idx ~Attr.new ~Attr.number ~Attr.rename ~Attr.type .. automethod:: append .. automethod:: has_entry .. automethod:: insert .. automethod:: max_idx .. automethod:: new .. automethod:: number .. automethod:: rename .. automethod:: type """ def __init__(self, cdf_file, attr_name, create=False): """Initialize this attribute @param cdf_file: CDF file containing this attribute @type cdf_file: :py:class:`pycdf.CDF` @param attr_name: Name of this attribute @type attr_name: str @param create: True to create attribute, False to look up existing. @type create: bool """ self._cdf_file = cdf_file self._raw = False if isinstance(attr_name, str_classes): try: self._name = attr_name.encode('ascii') except AttributeError: self._name = attr_name attrno = ctypes.c_long() if create: self._cdf_file._call(const.CREATE_, const.ATTR_, self._name, self.SCOPE, ctypes.byref(attrno)) self._cdf_file.add_attr_to_cache( self._name, attrno.value, self.SCOPE == const.GLOBAL_SCOPE) else: #Ensure exists, and populate cache. See scope note below attrno, scope = self._cdf_file.attr_num(self._name) else: name = ctypes.create_string_buffer(const.CDF_ATTR_NAME_LEN256 + 1) scope = ctypes.c_long(0) self._cdf_file._call(const.SELECT_, const.ATTR_, ctypes.c_long(attr_name)) #Because it's possible to create a gAttr Python objecting #referencing an Attribute with variable scope, and vice-versa, #do NOT assume the scope matches #(Higher level code checks for that being a bad thing.) self._cdf_file._call( const.GET_, const.ATTR_NAME_, name, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) self._name = name.value.rstrip() if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) self._cdf_file.add_attr_to_cache(self._name, attr_name, scope) def __getitem__(self, key): """Return a slice of Entries. Because Attributes may be sparse, a multi-element slice will return None for those elements which do not have associated Entries. @param key: index or range of Entry number to return @type key: slice or int @return: a list of entries, appropriate type. @raise IndexError: if L{key} is an int and that Entry number does not exist. 
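Example sketch (``attribute`` is a hypothetical gAttr holding 1 in Entry 0 and 3 in Entry 2, with no Entry 1):
>>> attribute[0:3]    # the missing Entry comes back as None
[1, None, 3]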
""" if key is Ellipsis: key = slice(None, None, None) if hasattr(key, 'indices'): idx = range(*key.indices(self.max_idx() + 1)) return [self._get_entry(i) if self.has_entry(i) else None for i in idx] else: if self.has_entry(key): return self._get_entry(key) else: raise IndexError('list index ' + str(key) + ' out of range.') def _check_other_entries(self, types): """Try to get the type of this entry from others in the Attribute For zAttrs, checks if all other Entries are the same type, and at least one doesn't match its zVar, i.e. Entry type dominates (otherwise assumption is the Var type dominates). For gAttrs, checks all other Entries, and gives priority to the one that's earliest in the possible type list and exists in other Entries. This is only one component of Entry type guessing! :param list types: CDF types that are candidates (match the data) :return: The type discerned from other Entries, or None """ if self.ENTRY_ == const.zENTRY_: #If everything else is the same entry type, #and one is not the same as its var, probably #all entries should be of that type cand_et = None #The Entry type that might work one_var_diff = False #One Var has a type different from Entry for num in range(self.max_idx() + 1): if not self.has_entry(num): continue vartype = self._cdf_file[num].type() entrytype = self.type(num) if vartype != entrytype: one_var_diff = True if cand_et is None: if not entrytype in types: return None #One var has Entry with "impossible" type cand_et = entrytype elif cand_et != entrytype: return None #Two vars have Entries with different types if one_var_diff and cand_et is not None: return cand_et else: # Of those types which exist in other entries, # find the one which is earliest # in types, i.e. the preferred type entrytypes = [self.type(num) for num in range(self.max_idx() + 1) if self.has_entry(num)] entrytypes = [et for et in entrytypes if et in types] if entrytypes: return types[ min([types.index(et) for et in entrytypes])] return None def __setitem__(self, key, data): """Set a slice of Entries. @param key: index or range of Entry numbers to set @type key: slice or int @param data: the data to set these entries to. Normally each entry should be a sequence; if a scalar is provided, it is treated as a single-element list. @type data: scalar or list @raise ValueError: if size of {data} does not match size of L{key} @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. 
""" if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): #Single value, promote everything a dimension idx = (key, key + 1, 1) data = [data] else: idx = key.indices(self.max_idx() + 1) if key.step is None or key.step > 0: #Iterating forward, extend slice to match data if len(data) > len(range(*idx)): idx = (idx[0], idx[0] + idx[2] * len(data), idx[2]) #get, and check, types and sizes for all data #checks first so don't have error after changing half the Entries data_idx = -1 typelist = [] for i in range(*idx): data_idx += 1 if data_idx >= len(data): continue datum = data[data_idx] if datum is None: typelist[i] = (None, None, None) continue (dims, types, elements) = _Hyperslice.types( datum, backward=self._cdf_file.backward) if len(types) <= 0: raise ValueError('Cannot find a matching CDF type.') if len(dims) > 1: raise ValueError('Entries must be scalar or 1D.') elif len(dims) == 1 and isinstance(datum[0], str_classes): raise ValueError('Entry strings must be scalar.') entry_type = None if self.has_entry(i): #If the entry already exists, match its type entry_type = self.type(i) if not entry_type in types: entry_type = None if entry_type is None: #Check other entries for this attribute entry_type = self._check_other_entries(types) if entry_type is None and self.ENTRY_ == const.zENTRY_: #Fall back to zVar type vartype = self._cdf_file[i].type() if vartype in types: entry_type = vartype else: entry_type = types[0] elif entry_type is None: entry_type = types[0] if not entry_type in lib.numpytypedict: raise ValueError('Cannot find a matching numpy type.') typelist.append((dims, entry_type, elements)) data_idx = -1 for i in range(*idx): data_idx += 1 if data_idx >= len(data) or data[data_idx] is None: if self.has_entry(i): del self[i] continue datum = data[data_idx] (dims, entry_type, elements) = typelist[data_idx] self._write_entry(i, datum, entry_type, dims, elements) def __delitem__(self, key): """Delete a slice of Entries. @param key: index or range of Entry numbers to delete @type key: slice or int @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. """ if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): idx = (key, key + 1, 1) else: idx = key.indices(self.max_idx() + 1) for i in range(*idx): self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(i), const.DELETE_, self.ENTRY_) def __iter__(self, current=0): """Iterates over all entries in this Attribute Returns data from one entry at a time until reaches the end. @note: Returned in entry-number order. """ while current <= self.max_idx(): if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current += 1 def __reversed__(self, current=None): """Iterates over all entries in this Attribute Returns data from one entry at a time, starting at end and going to beginning. @note: Returned in entry-number order. """ if current is None: current = self.max_idx() while current >= 0: if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current -= 1 def __len__(self): """Number of Entries for this Attr. NOT same as max Entry number. 
@return: Number of Entries @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_NUMENTRIES_, ctypes.byref(count)) return count.value def __repr__(self): """Returns representation of an attribute Cannot return anything that can be eval'd to create a copy of the attribtute, so just wrap the informal representation in angle brackets. @return: all the data in this attribute @rtype: str """ return '<\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute This is an 'informal' representation in that it cannot be evaluated directly to create an L{Attr}. @return: all the data in this attribute @rtype: str """ if self._cdf_file._opened: return '\n'.join([str(item) for item in self]) else: if isinstance(self._name, str): return 'Attribute "{0}" in closed CDF {1}'.format( self._name, self._cdf_file.pathname) else: return 'Attribute "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self._cdf_file.pathname.decode('ascii')) def insert(self, index, data): """Insert an entry at a particular number Inserts entry at particular number while moving all subsequent entries to one entry number later. Does not close gaps. Parameters ========== index : int index where to put the new entry data : data for the new entry """ max_entry = self.max_idx() if index > max_entry: #Easy case self[index] = data return for i in range(max_entry, index - 1, -1): if self.has_entry(i+1): self.__delitem__(i+1) if self.has_entry(i): self.new(self.__getitem__(i), type=self.type(i), number=i+1) self[index] = data def append(self, data): """Add an entry to end of attribute Puts entry after last defined entry (does not fill gaps) Parameters ========== data : data for the new entry """ self[self.max_idx() + 1] = data def _call(self, *args, **kwargs): """Select this CDF and Attr and call the CDF internal interface @param args: Passed directly to the CDF library interface. @type args: various, see :py:mod:`ctypes`. @return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self._cdf_file._call( const.SELECT_, const.ATTR_, ctypes.c_long(self._cdf_file.attr_num(self._name)[0]), *args, **kwargs) def _entry_len(self, number): """Number of elements in an Entry @param number: number of Entry @type number: int @return: number of elements @rtype: int """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') count = ctypes.c_long(0) self._call( const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_NUMELEMS_, ctypes.byref(count)) return count.value def type(self, number, new_type=None): """Find or change the CDF type of a particular Entry number Parameters ========== number : int number of Entry to check or change Other Parameters ================ new_type type to change this Entry to, from :mod:`~pycdf.const`. Omit to only check type. Returns ======= out : int CDF variable type, see :mod:`~pycdf.const` Notes ===== If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) size = ctypes.c_long(self._entry_len(number)) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATASPEC_, new_type, size, ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') cdftype = ctypes.c_long(0) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATATYPE_, ctypes.byref(cdftype), ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') return cdftype.value def has_entry(self, number): """Check if this attribute has a particular Entry number Parameters ========== number : int number of Entry to check or change Returns ======= out : bool True if ``number`` is a valid entry number; False if not """ status = self._call(const.CONFIRM_, self.ENTRY_EXISTENCE_, ctypes.c_long(number), ignore=(const.NO_SUCH_ENTRY, )) return not status == const.NO_SUCH_ENTRY def max_idx(self): """Maximum index of Entries for this Attr Returns ======= out : int maximum Entry number """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_MAXENTRY_, ctypes.byref(count)) return count.value def new(self, data, type=None, number=None): """Create a new Entry in this Attribute .. note:: If ``number`` is provided and an Entry with that number already exists, it will be overwritten. Parameters ========== data data to put in the Entry Other Parameters ================ type : int type of the new Entry, from :mod:`~pycdf.const` (otherwise guessed from ``data``) number : int Entry number to write, default is lowest available number. """ if number is None: number = 0 while self.has_entry(number): number += 1 (dims, types, elements) = _Hyperslice.types( data, backward=self._cdf_file.backward) if type is None: #Guess based on other entries type = self._check_other_entries(types) if type is None and self.ENTRY_ == const.zENTRY_: #Try to match variable type vartype = self._cdf_file[number].type() if vartype in types: type = vartype if type is None: type = types[0] elif hasattr(type, 'value'): type = type.value self._write_entry(number, data, type, dims, elements) def number(self): """Find the attribute number for this attribute Returns ======= out : int attribute number """ no = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.ATTR_NUMBER_, self._name, ctypes.byref(no)) return no.value def global_scope(self): """Determine scope of this attribute. Returns ======= out : bool True if global (i.e. gAttr), False if zAttr """ return self._cdf_file.attr_num(self._name)[1] def rename(self, new_name): """Rename this attribute Renaming a zAttribute renames it for *all* zVariables in this CDF! 
Parameters ========== new_name : str the new name of the attribute """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_ATTR_NAME_LEN256: raise CDFError(const.BAD_ATTR_NAME) self._call(const.PUT_, const.ATTR_NAME_, enc_name) self._cdf_file.add_attr_to_cache( enc_name, *self._cdf_file.attr_num(self._name)) #still in cache del self._cdf_file._attr_info[self._name] self._name = enc_name def _get_entry(self, number): """Read an Entry associated with this L{Attr} @param number: number of Entry to return @type number: int @return: data from entry numbered L{number} @rtype: list or str """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') #Make a big enough buffer length = self._entry_len(number) cdftype = self.type(number) if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): buff = numpy.empty((), 'S{0}'.format(length), order='C') else: if not cdftype in lib.numpytypedict: raise CDFError(const.BAD_DATA_TYPE) buff = numpy.empty((length,), lib.numpytypedict[cdftype], order='C') buff = numpy.require(buff, requirements=('C', 'A', 'W')) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATA_, buff.ctypes.data_as(ctypes.c_void_p)) #decode if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): if str == bytes or self._raw: #Py2k, leave as bytes result = bytes(buff) else: #Py3k, make unicode result = str(numpy.char.array(buff).decode()) else: if not self._raw: if cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(buff) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(buff) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(buff) else: result = buff else: result = buff if length == 1: result = result[0] return result def _write_entry(self, number, data, cdf_type, dims, elements): """Write an Entry to this Attr. @param number: number of Entry to write @type number: int @param data: data to write @param cdf_type: the CDF type to write, from :py:mod:`pycdf.const` @param dims: dimensions of L{data} @type dims: list @param elements: number of elements in L{data}, 1 unless it is a string @type elements: int """ if len(dims) == 0: n_write = 1 else: n_write = dims[0] if cdf_type in (const.CDF_CHAR.value, const.CDF_UCHAR.value): data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.dtype('S' + str(elements))) n_write = elements elif cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) elif cdf_type in lib.numpytypedict: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=lib.numpytypedict[cdf_type]) else: raise CDFError(const.BAD_DATA_TYPE) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATA_, ctypes.c_long(cdf_type), ctypes.c_long(n_write), data.ctypes.data_as(ctypes.c_void_p)) def _delete(self): """Delete this Attribute Also deletes all Entries associated with it. 
""" self._call(const.DELETE_, const.ATTR_) self._cdf_file.clear_attr_from_cache(self._name) self._name = None class zAttr(Attr): """zAttribute for zVariables within a CDF. .. warning:: Because zAttributes are shared across all variables in a CDF, directly manipulating them may have unexpected consequences. It is safest to operate on zEntries via :class:`zAttrList`. .. note:: When accessing a zAttr, pyCDF exposes only the zEntry corresponding to the associated zVariable. See Also ======== :class:`Attr` """ ENTRY_ = const.zENTRY_ ENTRY_DATA_ = const.zENTRY_DATA_ SCOPE = const.VARIABLE_SCOPE ENTRY_EXISTENCE_ = const.zENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMzENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXzENTRY_ ENTRY_NUMELEMS_ = const.zENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.zENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.zENTRY_DATASPEC_ def insert(self, index, data): """Insert entry at particular index number Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError def append(self, index, data): """Add entry to end of attribute list Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError class gAttr(Attr): """Global Attribute for a CDF Represents a CDF attribute, providing access to the gEntries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. Normally accessed by providing a key to a :class:`gAttrList`: >>> attribute = cdffile.attrs['attribute_name'] >>> first_gentry = attribute[0] Each element of the list is a single gEntry of the appropriate type. The index to the elements is the gEntry number. A gEntry may be either a single string or a 1D array of numerical type. Entries of numerical type (everything but CDF_CHAR and CDF_UCHAR) with a single element are returned as scalars; multiple-element entries are returned as a list. No provision is made for accessing below the entry level; the whole list is returned at once (but Python's slicing syntax can be used to extract individual items from that list.) Multi-dimensional slicing is *not* supported; an entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry gEntries are *not* necessarily contiguous; a gAttribute may have an entry 0 and entry 2 without an entry 1. :meth:`~Attr.len` will return the *number* of gEntries; use :meth:`~Attr.max_idx` to find the highest defined gEntry number and :meth:`~Attr.has_entry` to determine if a particular gEntry number exists. Iterating over all entries is also supported:: >>> entrylist = [entry for entry in attribute] Deleting gEntries will leave a "hole": >>> attribute[0:3] = [1, 2, 3] >>> del attribute[1] >>> attribute.has_entry(1) False >>> attribute.has_entry(2) True >>> print attribute[0:3] [1, None, 3] Multi-element slices over nonexistent gEntries will return ``None`` where no entry exists. Single-element indices for nonexistent gEntries will raise ``IndexError``. Assigning ``None`` to a gEntry will delete it. 
When assigning to a gEntry, the type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing gEntry of the same number in this gAttribute #. other gEntries in this gAttribute #. data-matching constraints described in :meth:`CDF.new`. See Also ======== :class:`Attr` """ ENTRY_ = const.gENTRY_ ENTRY_DATA_ = const.gENTRY_DATA_ SCOPE = const.GLOBAL_SCOPE ENTRY_EXISTENCE_ = const.gENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMgENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXgENTRY_ ENTRY_NUMELEMS_ = const.gENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.gENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.gENTRY_DATASPEC_ class AttrList(MutableMapping): """Object representing a list of attributes. .. warning:: This class should not be used directly, but only via its subclasses, :class:`gAttrList` and :class:`zAttrList`. Methods listed here are safe to use from the subclasses. .. autosummary:: ~AttrList.clone ~AttrList.copy ~AttrList.from_dict ~AttrList.new ~AttrList.rename .. automethod:: clone .. automethod:: copy .. automethod:: from_dict .. automethod:: new .. automethod:: rename """ def __init__(self, cdf_file, special_entry=None): """Initialize the attribute collection @param cdf_file: CDF these attributes are in @type cdf_file: :py:class:`pycdf.CDF` @param special_entry: callable which returns a "special" entry number, used to limit results for zAttrs to those which match the zVar (i.e. the var number) @type special_entry: callable """ self._cdf_file = cdf_file self.special_entry = special_entry def __getitem__(self, name): """Find an Attribute by name @param name: name of the Attribute to return @type name: str @return: attribute named L{name} @rtype: L{Attr} @raise KeyError: if there is no attribute named L{name} @raise CDFError: other errors in CDF library """ try: attrib = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attrib.global_scope() != self.global_scope: raise KeyError(name + ': no ' + self.attr_name + ' by that name.') return attrib def __setitem__(self, name, data): """Create an Attribute or change its entries @param name: name of Attribute to change @type name: str @param data: Entries to populate this Attribute with. Any existing Entries will be deleted! Another C{Attr} may be specified, in which case all its entries are copied. @type data: scalar, list, or L{Attr} """ if isinstance(data, AttrList): if name in self: del self[name] attr = self._get_or_create(name) for entryno in range(data.max_idx() + 1): #max_idx is the highest valid entry number, inclusive if data.has_entry(entryno): attr.new(data[entryno], data.type(entryno), entryno) else: attr = self._get_or_create(name) if isinstance(data, str_classes): data = [data] else: try: junk = len(data) except TypeError: data = [data] attr[:] = data del attr[len(data):] def __delitem__(self, name): """Delete an Attribute (and all its entries) @param name: name of Attribute to delete @type name: str """ try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) attr._delete() def __iter__(self, current=0): """Iterates over all Attr in this CDF or variable Returns name of one L{Attr} at a time until reaches the end. @note: Returned in number order. 
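A sketch of typical use (``cdffile`` is a hypothetical open CDF; iteration yields attribute names):
>>> names = [name for name in cdffile.attrs]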
""" count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) while current < count.value: candidate = self.AttrType(self._cdf_file, current) if candidate.global_scope() == self.global_scope: if self.special_entry is None or \ candidate.has_entry(self.special_entry()): if str == bytes: value = yield(candidate._name) else: value = yield(candidate._name.decode()) if value != None: current = self[value].number() current += 1 def __repr__(self): """Returns representation of attribute list Cannot return anything that can be eval'd to create a copy of the list, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<' + self.__class__.__name__ + ':\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute list This is an 'informal' representation in that it cannot be evaluated directly to create an L{AttrList}. @return: all the data in this list of attributes @rtype: str """ if self._cdf_file._opened: return '\n'.join([key + ': ' + ( ('\n' + ' ' * (len(key) + 2)).join( [str(value[i]) + ' [' + lib.cdftypenames[value.type(i)] + ']' for i in range(value.max_idx() + 1) if value.has_entry(i)]) if isinstance(value, Attr) else str(value) + ' [' + lib.cdftypenames[self.type(key)] + ']' ) for (key, value) in sorted(self.items())]) else: if isinstance(self._cdf_file.pathname, str): return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname) else: return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname.decode('ascii')) def clone(self, master, name=None, new_name=None): """ Clones another attribute list, or one attribute from it, into this list. Parameters ========== master : AttrList the attribute list to copy from. This can be any dict-like object. Other Parameters ================ name : str (optional) name of attribute to clone (default: clone entire list) new_name : str (optional) name of the new attribute, default ``name`` """ if name is None: self._clone_list(master) else: self._clone_attr(master, name, new_name) def copy(self): """ Create a copy of this attribute list Returns ======= out : dict copy of the entries for all attributes in this list """ return dict((key, value[:] if isinstance(value, Attr) else value) for (key, value) in self.items()) def new(self, name, data=None, type=None): """ Create a new Attr in this AttrList Parameters ========== name : str name of the new Attribute Other Parameters ================ data data to put into the first entry in the new Attribute type CDF type of the first entry from :mod:`~pycdf.const`. Only used if data are specified. Raises ====== KeyError : if the name already exists in this list """ if name in self: raise KeyError(name + ' already exists.') #A zAttr without an Entry in this zVar will be a "get" not "create" attr = self._get_or_create(name) if data is not None: if self.special_entry is None: attr.new(data, type) else: attr.new(data, type, self.special_entry()) def rename(self, old_name, new_name): """ Rename an attribute in this list Renaming a zAttribute renames it for *all* zVariables in this CDF! Parameters ========== old_name : str the current name of the attribute new_name : str the new name of the attribute """ AttrList.__getitem__(self, old_name).rename(new_name) def from_dict(self, in_dict): """ Fill this list of attributes from a dictionary .. deprecated:: 0.1.5 Use :meth:`~pycdf.AttrList.clone` instead; it supports cloning from dictionaries. 
Parameters ========== in_dict : dict Attribute list is populated entirely from this dictionary; all existing attributes are deleted. """ warnings.warn("from_dict is deprecated and will be removed. Use clone.", DeprecationWarning) for k in in_dict: self[k] = in_dict[k] for k in list(self): if not k in in_dict: del self[k] def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{AttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name self[new_name] = master[name] def _clone_list(self, master): """Clones this attribute list from another @param master: the attribute list to copy from @type master: L{AttrList} """ for name in master: self._clone_attr(master, name) for name in list(self): #Can't iterate over a list we're changing if not name in master: del self[name] def _get_or_create(self, name): """Retrieve L{Attr} or create it if it doesn't exist @param name: name of the attribute to look up or create @type name: str @return: attribute with this name @rtype: L{Attr} """ attr = None try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status != const.NO_SUCH_ATTR: raise if attr is None: attr = self.AttrType(self._cdf_file, name, True) elif attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) return attr class gAttrList(AttrList): """ Object representing *all* the gAttributes in a CDF. Normally accessed as an attribute of an open :class:`CDF`: >>> global_attribs = cdffile.attrs Appears as a dictionary: keys are attribute names; each value is an attribute represented by a :class:`gAttr` object. To access the global attribute TEXT: >>> text_attr = cdffile.attrs['TEXT'] See Also ======== :class:`AttrList` """ AttrType = gAttr attr_name = 'gAttribute' global_scope = True def __len__(self): """ Number of gAttributes in this CDF Returns ======= out : int number of gAttributes in the CDF """ count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMgATTRS_, ctypes.byref(count)) return count.value class zAttrList(AttrList): """Object representing *all* the zAttributes in a zVariable. Normally accessed as an attribute of a :class:`Var` in an open CDF: >>> epoch_attribs = cdffile['Epoch'].attrs Appears as a dictionary: keys are attribute names, values are the value of the zEntry associated with the appropriate zVariable. Each vAttribute in a CDF may only have a *single* entry associated with each variable. The entry may be a string, a single numerical value, or a series of numerical values. Entries with multiple values are returned as an entire list; direct access to the individual elements is not possible. Example: finding the first dependency of (ISTP-compliant) variable ``Flux``: >>> print cdffile['Flux'].attrs['DEPEND_0'] zAttributes are shared among zVariables, one zEntry allowed per zVariable. (pyCDF hides this detail.) Deleting the last zEntry for a zAttribute will delete the underlying zAttribute. zEntries are created and destroyed by the usual dict methods on the zAttrlist: >>> epoch_attribs['new_entry'] = [1, 2, 4] #assign a list to new zEntry >>> del epoch_attribs['new_entry'] #delete the zEntry The type of the zEntry is guessed from data provided. 
The type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing zEntry corresponding to this zVar #. other zEntries in this zAttribute #. the type of this zVar #. data-matching constraints described in :py:meth:`CDF.new` See Also ======== :class:`AttrList` """ AttrType = zAttr attr_name = 'zAttribute' global_scope = False def __init__(self, zvar): """Initialize the attribute collection @param zvar: zVariable these attributes are in @param zvar: :py:class:`pycdf.Var` """ super(zAttrList, self).__init__(zvar.cdf_file, zvar._num) self._zvar = zvar def __getitem__(self, name): """Find an zEntry by name @param name: name of the zAttribute to return @type name: str @return: attribute named L{name} @rtype: L{zAttr} @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if attrib.has_entry(zvar_num): attrib._raw = self._zvar._raw return attrib[zvar_num] else: raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) def __delitem__(self, name): """Delete an zEntry by name @param name: name of the zEntry to delete @type name: str @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library @note: If this is the only remaining entry, the Attribute will be deleted. """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(str(name) + ': no such attribute for variable ' + str(self._zvar._name)) del attrib[zvar_num] if len(attrib) == 0: attrib._delete() def __setitem__(self, name, data): """Sets a zEntry by name The type of the zEntry is guessed from L{data}. The type is chosen to match the data; subject to that constraint, it will try to match (in order): 1. existing zEntry corresponding to this zVar 2. other zEntries in this zAttribute 3. the type of this zVar 4. data-matching constraints described in L{_Hyperslice.types} @param name: name of zAttribute; zEntry for this zVariable will be set in zAttribute by this name @type name: str @raise CDFError: errors in CDF library @raise ValueError: if unable to find a valid CDF type matching L{data}, or if L{data} is the wrong dimensions. """ try: attr = super(zAttrList, self).__getitem__(name) except KeyError: attr = zAttr(self._cdf_file, name, True) attr._raw = self._zvar._raw attr[self._zvar._num()] = data def __len__(self): """Number of zAttributes in this variable @return: number of zAttributes in the CDF which have entries for this variable. @rtype: int """ length = 0 count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) current = 0 while current < count.value: candidate = zAttr(self._cdf_file, current) if not candidate.global_scope(): if candidate.has_entry(self._zvar._num()): length += 1 current += 1 return length def type(self, name, new_type=None): """Find or change the CDF type of a zEntry in this zVar @param name: name of the zAttr to check or change @type name: str @param new_type: type to change it to, see :py:mod:`pycdf.const` @type new_type: ctypes.c_long @return: CDF variable type, see :py:mod:`pycdf.const` @rtype: int @note: If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) return attrib.type(zvar_num, new_type) def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{zAttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name if new_name in self: del self[new_name] self.new(new_name, master[name], master.type(name) if hasattr(master, 'type') else None)
def set_backward(self, backward=True):
        """
        Set backward compatibility mode for new CDFs

        Unless backward compatible mode is set, CDF files created by
        the version 3 library can not be read by V2.

        Parameters
        ==========
        backward : boolean
            Set backward compatible mode if True; clear it if False.

        Raises
        ======
        ValueError : if backward=False and underlying CDF library is V2
        """
        if self.version[0] < 3:
            if not backward:
                raise ValueError(
                    'Cannot disable backward-compatible mode for CDF version 2.')
            else:
                return
        self._library.CDFsetFileBackward(const.BACKWARDFILEon if backward
                                         else const.BACKWARDFILEoff)
618
641
#!/usr/bin/env python # -*- coding: utf-8 -*- """ das developers note: This a is modification of the original SpacePy pycdf package. All refereneces to the greater spacepy package have been removed to create a small standalone module. --cwp 2018-10-18 The libcdf.so location code has been changed to find the version installed in anaconda. --cwp 2020-04-06 This package provides a Python interface to the Common Data Format (CDF) library used for many NASA missions, available at http://cdf.gsfc.nasa.gov/. It is targeted at Python 2.6+ and should work without change on either Python 2 or Python 3. The interface is intended to be 'pythonic' rather than reproducing the C interface. To open or close a CDF and access its variables, see the :class:`CDF` class. Accessing data within the variables is via the :class:`Var` class. The :data:`lib` object provides access to some routines that affect the functionality of the library in general. The :mod:`~pycdf.const` module contains constants useful for accessing the underlying library. Authors: Jon Niehof Institution: University of New Hampshire Contact: [email protected] Copyright 2010-2015 Los Alamos National Security, LLC. """ __contact__ = 'Jon Niehof, [email protected]' try: from collections.abc import MutableMapping, MutableSequence except ImportError: from collections import MutableMapping, MutableSequence import ctypes import ctypes.util import datetime import operator import os import os.path import shutil import sys import tempfile import warnings import weakref import numpy import numpy.ma #Import const AFTER library loaded, so failed load doesn't leave half-imported #from . import const try: str_classes = (str, bytes, unicode) except NameError: str_classes = (str, bytes) class Library(object): """ Abstraction of the base CDF C library and its state. Not normally intended for end-user use. An instance of this class is created at package load time as the :data:`~pycdf.lib` variable, providing access to the underlying C library if necessary. The CDF library itself is described in section 2.1 of the CDF user's guide, as well as the CDF C reference manual. Calling the C library directly requires knowledge of :mod:`ctypes`. Instantiating this object loads the C library, see :doc:`/pycdf` docs for details. .. autosummary:: ~Library.call ~Library.check_status ~Library.datetime_to_epoch ~Library.datetime_to_epoch16 ~Library.datetime_to_tt2000 ~Library.epoch_to_datetime ~Library.epoch_to_epoch16 ~Library.epoch_to_num ~Library.epoch_to_tt2000 ~Library.epoch16_to_datetime ~Library.epoch16_to_epoch ~Library.epoch16_to_tt2000 ~Library.set_backward supports_int8 ~Library.tt2000_to_datetime ~Library.tt2000_to_epoch ~Library.tt2000_to_epoch16 v_datetime_to_epoch v_datetime_to_epoch16 v_datetime_to_tt2000 v_epoch_to_datetime v_epoch_to_tt2000 v_epoch16_to_datetime v_epoch16_to_tt2000 v_tt2000_to_datetime v_tt2000_to_epoch v_tt2000_to_epoch16 libpath version .. automethod:: call .. automethod:: check_status .. automethod:: datetime_to_epoch .. automethod:: datetime_to_epoch16 .. automethod:: datetime_to_tt2000 .. automethod:: epoch_to_datetime .. automethod:: epoch_to_epoch16 .. automethod:: epoch_to_num .. automethod:: epoch_to_tt2000 .. automethod:: epoch16_to_datetime .. automethod:: epoch16_to_epoch .. automethod:: epoch16_to_tt2000 .. automethod:: set_backward .. attribute:: supports_int8 True if this library supports INT8 and TIME_TT2000 types; else False. .. automethod:: tt2000_to_datetime .. automethod:: tt2000_to_epoch .. 
automethod:: tt2000_to_epoch16 .. method:: v_datetime_to_epoch(datetime) A vectorized version of :meth:`datetime_to_epoch` which takes a numpy array of datetimes as input and returns an array of epochs. .. method:: v_datetime_to_epoch16(datetime) A vectorized version of :meth:`datetime_to_epoch16` which takes a numpy array of datetimes as input and returns an array of epoch16. .. method:: v_datetime_to_tt2000(datetime) A vectorized version of :meth:`datetime_to_tt2000` which takes a numpy array of datetimes as input and returns an array of TT2000. .. method:: v_epoch_to_datetime(epoch) A vectorized version of :meth:`epoch_to_datetime` which takes a numpy array of epochs as input and returns an array of datetimes. .. method:: v_epoch_to_tt2000(epoch) A vectorized version of :meth:`epoch_to_tt2000` which takes a numpy array of epochs as input and returns an array of tt2000s. .. method:: v_epoch16_to_datetime(epoch0, epoch1) A vectorized version of :meth:`epoch16_to_datetime` which takes a numpy array of epoch16 as input and returns an array of datetimes. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_epoch16_to_tt2000(epoch16) A vectorized version of :meth:`epoch16_to_tt2000` which takes a numpy array of epoch16 as input and returns an array of tt2000s. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_tt2000_to_datetime(tt2000) A vectorized version of :meth:`tt2000_to_datetime` which takes a numpy array of tt2000 as input and returns an array of datetimes. .. method:: v_tt2000_to_epoch(tt2000) A vectorized version of :meth:`tt2000_to_epoch` which takes a numpy array of tt2000 as input and returns an array of epochs. .. method:: v_tt2000_to_epoch16(tt2000) A vectorized version of :meth:`tt2000_to_epoch16` which takes a numpy array of tt2000 as input and returns an array of epoch16. .. attribute:: libpath The path where pycdf found the CDF C library, potentially useful in debugging. If this contains just the name of a file (with no path information), then the system linker found the library for pycdf. On Linux, ``ldconfig -p`` may be useful for displaying the system's library resolution. .. attribute:: version Version of the CDF library, (version, release, increment, subincrement) """ def __init__(self, libpath=None, library=None): """Load the CDF C library. Searches for the library in the order: 1. Appropriately-named file in CDF_LIB 2. Appropriately-named file in CDF_BASE 3. Standard library search path @raise CDFError: BAD_DATA_TYPE if can't map types properly """ if not 'CDF_TMP' in os.environ: os.environ['CDF_TMP'] = tempfile.gettempdir() if not library: if not libpath: self.libpath, self._library = self._find_lib() if self._library is None: raise Exception(( 'Cannot load CDF C library; checked {0}. 
' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(self.libpath))) else: self._library = ctypes.CDLL(libpath) self.libpath = libpath else: self._library = library self.libpath = libpath self._library.CDFlib.restype = ctypes.c_long #commonly used, so set it up here self._library.EPOCHbreakdown.restype = ctypes.c_long self._library.computeEPOCH.restype = ctypes.c_double self._library.computeEPOCH.argtypes = [ctypes.c_long] * 7 self._library.computeEPOCH16.restype = ctypes.c_double self._library.computeEPOCH16.argtypes = [ctypes.c_long] * 10 + \ [ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDFsetFileBackward'): self._library.CDFsetFileBackward.restype = None self._library.CDFsetFileBackward.argtypes = [ctypes.c_long] #Map old name to the 3.7.1+ name if not hasattr(self._library, 'computeTT2000') \ and hasattr(self._library, 'CDF_TT2000_from_UTC_parts'): self._library.computeTT2000 \ = self._library.CDF_TT2000_from_UTC_parts if hasattr(self._library, 'computeTT2000'): self._library.computeTT2000.restype = ctypes.c_longlong self._library.computeTT2000.argtypes = \ [ctypes.c_double] *9 #Map old name to the 3.7.1+ name if not hasattr(self._library, 'breakdownTT2000') \ and hasattr(self._library, 'CDF_TT2000_to_UTC_parts'): self._library.breakdownTT2000 \ = self._library.CDF_TT2000_to_UTC_parts if hasattr(self._library, 'breakdownTT2000'): self._library.breakdownTT2000.restype = None self._library.breakdownTT2000.argtypes = \ [ctypes.c_longlong] + [ctypes.POINTER(ctypes.c_double)] * 9 if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH'): self._library.CDF_TT2000_to_UTC_EPOCH.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH.argtypes = [ctypes.c_longlong] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH'): self._library.CDF_TT2000_from_UTC_EPOCH.restype = ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH.argtypes = [ctypes.c_double] if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH16'): self._library.CDF_TT2000_to_UTC_EPOCH16.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH16.argtypes = \ [ctypes.c_longlong, ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH16'): self._library.CDF_TT2000_from_UTC_EPOCH16.restype = \ ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH16.argtypes = \ [ctypes.POINTER(ctypes.c_double * 2)] #Get CDF version information ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) sub = ctypes.c_char(b' ') self.call(const.GET_, const.LIB_VERSION_, ctypes.byref(ver), const.GET_, const.LIB_RELEASE_, ctypes.byref(rel), const.GET_, const.LIB_INCREMENT_, ctypes.byref(inc), const.GET_, const.LIB_subINCREMENT_, ctypes.byref(sub)) ver = ver.value rel = rel.value inc = inc.value sub = sub.value self.version = (ver, rel, inc, sub) self._del_middle_rec_bug = ver < 3 or (ver == 3 and (rel < 4 or (rel == 4 and inc < 1))) self.supports_int8 = (ver > 3 or (ver == 3 and rel >=4)) self.cdftypenames = {const.CDF_BYTE.value: 'CDF_BYTE', const.CDF_CHAR.value: 'CDF_CHAR', const.CDF_INT1.value: 'CDF_INT1', const.CDF_UCHAR.value: 'CDF_UCHAR', const.CDF_UINT1.value: 'CDF_UINT1', const.CDF_INT2.value: 'CDF_INT2', const.CDF_UINT2.value: 'CDF_UINT2', const.CDF_INT4.value: 'CDF_INT4', const.CDF_UINT4.value: 'CDF_UINT4', const.CDF_INT8.value: 'CDF_INT8', const.CDF_FLOAT.value: 'CDF_FLOAT', const.CDF_REAL4.value: 'CDF_REAL4', const.CDF_DOUBLE.value: 'CDF_DOUBLE', const.CDF_REAL8.value: 'CDF_REAL8', const.CDF_EPOCH.value: 'CDF_EPOCH', 
const.CDF_EPOCH16.value: 'CDF_EPOCH16', const.CDF_TIME_TT2000.value: 'CDF_TIME_TT2000', } self.numpytypedict = {const.CDF_BYTE.value: numpy.int8, const.CDF_CHAR.value: numpy.int8, const.CDF_INT1.value: numpy.int8, const.CDF_UCHAR.value: numpy.uint8, const.CDF_UINT1.value: numpy.uint8, const.CDF_INT2.value: numpy.int16, const.CDF_UINT2.value: numpy.uint16, const.CDF_INT4.value: numpy.int32, const.CDF_UINT4.value: numpy.uint32, const.CDF_INT8.value: numpy.int64, const.CDF_FLOAT.value: numpy.float32, const.CDF_REAL4.value: numpy.float32, const.CDF_DOUBLE.value: numpy.float64, const.CDF_REAL8.value: numpy.float64, const.CDF_EPOCH.value: numpy.float64, const.CDF_EPOCH16.value: numpy.dtype((numpy.float64, 2)), const.CDF_TIME_TT2000.value: numpy.int64, } self.timetypes = [const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value] if not self.supports_int8: del self.cdftypenames[const.CDF_INT8.value] del self.numpytypedict[const.CDF_INT8.value] del self.cdftypenames[const.CDF_TIME_TT2000.value] del self.numpytypedict[const.CDF_TIME_TT2000.value] elif sys.platform.startswith('linux') \ and os.uname()[4].startswith('arm') \ and hasattr(self._library, 'computeTT2000') \ and self._library.computeTT2000( 2010, 1, 1, 0, 0, 0, 0, 0, 0) != 315576066184000000: #TT2000 call failed, so probably need to type-pun #double arguments to variadic functions. #Calling convention for non-variadic functions with floats #is unique, but convention for ints is same as variadic. #So type-pun arguments to integers to force that calling #convention. if ctypes.sizeof(ctypes.c_longlong) != \ ctypes.sizeof(ctypes.c_double): warnings.warn('ARM with unknown type sizes; ' 'TT2000 functions will not work.') else: self._library.computeTT2000.argtypes = \ [ctypes.c_longlong] * 9 c_ll_p = ctypes.POINTER(ctypes.c_longlong) if self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( 2010)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) != 315576066184000000: warnings.warn('ARM with unknown calling convention; ' 'TT2000 functions will not work.') self.datetime_to_tt2000 = self._datetime_to_tt2000_typepunned v_epoch16_to_datetime = numpy.frompyfunc( self.epoch16_to_datetime, 2, 1) self.v_epoch16_to_datetime = \ lambda x: v_epoch16_to_datetime(x[..., 0], x[..., 1]) self.v_epoch_to_datetime = numpy.frompyfunc( self.epoch_to_datetime, 1, 1) self.v_tt2000_to_datetime = numpy.frompyfunc( self.tt2000_to_datetime, 1, 1) self.v_datetime_to_epoch = numpy.vectorize( self.datetime_to_epoch, otypes=[numpy.float64]) v_datetime_to_epoch16 = numpy.frompyfunc( self.datetime_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. 
We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_datetime_to_epoch16(x): retval = numpy.require(v_datetime_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_datetime_to_epoch16 = _v_datetime_to_epoch16 self.v_datetime_to_tt2000 = numpy.vectorize( self.datetime_to_tt2000, otypes=[numpy.int64]) self.v_epoch_to_tt2000 = numpy.vectorize( self.epoch_to_tt2000, otypes=[numpy.int64]) self.v_tt2000_to_epoch = numpy.vectorize( self.tt2000_to_epoch, otypes=[numpy.float64]) v_epoch16_to_tt2000 = numpy.frompyfunc( self.epoch16_to_tt2000, 2, 1) self.v_epoch16_to_tt2000 = \ lambda x: v_epoch16_to_tt2000(x[..., 0], x[..., 1]) v_tt2000_to_epoch16 = numpy.frompyfunc( self.tt2000_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_tt2000_to_epoch16(x): retval = numpy.require(v_tt2000_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_tt2000_to_epoch16 = _v_tt2000_to_epoch16 if not self.supports_int8: self.datetime_to_tt2000 = self._bad_tt2000 self.tt2000_to_datetime = self._bad_tt2000 self.v_datetime_to_tt2000 = self._bad_tt2000 self.v_tt2000_to_datetime = self._bad_tt2000 self.epoch_to_tt2000 = self._bad_tt2000 self.v_epoch_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch = self._bad_tt2000 self.v_tt2000_to_epoch = self._bad_tt2000 self.epoch_16_to_tt2000 = self._bad_tt2000 self.v_epoch16_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch16 = self._bad_tt2000 self.v_tt2000_to_epoch16 = self._bad_tt2000 #Default to V2 CDF self.set_backward(True) @staticmethod def _find_lib(): """ Search for the CDF library Searches in likely locations for CDF libraries and attempts to load them. Stops at first successful load and, if fails, reports all the files that were tried as libraries. Returns ======= out : tuple This is either (path to library, loaded library) or, in the event of failure, (None, list of libraries tried) """ failed = [] for libpath in Library._lib_paths(): try: lib = ctypes.CDLL(libpath) except: failed.append(libpath) else: return libpath, lib return (failed, None) @staticmethod def _lib_paths(): """Find candidate paths for the CDF library Does not check that the library is actually in any particular directory, just returns a list of possible locations, in priority order. Returns ======= out : generator of str paths that look like the CDF library """ #What the library might be named names = { 'win32': ['cdf.dll'], 'darwin': ['libcdf.dylib', 'cdf.dylib', 'libcdf.so'], 'linux2': ['libcdf.so'], 'linux': ['libcdf.so'], } names = names.get(sys.platform, ['libcdf.so']) #All existing CDF-library-like paths within a directory search_dir = lambda x: \ [os.path.join(x, fname) for fname in names if os.path.exists(os.path.join(x, fname))] # Only use anaconda locations... # Defined during builds ... if 'PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['PREFIX'], 'lib')): yield p # defined when conda is activated ... 
if 'CONDA_PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'lib')): yield p # Special subdirectory for anaconda unix packages on windows if 'LIBRARY_BIN' in os.environ: for p in search_dir(os.environ['LIBRARY_BIN']): yield p ctypespath = ctypes.util.find_library( 'cdf.dll' if sys.platform == 'win32' else 'cdf') if ctypespath: yield ctypespath def check_status(self, status, ignore=()): """ Raise exception or warning based on return status of CDF call Parameters ========== status : int status returned by the C library Other Parameters ================ ignore : sequence of ctypes.c_long CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. (Default none). Raises ====== CDFError : if status < CDF_WARN, indicating an error Warns ===== CDFWarning : if CDF_WARN <= status < CDF_OK, indicating a warning. Returns ======= out : int status (unchanged) """ if status == const.CDF_OK or status in ignore: return status if status < const.CDF_WARN: raise CDFError(status) else: warning = CDFWarning(status) warning.warn() return status def call(self, *args, **kwargs): """ Call the CDF internal interface Passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with :meth:`check_status`. Terminal NULL is automatically added to args. Parameters ========== args : various, see :mod:`ctypes` Passed directly to the CDF library interface. Useful constants are defined in the :mod:`~pycdf.const` module. Other Parameters ================ ignore : sequence of CDF statuses sequence of CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. Returns ======= out : int CDF status from the library Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ if 'ignore' in kwargs: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) ), kwargs['ignore']) else: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) )) def set_backward(self, backward=True): """ Set backward compatibility mode for new CDFs Unless backward compatible mode is set, CDF files created by the version 3 library can not be read by V2. Parameters ========== backward : boolean Set backward compatible mode if True; clear it if False. Raises ====== ValueError : if backward=False and underlying CDF library is V2 """ if self.version[0] < 3: if not backward: raise ValueError( 'Cannot disable backward-compatible mode for CDF version 2.') else: return self._library.CDFsetFileBackward(const.BACKWARDFILEon if backward else const.BACKWARDFILEoff) def epoch_to_datetime(self, epoch): """ Converts a CDF epoch value to a datetime Parameters ========== epoch : float epoch value from CDF Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
See Also ======== v_epoch_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) self._library.EPOCHbreakdown(ctypes.c_double(epoch), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999000) else: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, msec.value * 1000) def datetime_to_epoch(self, dt): """ Converts a Python datetime to a CDF Epoch value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : float epoch corresponding to dt See Also ======== v_datetime_to_epoch """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) micro = dt.microsecond % 1000 if micro >= 500 and dt.year < 9999: dt += datetime.timedelta(0, 0, 1000) return self._library.computeEPOCH(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000)) def epoch16_to_datetime(self, epoch0, epoch1): """ Converts a CDF epoch16 value to a datetime .. note:: The call signature has changed since SpacePy 0.1.2. Formerly this method took a single argument with two values; now it requires two arguments (one for each value). To convert existing code, replace ``epoch16_to_datetime(epoch)`` with ``epoch16_to_datetime(*epoch)``. Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. See Also ======== v_epoch16_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) usec = ctypes.c_long(0) nsec = ctypes.c_long(0) psec = ctypes.c_long(0) self._library.EPOCH16breakdown((ctypes.c_double * 2)(epoch0, epoch1), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec), ctypes.byref(psec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) micro = int(float(msec.value) * 1000 + float(usec.value) + float(nsec.value) / 1000 + float(psec.value) / 1e6 + 0.5) if micro < 1000000: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_epoch16(self, dt): """ Converts a Python datetime to a CDF Epoch16 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : list of float epoch16 corresponding to dt See Also ======== v_datetime_to_epoch16 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) #Default to "illegal epoch" epoch16 = (ctypes.c_double * 2)(-1., -1.) 
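#computeEPOCH16 fills the two-element buffer in place; a truthy (nonzero)
#return status is treated as failure below, in which case the illegal-epoch
#pair (-1., -1.) is handed back to the caller.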
if self._library.computeEPOCH16(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0, 0, epoch16): return (-1., -1.) #Failure, so illegal epoch return (epoch16[0], epoch16[1]) def epoch_to_epoch16(self, epoch): """ Converts a CDF EPOCH to a CDF EPOCH16 value Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : (double, double) EPOCH16 corresponding to epoch """ e = numpy.require(epoch, numpy.float64) s = numpy.trunc(e / 1000.0) #ugly numpy stuff, probably a better way.... res = numpy.hstack((s, (e - s * 1000.0) * 1e9)) if len(res) <= 2: return res newshape = list(res.shape[0:-2]) newshape.append(res.shape[-1] // 2) newshape.append(2) return numpy.rollaxis(res.reshape(newshape), -1, -2) def epoch_to_num(self, epoch): """ Convert CDF EPOCH to matplotlib number. Same output as :func:`~matplotlib.dates.date2num` and useful for plotting large data sets without converting the times through datetime. Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : double Floating point number representing days since 0001-01-01. """ #date2num day 1 is 1/1/1 00UT #epoch 1/1/1 00UT is 31622400000.0 (millisecond) return (epoch - 31622400000.0) / (24 * 60 * 60 * 1000.0) + 1.0 def epoch16_to_epoch(self, epoch16): """ Converts a CDF EPOCH16 to a CDF EPOCH value Parameters ========== epoch16 : (double, double) EPOCH16 to convert. Lists and numpy arrays are acceptable. LAST dimension should be 2: the two pairs of EPOCH16 Returns ======= out : double EPOCH corresponding to epoch16 """ e = numpy.require(epoch16, numpy.float64) return e[..., 0] * 1000.0 + numpy.round(e[..., 1] / 1e9) def tt2000_to_datetime(self, tt2000): """ Converts a CDF TT2000 value to a datetime .. note:: Although TT2000 values support leapseconds, Python's datetime object does not. Any times after 23:59:59.999999 will be truncated to 23:59:59.999999. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
See Also ======== v_tt2000_to_datetime """ yyyy = ctypes.c_double(0) mm = ctypes.c_double(0) dd = ctypes.c_double(0) hh = ctypes.c_double(0) min = ctypes.c_double(0) sec = ctypes.c_double(0) msec = ctypes.c_double(0) usec = ctypes.c_double(0) nsec = ctypes.c_double(0) self._library.breakdownTT2000( ctypes.c_longlong(tt2000), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) sec = int(sec.value) if sec >= 60: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), 59, 999999) micro = int(msec.value * 1000 + usec.value + nsec.value / 1000 + 0.5) if micro < 1000000: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_tt2000(self, dt): """ Converts a Python datetime to a CDF TT2000 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0) def _datetime_to_tt2000_typepunned(self, dt): """ Converts a Python datetime to a CDF TT2000 value Typepunned version that passes doubles as longlongs, to get around ARM calling convention oddness. Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ c_ll_p = ctypes.POINTER(ctypes.c_longlong) if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( dt.year)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.month)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.day)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.hour)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.minute)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.second)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond // 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond % 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) def epoch_to_tt2000(self, epoch): """ Converts a CDF EPOCH to a CDF TT2000 value Parameters ========== epoch : double EPOCH to convert Returns ======= out : int tt2000 corresponding to epoch See Also ======== v_epoch_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH(epoch) def tt2000_to_epoch(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH .. note:: Although TT2000 values support leapseconds, CDF EPOCH values do not. 
Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double EPOCH corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch """ return self._library.CDF_TT2000_to_UTC_EPOCH(tt2000) def epoch16_to_tt2000(self, epoch0, epoch1): """ Converts a CDF epoch16 value to TT2000 .. note:: Because TT2000 does not support picoseconds, the picoseconds value in epoch is ignored (i.e., truncated.) Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : long TT2000 corresponding to epoch. See Also ======== v_epoch16_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH16( (ctypes.c_double * 2)(epoch0, epoch1)) def tt2000_to_epoch16(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH16 .. note:: Although TT2000 values support leapseconds, CDF EPOCH16 values do not. Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double, double EPOCH16 corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch16 """ #Default to "illegal epoch" if isn't populated epoch16 = (ctypes.c_double * 2)(-1., -1.) if self._library.CDF_TT2000_to_UTC_EPOCH16(tt2000, epoch16): return (-1., -1.) #Failure; illegal epoch return (epoch16[0], epoch16[1]) def _bad_tt2000(*args, **kwargs): """Convenience function for complaining that TT2000 not supported""" raise NotImplementedError( 'TT2000 functions require CDF library 3.4.0 or later') def download_library(): """Download and install the CDF library""" if sys.platform != 'win32': raise NotImplementedError( 'CDF library install only supported on Windows') try: import html.parser as HTMLParser except ImportError: import HTMLParser #https://stackoverflow.com/questions/1713038/super-fails-with-error-typeerror-argument-1-must-be-type-not-classobj class LinkParser(HTMLParser.HTMLParser, object): def __init__(self, *args, **kwargs): self.links_found = [] super(LinkParser, self).__init__(*args, **kwargs) def handle_starttag(self, tag, attrs): if tag != 'a' or attrs[0][0] != 'href': return self.links_found.append(attrs[0][1]) import re import subprocess try: import urllib.request as u except ImportError: import urllib as u # Removed reference to spacepy #import spacepy #if spacepy.config.get('user_agent', None): # class AppURLopener(u.FancyURLopener): # version = spacepy.config['user_agent'] # u._urlopener = AppURLopener() baseurl = 'https://spdf.sci.gsfc.nasa.gov/pub/software/cdf/dist/' url = u.urlopen(baseurl) listing = url.read() url.close() p = LinkParser() p.feed(listing) cdfdist = [l for l in p.links_found if re.match('^cdf3\d_\d(?:_\d)?/$', l)] if not cdfdist: raise RuntimeError( "Couldn't find CDF distribution directory to download") cdfdist.sort(key=lambda x: x.rstrip('/').split('_')) cdfverbase = cdfdist[-1].rstrip('/') instfname = cdfverbase + ('_0' if cdfverbase.count('_') == 1 else '') + \ '-setup-{0}.exe'.format(len('%x' % sys.maxsize)*4) insturl = baseurl + cdfverbase + '/windows/' + instfname tmpdir = tempfile.mkdtemp() try: fname, status = u.urlretrieve(insturl, os.path.join(tmpdir, instfname)) subprocess.check_call([fname, '/install', '/q1'], shell=False) finally: shutil.rmtree(tmpdir) _libpath, _library = Library._find_lib() 
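#Library._find_lib() gives (libpath, loaded library) on success; on failure it
#gives (list of paths tried, None), which triggers the import-time error below.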
if _library is None: raise Exception(('Cannot load CDF C library; checked {0}. ' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(_libpath))) from . import const lib = Library(_libpath, _library) """Module global library object. Initalized at module load time so all classes have ready access to the CDF library and a common state. E.g: >>> import pycdf >>> pycdf.lib.version (3, 3, 0, ' ') """ class CDFException(Exception): """ Base class for errors or warnings in the CDF library. Not normally used directly, but in subclasses :class:`CDFError` and :class:`CDFWarning`. Error messages provided by this class are looked up from the underlying C library. """ def __init__(self, status): """ Create a CDF Exception Uses CDF C library to look up an appropriate error message. Parameters ========== status : ctypes.c_long CDF status """ self.status = status self.string = 'CDF error ' + repr(status) + ', unable to get details.' message = ctypes.create_string_buffer(const.CDF_STATUSTEXT_LEN + 1) try: retval = lib._library.CDFlib(const.SELECT_, const.CDF_STATUS_, ctypes.c_long(status), const.GET_, const.STATUS_TEXT_, message, const.NULL_) if retval == const.CDF_OK: if isinstance(message.value, str): self.string = message.value elif isinstance(message.value, bytes): self.string = message.value.decode() except: pass def __str__(self): """ Error string associated with the library error. Returns ======= out : str Error message from the CDF library. """ return self.string class CDFError(CDFException): """Raised for an error in the CDF library.""" pass class CDFWarning(CDFException, UserWarning): """Used for a warning in the CDF library.""" def warn(self, level=4): """ Issues a warning based on the information stored in my exception Intended for use in check_status or similar wrapper function. Other Parameters ================ level : int optional (default 3), how far up the stack the warning should be reported. Passed directly to :class:`warnings.warn`. """ warnings.warn(self, self.__class__, level) class EpochError(Exception): """Used for errors in epoch routines""" pass def _compress(obj, comptype=None, param=None): """Set or check the compression of a :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param obj: object on which to set or check compression @type obj: :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param comptype: type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :py:mod:`pycdf.const`. If not specified, will not change compression. @type comptype: ctypes.c_long @param param: Compression parameter, see CDF CRM 4.10 and :py:mod:`pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
@type param: ctypes.c_long @return: (comptype, param) currently in effect @rtype: tuple """ if isinstance(obj, CDF): COMPRESSION_ = const.CDF_COMPRESSION_ elif isinstance(obj, Var): COMPRESSION_ = const.zVAR_COMPRESSION_ else: raise ValueError('Must specify a CDF or Var type.') validparams = {const.NO_COMPRESSION.value: [ctypes.c_long(0)], const.RLE_COMPRESSION.value: [const.RLE_OF_ZEROs], const.HUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.AHUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.GZIP_COMPRESSION.value: [ctypes.c_long(5), ctypes.c_long(1), ctypes.c_long(2), ctypes.c_long(3), ctypes.c_long(4), ctypes.c_long(6), ctypes.c_long(7), ctypes.c_long(8), ctypes.c_long(9), ], } comptypes = [const.NO_COMPRESSION, const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION, const.GZIP_COMPRESSION] comptypevalues = [i.value for i in comptypes] if comptype != None: if not hasattr(comptype, 'value'): comptype = ctypes.c_long(comptype) if param is None: if not comptype.value in validparams: raise CDFError(const.BAD_COMPRESSION) param = validparams[comptype.value][0] paramlist = (ctypes.c_long * 1)(param) obj._call(const.PUT_, COMPRESSION_, comptype, paramlist) params = (ctypes.c_long * const.CDF_MAX_PARMS)(*([0] * const.CDF_MAX_PARMS)) comptype = ctypes.c_long(0) percent = ctypes.c_long(0) obj._call(const.GET_, COMPRESSION_, ctypes.byref(comptype), ctypes.byref(params), ctypes.byref(percent)) param = params[0] if not comptype.value in comptypevalues: raise CDFError(const.BAD_COMPRESSION) validparamvalues = [i.value for i in validparams[comptype.value]] if not param in validparamvalues: raise CDFError(const.BAD_COMPRESSION_PARM) comptype = comptypes[comptypevalues.index(comptype.value)] if comptype in (const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION): param = validparams[comptype.value][validparamvalues.index(param)] return (comptype, param) class CDF(MutableMapping): """ Python object representing a CDF file. Open or create a CDF file by creating an object of this class. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. A readonly CDF with many variables may be slow to close. See :meth:`readonly`. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :meth:`close` or :meth:`save` when done. .. note:: Existing CDF files are opened read-only by default, see :meth:`readonly` to change. CDF supports the `with <http://docs.python.org/tutorial/inputoutput.html#methods-of-file-objects>`_ keyword, like other file objects, so: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... #do brilliant things with the CDF will open the CDF, execute the indented statements, and close the CDF when finished or when an error occurs. The `python docs <http://docs.python.org/reference/compound_stmts.html#with>`_ include more detail on this 'context manager' ability. 
CDF objects behave like a python `dictionary <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_, where the keys are names of variables in the CDF, and the values, :class:`Var` objects. As a dictionary, they are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_ and it is easy to loop over all of the variables in a file. Some examples: #. List the names of all variables in the open CDF ``cdffile``: >>> cdffile.keys() >>> for k in cdffile: #Alternate ... print(k) #. Get a :class:`Var` object for the variable named ``Epoch``: >>> epoch = cdffile['Epoch'] #. Determine if a CDF contains a variable named ``B_GSE``: >>> if 'B_GSE' in cdffile: ... print('B_GSE is in the file') ... else: ... print('B_GSE is not in the file') #. Find how many variables are in the file: >>> print(len(cdffile)) #. Delete the variable ``Epoch`` from the open CDF file ``cdffile``: >>> del cdffile['Epoch'] #. Display a summary of variables and types in open CDF file ``cdffile``: >>> print(cdffile) #. Open the CDF named ``cdf_filename.cdf``, read *all* the data from all variables into dictionary ``data``, and close it when done or if an error occurs: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... data = cdffile.copy() This last example can be very inefficient as it reads the entire CDF. Normally it's better to treat the CDF as a dictionary and access only the data needed, which will be pulled transparently from disc. See :class:`Var` for more subtle examples. Potentially useful dictionary methods and related functions: - `in <http://docs.python.org/reference/expressions.html#in>`_ - `keys <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_ - :py:func:`len` - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - :py:func:`sorted` - :py:func:`~spacepy.toolbox.dictree` The CDF user's guide section 2.2 has more background information on CDF files. The :attr:`~CDF.attrs` Python attribute acts as a dictionary referencing CDF attributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`gAttrList` for more on the dictionary of global attributes. Creating a new CDF from a master (skeleton) CDF has similar syntax to opening one: >>> cdffile = pycdf.CDF('cdf_filename.cdf', 'master_cdf_filename.cdf') This creates and opens ``cdf_filename.cdf`` as a copy of ``master_cdf_filename.cdf``. Using a skeleton CDF is recommended over making a CDF entirely from scratch, but this is possible by specifying a blank master: >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') When CDFs are created in this way, they are opened read-write, see :py:meth:`readonly` to change. By default, new CDFs (without a master) are created in version 2 (backward-compatible) format. To create a version 3 CDF, use :meth:`Library.set_backward`: >>> pycdf.lib.set_backward(False) >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') Add variables by direct assignment, which will automatically set type and dimension based on the data provided: >>> cdffile['new_variable_name'] = [1, 2, 3, 4] or, if more control is needed over the type and dimensions, use :py:meth:`new`. Although it is supported to assign Var objects to Python variables for convenience, there are some minor pitfalls that can arise when changing a CDF that will not affect most users. 
This is only a concern when assigning a zVar object to a Python variable, changing the CDF through some other variable, and then trying to use the zVar object via the originally assigned variable. Deleting a variable: >>> var = cdffile['Var1'] >>> del cdffile['Var1'] >>> var[0] #fail, no such variable Renaming a variable: >>> var = cdffile['Var1'] >>> cdffile['Var1'].rename('Var2') >>> var[0] #fail, no such variable Renaming via the same variable works: >>> var = cdffile['Var1'] >>> var.rename('Var2') >>> var[0] #succeeds, aware of new name Deleting a variable and then creating another variable with the same name may lead to some surprises: >>> var = cdffile['Var1'] >>> var[...] = [1, 2, 3, 4] >>> del cdffile['Var1'] >>> cdffile.new('Var1', data=[5, 6, 7, 8] >>> var[...] [5, 6, 7, 8] .. autosummary:: ~CDF.attr_num ~CDF.attrs ~CDF.add_attr_to_cache ~CDF.add_to_cache ~CDF.backward ~CDF.checksum ~CDF.clear_attr_from_cache ~CDF.clear_from_cache ~CDF.clone ~CDF.close ~CDF.col_major ~CDF.compress ~CDF.copy ~CDF.from_data ~CDF.new ~CDF.raw_var ~CDF.readonly ~CDF.save ~CDF.var_num ~CDF.version .. attribute:: CDF.attrs Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. .. attribute:: CDF.backward True if this CDF was created in backward-compatible mode (for opening with CDF library before 3.x) .. automethod:: add_to_cache .. automethod:: add_attr_to_cache .. automethod:: attr_num .. automethod:: checksum .. automethod:: clear_from_cache .. automethod:: clear_attr_from_cache .. automethod:: clone .. automethod:: close .. automethod:: col_major .. automethod:: compress .. automethod:: copy .. automethod:: from_data .. automethod:: new .. automethod:: raw_var .. automethod:: readonly .. automethod:: save .. automethod:: var_num .. automethod:: version """ def __init__(self, pathname, masterpath=None, create=None, readonly=None): """Open or create a CDF file. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. Raises ====== CDFError if CDF library reports an error CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save` when done. 
""" if masterpath is not None: #Looks like we want to create if create is False: raise ValueError('Cannot specify a master CDF without creating a CDF') if readonly is True: raise ValueError('Cannot create a CDF in readonly mode') if create and readonly: raise ValueError('Cannot create a CDF in readonly mode') try: self.pathname = pathname.encode() except AttributeError: raise ValueError( 'pathname must be string-like: {0}'.format(pathname)) self._handle = ctypes.c_void_p(None) self._opened = False if masterpath is None and not create: self._open(True if readonly is None else readonly) elif masterpath: self._from_master(masterpath.encode()) else: self._create() lib.call(const.SELECT_, const.CDF_zMODE_, ctypes.c_long(2)) self._attrlistref = weakref.ref(gAttrList(self)) self.backward = self.version()[0] < 3 self._var_nums = {} """Cache of name-to-number mappings for variables in this CDF""" self._attr_info = {} """Cache of name-to-(number, global) mappings for attributes in this CDF""" def __del__(self): """Destructor; called when CDF object is destroyed. Close CDF file if there is still a valid handle. .. note:: To avoid data loss, explicitly call :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save`. """ if self._opened: self.close() def __delitem__(self, name): """Delete a zVariable in this CDF, by name or number Parameters ========== name : string or int Name or number of the CDF variable .. note: Variable numbers may change if variables are added or removed. Examples ======== Delete the variable ``Epoch`` from the open CDF file ``cdffile``. >>> del cdffile['Epoch'] """ self[name]._delete() def __enter__(self): """Context manager entrance function.""" return self def __exit__(self, type, value, traceback): """Context manager exit function. Close CDF file. """ self.close() def __getitem__(self, name): """Gets a zVariable in this CDF, by name or number The CDF acts like a dict @param name: Name or number of the CDF variable @type name: string or int @return: CDF variable named or numbered L{name} @rtype: :py:class:`pycdf.Var` @raise KeyError: for pretty much any problem in lookup @note: variable numbers may change if variables are added or removed. """ try: return Var(self, name) except CDFException as e: raise KeyError('{0}: {1}'.format(name, e)) def __setitem__(self, name, data): """Writes data to a zVariable in this CDF If the zVariable does not exist, will create one matching L{data}. If it does exist, will attempt to write L{data} to it without changing the type or dimensions. @param name: name or number of the variable to write @type name: str or int @param data: data to write, or a :py:class:`pycdf.Var` to copy """ if isinstance(data, Var): self.clone(data, name) elif name in self: self[name][...] 
= data if hasattr(data, 'attrs'): self[name].attrs.clone(data.attrs) else: self.new(name, data) def __iter__(self, current = 0): """Iterates over zVars in CDF Iterators for dicts return keys @note: Returned in variable-number order """ while current < self.__len__(): name = self[current].name() value = (yield name) if value is None: current += 1 else: current = self[value]._num() current += 1 def __len__(self): """Implements 'length' of CDF (number of zVars) @return: number of zVars in the CDF @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, const.CDF_NUMzVARS_, ctypes.byref(count)) return count.value def __contains__(self, key): """Determines whether a particular variable name is in the CDF @note: Essentially an efficiency function; L{__iter__} is called if this isn't defined @param key: key/variable name to check @type key: string @return: True if L{key} is the name of a variable in CDF, else False @rtype: Boolean """ try: foo = self[key] return True except KeyError as e: expected = str(key) + \ ": NO_SUCH_VAR: Named variable not found in this CDF." if expected in e.args: return False raise def __repr__(self): """Returns representation of CDF Cannot return anything that can be eval'd to create a copy of the CDF, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<CDF:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the CDF This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.CDF`, just the names, types, and sizes of all variables. (Attributes are not listed.) @return: description of the variables in the CDF @rtype: str """ if self._opened: return '\n'.join([key + ': ' + str(value) for (key, value) in sorted(self.items())]) #can get away with this sort because second value in tuple isn't #compared unless first are different, and variable name is unique. else: if isinstance(self.pathname, str): return 'Closed CDF {0}'.format(self.pathname) else: return 'Closed CDF {0}'.format(self.pathname.decode('ascii')) def _open(self, readonly=True): """Opens the CDF file (called on init) Will open an existing CDF file read/write. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.OPEN_, const.CDF_, self.pathname, ctypes.byref(self._handle)) self._opened = True if readonly: #Default is RW self.readonly(readonly) def _create(self): """Creates (and opens) a new CDF file Created at ``pathname``. Assumes zero-dimension r variables Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.CREATE_, const.CDF_, self.pathname, ctypes.c_long(0), (ctypes.c_long * 1)(0), ctypes.byref(self._handle)) self._opened = True def _from_master(self, master_path): """Creates a new CDF from a master CDF file ``master_path`` is copied to ``pathname`` and opened. Parameters ========== master_path : string location of the master CDF file Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. 
note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ if os.path.exists(self.pathname): raise CDFError(const.CDF_EXISTS) shutil.copy2(master_path, self.pathname) self._open(False) def _call(self, *args, **kwargs): """Select this CDF as current and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. Parameters ========== args : various, see :py:mod:`ctypes`. Passed directly to the CDF library interface. Useful constants are defined in the :doc:`const <pycdf_const>` module of this package. Returns ======= out : ctypes.c_long CDF status from the library .. note: Terminal NULL_ is automatically added to ``args``. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. """ return lib.call(const.SELECT_, const.CDF_, self._handle, *args, **kwargs) def clone(self, zVar, name=None, data=True): """ Clone a zVariable (from another CDF or this) into this CDF Parameters ========== zVar : :py:class:`Var` variable to clone Other Parameters ================ name : str Name of the new variable (default: name of the original) data : boolean (optional) Copy data, or only type, dimensions, variance, attributes? (default: True, copy data as well) Returns ======= out : :py:class:`Var` The newly-created zVar in this CDF """ if name is None: name = zVar.name() if name in self: del self[name] self.new(name, type=zVar.type(), recVary=zVar.rv(), dimVarys=zVar.dv(), dims=zVar._dim_sizes(), n_elements=zVar._nelems()) self[name].compress(*zVar.compress()) self[name].attrs.clone(zVar.attrs) if data: r = zVar._raw zVar._raw = True self.raw_var(name)[...] = zVar[...] zVar._raw = r return zVar def col_major(self, new_col=None): """ Finds the majority of this CDF file Other Parameters ================ new_col : boolean Specify True to change to column-major, False to change to row major, or do not specify to check the majority rather than changing it. (default is check only) Returns ======= out : boolean True if column-major, false if row-major """ if new_col != None: new_maj = const.COLUMN_MAJOR if new_col else const.ROW_MAJOR self._call(const.PUT_, const.CDF_MAJORITY_, new_maj) maj = ctypes.c_long(0) self._call(const.GET_, const.CDF_MAJORITY_, ctypes.byref(maj)) if not maj.value in (const.ROW_MAJOR.value, const.COLUMN_MAJOR.value): raise CDFError(const.BAD_MAJORITY) return maj.value == const.COLUMN_MAJOR.value def readonly(self, ro=None): """ Sets or check the readonly status of this CDF If the CDF has been changed since opening, setting readonly mode will have no effect. .. note:: Closing a CDF that has been opened readonly, or setting readonly False, may take a substantial amount of time if there are many variables in the CDF, as a (potentially large) cache needs to be cleared. Consider specifying ``readonly=False`` when opening the file if this is an issue. However, this may make some reading operations slower. Other Parameters ================ ro : Boolean True to set the CDF readonly, False to set it read/write, or leave out to check only. 
Returns ======= out : Boolean True if CDF is read-only, else False Raises ====== CDFError : if bad mode is set """ if ro == True: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYon) elif ro == False: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYoff) mode = ctypes.c_long(0) self._call(const.CONFIRM_, const.CDF_READONLY_MODE_, ctypes.byref(mode)) if mode.value == const.READONLYon.value: return True elif mode.value == const.READONLYoff.value: return False else: raise CDFError(const.BAD_READONLY_MODE.value) def checksum(self, new_val=None): """ Set or check the checksum status of this CDF. If checksums are enabled, the checksum will be verified every time the file is opened. Other Parameters ================ new_val : boolean True to enable checksum, False to disable, or leave out to simply check. Returns ======= out : boolean True if the checksum is enabled or False if disabled """ if new_val != None: self._call(const.PUT_, const.CDF_CHECKSUM_, const.MD5_CHECKSUM if new_val else const.NO_CHECKSUM) chk = ctypes.c_long(0) self._call(const.GET_, const.CDF_CHECKSUM_, ctypes.byref(chk)) if not chk.value in (const.MD5_CHECKSUM.value, const.NO_CHECKSUM.value): raise CDFError(const.BAD_CHECKSUM) return chk.value == const.MD5_CHECKSUM.value def close(self): """ Closes the CDF file Although called on object destruction (:meth:`~CDF.__del__`), to ensure all data are saved, the user should explicitly call :meth:`~CDF.close` or :meth:`~CDF.save`. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.CLOSE_, const.CDF_) self._opened = False def compress(self, comptype=None, param=None): """ Set or check the compression of this CDF Sets compression on entire *file*, not per-variable. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) Returns ======= out : tuple (comptype, param) currently in effect See Also ======== :meth:`Var.compress` Examples ======== Set file ``cdffile`` to gzip compression, compression level 9: >>> cdffile.compress(pycdf.const.GZIP_COMPRESSION, 9) """ return _compress(self, comptype, param) def new(self, name, data=None, type=None, recVary=True, dimVarys=None, dims=None, n_elements=None, compress=None, compress_param=None): """ Create a new zVariable in this CDF .. note:: Either ``data`` or ``type`` must be specified. If type is not specified, it is guessed from ``data``. Parameters ========== name : str name of the new variable Other Parameters ================ data data to store in the new variable. If this has a an ``attrs`` attribute (e.g., :class:`~spacepy.datamodel.dmarray`), it will be used to populate attributes of the new variable. type : ctypes.c_long CDF type of the variable, from :mod:`~pycdf.const`. See section 2.5 of the CDF user's guide for more information on CDF data types. recVary : boolean record variance of the variable (default True) dimVarys : list of boolean dimension variance of each dimension, default True for all dimensions. 
dims : list of int size of each dimension of this variable, default zero-dimensional. Note this is the dimensionality as defined by CDF, i.e., for record-varying variables it excludes the leading record dimension. See :py:class:`Var`. n_elements : int number of elements, should be 1 except for CDF_CHAR, for which it's the length of the string. compress : ctypes.c_long Compression to apply to this variable, default None. See :py:meth:`Var.compress`. compress_param : ctypes.c_long Compression parameter if compression used; reasonable default is chosen. See :py:meth:`Var.compress`. Returns ======= out : :py:class:`Var` the newly-created zVariable Raises ====== ValueError : if neither data nor sufficient typing information is provided. Notes ===== Any given data may be representable by a range of CDF types; if the type is not specified, pycdf will guess which the CDF types which can represent this data. This breaks down to: #. If input data is a numpy array, match the type of that array #. Proper kind (numerical, string, time) #. Proper range (stores highest and lowest number provided) #. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: #. Type that matches precision of data first, then #. integer type before float type, then #. Smallest type first, then #. signed type first, then #. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if ``data`` specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: #. absolute values between 0 and 3e-39 #. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. """ if type in (const.CDF_EPOCH16, const.CDF_INT8, const.CDF_TIME_TT2000) \ and self.backward: raise ValueError('Cannot use EPOCH16, INT8, or TIME_TT2000 ' 'in backward-compatible CDF') if not lib.supports_int8 and \ type in (const.CDF_INT8, const.CDF_TIME_TT2000): raise ValueError('INT8 and TIME_TT2000 require CDF library 3.4.0') if data is None: if type is None: raise ValueError('Must provide either data or a CDF type.') if dims is None: dims = [] if n_elements is None: n_elements = 1 else: (guess_dims, guess_types, guess_elements) = _Hyperslice.types(data) if dims is None: if recVary: if guess_dims == (): raise ValueError( 'Record-varying data cannot be scalar. 
' 'Specify NRV with CDF.new() or put data in array.') dims = guess_dims[1:] else: dims = guess_dims if type is None: type = guess_types[0] if type == const.CDF_EPOCH16.value and self.backward: type = const.CDF_EPOCH if n_elements is None: n_elements = guess_elements if dimVarys is None: dimVarys = [True for i in dims] recVary = const.VARY if recVary else const.NOVARY dimVarys = [const.VARY if dimVary else const.NOVARY for dimVary in dimVarys] if not hasattr(type, 'value'): type = ctypes.c_long(type) if type.value == const.CDF_INT8.value and not lib.supports_int8: raise ValueError( '64-bit integer support require CDF library 3.4.0') if type.value in (const.CDF_EPOCH16.value, const.CDF_INT8.value, const.CDF_TIME_TT2000.value) \ and self.backward: raise ValueError('Data requires EPOCH16, INT8, or TIME_TT2000; ' 'incompatible with backward-compatible CDF') new_var = Var(self, name, type, n_elements, dims, recVary, dimVarys) if compress != None: new_var.compress(compress, compress_param) if data is not None: new_var[...] = data if hasattr(data, 'attrs'): new_var.attrs.clone(data.attrs) return new_var def raw_var(self, name): """ Get a "raw" :class:`Var` object. Normally a :class:`Var` will perform translation of values for certain types (to/from Unicode for CHAR variables on Py3k, and to/from datetime for all time types). A "raw" object does not perform this translation, on read or write. This does *not* affect the data on disk, and in fact it is possible to maintain multiple Python objects with access to the same zVariable. Parameters ========== name : str name or number of the zVariable """ v = self[name] v._raw = True return v def save(self): """ Saves the CDF file but leaves it open. If closing the CDF, :meth:`close` is sufficient; there is no need to call :meth:`save` before :meth:`close`. .. note:: Relies on an undocumented call of the CDF C library, which is also used in the Java interface. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.SAVE_, const.CDF_) def copy(self): """ Make a copy of all data and attributes in this CDF Returns ======= out : :py:class:`CDFCopy` :class:`~spacepy.datamodel.SpaceData`-like object of all data """ return CDFCopy(self) def version(self): """ Get version of library that created this CDF Returns ======= out : tuple version of CDF library, in form (version, release, increment) """ ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) self._call(const.GET_, const.CDF_VERSION_, ctypes.byref(ver), const.GET_, const.CDF_RELEASE_, ctypes.byref(rel), const.GET_, const.CDF_INCREMENT_, ctypes.byref(inc)) return (ver.value, rel.value, inc.value) def _get_attrs(self): """Get attribute list Provide access to the CDF's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = gAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. 
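A brief, hypothetical sketch (the attribute name and value are examples only and need not exist in any given file):

>>> cdffile.attrs['Project'] = 'ISTP>International Solar-Terrestrial Physics'
>>> print(cdffile.attrs['Project'][0])
ISTP>International Solar-Terrestrial Physics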
""") def var_num(self, varname): """Get the variable number of a particular variable name This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : int Variable number of this zvariable. """ num = self._var_nums.get(varname, None) if num is None: #Copied from Var._get, which can hopefully be thinned varNum = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMBER_, varname, ctypes.byref(varNum)) num = varNum.value self._var_nums[varname] = num return num def attr_num(self, attrname): """Get the attribute number and scope by attribute name This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : tuple attribute number, scope (True for global) of this attribute """ res = self._attr_info.get(attrname, None) if res is None: #Copied from Var._get, which can hopefully be thinned attrNum = ctypes.c_long(0) self._call(const.GET_, const.ATTR_NUMBER_, attrname, ctypes.byref(attrNum)) scope = ctypes.c_long(0) self._call(const.SELECT_, const.ATTR_, attrNum, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) res = (attrNum.value, scope) self._attr_info[attrname] = res return res def clear_attr_from_cache(self, attrname): """Mark an attribute deleted in the name-to-number cache Will remove an attribute, and all attributes with higher numbers, from the attribute cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the attribute. Not this is NOT a string in Python 3! """ num, scope = self.attr_num(attrname) #All numbers higher than this are renumbered for a, n in list(self._attr_info.items()): if n[0] >= num: del self._attr_info[a] def clear_from_cache(self, varname): """Mark a variable deleted in the name-to-number cache Will remove a variable, and all variables with higher numbers, from the variable cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! """ num = self.var_num(varname) #All numbers higher than this are renumbered for v, n in list(self._var_nums.items()): if n >= num: del self._var_nums[v] def add_attr_to_cache(self, attrname, num, scope): """Add an attribute to the name-to-number cache This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable scope : bool True if global scope; False if variable scope. 
""" self._attr_info[attrname] = (num, scope) def add_to_cache(self, varname, num): """Add a variable to the name-to-number cache This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable """ self._var_nums[varname] = num #Note there is no function for delete, currently handled in Var.rename #and Attr.rename by just deleting from the dict directly. Maybe this #should be differen (maybe should be possible to follow a variable across #a rename...) class Var(MutableSequence): """ A CDF variable. This object does not directly store the data from the CDF; rather, it provides access to the data in a format that much like a Python list or numpy :class:`~numpy.ndarray`. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. The CDF user's guide, section 2.3, provides background on variables. .. note:: Not intended to be created directly; use methods of :class:`CDF` to gain access to a variable. A record-varying variable's data are viewed as a hypercube of dimensions n_dims+1 (the extra dimension is the record number). They are indexed in row-major fashion, i.e. the last index changes most frequently / is contiguous in memory. If the CDF is column-major, the data are transformed to row-major before return. Non record-varying variables are similar, but do not have the extra dimension of record number. Variables can be subscripted by a multidimensional index to return the data. Indices are in row-major order with the first dimension representing the record number. If the CDF is column major, the data are reordered to row major. Each dimension is specified by standard Python `slice <http://docs.python.org/tutorial/introduction.html#strings>`_ notation, with dimensions separated by commas. The ellipsis fills in any missing dimensions with full slices. The returned data are lists; Python represents multidimensional arrays as nested lists. The innermost set of lists represents contiguous data. .. note:: numpy 'fancy indexing' is *not* supported. Degenerate dimensions are 'collapsed', i.e. no list of only one element will be returned if a single subscript is specified instead of a range. (To avoid this, specify a slice like 1:2, which starts with 1 and ends before 2). Two special cases: 1. requesting a single-dimension slice for a record-varying variable will return all data for that record number (or those record numbers) for that variable. 2. Requests for multi-dimensional variables may skip the record-number dimension and simply specify the slice on the array itself. In that case, the slice of the array will be returned for all records. In the event of ambiguity (e.g., single-dimension slice on a one-dimensional variable), case 1 takes priority. Otherwise, mismatch between the number of dimensions specified in the slice and the number of dimensions in the variable will cause an :exc:`~exceptions.IndexError` to be thrown. This all sounds very complicated but it is essentially attempting to do the 'right thing' for a range of slices. An unusual case is scalar (zero-dimensional) non-record-varying variables. Clearly they cannot be subscripted normally. 
In this case, use the ``[...]`` syntax meaning 'access all data.': >>> import pycdf >>> testcdf = pycdf.CDF('test.cdf', '') >>> variable = testcdf.new('variable', recVary=False, ... type=pycdf.const.CDF_INT4) >>> variable[...] = 10 >>> variable <Var: CDF_INT4 [] NRV > >>> variable[...] 10 Reading any empty non-record-varying variable will return an empty with the same *number* of dimensions, but all dimensions will be of zero length. The scalar is, again, a special case: due to the inability to have a numpy array which is both zero-dimensional and empty, reading an NRV scalar variable with no data will return an empty one-dimensional array. This is really not recommended. As a list type, variables are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_; iterating over a variable returns a single complete record at a time. This is all clearer with examples. Consider a variable ``B_GSM``, with three elements per record (x, y, z components) and fifty records in the CDF. Then: 1. ``B_GSM[0, 1]`` is the y component of the first record. 2. ``B_GSM[10, :]`` is a three-element list, containing x, y, and z components of the 11th record. As a shortcut, if only one dimension is specified, it is assumed to be the record number, so this could also be written ``B_GSM[10]``. 3. ``B_GSM[...]`` reads all data for ``B_GSM`` and returns it as a fifty-element list, each element itself being a three-element list of x, y, z components. Multidimensional example: consider fluxes stored as a function of pitch angle and energy. Such a variable may be called Flux and stored as a two-dimensional array, with the first dimension representing (say) ten energy steps and the second, eighteen pitch angle bins (ten degrees wide, centered from 5 to 175 degrees). Assume 100 records stored in the CDF (i.e. 100 different times). 1. ``Flux[4]`` is a list of ten elements, one per energy step, each element being a list of 18 fluxes, one per pitch bin. All are taken from the fifth record in the CDF. 2. ``Flux[4, :, 0:4]`` is the same record, all energies, but only the first four pitch bins (roughly, field-aligned). 3. ``Flux[..., 0:4]`` is a 100-element list (one per record), each element being a ten-element list (one per energy step), each containing fluxes for the first four pitch bins. This slicing notation is very flexible and allows reading specifically the desired data from the CDF. All data are, on read, converted to appropriate Python data types; EPOCH, EPOCH16, and TIME_TT2000 types are converted to :class:`~datetime.datetime`. Data are returned in numpy arrays. .. note:: Although pycdf supports TIME_TT2000 variables, the Python :class:`~datetime.datetime` object does not support leap seconds. Thus, on read, any seconds past 59 are truncated to 59.999999 (59 seconds, 999 milliseconds, 999 microseconds). Potentially useful list methods and related functions: - `count <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `in <http://docs.python.org/reference/expressions.html#in>`_ - `index <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `len <http://docs.python.org/library/functions.html#len>`_ - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - `sorted <http://docs.python.org/library/functions.html#sorted>`_ The topic of array majority can be very confusing; good background material is available at `IDL Array Storage and Indexing <http://www.idlcoyote.com/misc_tips/colrow_major.html>`_. 
In brief, *regardless of the majority stored in the CDF*, pycdf will always present the data in the native Python majority, row-major order, also known as C order. This is the default order in `NumPy <http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html #internal-memory-layout-of-an-ndarray>`_. However, packages that render image data may expect it in column-major order. If the axes seem 'swapped' this is likely the reason. The :attr:`~Var.attrs` Python attribute acts as a dictionary referencing zAttributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`zAttrList` for more on the dictionary of attributes. With writing, as with reading, every attempt has been made to match the behavior of Python lists. You can write one record, many records, or even certain elements of all records. There is one restriction: only the record dimension (i.e. dimension 0) can be resized by write, as all records in a variable must have the same dimensions. Similarly, only whole records can be deleted. .. note:: Unusual error messages on writing data usually mean that pycdf is unable to interpret the data as a regular array of a single type matching the type and shape of the variable being written. A 5x4 array is supported; an irregular array where one row has five columns and a different row has six columns is not. Error messages of this type include: - ``Data must be well-formed, regular array of number, string, or datetime`` - ``setting an array element with a sequence.`` - ``shape mismatch: objects cannot be broadcast to a single shape`` For these examples, assume Flux has 100 records and dimensions [2, 3]. Rewrite the first record without changing the rest: >>> Flux[0] = [[1, 2, 3], [4, 5, 6]] Writes a new first record and delete all the rest: >>> Flux[...] = [[1, 2, 3], [4, 5, 6]] Write a new record in the last position and add a new record after: >>> Flux[99:] = [[[1, 2, 3], [4, 5, 6]], ... [[11, 12, 13], [14, 15, 16]]] Insert two new records between the current number 5 and 6: >>> Flux[5:6] = [[[1, 2, 3], [4, 5, 6]], [[11, 12, 13], ... [14, 15, 16]]] This operation can be quite slow, as it requires reading and rewriting the entire variable. (CDF does not directly support record insertion.) Change the first element of the first two records but leave other elements alone: >>> Flux[0:2, 0, 0] = [1, 2] Remove the first record: >>> del Flux[0] Removes record 5 (the sixth): >>> del Flux[5] Due to the need to work around a bug in the CDF library, this operation can be quite slow. Delete *all data* from ``Flux``, but leave the variable definition intact: >>> del Flux[...] .. note:: Although this interface only directly supports zVariables, zMode is set on opening the CDF so rVars appear as zVars. See p.24 of the CDF user's guide; pyCDF uses zMode 2. .. autosummary:: ~Var.attrs ~Var.compress ~Var.copy ~Var.dtype ~Var.dv ~Var.insert ~Var.name ~Var.rename ~Var.rv ~Var.shape ~Var.type .. attribute:: Var.attrs zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. .. automethod:: compress .. automethod:: copy .. autoattribute:: dtype .. automethod:: dv .. automethod:: insert .. automethod:: name .. automethod:: rename .. automethod:: rv .. autoattribute:: shape .. 
automethod:: type """ def __init__(self, cdf_file, var_name, *args): """Create or locate a variable Parameters ========== cdf_file : :py:class:`pycdf.CDF` CDF file containing this variable var_name : string name of this variable Other Parameters ================ args additional arguments passed to :py:meth:`_create`. If none, opens an existing variable. If provided, creates a new one. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning """ self.cdf_file = cdf_file #This is the definitive "identify" of variable self._name = None self._type = None #CDF type (long) self._raw = False #Raw access (skip all conversions) if len(args) == 0: self._get(var_name) else: self._create(var_name, *args) #Weak reference to attribute list (use attrs instead) #This avoids a reference loop self._attrlistref = weakref.ref(zAttrList(self)) def __getitem__(self, key): """Returns a slice from the data array. Details under :py:class:`pycdf.Var`. @return: The data from this variable @rtype: list-of-lists of appropriate type. @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) #Hyperslice mostly catches this sort of thing, but #an empty variable is a special case, since we might want to #WRITE to 0th record (which Hyperslice also supports) but #can't READ from it, and iterating over tries to read from it. if hslice.rv: if hslice.dimsizes[0] == 0 and hslice.degen[0] and \ hslice.starts[0] == 0: raise IndexError('record index out of range') #For NRV, again hslice will assume 0th record exists since we might #want to write. So ANY degenerate dim other than the glued-on 0th #suggests an explicit index that should fail. None degenerate suggests #make an empty array. #Note this is pulling a lot of hyperslice stuff into getitem! elif hslice.dimsizes[0] == 0: if len(hslice.degen) > 1 and max(hslice.degen[1:]): raise IndexError('record index out of range') else: #The zero-length dimension is degenerate so it gets chopped, #and you can't have a zero-length numpy array that still #maintains the size of all other dimensions. So just force #a zero-dim array and the rest will follow hslice.counts[...] = 0 #If this is a scalar, need to make a single non-degenerate #dimension so it can be empty. if len(hslice.counts) == 1: hslice.degen[0] = False result = hslice.create_array() if hslice.counts[0] != 0: hslice.select() lib.call(const.GET_, const.zVAR_HYPERDATA_, result.ctypes.data_as(ctypes.c_void_p)) return hslice.convert_input_array(result) def __delitem__(self, key): """Removes a record (or set of records) from the CDF Only whole records can be deleted, so the del call must either specify only one dimension or it must specify all elements of the non-record dimensions. This is *not* a way to resize a variable! Deleting records from the middle of a variable may be very slow in some circumstances. To work around a bug in CDF library versions 3.4.0 and before, all the data must be read in, the requested deletions done, and then all written back out. 
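For example, with the hypothetical ``Flux`` variable described in the class documentation, each of the following deletes whole records only:

>>> del Flux[0]      # first record
>>> del Flux[5:15]   # records 5 through 14
>>> del Flux[...]    # all records; the variable definition remains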
@param key: index or slice to delete @type key: int or slice @raise TypeError: if an attempt is made to delete from a non record-varying variable, or to delete below the record level """ if not self.rv(): raise TypeError('Cannot delete records from non-record-varying ' 'variable.') hslice = _Hyperslice(self, key) if hslice.dims > 1 and (hslice.counts[1:] != hslice.dimsizes[1:]).any(): raise TypeError('Can only delete entire records.') if hslice.counts[0] == 0: return start = hslice.starts[0] count = hslice.counts[0] interval = hslice.intervals[0] dimsize = hslice.dimsizes[0] self._call() dangerous_delete = False if lib._del_middle_rec_bug and \ (interval != 1 or (start != 0 and start + count < dimsize)): #delete from middle is dangerous if only have one index entry entries = ctypes.c_long(0) lib.call(const.GET_, const.zVAR_nINDEXENTRIES_, ctypes.byref(entries)) dangerous_delete = (entries.value == 1) if dangerous_delete: data = self[...] data = numpy.delete( data, numpy.arange(start, start + count * interval, interval), 0) self[0:dimsize - count] = data first_rec = dimsize - count last_rec = dimsize - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif interval == 1: first_rec = ctypes.c_long(start) last_rec = ctypes.c_long(start + count - 1) lib.call(const.DELETE_, const.zVAR_RECORDS_, first_rec, last_rec) else: self._call() #delete from end to avoid renumbering of records for recno in range(start + (count - 1) * interval, start - 1, -1 * interval): lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(recno), ctypes.c_long(recno)) def __setitem__(self, key, data): """Puts a slice into the data array. Details under :py:class:`pycdf.Var`. @param key: index or slice to store @type key: int or slice @param data: data to store @type data: numpy.array @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. 
IndexError will @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) n_recs = hslice.counts[0] hslice.expand(data) cdf_type = self.type() if cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) else: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=self._np_type()) if cdf_type == const.CDF_EPOCH16.value: datashape = data.shape[:-1] else: datashape = data.shape #Check data sizes if datashape != tuple(hslice.expected_dims()): raise ValueError('attempt to assign data of dimensions ' + str(datashape) + ' to slice of dimensions ' + str(tuple(hslice.expected_dims()))) #Flip majority and reversed dimensions, see convert_input_array data = hslice.convert_output_array(data) #Handle insertions and similar weirdness if hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Specified slice ends before last record, so insert in middle saved_data = self[hslice.starts[0] + n_recs:] if hslice.counts[0] > 0: hslice.select() lib.call(const.PUT_, const.zVAR_HYPERDATA_, data.ctypes.data_as(ctypes.c_void_p)) if hslice.counts[0] < n_recs: first_rec = hslice.starts[0] + hslice.counts[0] last_rec = hslice.dimsizes[0] - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Put saved data in after inserted data self[hslice.starts[0] + hslice.counts[0]:] = saved_data def extend(self, data): """ Append multiple values to the end of this variable This is an efficiency function which overrides the base implementation in MutableSequence. Parameters ---------- data : the data to append """ self[len(self):] = data def insert(self, index, data): """ Inserts a *single* record before an index Parameters ---------- index : int index before which to insert the new record data : the record to insert """ self[index:index] = [data] def _create(self, var_name, datatype, n_elements = 1, dims = (), recVary = const.VARY, dimVarys = None): """Creates a new zVariable @param var_name: name of this variable @type var_name: string @param datatype: CDF data type @type datatype: ctypes.c_long @param n_elements: number of elements (should be 1 except for CDF_CHAR variables). @type n_elements: long @param dims: size of each dimension for multi-dimensional variable, or empty for a zero-dimensional @type dims: sequence of long @param recVary: record variance for this variable (VARY/NOVARY) @type recVary: long @param dimVarys: array of VARY or NOVARY, variance for each dimension @type dimVarys: sequence of long @return: new variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.new}. 
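As a sketch of the indirect path (normally reached through L{CDF.new} rather than called directly), creating a record-varying, one-dimensional CDF_INT4 zVariable of size 3 with default dimension variance amounts to:

>>> var = Var(cdffile, 'Flux', const.CDF_INT4, 1, (3,), const.VARY)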
""" dim_array = (ctypes.c_long * len(dims))(*dims) enc_name = var_name.encode('ascii') if dimVarys is None: dim_vary_array = (ctypes.c_long * (len(dims) if len(dims) > 0 else 1))(const.VARY) else: dim_vary_array = (ctypes.c_long * len(dims))(*dimVarys) varNum = ctypes.c_long(0) self.cdf_file._call(const.CREATE_, const.zVAR_, enc_name, datatype, ctypes.c_long(n_elements), ctypes.c_long(len(dims)), dim_array, recVary, dim_vary_array, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) def _delete(self): """Removes this zVariable from the CDF @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ self._call(const.DELETE_, const.zVAR_) self.cdf_file.clear_from_cache(self._name) self._name = None def _get(self, var_name): """Gets an existing zVariable @param var_name: name of this variable @type var_name: string @return: variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.__getitem__}. """ if isinstance(var_name, str_classes): try: enc_name = var_name.encode('ascii').rstrip() except AttributeError: enc_name = var_name.rstrip() #already in ASCII #'touch' CDF to cause an error if the name isn't there; get number varNum = ctypes.c_long(0) self.cdf_file._call(const.GET_, const.zVAR_NUMBER_, enc_name, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) else: #Looking up by number name = ctypes.create_string_buffer(const.CDF_VAR_NAME_LEN256+1) self.cdf_file._call(const.SELECT_, const.zVAR_, ctypes.c_long(var_name), const.GET_, const.zVAR_NAME_, name) self._name = name.value.rstrip() self.cdf_file.add_to_cache(self._name, var_name) def _num(self): """Returns the zVar number for this variable @return: number of this zVar @rtype: int """ return self.cdf_file.var_num(self._name) def __len__(self): """Get number of records for this variable in this file @return: Number of records @rtype: long @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ count = ctypes.c_long(0) self._call(const.GET_, const.zVAR_MAXREC_, ctypes.byref(count)) return (count.value + 1) def __repr__(self): """Returns representation of the variable Cannot return anything that can be eval'd to create a copy, so just wrap the informal representation in angle brackets. @return: info on this zVar @rtype: str """ return '<Var:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the variable This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.Var`. 
@return: info on this zVar, CDFTYPE [dimensions] NRV (if not record-varying) @rtype: str """ if self.cdf_file._opened: cdftype = self.type() chartypes = (const.CDF_CHAR.value, const.CDF_UCHAR.value) rv = self.rv() typestr = lib.cdftypenames[cdftype] + \ ('*' + str(self._nelems()) if cdftype in chartypes else '' ) if rv: sizestr = str([len(self)] + self._dim_sizes()) else: sizestr = str(self._dim_sizes()) return typestr + ' ' + sizestr + ('' if rv else ' NRV') else: if isinstance(self._name, str): return 'zVar "{0}" in closed CDF {1}'.format( self._name, self.cdf_file.pathname) else: return 'zVar "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self.cdf_file.pathname.decode('ascii')) def _n_dims(self): """Get number of dimensions for this variable @return: the number of dimensions @rtype: long """ n_dims = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMDIMS_, ctypes.byref(n_dims)) return n_dims.value def _dim_sizes(self): """Get the dimension sizes for this variable @return: sequence of sizes @rtype: sequence of long @note: This will always be in Python order (i.e. row major, last index iterates most quickly), *regardless* of the majority of the CDF. """ sizes = (ctypes.c_long * const.CDF_MAX_DIMS)(0) self._call(const.GET_, const.zVAR_DIMSIZES_, sizes) sizes = sizes[0:self._n_dims()] return sizes def rv(self, new_rv=None): """ Gets or sets whether this variable has record variance If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Other Parameters ================ new_rv : boolean True to change to record variance, False to change to NRV, unspecified to simply check variance. Returns ======= out : Boolean True if record varying, False if NRV """ if new_rv != None: self._call(const.PUT_, const.zVAR_RECVARY_, const.VARY if new_rv else const.NOVARY) vary = ctypes.c_long(0) self._call(const.GET_, const.zVAR_RECVARY_, ctypes.byref(vary)) return vary.value != const.NOVARY.value def dv(self, new_dv=None): """ Gets or sets dimension variance of each dimension of variable. If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Parameters ========== new_dv : list of boolean Each element True to change that dimension to dimension variance, False to change to not dimension variance. (Unspecified to simply check variance.) Returns ======= out : list of boolean True if that dimension has variance, else false. """ ndims = self._n_dims() if new_dv != None: if len(new_dv) != ndims: raise ValueError('Must specify variance for ' + str(ndims) + 'dimensions.') varies = (ctypes.c_long * ndims)( *[const.VARY if dv else const.NOVARY for dv in new_dv]) self._call(const.PUT_, const.zVAR_DIMVARYS_, varies) if ndims == 0: return [] varies = (ctypes.c_long * const.CDF_MAX_DIMS)() self._call(const.GET_, const.zVAR_DIMVARYS_, varies) return [dv != const.NOVARY.value for dv in varies[0:ndims]] def _call(self, *args, **kwargs): """Select this CDF and variable and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. @param args: Passed directly to the CDF library interface. Useful constants are defined in the :py:mod:`pycdf.const` module of this package. @type args: various, see :py:mod:`ctypes`. 
@return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self.cdf_file._call( const.SELECT_, const.zVAR_, ctypes.c_long(self.cdf_file.var_num(self._name)), *args, **kwargs) def _np_type(self): """Returns the numpy type of this variable This is the numpy type that will come directly out of the CDF; see :meth:`dtype` for the representation post-conversion. Raises ====== CDFError : for library-reported error or failure to find numpy type Returns ======= out : dtype numpy dtype that will hold value from this variable """ cdftype = self.type() if cdftype == const.CDF_CHAR.value or cdftype == const.CDF_UCHAR.value: return numpy.dtype('S' + str(self._nelems())) try: return lib.numpytypedict[cdftype] except KeyError: raise CDFError(const.BAD_DATA_TYPE) def type(self, new_type=None): """ Returns or sets the CDF type of this variable Parameters ========== new_type : ctypes.c_long the new type from :mod:`~pycdf.const` Returns ======= out : int CDF type """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) n_elements = ctypes.c_long(self._nelems()) self._call(const.PUT_, const.zVAR_DATASPEC_, new_type, n_elements) self._type = None if self._type is None: cdftype = ctypes.c_long(0) self._call(const.GET_, const.zVAR_DATATYPE_, ctypes.byref(cdftype)) self._type = cdftype.value return self._type def _nelems(self): """Number of elements for each value in this variable This is the length of strings for CHAR and UCHAR, should be 1 otherwise. @return: length of strings @rtype: int """ nelems = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMELEMS_, ctypes.byref(nelems)) return nelems.value def name(self): """ Returns the name of this variable Returns ======= out : str variable's name """ if isinstance(self._name, str): return self._name elif isinstance(self._name, bytes): return self._name.decode() def compress(self, comptype=None, param=None): """ Set or check the compression of this variable Compression may not be changeable on variables with data already written; even deleting the data may not permit the change. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
Returns ======= out : tuple the (comptype, param) currently in effect """ return _compress(self, comptype, param) def copy(self): """ Copies all data and attributes from this variable Returns ======= out : :class:`VarCopy` list of all data in record order """ return VarCopy(self) def rename(self, new_name): """ Renames this variable Parameters ========== new_name : str the new name for this variable """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_VAR_NAME_LEN256: raise CDFError(const.BAD_VAR_NAME) self._call(const.PUT_, const.zVAR_NAME_, enc_name) self.cdf_file.add_to_cache( enc_name, self.cdf_file.var_num(self._name)) #Still in cache del self.cdf_file._var_nums[self._name] self._name = enc_name @property def shape(self): """ Provides the numpy array-like shape of this variable. Returns a tuple; first element is number of records (RV variable only) And the rest provide the dimensionality of the variable. .. note:: Assigning to this attribute will not change the shape. """ if self.rv(): return tuple([len(self)] + self._dim_sizes()) else: return tuple(self._dim_sizes()) @property def dtype(self): """ Provide the numpy dtype equivalent to the CDF type of this variable. Data from this variable will be returned in numpy arrays of this type. See Also -------- type """ cdftype = self.type() if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str is not bytes and not self._raw: return numpy.dtype('U' + str(self._nelems())) if cdftype in (const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value) and not self._raw: return numpy.dtype('O') return self._np_type() def _get_attrs(self): """Get attribute list Provide access to the zVar's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = zAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. """) class _Hyperslice(object): """Represents a CDF 'slice' used for the hyper CDF functions For internal module use only. @ivar dims: number of dimensions to this slice, usually number of dimensions to the variable plus one for the record, which represents the 0th (least rapidly varying) dimension. @type dims: int @ivar dimsizes: size of each dimension (0th is number of records) @type dimsizes: list of int @ivar starts: index of the start value for each dimension ('dimension indices' in CDF speak) @type starts: list of int @ivar counts: number of values to get from each dimension. Final result will be the product of everything in counts. ('dimension counts' in CDF speak) @type counts: numpy.array @ivar intervals: interval between successive indices to use for each dimension. ('dimension invervals' in CDF speak) @type intervals: list of int @ivar degen: is this dimension degenerate, i.e. should be removed in the returned dataset. A 3D array with one dimension degenerate will be returned as a 2D array (i.e. list-of-lists.) @type degen: numpy.array @ivar rev: should this dimension be returned in reverse order? 
@type rev: numpy.array @ivar column: is this slice in column-major mode (if false, row-major) @type column: boolean @ivar zvar: what CDF variable this object slices on @type zvar: :py:class:`pycdf.Var` @ivar expanded_key: fully-expanded version of the key passed to the constructor (all dimensions filled in) @type expanded_key: tuple @note: All dimension-related variables are stored row-major (Python order) """ def __init__(self, zvar, key): """Create a Hyperslice @param zvar: zVariable that this slices @type zvar: :py:class:`pycdf.Var` @param key: Python multi-dimensional slice as passed to __getitem__ @type key: tuple of slice and/or int @raise IndexError: if slice is out of range, mismatches dimensions, or otherwise unparsable. @raise ValueError: if slice has invalid values """ self.zvar = zvar self.rv = self.zvar.rv() #dim of records, + 1 record dim (NRV always is record 0) self.dims = zvar._n_dims() + 1 self.dimsizes = [len(zvar)] + \ zvar._dim_sizes() self.starts = [0] * self.dims self.counts = numpy.empty((self.dims,), dtype=numpy.int32) self.counts.fill(1) self.intervals = [1] * self.dims self.degen = numpy.zeros(self.dims, dtype=numpy.bool) self.rev = numpy.zeros(self.dims, dtype=numpy.bool) #key is: #1. a single value (integer or slice object) if called 1D #2. a tuple (of integers and/or slice objects) if called nD #3. Each item is either a single value (degenerate dim) # or a slice object. if not hasattr(key, '__len__'): #Not a container object, pack in tuple key = (key, ) if not self.rv: key = (0, ) + key #NRV, so always get 0th record (degenerate) key = self.expand_ellipsis(key, self.dims) if self.rv: #special-cases for RV variables if len(key) == 1: #get all data for this record(s) key = self.expand_ellipsis(key + (Ellipsis, ), self.dims) elif len(key) == self.dims - 1: #get same slice from each record key = (slice(None, None, None), ) + key if len(key) == self.dims: self.expanded_key = key for i in range(self.dims): idx = key[i] if hasattr(idx, 'start'): #slice (self.starts[i], self.counts[i], self.intervals[i], self.rev[i]) = \ self.convert_range(idx.start, idx.stop, idx.step, self.dimsizes[i]) else: #Single degenerate value if idx < 0: idx += self.dimsizes[i] if idx != 0 and (idx >= self.dimsizes[i] or idx < 0): raise IndexError('list index out of range') self.starts[i] = idx self.degen[i] = True else: raise IndexError('Slice does not match dimensions for zVar ' + str(zvar._name)) self.column = zvar.cdf_file.col_major() def expected_dims(self, data=None): """Calculate size of non-degenerate dimensions Figures out size, in each dimension, of expected input data @return: size of each dimension for this slice, excluding degenerate @rtype: list of int """ return [self.counts[i] for i in range(self.dims) if not self.degen[i]] def expand(self, data): """Expands the record dimension of this slice to hold a set of data If the length of data (outermost dimension) is larger than the record count (counts[0]) for this slice, expand the slice to hold all the data. This requires that the record dimension of the slice not be degenerate, and also that it not have been completely specified when the hyperslice was created (i.e. record dimension either ellipsis or no specified stop.) Does *not* expand any other dimension, since that's Very Hard in CDF. 
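For instance (hypothetical sizes), a slice created from ``var[98:]`` on a 100-record variable starts with a record count of 2; assigning a 3-record array through that slice grows the count to 3, so the extra record is appended, while assigning a 1-record array shrinks the count to 1.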
@param data: the data which are intended to be stored in this slice @type data: list """ rec_slice = self.expanded_key[0] if not self.rv or isinstance(data, str_classes) or self.degen[0] or \ not hasattr(rec_slice, 'stop'): return if len(data) < self.counts[0]: #Truncate to fit data if rec_slice.stop is None and rec_slice.step in (None, 1): self.counts[0] = len(data) elif len(data) > self.counts[0]: #Expand to fit data if rec_slice.step in (None, 1): self.counts[0] = len(data) def create_array(self): """Creates a numpy array to hold the data from this slice Returns ======= out : numpy.array array sized, typed, and dimensioned to hold data from this slice """ counts = self.counts degen = self.degen if self.column: counts = self.reorder(counts) degen = self.reorder(degen) #TODO: Forcing C order for now, revert to using self.column later array = numpy.empty( [counts[i] for i in range(len(counts)) if not degen[i]], self.zvar._np_type(), order='C') return numpy.require(array, requirements=('C', 'A', 'W')) def convert_input_array(self, buffer): """Converts a buffer of raw data from this slice EPOCH(16) variables always need to be converted. CHAR need converted to Unicode if py3k Parameters ========== buffer : numpy.array data as read from the CDF file Returns ======= out : numpy.array converted data """ result = self._flip_array(buffer) #Convert to derived types cdftype = self.zvar.type() if not self.zvar._raw: if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str != bytes: dt = numpy.dtype('U{0}'.format(result.dtype.itemsize)) result = numpy.require(numpy.char.array(result).decode(), dtype=dt) elif cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(result) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(result) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(result) return result def convert_output_array(self, buffer): """Convert a buffer of data that will go into this slice Parameters ========== buffer : numpy.array data to go into the CDF file Returns ======= out : numpy.array input with majority flipped and dimensions reversed to be suitable to pass directly to CDF library. """ buffer = self._flip_array(buffer) return numpy.require(buffer, requirements=('C', 'A', 'W')) def _flip_array(self, data): """ Operations for majority, etc. common between convert_input and _output """ cdftype = self.zvar.type() #Flip majority if any non-degenerate dimensions exist if self.column and not min(self.degen): #Record-number dim degen, swap whole thing if self.degen[0]: if cdftype == const.CDF_EPOCH16.value: #Maintain last dimension data = data.transpose( list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose() #Record-number dimension is not degenerate, so keep it first else: if cdftype == const.CDF_EPOCH16.value: data = data.transpose( [0] + list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose( [0] + list(range(len(data.shape) - 1, 0, -1))) #Reverse non-degenerate dimensions in rev #Remember that the degenerate indices are already gone! 
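#Illustrative example (hypothetical values): with dims == 3,
#degen == [True, False, False] and rev == [False, False, True], the list
#built below is [slice(None), slice(None, None, -1)] -- only the surviving
#(non-degenerate) dimensions are indexed, and only the reversed one gets a
#negative-step slice.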
if self.rev.any(): sliced = [(slice(None, None, -1) if self.rev[i] else slice(None)) for i in range(self.dims) if not self.degen[i]] if cdftype == const.CDF_EPOCH16.value: #don't reverse last dim sliced.extend(slice(None)) data = operator.getitem(data, tuple(sliced)) return data def select(self): """Selects this hyperslice in the CDF Calls the CDF library to select the CDF, variable, records, and array elements corresponding to this slice. """ args = (const.SELECT_, const.zVAR_RECNUMBER_, ctypes.c_long(self.starts[0]), const.SELECT_, const.zVAR_RECCOUNT_, ctypes.c_long(self.counts[0]), const.SELECT_, const.zVAR_RECINTERVAL_, ctypes.c_long(self.intervals[0])) if self.dims > 1: dims = self.dims - 1 args += (const.SELECT_, const.zVAR_DIMINDICES_, (ctypes.c_long * dims)(*self.starts[1:]), const.SELECT_, const.zVAR_DIMCOUNTS_, (ctypes.c_long * dims)(*self.counts[1:]), const.SELECT_, const.zVAR_DIMINTERVALS_, (ctypes.c_long * dims)(*self.intervals[1:])) self.zvar._call(*args) @staticmethod def expand_ellipsis(slices, n_dims): """Expands any ellipses into correct number of full-size slices @param slices: tuple of slices, integers, or ellipse objects @type slices: tuple @param n_dims: number of dimensions this slice is over @type n_dims: int @return: L{slices} with ellipses replaced by appropriate number of full-dimension slices @rtype: tuple @raise IndexError: if ellipses specified when already have enough dimensions """ if slices is Ellipsis: return tuple([slice(None, None, None) for i in range(n_dims)]) #Elements might be numpy arrays, so can't use in/index idx = [i for i, v in enumerate(slices) if v is Ellipsis] if not idx: #no ellipsis return slices if len(idx) > 1: #multiples! raise IndexError('Ellipses can only be used once per slice.') idx = idx[0] #how many dims to expand ellipsis to #remember the ellipsis is in len(slices) and must be replaced! extra = n_dims - len(slices) + 1 if extra < 0: raise IndexError('too many indices') result = slices[0:idx] + (slice(None), ) * extra + slices[idx+1:] return result @staticmethod def check_well_formed(data): """Checks if input data is well-formed, regular array""" d = numpy.asanyarray(data) if d.dtype == numpy.object: #this is probably going to be bad try: len(d.flat[0]) except TypeError: #at least it's not a list pass else: raise ValueError( 'Data must be well-formed, regular array of number, ' 'string, or datetime') @staticmethod def dimensions(data): """Finds the dimensions of a nested list-of-lists @param data: data of which dimensions are desired @type data: list (of lists) @return: dimensions of L{data}, in order outside-in @rtype: list of int @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) _Hyperslice.check_well_formed(d) return d.shape @staticmethod def types(data, backward=False): """Find dimensions and valid types of a nested list-of-lists Any given data may be representable by a range of CDF types; infer the CDF types which can represent this data. This breaks down to: 1. Proper kind (numerical, string, time) 2. Proper range (stores highest and lowest number) 3. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: 1. Type that matches precision of data first, then 2. integer type before float type, then 3. Smallest type first, then 4. signed type first, then 5. specifically-named (CDF_BYTE) vs. 
generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if L{data} specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: 1. absolute values between 0 and 3e-39 2. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. @param data: data for which dimensions and CDF types are desired @type data: list (of lists) @param backward: limit to pre-CDF3 types @type backward: bool @return: dimensions of L{data}, in order outside-in; CDF types which can represent this data; number of elements required (i.e. length of longest string) @rtype: 3-tuple of lists ([int], [ctypes.c_long], [int]) @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) dims = d.shape elements = 1 types = [] _Hyperslice.check_well_formed(d) if d.dtype.kind in ('S', 'U'): #it's a string types = [const.CDF_CHAR, const.CDF_UCHAR] elements = d.dtype.itemsize if d.dtype.kind == 'U': #UTF-8 uses 4 bytes per elements //= 4 elif d.size and hasattr(numpy.ma.getdata(d).flat[0], 'microsecond'): if max((dt.microsecond % 1000 for dt in d.flat)) > 0: types = [const.CDF_EPOCH16, const.CDF_EPOCH, const.CDF_TIME_TT2000] else: types = [const.CDF_EPOCH, const.CDF_EPOCH16, const.CDF_TIME_TT2000] if backward: del types[types.index(const.CDF_EPOCH16)] del types[-1] elif not lib.supports_int8: del types[-1] elif d is data or isinstance(data, numpy.generic): #numpy array came in, use its type (or byte-swapped) types = [k for k in lib.numpytypedict if (lib.numpytypedict[k] == d.dtype or lib.numpytypedict[k] == d.dtype.newbyteorder()) and not k in lib.timetypes] if (not lib.supports_int8 or backward) \ and const.CDF_INT8.value in types: del types[types.index(const.CDF_INT8.value)] #Maintain priority to match the ordered lists below: #float/double (44, 45) before real (21/22), and #byte (41) before int (1) before char (51). So hack. #Consider making typedict an ordered dict once 2.6 is dead. types.sort(key=lambda x: x % 50, reverse=True) if not types: #not a numpy array, or can't parse its type if d.dtype.kind == 'O': #Object. 
Try to make it numeric #Can't do safe casting from Object, so try and compare #Basically try most restrictive to least restrictive trytypes = (numpy.uint64, numpy.int64, numpy.float64) for t in trytypes: try: newd = d.astype(dtype=t) except: #Failure to cast, try next type continue if (newd == d).all(): #Values preserved, use this type d = newd #Continue with normal guessing, as if a list break else: #fell through without a match raise ValueError( 'Cannot convert generic objects to CDF type.') if d.dtype.kind in ('i', 'u'): #integer minval = numpy.min(d) maxval = numpy.max(d) if minval < 0: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_INT2, const.CDF_INT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 15, 2 ** 31, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] else: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_UINT1, const.CDF_INT2, const.CDF_UINT2, const.CDF_INT4, const.CDF_UINT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 8, 2 ** 15, 2 ** 16, 2 ** 31, 2 ** 32, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] types = [t for (t, c) in zip(types, cutoffs) if c > maxval and (minval >= 0 or minval >= -c)] if (not lib.supports_int8 or backward) \ and const.CDF_INT8 in types: del types[types.index(const.CDF_INT8)] else: #float if dims is (): if d != 0 and (abs(d) > 1.7e38 or abs(d) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] else: absolutes = numpy.abs(d[d != 0]) if len(absolutes) > 0 and \ (numpy.max(absolutes) > 1.7e38 or numpy.min(absolutes) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] types = [t.value if hasattr(t, 'value') else t for t in types] return (dims, types, elements) @staticmethod def reorder(seq): """Reorders seq to switch array majority Used to take an array of subscripts between row and column majority. First element is not touched, being the record number. @param seq: a sequence of *subscripts* @type seq: sequence of integers @return: seq with all but element 0 reversed in order @rtype: sequence of integers """ return numpy.concatenate((seq[0:1], numpy.flipud(seq)[:-1])) @staticmethod def convert_range(start, stop, step, size): """Converts a start/stop/step range to start/count/interval (i.e. changes from Python-style slice to CDF-style) @param start: index to start a slice at, may be none or negative @type start: int @param stop: index at end of slice (one-past, standard Python), may be none or negative @type stop: int @param step: interval for stepping through stlice @type step: int @param size: size of list to slice @type size: int @return: (start, count, interval, rev) where: 1. start is the start index, normalized to be within the size of the list and negatives handled 2. count is the number of records in the slice, guaranteed to stop before the end 3. interval is the skip between records 4. rev indicates whether the sequence should be reversed @rtype: (int, int, int, boolean) """ (start, stop, step) = slice(start, stop, step).indices(size) if step < 0: step *= -1 count = int((start - stop + step - 1) / step) start = start - (count - 1) * step rev = True else: count = int((stop - start + step - 1) / step) rev = False if count < 0: count = 0 start = 0 return (start, count, step, rev) class Attr(MutableSequence): """An attribute, g or z, for a CDF .. 
warning:: This class should not be used directly, but only in its subclasses, :class:`gAttr` and :class:`zAttr`. The methods listed here are safe to use in the subclasses. Represents a CDF attribute, providing access to the Entries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. An introduction to CDF attributes can be found in section 2.4 of the CDF user's guide. Each element of the list is a single Entry of the appropriate type. The index to the elements is the Entry number. Multi-dimensional slicing is *not* supported; an Entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry .. autosummary:: ~Attr.append ~Attr.has_entry ~Attr.insert ~Attr.max_idx ~Attr.new ~Attr.number ~Attr.rename ~Attr.type .. automethod:: append .. automethod:: has_entry .. automethod:: insert .. automethod:: max_idx .. automethod:: new .. automethod:: number .. automethod:: rename .. automethod:: type """ def __init__(self, cdf_file, attr_name, create=False): """Initialize this attribute @param cdf_file: CDF file containing this attribute @type cdf_file: :py:class:`pycdf.CDF` @param attr_name: Name of this attribute @type attr_name: str @param create: True to create attribute, False to look up existing. @type create: bool """ self._cdf_file = cdf_file self._raw = False if isinstance(attr_name, str_classes): try: self._name = attr_name.encode('ascii') except AttributeError: self._name = attr_name attrno = ctypes.c_long() if create: self._cdf_file._call(const.CREATE_, const.ATTR_, self._name, self.SCOPE, ctypes.byref(attrno)) self._cdf_file.add_attr_to_cache( self._name, attrno.value, self.SCOPE == const.GLOBAL_SCOPE) else: #Ensure exists, and populate cache. See scope note below attrno, scope = self._cdf_file.attr_num(self._name) else: name = ctypes.create_string_buffer(const.CDF_ATTR_NAME_LEN256 + 1) scope = ctypes.c_long(0) self._cdf_file._call(const.SELECT_, const.ATTR_, ctypes.c_long(attr_name)) #Because it's possible to create a gAttr Python objecting #referencing an Attribute with variable scope, and vice-versa, #do NOT assume the scope matches #(Higher level code checks for that being a bad thing.) self._cdf_file._call( const.GET_, const.ATTR_NAME_, name, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) self._name = name.value.rstrip() if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) self._cdf_file.add_attr_to_cache(self._name, attr_name, scope) def __getitem__(self, key): """Return a slice of Entries. Because Attributes may be sparse, a multi-element slice will return None for those elements which do not have associated Entries. @param key: index or range of Entry number to return @type key: slice or int @return: a list of entries, appropriate type. @raise IndexError: if L{key} is an int and that Entry number does not exist. 
""" if key is Ellipsis: key = slice(None, None, None) if hasattr(key, 'indices'): idx = range(*key.indices(self.max_idx() + 1)) return [self._get_entry(i) if self.has_entry(i) else None for i in idx] else: if self.has_entry(key): return self._get_entry(key) else: raise IndexError('list index ' + str(key) + ' out of range.') def _check_other_entries(self, types): """Try to get the type of this entry from others in the Attribute For zAttrs, checks if all other Entries are the same type, and at least one doesn't match its zVar, i.e. Entry type dominates (otherwise assumption is the Var type dominates). For gAttrs, checks all other Entries, and gives priority to the one that's earliest in the possible type list and exists in other Entries. This is only one component of Entry type guessing! :param list types: CDF types that are candidates (match the data) :return: The type discerned from other Entries, or None """ if self.ENTRY_ == const.zENTRY_: #If everything else is the same entry type, #and one is not the same as its var, probably #all entries should be of that type cand_et = None #The Entry type that might work one_var_diff = False #One Var has a type different from Entry for num in range(self.max_idx() + 1): if not self.has_entry(num): continue vartype = self._cdf_file[num].type() entrytype = self.type(num) if vartype != entrytype: one_var_diff = True if cand_et is None: if not entrytype in types: return None #One var has Entry with "impossible" type cand_et = entrytype elif cand_et != entrytype: return None #Two vars have Entries with different types if one_var_diff and cand_et is not None: return cand_et else: # Of those types which exist in other entries, # find the one which is earliest # in types, i.e. the preferred type entrytypes = [self.type(num) for num in range(self.max_idx() + 1) if self.has_entry(num)] entrytypes = [et for et in entrytypes if et in types] if entrytypes: return types[ min([types.index(et) for et in entrytypes])] return None def __setitem__(self, key, data): """Set a slice of Entries. @param key: index or range of Entry numbers to set @type key: slice or int @param data: the data to set these entries to. Normally each entry should be a sequence; if a scalar is provided, it is treated as a single-element list. @type data: scalar or list @raise ValueError: if size of {data} does not match size of L{key} @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. 
""" if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): #Single value, promote everything a dimension idx = (key, key + 1, 1) data = [data] else: idx = key.indices(self.max_idx() + 1) if key.step is None or key.step > 0: #Iterating forward, extend slice to match data if len(data) > len(range(*idx)): idx = (idx[0], idx[0] + idx[2] * len(data), idx[2]) #get, and check, types and sizes for all data #checks first so don't have error after changing half the Entries data_idx = -1 typelist = [] for i in range(*idx): data_idx += 1 if data_idx >= len(data): continue datum = data[data_idx] if datum is None: typelist[i] = (None, None, None) continue (dims, types, elements) = _Hyperslice.types( datum, backward=self._cdf_file.backward) if len(types) <= 0: raise ValueError('Cannot find a matching CDF type.') if len(dims) > 1: raise ValueError('Entries must be scalar or 1D.') elif len(dims) == 1 and isinstance(datum[0], str_classes): raise ValueError('Entry strings must be scalar.') entry_type = None if self.has_entry(i): #If the entry already exists, match its type entry_type = self.type(i) if not entry_type in types: entry_type = None if entry_type is None: #Check other entries for this attribute entry_type = self._check_other_entries(types) if entry_type is None and self.ENTRY_ == const.zENTRY_: #Fall back to zVar type vartype = self._cdf_file[i].type() if vartype in types: entry_type = vartype else: entry_type = types[0] elif entry_type is None: entry_type = types[0] if not entry_type in lib.numpytypedict: raise ValueError('Cannot find a matching numpy type.') typelist.append((dims, entry_type, elements)) data_idx = -1 for i in range(*idx): data_idx += 1 if data_idx >= len(data) or data[data_idx] is None: if self.has_entry(i): del self[i] continue datum = data[data_idx] (dims, entry_type, elements) = typelist[data_idx] self._write_entry(i, datum, entry_type, dims, elements) def __delitem__(self, key): """Delete a slice of Entries. @param key: index or range of Entry numbers to delete @type key: slice or int @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. """ if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): idx = (key, key + 1, 1) else: idx = key.indices(self.max_idx() + 1) for i in range(*idx): self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(i), const.DELETE_, self.ENTRY_) def __iter__(self, current=0): """Iterates over all entries in this Attribute Returns data from one entry at a time until reaches the end. @note: Returned in entry-number order. """ while current <= self.max_idx(): if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current += 1 def __reversed__(self, current=None): """Iterates over all entries in this Attribute Returns data from one entry at a time, starting at end and going to beginning. @note: Returned in entry-number order. """ if current is None: current = self.max_idx() while current >= 0: if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current -= 1 def __len__(self): """Number of Entries for this Attr. NOT same as max Entry number. 
@return: Number of Entries @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_NUMENTRIES_, ctypes.byref(count)) return count.value def __repr__(self): """Returns representation of an attribute Cannot return anything that can be eval'd to create a copy of the attribtute, so just wrap the informal representation in angle brackets. @return: all the data in this attribute @rtype: str """ return '<\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute This is an 'informal' representation in that it cannot be evaluated directly to create an L{Attr}. @return: all the data in this attribute @rtype: str """ if self._cdf_file._opened: return '\n'.join([str(item) for item in self]) else: if isinstance(self._name, str): return 'Attribute "{0}" in closed CDF {1}'.format( self._name, self._cdf_file.pathname) else: return 'Attribute "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self._cdf_file.pathname.decode('ascii')) def insert(self, index, data): """Insert an entry at a particular number Inserts entry at particular number while moving all subsequent entries to one entry number later. Does not close gaps. Parameters ========== index : int index where to put the new entry data : data for the new entry """ max_entry = self.max_idx() if index > max_entry: #Easy case self[index] = data return for i in range(max_entry, index - 1, -1): if self.has_entry(i+1): self.__delitem__(i+1) if self.has_entry(i): self.new(self.__getitem__(i), type=self.type(i), number=i+1) self[index] = data def append(self, data): """Add an entry to end of attribute Puts entry after last defined entry (does not fill gaps) Parameters ========== data : data for the new entry """ self[self.max_idx() + 1] = data def _call(self, *args, **kwargs): """Select this CDF and Attr and call the CDF internal interface @param args: Passed directly to the CDF library interface. @type args: various, see :py:mod:`ctypes`. @return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self._cdf_file._call( const.SELECT_, const.ATTR_, ctypes.c_long(self._cdf_file.attr_num(self._name)[0]), *args, **kwargs) def _entry_len(self, number): """Number of elements in an Entry @param number: number of Entry @type number: int @return: number of elements @rtype: int """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') count = ctypes.c_long(0) self._call( const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_NUMELEMS_, ctypes.byref(count)) return count.value def type(self, number, new_type=None): """Find or change the CDF type of a particular Entry number Parameters ========== number : int number of Entry to check or change Other Parameters ================ new_type type to change this Entry to, from :mod:`~pycdf.const`. Omit to only check type. Returns ======= out : int CDF variable type, see :mod:`~pycdf.const` Notes ===== If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) size = ctypes.c_long(self._entry_len(number)) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATASPEC_, new_type, size, ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') cdftype = ctypes.c_long(0) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATATYPE_, ctypes.byref(cdftype), ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') return cdftype.value def has_entry(self, number): """Check if this attribute has a particular Entry number Parameters ========== number : int number of Entry to check or change Returns ======= out : bool True if ``number`` is a valid entry number; False if not """ status = self._call(const.CONFIRM_, self.ENTRY_EXISTENCE_, ctypes.c_long(number), ignore=(const.NO_SUCH_ENTRY, )) return not status == const.NO_SUCH_ENTRY def max_idx(self): """Maximum index of Entries for this Attr Returns ======= out : int maximum Entry number """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_MAXENTRY_, ctypes.byref(count)) return count.value def new(self, data, type=None, number=None): """Create a new Entry in this Attribute .. note:: If ``number`` is provided and an Entry with that number already exists, it will be overwritten. Parameters ========== data data to put in the Entry Other Parameters ================ type : int type of the new Entry, from :mod:`~pycdf.const` (otherwise guessed from ``data``) number : int Entry number to write, default is lowest available number. """ if number is None: number = 0 while self.has_entry(number): number += 1 (dims, types, elements) = _Hyperslice.types( data, backward=self._cdf_file.backward) if type is None: #Guess based on other entries type = self._check_other_entries(types) if type is None and self.ENTRY_ == const.zENTRY_: #Try to match variable type vartype = self._cdf_file[number].type() if vartype in types: type = vartype if type is None: type = types[0] elif hasattr(type, 'value'): type = type.value self._write_entry(number, data, type, dims, elements) def number(self): """Find the attribute number for this attribute Returns ======= out : int attribute number """ no = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.ATTR_NUMBER_, self._name, ctypes.byref(no)) return no.value def global_scope(self): """Determine scope of this attribute. Returns ======= out : bool True if global (i.e. gAttr), False if zAttr """ return self._cdf_file.attr_num(self._name)[1] def rename(self, new_name): """Rename this attribute Renaming a zAttribute renames it for *all* zVariables in this CDF! 
Parameters ========== new_name : str the new name of the attribute """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_ATTR_NAME_LEN256: raise CDFError(const.BAD_ATTR_NAME) self._call(const.PUT_, const.ATTR_NAME_, enc_name) self._cdf_file.add_attr_to_cache( enc_name, *self._cdf_file.attr_num(self._name)) #still in cache del self._cdf_file._attr_info[self._name] self._name = enc_name def _get_entry(self, number): """Read an Entry associated with this L{Attr} @param number: number of Entry to return @type number: int @return: data from entry numbered L{number} @rtype: list or str """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') #Make a big enough buffer length = self._entry_len(number) cdftype = self.type(number) if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): buff = numpy.empty((), 'S{0}'.format(length), order='C') else: if not cdftype in lib.numpytypedict: raise CDFError(const.BAD_DATA_TYPE) buff = numpy.empty((length,), lib.numpytypedict[cdftype], order='C') buff = numpy.require(buff, requirements=('C', 'A', 'W')) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATA_, buff.ctypes.data_as(ctypes.c_void_p)) #decode if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): if str == bytes or self._raw: #Py2k, leave as bytes result = bytes(buff) else: #Py3k, make unicode result = str(numpy.char.array(buff).decode()) else: if not self._raw: if cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(buff) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(buff) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(buff) else: result = buff else: result = buff if length == 1: result = result[0] return result def _write_entry(self, number, data, cdf_type, dims, elements): """Write an Entry to this Attr. @param number: number of Entry to write @type number: int @param data: data to write @param cdf_type: the CDF type to write, from :py:mod:`pycdf.const` @param dims: dimensions of L{data} @type dims: list @param elements: number of elements in L{data}, 1 unless it is a string @type elements: int """ if len(dims) == 0: n_write = 1 else: n_write = dims[0] if cdf_type in (const.CDF_CHAR.value, const.CDF_UCHAR.value): data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.dtype('S' + str(elements))) n_write = elements elif cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data), except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) elif cdf_type in lib.numpytypedict: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=lib.numpytypedict[cdf_type]) else: raise CDFError(const.BAD_DATA_TYPE) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATA_, ctypes.c_long(cdf_type), ctypes.c_long(n_write), data.ctypes.data_as(ctypes.c_void_p)) def _delete(self): """Delete this Attribute Also deletes all Entries associated with it. 
""" self._call(const.DELETE_, const.ATTR_) self._cdf_file.clear_attr_from_cache(self._name) self._name = None class zAttr(Attr): """zAttribute for zVariables within a CDF. .. warning:: Because zAttributes are shared across all variables in a CDF, directly manipulating them may have unexpected consequences. It is safest to operate on zEntries via :class:`zAttrList`. .. note:: When accessing a zAttr, pyCDF exposes only the zEntry corresponding to the associated zVariable. See Also ======== :class:`Attr` """ ENTRY_ = const.zENTRY_ ENTRY_DATA_ = const.zENTRY_DATA_ SCOPE = const.VARIABLE_SCOPE ENTRY_EXISTENCE_ = const.zENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMzENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXzENTRY_ ENTRY_NUMELEMS_ = const.zENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.zENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.zENTRY_DATASPEC_ def insert(self, index, data): """Insert entry at particular index number Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError def append(self, index, data): """Add entry to end of attribute list Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError class gAttr(Attr): """Global Attribute for a CDF Represents a CDF attribute, providing access to the gEntries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. Normally accessed by providing a key to a :class:`gAttrList`: >>> attribute = cdffile.attrs['attribute_name'] >>> first_gentry = attribute[0] Each element of the list is a single gEntry of the appropriate type. The index to the elements is the gEntry number. A gEntry may be either a single string or a 1D array of numerical type. Entries of numerical type (everything but CDF_CHAR and CDF_UCHAR) with a single element are returned as scalars; multiple-element entries are returned as a list. No provision is made for accessing below the entry level; the whole list is returned at once (but Python's slicing syntax can be used to extract individual items from that list.) Multi-dimensional slicing is *not* supported; an entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry gEntries are *not* necessarily contiguous; a gAttribute may have an entry 0 and entry 2 without an entry 1. :meth:`~Attr.len` will return the *number* of gEntries; use :meth:`~Attr.max_idx` to find the highest defined gEntry number and :meth:`~Attr.has_entry` to determine if a particular gEntry number exists. Iterating over all entries is also supported:: >>> entrylist = [entry for entry in attribute] Deleting gEntries will leave a "hole": >>> attribute[0:3] = [1, 2, 3] >>> del attribute[1] >>> attribute.has_entry(1) False >>> attribute.has_entry(2) True >>> print attribute[0:3] [1, None, 3] Multi-element slices over nonexistent gEntries will return ``None`` where no entry exists. Single-element indices for nonexistent gEntries will raise ``IndexError``. Assigning ``None`` to a gEntry will delete it. 
When assigning to a gEntry, the type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing gEntry of the same number in this gAttribute #. other gEntries in this gAttribute #. data-matching constraints described in :meth:`CDF.new`. See Also ======== :class:`Attr` """ ENTRY_ = const.gENTRY_ ENTRY_DATA_ = const.gENTRY_DATA_ SCOPE = const.GLOBAL_SCOPE ENTRY_EXISTENCE_ = const.gENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMgENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXgENTRY_ ENTRY_NUMELEMS_ = const.gENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.gENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.gENTRY_DATASPEC_ class AttrList(MutableMapping): """Object representing a list of attributes. .. warning:: This class should not be used directly, but only via its subclasses, :class:`gAttrList` and :class:`zAttrList`. Methods listed here are safe to use from the subclasses. .. autosummary:: ~AttrList.clone ~AttrList.copy ~AttrList.from_dict ~AttrList.new ~AttrList.rename .. automethod:: clone .. automethod:: copy .. automethod:: from_dict .. automethod:: new .. automethod:: rename """ def __init__(self, cdf_file, special_entry=None): """Initialize the attribute collection @param cdf_file: CDF these attributes are in @type cdf_file: :py:class:`pycdf.CDF` @param special_entry: callable which returns a "special" entry number, used to limit results for zAttrs to those which match the zVar (i.e. the var number) @type special_entry: callable """ self._cdf_file = cdf_file self.special_entry = special_entry def __getitem__(self, name): """Find an Attribute by name @param name: name of the Attribute to return @type name: str @return: attribute named L{name} @rtype: L{Attr} @raise KeyError: if there is no attribute named L{name} @raise CDFError: other errors in CDF library """ try: attrib = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attrib.global_scope() != self.global_scope: raise KeyError(name + ': no ' + self.attr_name + ' by that name.') return attrib def __setitem__(self, name, data): """Create an Attribute or change its entries @param name: name of Attribute to change @type name: str @param data: Entries to populate this Attribute with. Any existing Entries will be deleted! Another C{Attr} may be specified, in which case all its entries are copied. @type data: scalar, list, or L{Attr} """ if isinstance(data, AttrList): if name in self: del self[name] attr = self._get_or_create(name) for entryno in range(data.max_idx()): if data.has_entry(entryno): attr.new(data[entryno], data.type(entryno), entryno) else: attr = self._get_or_create(name) if isinstance(data, str_classes): data = [data] else: try: junk = len(data) except TypeError: data = [data] attr[:] = data del attr[len(data):] def __delitem__(self, name): """Delete an Attribute (and all its entries) @param name: name of Attribute to delete @type name: str """ try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) attr._delete() def __iter__(self, current=0): """Iterates over all Attr in this CDF or variable Returns name of one L{Attr} at a time until reaches the end. @note: Returned in number order. 
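Example (illustrative):

>>> names = [name for name in cdffile.attrs]   #attribute names, in number order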
""" count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) while current < count.value: candidate = self.AttrType(self._cdf_file, current) if candidate.global_scope() == self.global_scope: if self.special_entry is None or \ candidate.has_entry(self.special_entry()): if str == bytes: value = yield(candidate._name) else: value = yield(candidate._name.decode()) if value != None: current = self[value].number() current += 1 def __repr__(self): """Returns representation of attribute list Cannot return anything that can be eval'd to create a copy of the list, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<' + self.__class__.__name__ + ':\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute list This is an 'informal' representation in that it cannot be evaluated directly to create an L{AttrList}. @return: all the data in this list of attributes @rtype: str """ if self._cdf_file._opened: return '\n'.join([key + ': ' + ( ('\n' + ' ' * (len(key) + 2)).join( [str(value[i]) + ' [' + lib.cdftypenames[value.type(i)] + ']' for i in range(value.max_idx() + 1) if value.has_entry(i)]) if isinstance(value, Attr) else str(value) + ' [' + lib.cdftypenames[self.type(key)] + ']' ) for (key, value) in sorted(self.items())]) else: if isinstance(self._cdf_file.pathname, str): return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname) else: return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname.decode('ascii')) def clone(self, master, name=None, new_name=None): """ Clones another attribute list, or one attribute from it, into this list. Parameters ========== master : AttrList the attribute list to copy from. This can be any dict-like object. Other Parameters ================ name : str (optional) name of attribute to clone (default: clone entire list) new_name : str (optional) name of the new attribute, default ``name`` """ if name is None: self._clone_list(master) else: self._clone_attr(master, name, new_name) def copy(self): """ Create a copy of this attribute list Returns ======= out : dict copy of the entries for all attributes in this list """ return dict((key, value[:] if isinstance(value, Attr) else value) for (key, value) in self.items()) def new(self, name, data=None, type=None): """ Create a new Attr in this AttrList Parameters ========== name : str name of the new Attribute Other Parameters ================ data data to put into the first entry in the new Attribute type CDF type of the first entry from :mod:`~pycdf.const`. Only used if data are specified. Raises ====== KeyError : if the name already exists in this list """ if name in self: raise KeyError(name + ' already exists.') #A zAttr without an Entry in this zVar will be a "get" not "create" attr = self._get_or_create(name) if data is not None: if self.special_entry is None: attr.new(data, type) else: attr.new(data, type, self.special_entry()) def rename(self, old_name, new_name): """ Rename an attribute in this list Renaming a zAttribute renames it for *all* zVariables in this CDF! Parameters ========== old_name : str the current name of the attribute new_name : str the new name of the attribute """ AttrList.__getitem__(self, old_name).rename(new_name) def from_dict(self, in_dict): """ Fill this list of attributes from a dictionary .. deprecated:: 0.1.5 Use :meth:`~pycdf.AttrList.clone` instead; it supports cloning from dictionaries. 
Parameters ========== in_dict : dict Attribute list is populated entirely from this dictionary; all existing attributes are deleted. """ warnings.warn("from_dict is deprecated and will be removed. Use clone.", DeprecationWarning) for k in in_dict: self[k] = in_dict[k] for k in list(self): if not k in in_dict: del self[k] def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{AttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name self[new_name] = master[name] def _clone_list(self, master): """Clones this attribute list from another @param master: the attribute list to copy from @type master: L{AttrList} """ for name in master: self._clone_attr(master, name) for name in list(self): #Can't iterate over a list we're changing if not name in master: del self[name] def _get_or_create(self, name): """Retrieve L{Attr} or create it if it doesn't exist @param name: name of the attribute to look up or create @type name: str @return: attribute with this name @rtype: L{Attr} """ attr = None try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status != const.NO_SUCH_ATTR: raise if attr is None: attr = self.AttrType(self._cdf_file, name, True) elif attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) return attr class gAttrList(AttrList): """ Object representing *all* the gAttributes in a CDF. Normally accessed as an attribute of an open :class:`CDF`: >>> global_attribs = cdffile.attrs Appears as a dictionary: keys are attribute names; each value is an attribute represented by a :class:`gAttr` object. To access the global attribute TEXT: >>> text_attr = cdffile.attrs['TEXT'] See Also ======== :class:`AttrList` """ AttrType = gAttr attr_name = 'gAttribute' global_scope = True def __len__(self): """ Number of gAttributes in this CDF Returns ======= out : int number of gAttributes in the CDF """ count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMgATTRS_, ctypes.byref(count)) return count.value class zAttrList(AttrList): """Object representing *all* the zAttributes in a zVariable. Normally accessed as an attribute of a :class:`Var` in an open CDF: >>> epoch_attribs = cdffile['Epoch'].attrs Appears as a dictionary: keys are attribute names, values are the value of the zEntry associated with the appropriate zVariable. Each vAttribute in a CDF may only have a *single* entry associated with each variable. The entry may be a string, a single numerical value, or a series of numerical values. Entries with multiple values are returned as an entire list; direct access to the individual elements is not possible. Example: finding the first dependency of (ISTP-compliant) variable ``Flux``: >>> print cdffile['Flux'].attrs['DEPEND_0'] zAttributes are shared among zVariables, one zEntry allowed per zVariable. (pyCDF hides this detail.) Deleting the last zEntry for a zAttribute will delete the underlying zAttribute. zEntries are created and destroyed by the usual dict methods on the zAttrlist: >>> epoch_attribs['new_entry'] = [1, 2, 4] #assign a list to new zEntry >>> del epoch_attribs['new_entry'] #delete the zEntry The type of the zEntry is guessed from data provided. 
The type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing zEntry corresponding to this zVar #. other zEntries in this zAttribute #. the type of this zVar #. data-matching constraints described in :py:meth:`CDF.new` See Also ======== :class:`AttrList` """ AttrType = zAttr attr_name = 'zAttribute' global_scope = False def __init__(self, zvar): """Initialize the attribute collection @param zvar: zVariable these attributes are in @param zvar: :py:class:`pycdf.Var` """ super(zAttrList, self).__init__(zvar.cdf_file, zvar._num) self._zvar = zvar def __getitem__(self, name): """Find an zEntry by name @param name: name of the zAttribute to return @type name: str @return: attribute named L{name} @rtype: L{zAttr} @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if attrib.has_entry(zvar_num): attrib._raw = self._zvar._raw return attrib[zvar_num] else: raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) def __delitem__(self, name): """Delete an zEntry by name @param name: name of the zEntry to delete @type name: str @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library @note: If this is the only remaining entry, the Attribute will be deleted. """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(str(name) + ': no such attribute for variable ' + str(self._zvar._name)) del attrib[zvar_num] if len(attrib) == 0: attrib._delete() def __setitem__(self, name, data): """Sets a zEntry by name The type of the zEntry is guessed from L{data}. The type is chosen to match the data; subject to that constraint, it will try to match (in order): 1. existing zEntry corresponding to this zVar 2. other zEntries in this zAttribute 3. the type of this zVar 4. data-matching constraints described in L{_Hyperslice.types} @param name: name of zAttribute; zEntry for this zVariable will be set in zAttribute by this name @type name: str @raise CDFError: errors in CDF library @raise ValueError: if unable to find a valid CDF type matching L{data}, or if L{data} is the wrong dimensions. """ try: attr = super(zAttrList, self).__getitem__(name) except KeyError: attr = zAttr(self._cdf_file, name, True) attr._raw = self._zvar._raw attr[self._zvar._num()] = data def __len__(self): """Number of zAttributes in this variable @return: number of zAttributes in the CDF which have entries for this variable. @rtype: int """ length = 0 count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) current = 0 while current < count.value: candidate = zAttr(self._cdf_file, current) if not candidate.global_scope(): if candidate.has_entry(self._zvar._num()): length += 1 current += 1 return length def type(self, name, new_type=None): """Find or change the CDF type of a zEntry in this zVar @param name: name of the zAttr to check or change @type name: str @param new_type: type to change it to, see :py:mod:`pycdf.const` @type new_type: ctypes.c_long @return: CDF variable type, see :py:mod:`pycdf.const` @rtype: int @note: If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) return attrib.type(zvar_num, new_type) def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{zAttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name if new_name in self: del self[new_name] self.new(new_name, master[name], master.type(name) if hasattr(master, 'type') else None)
epoch_to_epoch16
Converts a CDF EPOCH to a CDF EPOCH16 value

Parameters
==========
epoch : double
    EPOCH to convert. Lists and numpy arrays are acceptable.

Returns
=======
out : (double, double)
    EPOCH16 corresponding to epoch
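Usage sketch (illustrative; ``lib`` is the module-level Library instance and the input value is arbitrary):

>>> ep = lib.datetime_to_epoch(datetime.datetime(2010, 1, 1))
>>> lib.epoch_to_epoch16(ep)   #the equivalent EPOCH16, a pair of doubles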
#!/usr/bin/env python # -*- coding: utf-8 -*- """ das developers note: This a is modification of the original SpacePy pycdf package. All refereneces to the greater spacepy package have been removed to create a small standalone module. --cwp 2018-10-18 The libcdf.so location code has been changed to find the version installed in anaconda. --cwp 2020-04-06 This package provides a Python interface to the Common Data Format (CDF) library used for many NASA missions, available at http://cdf.gsfc.nasa.gov/. It is targeted at Python 2.6+ and should work without change on either Python 2 or Python 3. The interface is intended to be 'pythonic' rather than reproducing the C interface. To open or close a CDF and access its variables, see the :class:`CDF` class. Accessing data within the variables is via the :class:`Var` class. The :data:`lib` object provides access to some routines that affect the functionality of the library in general. The :mod:`~pycdf.const` module contains constants useful for accessing the underlying library. Authors: Jon Niehof Institution: University of New Hampshire Contact: [email protected] Copyright 2010-2015 Los Alamos National Security, LLC. """ __contact__ = 'Jon Niehof, [email protected]' try: from collections.abc import MutableMapping, MutableSequence except ImportError: from collections import MutableMapping, MutableSequence import ctypes import ctypes.util import datetime import operator import os import os.path import shutil import sys import tempfile import warnings import weakref import numpy import numpy.ma #Import const AFTER library loaded, so failed load doesn't leave half-imported #from . import const try: str_classes = (str, bytes, unicode) except NameError: str_classes = (str, bytes) class Library(object): """ Abstraction of the base CDF C library and its state. Not normally intended for end-user use. An instance of this class is created at package load time as the :data:`~pycdf.lib` variable, providing access to the underlying C library if necessary. The CDF library itself is described in section 2.1 of the CDF user's guide, as well as the CDF C reference manual. Calling the C library directly requires knowledge of :mod:`ctypes`. Instantiating this object loads the C library, see :doc:`/pycdf` docs for details. .. autosummary:: ~Library.call ~Library.check_status ~Library.datetime_to_epoch ~Library.datetime_to_epoch16 ~Library.datetime_to_tt2000 ~Library.epoch_to_datetime ~Library.epoch_to_epoch16 ~Library.epoch_to_num ~Library.epoch_to_tt2000 ~Library.epoch16_to_datetime ~Library.epoch16_to_epoch ~Library.epoch16_to_tt2000 ~Library.set_backward supports_int8 ~Library.tt2000_to_datetime ~Library.tt2000_to_epoch ~Library.tt2000_to_epoch16 v_datetime_to_epoch v_datetime_to_epoch16 v_datetime_to_tt2000 v_epoch_to_datetime v_epoch_to_tt2000 v_epoch16_to_datetime v_epoch16_to_tt2000 v_tt2000_to_datetime v_tt2000_to_epoch v_tt2000_to_epoch16 libpath version .. automethod:: call .. automethod:: check_status .. automethod:: datetime_to_epoch .. automethod:: datetime_to_epoch16 .. automethod:: datetime_to_tt2000 .. automethod:: epoch_to_datetime .. automethod:: epoch_to_epoch16 .. automethod:: epoch_to_num .. automethod:: epoch_to_tt2000 .. automethod:: epoch16_to_datetime .. automethod:: epoch16_to_epoch .. automethod:: epoch16_to_tt2000 .. automethod:: set_backward .. attribute:: supports_int8 True if this library supports INT8 and TIME_TT2000 types; else False. .. automethod:: tt2000_to_datetime .. automethod:: tt2000_to_epoch .. 
automethod:: tt2000_to_epoch16 .. method:: v_datetime_to_epoch(datetime) A vectorized version of :meth:`datetime_to_epoch` which takes a numpy array of datetimes as input and returns an array of epochs. .. method:: v_datetime_to_epoch16(datetime) A vectorized version of :meth:`datetime_to_epoch16` which takes a numpy array of datetimes as input and returns an array of epoch16. .. method:: v_datetime_to_tt2000(datetime) A vectorized version of :meth:`datetime_to_tt2000` which takes a numpy array of datetimes as input and returns an array of TT2000. .. method:: v_epoch_to_datetime(epoch) A vectorized version of :meth:`epoch_to_datetime` which takes a numpy array of epochs as input and returns an array of datetimes. .. method:: v_epoch_to_tt2000(epoch) A vectorized version of :meth:`epoch_to_tt2000` which takes a numpy array of epochs as input and returns an array of tt2000s. .. method:: v_epoch16_to_datetime(epoch0, epoch1) A vectorized version of :meth:`epoch16_to_datetime` which takes a numpy array of epoch16 as input and returns an array of datetimes. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_epoch16_to_tt2000(epoch16) A vectorized version of :meth:`epoch16_to_tt2000` which takes a numpy array of epoch16 as input and returns an array of tt2000s. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_tt2000_to_datetime(tt2000) A vectorized version of :meth:`tt2000_to_datetime` which takes a numpy array of tt2000 as input and returns an array of datetimes. .. method:: v_tt2000_to_epoch(tt2000) A vectorized version of :meth:`tt2000_to_epoch` which takes a numpy array of tt2000 as input and returns an array of epochs. .. method:: v_tt2000_to_epoch16(tt2000) A vectorized version of :meth:`tt2000_to_epoch16` which takes a numpy array of tt2000 as input and returns an array of epoch16. .. attribute:: libpath The path where pycdf found the CDF C library, potentially useful in debugging. If this contains just the name of a file (with no path information), then the system linker found the library for pycdf. On Linux, ``ldconfig -p`` may be useful for displaying the system's library resolution. .. attribute:: version Version of the CDF library, (version, release, increment, subincrement) """ def __init__(self, libpath=None, library=None): """Load the CDF C library. Searches for the library in the order: 1. Appropriately-named file in CDF_LIB 2. Appropriately-named file in CDF_BASE 3. Standard library search path @raise CDFError: BAD_DATA_TYPE if can't map types properly """ if not 'CDF_TMP' in os.environ: os.environ['CDF_TMP'] = tempfile.gettempdir() if not library: if not libpath: self.libpath, self._library = self._find_lib() if self._library is None: raise Exception(( 'Cannot load CDF C library; checked {0}. 
' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(self.libpath))) else: self._library = ctypes.CDLL(libpath) self.libpath = libpath else: self._library = library self.libpath = libpath self._library.CDFlib.restype = ctypes.c_long #commonly used, so set it up here self._library.EPOCHbreakdown.restype = ctypes.c_long self._library.computeEPOCH.restype = ctypes.c_double self._library.computeEPOCH.argtypes = [ctypes.c_long] * 7 self._library.computeEPOCH16.restype = ctypes.c_double self._library.computeEPOCH16.argtypes = [ctypes.c_long] * 10 + \ [ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDFsetFileBackward'): self._library.CDFsetFileBackward.restype = None self._library.CDFsetFileBackward.argtypes = [ctypes.c_long] #Map old name to the 3.7.1+ name if not hasattr(self._library, 'computeTT2000') \ and hasattr(self._library, 'CDF_TT2000_from_UTC_parts'): self._library.computeTT2000 \ = self._library.CDF_TT2000_from_UTC_parts if hasattr(self._library, 'computeTT2000'): self._library.computeTT2000.restype = ctypes.c_longlong self._library.computeTT2000.argtypes = \ [ctypes.c_double] *9 #Map old name to the 3.7.1+ name if not hasattr(self._library, 'breakdownTT2000') \ and hasattr(self._library, 'CDF_TT2000_to_UTC_parts'): self._library.breakdownTT2000 \ = self._library.CDF_TT2000_to_UTC_parts if hasattr(self._library, 'breakdownTT2000'): self._library.breakdownTT2000.restype = None self._library.breakdownTT2000.argtypes = \ [ctypes.c_longlong] + [ctypes.POINTER(ctypes.c_double)] * 9 if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH'): self._library.CDF_TT2000_to_UTC_EPOCH.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH.argtypes = [ctypes.c_longlong] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH'): self._library.CDF_TT2000_from_UTC_EPOCH.restype = ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH.argtypes = [ctypes.c_double] if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH16'): self._library.CDF_TT2000_to_UTC_EPOCH16.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH16.argtypes = \ [ctypes.c_longlong, ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH16'): self._library.CDF_TT2000_from_UTC_EPOCH16.restype = \ ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH16.argtypes = \ [ctypes.POINTER(ctypes.c_double * 2)] #Get CDF version information ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) sub = ctypes.c_char(b' ') self.call(const.GET_, const.LIB_VERSION_, ctypes.byref(ver), const.GET_, const.LIB_RELEASE_, ctypes.byref(rel), const.GET_, const.LIB_INCREMENT_, ctypes.byref(inc), const.GET_, const.LIB_subINCREMENT_, ctypes.byref(sub)) ver = ver.value rel = rel.value inc = inc.value sub = sub.value self.version = (ver, rel, inc, sub) self._del_middle_rec_bug = ver < 3 or (ver == 3 and (rel < 4 or (rel == 4 and inc < 1))) self.supports_int8 = (ver > 3 or (ver == 3 and rel >=4)) self.cdftypenames = {const.CDF_BYTE.value: 'CDF_BYTE', const.CDF_CHAR.value: 'CDF_CHAR', const.CDF_INT1.value: 'CDF_INT1', const.CDF_UCHAR.value: 'CDF_UCHAR', const.CDF_UINT1.value: 'CDF_UINT1', const.CDF_INT2.value: 'CDF_INT2', const.CDF_UINT2.value: 'CDF_UINT2', const.CDF_INT4.value: 'CDF_INT4', const.CDF_UINT4.value: 'CDF_UINT4', const.CDF_INT8.value: 'CDF_INT8', const.CDF_FLOAT.value: 'CDF_FLOAT', const.CDF_REAL4.value: 'CDF_REAL4', const.CDF_DOUBLE.value: 'CDF_DOUBLE', const.CDF_REAL8.value: 'CDF_REAL8', const.CDF_EPOCH.value: 'CDF_EPOCH', 
const.CDF_EPOCH16.value: 'CDF_EPOCH16', const.CDF_TIME_TT2000.value: 'CDF_TIME_TT2000', } self.numpytypedict = {const.CDF_BYTE.value: numpy.int8, const.CDF_CHAR.value: numpy.int8, const.CDF_INT1.value: numpy.int8, const.CDF_UCHAR.value: numpy.uint8, const.CDF_UINT1.value: numpy.uint8, const.CDF_INT2.value: numpy.int16, const.CDF_UINT2.value: numpy.uint16, const.CDF_INT4.value: numpy.int32, const.CDF_UINT4.value: numpy.uint32, const.CDF_INT8.value: numpy.int64, const.CDF_FLOAT.value: numpy.float32, const.CDF_REAL4.value: numpy.float32, const.CDF_DOUBLE.value: numpy.float64, const.CDF_REAL8.value: numpy.float64, const.CDF_EPOCH.value: numpy.float64, const.CDF_EPOCH16.value: numpy.dtype((numpy.float64, 2)), const.CDF_TIME_TT2000.value: numpy.int64, } self.timetypes = [const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value] if not self.supports_int8: del self.cdftypenames[const.CDF_INT8.value] del self.numpytypedict[const.CDF_INT8.value] del self.cdftypenames[const.CDF_TIME_TT2000.value] del self.numpytypedict[const.CDF_TIME_TT2000.value] elif sys.platform.startswith('linux') \ and os.uname()[4].startswith('arm') \ and hasattr(self._library, 'computeTT2000') \ and self._library.computeTT2000( 2010, 1, 1, 0, 0, 0, 0, 0, 0) != 315576066184000000: #TT2000 call failed, so probably need to type-pun #double arguments to variadic functions. #Calling convention for non-variadic functions with floats #is unique, but convention for ints is same as variadic. #So type-pun arguments to integers to force that calling #convention. if ctypes.sizeof(ctypes.c_longlong) != \ ctypes.sizeof(ctypes.c_double): warnings.warn('ARM with unknown type sizes; ' 'TT2000 functions will not work.') else: self._library.computeTT2000.argtypes = \ [ctypes.c_longlong] * 9 c_ll_p = ctypes.POINTER(ctypes.c_longlong) if self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( 2010)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) != 315576066184000000: warnings.warn('ARM with unknown calling convention; ' 'TT2000 functions will not work.') self.datetime_to_tt2000 = self._datetime_to_tt2000_typepunned v_epoch16_to_datetime = numpy.frompyfunc( self.epoch16_to_datetime, 2, 1) self.v_epoch16_to_datetime = \ lambda x: v_epoch16_to_datetime(x[..., 0], x[..., 1]) self.v_epoch_to_datetime = numpy.frompyfunc( self.epoch_to_datetime, 1, 1) self.v_tt2000_to_datetime = numpy.frompyfunc( self.tt2000_to_datetime, 1, 1) self.v_datetime_to_epoch = numpy.vectorize( self.datetime_to_epoch, otypes=[numpy.float64]) v_datetime_to_epoch16 = numpy.frompyfunc( self.datetime_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. 
We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_datetime_to_epoch16(x): retval = numpy.require(v_datetime_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_datetime_to_epoch16 = _v_datetime_to_epoch16 self.v_datetime_to_tt2000 = numpy.vectorize( self.datetime_to_tt2000, otypes=[numpy.int64]) self.v_epoch_to_tt2000 = numpy.vectorize( self.epoch_to_tt2000, otypes=[numpy.int64]) self.v_tt2000_to_epoch = numpy.vectorize( self.tt2000_to_epoch, otypes=[numpy.float64]) v_epoch16_to_tt2000 = numpy.frompyfunc( self.epoch16_to_tt2000, 2, 1) self.v_epoch16_to_tt2000 = \ lambda x: v_epoch16_to_tt2000(x[..., 0], x[..., 1]) v_tt2000_to_epoch16 = numpy.frompyfunc( self.tt2000_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_tt2000_to_epoch16(x): retval = numpy.require(v_tt2000_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_tt2000_to_epoch16 = _v_tt2000_to_epoch16 if not self.supports_int8: self.datetime_to_tt2000 = self._bad_tt2000 self.tt2000_to_datetime = self._bad_tt2000 self.v_datetime_to_tt2000 = self._bad_tt2000 self.v_tt2000_to_datetime = self._bad_tt2000 self.epoch_to_tt2000 = self._bad_tt2000 self.v_epoch_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch = self._bad_tt2000 self.v_tt2000_to_epoch = self._bad_tt2000 self.epoch_16_to_tt2000 = self._bad_tt2000 self.v_epoch16_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch16 = self._bad_tt2000 self.v_tt2000_to_epoch16 = self._bad_tt2000 #Default to V2 CDF self.set_backward(True) @staticmethod def _find_lib(): """ Search for the CDF library Searches in likely locations for CDF libraries and attempts to load them. Stops at first successful load and, if fails, reports all the files that were tried as libraries. Returns ======= out : tuple This is either (path to library, loaded library) or, in the event of failure, (None, list of libraries tried) """ failed = [] for libpath in Library._lib_paths(): try: lib = ctypes.CDLL(libpath) except: failed.append(libpath) else: return libpath, lib return (failed, None) @staticmethod def _lib_paths(): """Find candidate paths for the CDF library Does not check that the library is actually in any particular directory, just returns a list of possible locations, in priority order. Returns ======= out : generator of str paths that look like the CDF library """ #What the library might be named names = { 'win32': ['cdf.dll'], 'darwin': ['libcdf.dylib', 'cdf.dylib', 'libcdf.so'], 'linux2': ['libcdf.so'], 'linux': ['libcdf.so'], } names = names.get(sys.platform, ['libcdf.so']) #All existing CDF-library-like paths within a directory search_dir = lambda x: \ [os.path.join(x, fname) for fname in names if os.path.exists(os.path.join(x, fname))] # Only use anaconda locations... # Defined during builds ... if 'PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['PREFIX'], 'lib')): yield p # defined when conda is activated ... 
if 'CONDA_PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'lib')): yield p # Special subdirectory for anaconda unix packages on windows if 'LIBRARY_BIN' in os.environ: for p in search_dir(os.environ['LIBRARY_BIN']): yield p ctypespath = ctypes.util.find_library( 'cdf.dll' if sys.platform == 'win32' else 'cdf') if ctypespath: yield ctypespath def check_status(self, status, ignore=()): """ Raise exception or warning based on return status of CDF call Parameters ========== status : int status returned by the C library Other Parameters ================ ignore : sequence of ctypes.c_long CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. (Default none). Raises ====== CDFError : if status < CDF_WARN, indicating an error Warns ===== CDFWarning : if CDF_WARN <= status < CDF_OK, indicating a warning. Returns ======= out : int status (unchanged) """ if status == const.CDF_OK or status in ignore: return status if status < const.CDF_WARN: raise CDFError(status) else: warning = CDFWarning(status) warning.warn() return status def call(self, *args, **kwargs): """ Call the CDF internal interface Passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with :meth:`check_status`. Terminal NULL is automatically added to args. Parameters ========== args : various, see :mod:`ctypes` Passed directly to the CDF library interface. Useful constants are defined in the :mod:`~pycdf.const` module. Other Parameters ================ ignore : sequence of CDF statuses sequence of CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. Returns ======= out : int CDF status from the library Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ if 'ignore' in kwargs: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) ), kwargs['ignore']) else: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) )) def set_backward(self, backward=True): """ Set backward compatibility mode for new CDFs Unless backward compatible mode is set, CDF files created by the version 3 library can not be read by V2. Parameters ========== backward : boolean Set backward compatible mode if True; clear it if False. Raises ====== ValueError : if backward=False and underlying CDF library is V2 """ if self.version[0] < 3: if not backward: raise ValueError( 'Cannot disable backward-compatible mode for CDF version 2.') else: return self._library.CDFsetFileBackward(const.BACKWARDFILEon if backward else const.BACKWARDFILEoff) def epoch_to_datetime(self, epoch): """ Converts a CDF epoch value to a datetime Parameters ========== epoch : float epoch value from CDF Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
See Also ======== v_epoch_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) self._library.EPOCHbreakdown(ctypes.c_double(epoch), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999000) else: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, msec.value * 1000) def datetime_to_epoch(self, dt): """ Converts a Python datetime to a CDF Epoch value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : float epoch corresponding to dt See Also ======== v_datetime_to_epoch """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) micro = dt.microsecond % 1000 if micro >= 500 and dt.year < 9999: dt += datetime.timedelta(0, 0, 1000) return self._library.computeEPOCH(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000)) def epoch16_to_datetime(self, epoch0, epoch1): """ Converts a CDF epoch16 value to a datetime .. note:: The call signature has changed since SpacePy 0.1.2. Formerly this method took a single argument with two values; now it requires two arguments (one for each value). To convert existing code, replace ``epoch16_to_datetime(epoch)`` with ``epoch16_to_datetime(*epoch)``. Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. See Also ======== v_epoch16_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) usec = ctypes.c_long(0) nsec = ctypes.c_long(0) psec = ctypes.c_long(0) self._library.EPOCH16breakdown((ctypes.c_double * 2)(epoch0, epoch1), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec), ctypes.byref(psec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) micro = int(float(msec.value) * 1000 + float(usec.value) + float(nsec.value) / 1000 + float(psec.value) / 1e6 + 0.5) if micro < 1000000: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_epoch16(self, dt): """ Converts a Python datetime to a CDF Epoch16 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : list of float epoch16 corresponding to dt See Also ======== v_datetime_to_epoch16 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) #Default to "illegal epoch" epoch16 = (ctypes.c_double * 2)(-1., -1.) 
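        #computeEPOCH16 reports a truthy (error) status when the conversion
        #fails; in that case the illegal-epoch value is returned instead of
        #the computed ``epoch16`` contents.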
if self._library.computeEPOCH16(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0, 0, epoch16): return (-1., -1.) #Failure, so illegal epoch return (epoch16[0], epoch16[1]) # MASKED: epoch_to_epoch16 function (lines 809-832) def epoch_to_num(self, epoch): """ Convert CDF EPOCH to matplotlib number. Same output as :func:`~matplotlib.dates.date2num` and useful for plotting large data sets without converting the times through datetime. Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : double Floating point number representing days since 0001-01-01. """ #date2num day 1 is 1/1/1 00UT #epoch 1/1/1 00UT is 31622400000.0 (millisecond) return (epoch - 31622400000.0) / (24 * 60 * 60 * 1000.0) + 1.0 def epoch16_to_epoch(self, epoch16): """ Converts a CDF EPOCH16 to a CDF EPOCH value Parameters ========== epoch16 : (double, double) EPOCH16 to convert. Lists and numpy arrays are acceptable. LAST dimension should be 2: the two pairs of EPOCH16 Returns ======= out : double EPOCH corresponding to epoch16 """ e = numpy.require(epoch16, numpy.float64) return e[..., 0] * 1000.0 + numpy.round(e[..., 1] / 1e9) def tt2000_to_datetime(self, tt2000): """ Converts a CDF TT2000 value to a datetime .. note:: Although TT2000 values support leapseconds, Python's datetime object does not. Any times after 23:59:59.999999 will be truncated to 23:59:59.999999. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. See Also ======== v_tt2000_to_datetime """ yyyy = ctypes.c_double(0) mm = ctypes.c_double(0) dd = ctypes.c_double(0) hh = ctypes.c_double(0) min = ctypes.c_double(0) sec = ctypes.c_double(0) msec = ctypes.c_double(0) usec = ctypes.c_double(0) nsec = ctypes.c_double(0) self._library.breakdownTT2000( ctypes.c_longlong(tt2000), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) sec = int(sec.value) if sec >= 60: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), 59, 999999) micro = int(msec.value * 1000 + usec.value + nsec.value / 1000 + 0.5) if micro < 1000000: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_tt2000(self, dt): """ Converts a Python datetime to a CDF TT2000 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0) 
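    #Illustrative round trip through the TT2000 conversions above (a usage
    #sketch only, not part of the class; assumes the module-level ``lib``
    #instance and a CDF library new enough to support TT2000):
    #    >>> import datetime
    #    >>> tt = lib.datetime_to_tt2000(datetime.datetime(2010, 1, 1))
    #    >>> lib.tt2000_to_datetime(tt) == datetime.datetime(2010, 1, 1)
    #    True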
def _datetime_to_tt2000_typepunned(self, dt): """ Converts a Python datetime to a CDF TT2000 value Typepunned version that passes doubles as longlongs, to get around ARM calling convention oddness. Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ c_ll_p = ctypes.POINTER(ctypes.c_longlong) if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( dt.year)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.month)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.day)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.hour)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.minute)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.second)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond // 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond % 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) def epoch_to_tt2000(self, epoch): """ Converts a CDF EPOCH to a CDF TT2000 value Parameters ========== epoch : double EPOCH to convert Returns ======= out : int tt2000 corresponding to epoch See Also ======== v_epoch_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH(epoch) def tt2000_to_epoch(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH .. note:: Although TT2000 values support leapseconds, CDF EPOCH values do not. Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double EPOCH corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch """ return self._library.CDF_TT2000_to_UTC_EPOCH(tt2000) def epoch16_to_tt2000(self, epoch0, epoch1): """ Converts a CDF epoch16 value to TT2000 .. note:: Because TT2000 does not support picoseconds, the picoseconds value in epoch is ignored (i.e., truncated.) Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : long TT2000 corresponding to epoch. See Also ======== v_epoch16_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH16( (ctypes.c_double * 2)(epoch0, epoch1)) def tt2000_to_epoch16(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH16 .. note:: Although TT2000 values support leapseconds, CDF EPOCH16 values do not. Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double, double EPOCH16 corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch16 """ #Default to "illegal epoch" if isn't populated epoch16 = (ctypes.c_double * 2)(-1., -1.) if self._library.CDF_TT2000_to_UTC_EPOCH16(tt2000, epoch16): return (-1., -1.) 
#Failure; illegal epoch return (epoch16[0], epoch16[1]) def _bad_tt2000(*args, **kwargs): """Convenience function for complaining that TT2000 not supported""" raise NotImplementedError( 'TT2000 functions require CDF library 3.4.0 or later') def download_library(): """Download and install the CDF library""" if sys.platform != 'win32': raise NotImplementedError( 'CDF library install only supported on Windows') try: import html.parser as HTMLParser except ImportError: import HTMLParser #https://stackoverflow.com/questions/1713038/super-fails-with-error-typeerror-argument-1-must-be-type-not-classobj class LinkParser(HTMLParser.HTMLParser, object): def __init__(self, *args, **kwargs): self.links_found = [] super(LinkParser, self).__init__(*args, **kwargs) def handle_starttag(self, tag, attrs): if tag != 'a' or attrs[0][0] != 'href': return self.links_found.append(attrs[0][1]) import re import subprocess try: import urllib.request as u except ImportError: import urllib as u # Removed reference to spacepy #import spacepy #if spacepy.config.get('user_agent', None): # class AppURLopener(u.FancyURLopener): # version = spacepy.config['user_agent'] # u._urlopener = AppURLopener() baseurl = 'https://spdf.sci.gsfc.nasa.gov/pub/software/cdf/dist/' url = u.urlopen(baseurl) listing = url.read() url.close() p = LinkParser() p.feed(listing) cdfdist = [l for l in p.links_found if re.match('^cdf3\d_\d(?:_\d)?/$', l)] if not cdfdist: raise RuntimeError( "Couldn't find CDF distribution directory to download") cdfdist.sort(key=lambda x: x.rstrip('/').split('_')) cdfverbase = cdfdist[-1].rstrip('/') instfname = cdfverbase + ('_0' if cdfverbase.count('_') == 1 else '') + \ '-setup-{0}.exe'.format(len('%x' % sys.maxsize)*4) insturl = baseurl + cdfverbase + '/windows/' + instfname tmpdir = tempfile.mkdtemp() try: fname, status = u.urlretrieve(insturl, os.path.join(tmpdir, instfname)) subprocess.check_call([fname, '/install', '/q1'], shell=False) finally: shutil.rmtree(tmpdir) _libpath, _library = Library._find_lib() if _library is None: raise Exception(('Cannot load CDF C library; checked {0}. ' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(_libpath))) from . import const lib = Library(_libpath, _library) """Module global library object. Initalized at module load time so all classes have ready access to the CDF library and a common state. E.g: >>> import pycdf >>> pycdf.lib.version (3, 3, 0, ' ') """ class CDFException(Exception): """ Base class for errors or warnings in the CDF library. Not normally used directly, but in subclasses :class:`CDFError` and :class:`CDFWarning`. Error messages provided by this class are looked up from the underlying C library. """ def __init__(self, status): """ Create a CDF Exception Uses CDF C library to look up an appropriate error message. Parameters ========== status : ctypes.c_long CDF status """ self.status = status self.string = 'CDF error ' + repr(status) + ', unable to get details.' message = ctypes.create_string_buffer(const.CDF_STATUSTEXT_LEN + 1) try: retval = lib._library.CDFlib(const.SELECT_, const.CDF_STATUS_, ctypes.c_long(status), const.GET_, const.STATUS_TEXT_, message, const.NULL_) if retval == const.CDF_OK: if isinstance(message.value, str): self.string = message.value elif isinstance(message.value, bytes): self.string = message.value.decode() except: pass def __str__(self): """ Error string associated with the library error. Returns ======= out : str Error message from the CDF library. 
""" return self.string class CDFError(CDFException): """Raised for an error in the CDF library.""" pass class CDFWarning(CDFException, UserWarning): """Used for a warning in the CDF library.""" def warn(self, level=4): """ Issues a warning based on the information stored in my exception Intended for use in check_status or similar wrapper function. Other Parameters ================ level : int optional (default 3), how far up the stack the warning should be reported. Passed directly to :class:`warnings.warn`. """ warnings.warn(self, self.__class__, level) class EpochError(Exception): """Used for errors in epoch routines""" pass def _compress(obj, comptype=None, param=None): """Set or check the compression of a :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param obj: object on which to set or check compression @type obj: :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param comptype: type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :py:mod:`pycdf.const`. If not specified, will not change compression. @type comptype: ctypes.c_long @param param: Compression parameter, see CDF CRM 4.10 and :py:mod:`pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) @type param: ctypes.c_long @return: (comptype, param) currently in effect @rtype: tuple """ if isinstance(obj, CDF): COMPRESSION_ = const.CDF_COMPRESSION_ elif isinstance(obj, Var): COMPRESSION_ = const.zVAR_COMPRESSION_ else: raise ValueError('Must specify a CDF or Var type.') validparams = {const.NO_COMPRESSION.value: [ctypes.c_long(0)], const.RLE_COMPRESSION.value: [const.RLE_OF_ZEROs], const.HUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.AHUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.GZIP_COMPRESSION.value: [ctypes.c_long(5), ctypes.c_long(1), ctypes.c_long(2), ctypes.c_long(3), ctypes.c_long(4), ctypes.c_long(6), ctypes.c_long(7), ctypes.c_long(8), ctypes.c_long(9), ], } comptypes = [const.NO_COMPRESSION, const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION, const.GZIP_COMPRESSION] comptypevalues = [i.value for i in comptypes] if comptype != None: if not hasattr(comptype, 'value'): comptype = ctypes.c_long(comptype) if param is None: if not comptype.value in validparams: raise CDFError(const.BAD_COMPRESSION) param = validparams[comptype.value][0] paramlist = (ctypes.c_long * 1)(param) obj._call(const.PUT_, COMPRESSION_, comptype, paramlist) params = (ctypes.c_long * const.CDF_MAX_PARMS)(*([0] * const.CDF_MAX_PARMS)) comptype = ctypes.c_long(0) percent = ctypes.c_long(0) obj._call(const.GET_, COMPRESSION_, ctypes.byref(comptype), ctypes.byref(params), ctypes.byref(percent)) param = params[0] if not comptype.value in comptypevalues: raise CDFError(const.BAD_COMPRESSION) validparamvalues = [i.value for i in validparams[comptype.value]] if not param in validparamvalues: raise CDFError(const.BAD_COMPRESSION_PARM) comptype = comptypes[comptypevalues.index(comptype.value)] if comptype in (const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION): param = validparams[comptype.value][validparamvalues.index(param)] return (comptype, param) class CDF(MutableMapping): """ Python object representing a CDF file. Open or create a CDF file by creating an object of this class. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. 
If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. A readonly CDF with many variables may be slow to close. See :meth:`readonly`. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :meth:`close` or :meth:`save` when done. .. note:: Existing CDF files are opened read-only by default, see :meth:`readonly` to change. CDF supports the `with <http://docs.python.org/tutorial/inputoutput.html#methods-of-file-objects>`_ keyword, like other file objects, so: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... #do brilliant things with the CDF will open the CDF, execute the indented statements, and close the CDF when finished or when an error occurs. The `python docs <http://docs.python.org/reference/compound_stmts.html#with>`_ include more detail on this 'context manager' ability. CDF objects behave like a python `dictionary <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_, where the keys are names of variables in the CDF, and the values, :class:`Var` objects. As a dictionary, they are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_ and it is easy to loop over all of the variables in a file. Some examples: #. List the names of all variables in the open CDF ``cdffile``: >>> cdffile.keys() >>> for k in cdffile: #Alternate ... print(k) #. Get a :class:`Var` object for the variable named ``Epoch``: >>> epoch = cdffile['Epoch'] #. Determine if a CDF contains a variable named ``B_GSE``: >>> if 'B_GSE' in cdffile: ... print('B_GSE is in the file') ... else: ... print('B_GSE is not in the file') #. Find how many variables are in the file: >>> print(len(cdffile)) #. Delete the variable ``Epoch`` from the open CDF file ``cdffile``: >>> del cdffile['Epoch'] #. Display a summary of variables and types in open CDF file ``cdffile``: >>> print(cdffile) #. Open the CDF named ``cdf_filename.cdf``, read *all* the data from all variables into dictionary ``data``, and close it when done or if an error occurs: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... data = cdffile.copy() This last example can be very inefficient as it reads the entire CDF. Normally it's better to treat the CDF as a dictionary and access only the data needed, which will be pulled transparently from disc. See :class:`Var` for more subtle examples. Potentially useful dictionary methods and related functions: - `in <http://docs.python.org/reference/expressions.html#in>`_ - `keys <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_ - :py:func:`len` - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - :py:func:`sorted` - :py:func:`~spacepy.toolbox.dictree` The CDF user's guide section 2.2 has more background information on CDF files. The :attr:`~CDF.attrs` Python attribute acts as a dictionary referencing CDF attributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`gAttrList` for more on the dictionary of global attributes. 
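    For example, global attributes can be listed and read much like variables (a sketch only; ``Project`` is a hypothetical attribute name that may not exist in any particular file):

    >>> cdffile.attrs.keys()
    >>> print(cdffile.attrs['Project'])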
Creating a new CDF from a master (skeleton) CDF has similar syntax to opening one: >>> cdffile = pycdf.CDF('cdf_filename.cdf', 'master_cdf_filename.cdf') This creates and opens ``cdf_filename.cdf`` as a copy of ``master_cdf_filename.cdf``. Using a skeleton CDF is recommended over making a CDF entirely from scratch, but this is possible by specifying a blank master: >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') When CDFs are created in this way, they are opened read-write, see :py:meth:`readonly` to change. By default, new CDFs (without a master) are created in version 2 (backward-compatible) format. To create a version 3 CDF, use :meth:`Library.set_backward`: >>> pycdf.lib.set_backward(False) >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') Add variables by direct assignment, which will automatically set type and dimension based on the data provided: >>> cdffile['new_variable_name'] = [1, 2, 3, 4] or, if more control is needed over the type and dimensions, use :py:meth:`new`. Although it is supported to assign Var objects to Python variables for convenience, there are some minor pitfalls that can arise when changing a CDF that will not affect most users. This is only a concern when assigning a zVar object to a Python variable, changing the CDF through some other variable, and then trying to use the zVar object via the originally assigned variable. Deleting a variable: >>> var = cdffile['Var1'] >>> del cdffile['Var1'] >>> var[0] #fail, no such variable Renaming a variable: >>> var = cdffile['Var1'] >>> cdffile['Var1'].rename('Var2') >>> var[0] #fail, no such variable Renaming via the same variable works: >>> var = cdffile['Var1'] >>> var.rename('Var2') >>> var[0] #succeeds, aware of new name Deleting a variable and then creating another variable with the same name may lead to some surprises: >>> var = cdffile['Var1'] >>> var[...] = [1, 2, 3, 4] >>> del cdffile['Var1'] >>> cdffile.new('Var1', data=[5, 6, 7, 8] >>> var[...] [5, 6, 7, 8] .. autosummary:: ~CDF.attr_num ~CDF.attrs ~CDF.add_attr_to_cache ~CDF.add_to_cache ~CDF.backward ~CDF.checksum ~CDF.clear_attr_from_cache ~CDF.clear_from_cache ~CDF.clone ~CDF.close ~CDF.col_major ~CDF.compress ~CDF.copy ~CDF.from_data ~CDF.new ~CDF.raw_var ~CDF.readonly ~CDF.save ~CDF.var_num ~CDF.version .. attribute:: CDF.attrs Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. .. attribute:: CDF.backward True if this CDF was created in backward-compatible mode (for opening with CDF library before 3.x) .. automethod:: add_to_cache .. automethod:: add_attr_to_cache .. automethod:: attr_num .. automethod:: checksum .. automethod:: clear_from_cache .. automethod:: clear_attr_from_cache .. automethod:: clone .. automethod:: close .. automethod:: col_major .. automethod:: compress .. automethod:: copy .. automethod:: from_data .. automethod:: new .. automethod:: raw_var .. automethod:: readonly .. automethod:: save .. automethod:: var_num .. automethod:: version """ def __init__(self, pathname, masterpath=None, create=None, readonly=None): """Open or create a CDF file. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. 
Raises ====== CDFError if CDF library reports an error CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save` when done. """ if masterpath is not None: #Looks like we want to create if create is False: raise ValueError('Cannot specify a master CDF without creating a CDF') if readonly is True: raise ValueError('Cannot create a CDF in readonly mode') if create and readonly: raise ValueError('Cannot create a CDF in readonly mode') try: self.pathname = pathname.encode() except AttributeError: raise ValueError( 'pathname must be string-like: {0}'.format(pathname)) self._handle = ctypes.c_void_p(None) self._opened = False if masterpath is None and not create: self._open(True if readonly is None else readonly) elif masterpath: self._from_master(masterpath.encode()) else: self._create() lib.call(const.SELECT_, const.CDF_zMODE_, ctypes.c_long(2)) self._attrlistref = weakref.ref(gAttrList(self)) self.backward = self.version()[0] < 3 self._var_nums = {} """Cache of name-to-number mappings for variables in this CDF""" self._attr_info = {} """Cache of name-to-(number, global) mappings for attributes in this CDF""" def __del__(self): """Destructor; called when CDF object is destroyed. Close CDF file if there is still a valid handle. .. note:: To avoid data loss, explicitly call :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save`. """ if self._opened: self.close() def __delitem__(self, name): """Delete a zVariable in this CDF, by name or number Parameters ========== name : string or int Name or number of the CDF variable .. note: Variable numbers may change if variables are added or removed. Examples ======== Delete the variable ``Epoch`` from the open CDF file ``cdffile``. >>> del cdffile['Epoch'] """ self[name]._delete() def __enter__(self): """Context manager entrance function.""" return self def __exit__(self, type, value, traceback): """Context manager exit function. Close CDF file. """ self.close() def __getitem__(self, name): """Gets a zVariable in this CDF, by name or number The CDF acts like a dict @param name: Name or number of the CDF variable @type name: string or int @return: CDF variable named or numbered L{name} @rtype: :py:class:`pycdf.Var` @raise KeyError: for pretty much any problem in lookup @note: variable numbers may change if variables are added or removed. """ try: return Var(self, name) except CDFException as e: raise KeyError('{0}: {1}'.format(name, e)) def __setitem__(self, name, data): """Writes data to a zVariable in this CDF If the zVariable does not exist, will create one matching L{data}. If it does exist, will attempt to write L{data} to it without changing the type or dimensions. @param name: name or number of the variable to write @type name: str or int @param data: data to write, or a :py:class:`pycdf.Var` to copy """ if isinstance(data, Var): self.clone(data, name) elif name in self: self[name][...] 
= data if hasattr(data, 'attrs'): self[name].attrs.clone(data.attrs) else: self.new(name, data) def __iter__(self, current = 0): """Iterates over zVars in CDF Iterators for dicts return keys @note: Returned in variable-number order """ while current < self.__len__(): name = self[current].name() value = (yield name) if value is None: current += 1 else: current = self[value]._num() current += 1 def __len__(self): """Implements 'length' of CDF (number of zVars) @return: number of zVars in the CDF @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, const.CDF_NUMzVARS_, ctypes.byref(count)) return count.value def __contains__(self, key): """Determines whether a particular variable name is in the CDF @note: Essentially an efficiency function; L{__iter__} is called if this isn't defined @param key: key/variable name to check @type key: string @return: True if L{key} is the name of a variable in CDF, else False @rtype: Boolean """ try: foo = self[key] return True except KeyError as e: expected = str(key) + \ ": NO_SUCH_VAR: Named variable not found in this CDF." if expected in e.args: return False raise def __repr__(self): """Returns representation of CDF Cannot return anything that can be eval'd to create a copy of the CDF, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<CDF:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the CDF This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.CDF`, just the names, types, and sizes of all variables. (Attributes are not listed.) @return: description of the variables in the CDF @rtype: str """ if self._opened: return '\n'.join([key + ': ' + str(value) for (key, value) in sorted(self.items())]) #can get away with this sort because second value in tuple isn't #compared unless first are different, and variable name is unique. else: if isinstance(self.pathname, str): return 'Closed CDF {0}'.format(self.pathname) else: return 'Closed CDF {0}'.format(self.pathname.decode('ascii')) def _open(self, readonly=True): """Opens the CDF file (called on init) Will open an existing CDF file read/write. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.OPEN_, const.CDF_, self.pathname, ctypes.byref(self._handle)) self._opened = True if readonly: #Default is RW self.readonly(readonly) def _create(self): """Creates (and opens) a new CDF file Created at ``pathname``. Assumes zero-dimension r variables Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.CREATE_, const.CDF_, self.pathname, ctypes.c_long(0), (ctypes.c_long * 1)(0), ctypes.byref(self._handle)) self._opened = True def _from_master(self, master_path): """Creates a new CDF from a master CDF file ``master_path`` is copied to ``pathname`` and opened. Parameters ========== master_path : string location of the master CDF file Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. 
note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ if os.path.exists(self.pathname): raise CDFError(const.CDF_EXISTS) shutil.copy2(master_path, self.pathname) self._open(False) def _call(self, *args, **kwargs): """Select this CDF as current and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. Parameters ========== args : various, see :py:mod:`ctypes`. Passed directly to the CDF library interface. Useful constants are defined in the :doc:`const <pycdf_const>` module of this package. Returns ======= out : ctypes.c_long CDF status from the library .. note: Terminal NULL_ is automatically added to ``args``. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. """ return lib.call(const.SELECT_, const.CDF_, self._handle, *args, **kwargs) def clone(self, zVar, name=None, data=True): """ Clone a zVariable (from another CDF or this) into this CDF Parameters ========== zVar : :py:class:`Var` variable to clone Other Parameters ================ name : str Name of the new variable (default: name of the original) data : boolean (optional) Copy data, or only type, dimensions, variance, attributes? (default: True, copy data as well) Returns ======= out : :py:class:`Var` The newly-created zVar in this CDF """ if name is None: name = zVar.name() if name in self: del self[name] self.new(name, type=zVar.type(), recVary=zVar.rv(), dimVarys=zVar.dv(), dims=zVar._dim_sizes(), n_elements=zVar._nelems()) self[name].compress(*zVar.compress()) self[name].attrs.clone(zVar.attrs) if data: r = zVar._raw zVar._raw = True self.raw_var(name)[...] = zVar[...] zVar._raw = r return zVar def col_major(self, new_col=None): """ Finds the majority of this CDF file Other Parameters ================ new_col : boolean Specify True to change to column-major, False to change to row major, or do not specify to check the majority rather than changing it. (default is check only) Returns ======= out : boolean True if column-major, false if row-major """ if new_col != None: new_maj = const.COLUMN_MAJOR if new_col else const.ROW_MAJOR self._call(const.PUT_, const.CDF_MAJORITY_, new_maj) maj = ctypes.c_long(0) self._call(const.GET_, const.CDF_MAJORITY_, ctypes.byref(maj)) if not maj.value in (const.ROW_MAJOR.value, const.COLUMN_MAJOR.value): raise CDFError(const.BAD_MAJORITY) return maj.value == const.COLUMN_MAJOR.value def readonly(self, ro=None): """ Sets or check the readonly status of this CDF If the CDF has been changed since opening, setting readonly mode will have no effect. .. note:: Closing a CDF that has been opened readonly, or setting readonly False, may take a substantial amount of time if there are many variables in the CDF, as a (potentially large) cache needs to be cleared. Consider specifying ``readonly=False`` when opening the file if this is an issue. However, this may make some reading operations slower. Other Parameters ================ ro : Boolean True to set the CDF readonly, False to set it read/write, or leave out to check only. 
Returns ======= out : Boolean True if CDF is read-only, else False Raises ====== CDFError : if bad mode is set """ if ro == True: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYon) elif ro == False: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYoff) mode = ctypes.c_long(0) self._call(const.CONFIRM_, const.CDF_READONLY_MODE_, ctypes.byref(mode)) if mode.value == const.READONLYon.value: return True elif mode.value == const.READONLYoff.value: return False else: raise CDFError(const.BAD_READONLY_MODE.value) def checksum(self, new_val=None): """ Set or check the checksum status of this CDF. If checksums are enabled, the checksum will be verified every time the file is opened. Other Parameters ================ new_val : boolean True to enable checksum, False to disable, or leave out to simply check. Returns ======= out : boolean True if the checksum is enabled or False if disabled """ if new_val != None: self._call(const.PUT_, const.CDF_CHECKSUM_, const.MD5_CHECKSUM if new_val else const.NO_CHECKSUM) chk = ctypes.c_long(0) self._call(const.GET_, const.CDF_CHECKSUM_, ctypes.byref(chk)) if not chk.value in (const.MD5_CHECKSUM.value, const.NO_CHECKSUM.value): raise CDFError(const.BAD_CHECKSUM) return chk.value == const.MD5_CHECKSUM.value def close(self): """ Closes the CDF file Although called on object destruction (:meth:`~CDF.__del__`), to ensure all data are saved, the user should explicitly call :meth:`~CDF.close` or :meth:`~CDF.save`. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.CLOSE_, const.CDF_) self._opened = False def compress(self, comptype=None, param=None): """ Set or check the compression of this CDF Sets compression on entire *file*, not per-variable. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) Returns ======= out : tuple (comptype, param) currently in effect See Also ======== :meth:`Var.compress` Examples ======== Set file ``cdffile`` to gzip compression, compression level 9: >>> cdffile.compress(pycdf.const.GZIP_COMPRESSION, 9) """ return _compress(self, comptype, param) def new(self, name, data=None, type=None, recVary=True, dimVarys=None, dims=None, n_elements=None, compress=None, compress_param=None): """ Create a new zVariable in this CDF .. note:: Either ``data`` or ``type`` must be specified. If type is not specified, it is guessed from ``data``. Parameters ========== name : str name of the new variable Other Parameters ================ data data to store in the new variable. If this has a an ``attrs`` attribute (e.g., :class:`~spacepy.datamodel.dmarray`), it will be used to populate attributes of the new variable. type : ctypes.c_long CDF type of the variable, from :mod:`~pycdf.const`. See section 2.5 of the CDF user's guide for more information on CDF data types. recVary : boolean record variance of the variable (default True) dimVarys : list of boolean dimension variance of each dimension, default True for all dimensions. 
dims : list of int size of each dimension of this variable, default zero-dimensional. Note this is the dimensionality as defined by CDF, i.e., for record-varying variables it excludes the leading record dimension. See :py:class:`Var`. n_elements : int number of elements, should be 1 except for CDF_CHAR, for which it's the length of the string. compress : ctypes.c_long Compression to apply to this variable, default None. See :py:meth:`Var.compress`. compress_param : ctypes.c_long Compression parameter if compression used; reasonable default is chosen. See :py:meth:`Var.compress`. Returns ======= out : :py:class:`Var` the newly-created zVariable Raises ====== ValueError : if neither data nor sufficient typing information is provided. Notes ===== Any given data may be representable by a range of CDF types; if the type is not specified, pycdf will guess which the CDF types which can represent this data. This breaks down to: #. If input data is a numpy array, match the type of that array #. Proper kind (numerical, string, time) #. Proper range (stores highest and lowest number provided) #. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: #. Type that matches precision of data first, then #. integer type before float type, then #. Smallest type first, then #. signed type first, then #. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if ``data`` specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: #. absolute values between 0 and 3e-39 #. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. """ if type in (const.CDF_EPOCH16, const.CDF_INT8, const.CDF_TIME_TT2000) \ and self.backward: raise ValueError('Cannot use EPOCH16, INT8, or TIME_TT2000 ' 'in backward-compatible CDF') if not lib.supports_int8 and \ type in (const.CDF_INT8, const.CDF_TIME_TT2000): raise ValueError('INT8 and TIME_TT2000 require CDF library 3.4.0') if data is None: if type is None: raise ValueError('Must provide either data or a CDF type.') if dims is None: dims = [] if n_elements is None: n_elements = 1 else: (guess_dims, guess_types, guess_elements) = _Hyperslice.types(data) if dims is None: if recVary: if guess_dims == (): raise ValueError( 'Record-varying data cannot be scalar. 
' 'Specify NRV with CDF.new() or put data in array.') dims = guess_dims[1:] else: dims = guess_dims if type is None: type = guess_types[0] if type == const.CDF_EPOCH16.value and self.backward: type = const.CDF_EPOCH if n_elements is None: n_elements = guess_elements if dimVarys is None: dimVarys = [True for i in dims] recVary = const.VARY if recVary else const.NOVARY dimVarys = [const.VARY if dimVary else const.NOVARY for dimVary in dimVarys] if not hasattr(type, 'value'): type = ctypes.c_long(type) if type.value == const.CDF_INT8.value and not lib.supports_int8: raise ValueError( '64-bit integer support require CDF library 3.4.0') if type.value in (const.CDF_EPOCH16.value, const.CDF_INT8.value, const.CDF_TIME_TT2000.value) \ and self.backward: raise ValueError('Data requires EPOCH16, INT8, or TIME_TT2000; ' 'incompatible with backward-compatible CDF') new_var = Var(self, name, type, n_elements, dims, recVary, dimVarys) if compress != None: new_var.compress(compress, compress_param) if data is not None: new_var[...] = data if hasattr(data, 'attrs'): new_var.attrs.clone(data.attrs) return new_var def raw_var(self, name): """ Get a "raw" :class:`Var` object. Normally a :class:`Var` will perform translation of values for certain types (to/from Unicode for CHAR variables on Py3k, and to/from datetime for all time types). A "raw" object does not perform this translation, on read or write. This does *not* affect the data on disk, and in fact it is possible to maintain multiple Python objects with access to the same zVariable. Parameters ========== name : str name or number of the zVariable """ v = self[name] v._raw = True return v def save(self): """ Saves the CDF file but leaves it open. If closing the CDF, :meth:`close` is sufficient; there is no need to call :meth:`save` before :meth:`close`. .. note:: Relies on an undocumented call of the CDF C library, which is also used in the Java interface. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.SAVE_, const.CDF_) def copy(self): """ Make a copy of all data and attributes in this CDF Returns ======= out : :py:class:`CDFCopy` :class:`~spacepy.datamodel.SpaceData`-like object of all data """ return CDFCopy(self) def version(self): """ Get version of library that created this CDF Returns ======= out : tuple version of CDF library, in form (version, release, increment) """ ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) self._call(const.GET_, const.CDF_VERSION_, ctypes.byref(ver), const.GET_, const.CDF_RELEASE_, ctypes.byref(rel), const.GET_, const.CDF_INCREMENT_, ctypes.byref(inc)) return (ver.value, rel.value, inc.value) def _get_attrs(self): """Get attribute list Provide access to the CDF's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = gAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. 
""") def var_num(self, varname): """Get the variable number of a particular variable name This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : int Variable number of this zvariable. """ num = self._var_nums.get(varname, None) if num is None: #Copied from Var._get, which can hopefully be thinned varNum = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMBER_, varname, ctypes.byref(varNum)) num = varNum.value self._var_nums[varname] = num return num def attr_num(self, attrname): """Get the attribute number and scope by attribute name This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : tuple attribute number, scope (True for global) of this attribute """ res = self._attr_info.get(attrname, None) if res is None: #Copied from Var._get, which can hopefully be thinned attrNum = ctypes.c_long(0) self._call(const.GET_, const.ATTR_NUMBER_, attrname, ctypes.byref(attrNum)) scope = ctypes.c_long(0) self._call(const.SELECT_, const.ATTR_, attrNum, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) res = (attrNum.value, scope) self._attr_info[attrname] = res return res def clear_attr_from_cache(self, attrname): """Mark an attribute deleted in the name-to-number cache Will remove an attribute, and all attributes with higher numbers, from the attribute cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the attribute. Not this is NOT a string in Python 3! """ num, scope = self.attr_num(attrname) #All numbers higher than this are renumbered for a, n in list(self._attr_info.items()): if n[0] >= num: del self._attr_info[a] def clear_from_cache(self, varname): """Mark a variable deleted in the name-to-number cache Will remove a variable, and all variables with higher numbers, from the variable cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! """ num = self.var_num(varname) #All numbers higher than this are renumbered for v, n in list(self._var_nums.items()): if n >= num: del self._var_nums[v] def add_attr_to_cache(self, attrname, num, scope): """Add an attribute to the name-to-number cache This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable scope : bool True if global scope; False if variable scope. 
""" self._attr_info[attrname] = (num, scope) def add_to_cache(self, varname, num): """Add a variable to the name-to-number cache This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable """ self._var_nums[varname] = num #Note there is no function for delete, currently handled in Var.rename #and Attr.rename by just deleting from the dict directly. Maybe this #should be differen (maybe should be possible to follow a variable across #a rename...) class Var(MutableSequence): """ A CDF variable. This object does not directly store the data from the CDF; rather, it provides access to the data in a format that much like a Python list or numpy :class:`~numpy.ndarray`. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. The CDF user's guide, section 2.3, provides background on variables. .. note:: Not intended to be created directly; use methods of :class:`CDF` to gain access to a variable. A record-varying variable's data are viewed as a hypercube of dimensions n_dims+1 (the extra dimension is the record number). They are indexed in row-major fashion, i.e. the last index changes most frequently / is contiguous in memory. If the CDF is column-major, the data are transformed to row-major before return. Non record-varying variables are similar, but do not have the extra dimension of record number. Variables can be subscripted by a multidimensional index to return the data. Indices are in row-major order with the first dimension representing the record number. If the CDF is column major, the data are reordered to row major. Each dimension is specified by standard Python `slice <http://docs.python.org/tutorial/introduction.html#strings>`_ notation, with dimensions separated by commas. The ellipsis fills in any missing dimensions with full slices. The returned data are lists; Python represents multidimensional arrays as nested lists. The innermost set of lists represents contiguous data. .. note:: numpy 'fancy indexing' is *not* supported. Degenerate dimensions are 'collapsed', i.e. no list of only one element will be returned if a single subscript is specified instead of a range. (To avoid this, specify a slice like 1:2, which starts with 1 and ends before 2). Two special cases: 1. requesting a single-dimension slice for a record-varying variable will return all data for that record number (or those record numbers) for that variable. 2. Requests for multi-dimensional variables may skip the record-number dimension and simply specify the slice on the array itself. In that case, the slice of the array will be returned for all records. In the event of ambiguity (e.g., single-dimension slice on a one-dimensional variable), case 1 takes priority. Otherwise, mismatch between the number of dimensions specified in the slice and the number of dimensions in the variable will cause an :exc:`~exceptions.IndexError` to be thrown. This all sounds very complicated but it is essentially attempting to do the 'right thing' for a range of slices. An unusual case is scalar (zero-dimensional) non-record-varying variables. Clearly they cannot be subscripted normally. 
In this case, use the ``[...]`` syntax meaning 'access all data.': >>> import pycdf >>> testcdf = pycdf.CDF('test.cdf', '') >>> variable = testcdf.new('variable', recVary=False, ... type=pycdf.const.CDF_INT4) >>> variable[...] = 10 >>> variable <Var: CDF_INT4 [] NRV > >>> variable[...] 10 Reading any empty non-record-varying variable will return an empty with the same *number* of dimensions, but all dimensions will be of zero length. The scalar is, again, a special case: due to the inability to have a numpy array which is both zero-dimensional and empty, reading an NRV scalar variable with no data will return an empty one-dimensional array. This is really not recommended. As a list type, variables are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_; iterating over a variable returns a single complete record at a time. This is all clearer with examples. Consider a variable ``B_GSM``, with three elements per record (x, y, z components) and fifty records in the CDF. Then: 1. ``B_GSM[0, 1]`` is the y component of the first record. 2. ``B_GSM[10, :]`` is a three-element list, containing x, y, and z components of the 11th record. As a shortcut, if only one dimension is specified, it is assumed to be the record number, so this could also be written ``B_GSM[10]``. 3. ``B_GSM[...]`` reads all data for ``B_GSM`` and returns it as a fifty-element list, each element itself being a three-element list of x, y, z components. Multidimensional example: consider fluxes stored as a function of pitch angle and energy. Such a variable may be called Flux and stored as a two-dimensional array, with the first dimension representing (say) ten energy steps and the second, eighteen pitch angle bins (ten degrees wide, centered from 5 to 175 degrees). Assume 100 records stored in the CDF (i.e. 100 different times). 1. ``Flux[4]`` is a list of ten elements, one per energy step, each element being a list of 18 fluxes, one per pitch bin. All are taken from the fifth record in the CDF. 2. ``Flux[4, :, 0:4]`` is the same record, all energies, but only the first four pitch bins (roughly, field-aligned). 3. ``Flux[..., 0:4]`` is a 100-element list (one per record), each element being a ten-element list (one per energy step), each containing fluxes for the first four pitch bins. This slicing notation is very flexible and allows reading specifically the desired data from the CDF. All data are, on read, converted to appropriate Python data types; EPOCH, EPOCH16, and TIME_TT2000 types are converted to :class:`~datetime.datetime`. Data are returned in numpy arrays. .. note:: Although pycdf supports TIME_TT2000 variables, the Python :class:`~datetime.datetime` object does not support leap seconds. Thus, on read, any seconds past 59 are truncated to 59.999999 (59 seconds, 999 milliseconds, 999 microseconds). Potentially useful list methods and related functions: - `count <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `in <http://docs.python.org/reference/expressions.html#in>`_ - `index <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `len <http://docs.python.org/library/functions.html#len>`_ - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - `sorted <http://docs.python.org/library/functions.html#sorted>`_ The topic of array majority can be very confusing; good background material is available at `IDL Array Storage and Indexing <http://www.idlcoyote.com/misc_tips/colrow_major.html>`_. 
In brief, *regardless of the majority stored in the CDF*, pycdf will always present the data in the native Python majority, row-major order, also known as C order. This is the default order in `NumPy <http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html #internal-memory-layout-of-an-ndarray>`_. However, packages that render image data may expect it in column-major order. If the axes seem 'swapped' this is likely the reason. The :attr:`~Var.attrs` Python attribute acts as a dictionary referencing zAttributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`zAttrList` for more on the dictionary of attributes. With writing, as with reading, every attempt has been made to match the behavior of Python lists. You can write one record, many records, or even certain elements of all records. There is one restriction: only the record dimension (i.e. dimension 0) can be resized by write, as all records in a variable must have the same dimensions. Similarly, only whole records can be deleted. .. note:: Unusual error messages on writing data usually mean that pycdf is unable to interpret the data as a regular array of a single type matching the type and shape of the variable being written. A 5x4 array is supported; an irregular array where one row has five columns and a different row has six columns is not. Error messages of this type include: - ``Data must be well-formed, regular array of number, string, or datetime`` - ``setting an array element with a sequence.`` - ``shape mismatch: objects cannot be broadcast to a single shape`` For these examples, assume Flux has 100 records and dimensions [2, 3]. Rewrite the first record without changing the rest: >>> Flux[0] = [[1, 2, 3], [4, 5, 6]] Writes a new first record and delete all the rest: >>> Flux[...] = [[1, 2, 3], [4, 5, 6]] Write a new record in the last position and add a new record after: >>> Flux[99:] = [[[1, 2, 3], [4, 5, 6]], ... [[11, 12, 13], [14, 15, 16]]] Insert two new records between the current number 5 and 6: >>> Flux[5:6] = [[[1, 2, 3], [4, 5, 6]], [[11, 12, 13], ... [14, 15, 16]]] This operation can be quite slow, as it requires reading and rewriting the entire variable. (CDF does not directly support record insertion.) Change the first element of the first two records but leave other elements alone: >>> Flux[0:2, 0, 0] = [1, 2] Remove the first record: >>> del Flux[0] Removes record 5 (the sixth): >>> del Flux[5] Due to the need to work around a bug in the CDF library, this operation can be quite slow. Delete *all data* from ``Flux``, but leave the variable definition intact: >>> del Flux[...] .. note:: Although this interface only directly supports zVariables, zMode is set on opening the CDF so rVars appear as zVars. See p.24 of the CDF user's guide; pyCDF uses zMode 2. .. autosummary:: ~Var.attrs ~Var.compress ~Var.copy ~Var.dtype ~Var.dv ~Var.insert ~Var.name ~Var.rename ~Var.rv ~Var.shape ~Var.type .. attribute:: Var.attrs zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. .. automethod:: compress .. automethod:: copy .. autoattribute:: dtype .. automethod:: dv .. automethod:: insert .. automethod:: name .. automethod:: rename .. automethod:: rv .. autoattribute:: shape .. 
automethod:: type """ def __init__(self, cdf_file, var_name, *args): """Create or locate a variable Parameters ========== cdf_file : :py:class:`pycdf.CDF` CDF file containing this variable var_name : string name of this variable Other Parameters ================ args additional arguments passed to :py:meth:`_create`. If none, opens an existing variable. If provided, creates a new one. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning """ self.cdf_file = cdf_file #This is the definitive "identify" of variable self._name = None self._type = None #CDF type (long) self._raw = False #Raw access (skip all conversions) if len(args) == 0: self._get(var_name) else: self._create(var_name, *args) #Weak reference to attribute list (use attrs instead) #This avoids a reference loop self._attrlistref = weakref.ref(zAttrList(self)) def __getitem__(self, key): """Returns a slice from the data array. Details under :py:class:`pycdf.Var`. @return: The data from this variable @rtype: list-of-lists of appropriate type. @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) #Hyperslice mostly catches this sort of thing, but #an empty variable is a special case, since we might want to #WRITE to 0th record (which Hyperslice also supports) but #can't READ from it, and iterating over tries to read from it. if hslice.rv: if hslice.dimsizes[0] == 0 and hslice.degen[0] and \ hslice.starts[0] == 0: raise IndexError('record index out of range') #For NRV, again hslice will assume 0th record exists since we might #want to write. So ANY degenerate dim other than the glued-on 0th #suggests an explicit index that should fail. None degenerate suggests #make an empty array. #Note this is pulling a lot of hyperslice stuff into getitem! elif hslice.dimsizes[0] == 0: if len(hslice.degen) > 1 and max(hslice.degen[1:]): raise IndexError('record index out of range') else: #The zero-length dimension is degenerate so it gets chopped, #and you can't have a zero-length numpy array that still #maintains the size of all other dimensions. So just force #a zero-dim array and the rest will follow hslice.counts[...] = 0 #If this is a scalar, need to make a single non-degenerate #dimension so it can be empty. if len(hslice.counts) == 1: hslice.degen[0] = False result = hslice.create_array() if hslice.counts[0] != 0: hslice.select() lib.call(const.GET_, const.zVAR_HYPERDATA_, result.ctypes.data_as(ctypes.c_void_p)) return hslice.convert_input_array(result) def __delitem__(self, key): """Removes a record (or set of records) from the CDF Only whole records can be deleted, so the del call must either specify only one dimension or it must specify all elements of the non-record dimensions. This is *not* a way to resize a variable! Deleting records from the middle of a variable may be very slow in some circumstances. To work around a bug in CDF library versions 3.4.0 and before, all the data must be read in, the requested deletions done, and then all written back out. 
@param key: index or slice to delete @type key: int or slice @raise TypeError: if an attempt is made to delete from a non record-varying variable, or to delete below the record level """ if not self.rv(): raise TypeError('Cannot delete records from non-record-varying ' 'variable.') hslice = _Hyperslice(self, key) if hslice.dims > 1 and (hslice.counts[1:] != hslice.dimsizes[1:]).any(): raise TypeError('Can only delete entire records.') if hslice.counts[0] == 0: return start = hslice.starts[0] count = hslice.counts[0] interval = hslice.intervals[0] dimsize = hslice.dimsizes[0] self._call() dangerous_delete = False if lib._del_middle_rec_bug and \ (interval != 1 or (start != 0 and start + count < dimsize)): #delete from middle is dangerous if only have one index entry entries = ctypes.c_long(0) lib.call(const.GET_, const.zVAR_nINDEXENTRIES_, ctypes.byref(entries)) dangerous_delete = (entries.value == 1) if dangerous_delete: data = self[...] data = numpy.delete( data, numpy.arange(start, start + count * interval, interval), 0) self[0:dimsize - count] = data first_rec = dimsize - count last_rec = dimsize - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif interval == 1: first_rec = ctypes.c_long(start) last_rec = ctypes.c_long(start + count - 1) lib.call(const.DELETE_, const.zVAR_RECORDS_, first_rec, last_rec) else: self._call() #delete from end to avoid renumbering of records for recno in range(start + (count - 1) * interval, start - 1, -1 * interval): lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(recno), ctypes.c_long(recno)) def __setitem__(self, key, data): """Puts a slice into the data array. Details under :py:class:`pycdf.Var`. @param key: index or slice to store @type key: int or slice @param data: data to store @type data: numpy.array @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. 
IndexError will @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) n_recs = hslice.counts[0] hslice.expand(data) cdf_type = self.type() if cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) else: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=self._np_type()) if cdf_type == const.CDF_EPOCH16.value: datashape = data.shape[:-1] else: datashape = data.shape #Check data sizes if datashape != tuple(hslice.expected_dims()): raise ValueError('attempt to assign data of dimensions ' + str(datashape) + ' to slice of dimensions ' + str(tuple(hslice.expected_dims()))) #Flip majority and reversed dimensions, see convert_input_array data = hslice.convert_output_array(data) #Handle insertions and similar weirdness if hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Specified slice ends before last record, so insert in middle saved_data = self[hslice.starts[0] + n_recs:] if hslice.counts[0] > 0: hslice.select() lib.call(const.PUT_, const.zVAR_HYPERDATA_, data.ctypes.data_as(ctypes.c_void_p)) if hslice.counts[0] < n_recs: first_rec = hslice.starts[0] + hslice.counts[0] last_rec = hslice.dimsizes[0] - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Put saved data in after inserted data self[hslice.starts[0] + hslice.counts[0]:] = saved_data def extend(self, data): """ Append multiple values to the end of this variable This is an efficiency function which overrides the base implementation in MutableSequence. Parameters ---------- data : the data to append """ self[len(self):] = data def insert(self, index, data): """ Inserts a *single* record before an index Parameters ---------- index : int index before which to insert the new record data : the record to insert """ self[index:index] = [data] def _create(self, var_name, datatype, n_elements = 1, dims = (), recVary = const.VARY, dimVarys = None): """Creates a new zVariable @param var_name: name of this variable @type var_name: string @param datatype: CDF data type @type datatype: ctypes.c_long @param n_elements: number of elements (should be 1 except for CDF_CHAR variables). @type n_elements: long @param dims: size of each dimension for multi-dimensional variable, or empty for a zero-dimensional @type dims: sequence of long @param recVary: record variance for this variable (VARY/NOVARY) @type recVary: long @param dimVarys: array of VARY or NOVARY, variance for each dimension @type dimVarys: sequence of long @return: new variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.new}. 
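A sketch of the public equivalent (variable name, type, and
dimensions are illustrative only):

>>> var = cdffile.new('Flux', type=const.CDF_FLOAT, dims=[3])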
""" dim_array = (ctypes.c_long * len(dims))(*dims) enc_name = var_name.encode('ascii') if dimVarys is None: dim_vary_array = (ctypes.c_long * (len(dims) if len(dims) > 0 else 1))(const.VARY) else: dim_vary_array = (ctypes.c_long * len(dims))(*dimVarys) varNum = ctypes.c_long(0) self.cdf_file._call(const.CREATE_, const.zVAR_, enc_name, datatype, ctypes.c_long(n_elements), ctypes.c_long(len(dims)), dim_array, recVary, dim_vary_array, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) def _delete(self): """Removes this zVariable from the CDF @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ self._call(const.DELETE_, const.zVAR_) self.cdf_file.clear_from_cache(self._name) self._name = None def _get(self, var_name): """Gets an existing zVariable @param var_name: name of this variable @type var_name: string @return: variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.__getitem__}. """ if isinstance(var_name, str_classes): try: enc_name = var_name.encode('ascii').rstrip() except AttributeError: enc_name = var_name.rstrip() #already in ASCII #'touch' CDF to cause an error if the name isn't there; get number varNum = ctypes.c_long(0) self.cdf_file._call(const.GET_, const.zVAR_NUMBER_, enc_name, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) else: #Looking up by number name = ctypes.create_string_buffer(const.CDF_VAR_NAME_LEN256+1) self.cdf_file._call(const.SELECT_, const.zVAR_, ctypes.c_long(var_name), const.GET_, const.zVAR_NAME_, name) self._name = name.value.rstrip() self.cdf_file.add_to_cache(self._name, var_name) def _num(self): """Returns the zVar number for this variable @return: number of this zVar @rtype: int """ return self.cdf_file.var_num(self._name) def __len__(self): """Get number of records for this variable in this file @return: Number of records @rtype: long @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ count = ctypes.c_long(0) self._call(const.GET_, const.zVAR_MAXREC_, ctypes.byref(count)) return (count.value + 1) def __repr__(self): """Returns representation of the variable Cannot return anything that can be eval'd to create a copy, so just wrap the informal representation in angle brackets. @return: info on this zVar @rtype: str """ return '<Var:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the variable This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.Var`. 
@return: info on this zVar, CDFTYPE [dimensions] NRV (if not record-varying) @rtype: str """ if self.cdf_file._opened: cdftype = self.type() chartypes = (const.CDF_CHAR.value, const.CDF_UCHAR.value) rv = self.rv() typestr = lib.cdftypenames[cdftype] + \ ('*' + str(self._nelems()) if cdftype in chartypes else '' ) if rv: sizestr = str([len(self)] + self._dim_sizes()) else: sizestr = str(self._dim_sizes()) return typestr + ' ' + sizestr + ('' if rv else ' NRV') else: if isinstance(self._name, str): return 'zVar "{0}" in closed CDF {1}'.format( self._name, self.cdf_file.pathname) else: return 'zVar "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self.cdf_file.pathname.decode('ascii')) def _n_dims(self): """Get number of dimensions for this variable @return: the number of dimensions @rtype: long """ n_dims = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMDIMS_, ctypes.byref(n_dims)) return n_dims.value def _dim_sizes(self): """Get the dimension sizes for this variable @return: sequence of sizes @rtype: sequence of long @note: This will always be in Python order (i.e. row major, last index iterates most quickly), *regardless* of the majority of the CDF. """ sizes = (ctypes.c_long * const.CDF_MAX_DIMS)(0) self._call(const.GET_, const.zVAR_DIMSIZES_, sizes) sizes = sizes[0:self._n_dims()] return sizes def rv(self, new_rv=None): """ Gets or sets whether this variable has record variance If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Other Parameters ================ new_rv : boolean True to change to record variance, False to change to NRV, unspecified to simply check variance. Returns ======= out : Boolean True if record varying, False if NRV """ if new_rv != None: self._call(const.PUT_, const.zVAR_RECVARY_, const.VARY if new_rv else const.NOVARY) vary = ctypes.c_long(0) self._call(const.GET_, const.zVAR_RECVARY_, ctypes.byref(vary)) return vary.value != const.NOVARY.value def dv(self, new_dv=None): """ Gets or sets dimension variance of each dimension of variable. If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Parameters ========== new_dv : list of boolean Each element True to change that dimension to dimension variance, False to change to not dimension variance. (Unspecified to simply check variance.) Returns ======= out : list of boolean True if that dimension has variance, else false. """ ndims = self._n_dims() if new_dv != None: if len(new_dv) != ndims: raise ValueError('Must specify variance for ' + str(ndims) + 'dimensions.') varies = (ctypes.c_long * ndims)( *[const.VARY if dv else const.NOVARY for dv in new_dv]) self._call(const.PUT_, const.zVAR_DIMVARYS_, varies) if ndims == 0: return [] varies = (ctypes.c_long * const.CDF_MAX_DIMS)() self._call(const.GET_, const.zVAR_DIMVARYS_, varies) return [dv != const.NOVARY.value for dv in varies[0:ndims]] def _call(self, *args, **kwargs): """Select this CDF and variable and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. @param args: Passed directly to the CDF library interface. Useful constants are defined in the :py:mod:`pycdf.const` module of this package. @type args: various, see :py:mod:`ctypes`. 
@return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self.cdf_file._call( const.SELECT_, const.zVAR_, ctypes.c_long(self.cdf_file.var_num(self._name)), *args, **kwargs) def _np_type(self): """Returns the numpy type of this variable This is the numpy type that will come directly out of the CDF; see :meth:`dtype` for the representation post-conversion. Raises ====== CDFError : for library-reported error or failure to find numpy type Returns ======= out : dtype numpy dtype that will hold value from this variable """ cdftype = self.type() if cdftype == const.CDF_CHAR.value or cdftype == const.CDF_UCHAR.value: return numpy.dtype('S' + str(self._nelems())) try: return lib.numpytypedict[cdftype] except KeyError: raise CDFError(const.BAD_DATA_TYPE) def type(self, new_type=None): """ Returns or sets the CDF type of this variable Parameters ========== new_type : ctypes.c_long the new type from :mod:`~pycdf.const` Returns ======= out : int CDF type """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) n_elements = ctypes.c_long(self._nelems()) self._call(const.PUT_, const.zVAR_DATASPEC_, new_type, n_elements) self._type = None if self._type is None: cdftype = ctypes.c_long(0) self._call(const.GET_, const.zVAR_DATATYPE_, ctypes.byref(cdftype)) self._type = cdftype.value return self._type def _nelems(self): """Number of elements for each value in this variable This is the length of strings for CHAR and UCHAR, should be 1 otherwise. @return: length of strings @rtype: int """ nelems = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMELEMS_, ctypes.byref(nelems)) return nelems.value def name(self): """ Returns the name of this variable Returns ======= out : str variable's name """ if isinstance(self._name, str): return self._name elif isinstance(self._name, bytes): return self._name.decode() def compress(self, comptype=None, param=None): """ Set or check the compression of this variable Compression may not be changeable on variables with data already written; even deleting the data may not permit the change. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
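A usage sketch (``zvar`` is a hypothetical zVariable with no data
written yet; gzip level 5 is illustrative):

>>> ctype, cparam = zvar.compress(const.GZIP_COMPRESSION, 5)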
Returns ======= out : tuple the (comptype, param) currently in effect """ return _compress(self, comptype, param) def copy(self): """ Copies all data and attributes from this variable Returns ======= out : :class:`VarCopy` list of all data in record order """ return VarCopy(self) def rename(self, new_name): """ Renames this variable Parameters ========== new_name : str the new name for this variable """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_VAR_NAME_LEN256: raise CDFError(const.BAD_VAR_NAME) self._call(const.PUT_, const.zVAR_NAME_, enc_name) self.cdf_file.add_to_cache( enc_name, self.cdf_file.var_num(self._name)) #Still in cache del self.cdf_file._var_nums[self._name] self._name = enc_name @property def shape(self): """ Provides the numpy array-like shape of this variable. Returns a tuple; first element is number of records (RV variable only) And the rest provide the dimensionality of the variable. .. note:: Assigning to this attribute will not change the shape. """ if self.rv(): return tuple([len(self)] + self._dim_sizes()) else: return tuple(self._dim_sizes()) @property def dtype(self): """ Provide the numpy dtype equivalent to the CDF type of this variable. Data from this variable will be returned in numpy arrays of this type. See Also -------- type """ cdftype = self.type() if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str is not bytes and not self._raw: return numpy.dtype('U' + str(self._nelems())) if cdftype in (const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value) and not self._raw: return numpy.dtype('O') return self._np_type() def _get_attrs(self): """Get attribute list Provide access to the zVar's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = zAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. """) class _Hyperslice(object): """Represents a CDF 'slice' used for the hyper CDF functions For internal module use only. @ivar dims: number of dimensions to this slice, usually number of dimensions to the variable plus one for the record, which represents the 0th (least rapidly varying) dimension. @type dims: int @ivar dimsizes: size of each dimension (0th is number of records) @type dimsizes: list of int @ivar starts: index of the start value for each dimension ('dimension indices' in CDF speak) @type starts: list of int @ivar counts: number of values to get from each dimension. Final result will be the product of everything in counts. ('dimension counts' in CDF speak) @type counts: numpy.array @ivar intervals: interval between successive indices to use for each dimension. ('dimension invervals' in CDF speak) @type intervals: list of int @ivar degen: is this dimension degenerate, i.e. should be removed in the returned dataset. A 3D array with one dimension degenerate will be returned as a 2D array (i.e. list-of-lists.) @type degen: numpy.array @ivar rev: should this dimension be returned in reverse order? 
@type rev: numpy.array @ivar column: is this slice in column-major mode (if false, row-major) @type column: boolean @ivar zvar: what CDF variable this object slices on @type zvar: :py:class:`pycdf.Var` @ivar expanded_key: fully-expanded version of the key passed to the constructor (all dimensions filled in) @type expanded_key: tuple @note: All dimension-related variables are stored row-major (Python order) """ def __init__(self, zvar, key): """Create a Hyperslice @param zvar: zVariable that this slices @type zvar: :py:class:`pycdf.Var` @param key: Python multi-dimensional slice as passed to __getitem__ @type key: tuple of slice and/or int @raise IndexError: if slice is out of range, mismatches dimensions, or otherwise unparsable. @raise ValueError: if slice has invalid values """ self.zvar = zvar self.rv = self.zvar.rv() #dim of records, + 1 record dim (NRV always is record 0) self.dims = zvar._n_dims() + 1 self.dimsizes = [len(zvar)] + \ zvar._dim_sizes() self.starts = [0] * self.dims self.counts = numpy.empty((self.dims,), dtype=numpy.int32) self.counts.fill(1) self.intervals = [1] * self.dims self.degen = numpy.zeros(self.dims, dtype=numpy.bool) self.rev = numpy.zeros(self.dims, dtype=numpy.bool) #key is: #1. a single value (integer or slice object) if called 1D #2. a tuple (of integers and/or slice objects) if called nD #3. Each item is either a single value (degenerate dim) # or a slice object. if not hasattr(key, '__len__'): #Not a container object, pack in tuple key = (key, ) if not self.rv: key = (0, ) + key #NRV, so always get 0th record (degenerate) key = self.expand_ellipsis(key, self.dims) if self.rv: #special-cases for RV variables if len(key) == 1: #get all data for this record(s) key = self.expand_ellipsis(key + (Ellipsis, ), self.dims) elif len(key) == self.dims - 1: #get same slice from each record key = (slice(None, None, None), ) + key if len(key) == self.dims: self.expanded_key = key for i in range(self.dims): idx = key[i] if hasattr(idx, 'start'): #slice (self.starts[i], self.counts[i], self.intervals[i], self.rev[i]) = \ self.convert_range(idx.start, idx.stop, idx.step, self.dimsizes[i]) else: #Single degenerate value if idx < 0: idx += self.dimsizes[i] if idx != 0 and (idx >= self.dimsizes[i] or idx < 0): raise IndexError('list index out of range') self.starts[i] = idx self.degen[i] = True else: raise IndexError('Slice does not match dimensions for zVar ' + str(zvar._name)) self.column = zvar.cdf_file.col_major() def expected_dims(self, data=None): """Calculate size of non-degenerate dimensions Figures out size, in each dimension, of expected input data @return: size of each dimension for this slice, excluding degenerate @rtype: list of int """ return [self.counts[i] for i in range(self.dims) if not self.degen[i]] def expand(self, data): """Expands the record dimension of this slice to hold a set of data If the length of data (outermost dimension) is larger than the record count (counts[0]) for this slice, expand the slice to hold all the data. This requires that the record dimension of the slice not be degenerate, and also that it not have been completely specified when the hyperslice was created (i.e. record dimension either ellipsis or no specified stop.) Does *not* expand any other dimension, since that's Very Hard in CDF. 
@param data: the data which are intended to be stored in this slice @type data: list """ rec_slice = self.expanded_key[0] if not self.rv or isinstance(data, str_classes) or self.degen[0] or \ not hasattr(rec_slice, 'stop'): return if len(data) < self.counts[0]: #Truncate to fit data if rec_slice.stop is None and rec_slice.step in (None, 1): self.counts[0] = len(data) elif len(data) > self.counts[0]: #Expand to fit data if rec_slice.step in (None, 1): self.counts[0] = len(data) def create_array(self): """Creates a numpy array to hold the data from this slice Returns ======= out : numpy.array array sized, typed, and dimensioned to hold data from this slice """ counts = self.counts degen = self.degen if self.column: counts = self.reorder(counts) degen = self.reorder(degen) #TODO: Forcing C order for now, revert to using self.column later array = numpy.empty( [counts[i] for i in range(len(counts)) if not degen[i]], self.zvar._np_type(), order='C') return numpy.require(array, requirements=('C', 'A', 'W')) def convert_input_array(self, buffer): """Converts a buffer of raw data from this slice EPOCH(16) variables always need to be converted. CHAR need converted to Unicode if py3k Parameters ========== buffer : numpy.array data as read from the CDF file Returns ======= out : numpy.array converted data """ result = self._flip_array(buffer) #Convert to derived types cdftype = self.zvar.type() if not self.zvar._raw: if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str != bytes: dt = numpy.dtype('U{0}'.format(result.dtype.itemsize)) result = numpy.require(numpy.char.array(result).decode(), dtype=dt) elif cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(result) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(result) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(result) return result def convert_output_array(self, buffer): """Convert a buffer of data that will go into this slice Parameters ========== buffer : numpy.array data to go into the CDF file Returns ======= out : numpy.array input with majority flipped and dimensions reversed to be suitable to pass directly to CDF library. """ buffer = self._flip_array(buffer) return numpy.require(buffer, requirements=('C', 'A', 'W')) def _flip_array(self, data): """ Operations for majority, etc. common between convert_input and _output """ cdftype = self.zvar.type() #Flip majority if any non-degenerate dimensions exist if self.column and not min(self.degen): #Record-number dim degen, swap whole thing if self.degen[0]: if cdftype == const.CDF_EPOCH16.value: #Maintain last dimension data = data.transpose( list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose() #Record-number dimension is not degenerate, so keep it first else: if cdftype == const.CDF_EPOCH16.value: data = data.transpose( [0] + list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose( [0] + list(range(len(data.shape) - 1, 0, -1))) #Reverse non-degenerate dimensions in rev #Remember that the degenerate indices are already gone! 
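#(Illustrative sketch: for a slice over the record dimension plus two
# data dimensions, with only the first data dimension reversed and
# nothing degenerate, ``sliced`` below works out to
# [slice(None), slice(None, None, -1), slice(None)],
# i.e. the equivalent of data[:, ::-1, :].)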
if self.rev.any(): sliced = [(slice(None, None, -1) if self.rev[i] else slice(None)) for i in range(self.dims) if not self.degen[i]] if cdftype == const.CDF_EPOCH16.value: #don't reverse last dim sliced.extend(slice(None)) data = operator.getitem(data, tuple(sliced)) return data def select(self): """Selects this hyperslice in the CDF Calls the CDF library to select the CDF, variable, records, and array elements corresponding to this slice. """ args = (const.SELECT_, const.zVAR_RECNUMBER_, ctypes.c_long(self.starts[0]), const.SELECT_, const.zVAR_RECCOUNT_, ctypes.c_long(self.counts[0]), const.SELECT_, const.zVAR_RECINTERVAL_, ctypes.c_long(self.intervals[0])) if self.dims > 1: dims = self.dims - 1 args += (const.SELECT_, const.zVAR_DIMINDICES_, (ctypes.c_long * dims)(*self.starts[1:]), const.SELECT_, const.zVAR_DIMCOUNTS_, (ctypes.c_long * dims)(*self.counts[1:]), const.SELECT_, const.zVAR_DIMINTERVALS_, (ctypes.c_long * dims)(*self.intervals[1:])) self.zvar._call(*args) @staticmethod def expand_ellipsis(slices, n_dims): """Expands any ellipses into correct number of full-size slices @param slices: tuple of slices, integers, or ellipse objects @type slices: tuple @param n_dims: number of dimensions this slice is over @type n_dims: int @return: L{slices} with ellipses replaced by appropriate number of full-dimension slices @rtype: tuple @raise IndexError: if ellipses specified when already have enough dimensions """ if slices is Ellipsis: return tuple([slice(None, None, None) for i in range(n_dims)]) #Elements might be numpy arrays, so can't use in/index idx = [i for i, v in enumerate(slices) if v is Ellipsis] if not idx: #no ellipsis return slices if len(idx) > 1: #multiples! raise IndexError('Ellipses can only be used once per slice.') idx = idx[0] #how many dims to expand ellipsis to #remember the ellipsis is in len(slices) and must be replaced! extra = n_dims - len(slices) + 1 if extra < 0: raise IndexError('too many indices') result = slices[0:idx] + (slice(None), ) * extra + slices[idx+1:] return result @staticmethod def check_well_formed(data): """Checks if input data is well-formed, regular array""" d = numpy.asanyarray(data) if d.dtype == numpy.object: #this is probably going to be bad try: len(d.flat[0]) except TypeError: #at least it's not a list pass else: raise ValueError( 'Data must be well-formed, regular array of number, ' 'string, or datetime') @staticmethod def dimensions(data): """Finds the dimensions of a nested list-of-lists @param data: data of which dimensions are desired @type data: list (of lists) @return: dimensions of L{data}, in order outside-in @rtype: list of int @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) _Hyperslice.check_well_formed(d) return d.shape @staticmethod def types(data, backward=False): """Find dimensions and valid types of a nested list-of-lists Any given data may be representable by a range of CDF types; infer the CDF types which can represent this data. This breaks down to: 1. Proper kind (numerical, string, time) 2. Proper range (stores highest and lowest number) 3. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: 1. Type that matches precision of data first, then 2. integer type before float type, then 3. Smallest type first, then 4. signed type first, then 5. specifically-named (CDF_BYTE) vs. 
generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if L{data} specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: 1. absolute values between 0 and 3e-39 2. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. @param data: data for which dimensions and CDF types are desired @type data: list (of lists) @param backward: limit to pre-CDF3 types @type backward: bool @return: dimensions of L{data}, in order outside-in; CDF types which can represent this data; number of elements required (i.e. length of longest string) @rtype: 3-tuple of lists ([int], [ctypes.c_long], [int]) @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) dims = d.shape elements = 1 types = [] _Hyperslice.check_well_formed(d) if d.dtype.kind in ('S', 'U'): #it's a string types = [const.CDF_CHAR, const.CDF_UCHAR] elements = d.dtype.itemsize if d.dtype.kind == 'U': #UTF-8 uses 4 bytes per elements //= 4 elif d.size and hasattr(numpy.ma.getdata(d).flat[0], 'microsecond'): if max((dt.microsecond % 1000 for dt in d.flat)) > 0: types = [const.CDF_EPOCH16, const.CDF_EPOCH, const.CDF_TIME_TT2000] else: types = [const.CDF_EPOCH, const.CDF_EPOCH16, const.CDF_TIME_TT2000] if backward: del types[types.index(const.CDF_EPOCH16)] del types[-1] elif not lib.supports_int8: del types[-1] elif d is data or isinstance(data, numpy.generic): #numpy array came in, use its type (or byte-swapped) types = [k for k in lib.numpytypedict if (lib.numpytypedict[k] == d.dtype or lib.numpytypedict[k] == d.dtype.newbyteorder()) and not k in lib.timetypes] if (not lib.supports_int8 or backward) \ and const.CDF_INT8.value in types: del types[types.index(const.CDF_INT8.value)] #Maintain priority to match the ordered lists below: #float/double (44, 45) before real (21/22), and #byte (41) before int (1) before char (51). So hack. #Consider making typedict an ordered dict once 2.6 is dead. types.sort(key=lambda x: x % 50, reverse=True) if not types: #not a numpy array, or can't parse its type if d.dtype.kind == 'O': #Object. 
Try to make it numeric #Can't do safe casting from Object, so try and compare #Basically try most restrictive to least restrictive trytypes = (numpy.uint64, numpy.int64, numpy.float64) for t in trytypes: try: newd = d.astype(dtype=t) except: #Failure to cast, try next type continue if (newd == d).all(): #Values preserved, use this type d = newd #Continue with normal guessing, as if a list break else: #fell through without a match raise ValueError( 'Cannot convert generic objects to CDF type.') if d.dtype.kind in ('i', 'u'): #integer minval = numpy.min(d) maxval = numpy.max(d) if minval < 0: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_INT2, const.CDF_INT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 15, 2 ** 31, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] else: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_UINT1, const.CDF_INT2, const.CDF_UINT2, const.CDF_INT4, const.CDF_UINT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 8, 2 ** 15, 2 ** 16, 2 ** 31, 2 ** 32, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] types = [t for (t, c) in zip(types, cutoffs) if c > maxval and (minval >= 0 or minval >= -c)] if (not lib.supports_int8 or backward) \ and const.CDF_INT8 in types: del types[types.index(const.CDF_INT8)] else: #float if dims is (): if d != 0 and (abs(d) > 1.7e38 or abs(d) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] else: absolutes = numpy.abs(d[d != 0]) if len(absolutes) > 0 and \ (numpy.max(absolutes) > 1.7e38 or numpy.min(absolutes) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] types = [t.value if hasattr(t, 'value') else t for t in types] return (dims, types, elements) @staticmethod def reorder(seq): """Reorders seq to switch array majority Used to take an array of subscripts between row and column majority. First element is not touched, being the record number. @param seq: a sequence of *subscripts* @type seq: sequence of integers @return: seq with all but element 0 reversed in order @rtype: sequence of integers """ return numpy.concatenate((seq[0:1], numpy.flipud(seq)[:-1])) @staticmethod def convert_range(start, stop, step, size): """Converts a start/stop/step range to start/count/interval (i.e. changes from Python-style slice to CDF-style) @param start: index to start a slice at, may be none or negative @type start: int @param stop: index at end of slice (one-past, standard Python), may be none or negative @type stop: int @param step: interval for stepping through stlice @type step: int @param size: size of list to slice @type size: int @return: (start, count, interval, rev) where: 1. start is the start index, normalized to be within the size of the list and negatives handled 2. count is the number of records in the slice, guaranteed to stop before the end 3. interval is the skip between records 4. rev indicates whether the sequence should be reversed @rtype: (int, int, int, boolean) """ (start, stop, step) = slice(start, stop, step).indices(size) if step < 0: step *= -1 count = int((start - stop + step - 1) / step) start = start - (count - 1) * step rev = True else: count = int((stop - start + step - 1) / step) rev = False if count < 0: count = 0 start = 0 return (start, count, step, rev) class Attr(MutableSequence): """An attribute, g or z, for a CDF .. 
warning:: This class should not be used directly, but only in its subclasses, :class:`gAttr` and :class:`zAttr`. The methods listed here are safe to use in the subclasses. Represents a CDF attribute, providing access to the Entries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. An introduction to CDF attributes can be found in section 2.4 of the CDF user's guide. Each element of the list is a single Entry of the appropriate type. The index to the elements is the Entry number. Multi-dimensional slicing is *not* supported; an Entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry .. autosummary:: ~Attr.append ~Attr.has_entry ~Attr.insert ~Attr.max_idx ~Attr.new ~Attr.number ~Attr.rename ~Attr.type .. automethod:: append .. automethod:: has_entry .. automethod:: insert .. automethod:: max_idx .. automethod:: new .. automethod:: number .. automethod:: rename .. automethod:: type """ def __init__(self, cdf_file, attr_name, create=False): """Initialize this attribute @param cdf_file: CDF file containing this attribute @type cdf_file: :py:class:`pycdf.CDF` @param attr_name: Name of this attribute @type attr_name: str @param create: True to create attribute, False to look up existing. @type create: bool """ self._cdf_file = cdf_file self._raw = False if isinstance(attr_name, str_classes): try: self._name = attr_name.encode('ascii') except AttributeError: self._name = attr_name attrno = ctypes.c_long() if create: self._cdf_file._call(const.CREATE_, const.ATTR_, self._name, self.SCOPE, ctypes.byref(attrno)) self._cdf_file.add_attr_to_cache( self._name, attrno.value, self.SCOPE == const.GLOBAL_SCOPE) else: #Ensure exists, and populate cache. See scope note below attrno, scope = self._cdf_file.attr_num(self._name) else: name = ctypes.create_string_buffer(const.CDF_ATTR_NAME_LEN256 + 1) scope = ctypes.c_long(0) self._cdf_file._call(const.SELECT_, const.ATTR_, ctypes.c_long(attr_name)) #Because it's possible to create a gAttr Python objecting #referencing an Attribute with variable scope, and vice-versa, #do NOT assume the scope matches #(Higher level code checks for that being a bad thing.) self._cdf_file._call( const.GET_, const.ATTR_NAME_, name, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) self._name = name.value.rstrip() if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) self._cdf_file.add_attr_to_cache(self._name, attr_name, scope) def __getitem__(self, key): """Return a slice of Entries. Because Attributes may be sparse, a multi-element slice will return None for those elements which do not have associated Entries. @param key: index or range of Entry number to return @type key: slice or int @return: a list of entries, appropriate type. @raise IndexError: if L{key} is an int and that Entry number does not exist. 
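A sketch (``attribute`` is a hypothetical Attr with Entries 0 and 2
only; the returned values are placeholders):

>>> attribute[0:3]
['entry zero', None, 'entry two']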
""" if key is Ellipsis: key = slice(None, None, None) if hasattr(key, 'indices'): idx = range(*key.indices(self.max_idx() + 1)) return [self._get_entry(i) if self.has_entry(i) else None for i in idx] else: if self.has_entry(key): return self._get_entry(key) else: raise IndexError('list index ' + str(key) + ' out of range.') def _check_other_entries(self, types): """Try to get the type of this entry from others in the Attribute For zAttrs, checks if all other Entries are the same type, and at least one doesn't match its zVar, i.e. Entry type dominates (otherwise assumption is the Var type dominates). For gAttrs, checks all other Entries, and gives priority to the one that's earliest in the possible type list and exists in other Entries. This is only one component of Entry type guessing! :param list types: CDF types that are candidates (match the data) :return: The type discerned from other Entries, or None """ if self.ENTRY_ == const.zENTRY_: #If everything else is the same entry type, #and one is not the same as its var, probably #all entries should be of that type cand_et = None #The Entry type that might work one_var_diff = False #One Var has a type different from Entry for num in range(self.max_idx() + 1): if not self.has_entry(num): continue vartype = self._cdf_file[num].type() entrytype = self.type(num) if vartype != entrytype: one_var_diff = True if cand_et is None: if not entrytype in types: return None #One var has Entry with "impossible" type cand_et = entrytype elif cand_et != entrytype: return None #Two vars have Entries with different types if one_var_diff and cand_et is not None: return cand_et else: # Of those types which exist in other entries, # find the one which is earliest # in types, i.e. the preferred type entrytypes = [self.type(num) for num in range(self.max_idx() + 1) if self.has_entry(num)] entrytypes = [et for et in entrytypes if et in types] if entrytypes: return types[ min([types.index(et) for et in entrytypes])] return None def __setitem__(self, key, data): """Set a slice of Entries. @param key: index or range of Entry numbers to set @type key: slice or int @param data: the data to set these entries to. Normally each entry should be a sequence; if a scalar is provided, it is treated as a single-element list. @type data: scalar or list @raise ValueError: if size of {data} does not match size of L{key} @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. 
""" if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): #Single value, promote everything a dimension idx = (key, key + 1, 1) data = [data] else: idx = key.indices(self.max_idx() + 1) if key.step is None or key.step > 0: #Iterating forward, extend slice to match data if len(data) > len(range(*idx)): idx = (idx[0], idx[0] + idx[2] * len(data), idx[2]) #get, and check, types and sizes for all data #checks first so don't have error after changing half the Entries data_idx = -1 typelist = [] for i in range(*idx): data_idx += 1 if data_idx >= len(data): continue datum = data[data_idx] if datum is None: typelist[i] = (None, None, None) continue (dims, types, elements) = _Hyperslice.types( datum, backward=self._cdf_file.backward) if len(types) <= 0: raise ValueError('Cannot find a matching CDF type.') if len(dims) > 1: raise ValueError('Entries must be scalar or 1D.') elif len(dims) == 1 and isinstance(datum[0], str_classes): raise ValueError('Entry strings must be scalar.') entry_type = None if self.has_entry(i): #If the entry already exists, match its type entry_type = self.type(i) if not entry_type in types: entry_type = None if entry_type is None: #Check other entries for this attribute entry_type = self._check_other_entries(types) if entry_type is None and self.ENTRY_ == const.zENTRY_: #Fall back to zVar type vartype = self._cdf_file[i].type() if vartype in types: entry_type = vartype else: entry_type = types[0] elif entry_type is None: entry_type = types[0] if not entry_type in lib.numpytypedict: raise ValueError('Cannot find a matching numpy type.') typelist.append((dims, entry_type, elements)) data_idx = -1 for i in range(*idx): data_idx += 1 if data_idx >= len(data) or data[data_idx] is None: if self.has_entry(i): del self[i] continue datum = data[data_idx] (dims, entry_type, elements) = typelist[data_idx] self._write_entry(i, datum, entry_type, dims, elements) def __delitem__(self, key): """Delete a slice of Entries. @param key: index or range of Entry numbers to delete @type key: slice or int @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. """ if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): idx = (key, key + 1, 1) else: idx = key.indices(self.max_idx() + 1) for i in range(*idx): self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(i), const.DELETE_, self.ENTRY_) def __iter__(self, current=0): """Iterates over all entries in this Attribute Returns data from one entry at a time until reaches the end. @note: Returned in entry-number order. """ while current <= self.max_idx(): if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current += 1 def __reversed__(self, current=None): """Iterates over all entries in this Attribute Returns data from one entry at a time, starting at end and going to beginning. @note: Returned in entry-number order. """ if current is None: current = self.max_idx() while current >= 0: if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current -= 1 def __len__(self): """Number of Entries for this Attr. NOT same as max Entry number. 
@return: Number of Entries @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_NUMENTRIES_, ctypes.byref(count)) return count.value def __repr__(self): """Returns representation of an attribute Cannot return anything that can be eval'd to create a copy of the attribtute, so just wrap the informal representation in angle brackets. @return: all the data in this attribute @rtype: str """ return '<\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute This is an 'informal' representation in that it cannot be evaluated directly to create an L{Attr}. @return: all the data in this attribute @rtype: str """ if self._cdf_file._opened: return '\n'.join([str(item) for item in self]) else: if isinstance(self._name, str): return 'Attribute "{0}" in closed CDF {1}'.format( self._name, self._cdf_file.pathname) else: return 'Attribute "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self._cdf_file.pathname.decode('ascii')) def insert(self, index, data): """Insert an entry at a particular number Inserts entry at particular number while moving all subsequent entries to one entry number later. Does not close gaps. Parameters ========== index : int index where to put the new entry data : data for the new entry """ max_entry = self.max_idx() if index > max_entry: #Easy case self[index] = data return for i in range(max_entry, index - 1, -1): if self.has_entry(i+1): self.__delitem__(i+1) if self.has_entry(i): self.new(self.__getitem__(i), type=self.type(i), number=i+1) self[index] = data def append(self, data): """Add an entry to end of attribute Puts entry after last defined entry (does not fill gaps) Parameters ========== data : data for the new entry """ self[self.max_idx() + 1] = data def _call(self, *args, **kwargs): """Select this CDF and Attr and call the CDF internal interface @param args: Passed directly to the CDF library interface. @type args: various, see :py:mod:`ctypes`. @return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self._cdf_file._call( const.SELECT_, const.ATTR_, ctypes.c_long(self._cdf_file.attr_num(self._name)[0]), *args, **kwargs) def _entry_len(self, number): """Number of elements in an Entry @param number: number of Entry @type number: int @return: number of elements @rtype: int """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') count = ctypes.c_long(0) self._call( const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_NUMELEMS_, ctypes.byref(count)) return count.value def type(self, number, new_type=None): """Find or change the CDF type of a particular Entry number Parameters ========== number : int number of Entry to check or change Other Parameters ================ new_type type to change this Entry to, from :mod:`~pycdf.const`. Omit to only check type. Returns ======= out : int CDF variable type, see :mod:`~pycdf.const` Notes ===== If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) size = ctypes.c_long(self._entry_len(number)) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATASPEC_, new_type, size, ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') cdftype = ctypes.c_long(0) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATATYPE_, ctypes.byref(cdftype), ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') return cdftype.value def has_entry(self, number): """Check if this attribute has a particular Entry number Parameters ========== number : int number of Entry to check or change Returns ======= out : bool True if ``number`` is a valid entry number; False if not """ status = self._call(const.CONFIRM_, self.ENTRY_EXISTENCE_, ctypes.c_long(number), ignore=(const.NO_SUCH_ENTRY, )) return not status == const.NO_SUCH_ENTRY def max_idx(self): """Maximum index of Entries for this Attr Returns ======= out : int maximum Entry number """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_MAXENTRY_, ctypes.byref(count)) return count.value def new(self, data, type=None, number=None): """Create a new Entry in this Attribute .. note:: If ``number`` is provided and an Entry with that number already exists, it will be overwritten. Parameters ========== data data to put in the Entry Other Parameters ================ type : int type of the new Entry, from :mod:`~pycdf.const` (otherwise guessed from ``data``) number : int Entry number to write, default is lowest available number. """ if number is None: number = 0 while self.has_entry(number): number += 1 (dims, types, elements) = _Hyperslice.types( data, backward=self._cdf_file.backward) if type is None: #Guess based on other entries type = self._check_other_entries(types) if type is None and self.ENTRY_ == const.zENTRY_: #Try to match variable type vartype = self._cdf_file[number].type() if vartype in types: type = vartype if type is None: type = types[0] elif hasattr(type, 'value'): type = type.value self._write_entry(number, data, type, dims, elements) def number(self): """Find the attribute number for this attribute Returns ======= out : int attribute number """ no = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.ATTR_NUMBER_, self._name, ctypes.byref(no)) return no.value def global_scope(self): """Determine scope of this attribute. Returns ======= out : bool True if global (i.e. gAttr), False if zAttr """ return self._cdf_file.attr_num(self._name)[1] def rename(self, new_name): """Rename this attribute Renaming a zAttribute renames it for *all* zVariables in this CDF! 
Parameters ========== new_name : str the new name of the attribute """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_ATTR_NAME_LEN256: raise CDFError(const.BAD_ATTR_NAME) self._call(const.PUT_, const.ATTR_NAME_, enc_name) self._cdf_file.add_attr_to_cache( enc_name, *self._cdf_file.attr_num(self._name)) #still in cache del self._cdf_file._attr_info[self._name] self._name = enc_name def _get_entry(self, number): """Read an Entry associated with this L{Attr} @param number: number of Entry to return @type number: int @return: data from entry numbered L{number} @rtype: list or str """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') #Make a big enough buffer length = self._entry_len(number) cdftype = self.type(number) if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): buff = numpy.empty((), 'S{0}'.format(length), order='C') else: if not cdftype in lib.numpytypedict: raise CDFError(const.BAD_DATA_TYPE) buff = numpy.empty((length,), lib.numpytypedict[cdftype], order='C') buff = numpy.require(buff, requirements=('C', 'A', 'W')) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATA_, buff.ctypes.data_as(ctypes.c_void_p)) #decode if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): if str == bytes or self._raw: #Py2k, leave as bytes result = bytes(buff) else: #Py3k, make unicode result = str(numpy.char.array(buff).decode()) else: if not self._raw: if cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(buff) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(buff) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(buff) else: result = buff else: result = buff if length == 1: result = result[0] return result def _write_entry(self, number, data, cdf_type, dims, elements): """Write an Entry to this Attr. @param number: number of Entry to write @type number: int @param data: data to write @param cdf_type: the CDF type to write, from :py:mod:`pycdf.const` @param dims: dimensions of L{data} @type dims: list @param elements: number of elements in L{data}, 1 unless it is a string @type elements: int """ if len(dims) == 0: n_write = 1 else: n_write = dims[0] if cdf_type in (const.CDF_CHAR.value, const.CDF_UCHAR.value): data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.dtype('S' + str(elements))) n_write = elements elif cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data), except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) elif cdf_type in lib.numpytypedict: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=lib.numpytypedict[cdf_type]) else: raise CDFError(const.BAD_DATA_TYPE) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATA_, ctypes.c_long(cdf_type), ctypes.c_long(n_write), data.ctypes.data_as(ctypes.c_void_p)) def _delete(self): """Delete this Attribute Also deletes all Entries associated with it. 
""" self._call(const.DELETE_, const.ATTR_) self._cdf_file.clear_attr_from_cache(self._name) self._name = None class zAttr(Attr): """zAttribute for zVariables within a CDF. .. warning:: Because zAttributes are shared across all variables in a CDF, directly manipulating them may have unexpected consequences. It is safest to operate on zEntries via :class:`zAttrList`. .. note:: When accessing a zAttr, pyCDF exposes only the zEntry corresponding to the associated zVariable. See Also ======== :class:`Attr` """ ENTRY_ = const.zENTRY_ ENTRY_DATA_ = const.zENTRY_DATA_ SCOPE = const.VARIABLE_SCOPE ENTRY_EXISTENCE_ = const.zENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMzENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXzENTRY_ ENTRY_NUMELEMS_ = const.zENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.zENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.zENTRY_DATASPEC_ def insert(self, index, data): """Insert entry at particular index number Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError def append(self, index, data): """Add entry to end of attribute list Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError class gAttr(Attr): """Global Attribute for a CDF Represents a CDF attribute, providing access to the gEntries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. Normally accessed by providing a key to a :class:`gAttrList`: >>> attribute = cdffile.attrs['attribute_name'] >>> first_gentry = attribute[0] Each element of the list is a single gEntry of the appropriate type. The index to the elements is the gEntry number. A gEntry may be either a single string or a 1D array of numerical type. Entries of numerical type (everything but CDF_CHAR and CDF_UCHAR) with a single element are returned as scalars; multiple-element entries are returned as a list. No provision is made for accessing below the entry level; the whole list is returned at once (but Python's slicing syntax can be used to extract individual items from that list.) Multi-dimensional slicing is *not* supported; an entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry gEntries are *not* necessarily contiguous; a gAttribute may have an entry 0 and entry 2 without an entry 1. :meth:`~Attr.len` will return the *number* of gEntries; use :meth:`~Attr.max_idx` to find the highest defined gEntry number and :meth:`~Attr.has_entry` to determine if a particular gEntry number exists. Iterating over all entries is also supported:: >>> entrylist = [entry for entry in attribute] Deleting gEntries will leave a "hole": >>> attribute[0:3] = [1, 2, 3] >>> del attribute[1] >>> attribute.has_entry(1) False >>> attribute.has_entry(2) True >>> print attribute[0:3] [1, None, 3] Multi-element slices over nonexistent gEntries will return ``None`` where no entry exists. Single-element indices for nonexistent gEntries will raise ``IndexError``. Assigning ``None`` to a gEntry will delete it. 
When assigning to a gEntry, the type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing gEntry of the same number in this gAttribute #. other gEntries in this gAttribute #. data-matching constraints described in :meth:`CDF.new`. See Also ======== :class:`Attr` """ ENTRY_ = const.gENTRY_ ENTRY_DATA_ = const.gENTRY_DATA_ SCOPE = const.GLOBAL_SCOPE ENTRY_EXISTENCE_ = const.gENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMgENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXgENTRY_ ENTRY_NUMELEMS_ = const.gENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.gENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.gENTRY_DATASPEC_ class AttrList(MutableMapping): """Object representing a list of attributes. .. warning:: This class should not be used directly, but only via its subclasses, :class:`gAttrList` and :class:`zAttrList`. Methods listed here are safe to use from the subclasses. .. autosummary:: ~AttrList.clone ~AttrList.copy ~AttrList.from_dict ~AttrList.new ~AttrList.rename .. automethod:: clone .. automethod:: copy .. automethod:: from_dict .. automethod:: new .. automethod:: rename """ def __init__(self, cdf_file, special_entry=None): """Initialize the attribute collection @param cdf_file: CDF these attributes are in @type cdf_file: :py:class:`pycdf.CDF` @param special_entry: callable which returns a "special" entry number, used to limit results for zAttrs to those which match the zVar (i.e. the var number) @type special_entry: callable """ self._cdf_file = cdf_file self.special_entry = special_entry def __getitem__(self, name): """Find an Attribute by name @param name: name of the Attribute to return @type name: str @return: attribute named L{name} @rtype: L{Attr} @raise KeyError: if there is no attribute named L{name} @raise CDFError: other errors in CDF library """ try: attrib = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attrib.global_scope() != self.global_scope: raise KeyError(name + ': no ' + self.attr_name + ' by that name.') return attrib def __setitem__(self, name, data): """Create an Attribute or change its entries @param name: name of Attribute to change @type name: str @param data: Entries to populate this Attribute with. Any existing Entries will be deleted! Another C{Attr} may be specified, in which case all its entries are copied. @type data: scalar, list, or L{Attr} """ if isinstance(data, AttrList): if name in self: del self[name] attr = self._get_or_create(name) for entryno in range(data.max_idx()): if data.has_entry(entryno): attr.new(data[entryno], data.type(entryno), entryno) else: attr = self._get_or_create(name) if isinstance(data, str_classes): data = [data] else: try: junk = len(data) except TypeError: data = [data] attr[:] = data del attr[len(data):] def __delitem__(self, name): """Delete an Attribute (and all its entries) @param name: name of Attribute to delete @type name: str """ try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) attr._delete() def __iter__(self, current=0): """Iterates over all Attr in this CDF or variable Returns name of one L{Attr} at a time until reaches the end. @note: Returned in number order. 
""" count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) while current < count.value: candidate = self.AttrType(self._cdf_file, current) if candidate.global_scope() == self.global_scope: if self.special_entry is None or \ candidate.has_entry(self.special_entry()): if str == bytes: value = yield(candidate._name) else: value = yield(candidate._name.decode()) if value != None: current = self[value].number() current += 1 def __repr__(self): """Returns representation of attribute list Cannot return anything that can be eval'd to create a copy of the list, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<' + self.__class__.__name__ + ':\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute list This is an 'informal' representation in that it cannot be evaluated directly to create an L{AttrList}. @return: all the data in this list of attributes @rtype: str """ if self._cdf_file._opened: return '\n'.join([key + ': ' + ( ('\n' + ' ' * (len(key) + 2)).join( [str(value[i]) + ' [' + lib.cdftypenames[value.type(i)] + ']' for i in range(value.max_idx() + 1) if value.has_entry(i)]) if isinstance(value, Attr) else str(value) + ' [' + lib.cdftypenames[self.type(key)] + ']' ) for (key, value) in sorted(self.items())]) else: if isinstance(self._cdf_file.pathname, str): return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname) else: return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname.decode('ascii')) def clone(self, master, name=None, new_name=None): """ Clones another attribute list, or one attribute from it, into this list. Parameters ========== master : AttrList the attribute list to copy from. This can be any dict-like object. Other Parameters ================ name : str (optional) name of attribute to clone (default: clone entire list) new_name : str (optional) name of the new attribute, default ``name`` """ if name is None: self._clone_list(master) else: self._clone_attr(master, name, new_name) def copy(self): """ Create a copy of this attribute list Returns ======= out : dict copy of the entries for all attributes in this list """ return dict((key, value[:] if isinstance(value, Attr) else value) for (key, value) in self.items()) def new(self, name, data=None, type=None): """ Create a new Attr in this AttrList Parameters ========== name : str name of the new Attribute Other Parameters ================ data data to put into the first entry in the new Attribute type CDF type of the first entry from :mod:`~pycdf.const`. Only used if data are specified. Raises ====== KeyError : if the name already exists in this list """ if name in self: raise KeyError(name + ' already exists.') #A zAttr without an Entry in this zVar will be a "get" not "create" attr = self._get_or_create(name) if data is not None: if self.special_entry is None: attr.new(data, type) else: attr.new(data, type, self.special_entry()) def rename(self, old_name, new_name): """ Rename an attribute in this list Renaming a zAttribute renames it for *all* zVariables in this CDF! Parameters ========== old_name : str the current name of the attribute new_name : str the new name of the attribute """ AttrList.__getitem__(self, old_name).rename(new_name) def from_dict(self, in_dict): """ Fill this list of attributes from a dictionary .. deprecated:: 0.1.5 Use :meth:`~pycdf.AttrList.clone` instead; it supports cloning from dictionaries. 
Parameters ========== in_dict : dict Attribute list is populated entirely from this dictionary; all existing attributes are deleted. """ warnings.warn("from_dict is deprecated and will be removed. Use clone.", DeprecationWarning) for k in in_dict: self[k] = in_dict[k] for k in list(self): if not k in in_dict: del self[k] def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{AttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name self[new_name] = master[name] def _clone_list(self, master): """Clones this attribute list from another @param master: the attribute list to copy from @type master: L{AttrList} """ for name in master: self._clone_attr(master, name) for name in list(self): #Can't iterate over a list we're changing if not name in master: del self[name] def _get_or_create(self, name): """Retrieve L{Attr} or create it if it doesn't exist @param name: name of the attribute to look up or create @type name: str @return: attribute with this name @rtype: L{Attr} """ attr = None try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status != const.NO_SUCH_ATTR: raise if attr is None: attr = self.AttrType(self._cdf_file, name, True) elif attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) return attr class gAttrList(AttrList): """ Object representing *all* the gAttributes in a CDF. Normally accessed as an attribute of an open :class:`CDF`: >>> global_attribs = cdffile.attrs Appears as a dictionary: keys are attribute names; each value is an attribute represented by a :class:`gAttr` object. To access the global attribute TEXT: >>> text_attr = cdffile.attrs['TEXT'] See Also ======== :class:`AttrList` """ AttrType = gAttr attr_name = 'gAttribute' global_scope = True def __len__(self): """ Number of gAttributes in this CDF Returns ======= out : int number of gAttributes in the CDF """ count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMgATTRS_, ctypes.byref(count)) return count.value class zAttrList(AttrList): """Object representing *all* the zAttributes in a zVariable. Normally accessed as an attribute of a :class:`Var` in an open CDF: >>> epoch_attribs = cdffile['Epoch'].attrs Appears as a dictionary: keys are attribute names, values are the value of the zEntry associated with the appropriate zVariable. Each vAttribute in a CDF may only have a *single* entry associated with each variable. The entry may be a string, a single numerical value, or a series of numerical values. Entries with multiple values are returned as an entire list; direct access to the individual elements is not possible. Example: finding the first dependency of (ISTP-compliant) variable ``Flux``: >>> print cdffile['Flux'].attrs['DEPEND_0'] zAttributes are shared among zVariables, one zEntry allowed per zVariable. (pyCDF hides this detail.) Deleting the last zEntry for a zAttribute will delete the underlying zAttribute. zEntries are created and destroyed by the usual dict methods on the zAttrlist: >>> epoch_attribs['new_entry'] = [1, 2, 4] #assign a list to new zEntry >>> del epoch_attribs['new_entry'] #delete the zEntry The type of the zEntry is guessed from data provided. 
The type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing zEntry corresponding to this zVar #. other zEntries in this zAttribute #. the type of this zVar #. data-matching constraints described in :py:meth:`CDF.new` See Also ======== :class:`AttrList` """ AttrType = zAttr attr_name = 'zAttribute' global_scope = False def __init__(self, zvar): """Initialize the attribute collection @param zvar: zVariable these attributes are in @param zvar: :py:class:`pycdf.Var` """ super(zAttrList, self).__init__(zvar.cdf_file, zvar._num) self._zvar = zvar def __getitem__(self, name): """Find an zEntry by name @param name: name of the zAttribute to return @type name: str @return: attribute named L{name} @rtype: L{zAttr} @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if attrib.has_entry(zvar_num): attrib._raw = self._zvar._raw return attrib[zvar_num] else: raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) def __delitem__(self, name): """Delete an zEntry by name @param name: name of the zEntry to delete @type name: str @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library @note: If this is the only remaining entry, the Attribute will be deleted. """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(str(name) + ': no such attribute for variable ' + str(self._zvar._name)) del attrib[zvar_num] if len(attrib) == 0: attrib._delete() def __setitem__(self, name, data): """Sets a zEntry by name The type of the zEntry is guessed from L{data}. The type is chosen to match the data; subject to that constraint, it will try to match (in order): 1. existing zEntry corresponding to this zVar 2. other zEntries in this zAttribute 3. the type of this zVar 4. data-matching constraints described in L{_Hyperslice.types} @param name: name of zAttribute; zEntry for this zVariable will be set in zAttribute by this name @type name: str @raise CDFError: errors in CDF library @raise ValueError: if unable to find a valid CDF type matching L{data}, or if L{data} is the wrong dimensions. """ try: attr = super(zAttrList, self).__getitem__(name) except KeyError: attr = zAttr(self._cdf_file, name, True) attr._raw = self._zvar._raw attr[self._zvar._num()] = data def __len__(self): """Number of zAttributes in this variable @return: number of zAttributes in the CDF which have entries for this variable. @rtype: int """ length = 0 count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) current = 0 while current < count.value: candidate = zAttr(self._cdf_file, current) if not candidate.global_scope(): if candidate.has_entry(self._zvar._num()): length += 1 current += 1 return length def type(self, name, new_type=None): """Find or change the CDF type of a zEntry in this zVar @param name: name of the zAttr to check or change @type name: str @param new_type: type to change it to, see :py:mod:`pycdf.const` @type new_type: ctypes.c_long @return: CDF variable type, see :py:mod:`pycdf.const` @rtype: int @note: If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57
        """
        attrib = super(zAttrList, self).__getitem__(name)
        zvar_num = self._zvar._num()
        if not attrib.has_entry(zvar_num):
            raise KeyError(name + ': no such attribute for variable ' + self._zvar.name())
        return attrib.type(zvar_num, new_type)

    def _clone_attr(self, master, name, new_name=None):
        """Clones a single attribute from one in this list or another

        Copies data and types from the master attribute to the new one

        @param master: attribute list to copy attribute from
        @type master: L{zAttrList}
        @param name: name of attribute to copy
        @type name: str
        @param new_name: name of the new attribute, default L{name}
        @type new_name: str
        """
        if new_name is None:
            new_name = name
        if new_name in self:
            del self[new_name]
        self.new(new_name, master[name], master.type(name) if hasattr(master, 'type') else None)
def epoch_to_epoch16(self, epoch):
        """
        Converts a CDF EPOCH to a CDF EPOCH16 value

        Parameters
        ==========
        epoch : double
            EPOCH to convert. Lists and numpy arrays are acceptable.

        Returns
        =======
        out : (double, double)
            EPOCH16 corresponding to epoch
        """
        e = numpy.require(epoch, numpy.float64)
        s = numpy.trunc(e / 1000.0)
        #ugly numpy stuff, probably a better way....
        res = numpy.hstack((s, (e - s * 1000.0) * 1e9))
        if len(res) <= 2:
            return res
        newshape = list(res.shape[0:-2])
        newshape.append(res.shape[-1] // 2)
        newshape.append(2)
        return numpy.rollaxis(res.reshape(newshape), -1, -2)
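# Editorial note (not part of the original source): a minimal usage sketch of
# the conversion above, assuming `lib` is the module-level Library instance.
# The values follow directly from the arithmetic in epoch_to_epoch16: a CDF
# EPOCH is milliseconds, and an EPOCH16 is a (seconds, picoseconds) pair, so
# the millisecond remainder is scaled by 1e9.
#
#     >>> lib.epoch_to_epoch16(1000.5)   # 1000.5 ms
#     array([1.0e+00, 5.0e+08])          # 1 s plus 0.5 ms expressed as 5e8 ps
#
# For array input with more than one element, the result gains a trailing
# dimension of length 2 holding each (seconds, picoseconds) pair.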
809
832
#!/usr/bin/env python # -*- coding: utf-8 -*- """ das developers note: This a is modification of the original SpacePy pycdf package. All refereneces to the greater spacepy package have been removed to create a small standalone module. --cwp 2018-10-18 The libcdf.so location code has been changed to find the version installed in anaconda. --cwp 2020-04-06 This package provides a Python interface to the Common Data Format (CDF) library used for many NASA missions, available at http://cdf.gsfc.nasa.gov/. It is targeted at Python 2.6+ and should work without change on either Python 2 or Python 3. The interface is intended to be 'pythonic' rather than reproducing the C interface. To open or close a CDF and access its variables, see the :class:`CDF` class. Accessing data within the variables is via the :class:`Var` class. The :data:`lib` object provides access to some routines that affect the functionality of the library in general. The :mod:`~pycdf.const` module contains constants useful for accessing the underlying library. Authors: Jon Niehof Institution: University of New Hampshire Contact: [email protected] Copyright 2010-2015 Los Alamos National Security, LLC. """ __contact__ = 'Jon Niehof, [email protected]' try: from collections.abc import MutableMapping, MutableSequence except ImportError: from collections import MutableMapping, MutableSequence import ctypes import ctypes.util import datetime import operator import os import os.path import shutil import sys import tempfile import warnings import weakref import numpy import numpy.ma #Import const AFTER library loaded, so failed load doesn't leave half-imported #from . import const try: str_classes = (str, bytes, unicode) except NameError: str_classes = (str, bytes) class Library(object): """ Abstraction of the base CDF C library and its state. Not normally intended for end-user use. An instance of this class is created at package load time as the :data:`~pycdf.lib` variable, providing access to the underlying C library if necessary. The CDF library itself is described in section 2.1 of the CDF user's guide, as well as the CDF C reference manual. Calling the C library directly requires knowledge of :mod:`ctypes`. Instantiating this object loads the C library, see :doc:`/pycdf` docs for details. .. autosummary:: ~Library.call ~Library.check_status ~Library.datetime_to_epoch ~Library.datetime_to_epoch16 ~Library.datetime_to_tt2000 ~Library.epoch_to_datetime ~Library.epoch_to_epoch16 ~Library.epoch_to_num ~Library.epoch_to_tt2000 ~Library.epoch16_to_datetime ~Library.epoch16_to_epoch ~Library.epoch16_to_tt2000 ~Library.set_backward supports_int8 ~Library.tt2000_to_datetime ~Library.tt2000_to_epoch ~Library.tt2000_to_epoch16 v_datetime_to_epoch v_datetime_to_epoch16 v_datetime_to_tt2000 v_epoch_to_datetime v_epoch_to_tt2000 v_epoch16_to_datetime v_epoch16_to_tt2000 v_tt2000_to_datetime v_tt2000_to_epoch v_tt2000_to_epoch16 libpath version .. automethod:: call .. automethod:: check_status .. automethod:: datetime_to_epoch .. automethod:: datetime_to_epoch16 .. automethod:: datetime_to_tt2000 .. automethod:: epoch_to_datetime .. automethod:: epoch_to_epoch16 .. automethod:: epoch_to_num .. automethod:: epoch_to_tt2000 .. automethod:: epoch16_to_datetime .. automethod:: epoch16_to_epoch .. automethod:: epoch16_to_tt2000 .. automethod:: set_backward .. attribute:: supports_int8 True if this library supports INT8 and TIME_TT2000 types; else False. .. automethod:: tt2000_to_datetime .. automethod:: tt2000_to_epoch .. 
automethod:: tt2000_to_epoch16 .. method:: v_datetime_to_epoch(datetime) A vectorized version of :meth:`datetime_to_epoch` which takes a numpy array of datetimes as input and returns an array of epochs. .. method:: v_datetime_to_epoch16(datetime) A vectorized version of :meth:`datetime_to_epoch16` which takes a numpy array of datetimes as input and returns an array of epoch16. .. method:: v_datetime_to_tt2000(datetime) A vectorized version of :meth:`datetime_to_tt2000` which takes a numpy array of datetimes as input and returns an array of TT2000. .. method:: v_epoch_to_datetime(epoch) A vectorized version of :meth:`epoch_to_datetime` which takes a numpy array of epochs as input and returns an array of datetimes. .. method:: v_epoch_to_tt2000(epoch) A vectorized version of :meth:`epoch_to_tt2000` which takes a numpy array of epochs as input and returns an array of tt2000s. .. method:: v_epoch16_to_datetime(epoch0, epoch1) A vectorized version of :meth:`epoch16_to_datetime` which takes a numpy array of epoch16 as input and returns an array of datetimes. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_epoch16_to_tt2000(epoch16) A vectorized version of :meth:`epoch16_to_tt2000` which takes a numpy array of epoch16 as input and returns an array of tt2000s. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_tt2000_to_datetime(tt2000) A vectorized version of :meth:`tt2000_to_datetime` which takes a numpy array of tt2000 as input and returns an array of datetimes. .. method:: v_tt2000_to_epoch(tt2000) A vectorized version of :meth:`tt2000_to_epoch` which takes a numpy array of tt2000 as input and returns an array of epochs. .. method:: v_tt2000_to_epoch16(tt2000) A vectorized version of :meth:`tt2000_to_epoch16` which takes a numpy array of tt2000 as input and returns an array of epoch16. .. attribute:: libpath The path where pycdf found the CDF C library, potentially useful in debugging. If this contains just the name of a file (with no path information), then the system linker found the library for pycdf. On Linux, ``ldconfig -p`` may be useful for displaying the system's library resolution. .. attribute:: version Version of the CDF library, (version, release, increment, subincrement) """ def __init__(self, libpath=None, library=None): """Load the CDF C library. Searches for the library in the order: 1. Appropriately-named file in CDF_LIB 2. Appropriately-named file in CDF_BASE 3. Standard library search path @raise CDFError: BAD_DATA_TYPE if can't map types properly """ if not 'CDF_TMP' in os.environ: os.environ['CDF_TMP'] = tempfile.gettempdir() if not library: if not libpath: self.libpath, self._library = self._find_lib() if self._library is None: raise Exception(( 'Cannot load CDF C library; checked {0}. 
' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(self.libpath))) else: self._library = ctypes.CDLL(libpath) self.libpath = libpath else: self._library = library self.libpath = libpath self._library.CDFlib.restype = ctypes.c_long #commonly used, so set it up here self._library.EPOCHbreakdown.restype = ctypes.c_long self._library.computeEPOCH.restype = ctypes.c_double self._library.computeEPOCH.argtypes = [ctypes.c_long] * 7 self._library.computeEPOCH16.restype = ctypes.c_double self._library.computeEPOCH16.argtypes = [ctypes.c_long] * 10 + \ [ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDFsetFileBackward'): self._library.CDFsetFileBackward.restype = None self._library.CDFsetFileBackward.argtypes = [ctypes.c_long] #Map old name to the 3.7.1+ name if not hasattr(self._library, 'computeTT2000') \ and hasattr(self._library, 'CDF_TT2000_from_UTC_parts'): self._library.computeTT2000 \ = self._library.CDF_TT2000_from_UTC_parts if hasattr(self._library, 'computeTT2000'): self._library.computeTT2000.restype = ctypes.c_longlong self._library.computeTT2000.argtypes = \ [ctypes.c_double] *9 #Map old name to the 3.7.1+ name if not hasattr(self._library, 'breakdownTT2000') \ and hasattr(self._library, 'CDF_TT2000_to_UTC_parts'): self._library.breakdownTT2000 \ = self._library.CDF_TT2000_to_UTC_parts if hasattr(self._library, 'breakdownTT2000'): self._library.breakdownTT2000.restype = None self._library.breakdownTT2000.argtypes = \ [ctypes.c_longlong] + [ctypes.POINTER(ctypes.c_double)] * 9 if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH'): self._library.CDF_TT2000_to_UTC_EPOCH.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH.argtypes = [ctypes.c_longlong] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH'): self._library.CDF_TT2000_from_UTC_EPOCH.restype = ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH.argtypes = [ctypes.c_double] if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH16'): self._library.CDF_TT2000_to_UTC_EPOCH16.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH16.argtypes = \ [ctypes.c_longlong, ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH16'): self._library.CDF_TT2000_from_UTC_EPOCH16.restype = \ ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH16.argtypes = \ [ctypes.POINTER(ctypes.c_double * 2)] #Get CDF version information ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) sub = ctypes.c_char(b' ') self.call(const.GET_, const.LIB_VERSION_, ctypes.byref(ver), const.GET_, const.LIB_RELEASE_, ctypes.byref(rel), const.GET_, const.LIB_INCREMENT_, ctypes.byref(inc), const.GET_, const.LIB_subINCREMENT_, ctypes.byref(sub)) ver = ver.value rel = rel.value inc = inc.value sub = sub.value self.version = (ver, rel, inc, sub) self._del_middle_rec_bug = ver < 3 or (ver == 3 and (rel < 4 or (rel == 4 and inc < 1))) self.supports_int8 = (ver > 3 or (ver == 3 and rel >=4)) self.cdftypenames = {const.CDF_BYTE.value: 'CDF_BYTE', const.CDF_CHAR.value: 'CDF_CHAR', const.CDF_INT1.value: 'CDF_INT1', const.CDF_UCHAR.value: 'CDF_UCHAR', const.CDF_UINT1.value: 'CDF_UINT1', const.CDF_INT2.value: 'CDF_INT2', const.CDF_UINT2.value: 'CDF_UINT2', const.CDF_INT4.value: 'CDF_INT4', const.CDF_UINT4.value: 'CDF_UINT4', const.CDF_INT8.value: 'CDF_INT8', const.CDF_FLOAT.value: 'CDF_FLOAT', const.CDF_REAL4.value: 'CDF_REAL4', const.CDF_DOUBLE.value: 'CDF_DOUBLE', const.CDF_REAL8.value: 'CDF_REAL8', const.CDF_EPOCH.value: 'CDF_EPOCH', 
const.CDF_EPOCH16.value: 'CDF_EPOCH16', const.CDF_TIME_TT2000.value: 'CDF_TIME_TT2000', } self.numpytypedict = {const.CDF_BYTE.value: numpy.int8, const.CDF_CHAR.value: numpy.int8, const.CDF_INT1.value: numpy.int8, const.CDF_UCHAR.value: numpy.uint8, const.CDF_UINT1.value: numpy.uint8, const.CDF_INT2.value: numpy.int16, const.CDF_UINT2.value: numpy.uint16, const.CDF_INT4.value: numpy.int32, const.CDF_UINT4.value: numpy.uint32, const.CDF_INT8.value: numpy.int64, const.CDF_FLOAT.value: numpy.float32, const.CDF_REAL4.value: numpy.float32, const.CDF_DOUBLE.value: numpy.float64, const.CDF_REAL8.value: numpy.float64, const.CDF_EPOCH.value: numpy.float64, const.CDF_EPOCH16.value: numpy.dtype((numpy.float64, 2)), const.CDF_TIME_TT2000.value: numpy.int64, } self.timetypes = [const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value] if not self.supports_int8: del self.cdftypenames[const.CDF_INT8.value] del self.numpytypedict[const.CDF_INT8.value] del self.cdftypenames[const.CDF_TIME_TT2000.value] del self.numpytypedict[const.CDF_TIME_TT2000.value] elif sys.platform.startswith('linux') \ and os.uname()[4].startswith('arm') \ and hasattr(self._library, 'computeTT2000') \ and self._library.computeTT2000( 2010, 1, 1, 0, 0, 0, 0, 0, 0) != 315576066184000000: #TT2000 call failed, so probably need to type-pun #double arguments to variadic functions. #Calling convention for non-variadic functions with floats #is unique, but convention for ints is same as variadic. #So type-pun arguments to integers to force that calling #convention. if ctypes.sizeof(ctypes.c_longlong) != \ ctypes.sizeof(ctypes.c_double): warnings.warn('ARM with unknown type sizes; ' 'TT2000 functions will not work.') else: self._library.computeTT2000.argtypes = \ [ctypes.c_longlong] * 9 c_ll_p = ctypes.POINTER(ctypes.c_longlong) if self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( 2010)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) != 315576066184000000: warnings.warn('ARM with unknown calling convention; ' 'TT2000 functions will not work.') self.datetime_to_tt2000 = self._datetime_to_tt2000_typepunned v_epoch16_to_datetime = numpy.frompyfunc( self.epoch16_to_datetime, 2, 1) self.v_epoch16_to_datetime = \ lambda x: v_epoch16_to_datetime(x[..., 0], x[..., 1]) self.v_epoch_to_datetime = numpy.frompyfunc( self.epoch_to_datetime, 1, 1) self.v_tt2000_to_datetime = numpy.frompyfunc( self.tt2000_to_datetime, 1, 1) self.v_datetime_to_epoch = numpy.vectorize( self.datetime_to_epoch, otypes=[numpy.float64]) v_datetime_to_epoch16 = numpy.frompyfunc( self.datetime_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. 
We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_datetime_to_epoch16(x): retval = numpy.require(v_datetime_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_datetime_to_epoch16 = _v_datetime_to_epoch16 self.v_datetime_to_tt2000 = numpy.vectorize( self.datetime_to_tt2000, otypes=[numpy.int64]) self.v_epoch_to_tt2000 = numpy.vectorize( self.epoch_to_tt2000, otypes=[numpy.int64]) self.v_tt2000_to_epoch = numpy.vectorize( self.tt2000_to_epoch, otypes=[numpy.float64]) v_epoch16_to_tt2000 = numpy.frompyfunc( self.epoch16_to_tt2000, 2, 1) self.v_epoch16_to_tt2000 = \ lambda x: v_epoch16_to_tt2000(x[..., 0], x[..., 1]) v_tt2000_to_epoch16 = numpy.frompyfunc( self.tt2000_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_tt2000_to_epoch16(x): retval = numpy.require(v_tt2000_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_tt2000_to_epoch16 = _v_tt2000_to_epoch16 if not self.supports_int8: self.datetime_to_tt2000 = self._bad_tt2000 self.tt2000_to_datetime = self._bad_tt2000 self.v_datetime_to_tt2000 = self._bad_tt2000 self.v_tt2000_to_datetime = self._bad_tt2000 self.epoch_to_tt2000 = self._bad_tt2000 self.v_epoch_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch = self._bad_tt2000 self.v_tt2000_to_epoch = self._bad_tt2000 self.epoch_16_to_tt2000 = self._bad_tt2000 self.v_epoch16_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch16 = self._bad_tt2000 self.v_tt2000_to_epoch16 = self._bad_tt2000 #Default to V2 CDF self.set_backward(True) @staticmethod def _find_lib(): """ Search for the CDF library Searches in likely locations for CDF libraries and attempts to load them. Stops at first successful load and, if fails, reports all the files that were tried as libraries. Returns ======= out : tuple This is either (path to library, loaded library) or, in the event of failure, (None, list of libraries tried) """ failed = [] for libpath in Library._lib_paths(): try: lib = ctypes.CDLL(libpath) except: failed.append(libpath) else: return libpath, lib return (failed, None) @staticmethod def _lib_paths(): """Find candidate paths for the CDF library Does not check that the library is actually in any particular directory, just returns a list of possible locations, in priority order. Returns ======= out : generator of str paths that look like the CDF library """ #What the library might be named names = { 'win32': ['cdf.dll'], 'darwin': ['libcdf.dylib', 'cdf.dylib', 'libcdf.so'], 'linux2': ['libcdf.so'], 'linux': ['libcdf.so'], } names = names.get(sys.platform, ['libcdf.so']) #All existing CDF-library-like paths within a directory search_dir = lambda x: \ [os.path.join(x, fname) for fname in names if os.path.exists(os.path.join(x, fname))] # Only use anaconda locations... # Defined during builds ... if 'PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['PREFIX'], 'lib')): yield p # defined when conda is activated ... 
if 'CONDA_PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'lib')): yield p # Special subdirectory for anaconda unix packages on windows if 'LIBRARY_BIN' in os.environ: for p in search_dir(os.environ['LIBRARY_BIN']): yield p ctypespath = ctypes.util.find_library( 'cdf.dll' if sys.platform == 'win32' else 'cdf') if ctypespath: yield ctypespath def check_status(self, status, ignore=()): """ Raise exception or warning based on return status of CDF call Parameters ========== status : int status returned by the C library Other Parameters ================ ignore : sequence of ctypes.c_long CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. (Default none). Raises ====== CDFError : if status < CDF_WARN, indicating an error Warns ===== CDFWarning : if CDF_WARN <= status < CDF_OK, indicating a warning. Returns ======= out : int status (unchanged) """ if status == const.CDF_OK or status in ignore: return status if status < const.CDF_WARN: raise CDFError(status) else: warning = CDFWarning(status) warning.warn() return status def call(self, *args, **kwargs): """ Call the CDF internal interface Passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with :meth:`check_status`. Terminal NULL is automatically added to args. Parameters ========== args : various, see :mod:`ctypes` Passed directly to the CDF library interface. Useful constants are defined in the :mod:`~pycdf.const` module. Other Parameters ================ ignore : sequence of CDF statuses sequence of CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. Returns ======= out : int CDF status from the library Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ if 'ignore' in kwargs: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) ), kwargs['ignore']) else: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) )) def set_backward(self, backward=True): """ Set backward compatibility mode for new CDFs Unless backward compatible mode is set, CDF files created by the version 3 library can not be read by V2. Parameters ========== backward : boolean Set backward compatible mode if True; clear it if False. Raises ====== ValueError : if backward=False and underlying CDF library is V2 """ if self.version[0] < 3: if not backward: raise ValueError( 'Cannot disable backward-compatible mode for CDF version 2.') else: return self._library.CDFsetFileBackward(const.BACKWARDFILEon if backward else const.BACKWARDFILEoff) def epoch_to_datetime(self, epoch): """ Converts a CDF epoch value to a datetime Parameters ========== epoch : float epoch value from CDF Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
See Also ======== v_epoch_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) self._library.EPOCHbreakdown(ctypes.c_double(epoch), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999000) else: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, msec.value * 1000) def datetime_to_epoch(self, dt): """ Converts a Python datetime to a CDF Epoch value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : float epoch corresponding to dt See Also ======== v_datetime_to_epoch """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) micro = dt.microsecond % 1000 if micro >= 500 and dt.year < 9999: dt += datetime.timedelta(0, 0, 1000) return self._library.computeEPOCH(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000)) def epoch16_to_datetime(self, epoch0, epoch1): """ Converts a CDF epoch16 value to a datetime .. note:: The call signature has changed since SpacePy 0.1.2. Formerly this method took a single argument with two values; now it requires two arguments (one for each value). To convert existing code, replace ``epoch16_to_datetime(epoch)`` with ``epoch16_to_datetime(*epoch)``. Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. See Also ======== v_epoch16_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) usec = ctypes.c_long(0) nsec = ctypes.c_long(0) psec = ctypes.c_long(0) self._library.EPOCH16breakdown((ctypes.c_double * 2)(epoch0, epoch1), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec), ctypes.byref(psec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) micro = int(float(msec.value) * 1000 + float(usec.value) + float(nsec.value) / 1000 + float(psec.value) / 1e6 + 0.5) if micro < 1000000: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_epoch16(self, dt): """ Converts a Python datetime to a CDF Epoch16 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : list of float epoch16 corresponding to dt See Also ======== v_datetime_to_epoch16 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) #Default to "illegal epoch" epoch16 = (ctypes.c_double * 2)(-1., -1.) 
if self._library.computeEPOCH16(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0, 0, epoch16): return (-1., -1.) #Failure, so illegal epoch return (epoch16[0], epoch16[1]) def epoch_to_epoch16(self, epoch): """ Converts a CDF EPOCH to a CDF EPOCH16 value Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : (double, double) EPOCH16 corresponding to epoch """ e = numpy.require(epoch, numpy.float64) s = numpy.trunc(e / 1000.0) #ugly numpy stuff, probably a better way.... res = numpy.hstack((s, (e - s * 1000.0) * 1e9)) if len(res) <= 2: return res newshape = list(res.shape[0:-2]) newshape.append(res.shape[-1] // 2) newshape.append(2) return numpy.rollaxis(res.reshape(newshape), -1, -2) def epoch_to_num(self, epoch): """ Convert CDF EPOCH to matplotlib number. Same output as :func:`~matplotlib.dates.date2num` and useful for plotting large data sets without converting the times through datetime. Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : double Floating point number representing days since 0001-01-01. """ #date2num day 1 is 1/1/1 00UT #epoch 1/1/1 00UT is 31622400000.0 (millisecond) return (epoch - 31622400000.0) / (24 * 60 * 60 * 1000.0) + 1.0 def epoch16_to_epoch(self, epoch16): """ Converts a CDF EPOCH16 to a CDF EPOCH value Parameters ========== epoch16 : (double, double) EPOCH16 to convert. Lists and numpy arrays are acceptable. LAST dimension should be 2: the two pairs of EPOCH16 Returns ======= out : double EPOCH corresponding to epoch16 """ e = numpy.require(epoch16, numpy.float64) return e[..., 0] * 1000.0 + numpy.round(e[..., 1] / 1e9) def tt2000_to_datetime(self, tt2000): """ Converts a CDF TT2000 value to a datetime .. note:: Although TT2000 values support leapseconds, Python's datetime object does not. Any times after 23:59:59.999999 will be truncated to 23:59:59.999999. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
See Also ======== v_tt2000_to_datetime """ yyyy = ctypes.c_double(0) mm = ctypes.c_double(0) dd = ctypes.c_double(0) hh = ctypes.c_double(0) min = ctypes.c_double(0) sec = ctypes.c_double(0) msec = ctypes.c_double(0) usec = ctypes.c_double(0) nsec = ctypes.c_double(0) self._library.breakdownTT2000( ctypes.c_longlong(tt2000), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) sec = int(sec.value) if sec >= 60: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), 59, 999999) micro = int(msec.value * 1000 + usec.value + nsec.value / 1000 + 0.5) if micro < 1000000: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_tt2000(self, dt): """ Converts a Python datetime to a CDF TT2000 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0) def _datetime_to_tt2000_typepunned(self, dt): """ Converts a Python datetime to a CDF TT2000 value Typepunned version that passes doubles as longlongs, to get around ARM calling convention oddness. Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ c_ll_p = ctypes.POINTER(ctypes.c_longlong) if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( dt.year)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.month)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.day)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.hour)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.minute)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.second)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond // 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond % 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) def epoch_to_tt2000(self, epoch): """ Converts a CDF EPOCH to a CDF TT2000 value Parameters ========== epoch : double EPOCH to convert Returns ======= out : int tt2000 corresponding to epoch See Also ======== v_epoch_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH(epoch) def tt2000_to_epoch(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH .. note:: Although TT2000 values support leapseconds, CDF EPOCH values do not. 
Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double EPOCH corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch """ return self._library.CDF_TT2000_to_UTC_EPOCH(tt2000) def epoch16_to_tt2000(self, epoch0, epoch1): """ Converts a CDF epoch16 value to TT2000 .. note:: Because TT2000 does not support picoseconds, the picoseconds value in epoch is ignored (i.e., truncated.) Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : long TT2000 corresponding to epoch. See Also ======== v_epoch16_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH16( (ctypes.c_double * 2)(epoch0, epoch1)) def tt2000_to_epoch16(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH16 .. note:: Although TT2000 values support leapseconds, CDF EPOCH16 values do not. Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double, double EPOCH16 corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch16 """ #Default to "illegal epoch" if isn't populated epoch16 = (ctypes.c_double * 2)(-1., -1.) if self._library.CDF_TT2000_to_UTC_EPOCH16(tt2000, epoch16): return (-1., -1.) #Failure; illegal epoch return (epoch16[0], epoch16[1]) def _bad_tt2000(*args, **kwargs): """Convenience function for complaining that TT2000 not supported""" raise NotImplementedError( 'TT2000 functions require CDF library 3.4.0 or later') def download_library(): """Download and install the CDF library""" if sys.platform != 'win32': raise NotImplementedError( 'CDF library install only supported on Windows') try: import html.parser as HTMLParser except ImportError: import HTMLParser #https://stackoverflow.com/questions/1713038/super-fails-with-error-typeerror-argument-1-must-be-type-not-classobj class LinkParser(HTMLParser.HTMLParser, object): def __init__(self, *args, **kwargs): self.links_found = [] super(LinkParser, self).__init__(*args, **kwargs) def handle_starttag(self, tag, attrs): if tag != 'a' or attrs[0][0] != 'href': return self.links_found.append(attrs[0][1]) import re import subprocess try: import urllib.request as u except ImportError: import urllib as u # Removed reference to spacepy #import spacepy #if spacepy.config.get('user_agent', None): # class AppURLopener(u.FancyURLopener): # version = spacepy.config['user_agent'] # u._urlopener = AppURLopener() baseurl = 'https://spdf.sci.gsfc.nasa.gov/pub/software/cdf/dist/' url = u.urlopen(baseurl) listing = url.read() url.close() p = LinkParser() p.feed(listing) cdfdist = [l for l in p.links_found if re.match('^cdf3\d_\d(?:_\d)?/$', l)] if not cdfdist: raise RuntimeError( "Couldn't find CDF distribution directory to download") cdfdist.sort(key=lambda x: x.rstrip('/').split('_')) cdfverbase = cdfdist[-1].rstrip('/') instfname = cdfverbase + ('_0' if cdfverbase.count('_') == 1 else '') + \ '-setup-{0}.exe'.format(len('%x' % sys.maxsize)*4) insturl = baseurl + cdfverbase + '/windows/' + instfname tmpdir = tempfile.mkdtemp() try: fname, status = u.urlretrieve(insturl, os.path.join(tmpdir, instfname)) subprocess.check_call([fname, '/install', '/q1'], shell=False) finally: shutil.rmtree(tmpdir) _libpath, _library = Library._find_lib() 
if _library is None: raise Exception(('Cannot load CDF C library; checked {0}. ' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(_libpath))) from . import const lib = Library(_libpath, _library) """Module global library object. Initalized at module load time so all classes have ready access to the CDF library and a common state. E.g: >>> import pycdf >>> pycdf.lib.version (3, 3, 0, ' ') """ class CDFException(Exception): """ Base class for errors or warnings in the CDF library. Not normally used directly, but in subclasses :class:`CDFError` and :class:`CDFWarning`. Error messages provided by this class are looked up from the underlying C library. """ def __init__(self, status): """ Create a CDF Exception Uses CDF C library to look up an appropriate error message. Parameters ========== status : ctypes.c_long CDF status """ self.status = status self.string = 'CDF error ' + repr(status) + ', unable to get details.' message = ctypes.create_string_buffer(const.CDF_STATUSTEXT_LEN + 1) try: retval = lib._library.CDFlib(const.SELECT_, const.CDF_STATUS_, ctypes.c_long(status), const.GET_, const.STATUS_TEXT_, message, const.NULL_) if retval == const.CDF_OK: if isinstance(message.value, str): self.string = message.value elif isinstance(message.value, bytes): self.string = message.value.decode() except: pass def __str__(self): """ Error string associated with the library error. Returns ======= out : str Error message from the CDF library. """ return self.string class CDFError(CDFException): """Raised for an error in the CDF library.""" pass class CDFWarning(CDFException, UserWarning): """Used for a warning in the CDF library.""" def warn(self, level=4): """ Issues a warning based on the information stored in my exception Intended for use in check_status or similar wrapper function. Other Parameters ================ level : int optional (default 3), how far up the stack the warning should be reported. Passed directly to :class:`warnings.warn`. """ warnings.warn(self, self.__class__, level) class EpochError(Exception): """Used for errors in epoch routines""" pass def _compress(obj, comptype=None, param=None): """Set or check the compression of a :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param obj: object on which to set or check compression @type obj: :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param comptype: type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :py:mod:`pycdf.const`. If not specified, will not change compression. @type comptype: ctypes.c_long @param param: Compression parameter, see CDF CRM 4.10 and :py:mod:`pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
@type param: ctypes.c_long @return: (comptype, param) currently in effect @rtype: tuple """ if isinstance(obj, CDF): COMPRESSION_ = const.CDF_COMPRESSION_ elif isinstance(obj, Var): COMPRESSION_ = const.zVAR_COMPRESSION_ else: raise ValueError('Must specify a CDF or Var type.') validparams = {const.NO_COMPRESSION.value: [ctypes.c_long(0)], const.RLE_COMPRESSION.value: [const.RLE_OF_ZEROs], const.HUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.AHUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.GZIP_COMPRESSION.value: [ctypes.c_long(5), ctypes.c_long(1), ctypes.c_long(2), ctypes.c_long(3), ctypes.c_long(4), ctypes.c_long(6), ctypes.c_long(7), ctypes.c_long(8), ctypes.c_long(9), ], } comptypes = [const.NO_COMPRESSION, const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION, const.GZIP_COMPRESSION] comptypevalues = [i.value for i in comptypes] if comptype != None: if not hasattr(comptype, 'value'): comptype = ctypes.c_long(comptype) if param is None: if not comptype.value in validparams: raise CDFError(const.BAD_COMPRESSION) param = validparams[comptype.value][0] paramlist = (ctypes.c_long * 1)(param) obj._call(const.PUT_, COMPRESSION_, comptype, paramlist) params = (ctypes.c_long * const.CDF_MAX_PARMS)(*([0] * const.CDF_MAX_PARMS)) comptype = ctypes.c_long(0) percent = ctypes.c_long(0) obj._call(const.GET_, COMPRESSION_, ctypes.byref(comptype), ctypes.byref(params), ctypes.byref(percent)) param = params[0] if not comptype.value in comptypevalues: raise CDFError(const.BAD_COMPRESSION) validparamvalues = [i.value for i in validparams[comptype.value]] if not param in validparamvalues: raise CDFError(const.BAD_COMPRESSION_PARM) comptype = comptypes[comptypevalues.index(comptype.value)] if comptype in (const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION): param = validparams[comptype.value][validparamvalues.index(param)] return (comptype, param) class CDF(MutableMapping): """ Python object representing a CDF file. Open or create a CDF file by creating an object of this class. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. A readonly CDF with many variables may be slow to close. See :meth:`readonly`. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :meth:`close` or :meth:`save` when done. .. note:: Existing CDF files are opened read-only by default, see :meth:`readonly` to change. CDF supports the `with <http://docs.python.org/tutorial/inputoutput.html#methods-of-file-objects>`_ keyword, like other file objects, so: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... #do brilliant things with the CDF will open the CDF, execute the indented statements, and close the CDF when finished or when an error occurs. The `python docs <http://docs.python.org/reference/compound_stmts.html#with>`_ include more detail on this 'context manager' ability. 
CDF objects behave like a python `dictionary <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_, where the keys are names of variables in the CDF, and the values, :class:`Var` objects. As a dictionary, they are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_ and it is easy to loop over all of the variables in a file. Some examples: #. List the names of all variables in the open CDF ``cdffile``: >>> cdffile.keys() >>> for k in cdffile: #Alternate ... print(k) #. Get a :class:`Var` object for the variable named ``Epoch``: >>> epoch = cdffile['Epoch'] #. Determine if a CDF contains a variable named ``B_GSE``: >>> if 'B_GSE' in cdffile: ... print('B_GSE is in the file') ... else: ... print('B_GSE is not in the file') #. Find how many variables are in the file: >>> print(len(cdffile)) #. Delete the variable ``Epoch`` from the open CDF file ``cdffile``: >>> del cdffile['Epoch'] #. Display a summary of variables and types in open CDF file ``cdffile``: >>> print(cdffile) #. Open the CDF named ``cdf_filename.cdf``, read *all* the data from all variables into dictionary ``data``, and close it when done or if an error occurs: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... data = cdffile.copy() This last example can be very inefficient as it reads the entire CDF. Normally it's better to treat the CDF as a dictionary and access only the data needed, which will be pulled transparently from disc. See :class:`Var` for more subtle examples. Potentially useful dictionary methods and related functions: - `in <http://docs.python.org/reference/expressions.html#in>`_ - `keys <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_ - :py:func:`len` - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - :py:func:`sorted` - :py:func:`~spacepy.toolbox.dictree` The CDF user's guide section 2.2 has more background information on CDF files. The :attr:`~CDF.attrs` Python attribute acts as a dictionary referencing CDF attributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`gAttrList` for more on the dictionary of global attributes. Creating a new CDF from a master (skeleton) CDF has similar syntax to opening one: >>> cdffile = pycdf.CDF('cdf_filename.cdf', 'master_cdf_filename.cdf') This creates and opens ``cdf_filename.cdf`` as a copy of ``master_cdf_filename.cdf``. Using a skeleton CDF is recommended over making a CDF entirely from scratch, but this is possible by specifying a blank master: >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') When CDFs are created in this way, they are opened read-write, see :py:meth:`readonly` to change. By default, new CDFs (without a master) are created in version 2 (backward-compatible) format. To create a version 3 CDF, use :meth:`Library.set_backward`: >>> pycdf.lib.set_backward(False) >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') Add variables by direct assignment, which will automatically set type and dimension based on the data provided: >>> cdffile['new_variable_name'] = [1, 2, 3, 4] or, if more control is needed over the type and dimensions, use :py:meth:`new`. Although it is supported to assign Var objects to Python variables for convenience, there are some minor pitfalls that can arise when changing a CDF that will not affect most users. 
This is only a concern when assigning a zVar object to a Python variable, changing the CDF through some other variable, and then trying to use the zVar object via the originally assigned variable. Deleting a variable: >>> var = cdffile['Var1'] >>> del cdffile['Var1'] >>> var[0] #fail, no such variable Renaming a variable: >>> var = cdffile['Var1'] >>> cdffile['Var1'].rename('Var2') >>> var[0] #fail, no such variable Renaming via the same variable works: >>> var = cdffile['Var1'] >>> var.rename('Var2') >>> var[0] #succeeds, aware of new name Deleting a variable and then creating another variable with the same name may lead to some surprises: >>> var = cdffile['Var1'] >>> var[...] = [1, 2, 3, 4] >>> del cdffile['Var1'] >>> cdffile.new('Var1', data=[5, 6, 7, 8] >>> var[...] [5, 6, 7, 8] .. autosummary:: ~CDF.attr_num ~CDF.attrs ~CDF.add_attr_to_cache ~CDF.add_to_cache ~CDF.backward ~CDF.checksum ~CDF.clear_attr_from_cache ~CDF.clear_from_cache ~CDF.clone ~CDF.close ~CDF.col_major ~CDF.compress ~CDF.copy ~CDF.from_data ~CDF.new ~CDF.raw_var ~CDF.readonly ~CDF.save ~CDF.var_num ~CDF.version .. attribute:: CDF.attrs Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. .. attribute:: CDF.backward True if this CDF was created in backward-compatible mode (for opening with CDF library before 3.x) .. automethod:: add_to_cache .. automethod:: add_attr_to_cache .. automethod:: attr_num .. automethod:: checksum .. automethod:: clear_from_cache .. automethod:: clear_attr_from_cache .. automethod:: clone .. automethod:: close .. automethod:: col_major .. automethod:: compress .. automethod:: copy .. automethod:: from_data .. automethod:: new .. automethod:: raw_var .. automethod:: readonly .. automethod:: save .. automethod:: var_num .. automethod:: version """ def __init__(self, pathname, masterpath=None, create=None, readonly=None): """Open or create a CDF file. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. Raises ====== CDFError if CDF library reports an error CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save` when done. 
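        To create a new, empty CDF from scratch (a sketch; assumes
        ``new_filename.cdf`` does not already exist), pass an empty string
        as the master path:

        >>> cdffile = pycdf.CDF('new_filename.cdf', '')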
""" if masterpath is not None: #Looks like we want to create if create is False: raise ValueError('Cannot specify a master CDF without creating a CDF') if readonly is True: raise ValueError('Cannot create a CDF in readonly mode') if create and readonly: raise ValueError('Cannot create a CDF in readonly mode') try: self.pathname = pathname.encode() except AttributeError: raise ValueError( 'pathname must be string-like: {0}'.format(pathname)) self._handle = ctypes.c_void_p(None) self._opened = False if masterpath is None and not create: self._open(True if readonly is None else readonly) elif masterpath: self._from_master(masterpath.encode()) else: self._create() lib.call(const.SELECT_, const.CDF_zMODE_, ctypes.c_long(2)) self._attrlistref = weakref.ref(gAttrList(self)) self.backward = self.version()[0] < 3 self._var_nums = {} """Cache of name-to-number mappings for variables in this CDF""" self._attr_info = {} """Cache of name-to-(number, global) mappings for attributes in this CDF""" def __del__(self): """Destructor; called when CDF object is destroyed. Close CDF file if there is still a valid handle. .. note:: To avoid data loss, explicitly call :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save`. """ if self._opened: self.close() def __delitem__(self, name): """Delete a zVariable in this CDF, by name or number Parameters ========== name : string or int Name or number of the CDF variable .. note: Variable numbers may change if variables are added or removed. Examples ======== Delete the variable ``Epoch`` from the open CDF file ``cdffile``. >>> del cdffile['Epoch'] """ self[name]._delete() def __enter__(self): """Context manager entrance function.""" return self def __exit__(self, type, value, traceback): """Context manager exit function. Close CDF file. """ self.close() def __getitem__(self, name): """Gets a zVariable in this CDF, by name or number The CDF acts like a dict @param name: Name or number of the CDF variable @type name: string or int @return: CDF variable named or numbered L{name} @rtype: :py:class:`pycdf.Var` @raise KeyError: for pretty much any problem in lookup @note: variable numbers may change if variables are added or removed. """ try: return Var(self, name) except CDFException as e: raise KeyError('{0}: {1}'.format(name, e)) def __setitem__(self, name, data): """Writes data to a zVariable in this CDF If the zVariable does not exist, will create one matching L{data}. If it does exist, will attempt to write L{data} to it without changing the type or dimensions. @param name: name or number of the variable to write @type name: str or int @param data: data to write, or a :py:class:`pycdf.Var` to copy """ if isinstance(data, Var): self.clone(data, name) elif name in self: self[name][...] 
= data if hasattr(data, 'attrs'): self[name].attrs.clone(data.attrs) else: self.new(name, data) def __iter__(self, current = 0): """Iterates over zVars in CDF Iterators for dicts return keys @note: Returned in variable-number order """ while current < self.__len__(): name = self[current].name() value = (yield name) if value is None: current += 1 else: current = self[value]._num() current += 1 def __len__(self): """Implements 'length' of CDF (number of zVars) @return: number of zVars in the CDF @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, const.CDF_NUMzVARS_, ctypes.byref(count)) return count.value def __contains__(self, key): """Determines whether a particular variable name is in the CDF @note: Essentially an efficiency function; L{__iter__} is called if this isn't defined @param key: key/variable name to check @type key: string @return: True if L{key} is the name of a variable in CDF, else False @rtype: Boolean """ try: foo = self[key] return True except KeyError as e: expected = str(key) + \ ": NO_SUCH_VAR: Named variable not found in this CDF." if expected in e.args: return False raise def __repr__(self): """Returns representation of CDF Cannot return anything that can be eval'd to create a copy of the CDF, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<CDF:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the CDF This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.CDF`, just the names, types, and sizes of all variables. (Attributes are not listed.) @return: description of the variables in the CDF @rtype: str """ if self._opened: return '\n'.join([key + ': ' + str(value) for (key, value) in sorted(self.items())]) #can get away with this sort because second value in tuple isn't #compared unless first are different, and variable name is unique. else: if isinstance(self.pathname, str): return 'Closed CDF {0}'.format(self.pathname) else: return 'Closed CDF {0}'.format(self.pathname.decode('ascii')) def _open(self, readonly=True): """Opens the CDF file (called on init) Will open an existing CDF file read/write. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.OPEN_, const.CDF_, self.pathname, ctypes.byref(self._handle)) self._opened = True if readonly: #Default is RW self.readonly(readonly) def _create(self): """Creates (and opens) a new CDF file Created at ``pathname``. Assumes zero-dimension r variables Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.CREATE_, const.CDF_, self.pathname, ctypes.c_long(0), (ctypes.c_long * 1)(0), ctypes.byref(self._handle)) self._opened = True def _from_master(self, master_path): """Creates a new CDF from a master CDF file ``master_path`` is copied to ``pathname`` and opened. Parameters ========== master_path : string location of the master CDF file Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. 
note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ if os.path.exists(self.pathname): raise CDFError(const.CDF_EXISTS) shutil.copy2(master_path, self.pathname) self._open(False) def _call(self, *args, **kwargs): """Select this CDF as current and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. Parameters ========== args : various, see :py:mod:`ctypes`. Passed directly to the CDF library interface. Useful constants are defined in the :doc:`const <pycdf_const>` module of this package. Returns ======= out : ctypes.c_long CDF status from the library .. note: Terminal NULL_ is automatically added to ``args``. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. """ return lib.call(const.SELECT_, const.CDF_, self._handle, *args, **kwargs) def clone(self, zVar, name=None, data=True): """ Clone a zVariable (from another CDF or this) into this CDF Parameters ========== zVar : :py:class:`Var` variable to clone Other Parameters ================ name : str Name of the new variable (default: name of the original) data : boolean (optional) Copy data, or only type, dimensions, variance, attributes? (default: True, copy data as well) Returns ======= out : :py:class:`Var` The newly-created zVar in this CDF """ if name is None: name = zVar.name() if name in self: del self[name] self.new(name, type=zVar.type(), recVary=zVar.rv(), dimVarys=zVar.dv(), dims=zVar._dim_sizes(), n_elements=zVar._nelems()) self[name].compress(*zVar.compress()) self[name].attrs.clone(zVar.attrs) if data: r = zVar._raw zVar._raw = True self.raw_var(name)[...] = zVar[...] zVar._raw = r return zVar def col_major(self, new_col=None): """ Finds the majority of this CDF file Other Parameters ================ new_col : boolean Specify True to change to column-major, False to change to row major, or do not specify to check the majority rather than changing it. (default is check only) Returns ======= out : boolean True if column-major, false if row-major """ if new_col != None: new_maj = const.COLUMN_MAJOR if new_col else const.ROW_MAJOR self._call(const.PUT_, const.CDF_MAJORITY_, new_maj) maj = ctypes.c_long(0) self._call(const.GET_, const.CDF_MAJORITY_, ctypes.byref(maj)) if not maj.value in (const.ROW_MAJOR.value, const.COLUMN_MAJOR.value): raise CDFError(const.BAD_MAJORITY) return maj.value == const.COLUMN_MAJOR.value def readonly(self, ro=None): """ Sets or check the readonly status of this CDF If the CDF has been changed since opening, setting readonly mode will have no effect. .. note:: Closing a CDF that has been opened readonly, or setting readonly False, may take a substantial amount of time if there are many variables in the CDF, as a (potentially large) cache needs to be cleared. Consider specifying ``readonly=False`` when opening the file if this is an issue. However, this may make some reading operations slower. Other Parameters ================ ro : Boolean True to set the CDF readonly, False to set it read/write, or leave out to check only. 
Returns ======= out : Boolean True if CDF is read-only, else False Raises ====== CDFError : if bad mode is set """ if ro == True: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYon) elif ro == False: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYoff) mode = ctypes.c_long(0) self._call(const.CONFIRM_, const.CDF_READONLY_MODE_, ctypes.byref(mode)) if mode.value == const.READONLYon.value: return True elif mode.value == const.READONLYoff.value: return False else: raise CDFError(const.BAD_READONLY_MODE.value) def checksum(self, new_val=None): """ Set or check the checksum status of this CDF. If checksums are enabled, the checksum will be verified every time the file is opened. Other Parameters ================ new_val : boolean True to enable checksum, False to disable, or leave out to simply check. Returns ======= out : boolean True if the checksum is enabled or False if disabled """ if new_val != None: self._call(const.PUT_, const.CDF_CHECKSUM_, const.MD5_CHECKSUM if new_val else const.NO_CHECKSUM) chk = ctypes.c_long(0) self._call(const.GET_, const.CDF_CHECKSUM_, ctypes.byref(chk)) if not chk.value in (const.MD5_CHECKSUM.value, const.NO_CHECKSUM.value): raise CDFError(const.BAD_CHECKSUM) return chk.value == const.MD5_CHECKSUM.value def close(self): """ Closes the CDF file Although called on object destruction (:meth:`~CDF.__del__`), to ensure all data are saved, the user should explicitly call :meth:`~CDF.close` or :meth:`~CDF.save`. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.CLOSE_, const.CDF_) self._opened = False def compress(self, comptype=None, param=None): """ Set or check the compression of this CDF Sets compression on entire *file*, not per-variable. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) Returns ======= out : tuple (comptype, param) currently in effect See Also ======== :meth:`Var.compress` Examples ======== Set file ``cdffile`` to gzip compression, compression level 9: >>> cdffile.compress(pycdf.const.GZIP_COMPRESSION, 9) """ return _compress(self, comptype, param) def new(self, name, data=None, type=None, recVary=True, dimVarys=None, dims=None, n_elements=None, compress=None, compress_param=None): """ Create a new zVariable in this CDF .. note:: Either ``data`` or ``type`` must be specified. If type is not specified, it is guessed from ``data``. Parameters ========== name : str name of the new variable Other Parameters ================ data data to store in the new variable. If this has a an ``attrs`` attribute (e.g., :class:`~spacepy.datamodel.dmarray`), it will be used to populate attributes of the new variable. type : ctypes.c_long CDF type of the variable, from :mod:`~pycdf.const`. See section 2.5 of the CDF user's guide for more information on CDF data types. recVary : boolean record variance of the variable (default True) dimVarys : list of boolean dimension variance of each dimension, default True for all dimensions. 
dims : list of int size of each dimension of this variable, default zero-dimensional. Note this is the dimensionality as defined by CDF, i.e., for record-varying variables it excludes the leading record dimension. See :py:class:`Var`. n_elements : int number of elements, should be 1 except for CDF_CHAR, for which it's the length of the string. compress : ctypes.c_long Compression to apply to this variable, default None. See :py:meth:`Var.compress`. compress_param : ctypes.c_long Compression parameter if compression used; reasonable default is chosen. See :py:meth:`Var.compress`. Returns ======= out : :py:class:`Var` the newly-created zVariable Raises ====== ValueError : if neither data nor sufficient typing information is provided. Notes ===== Any given data may be representable by a range of CDF types; if the type is not specified, pycdf will guess which the CDF types which can represent this data. This breaks down to: #. If input data is a numpy array, match the type of that array #. Proper kind (numerical, string, time) #. Proper range (stores highest and lowest number provided) #. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: #. Type that matches precision of data first, then #. integer type before float type, then #. Smallest type first, then #. signed type first, then #. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if ``data`` specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: #. absolute values between 0 and 3e-39 #. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. """ if type in (const.CDF_EPOCH16, const.CDF_INT8, const.CDF_TIME_TT2000) \ and self.backward: raise ValueError('Cannot use EPOCH16, INT8, or TIME_TT2000 ' 'in backward-compatible CDF') if not lib.supports_int8 and \ type in (const.CDF_INT8, const.CDF_TIME_TT2000): raise ValueError('INT8 and TIME_TT2000 require CDF library 3.4.0') if data is None: if type is None: raise ValueError('Must provide either data or a CDF type.') if dims is None: dims = [] if n_elements is None: n_elements = 1 else: (guess_dims, guess_types, guess_elements) = _Hyperslice.types(data) if dims is None: if recVary: if guess_dims == (): raise ValueError( 'Record-varying data cannot be scalar. 
' 'Specify NRV with CDF.new() or put data in array.') dims = guess_dims[1:] else: dims = guess_dims if type is None: type = guess_types[0] if type == const.CDF_EPOCH16.value and self.backward: type = const.CDF_EPOCH if n_elements is None: n_elements = guess_elements if dimVarys is None: dimVarys = [True for i in dims] recVary = const.VARY if recVary else const.NOVARY dimVarys = [const.VARY if dimVary else const.NOVARY for dimVary in dimVarys] if not hasattr(type, 'value'): type = ctypes.c_long(type) if type.value == const.CDF_INT8.value and not lib.supports_int8: raise ValueError( '64-bit integer support require CDF library 3.4.0') if type.value in (const.CDF_EPOCH16.value, const.CDF_INT8.value, const.CDF_TIME_TT2000.value) \ and self.backward: raise ValueError('Data requires EPOCH16, INT8, or TIME_TT2000; ' 'incompatible with backward-compatible CDF') new_var = Var(self, name, type, n_elements, dims, recVary, dimVarys) if compress != None: new_var.compress(compress, compress_param) if data is not None: new_var[...] = data if hasattr(data, 'attrs'): new_var.attrs.clone(data.attrs) return new_var def raw_var(self, name): """ Get a "raw" :class:`Var` object. Normally a :class:`Var` will perform translation of values for certain types (to/from Unicode for CHAR variables on Py3k, and to/from datetime for all time types). A "raw" object does not perform this translation, on read or write. This does *not* affect the data on disk, and in fact it is possible to maintain multiple Python objects with access to the same zVariable. Parameters ========== name : str name or number of the zVariable """ v = self[name] v._raw = True return v def save(self): """ Saves the CDF file but leaves it open. If closing the CDF, :meth:`close` is sufficient; there is no need to call :meth:`save` before :meth:`close`. .. note:: Relies on an undocumented call of the CDF C library, which is also used in the Java interface. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.SAVE_, const.CDF_) def copy(self): """ Make a copy of all data and attributes in this CDF Returns ======= out : :py:class:`CDFCopy` :class:`~spacepy.datamodel.SpaceData`-like object of all data """ return CDFCopy(self) def version(self): """ Get version of library that created this CDF Returns ======= out : tuple version of CDF library, in form (version, release, increment) """ ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) self._call(const.GET_, const.CDF_VERSION_, ctypes.byref(ver), const.GET_, const.CDF_RELEASE_, ctypes.byref(rel), const.GET_, const.CDF_INCREMENT_, ctypes.byref(inc)) return (ver.value, rel.value, inc.value) def _get_attrs(self): """Get attribute list Provide access to the CDF's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = gAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. 
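        A short example of dict-like access (a sketch; the attribute name
        ``Project`` is hypothetical and depends on the file):

        >>> cdffile.attrs['Project'] = 'ISTP>International Solar-Terrestrial Physics'
        >>> 'Project' in cdffile.attrs
        True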
""") def var_num(self, varname): """Get the variable number of a particular variable name This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : int Variable number of this zvariable. """ num = self._var_nums.get(varname, None) if num is None: #Copied from Var._get, which can hopefully be thinned varNum = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMBER_, varname, ctypes.byref(varNum)) num = varNum.value self._var_nums[varname] = num return num def attr_num(self, attrname): """Get the attribute number and scope by attribute name This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : tuple attribute number, scope (True for global) of this attribute """ res = self._attr_info.get(attrname, None) if res is None: #Copied from Var._get, which can hopefully be thinned attrNum = ctypes.c_long(0) self._call(const.GET_, const.ATTR_NUMBER_, attrname, ctypes.byref(attrNum)) scope = ctypes.c_long(0) self._call(const.SELECT_, const.ATTR_, attrNum, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) res = (attrNum.value, scope) self._attr_info[attrname] = res return res def clear_attr_from_cache(self, attrname): """Mark an attribute deleted in the name-to-number cache Will remove an attribute, and all attributes with higher numbers, from the attribute cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the attribute. Not this is NOT a string in Python 3! """ num, scope = self.attr_num(attrname) #All numbers higher than this are renumbered for a, n in list(self._attr_info.items()): if n[0] >= num: del self._attr_info[a] def clear_from_cache(self, varname): """Mark a variable deleted in the name-to-number cache Will remove a variable, and all variables with higher numbers, from the variable cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! """ num = self.var_num(varname) #All numbers higher than this are renumbered for v, n in list(self._var_nums.items()): if n >= num: del self._var_nums[v] def add_attr_to_cache(self, attrname, num, scope): """Add an attribute to the name-to-number cache This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable scope : bool True if global scope; False if variable scope. 
""" self._attr_info[attrname] = (num, scope) def add_to_cache(self, varname, num): """Add a variable to the name-to-number cache This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable """ self._var_nums[varname] = num #Note there is no function for delete, currently handled in Var.rename #and Attr.rename by just deleting from the dict directly. Maybe this #should be differen (maybe should be possible to follow a variable across #a rename...) class Var(MutableSequence): """ A CDF variable. This object does not directly store the data from the CDF; rather, it provides access to the data in a format that much like a Python list or numpy :class:`~numpy.ndarray`. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. The CDF user's guide, section 2.3, provides background on variables. .. note:: Not intended to be created directly; use methods of :class:`CDF` to gain access to a variable. A record-varying variable's data are viewed as a hypercube of dimensions n_dims+1 (the extra dimension is the record number). They are indexed in row-major fashion, i.e. the last index changes most frequently / is contiguous in memory. If the CDF is column-major, the data are transformed to row-major before return. Non record-varying variables are similar, but do not have the extra dimension of record number. Variables can be subscripted by a multidimensional index to return the data. Indices are in row-major order with the first dimension representing the record number. If the CDF is column major, the data are reordered to row major. Each dimension is specified by standard Python `slice <http://docs.python.org/tutorial/introduction.html#strings>`_ notation, with dimensions separated by commas. The ellipsis fills in any missing dimensions with full slices. The returned data are lists; Python represents multidimensional arrays as nested lists. The innermost set of lists represents contiguous data. .. note:: numpy 'fancy indexing' is *not* supported. Degenerate dimensions are 'collapsed', i.e. no list of only one element will be returned if a single subscript is specified instead of a range. (To avoid this, specify a slice like 1:2, which starts with 1 and ends before 2). Two special cases: 1. requesting a single-dimension slice for a record-varying variable will return all data for that record number (or those record numbers) for that variable. 2. Requests for multi-dimensional variables may skip the record-number dimension and simply specify the slice on the array itself. In that case, the slice of the array will be returned for all records. In the event of ambiguity (e.g., single-dimension slice on a one-dimensional variable), case 1 takes priority. Otherwise, mismatch between the number of dimensions specified in the slice and the number of dimensions in the variable will cause an :exc:`~exceptions.IndexError` to be thrown. This all sounds very complicated but it is essentially attempting to do the 'right thing' for a range of slices. An unusual case is scalar (zero-dimensional) non-record-varying variables. Clearly they cannot be subscripted normally. 
In this case, use the ``[...]`` syntax meaning 'access all data.': >>> import pycdf >>> testcdf = pycdf.CDF('test.cdf', '') >>> variable = testcdf.new('variable', recVary=False, ... type=pycdf.const.CDF_INT4) >>> variable[...] = 10 >>> variable <Var: CDF_INT4 [] NRV > >>> variable[...] 10 Reading any empty non-record-varying variable will return an empty with the same *number* of dimensions, but all dimensions will be of zero length. The scalar is, again, a special case: due to the inability to have a numpy array which is both zero-dimensional and empty, reading an NRV scalar variable with no data will return an empty one-dimensional array. This is really not recommended. As a list type, variables are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_; iterating over a variable returns a single complete record at a time. This is all clearer with examples. Consider a variable ``B_GSM``, with three elements per record (x, y, z components) and fifty records in the CDF. Then: 1. ``B_GSM[0, 1]`` is the y component of the first record. 2. ``B_GSM[10, :]`` is a three-element list, containing x, y, and z components of the 11th record. As a shortcut, if only one dimension is specified, it is assumed to be the record number, so this could also be written ``B_GSM[10]``. 3. ``B_GSM[...]`` reads all data for ``B_GSM`` and returns it as a fifty-element list, each element itself being a three-element list of x, y, z components. Multidimensional example: consider fluxes stored as a function of pitch angle and energy. Such a variable may be called Flux and stored as a two-dimensional array, with the first dimension representing (say) ten energy steps and the second, eighteen pitch angle bins (ten degrees wide, centered from 5 to 175 degrees). Assume 100 records stored in the CDF (i.e. 100 different times). 1. ``Flux[4]`` is a list of ten elements, one per energy step, each element being a list of 18 fluxes, one per pitch bin. All are taken from the fifth record in the CDF. 2. ``Flux[4, :, 0:4]`` is the same record, all energies, but only the first four pitch bins (roughly, field-aligned). 3. ``Flux[..., 0:4]`` is a 100-element list (one per record), each element being a ten-element list (one per energy step), each containing fluxes for the first four pitch bins. This slicing notation is very flexible and allows reading specifically the desired data from the CDF. All data are, on read, converted to appropriate Python data types; EPOCH, EPOCH16, and TIME_TT2000 types are converted to :class:`~datetime.datetime`. Data are returned in numpy arrays. .. note:: Although pycdf supports TIME_TT2000 variables, the Python :class:`~datetime.datetime` object does not support leap seconds. Thus, on read, any seconds past 59 are truncated to 59.999999 (59 seconds, 999 milliseconds, 999 microseconds). Potentially useful list methods and related functions: - `count <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `in <http://docs.python.org/reference/expressions.html#in>`_ - `index <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `len <http://docs.python.org/library/functions.html#len>`_ - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - `sorted <http://docs.python.org/library/functions.html#sorted>`_ The topic of array majority can be very confusing; good background material is available at `IDL Array Storage and Indexing <http://www.idlcoyote.com/misc_tips/colrow_major.html>`_. 
In brief, *regardless of the majority stored in the CDF*, pycdf will always present the data in the native Python majority, row-major order, also known as C order. This is the default order in `NumPy <http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html #internal-memory-layout-of-an-ndarray>`_. However, packages that render image data may expect it in column-major order. If the axes seem 'swapped' this is likely the reason. The :attr:`~Var.attrs` Python attribute acts as a dictionary referencing zAttributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`zAttrList` for more on the dictionary of attributes. With writing, as with reading, every attempt has been made to match the behavior of Python lists. You can write one record, many records, or even certain elements of all records. There is one restriction: only the record dimension (i.e. dimension 0) can be resized by write, as all records in a variable must have the same dimensions. Similarly, only whole records can be deleted. .. note:: Unusual error messages on writing data usually mean that pycdf is unable to interpret the data as a regular array of a single type matching the type and shape of the variable being written. A 5x4 array is supported; an irregular array where one row has five columns and a different row has six columns is not. Error messages of this type include: - ``Data must be well-formed, regular array of number, string, or datetime`` - ``setting an array element with a sequence.`` - ``shape mismatch: objects cannot be broadcast to a single shape`` For these examples, assume Flux has 100 records and dimensions [2, 3]. Rewrite the first record without changing the rest: >>> Flux[0] = [[1, 2, 3], [4, 5, 6]] Writes a new first record and delete all the rest: >>> Flux[...] = [[1, 2, 3], [4, 5, 6]] Write a new record in the last position and add a new record after: >>> Flux[99:] = [[[1, 2, 3], [4, 5, 6]], ... [[11, 12, 13], [14, 15, 16]]] Insert two new records between the current number 5 and 6: >>> Flux[5:6] = [[[1, 2, 3], [4, 5, 6]], [[11, 12, 13], ... [14, 15, 16]]] This operation can be quite slow, as it requires reading and rewriting the entire variable. (CDF does not directly support record insertion.) Change the first element of the first two records but leave other elements alone: >>> Flux[0:2, 0, 0] = [1, 2] Remove the first record: >>> del Flux[0] Removes record 5 (the sixth): >>> del Flux[5] Due to the need to work around a bug in the CDF library, this operation can be quite slow. Delete *all data* from ``Flux``, but leave the variable definition intact: >>> del Flux[...] .. note:: Although this interface only directly supports zVariables, zMode is set on opening the CDF so rVars appear as zVars. See p.24 of the CDF user's guide; pyCDF uses zMode 2. .. autosummary:: ~Var.attrs ~Var.compress ~Var.copy ~Var.dtype ~Var.dv ~Var.insert ~Var.name ~Var.rename ~Var.rv ~Var.shape ~Var.type .. attribute:: Var.attrs zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. .. automethod:: compress .. automethod:: copy .. autoattribute:: dtype .. automethod:: dv .. automethod:: insert .. automethod:: name .. automethod:: rename .. automethod:: rv .. autoattribute:: shape .. 
automethod:: type """ def __init__(self, cdf_file, var_name, *args): """Create or locate a variable Parameters ========== cdf_file : :py:class:`pycdf.CDF` CDF file containing this variable var_name : string name of this variable Other Parameters ================ args additional arguments passed to :py:meth:`_create`. If none, opens an existing variable. If provided, creates a new one. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning """ self.cdf_file = cdf_file #This is the definitive "identify" of variable self._name = None self._type = None #CDF type (long) self._raw = False #Raw access (skip all conversions) if len(args) == 0: self._get(var_name) else: self._create(var_name, *args) #Weak reference to attribute list (use attrs instead) #This avoids a reference loop self._attrlistref = weakref.ref(zAttrList(self)) def __getitem__(self, key): """Returns a slice from the data array. Details under :py:class:`pycdf.Var`. @return: The data from this variable @rtype: list-of-lists of appropriate type. @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) #Hyperslice mostly catches this sort of thing, but #an empty variable is a special case, since we might want to #WRITE to 0th record (which Hyperslice also supports) but #can't READ from it, and iterating over tries to read from it. if hslice.rv: if hslice.dimsizes[0] == 0 and hslice.degen[0] and \ hslice.starts[0] == 0: raise IndexError('record index out of range') #For NRV, again hslice will assume 0th record exists since we might #want to write. So ANY degenerate dim other than the glued-on 0th #suggests an explicit index that should fail. None degenerate suggests #make an empty array. #Note this is pulling a lot of hyperslice stuff into getitem! elif hslice.dimsizes[0] == 0: if len(hslice.degen) > 1 and max(hslice.degen[1:]): raise IndexError('record index out of range') else: #The zero-length dimension is degenerate so it gets chopped, #and you can't have a zero-length numpy array that still #maintains the size of all other dimensions. So just force #a zero-dim array and the rest will follow hslice.counts[...] = 0 #If this is a scalar, need to make a single non-degenerate #dimension so it can be empty. if len(hslice.counts) == 1: hslice.degen[0] = False result = hslice.create_array() if hslice.counts[0] != 0: hslice.select() lib.call(const.GET_, const.zVAR_HYPERDATA_, result.ctypes.data_as(ctypes.c_void_p)) return hslice.convert_input_array(result) def __delitem__(self, key): """Removes a record (or set of records) from the CDF Only whole records can be deleted, so the del call must either specify only one dimension or it must specify all elements of the non-record dimensions. This is *not* a way to resize a variable! Deleting records from the middle of a variable may be very slow in some circumstances. To work around a bug in CDF library versions 3.4.0 and before, all the data must be read in, the requested deletions done, and then all written back out. 
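        A brief example (a sketch; assumes ``var`` is a record-varying
        zVariable with at least one record):

        >>> del var[0]      #remove the first record
        >>> del var[...]    #remove all records, keep the variable definition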
@param key: index or slice to delete @type key: int or slice @raise TypeError: if an attempt is made to delete from a non record-varying variable, or to delete below the record level """ if not self.rv(): raise TypeError('Cannot delete records from non-record-varying ' 'variable.') hslice = _Hyperslice(self, key) if hslice.dims > 1 and (hslice.counts[1:] != hslice.dimsizes[1:]).any(): raise TypeError('Can only delete entire records.') if hslice.counts[0] == 0: return start = hslice.starts[0] count = hslice.counts[0] interval = hslice.intervals[0] dimsize = hslice.dimsizes[0] self._call() dangerous_delete = False if lib._del_middle_rec_bug and \ (interval != 1 or (start != 0 and start + count < dimsize)): #delete from middle is dangerous if only have one index entry entries = ctypes.c_long(0) lib.call(const.GET_, const.zVAR_nINDEXENTRIES_, ctypes.byref(entries)) dangerous_delete = (entries.value == 1) if dangerous_delete: data = self[...] data = numpy.delete( data, numpy.arange(start, start + count * interval, interval), 0) self[0:dimsize - count] = data first_rec = dimsize - count last_rec = dimsize - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif interval == 1: first_rec = ctypes.c_long(start) last_rec = ctypes.c_long(start + count - 1) lib.call(const.DELETE_, const.zVAR_RECORDS_, first_rec, last_rec) else: self._call() #delete from end to avoid renumbering of records for recno in range(start + (count - 1) * interval, start - 1, -1 * interval): lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(recno), ctypes.c_long(recno)) def __setitem__(self, key, data): """Puts a slice into the data array. Details under :py:class:`pycdf.Var`. @param key: index or slice to store @type key: int or slice @param data: data to store @type data: numpy.array @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. 
IndexError will @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) n_recs = hslice.counts[0] hslice.expand(data) cdf_type = self.type() if cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) else: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=self._np_type()) if cdf_type == const.CDF_EPOCH16.value: datashape = data.shape[:-1] else: datashape = data.shape #Check data sizes if datashape != tuple(hslice.expected_dims()): raise ValueError('attempt to assign data of dimensions ' + str(datashape) + ' to slice of dimensions ' + str(tuple(hslice.expected_dims()))) #Flip majority and reversed dimensions, see convert_input_array data = hslice.convert_output_array(data) #Handle insertions and similar weirdness if hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Specified slice ends before last record, so insert in middle saved_data = self[hslice.starts[0] + n_recs:] if hslice.counts[0] > 0: hslice.select() lib.call(const.PUT_, const.zVAR_HYPERDATA_, data.ctypes.data_as(ctypes.c_void_p)) if hslice.counts[0] < n_recs: first_rec = hslice.starts[0] + hslice.counts[0] last_rec = hslice.dimsizes[0] - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Put saved data in after inserted data self[hslice.starts[0] + hslice.counts[0]:] = saved_data def extend(self, data): """ Append multiple values to the end of this variable This is an efficiency function which overrides the base implementation in MutableSequence. Parameters ---------- data : the data to append """ self[len(self):] = data def insert(self, index, data): """ Inserts a *single* record before an index Parameters ---------- index : int index before which to insert the new record data : the record to insert """ self[index:index] = [data] def _create(self, var_name, datatype, n_elements = 1, dims = (), recVary = const.VARY, dimVarys = None): """Creates a new zVariable @param var_name: name of this variable @type var_name: string @param datatype: CDF data type @type datatype: ctypes.c_long @param n_elements: number of elements (should be 1 except for CDF_CHAR variables). @type n_elements: long @param dims: size of each dimension for multi-dimensional variable, or empty for a zero-dimensional @type dims: sequence of long @param recVary: record variance for this variable (VARY/NOVARY) @type recVary: long @param dimVarys: array of VARY or NOVARY, variance for each dimension @type dimVarys: sequence of long @return: new variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.new}. 
""" dim_array = (ctypes.c_long * len(dims))(*dims) enc_name = var_name.encode('ascii') if dimVarys is None: dim_vary_array = (ctypes.c_long * (len(dims) if len(dims) > 0 else 1))(const.VARY) else: dim_vary_array = (ctypes.c_long * len(dims))(*dimVarys) varNum = ctypes.c_long(0) self.cdf_file._call(const.CREATE_, const.zVAR_, enc_name, datatype, ctypes.c_long(n_elements), ctypes.c_long(len(dims)), dim_array, recVary, dim_vary_array, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) def _delete(self): """Removes this zVariable from the CDF @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ self._call(const.DELETE_, const.zVAR_) self.cdf_file.clear_from_cache(self._name) self._name = None def _get(self, var_name): """Gets an existing zVariable @param var_name: name of this variable @type var_name: string @return: variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.__getitem__}. """ if isinstance(var_name, str_classes): try: enc_name = var_name.encode('ascii').rstrip() except AttributeError: enc_name = var_name.rstrip() #already in ASCII #'touch' CDF to cause an error if the name isn't there; get number varNum = ctypes.c_long(0) self.cdf_file._call(const.GET_, const.zVAR_NUMBER_, enc_name, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) else: #Looking up by number name = ctypes.create_string_buffer(const.CDF_VAR_NAME_LEN256+1) self.cdf_file._call(const.SELECT_, const.zVAR_, ctypes.c_long(var_name), const.GET_, const.zVAR_NAME_, name) self._name = name.value.rstrip() self.cdf_file.add_to_cache(self._name, var_name) def _num(self): """Returns the zVar number for this variable @return: number of this zVar @rtype: int """ return self.cdf_file.var_num(self._name) def __len__(self): """Get number of records for this variable in this file @return: Number of records @rtype: long @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ count = ctypes.c_long(0) self._call(const.GET_, const.zVAR_MAXREC_, ctypes.byref(count)) return (count.value + 1) def __repr__(self): """Returns representation of the variable Cannot return anything that can be eval'd to create a copy, so just wrap the informal representation in angle brackets. @return: info on this zVar @rtype: str """ return '<Var:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the variable This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.Var`. 
@return: info on this zVar, CDFTYPE [dimensions] NRV (if not record-varying) @rtype: str """ if self.cdf_file._opened: cdftype = self.type() chartypes = (const.CDF_CHAR.value, const.CDF_UCHAR.value) rv = self.rv() typestr = lib.cdftypenames[cdftype] + \ ('*' + str(self._nelems()) if cdftype in chartypes else '' ) if rv: sizestr = str([len(self)] + self._dim_sizes()) else: sizestr = str(self._dim_sizes()) return typestr + ' ' + sizestr + ('' if rv else ' NRV') else: if isinstance(self._name, str): return 'zVar "{0}" in closed CDF {1}'.format( self._name, self.cdf_file.pathname) else: return 'zVar "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self.cdf_file.pathname.decode('ascii')) def _n_dims(self): """Get number of dimensions for this variable @return: the number of dimensions @rtype: long """ n_dims = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMDIMS_, ctypes.byref(n_dims)) return n_dims.value def _dim_sizes(self): """Get the dimension sizes for this variable @return: sequence of sizes @rtype: sequence of long @note: This will always be in Python order (i.e. row major, last index iterates most quickly), *regardless* of the majority of the CDF. """ sizes = (ctypes.c_long * const.CDF_MAX_DIMS)(0) self._call(const.GET_, const.zVAR_DIMSIZES_, sizes) sizes = sizes[0:self._n_dims()] return sizes def rv(self, new_rv=None): """ Gets or sets whether this variable has record variance If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Other Parameters ================ new_rv : boolean True to change to record variance, False to change to NRV, unspecified to simply check variance. Returns ======= out : Boolean True if record varying, False if NRV """ if new_rv != None: self._call(const.PUT_, const.zVAR_RECVARY_, const.VARY if new_rv else const.NOVARY) vary = ctypes.c_long(0) self._call(const.GET_, const.zVAR_RECVARY_, ctypes.byref(vary)) return vary.value != const.NOVARY.value def dv(self, new_dv=None): """ Gets or sets dimension variance of each dimension of variable. If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Parameters ========== new_dv : list of boolean Each element True to change that dimension to dimension variance, False to change to not dimension variance. (Unspecified to simply check variance.) Returns ======= out : list of boolean True if that dimension has variance, else false. """ ndims = self._n_dims() if new_dv != None: if len(new_dv) != ndims: raise ValueError('Must specify variance for ' + str(ndims) + 'dimensions.') varies = (ctypes.c_long * ndims)( *[const.VARY if dv else const.NOVARY for dv in new_dv]) self._call(const.PUT_, const.zVAR_DIMVARYS_, varies) if ndims == 0: return [] varies = (ctypes.c_long * const.CDF_MAX_DIMS)() self._call(const.GET_, const.zVAR_DIMVARYS_, varies) return [dv != const.NOVARY.value for dv in varies[0:ndims]] def _call(self, *args, **kwargs): """Select this CDF and variable and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. @param args: Passed directly to the CDF library interface. Useful constants are defined in the :py:mod:`pycdf.const` module of this package. @type args: various, see :py:mod:`ctypes`. 
@return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self.cdf_file._call( const.SELECT_, const.zVAR_, ctypes.c_long(self.cdf_file.var_num(self._name)), *args, **kwargs) def _np_type(self): """Returns the numpy type of this variable This is the numpy type that will come directly out of the CDF; see :meth:`dtype` for the representation post-conversion. Raises ====== CDFError : for library-reported error or failure to find numpy type Returns ======= out : dtype numpy dtype that will hold value from this variable """ cdftype = self.type() if cdftype == const.CDF_CHAR.value or cdftype == const.CDF_UCHAR.value: return numpy.dtype('S' + str(self._nelems())) try: return lib.numpytypedict[cdftype] except KeyError: raise CDFError(const.BAD_DATA_TYPE) def type(self, new_type=None): """ Returns or sets the CDF type of this variable Parameters ========== new_type : ctypes.c_long the new type from :mod:`~pycdf.const` Returns ======= out : int CDF type """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) n_elements = ctypes.c_long(self._nelems()) self._call(const.PUT_, const.zVAR_DATASPEC_, new_type, n_elements) self._type = None if self._type is None: cdftype = ctypes.c_long(0) self._call(const.GET_, const.zVAR_DATATYPE_, ctypes.byref(cdftype)) self._type = cdftype.value return self._type def _nelems(self): """Number of elements for each value in this variable This is the length of strings for CHAR and UCHAR, should be 1 otherwise. @return: length of strings @rtype: int """ nelems = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMELEMS_, ctypes.byref(nelems)) return nelems.value def name(self): """ Returns the name of this variable Returns ======= out : str variable's name """ if isinstance(self._name, str): return self._name elif isinstance(self._name, bytes): return self._name.decode() def compress(self, comptype=None, param=None): """ Set or check the compression of this variable Compression may not be changeable on variables with data already written; even deleting the data may not permit the change. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
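For example, to request gzip compression on this variable (sketch only;
``const.GZIP_COMPRESSION`` is the gzip constant from :mod:`~pycdf.const`,
and the change may fail if data have already been written):

>>> var.compress(const.GZIP_COMPRESSION, param=5)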
Returns ======= out : tuple the (comptype, param) currently in effect """ return _compress(self, comptype, param) def copy(self): """ Copies all data and attributes from this variable Returns ======= out : :class:`VarCopy` list of all data in record order """ return VarCopy(self) def rename(self, new_name): """ Renames this variable Parameters ========== new_name : str the new name for this variable """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_VAR_NAME_LEN256: raise CDFError(const.BAD_VAR_NAME) self._call(const.PUT_, const.zVAR_NAME_, enc_name) self.cdf_file.add_to_cache( enc_name, self.cdf_file.var_num(self._name)) #Still in cache del self.cdf_file._var_nums[self._name] self._name = enc_name @property def shape(self): """ Provides the numpy array-like shape of this variable. Returns a tuple; first element is number of records (RV variable only) And the rest provide the dimensionality of the variable. .. note:: Assigning to this attribute will not change the shape. """ if self.rv(): return tuple([len(self)] + self._dim_sizes()) else: return tuple(self._dim_sizes()) @property def dtype(self): """ Provide the numpy dtype equivalent to the CDF type of this variable. Data from this variable will be returned in numpy arrays of this type. See Also -------- type """ cdftype = self.type() if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str is not bytes and not self._raw: return numpy.dtype('U' + str(self._nelems())) if cdftype in (const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value) and not self._raw: return numpy.dtype('O') return self._np_type() def _get_attrs(self): """Get attribute list Provide access to the zVar's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = zAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. """) class _Hyperslice(object): """Represents a CDF 'slice' used for the hyper CDF functions For internal module use only. @ivar dims: number of dimensions to this slice, usually number of dimensions to the variable plus one for the record, which represents the 0th (least rapidly varying) dimension. @type dims: int @ivar dimsizes: size of each dimension (0th is number of records) @type dimsizes: list of int @ivar starts: index of the start value for each dimension ('dimension indices' in CDF speak) @type starts: list of int @ivar counts: number of values to get from each dimension. Final result will be the product of everything in counts. ('dimension counts' in CDF speak) @type counts: numpy.array @ivar intervals: interval between successive indices to use for each dimension. ('dimension invervals' in CDF speak) @type intervals: list of int @ivar degen: is this dimension degenerate, i.e. should be removed in the returned dataset. A 3D array with one dimension degenerate will be returned as a 2D array (i.e. list-of-lists.) @type degen: numpy.array @ivar rev: should this dimension be returned in reverse order? 
@type rev: numpy.array @ivar column: is this slice in column-major mode (if false, row-major) @type column: boolean @ivar zvar: what CDF variable this object slices on @type zvar: :py:class:`pycdf.Var` @ivar expanded_key: fully-expanded version of the key passed to the constructor (all dimensions filled in) @type expanded_key: tuple @note: All dimension-related variables are stored row-major (Python order) """ def __init__(self, zvar, key): """Create a Hyperslice @param zvar: zVariable that this slices @type zvar: :py:class:`pycdf.Var` @param key: Python multi-dimensional slice as passed to __getitem__ @type key: tuple of slice and/or int @raise IndexError: if slice is out of range, mismatches dimensions, or otherwise unparsable. @raise ValueError: if slice has invalid values """ self.zvar = zvar self.rv = self.zvar.rv() #dim of records, + 1 record dim (NRV always is record 0) self.dims = zvar._n_dims() + 1 self.dimsizes = [len(zvar)] + \ zvar._dim_sizes() self.starts = [0] * self.dims self.counts = numpy.empty((self.dims,), dtype=numpy.int32) self.counts.fill(1) self.intervals = [1] * self.dims self.degen = numpy.zeros(self.dims, dtype=numpy.bool) self.rev = numpy.zeros(self.dims, dtype=numpy.bool) #key is: #1. a single value (integer or slice object) if called 1D #2. a tuple (of integers and/or slice objects) if called nD #3. Each item is either a single value (degenerate dim) # or a slice object. if not hasattr(key, '__len__'): #Not a container object, pack in tuple key = (key, ) if not self.rv: key = (0, ) + key #NRV, so always get 0th record (degenerate) key = self.expand_ellipsis(key, self.dims) if self.rv: #special-cases for RV variables if len(key) == 1: #get all data for this record(s) key = self.expand_ellipsis(key + (Ellipsis, ), self.dims) elif len(key) == self.dims - 1: #get same slice from each record key = (slice(None, None, None), ) + key if len(key) == self.dims: self.expanded_key = key for i in range(self.dims): idx = key[i] if hasattr(idx, 'start'): #slice (self.starts[i], self.counts[i], self.intervals[i], self.rev[i]) = \ self.convert_range(idx.start, idx.stop, idx.step, self.dimsizes[i]) else: #Single degenerate value if idx < 0: idx += self.dimsizes[i] if idx != 0 and (idx >= self.dimsizes[i] or idx < 0): raise IndexError('list index out of range') self.starts[i] = idx self.degen[i] = True else: raise IndexError('Slice does not match dimensions for zVar ' + str(zvar._name)) self.column = zvar.cdf_file.col_major() def expected_dims(self, data=None): """Calculate size of non-degenerate dimensions Figures out size, in each dimension, of expected input data @return: size of each dimension for this slice, excluding degenerate @rtype: list of int """ return [self.counts[i] for i in range(self.dims) if not self.degen[i]] def expand(self, data): """Expands the record dimension of this slice to hold a set of data If the length of data (outermost dimension) is larger than the record count (counts[0]) for this slice, expand the slice to hold all the data. This requires that the record dimension of the slice not be degenerate, and also that it not have been completely specified when the hyperslice was created (i.e. record dimension either ellipsis or no specified stop.) Does *not* expand any other dimension, since that's Very Hard in CDF. 
@param data: the data which are intended to be stored in this slice @type data: list """ rec_slice = self.expanded_key[0] if not self.rv or isinstance(data, str_classes) or self.degen[0] or \ not hasattr(rec_slice, 'stop'): return if len(data) < self.counts[0]: #Truncate to fit data if rec_slice.stop is None and rec_slice.step in (None, 1): self.counts[0] = len(data) elif len(data) > self.counts[0]: #Expand to fit data if rec_slice.step in (None, 1): self.counts[0] = len(data) def create_array(self): """Creates a numpy array to hold the data from this slice Returns ======= out : numpy.array array sized, typed, and dimensioned to hold data from this slice """ counts = self.counts degen = self.degen if self.column: counts = self.reorder(counts) degen = self.reorder(degen) #TODO: Forcing C order for now, revert to using self.column later array = numpy.empty( [counts[i] for i in range(len(counts)) if not degen[i]], self.zvar._np_type(), order='C') return numpy.require(array, requirements=('C', 'A', 'W')) def convert_input_array(self, buffer): """Converts a buffer of raw data from this slice EPOCH(16) variables always need to be converted. CHAR need converted to Unicode if py3k Parameters ========== buffer : numpy.array data as read from the CDF file Returns ======= out : numpy.array converted data """ result = self._flip_array(buffer) #Convert to derived types cdftype = self.zvar.type() if not self.zvar._raw: if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str != bytes: dt = numpy.dtype('U{0}'.format(result.dtype.itemsize)) result = numpy.require(numpy.char.array(result).decode(), dtype=dt) elif cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(result) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(result) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(result) return result def convert_output_array(self, buffer): """Convert a buffer of data that will go into this slice Parameters ========== buffer : numpy.array data to go into the CDF file Returns ======= out : numpy.array input with majority flipped and dimensions reversed to be suitable to pass directly to CDF library. """ buffer = self._flip_array(buffer) return numpy.require(buffer, requirements=('C', 'A', 'W')) def _flip_array(self, data): """ Operations for majority, etc. common between convert_input and _output """ cdftype = self.zvar.type() #Flip majority if any non-degenerate dimensions exist if self.column and not min(self.degen): #Record-number dim degen, swap whole thing if self.degen[0]: if cdftype == const.CDF_EPOCH16.value: #Maintain last dimension data = data.transpose( list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose() #Record-number dimension is not degenerate, so keep it first else: if cdftype == const.CDF_EPOCH16.value: data = data.transpose( [0] + list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose( [0] + list(range(len(data.shape) - 1, 0, -1))) #Reverse non-degenerate dimensions in rev #Remember that the degenerate indices are already gone! 
if self.rev.any(): sliced = [(slice(None, None, -1) if self.rev[i] else slice(None)) for i in range(self.dims) if not self.degen[i]] if cdftype == const.CDF_EPOCH16.value: #don't reverse last dim sliced.extend(slice(None)) data = operator.getitem(data, tuple(sliced)) return data def select(self): """Selects this hyperslice in the CDF Calls the CDF library to select the CDF, variable, records, and array elements corresponding to this slice. """ args = (const.SELECT_, const.zVAR_RECNUMBER_, ctypes.c_long(self.starts[0]), const.SELECT_, const.zVAR_RECCOUNT_, ctypes.c_long(self.counts[0]), const.SELECT_, const.zVAR_RECINTERVAL_, ctypes.c_long(self.intervals[0])) if self.dims > 1: dims = self.dims - 1 args += (const.SELECT_, const.zVAR_DIMINDICES_, (ctypes.c_long * dims)(*self.starts[1:]), const.SELECT_, const.zVAR_DIMCOUNTS_, (ctypes.c_long * dims)(*self.counts[1:]), const.SELECT_, const.zVAR_DIMINTERVALS_, (ctypes.c_long * dims)(*self.intervals[1:])) self.zvar._call(*args) @staticmethod def expand_ellipsis(slices, n_dims): """Expands any ellipses into correct number of full-size slices @param slices: tuple of slices, integers, or ellipse objects @type slices: tuple @param n_dims: number of dimensions this slice is over @type n_dims: int @return: L{slices} with ellipses replaced by appropriate number of full-dimension slices @rtype: tuple @raise IndexError: if ellipses specified when already have enough dimensions """ if slices is Ellipsis: return tuple([slice(None, None, None) for i in range(n_dims)]) #Elements might be numpy arrays, so can't use in/index idx = [i for i, v in enumerate(slices) if v is Ellipsis] if not idx: #no ellipsis return slices if len(idx) > 1: #multiples! raise IndexError('Ellipses can only be used once per slice.') idx = idx[0] #how many dims to expand ellipsis to #remember the ellipsis is in len(slices) and must be replaced! extra = n_dims - len(slices) + 1 if extra < 0: raise IndexError('too many indices') result = slices[0:idx] + (slice(None), ) * extra + slices[idx+1:] return result @staticmethod def check_well_formed(data): """Checks if input data is well-formed, regular array""" d = numpy.asanyarray(data) if d.dtype == numpy.object: #this is probably going to be bad try: len(d.flat[0]) except TypeError: #at least it's not a list pass else: raise ValueError( 'Data must be well-formed, regular array of number, ' 'string, or datetime') @staticmethod def dimensions(data): """Finds the dimensions of a nested list-of-lists @param data: data of which dimensions are desired @type data: list (of lists) @return: dimensions of L{data}, in order outside-in @rtype: list of int @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) _Hyperslice.check_well_formed(d) return d.shape @staticmethod def types(data, backward=False): """Find dimensions and valid types of a nested list-of-lists Any given data may be representable by a range of CDF types; infer the CDF types which can represent this data. This breaks down to: 1. Proper kind (numerical, string, time) 2. Proper range (stores highest and lowest number) 3. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: 1. Type that matches precision of data first, then 2. integer type before float type, then 3. Smallest type first, then 4. signed type first, then 5. specifically-named (CDF_BYTE) vs. 
generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if L{data} specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: 1. absolute values between 0 and 3e-39 2. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. @param data: data for which dimensions and CDF types are desired @type data: list (of lists) @param backward: limit to pre-CDF3 types @type backward: bool @return: dimensions of L{data}, in order outside-in; CDF types which can represent this data; number of elements required (i.e. length of longest string) @rtype: 3-tuple of lists ([int], [ctypes.c_long], [int]) @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) dims = d.shape elements = 1 types = [] _Hyperslice.check_well_formed(d) if d.dtype.kind in ('S', 'U'): #it's a string types = [const.CDF_CHAR, const.CDF_UCHAR] elements = d.dtype.itemsize if d.dtype.kind == 'U': #UTF-8 uses 4 bytes per elements //= 4 elif d.size and hasattr(numpy.ma.getdata(d).flat[0], 'microsecond'): if max((dt.microsecond % 1000 for dt in d.flat)) > 0: types = [const.CDF_EPOCH16, const.CDF_EPOCH, const.CDF_TIME_TT2000] else: types = [const.CDF_EPOCH, const.CDF_EPOCH16, const.CDF_TIME_TT2000] if backward: del types[types.index(const.CDF_EPOCH16)] del types[-1] elif not lib.supports_int8: del types[-1] elif d is data or isinstance(data, numpy.generic): #numpy array came in, use its type (or byte-swapped) types = [k for k in lib.numpytypedict if (lib.numpytypedict[k] == d.dtype or lib.numpytypedict[k] == d.dtype.newbyteorder()) and not k in lib.timetypes] if (not lib.supports_int8 or backward) \ and const.CDF_INT8.value in types: del types[types.index(const.CDF_INT8.value)] #Maintain priority to match the ordered lists below: #float/double (44, 45) before real (21/22), and #byte (41) before int (1) before char (51). So hack. #Consider making typedict an ordered dict once 2.6 is dead. types.sort(key=lambda x: x % 50, reverse=True) if not types: #not a numpy array, or can't parse its type if d.dtype.kind == 'O': #Object. 
Try to make it numeric #Can't do safe casting from Object, so try and compare #Basically try most restrictive to least restrictive trytypes = (numpy.uint64, numpy.int64, numpy.float64) for t in trytypes: try: newd = d.astype(dtype=t) except: #Failure to cast, try next type continue if (newd == d).all(): #Values preserved, use this type d = newd #Continue with normal guessing, as if a list break else: #fell through without a match raise ValueError( 'Cannot convert generic objects to CDF type.') if d.dtype.kind in ('i', 'u'): #integer minval = numpy.min(d) maxval = numpy.max(d) if minval < 0: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_INT2, const.CDF_INT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 15, 2 ** 31, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] else: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_UINT1, const.CDF_INT2, const.CDF_UINT2, const.CDF_INT4, const.CDF_UINT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 8, 2 ** 15, 2 ** 16, 2 ** 31, 2 ** 32, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] types = [t for (t, c) in zip(types, cutoffs) if c > maxval and (minval >= 0 or minval >= -c)] if (not lib.supports_int8 or backward) \ and const.CDF_INT8 in types: del types[types.index(const.CDF_INT8)] else: #float if dims is (): if d != 0 and (abs(d) > 1.7e38 or abs(d) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] else: absolutes = numpy.abs(d[d != 0]) if len(absolutes) > 0 and \ (numpy.max(absolutes) > 1.7e38 or numpy.min(absolutes) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] types = [t.value if hasattr(t, 'value') else t for t in types] return (dims, types, elements) @staticmethod def reorder(seq): """Reorders seq to switch array majority Used to take an array of subscripts between row and column majority. First element is not touched, being the record number. @param seq: a sequence of *subscripts* @type seq: sequence of integers @return: seq with all but element 0 reversed in order @rtype: sequence of integers """ return numpy.concatenate((seq[0:1], numpy.flipud(seq)[:-1])) @staticmethod def convert_range(start, stop, step, size): """Converts a start/stop/step range to start/count/interval (i.e. changes from Python-style slice to CDF-style) @param start: index to start a slice at, may be none or negative @type start: int @param stop: index at end of slice (one-past, standard Python), may be none or negative @type stop: int @param step: interval for stepping through stlice @type step: int @param size: size of list to slice @type size: int @return: (start, count, interval, rev) where: 1. start is the start index, normalized to be within the size of the list and negatives handled 2. count is the number of records in the slice, guaranteed to stop before the end 3. interval is the skip between records 4. rev indicates whether the sequence should be reversed @rtype: (int, int, int, boolean) """ (start, stop, step) = slice(start, stop, step).indices(size) if step < 0: step *= -1 count = int((start - stop + step - 1) / step) start = start - (count - 1) * step rev = True else: count = int((stop - start + step - 1) / step) rev = False if count < 0: count = 0 start = 0 return (start, count, step, rev) class Attr(MutableSequence): """An attribute, g or z, for a CDF .. 
warning:: This class should not be used directly, but only in its subclasses, :class:`gAttr` and :class:`zAttr`. The methods listed here are safe to use in the subclasses. Represents a CDF attribute, providing access to the Entries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. An introduction to CDF attributes can be found in section 2.4 of the CDF user's guide. Each element of the list is a single Entry of the appropriate type. The index to the elements is the Entry number. Multi-dimensional slicing is *not* supported; an Entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry .. autosummary:: ~Attr.append ~Attr.has_entry ~Attr.insert ~Attr.max_idx ~Attr.new ~Attr.number ~Attr.rename ~Attr.type .. automethod:: append .. automethod:: has_entry .. automethod:: insert .. automethod:: max_idx .. automethod:: new .. automethod:: number .. automethod:: rename .. automethod:: type """ def __init__(self, cdf_file, attr_name, create=False): """Initialize this attribute @param cdf_file: CDF file containing this attribute @type cdf_file: :py:class:`pycdf.CDF` @param attr_name: Name of this attribute @type attr_name: str @param create: True to create attribute, False to look up existing. @type create: bool """ self._cdf_file = cdf_file self._raw = False if isinstance(attr_name, str_classes): try: self._name = attr_name.encode('ascii') except AttributeError: self._name = attr_name attrno = ctypes.c_long() if create: self._cdf_file._call(const.CREATE_, const.ATTR_, self._name, self.SCOPE, ctypes.byref(attrno)) self._cdf_file.add_attr_to_cache( self._name, attrno.value, self.SCOPE == const.GLOBAL_SCOPE) else: #Ensure exists, and populate cache. See scope note below attrno, scope = self._cdf_file.attr_num(self._name) else: name = ctypes.create_string_buffer(const.CDF_ATTR_NAME_LEN256 + 1) scope = ctypes.c_long(0) self._cdf_file._call(const.SELECT_, const.ATTR_, ctypes.c_long(attr_name)) #Because it's possible to create a gAttr Python objecting #referencing an Attribute with variable scope, and vice-versa, #do NOT assume the scope matches #(Higher level code checks for that being a bad thing.) self._cdf_file._call( const.GET_, const.ATTR_NAME_, name, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) self._name = name.value.rstrip() if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) self._cdf_file.add_attr_to_cache(self._name, attr_name, scope) def __getitem__(self, key): """Return a slice of Entries. Because Attributes may be sparse, a multi-element slice will return None for those elements which do not have associated Entries. @param key: index or range of Entry number to return @type key: slice or int @return: a list of entries, appropriate type. @raise IndexError: if L{key} is an int and that Entry number does not exist. 
""" if key is Ellipsis: key = slice(None, None, None) if hasattr(key, 'indices'): idx = range(*key.indices(self.max_idx() + 1)) return [self._get_entry(i) if self.has_entry(i) else None for i in idx] else: if self.has_entry(key): return self._get_entry(key) else: raise IndexError('list index ' + str(key) + ' out of range.') def _check_other_entries(self, types): """Try to get the type of this entry from others in the Attribute For zAttrs, checks if all other Entries are the same type, and at least one doesn't match its zVar, i.e. Entry type dominates (otherwise assumption is the Var type dominates). For gAttrs, checks all other Entries, and gives priority to the one that's earliest in the possible type list and exists in other Entries. This is only one component of Entry type guessing! :param list types: CDF types that are candidates (match the data) :return: The type discerned from other Entries, or None """ if self.ENTRY_ == const.zENTRY_: #If everything else is the same entry type, #and one is not the same as its var, probably #all entries should be of that type cand_et = None #The Entry type that might work one_var_diff = False #One Var has a type different from Entry for num in range(self.max_idx() + 1): if not self.has_entry(num): continue vartype = self._cdf_file[num].type() entrytype = self.type(num) if vartype != entrytype: one_var_diff = True if cand_et is None: if not entrytype in types: return None #One var has Entry with "impossible" type cand_et = entrytype elif cand_et != entrytype: return None #Two vars have Entries with different types if one_var_diff and cand_et is not None: return cand_et else: # Of those types which exist in other entries, # find the one which is earliest # in types, i.e. the preferred type entrytypes = [self.type(num) for num in range(self.max_idx() + 1) if self.has_entry(num)] entrytypes = [et for et in entrytypes if et in types] if entrytypes: return types[ min([types.index(et) for et in entrytypes])] return None def __setitem__(self, key, data): """Set a slice of Entries. @param key: index or range of Entry numbers to set @type key: slice or int @param data: the data to set these entries to. Normally each entry should be a sequence; if a scalar is provided, it is treated as a single-element list. @type data: scalar or list @raise ValueError: if size of {data} does not match size of L{key} @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. 
""" if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): #Single value, promote everything a dimension idx = (key, key + 1, 1) data = [data] else: idx = key.indices(self.max_idx() + 1) if key.step is None or key.step > 0: #Iterating forward, extend slice to match data if len(data) > len(range(*idx)): idx = (idx[0], idx[0] + idx[2] * len(data), idx[2]) #get, and check, types and sizes for all data #checks first so don't have error after changing half the Entries data_idx = -1 typelist = [] for i in range(*idx): data_idx += 1 if data_idx >= len(data): continue datum = data[data_idx] if datum is None: typelist[i] = (None, None, None) continue (dims, types, elements) = _Hyperslice.types( datum, backward=self._cdf_file.backward) if len(types) <= 0: raise ValueError('Cannot find a matching CDF type.') if len(dims) > 1: raise ValueError('Entries must be scalar or 1D.') elif len(dims) == 1 and isinstance(datum[0], str_classes): raise ValueError('Entry strings must be scalar.') entry_type = None if self.has_entry(i): #If the entry already exists, match its type entry_type = self.type(i) if not entry_type in types: entry_type = None if entry_type is None: #Check other entries for this attribute entry_type = self._check_other_entries(types) if entry_type is None and self.ENTRY_ == const.zENTRY_: #Fall back to zVar type vartype = self._cdf_file[i].type() if vartype in types: entry_type = vartype else: entry_type = types[0] elif entry_type is None: entry_type = types[0] if not entry_type in lib.numpytypedict: raise ValueError('Cannot find a matching numpy type.') typelist.append((dims, entry_type, elements)) data_idx = -1 for i in range(*idx): data_idx += 1 if data_idx >= len(data) or data[data_idx] is None: if self.has_entry(i): del self[i] continue datum = data[data_idx] (dims, entry_type, elements) = typelist[data_idx] self._write_entry(i, datum, entry_type, dims, elements) def __delitem__(self, key): """Delete a slice of Entries. @param key: index or range of Entry numbers to delete @type key: slice or int @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. """ if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): idx = (key, key + 1, 1) else: idx = key.indices(self.max_idx() + 1) for i in range(*idx): self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(i), const.DELETE_, self.ENTRY_) def __iter__(self, current=0): """Iterates over all entries in this Attribute Returns data from one entry at a time until reaches the end. @note: Returned in entry-number order. """ while current <= self.max_idx(): if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current += 1 def __reversed__(self, current=None): """Iterates over all entries in this Attribute Returns data from one entry at a time, starting at end and going to beginning. @note: Returned in entry-number order. """ if current is None: current = self.max_idx() while current >= 0: if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current -= 1 def __len__(self): """Number of Entries for this Attr. NOT same as max Entry number. 
@return: Number of Entries @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_NUMENTRIES_, ctypes.byref(count)) return count.value def __repr__(self): """Returns representation of an attribute Cannot return anything that can be eval'd to create a copy of the attribtute, so just wrap the informal representation in angle brackets. @return: all the data in this attribute @rtype: str """ return '<\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute This is an 'informal' representation in that it cannot be evaluated directly to create an L{Attr}. @return: all the data in this attribute @rtype: str """ if self._cdf_file._opened: return '\n'.join([str(item) for item in self]) else: if isinstance(self._name, str): return 'Attribute "{0}" in closed CDF {1}'.format( self._name, self._cdf_file.pathname) else: return 'Attribute "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self._cdf_file.pathname.decode('ascii')) def insert(self, index, data): """Insert an entry at a particular number Inserts entry at particular number while moving all subsequent entries to one entry number later. Does not close gaps. Parameters ========== index : int index where to put the new entry data : data for the new entry """ max_entry = self.max_idx() if index > max_entry: #Easy case self[index] = data return for i in range(max_entry, index - 1, -1): if self.has_entry(i+1): self.__delitem__(i+1) if self.has_entry(i): self.new(self.__getitem__(i), type=self.type(i), number=i+1) self[index] = data def append(self, data): """Add an entry to end of attribute Puts entry after last defined entry (does not fill gaps) Parameters ========== data : data for the new entry """ self[self.max_idx() + 1] = data def _call(self, *args, **kwargs): """Select this CDF and Attr and call the CDF internal interface @param args: Passed directly to the CDF library interface. @type args: various, see :py:mod:`ctypes`. @return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self._cdf_file._call( const.SELECT_, const.ATTR_, ctypes.c_long(self._cdf_file.attr_num(self._name)[0]), *args, **kwargs) def _entry_len(self, number): """Number of elements in an Entry @param number: number of Entry @type number: int @return: number of elements @rtype: int """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') count = ctypes.c_long(0) self._call( const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_NUMELEMS_, ctypes.byref(count)) return count.value def type(self, number, new_type=None): """Find or change the CDF type of a particular Entry number Parameters ========== number : int number of Entry to check or change Other Parameters ================ new_type type to change this Entry to, from :mod:`~pycdf.const`. Omit to only check type. Returns ======= out : int CDF variable type, see :mod:`~pycdf.const` Notes ===== If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) size = ctypes.c_long(self._entry_len(number)) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATASPEC_, new_type, size, ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') cdftype = ctypes.c_long(0) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATATYPE_, ctypes.byref(cdftype), ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') return cdftype.value def has_entry(self, number): """Check if this attribute has a particular Entry number Parameters ========== number : int number of Entry to check or change Returns ======= out : bool True if ``number`` is a valid entry number; False if not """ status = self._call(const.CONFIRM_, self.ENTRY_EXISTENCE_, ctypes.c_long(number), ignore=(const.NO_SUCH_ENTRY, )) return not status == const.NO_SUCH_ENTRY def max_idx(self): """Maximum index of Entries for this Attr Returns ======= out : int maximum Entry number """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_MAXENTRY_, ctypes.byref(count)) return count.value def new(self, data, type=None, number=None): """Create a new Entry in this Attribute .. note:: If ``number`` is provided and an Entry with that number already exists, it will be overwritten. Parameters ========== data data to put in the Entry Other Parameters ================ type : int type of the new Entry, from :mod:`~pycdf.const` (otherwise guessed from ``data``) number : int Entry number to write, default is lowest available number. """ if number is None: number = 0 while self.has_entry(number): number += 1 (dims, types, elements) = _Hyperslice.types( data, backward=self._cdf_file.backward) if type is None: #Guess based on other entries type = self._check_other_entries(types) if type is None and self.ENTRY_ == const.zENTRY_: #Try to match variable type vartype = self._cdf_file[number].type() if vartype in types: type = vartype if type is None: type = types[0] elif hasattr(type, 'value'): type = type.value self._write_entry(number, data, type, dims, elements) def number(self): """Find the attribute number for this attribute Returns ======= out : int attribute number """ no = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.ATTR_NUMBER_, self._name, ctypes.byref(no)) return no.value def global_scope(self): """Determine scope of this attribute. Returns ======= out : bool True if global (i.e. gAttr), False if zAttr """ return self._cdf_file.attr_num(self._name)[1] def rename(self, new_name): """Rename this attribute Renaming a zAttribute renames it for *all* zVariables in this CDF! 
Parameters ========== new_name : str the new name of the attribute """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_ATTR_NAME_LEN256: raise CDFError(const.BAD_ATTR_NAME) self._call(const.PUT_, const.ATTR_NAME_, enc_name) self._cdf_file.add_attr_to_cache( enc_name, *self._cdf_file.attr_num(self._name)) #still in cache del self._cdf_file._attr_info[self._name] self._name = enc_name def _get_entry(self, number): """Read an Entry associated with this L{Attr} @param number: number of Entry to return @type number: int @return: data from entry numbered L{number} @rtype: list or str """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') #Make a big enough buffer length = self._entry_len(number) cdftype = self.type(number) if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): buff = numpy.empty((), 'S{0}'.format(length), order='C') else: if not cdftype in lib.numpytypedict: raise CDFError(const.BAD_DATA_TYPE) buff = numpy.empty((length,), lib.numpytypedict[cdftype], order='C') buff = numpy.require(buff, requirements=('C', 'A', 'W')) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATA_, buff.ctypes.data_as(ctypes.c_void_p)) #decode if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): if str == bytes or self._raw: #Py2k, leave as bytes result = bytes(buff) else: #Py3k, make unicode result = str(numpy.char.array(buff).decode()) else: if not self._raw: if cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(buff) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(buff) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(buff) else: result = buff else: result = buff if length == 1: result = result[0] return result def _write_entry(self, number, data, cdf_type, dims, elements): """Write an Entry to this Attr. @param number: number of Entry to write @type number: int @param data: data to write @param cdf_type: the CDF type to write, from :py:mod:`pycdf.const` @param dims: dimensions of L{data} @type dims: list @param elements: number of elements in L{data}, 1 unless it is a string @type elements: int """ if len(dims) == 0: n_write = 1 else: n_write = dims[0] if cdf_type in (const.CDF_CHAR.value, const.CDF_UCHAR.value): data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.dtype('S' + str(elements))) n_write = elements elif cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data), except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) elif cdf_type in lib.numpytypedict: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=lib.numpytypedict[cdf_type]) else: raise CDFError(const.BAD_DATA_TYPE) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATA_, ctypes.c_long(cdf_type), ctypes.c_long(n_write), data.ctypes.data_as(ctypes.c_void_p)) def _delete(self): """Delete this Attribute Also deletes all Entries associated with it. 
""" self._call(const.DELETE_, const.ATTR_) self._cdf_file.clear_attr_from_cache(self._name) self._name = None class zAttr(Attr): """zAttribute for zVariables within a CDF. .. warning:: Because zAttributes are shared across all variables in a CDF, directly manipulating them may have unexpected consequences. It is safest to operate on zEntries via :class:`zAttrList`. .. note:: When accessing a zAttr, pyCDF exposes only the zEntry corresponding to the associated zVariable. See Also ======== :class:`Attr` """ ENTRY_ = const.zENTRY_ ENTRY_DATA_ = const.zENTRY_DATA_ SCOPE = const.VARIABLE_SCOPE ENTRY_EXISTENCE_ = const.zENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMzENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXzENTRY_ ENTRY_NUMELEMS_ = const.zENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.zENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.zENTRY_DATASPEC_ def insert(self, index, data): """Insert entry at particular index number Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError def append(self, index, data): """Add entry to end of attribute list Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError class gAttr(Attr): """Global Attribute for a CDF Represents a CDF attribute, providing access to the gEntries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. Normally accessed by providing a key to a :class:`gAttrList`: >>> attribute = cdffile.attrs['attribute_name'] >>> first_gentry = attribute[0] Each element of the list is a single gEntry of the appropriate type. The index to the elements is the gEntry number. A gEntry may be either a single string or a 1D array of numerical type. Entries of numerical type (everything but CDF_CHAR and CDF_UCHAR) with a single element are returned as scalars; multiple-element entries are returned as a list. No provision is made for accessing below the entry level; the whole list is returned at once (but Python's slicing syntax can be used to extract individual items from that list.) Multi-dimensional slicing is *not* supported; an entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry gEntries are *not* necessarily contiguous; a gAttribute may have an entry 0 and entry 2 without an entry 1. :meth:`~Attr.len` will return the *number* of gEntries; use :meth:`~Attr.max_idx` to find the highest defined gEntry number and :meth:`~Attr.has_entry` to determine if a particular gEntry number exists. Iterating over all entries is also supported:: >>> entrylist = [entry for entry in attribute] Deleting gEntries will leave a "hole": >>> attribute[0:3] = [1, 2, 3] >>> del attribute[1] >>> attribute.has_entry(1) False >>> attribute.has_entry(2) True >>> print attribute[0:3] [1, None, 3] Multi-element slices over nonexistent gEntries will return ``None`` where no entry exists. Single-element indices for nonexistent gEntries will raise ``IndexError``. Assigning ``None`` to a gEntry will delete it. 
When assigning to a gEntry, the type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing gEntry of the same number in this gAttribute #. other gEntries in this gAttribute #. data-matching constraints described in :meth:`CDF.new`. See Also ======== :class:`Attr` """ ENTRY_ = const.gENTRY_ ENTRY_DATA_ = const.gENTRY_DATA_ SCOPE = const.GLOBAL_SCOPE ENTRY_EXISTENCE_ = const.gENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMgENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXgENTRY_ ENTRY_NUMELEMS_ = const.gENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.gENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.gENTRY_DATASPEC_ class AttrList(MutableMapping): """Object representing a list of attributes. .. warning:: This class should not be used directly, but only via its subclasses, :class:`gAttrList` and :class:`zAttrList`. Methods listed here are safe to use from the subclasses. .. autosummary:: ~AttrList.clone ~AttrList.copy ~AttrList.from_dict ~AttrList.new ~AttrList.rename .. automethod:: clone .. automethod:: copy .. automethod:: from_dict .. automethod:: new .. automethod:: rename """ def __init__(self, cdf_file, special_entry=None): """Initialize the attribute collection @param cdf_file: CDF these attributes are in @type cdf_file: :py:class:`pycdf.CDF` @param special_entry: callable which returns a "special" entry number, used to limit results for zAttrs to those which match the zVar (i.e. the var number) @type special_entry: callable """ self._cdf_file = cdf_file self.special_entry = special_entry def __getitem__(self, name): """Find an Attribute by name @param name: name of the Attribute to return @type name: str @return: attribute named L{name} @rtype: L{Attr} @raise KeyError: if there is no attribute named L{name} @raise CDFError: other errors in CDF library """ try: attrib = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attrib.global_scope() != self.global_scope: raise KeyError(name + ': no ' + self.attr_name + ' by that name.') return attrib def __setitem__(self, name, data): """Create an Attribute or change its entries @param name: name of Attribute to change @type name: str @param data: Entries to populate this Attribute with. Any existing Entries will be deleted! Another C{Attr} may be specified, in which case all its entries are copied. @type data: scalar, list, or L{Attr} """ if isinstance(data, AttrList): if name in self: del self[name] attr = self._get_or_create(name) for entryno in range(data.max_idx()): if data.has_entry(entryno): attr.new(data[entryno], data.type(entryno), entryno) else: attr = self._get_or_create(name) if isinstance(data, str_classes): data = [data] else: try: junk = len(data) except TypeError: data = [data] attr[:] = data del attr[len(data):] def __delitem__(self, name): """Delete an Attribute (and all its entries) @param name: name of Attribute to delete @type name: str """ try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) attr._delete() def __iter__(self, current=0): """Iterates over all Attr in this CDF or variable Returns name of one L{Attr} at a time until reaches the end. @note: Returned in number order. 
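Example (sketch; iteration yields attribute names as strings, in number order):

>>> names = [name for name in cdffile.attrs]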
""" count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) while current < count.value: candidate = self.AttrType(self._cdf_file, current) if candidate.global_scope() == self.global_scope: if self.special_entry is None or \ candidate.has_entry(self.special_entry()): if str == bytes: value = yield(candidate._name) else: value = yield(candidate._name.decode()) if value != None: current = self[value].number() current += 1 def __repr__(self): """Returns representation of attribute list Cannot return anything that can be eval'd to create a copy of the list, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<' + self.__class__.__name__ + ':\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute list This is an 'informal' representation in that it cannot be evaluated directly to create an L{AttrList}. @return: all the data in this list of attributes @rtype: str """ if self._cdf_file._opened: return '\n'.join([key + ': ' + ( ('\n' + ' ' * (len(key) + 2)).join( [str(value[i]) + ' [' + lib.cdftypenames[value.type(i)] + ']' for i in range(value.max_idx() + 1) if value.has_entry(i)]) if isinstance(value, Attr) else str(value) + ' [' + lib.cdftypenames[self.type(key)] + ']' ) for (key, value) in sorted(self.items())]) else: if isinstance(self._cdf_file.pathname, str): return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname) else: return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname.decode('ascii')) def clone(self, master, name=None, new_name=None): """ Clones another attribute list, or one attribute from it, into this list. Parameters ========== master : AttrList the attribute list to copy from. This can be any dict-like object. Other Parameters ================ name : str (optional) name of attribute to clone (default: clone entire list) new_name : str (optional) name of the new attribute, default ``name`` """ if name is None: self._clone_list(master) else: self._clone_attr(master, name, new_name) def copy(self): """ Create a copy of this attribute list Returns ======= out : dict copy of the entries for all attributes in this list """ return dict((key, value[:] if isinstance(value, Attr) else value) for (key, value) in self.items()) def new(self, name, data=None, type=None): """ Create a new Attr in this AttrList Parameters ========== name : str name of the new Attribute Other Parameters ================ data data to put into the first entry in the new Attribute type CDF type of the first entry from :mod:`~pycdf.const`. Only used if data are specified. Raises ====== KeyError : if the name already exists in this list """ if name in self: raise KeyError(name + ' already exists.') #A zAttr without an Entry in this zVar will be a "get" not "create" attr = self._get_or_create(name) if data is not None: if self.special_entry is None: attr.new(data, type) else: attr.new(data, type, self.special_entry()) def rename(self, old_name, new_name): """ Rename an attribute in this list Renaming a zAttribute renames it for *all* zVariables in this CDF! Parameters ========== old_name : str the current name of the attribute new_name : str the new name of the attribute """ AttrList.__getitem__(self, old_name).rename(new_name) def from_dict(self, in_dict): """ Fill this list of attributes from a dictionary .. deprecated:: 0.1.5 Use :meth:`~pycdf.AttrList.clone` instead; it supports cloning from dictionaries. 
Parameters ========== in_dict : dict Attribute list is populated entirely from this dictionary; all existing attributes are deleted. """ warnings.warn("from_dict is deprecated and will be removed. Use clone.", DeprecationWarning) for k in in_dict: self[k] = in_dict[k] for k in list(self): if not k in in_dict: del self[k] def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{AttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name self[new_name] = master[name] def _clone_list(self, master): """Clones this attribute list from another @param master: the attribute list to copy from @type master: L{AttrList} """ for name in master: self._clone_attr(master, name) for name in list(self): #Can't iterate over a list we're changing if not name in master: del self[name] def _get_or_create(self, name): """Retrieve L{Attr} or create it if it doesn't exist @param name: name of the attribute to look up or create @type name: str @return: attribute with this name @rtype: L{Attr} """ attr = None try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status != const.NO_SUCH_ATTR: raise if attr is None: attr = self.AttrType(self._cdf_file, name, True) elif attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) return attr class gAttrList(AttrList): """ Object representing *all* the gAttributes in a CDF. Normally accessed as an attribute of an open :class:`CDF`: >>> global_attribs = cdffile.attrs Appears as a dictionary: keys are attribute names; each value is an attribute represented by a :class:`gAttr` object. To access the global attribute TEXT: >>> text_attr = cdffile.attrs['TEXT'] See Also ======== :class:`AttrList` """ AttrType = gAttr attr_name = 'gAttribute' global_scope = True def __len__(self): """ Number of gAttributes in this CDF Returns ======= out : int number of gAttributes in the CDF """ count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMgATTRS_, ctypes.byref(count)) return count.value class zAttrList(AttrList): """Object representing *all* the zAttributes in a zVariable. Normally accessed as an attribute of a :class:`Var` in an open CDF: >>> epoch_attribs = cdffile['Epoch'].attrs Appears as a dictionary: keys are attribute names, values are the value of the zEntry associated with the appropriate zVariable. Each vAttribute in a CDF may only have a *single* entry associated with each variable. The entry may be a string, a single numerical value, or a series of numerical values. Entries with multiple values are returned as an entire list; direct access to the individual elements is not possible. Example: finding the first dependency of (ISTP-compliant) variable ``Flux``: >>> print cdffile['Flux'].attrs['DEPEND_0'] zAttributes are shared among zVariables, one zEntry allowed per zVariable. (pyCDF hides this detail.) Deleting the last zEntry for a zAttribute will delete the underlying zAttribute. zEntries are created and destroyed by the usual dict methods on the zAttrlist: >>> epoch_attribs['new_entry'] = [1, 2, 4] #assign a list to new zEntry >>> del epoch_attribs['new_entry'] #delete the zEntry The type of the zEntry is guessed from data provided. 
The type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing zEntry corresponding to this zVar #. other zEntries in this zAttribute #. the type of this zVar #. data-matching constraints described in :py:meth:`CDF.new` See Also ======== :class:`AttrList` """ AttrType = zAttr attr_name = 'zAttribute' global_scope = False def __init__(self, zvar): """Initialize the attribute collection @param zvar: zVariable these attributes are in @param zvar: :py:class:`pycdf.Var` """ super(zAttrList, self).__init__(zvar.cdf_file, zvar._num) self._zvar = zvar def __getitem__(self, name): """Find an zEntry by name @param name: name of the zAttribute to return @type name: str @return: attribute named L{name} @rtype: L{zAttr} @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if attrib.has_entry(zvar_num): attrib._raw = self._zvar._raw return attrib[zvar_num] else: raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) def __delitem__(self, name): """Delete an zEntry by name @param name: name of the zEntry to delete @type name: str @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library @note: If this is the only remaining entry, the Attribute will be deleted. """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(str(name) + ': no such attribute for variable ' + str(self._zvar._name)) del attrib[zvar_num] if len(attrib) == 0: attrib._delete() def __setitem__(self, name, data): """Sets a zEntry by name The type of the zEntry is guessed from L{data}. The type is chosen to match the data; subject to that constraint, it will try to match (in order): 1. existing zEntry corresponding to this zVar 2. other zEntries in this zAttribute 3. the type of this zVar 4. data-matching constraints described in L{_Hyperslice.types} @param name: name of zAttribute; zEntry for this zVariable will be set in zAttribute by this name @type name: str @raise CDFError: errors in CDF library @raise ValueError: if unable to find a valid CDF type matching L{data}, or if L{data} is the wrong dimensions. """ try: attr = super(zAttrList, self).__getitem__(name) except KeyError: attr = zAttr(self._cdf_file, name, True) attr._raw = self._zvar._raw attr[self._zvar._num()] = data def __len__(self): """Number of zAttributes in this variable @return: number of zAttributes in the CDF which have entries for this variable. @rtype: int """ length = 0 count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) current = 0 while current < count.value: candidate = zAttr(self._cdf_file, current) if not candidate.global_scope(): if candidate.has_entry(self._zvar._num()): length += 1 current += 1 return length def type(self, name, new_type=None): """Find or change the CDF type of a zEntry in this zVar @param name: name of the zAttr to check or change @type name: str @param new_type: type to change it to, see :py:mod:`pycdf.const` @type new_type: ctypes.c_long @return: CDF variable type, see :py:mod:`pycdf.const` @rtype: int @note: If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) return attrib.type(zvar_num, new_type) def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{zAttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name if new_name in self: del self[new_name] self.new(new_name, master[name], master.type(name) if hasattr(master, 'type') else None)
_datetime_to_tt2000_typepunned
Converts a Python datetime to a CDF TT2000 value Typepunned version that passes doubles as longlongs, to get around ARM calling convention oddness. Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000
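# Illustrative sketch only — not the masked implementation. It assumes the
# same ctypes pointer-cast pattern that Library.__init__ uses when probing
# computeTT2000 on ARM: every double argument is type-punned through a
# long long pointer so the variadic calling convention is honored, and the
# datetime handling mirrors datetime_to_tt2000.
def _datetime_to_tt2000_typepunned_sketch(self, dt):
    c_ll_p = ctypes.POINTER(ctypes.c_longlong)
    # Treat an aware datetime as UTC, as datetime_to_tt2000 does
    if dt.tzinfo is not None and dt.utcoffset() is not None:
        dt = dt - dt.utcoffset()
        dt = dt.replace(tzinfo=None)
    if dt == datetime.datetime.max:
        return -2 ** 63
    # Pun each double argument to a long long before the variadic call
    punned = [ctypes.cast(ctypes.pointer(ctypes.c_double(v)), c_ll_p).contents
              for v in (dt.year, dt.month, dt.day, dt.hour, dt.minute,
                        dt.second, int(dt.microsecond / 1000),
                        dt.microsecond % 1000, 0)]
    return self._library.computeTT2000(*punned)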
#!/usr/bin/env python # -*- coding: utf-8 -*- """ das developers note: This a is modification of the original SpacePy pycdf package. All refereneces to the greater spacepy package have been removed to create a small standalone module. --cwp 2018-10-18 The libcdf.so location code has been changed to find the version installed in anaconda. --cwp 2020-04-06 This package provides a Python interface to the Common Data Format (CDF) library used for many NASA missions, available at http://cdf.gsfc.nasa.gov/. It is targeted at Python 2.6+ and should work without change on either Python 2 or Python 3. The interface is intended to be 'pythonic' rather than reproducing the C interface. To open or close a CDF and access its variables, see the :class:`CDF` class. Accessing data within the variables is via the :class:`Var` class. The :data:`lib` object provides access to some routines that affect the functionality of the library in general. The :mod:`~pycdf.const` module contains constants useful for accessing the underlying library. Authors: Jon Niehof Institution: University of New Hampshire Contact: [email protected] Copyright 2010-2015 Los Alamos National Security, LLC. """ __contact__ = 'Jon Niehof, [email protected]' try: from collections.abc import MutableMapping, MutableSequence except ImportError: from collections import MutableMapping, MutableSequence import ctypes import ctypes.util import datetime import operator import os import os.path import shutil import sys import tempfile import warnings import weakref import numpy import numpy.ma #Import const AFTER library loaded, so failed load doesn't leave half-imported #from . import const try: str_classes = (str, bytes, unicode) except NameError: str_classes = (str, bytes) class Library(object): """ Abstraction of the base CDF C library and its state. Not normally intended for end-user use. An instance of this class is created at package load time as the :data:`~pycdf.lib` variable, providing access to the underlying C library if necessary. The CDF library itself is described in section 2.1 of the CDF user's guide, as well as the CDF C reference manual. Calling the C library directly requires knowledge of :mod:`ctypes`. Instantiating this object loads the C library, see :doc:`/pycdf` docs for details. .. autosummary:: ~Library.call ~Library.check_status ~Library.datetime_to_epoch ~Library.datetime_to_epoch16 ~Library.datetime_to_tt2000 ~Library.epoch_to_datetime ~Library.epoch_to_epoch16 ~Library.epoch_to_num ~Library.epoch_to_tt2000 ~Library.epoch16_to_datetime ~Library.epoch16_to_epoch ~Library.epoch16_to_tt2000 ~Library.set_backward supports_int8 ~Library.tt2000_to_datetime ~Library.tt2000_to_epoch ~Library.tt2000_to_epoch16 v_datetime_to_epoch v_datetime_to_epoch16 v_datetime_to_tt2000 v_epoch_to_datetime v_epoch_to_tt2000 v_epoch16_to_datetime v_epoch16_to_tt2000 v_tt2000_to_datetime v_tt2000_to_epoch v_tt2000_to_epoch16 libpath version .. automethod:: call .. automethod:: check_status .. automethod:: datetime_to_epoch .. automethod:: datetime_to_epoch16 .. automethod:: datetime_to_tt2000 .. automethod:: epoch_to_datetime .. automethod:: epoch_to_epoch16 .. automethod:: epoch_to_num .. automethod:: epoch_to_tt2000 .. automethod:: epoch16_to_datetime .. automethod:: epoch16_to_epoch .. automethod:: epoch16_to_tt2000 .. automethod:: set_backward .. attribute:: supports_int8 True if this library supports INT8 and TIME_TT2000 types; else False. .. automethod:: tt2000_to_datetime .. automethod:: tt2000_to_epoch .. 
automethod:: tt2000_to_epoch16 .. method:: v_datetime_to_epoch(datetime) A vectorized version of :meth:`datetime_to_epoch` which takes a numpy array of datetimes as input and returns an array of epochs. .. method:: v_datetime_to_epoch16(datetime) A vectorized version of :meth:`datetime_to_epoch16` which takes a numpy array of datetimes as input and returns an array of epoch16. .. method:: v_datetime_to_tt2000(datetime) A vectorized version of :meth:`datetime_to_tt2000` which takes a numpy array of datetimes as input and returns an array of TT2000. .. method:: v_epoch_to_datetime(epoch) A vectorized version of :meth:`epoch_to_datetime` which takes a numpy array of epochs as input and returns an array of datetimes. .. method:: v_epoch_to_tt2000(epoch) A vectorized version of :meth:`epoch_to_tt2000` which takes a numpy array of epochs as input and returns an array of tt2000s. .. method:: v_epoch16_to_datetime(epoch0, epoch1) A vectorized version of :meth:`epoch16_to_datetime` which takes a numpy array of epoch16 as input and returns an array of datetimes. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_epoch16_to_tt2000(epoch16) A vectorized version of :meth:`epoch16_to_tt2000` which takes a numpy array of epoch16 as input and returns an array of tt2000s. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_tt2000_to_datetime(tt2000) A vectorized version of :meth:`tt2000_to_datetime` which takes a numpy array of tt2000 as input and returns an array of datetimes. .. method:: v_tt2000_to_epoch(tt2000) A vectorized version of :meth:`tt2000_to_epoch` which takes a numpy array of tt2000 as input and returns an array of epochs. .. method:: v_tt2000_to_epoch16(tt2000) A vectorized version of :meth:`tt2000_to_epoch16` which takes a numpy array of tt2000 as input and returns an array of epoch16. .. attribute:: libpath The path where pycdf found the CDF C library, potentially useful in debugging. If this contains just the name of a file (with no path information), then the system linker found the library for pycdf. On Linux, ``ldconfig -p`` may be useful for displaying the system's library resolution. .. attribute:: version Version of the CDF library, (version, release, increment, subincrement) """ def __init__(self, libpath=None, library=None): """Load the CDF C library. Searches for the library in the order: 1. Appropriately-named file in CDF_LIB 2. Appropriately-named file in CDF_BASE 3. Standard library search path @raise CDFError: BAD_DATA_TYPE if can't map types properly """ if not 'CDF_TMP' in os.environ: os.environ['CDF_TMP'] = tempfile.gettempdir() if not library: if not libpath: self.libpath, self._library = self._find_lib() if self._library is None: raise Exception(( 'Cannot load CDF C library; checked {0}. 
' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(self.libpath))) else: self._library = ctypes.CDLL(libpath) self.libpath = libpath else: self._library = library self.libpath = libpath self._library.CDFlib.restype = ctypes.c_long #commonly used, so set it up here self._library.EPOCHbreakdown.restype = ctypes.c_long self._library.computeEPOCH.restype = ctypes.c_double self._library.computeEPOCH.argtypes = [ctypes.c_long] * 7 self._library.computeEPOCH16.restype = ctypes.c_double self._library.computeEPOCH16.argtypes = [ctypes.c_long] * 10 + \ [ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDFsetFileBackward'): self._library.CDFsetFileBackward.restype = None self._library.CDFsetFileBackward.argtypes = [ctypes.c_long] #Map old name to the 3.7.1+ name if not hasattr(self._library, 'computeTT2000') \ and hasattr(self._library, 'CDF_TT2000_from_UTC_parts'): self._library.computeTT2000 \ = self._library.CDF_TT2000_from_UTC_parts if hasattr(self._library, 'computeTT2000'): self._library.computeTT2000.restype = ctypes.c_longlong self._library.computeTT2000.argtypes = \ [ctypes.c_double] *9 #Map old name to the 3.7.1+ name if not hasattr(self._library, 'breakdownTT2000') \ and hasattr(self._library, 'CDF_TT2000_to_UTC_parts'): self._library.breakdownTT2000 \ = self._library.CDF_TT2000_to_UTC_parts if hasattr(self._library, 'breakdownTT2000'): self._library.breakdownTT2000.restype = None self._library.breakdownTT2000.argtypes = \ [ctypes.c_longlong] + [ctypes.POINTER(ctypes.c_double)] * 9 if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH'): self._library.CDF_TT2000_to_UTC_EPOCH.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH.argtypes = [ctypes.c_longlong] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH'): self._library.CDF_TT2000_from_UTC_EPOCH.restype = ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH.argtypes = [ctypes.c_double] if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH16'): self._library.CDF_TT2000_to_UTC_EPOCH16.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH16.argtypes = \ [ctypes.c_longlong, ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH16'): self._library.CDF_TT2000_from_UTC_EPOCH16.restype = \ ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH16.argtypes = \ [ctypes.POINTER(ctypes.c_double * 2)] #Get CDF version information ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) sub = ctypes.c_char(b' ') self.call(const.GET_, const.LIB_VERSION_, ctypes.byref(ver), const.GET_, const.LIB_RELEASE_, ctypes.byref(rel), const.GET_, const.LIB_INCREMENT_, ctypes.byref(inc), const.GET_, const.LIB_subINCREMENT_, ctypes.byref(sub)) ver = ver.value rel = rel.value inc = inc.value sub = sub.value self.version = (ver, rel, inc, sub) self._del_middle_rec_bug = ver < 3 or (ver == 3 and (rel < 4 or (rel == 4 and inc < 1))) self.supports_int8 = (ver > 3 or (ver == 3 and rel >=4)) self.cdftypenames = {const.CDF_BYTE.value: 'CDF_BYTE', const.CDF_CHAR.value: 'CDF_CHAR', const.CDF_INT1.value: 'CDF_INT1', const.CDF_UCHAR.value: 'CDF_UCHAR', const.CDF_UINT1.value: 'CDF_UINT1', const.CDF_INT2.value: 'CDF_INT2', const.CDF_UINT2.value: 'CDF_UINT2', const.CDF_INT4.value: 'CDF_INT4', const.CDF_UINT4.value: 'CDF_UINT4', const.CDF_INT8.value: 'CDF_INT8', const.CDF_FLOAT.value: 'CDF_FLOAT', const.CDF_REAL4.value: 'CDF_REAL4', const.CDF_DOUBLE.value: 'CDF_DOUBLE', const.CDF_REAL8.value: 'CDF_REAL8', const.CDF_EPOCH.value: 'CDF_EPOCH', 
const.CDF_EPOCH16.value: 'CDF_EPOCH16', const.CDF_TIME_TT2000.value: 'CDF_TIME_TT2000', } self.numpytypedict = {const.CDF_BYTE.value: numpy.int8, const.CDF_CHAR.value: numpy.int8, const.CDF_INT1.value: numpy.int8, const.CDF_UCHAR.value: numpy.uint8, const.CDF_UINT1.value: numpy.uint8, const.CDF_INT2.value: numpy.int16, const.CDF_UINT2.value: numpy.uint16, const.CDF_INT4.value: numpy.int32, const.CDF_UINT4.value: numpy.uint32, const.CDF_INT8.value: numpy.int64, const.CDF_FLOAT.value: numpy.float32, const.CDF_REAL4.value: numpy.float32, const.CDF_DOUBLE.value: numpy.float64, const.CDF_REAL8.value: numpy.float64, const.CDF_EPOCH.value: numpy.float64, const.CDF_EPOCH16.value: numpy.dtype((numpy.float64, 2)), const.CDF_TIME_TT2000.value: numpy.int64, } self.timetypes = [const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value] if not self.supports_int8: del self.cdftypenames[const.CDF_INT8.value] del self.numpytypedict[const.CDF_INT8.value] del self.cdftypenames[const.CDF_TIME_TT2000.value] del self.numpytypedict[const.CDF_TIME_TT2000.value] elif sys.platform.startswith('linux') \ and os.uname()[4].startswith('arm') \ and hasattr(self._library, 'computeTT2000') \ and self._library.computeTT2000( 2010, 1, 1, 0, 0, 0, 0, 0, 0) != 315576066184000000: #TT2000 call failed, so probably need to type-pun #double arguments to variadic functions. #Calling convention for non-variadic functions with floats #is unique, but convention for ints is same as variadic. #So type-pun arguments to integers to force that calling #convention. if ctypes.sizeof(ctypes.c_longlong) != \ ctypes.sizeof(ctypes.c_double): warnings.warn('ARM with unknown type sizes; ' 'TT2000 functions will not work.') else: self._library.computeTT2000.argtypes = \ [ctypes.c_longlong] * 9 c_ll_p = ctypes.POINTER(ctypes.c_longlong) if self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( 2010)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) != 315576066184000000: warnings.warn('ARM with unknown calling convention; ' 'TT2000 functions will not work.') self.datetime_to_tt2000 = self._datetime_to_tt2000_typepunned v_epoch16_to_datetime = numpy.frompyfunc( self.epoch16_to_datetime, 2, 1) self.v_epoch16_to_datetime = \ lambda x: v_epoch16_to_datetime(x[..., 0], x[..., 1]) self.v_epoch_to_datetime = numpy.frompyfunc( self.epoch_to_datetime, 1, 1) self.v_tt2000_to_datetime = numpy.frompyfunc( self.tt2000_to_datetime, 1, 1) self.v_datetime_to_epoch = numpy.vectorize( self.datetime_to_epoch, otypes=[numpy.float64]) v_datetime_to_epoch16 = numpy.frompyfunc( self.datetime_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. 
We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_datetime_to_epoch16(x): retval = numpy.require(v_datetime_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_datetime_to_epoch16 = _v_datetime_to_epoch16 self.v_datetime_to_tt2000 = numpy.vectorize( self.datetime_to_tt2000, otypes=[numpy.int64]) self.v_epoch_to_tt2000 = numpy.vectorize( self.epoch_to_tt2000, otypes=[numpy.int64]) self.v_tt2000_to_epoch = numpy.vectorize( self.tt2000_to_epoch, otypes=[numpy.float64]) v_epoch16_to_tt2000 = numpy.frompyfunc( self.epoch16_to_tt2000, 2, 1) self.v_epoch16_to_tt2000 = \ lambda x: v_epoch16_to_tt2000(x[..., 0], x[..., 1]) v_tt2000_to_epoch16 = numpy.frompyfunc( self.tt2000_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_tt2000_to_epoch16(x): retval = numpy.require(v_tt2000_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_tt2000_to_epoch16 = _v_tt2000_to_epoch16 if not self.supports_int8: self.datetime_to_tt2000 = self._bad_tt2000 self.tt2000_to_datetime = self._bad_tt2000 self.v_datetime_to_tt2000 = self._bad_tt2000 self.v_tt2000_to_datetime = self._bad_tt2000 self.epoch_to_tt2000 = self._bad_tt2000 self.v_epoch_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch = self._bad_tt2000 self.v_tt2000_to_epoch = self._bad_tt2000 self.epoch_16_to_tt2000 = self._bad_tt2000 self.v_epoch16_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch16 = self._bad_tt2000 self.v_tt2000_to_epoch16 = self._bad_tt2000 #Default to V2 CDF self.set_backward(True) @staticmethod def _find_lib(): """ Search for the CDF library Searches in likely locations for CDF libraries and attempts to load them. Stops at first successful load and, if fails, reports all the files that were tried as libraries. Returns ======= out : tuple This is either (path to library, loaded library) or, in the event of failure, (None, list of libraries tried) """ failed = [] for libpath in Library._lib_paths(): try: lib = ctypes.CDLL(libpath) except: failed.append(libpath) else: return libpath, lib return (failed, None) @staticmethod def _lib_paths(): """Find candidate paths for the CDF library Does not check that the library is actually in any particular directory, just returns a list of possible locations, in priority order. Returns ======= out : generator of str paths that look like the CDF library """ #What the library might be named names = { 'win32': ['cdf.dll'], 'darwin': ['libcdf.dylib', 'cdf.dylib', 'libcdf.so'], 'linux2': ['libcdf.so'], 'linux': ['libcdf.so'], } names = names.get(sys.platform, ['libcdf.so']) #All existing CDF-library-like paths within a directory search_dir = lambda x: \ [os.path.join(x, fname) for fname in names if os.path.exists(os.path.join(x, fname))] # Only use anaconda locations... # Defined during builds ... if 'PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['PREFIX'], 'lib')): yield p # defined when conda is activated ... 
if 'CONDA_PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'lib')): yield p # Special subdirectory for anaconda unix packages on windows if 'LIBRARY_BIN' in os.environ: for p in search_dir(os.environ['LIBRARY_BIN']): yield p ctypespath = ctypes.util.find_library( 'cdf.dll' if sys.platform == 'win32' else 'cdf') if ctypespath: yield ctypespath def check_status(self, status, ignore=()): """ Raise exception or warning based on return status of CDF call Parameters ========== status : int status returned by the C library Other Parameters ================ ignore : sequence of ctypes.c_long CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. (Default none). Raises ====== CDFError : if status < CDF_WARN, indicating an error Warns ===== CDFWarning : if CDF_WARN <= status < CDF_OK, indicating a warning. Returns ======= out : int status (unchanged) """ if status == const.CDF_OK or status in ignore: return status if status < const.CDF_WARN: raise CDFError(status) else: warning = CDFWarning(status) warning.warn() return status def call(self, *args, **kwargs): """ Call the CDF internal interface Passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with :meth:`check_status`. Terminal NULL is automatically added to args. Parameters ========== args : various, see :mod:`ctypes` Passed directly to the CDF library interface. Useful constants are defined in the :mod:`~pycdf.const` module. Other Parameters ================ ignore : sequence of CDF statuses sequence of CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. Returns ======= out : int CDF status from the library Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ if 'ignore' in kwargs: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) ), kwargs['ignore']) else: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) )) def set_backward(self, backward=True): """ Set backward compatibility mode for new CDFs Unless backward compatible mode is set, CDF files created by the version 3 library can not be read by V2. Parameters ========== backward : boolean Set backward compatible mode if True; clear it if False. Raises ====== ValueError : if backward=False and underlying CDF library is V2 """ if self.version[0] < 3: if not backward: raise ValueError( 'Cannot disable backward-compatible mode for CDF version 2.') else: return self._library.CDFsetFileBackward(const.BACKWARDFILEon if backward else const.BACKWARDFILEoff) def epoch_to_datetime(self, epoch): """ Converts a CDF epoch value to a datetime Parameters ========== epoch : float epoch value from CDF Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
See Also ======== v_epoch_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) self._library.EPOCHbreakdown(ctypes.c_double(epoch), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999000) else: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, msec.value * 1000) def datetime_to_epoch(self, dt): """ Converts a Python datetime to a CDF Epoch value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : float epoch corresponding to dt See Also ======== v_datetime_to_epoch """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) micro = dt.microsecond % 1000 if micro >= 500 and dt.year < 9999: dt += datetime.timedelta(0, 0, 1000) return self._library.computeEPOCH(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000)) def epoch16_to_datetime(self, epoch0, epoch1): """ Converts a CDF epoch16 value to a datetime .. note:: The call signature has changed since SpacePy 0.1.2. Formerly this method took a single argument with two values; now it requires two arguments (one for each value). To convert existing code, replace ``epoch16_to_datetime(epoch)`` with ``epoch16_to_datetime(*epoch)``. Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. See Also ======== v_epoch16_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) usec = ctypes.c_long(0) nsec = ctypes.c_long(0) psec = ctypes.c_long(0) self._library.EPOCH16breakdown((ctypes.c_double * 2)(epoch0, epoch1), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec), ctypes.byref(psec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) micro = int(float(msec.value) * 1000 + float(usec.value) + float(nsec.value) / 1000 + float(psec.value) / 1e6 + 0.5) if micro < 1000000: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_epoch16(self, dt): """ Converts a Python datetime to a CDF Epoch16 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : list of float epoch16 corresponding to dt See Also ======== v_datetime_to_epoch16 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) #Default to "illegal epoch" epoch16 = (ctypes.c_double * 2)(-1., -1.) 
if self._library.computeEPOCH16(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0, 0, epoch16): return (-1., -1.) #Failure, so illegal epoch return (epoch16[0], epoch16[1]) def epoch_to_epoch16(self, epoch): """ Converts a CDF EPOCH to a CDF EPOCH16 value Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : (double, double) EPOCH16 corresponding to epoch """ e = numpy.require(epoch, numpy.float64) s = numpy.trunc(e / 1000.0) #ugly numpy stuff, probably a better way.... res = numpy.hstack((s, (e - s * 1000.0) * 1e9)) if len(res) <= 2: return res newshape = list(res.shape[0:-2]) newshape.append(res.shape[-1] // 2) newshape.append(2) return numpy.rollaxis(res.reshape(newshape), -1, -2) def epoch_to_num(self, epoch): """ Convert CDF EPOCH to matplotlib number. Same output as :func:`~matplotlib.dates.date2num` and useful for plotting large data sets without converting the times through datetime. Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : double Floating point number representing days since 0001-01-01. """ #date2num day 1 is 1/1/1 00UT #epoch 1/1/1 00UT is 31622400000.0 (millisecond) return (epoch - 31622400000.0) / (24 * 60 * 60 * 1000.0) + 1.0 def epoch16_to_epoch(self, epoch16): """ Converts a CDF EPOCH16 to a CDF EPOCH value Parameters ========== epoch16 : (double, double) EPOCH16 to convert. Lists and numpy arrays are acceptable. LAST dimension should be 2: the two pairs of EPOCH16 Returns ======= out : double EPOCH corresponding to epoch16 """ e = numpy.require(epoch16, numpy.float64) return e[..., 0] * 1000.0 + numpy.round(e[..., 1] / 1e9) def tt2000_to_datetime(self, tt2000): """ Converts a CDF TT2000 value to a datetime .. note:: Although TT2000 values support leapseconds, Python's datetime object does not. Any times after 23:59:59.999999 will be truncated to 23:59:59.999999. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
See Also ======== v_tt2000_to_datetime """ yyyy = ctypes.c_double(0) mm = ctypes.c_double(0) dd = ctypes.c_double(0) hh = ctypes.c_double(0) min = ctypes.c_double(0) sec = ctypes.c_double(0) msec = ctypes.c_double(0) usec = ctypes.c_double(0) nsec = ctypes.c_double(0) self._library.breakdownTT2000( ctypes.c_longlong(tt2000), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) sec = int(sec.value) if sec >= 60: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), 59, 999999) micro = int(msec.value * 1000 + usec.value + nsec.value / 1000 + 0.5) if micro < 1000000: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_tt2000(self, dt): """ Converts a Python datetime to a CDF TT2000 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0) # MASKED: _datetime_to_tt2000_typepunned function (lines 969-1014) def epoch_to_tt2000(self, epoch): """ Converts a CDF EPOCH to a CDF TT2000 value Parameters ========== epoch : double EPOCH to convert Returns ======= out : int tt2000 corresponding to epoch See Also ======== v_epoch_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH(epoch) def tt2000_to_epoch(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH .. note:: Although TT2000 values support leapseconds, CDF EPOCH values do not. Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double EPOCH corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch """ return self._library.CDF_TT2000_to_UTC_EPOCH(tt2000) def epoch16_to_tt2000(self, epoch0, epoch1): """ Converts a CDF epoch16 value to TT2000 .. note:: Because TT2000 does not support picoseconds, the picoseconds value in epoch is ignored (i.e., truncated.) Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : long TT2000 corresponding to epoch. See Also ======== v_epoch16_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH16( (ctypes.c_double * 2)(epoch0, epoch1)) def tt2000_to_epoch16(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH16 .. note:: Although TT2000 values support leapseconds, CDF EPOCH16 values do not. Times during leapseconds are rounded up to beginning of the next day. 
Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double, double EPOCH16 corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch16 """ #Default to "illegal epoch" if isn't populated epoch16 = (ctypes.c_double * 2)(-1., -1.) if self._library.CDF_TT2000_to_UTC_EPOCH16(tt2000, epoch16): return (-1., -1.) #Failure; illegal epoch return (epoch16[0], epoch16[1]) def _bad_tt2000(*args, **kwargs): """Convenience function for complaining that TT2000 not supported""" raise NotImplementedError( 'TT2000 functions require CDF library 3.4.0 or later') def download_library(): """Download and install the CDF library""" if sys.platform != 'win32': raise NotImplementedError( 'CDF library install only supported on Windows') try: import html.parser as HTMLParser except ImportError: import HTMLParser #https://stackoverflow.com/questions/1713038/super-fails-with-error-typeerror-argument-1-must-be-type-not-classobj class LinkParser(HTMLParser.HTMLParser, object): def __init__(self, *args, **kwargs): self.links_found = [] super(LinkParser, self).__init__(*args, **kwargs) def handle_starttag(self, tag, attrs): if tag != 'a' or attrs[0][0] != 'href': return self.links_found.append(attrs[0][1]) import re import subprocess try: import urllib.request as u except ImportError: import urllib as u # Removed reference to spacepy #import spacepy #if spacepy.config.get('user_agent', None): # class AppURLopener(u.FancyURLopener): # version = spacepy.config['user_agent'] # u._urlopener = AppURLopener() baseurl = 'https://spdf.sci.gsfc.nasa.gov/pub/software/cdf/dist/' url = u.urlopen(baseurl) listing = url.read() url.close() p = LinkParser() p.feed(listing) cdfdist = [l for l in p.links_found if re.match('^cdf3\d_\d(?:_\d)?/$', l)] if not cdfdist: raise RuntimeError( "Couldn't find CDF distribution directory to download") cdfdist.sort(key=lambda x: x.rstrip('/').split('_')) cdfverbase = cdfdist[-1].rstrip('/') instfname = cdfverbase + ('_0' if cdfverbase.count('_') == 1 else '') + \ '-setup-{0}.exe'.format(len('%x' % sys.maxsize)*4) insturl = baseurl + cdfverbase + '/windows/' + instfname tmpdir = tempfile.mkdtemp() try: fname, status = u.urlretrieve(insturl, os.path.join(tmpdir, instfname)) subprocess.check_call([fname, '/install', '/q1'], shell=False) finally: shutil.rmtree(tmpdir) _libpath, _library = Library._find_lib() if _library is None: raise Exception(('Cannot load CDF C library; checked {0}. ' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(_libpath))) from . import const lib = Library(_libpath, _library) """Module global library object. Initalized at module load time so all classes have ready access to the CDF library and a common state. E.g: >>> import pycdf >>> pycdf.lib.version (3, 3, 0, ' ') """ class CDFException(Exception): """ Base class for errors or warnings in the CDF library. Not normally used directly, but in subclasses :class:`CDFError` and :class:`CDFWarning`. Error messages provided by this class are looked up from the underlying C library. """ def __init__(self, status): """ Create a CDF Exception Uses CDF C library to look up an appropriate error message. Parameters ========== status : ctypes.c_long CDF status """ self.status = status self.string = 'CDF error ' + repr(status) + ', unable to get details.' 
message = ctypes.create_string_buffer(const.CDF_STATUSTEXT_LEN + 1) try: retval = lib._library.CDFlib(const.SELECT_, const.CDF_STATUS_, ctypes.c_long(status), const.GET_, const.STATUS_TEXT_, message, const.NULL_) if retval == const.CDF_OK: if isinstance(message.value, str): self.string = message.value elif isinstance(message.value, bytes): self.string = message.value.decode() except: pass def __str__(self): """ Error string associated with the library error. Returns ======= out : str Error message from the CDF library. """ return self.string class CDFError(CDFException): """Raised for an error in the CDF library.""" pass class CDFWarning(CDFException, UserWarning): """Used for a warning in the CDF library.""" def warn(self, level=4): """ Issues a warning based on the information stored in my exception Intended for use in check_status or similar wrapper function. Other Parameters ================ level : int optional (default 3), how far up the stack the warning should be reported. Passed directly to :class:`warnings.warn`. """ warnings.warn(self, self.__class__, level) class EpochError(Exception): """Used for errors in epoch routines""" pass def _compress(obj, comptype=None, param=None): """Set or check the compression of a :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param obj: object on which to set or check compression @type obj: :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param comptype: type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :py:mod:`pycdf.const`. If not specified, will not change compression. @type comptype: ctypes.c_long @param param: Compression parameter, see CDF CRM 4.10 and :py:mod:`pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
@type param: ctypes.c_long @return: (comptype, param) currently in effect @rtype: tuple """ if isinstance(obj, CDF): COMPRESSION_ = const.CDF_COMPRESSION_ elif isinstance(obj, Var): COMPRESSION_ = const.zVAR_COMPRESSION_ else: raise ValueError('Must specify a CDF or Var type.') validparams = {const.NO_COMPRESSION.value: [ctypes.c_long(0)], const.RLE_COMPRESSION.value: [const.RLE_OF_ZEROs], const.HUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.AHUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.GZIP_COMPRESSION.value: [ctypes.c_long(5), ctypes.c_long(1), ctypes.c_long(2), ctypes.c_long(3), ctypes.c_long(4), ctypes.c_long(6), ctypes.c_long(7), ctypes.c_long(8), ctypes.c_long(9), ], } comptypes = [const.NO_COMPRESSION, const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION, const.GZIP_COMPRESSION] comptypevalues = [i.value for i in comptypes] if comptype != None: if not hasattr(comptype, 'value'): comptype = ctypes.c_long(comptype) if param is None: if not comptype.value in validparams: raise CDFError(const.BAD_COMPRESSION) param = validparams[comptype.value][0] paramlist = (ctypes.c_long * 1)(param) obj._call(const.PUT_, COMPRESSION_, comptype, paramlist) params = (ctypes.c_long * const.CDF_MAX_PARMS)(*([0] * const.CDF_MAX_PARMS)) comptype = ctypes.c_long(0) percent = ctypes.c_long(0) obj._call(const.GET_, COMPRESSION_, ctypes.byref(comptype), ctypes.byref(params), ctypes.byref(percent)) param = params[0] if not comptype.value in comptypevalues: raise CDFError(const.BAD_COMPRESSION) validparamvalues = [i.value for i in validparams[comptype.value]] if not param in validparamvalues: raise CDFError(const.BAD_COMPRESSION_PARM) comptype = comptypes[comptypevalues.index(comptype.value)] if comptype in (const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION): param = validparams[comptype.value][validparamvalues.index(param)] return (comptype, param) class CDF(MutableMapping): """ Python object representing a CDF file. Open or create a CDF file by creating an object of this class. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. A readonly CDF with many variables may be slow to close. See :meth:`readonly`. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :meth:`close` or :meth:`save` when done. .. note:: Existing CDF files are opened read-only by default, see :meth:`readonly` to change. CDF supports the `with <http://docs.python.org/tutorial/inputoutput.html#methods-of-file-objects>`_ keyword, like other file objects, so: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... #do brilliant things with the CDF will open the CDF, execute the indented statements, and close the CDF when finished or when an error occurs. The `python docs <http://docs.python.org/reference/compound_stmts.html#with>`_ include more detail on this 'context manager' ability. 
CDF objects behave like a python `dictionary <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_, where the keys are names of variables in the CDF, and the values, :class:`Var` objects. As a dictionary, they are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_ and it is easy to loop over all of the variables in a file. Some examples: #. List the names of all variables in the open CDF ``cdffile``: >>> cdffile.keys() >>> for k in cdffile: #Alternate ... print(k) #. Get a :class:`Var` object for the variable named ``Epoch``: >>> epoch = cdffile['Epoch'] #. Determine if a CDF contains a variable named ``B_GSE``: >>> if 'B_GSE' in cdffile: ... print('B_GSE is in the file') ... else: ... print('B_GSE is not in the file') #. Find how many variables are in the file: >>> print(len(cdffile)) #. Delete the variable ``Epoch`` from the open CDF file ``cdffile``: >>> del cdffile['Epoch'] #. Display a summary of variables and types in open CDF file ``cdffile``: >>> print(cdffile) #. Open the CDF named ``cdf_filename.cdf``, read *all* the data from all variables into dictionary ``data``, and close it when done or if an error occurs: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... data = cdffile.copy() This last example can be very inefficient as it reads the entire CDF. Normally it's better to treat the CDF as a dictionary and access only the data needed, which will be pulled transparently from disc. See :class:`Var` for more subtle examples. Potentially useful dictionary methods and related functions: - `in <http://docs.python.org/reference/expressions.html#in>`_ - `keys <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_ - :py:func:`len` - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - :py:func:`sorted` - :py:func:`~spacepy.toolbox.dictree` The CDF user's guide section 2.2 has more background information on CDF files. The :attr:`~CDF.attrs` Python attribute acts as a dictionary referencing CDF attributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`gAttrList` for more on the dictionary of global attributes. Creating a new CDF from a master (skeleton) CDF has similar syntax to opening one: >>> cdffile = pycdf.CDF('cdf_filename.cdf', 'master_cdf_filename.cdf') This creates and opens ``cdf_filename.cdf`` as a copy of ``master_cdf_filename.cdf``. Using a skeleton CDF is recommended over making a CDF entirely from scratch, but this is possible by specifying a blank master: >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') When CDFs are created in this way, they are opened read-write, see :py:meth:`readonly` to change. By default, new CDFs (without a master) are created in version 2 (backward-compatible) format. To create a version 3 CDF, use :meth:`Library.set_backward`: >>> pycdf.lib.set_backward(False) >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') Add variables by direct assignment, which will automatically set type and dimension based on the data provided: >>> cdffile['new_variable_name'] = [1, 2, 3, 4] or, if more control is needed over the type and dimensions, use :py:meth:`new`. Although it is supported to assign Var objects to Python variables for convenience, there are some minor pitfalls that can arise when changing a CDF that will not affect most users. 
This is only a concern when assigning a zVar object to a Python variable, changing the CDF through some other variable, and then trying to use the zVar object via the originally assigned variable. Deleting a variable: >>> var = cdffile['Var1'] >>> del cdffile['Var1'] >>> var[0] #fail, no such variable Renaming a variable: >>> var = cdffile['Var1'] >>> cdffile['Var1'].rename('Var2') >>> var[0] #fail, no such variable Renaming via the same variable works: >>> var = cdffile['Var1'] >>> var.rename('Var2') >>> var[0] #succeeds, aware of new name Deleting a variable and then creating another variable with the same name may lead to some surprises: >>> var = cdffile['Var1'] >>> var[...] = [1, 2, 3, 4] >>> del cdffile['Var1'] >>> cdffile.new('Var1', data=[5, 6, 7, 8] >>> var[...] [5, 6, 7, 8] .. autosummary:: ~CDF.attr_num ~CDF.attrs ~CDF.add_attr_to_cache ~CDF.add_to_cache ~CDF.backward ~CDF.checksum ~CDF.clear_attr_from_cache ~CDF.clear_from_cache ~CDF.clone ~CDF.close ~CDF.col_major ~CDF.compress ~CDF.copy ~CDF.from_data ~CDF.new ~CDF.raw_var ~CDF.readonly ~CDF.save ~CDF.var_num ~CDF.version .. attribute:: CDF.attrs Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. .. attribute:: CDF.backward True if this CDF was created in backward-compatible mode (for opening with CDF library before 3.x) .. automethod:: add_to_cache .. automethod:: add_attr_to_cache .. automethod:: attr_num .. automethod:: checksum .. automethod:: clear_from_cache .. automethod:: clear_attr_from_cache .. automethod:: clone .. automethod:: close .. automethod:: col_major .. automethod:: compress .. automethod:: copy .. automethod:: from_data .. automethod:: new .. automethod:: raw_var .. automethod:: readonly .. automethod:: save .. automethod:: var_num .. automethod:: version """ def __init__(self, pathname, masterpath=None, create=None, readonly=None): """Open or create a CDF file. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. Raises ====== CDFError if CDF library reports an error CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save` when done. 
""" if masterpath is not None: #Looks like we want to create if create is False: raise ValueError('Cannot specify a master CDF without creating a CDF') if readonly is True: raise ValueError('Cannot create a CDF in readonly mode') if create and readonly: raise ValueError('Cannot create a CDF in readonly mode') try: self.pathname = pathname.encode() except AttributeError: raise ValueError( 'pathname must be string-like: {0}'.format(pathname)) self._handle = ctypes.c_void_p(None) self._opened = False if masterpath is None and not create: self._open(True if readonly is None else readonly) elif masterpath: self._from_master(masterpath.encode()) else: self._create() lib.call(const.SELECT_, const.CDF_zMODE_, ctypes.c_long(2)) self._attrlistref = weakref.ref(gAttrList(self)) self.backward = self.version()[0] < 3 self._var_nums = {} """Cache of name-to-number mappings for variables in this CDF""" self._attr_info = {} """Cache of name-to-(number, global) mappings for attributes in this CDF""" def __del__(self): """Destructor; called when CDF object is destroyed. Close CDF file if there is still a valid handle. .. note:: To avoid data loss, explicitly call :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save`. """ if self._opened: self.close() def __delitem__(self, name): """Delete a zVariable in this CDF, by name or number Parameters ========== name : string or int Name or number of the CDF variable .. note: Variable numbers may change if variables are added or removed. Examples ======== Delete the variable ``Epoch`` from the open CDF file ``cdffile``. >>> del cdffile['Epoch'] """ self[name]._delete() def __enter__(self): """Context manager entrance function.""" return self def __exit__(self, type, value, traceback): """Context manager exit function. Close CDF file. """ self.close() def __getitem__(self, name): """Gets a zVariable in this CDF, by name or number The CDF acts like a dict @param name: Name or number of the CDF variable @type name: string or int @return: CDF variable named or numbered L{name} @rtype: :py:class:`pycdf.Var` @raise KeyError: for pretty much any problem in lookup @note: variable numbers may change if variables are added or removed. """ try: return Var(self, name) except CDFException as e: raise KeyError('{0}: {1}'.format(name, e)) def __setitem__(self, name, data): """Writes data to a zVariable in this CDF If the zVariable does not exist, will create one matching L{data}. If it does exist, will attempt to write L{data} to it without changing the type or dimensions. @param name: name or number of the variable to write @type name: str or int @param data: data to write, or a :py:class:`pycdf.Var` to copy """ if isinstance(data, Var): self.clone(data, name) elif name in self: self[name][...] 
= data if hasattr(data, 'attrs'): self[name].attrs.clone(data.attrs) else: self.new(name, data) def __iter__(self, current = 0): """Iterates over zVars in CDF Iterators for dicts return keys @note: Returned in variable-number order """ while current < self.__len__(): name = self[current].name() value = (yield name) if value is None: current += 1 else: current = self[value]._num() current += 1 def __len__(self): """Implements 'length' of CDF (number of zVars) @return: number of zVars in the CDF @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, const.CDF_NUMzVARS_, ctypes.byref(count)) return count.value def __contains__(self, key): """Determines whether a particular variable name is in the CDF @note: Essentially an efficiency function; L{__iter__} is called if this isn't defined @param key: key/variable name to check @type key: string @return: True if L{key} is the name of a variable in CDF, else False @rtype: Boolean """ try: foo = self[key] return True except KeyError as e: expected = str(key) + \ ": NO_SUCH_VAR: Named variable not found in this CDF." if expected in e.args: return False raise def __repr__(self): """Returns representation of CDF Cannot return anything that can be eval'd to create a copy of the CDF, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<CDF:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the CDF This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.CDF`, just the names, types, and sizes of all variables. (Attributes are not listed.) @return: description of the variables in the CDF @rtype: str """ if self._opened: return '\n'.join([key + ': ' + str(value) for (key, value) in sorted(self.items())]) #can get away with this sort because second value in tuple isn't #compared unless first are different, and variable name is unique. else: if isinstance(self.pathname, str): return 'Closed CDF {0}'.format(self.pathname) else: return 'Closed CDF {0}'.format(self.pathname.decode('ascii')) def _open(self, readonly=True): """Opens the CDF file (called on init) Will open an existing CDF file read/write. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.OPEN_, const.CDF_, self.pathname, ctypes.byref(self._handle)) self._opened = True if readonly: #Default is RW self.readonly(readonly) def _create(self): """Creates (and opens) a new CDF file Created at ``pathname``. Assumes zero-dimension r variables Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.CREATE_, const.CDF_, self.pathname, ctypes.c_long(0), (ctypes.c_long * 1)(0), ctypes.byref(self._handle)) self._opened = True def _from_master(self, master_path): """Creates a new CDF from a master CDF file ``master_path`` is copied to ``pathname`` and opened. Parameters ========== master_path : string location of the master CDF file Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. 
note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ if os.path.exists(self.pathname): raise CDFError(const.CDF_EXISTS) shutil.copy2(master_path, self.pathname) self._open(False) def _call(self, *args, **kwargs): """Select this CDF as current and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. Parameters ========== args : various, see :py:mod:`ctypes`. Passed directly to the CDF library interface. Useful constants are defined in the :doc:`const <pycdf_const>` module of this package. Returns ======= out : ctypes.c_long CDF status from the library .. note: Terminal NULL_ is automatically added to ``args``. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. """ return lib.call(const.SELECT_, const.CDF_, self._handle, *args, **kwargs) def clone(self, zVar, name=None, data=True): """ Clone a zVariable (from another CDF or this) into this CDF Parameters ========== zVar : :py:class:`Var` variable to clone Other Parameters ================ name : str Name of the new variable (default: name of the original) data : boolean (optional) Copy data, or only type, dimensions, variance, attributes? (default: True, copy data as well) Returns ======= out : :py:class:`Var` The newly-created zVar in this CDF """ if name is None: name = zVar.name() if name in self: del self[name] self.new(name, type=zVar.type(), recVary=zVar.rv(), dimVarys=zVar.dv(), dims=zVar._dim_sizes(), n_elements=zVar._nelems()) self[name].compress(*zVar.compress()) self[name].attrs.clone(zVar.attrs) if data: r = zVar._raw zVar._raw = True self.raw_var(name)[...] = zVar[...] zVar._raw = r return zVar def col_major(self, new_col=None): """ Finds the majority of this CDF file Other Parameters ================ new_col : boolean Specify True to change to column-major, False to change to row major, or do not specify to check the majority rather than changing it. (default is check only) Returns ======= out : boolean True if column-major, false if row-major """ if new_col != None: new_maj = const.COLUMN_MAJOR if new_col else const.ROW_MAJOR self._call(const.PUT_, const.CDF_MAJORITY_, new_maj) maj = ctypes.c_long(0) self._call(const.GET_, const.CDF_MAJORITY_, ctypes.byref(maj)) if not maj.value in (const.ROW_MAJOR.value, const.COLUMN_MAJOR.value): raise CDFError(const.BAD_MAJORITY) return maj.value == const.COLUMN_MAJOR.value def readonly(self, ro=None): """ Sets or check the readonly status of this CDF If the CDF has been changed since opening, setting readonly mode will have no effect. .. note:: Closing a CDF that has been opened readonly, or setting readonly False, may take a substantial amount of time if there are many variables in the CDF, as a (potentially large) cache needs to be cleared. Consider specifying ``readonly=False`` when opening the file if this is an issue. However, this may make some reading operations slower. Other Parameters ================ ro : Boolean True to set the CDF readonly, False to set it read/write, or leave out to check only. 
Returns ======= out : Boolean True if CDF is read-only, else False Raises ====== CDFError : if bad mode is set """ if ro == True: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYon) elif ro == False: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYoff) mode = ctypes.c_long(0) self._call(const.CONFIRM_, const.CDF_READONLY_MODE_, ctypes.byref(mode)) if mode.value == const.READONLYon.value: return True elif mode.value == const.READONLYoff.value: return False else: raise CDFError(const.BAD_READONLY_MODE.value) def checksum(self, new_val=None): """ Set or check the checksum status of this CDF. If checksums are enabled, the checksum will be verified every time the file is opened. Other Parameters ================ new_val : boolean True to enable checksum, False to disable, or leave out to simply check. Returns ======= out : boolean True if the checksum is enabled or False if disabled """ if new_val != None: self._call(const.PUT_, const.CDF_CHECKSUM_, const.MD5_CHECKSUM if new_val else const.NO_CHECKSUM) chk = ctypes.c_long(0) self._call(const.GET_, const.CDF_CHECKSUM_, ctypes.byref(chk)) if not chk.value in (const.MD5_CHECKSUM.value, const.NO_CHECKSUM.value): raise CDFError(const.BAD_CHECKSUM) return chk.value == const.MD5_CHECKSUM.value def close(self): """ Closes the CDF file Although called on object destruction (:meth:`~CDF.__del__`), to ensure all data are saved, the user should explicitly call :meth:`~CDF.close` or :meth:`~CDF.save`. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.CLOSE_, const.CDF_) self._opened = False def compress(self, comptype=None, param=None): """ Set or check the compression of this CDF Sets compression on entire *file*, not per-variable. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) Returns ======= out : tuple (comptype, param) currently in effect See Also ======== :meth:`Var.compress` Examples ======== Set file ``cdffile`` to gzip compression, compression level 9: >>> cdffile.compress(pycdf.const.GZIP_COMPRESSION, 9) """ return _compress(self, comptype, param) def new(self, name, data=None, type=None, recVary=True, dimVarys=None, dims=None, n_elements=None, compress=None, compress_param=None): """ Create a new zVariable in this CDF .. note:: Either ``data`` or ``type`` must be specified. If type is not specified, it is guessed from ``data``. Parameters ========== name : str name of the new variable Other Parameters ================ data data to store in the new variable. If this has a an ``attrs`` attribute (e.g., :class:`~spacepy.datamodel.dmarray`), it will be used to populate attributes of the new variable. type : ctypes.c_long CDF type of the variable, from :mod:`~pycdf.const`. See section 2.5 of the CDF user's guide for more information on CDF data types. recVary : boolean record variance of the variable (default True) dimVarys : list of boolean dimension variance of each dimension, default True for all dimensions. 
dims : list of int size of each dimension of this variable, default zero-dimensional. Note this is the dimensionality as defined by CDF, i.e., for record-varying variables it excludes the leading record dimension. See :py:class:`Var`. n_elements : int number of elements, should be 1 except for CDF_CHAR, for which it's the length of the string. compress : ctypes.c_long Compression to apply to this variable, default None. See :py:meth:`Var.compress`. compress_param : ctypes.c_long Compression parameter if compression used; reasonable default is chosen. See :py:meth:`Var.compress`. Returns ======= out : :py:class:`Var` the newly-created zVariable Raises ====== ValueError : if neither data nor sufficient typing information is provided. Notes ===== Any given data may be representable by a range of CDF types; if the type is not specified, pycdf will guess which the CDF types which can represent this data. This breaks down to: #. If input data is a numpy array, match the type of that array #. Proper kind (numerical, string, time) #. Proper range (stores highest and lowest number provided) #. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: #. Type that matches precision of data first, then #. integer type before float type, then #. Smallest type first, then #. signed type first, then #. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if ``data`` specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: #. absolute values between 0 and 3e-39 #. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. """ if type in (const.CDF_EPOCH16, const.CDF_INT8, const.CDF_TIME_TT2000) \ and self.backward: raise ValueError('Cannot use EPOCH16, INT8, or TIME_TT2000 ' 'in backward-compatible CDF') if not lib.supports_int8 and \ type in (const.CDF_INT8, const.CDF_TIME_TT2000): raise ValueError('INT8 and TIME_TT2000 require CDF library 3.4.0') if data is None: if type is None: raise ValueError('Must provide either data or a CDF type.') if dims is None: dims = [] if n_elements is None: n_elements = 1 else: (guess_dims, guess_types, guess_elements) = _Hyperslice.types(data) if dims is None: if recVary: if guess_dims == (): raise ValueError( 'Record-varying data cannot be scalar. 
' 'Specify NRV with CDF.new() or put data in array.') dims = guess_dims[1:] else: dims = guess_dims if type is None: type = guess_types[0] if type == const.CDF_EPOCH16.value and self.backward: type = const.CDF_EPOCH if n_elements is None: n_elements = guess_elements if dimVarys is None: dimVarys = [True for i in dims] recVary = const.VARY if recVary else const.NOVARY dimVarys = [const.VARY if dimVary else const.NOVARY for dimVary in dimVarys] if not hasattr(type, 'value'): type = ctypes.c_long(type) if type.value == const.CDF_INT8.value and not lib.supports_int8: raise ValueError( '64-bit integer support require CDF library 3.4.0') if type.value in (const.CDF_EPOCH16.value, const.CDF_INT8.value, const.CDF_TIME_TT2000.value) \ and self.backward: raise ValueError('Data requires EPOCH16, INT8, or TIME_TT2000; ' 'incompatible with backward-compatible CDF') new_var = Var(self, name, type, n_elements, dims, recVary, dimVarys) if compress != None: new_var.compress(compress, compress_param) if data is not None: new_var[...] = data if hasattr(data, 'attrs'): new_var.attrs.clone(data.attrs) return new_var def raw_var(self, name): """ Get a "raw" :class:`Var` object. Normally a :class:`Var` will perform translation of values for certain types (to/from Unicode for CHAR variables on Py3k, and to/from datetime for all time types). A "raw" object does not perform this translation, on read or write. This does *not* affect the data on disk, and in fact it is possible to maintain multiple Python objects with access to the same zVariable. Parameters ========== name : str name or number of the zVariable """ v = self[name] v._raw = True return v def save(self): """ Saves the CDF file but leaves it open. If closing the CDF, :meth:`close` is sufficient; there is no need to call :meth:`save` before :meth:`close`. .. note:: Relies on an undocumented call of the CDF C library, which is also used in the Java interface. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.SAVE_, const.CDF_) def copy(self): """ Make a copy of all data and attributes in this CDF Returns ======= out : :py:class:`CDFCopy` :class:`~spacepy.datamodel.SpaceData`-like object of all data """ return CDFCopy(self) def version(self): """ Get version of library that created this CDF Returns ======= out : tuple version of CDF library, in form (version, release, increment) """ ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) self._call(const.GET_, const.CDF_VERSION_, ctypes.byref(ver), const.GET_, const.CDF_RELEASE_, ctypes.byref(rel), const.GET_, const.CDF_INCREMENT_, ctypes.byref(inc)) return (ver.value, rel.value, inc.value) def _get_attrs(self): """Get attribute list Provide access to the CDF's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = gAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. 
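For example, assuming ``cdffile`` is an open, writable :class:`CDF` (names here are illustrative):

>>> cdffile.attrs['Project'] = 'ISTP'
>>> cdffile.attrs['TEXT'] = 'Sample global attribute'
>>> project = cdffile.attrs['Project']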
""") def var_num(self, varname): """Get the variable number of a particular variable name This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : int Variable number of this zvariable. """ num = self._var_nums.get(varname, None) if num is None: #Copied from Var._get, which can hopefully be thinned varNum = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMBER_, varname, ctypes.byref(varNum)) num = varNum.value self._var_nums[varname] = num return num def attr_num(self, attrname): """Get the attribute number and scope by attribute name This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : tuple attribute number, scope (True for global) of this attribute """ res = self._attr_info.get(attrname, None) if res is None: #Copied from Var._get, which can hopefully be thinned attrNum = ctypes.c_long(0) self._call(const.GET_, const.ATTR_NUMBER_, attrname, ctypes.byref(attrNum)) scope = ctypes.c_long(0) self._call(const.SELECT_, const.ATTR_, attrNum, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) res = (attrNum.value, scope) self._attr_info[attrname] = res return res def clear_attr_from_cache(self, attrname): """Mark an attribute deleted in the name-to-number cache Will remove an attribute, and all attributes with higher numbers, from the attribute cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the attribute. Not this is NOT a string in Python 3! """ num, scope = self.attr_num(attrname) #All numbers higher than this are renumbered for a, n in list(self._attr_info.items()): if n[0] >= num: del self._attr_info[a] def clear_from_cache(self, varname): """Mark a variable deleted in the name-to-number cache Will remove a variable, and all variables with higher numbers, from the variable cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! """ num = self.var_num(varname) #All numbers higher than this are renumbered for v, n in list(self._var_nums.items()): if n >= num: del self._var_nums[v] def add_attr_to_cache(self, attrname, num, scope): """Add an attribute to the name-to-number cache This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable scope : bool True if global scope; False if variable scope. 
""" self._attr_info[attrname] = (num, scope) def add_to_cache(self, varname, num): """Add a variable to the name-to-number cache This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable """ self._var_nums[varname] = num #Note there is no function for delete, currently handled in Var.rename #and Attr.rename by just deleting from the dict directly. Maybe this #should be differen (maybe should be possible to follow a variable across #a rename...) class Var(MutableSequence): """ A CDF variable. This object does not directly store the data from the CDF; rather, it provides access to the data in a format that much like a Python list or numpy :class:`~numpy.ndarray`. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. The CDF user's guide, section 2.3, provides background on variables. .. note:: Not intended to be created directly; use methods of :class:`CDF` to gain access to a variable. A record-varying variable's data are viewed as a hypercube of dimensions n_dims+1 (the extra dimension is the record number). They are indexed in row-major fashion, i.e. the last index changes most frequently / is contiguous in memory. If the CDF is column-major, the data are transformed to row-major before return. Non record-varying variables are similar, but do not have the extra dimension of record number. Variables can be subscripted by a multidimensional index to return the data. Indices are in row-major order with the first dimension representing the record number. If the CDF is column major, the data are reordered to row major. Each dimension is specified by standard Python `slice <http://docs.python.org/tutorial/introduction.html#strings>`_ notation, with dimensions separated by commas. The ellipsis fills in any missing dimensions with full slices. The returned data are lists; Python represents multidimensional arrays as nested lists. The innermost set of lists represents contiguous data. .. note:: numpy 'fancy indexing' is *not* supported. Degenerate dimensions are 'collapsed', i.e. no list of only one element will be returned if a single subscript is specified instead of a range. (To avoid this, specify a slice like 1:2, which starts with 1 and ends before 2). Two special cases: 1. requesting a single-dimension slice for a record-varying variable will return all data for that record number (or those record numbers) for that variable. 2. Requests for multi-dimensional variables may skip the record-number dimension and simply specify the slice on the array itself. In that case, the slice of the array will be returned for all records. In the event of ambiguity (e.g., single-dimension slice on a one-dimensional variable), case 1 takes priority. Otherwise, mismatch between the number of dimensions specified in the slice and the number of dimensions in the variable will cause an :exc:`~exceptions.IndexError` to be thrown. This all sounds very complicated but it is essentially attempting to do the 'right thing' for a range of slices. An unusual case is scalar (zero-dimensional) non-record-varying variables. Clearly they cannot be subscripted normally. 
In this case, use the ``[...]`` syntax meaning 'access all data.': >>> import pycdf >>> testcdf = pycdf.CDF('test.cdf', '') >>> variable = testcdf.new('variable', recVary=False, ... type=pycdf.const.CDF_INT4) >>> variable[...] = 10 >>> variable <Var: CDF_INT4 [] NRV > >>> variable[...] 10 Reading any empty non-record-varying variable will return an empty with the same *number* of dimensions, but all dimensions will be of zero length. The scalar is, again, a special case: due to the inability to have a numpy array which is both zero-dimensional and empty, reading an NRV scalar variable with no data will return an empty one-dimensional array. This is really not recommended. As a list type, variables are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_; iterating over a variable returns a single complete record at a time. This is all clearer with examples. Consider a variable ``B_GSM``, with three elements per record (x, y, z components) and fifty records in the CDF. Then: 1. ``B_GSM[0, 1]`` is the y component of the first record. 2. ``B_GSM[10, :]`` is a three-element list, containing x, y, and z components of the 11th record. As a shortcut, if only one dimension is specified, it is assumed to be the record number, so this could also be written ``B_GSM[10]``. 3. ``B_GSM[...]`` reads all data for ``B_GSM`` and returns it as a fifty-element list, each element itself being a three-element list of x, y, z components. Multidimensional example: consider fluxes stored as a function of pitch angle and energy. Such a variable may be called Flux and stored as a two-dimensional array, with the first dimension representing (say) ten energy steps and the second, eighteen pitch angle bins (ten degrees wide, centered from 5 to 175 degrees). Assume 100 records stored in the CDF (i.e. 100 different times). 1. ``Flux[4]`` is a list of ten elements, one per energy step, each element being a list of 18 fluxes, one per pitch bin. All are taken from the fifth record in the CDF. 2. ``Flux[4, :, 0:4]`` is the same record, all energies, but only the first four pitch bins (roughly, field-aligned). 3. ``Flux[..., 0:4]`` is a 100-element list (one per record), each element being a ten-element list (one per energy step), each containing fluxes for the first four pitch bins. This slicing notation is very flexible and allows reading specifically the desired data from the CDF. All data are, on read, converted to appropriate Python data types; EPOCH, EPOCH16, and TIME_TT2000 types are converted to :class:`~datetime.datetime`. Data are returned in numpy arrays. .. note:: Although pycdf supports TIME_TT2000 variables, the Python :class:`~datetime.datetime` object does not support leap seconds. Thus, on read, any seconds past 59 are truncated to 59.999999 (59 seconds, 999 milliseconds, 999 microseconds). Potentially useful list methods and related functions: - `count <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `in <http://docs.python.org/reference/expressions.html#in>`_ - `index <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `len <http://docs.python.org/library/functions.html#len>`_ - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - `sorted <http://docs.python.org/library/functions.html#sorted>`_ The topic of array majority can be very confusing; good background material is available at `IDL Array Storage and Indexing <http://www.idlcoyote.com/misc_tips/colrow_major.html>`_. 
In brief, *regardless of the majority stored in the CDF*, pycdf will always present the data in the native Python majority, row-major order, also known as C order. This is the default order in `NumPy <http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html #internal-memory-layout-of-an-ndarray>`_. However, packages that render image data may expect it in column-major order. If the axes seem 'swapped' this is likely the reason. The :attr:`~Var.attrs` Python attribute acts as a dictionary referencing zAttributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`zAttrList` for more on the dictionary of attributes. With writing, as with reading, every attempt has been made to match the behavior of Python lists. You can write one record, many records, or even certain elements of all records. There is one restriction: only the record dimension (i.e. dimension 0) can be resized by write, as all records in a variable must have the same dimensions. Similarly, only whole records can be deleted. .. note:: Unusual error messages on writing data usually mean that pycdf is unable to interpret the data as a regular array of a single type matching the type and shape of the variable being written. A 5x4 array is supported; an irregular array where one row has five columns and a different row has six columns is not. Error messages of this type include: - ``Data must be well-formed, regular array of number, string, or datetime`` - ``setting an array element with a sequence.`` - ``shape mismatch: objects cannot be broadcast to a single shape`` For these examples, assume Flux has 100 records and dimensions [2, 3]. Rewrite the first record without changing the rest: >>> Flux[0] = [[1, 2, 3], [4, 5, 6]] Writes a new first record and delete all the rest: >>> Flux[...] = [[1, 2, 3], [4, 5, 6]] Write a new record in the last position and add a new record after: >>> Flux[99:] = [[[1, 2, 3], [4, 5, 6]], ... [[11, 12, 13], [14, 15, 16]]] Insert two new records between the current number 5 and 6: >>> Flux[5:6] = [[[1, 2, 3], [4, 5, 6]], [[11, 12, 13], ... [14, 15, 16]]] This operation can be quite slow, as it requires reading and rewriting the entire variable. (CDF does not directly support record insertion.) Change the first element of the first two records but leave other elements alone: >>> Flux[0:2, 0, 0] = [1, 2] Remove the first record: >>> del Flux[0] Removes record 5 (the sixth): >>> del Flux[5] Due to the need to work around a bug in the CDF library, this operation can be quite slow. Delete *all data* from ``Flux``, but leave the variable definition intact: >>> del Flux[...] .. note:: Although this interface only directly supports zVariables, zMode is set on opening the CDF so rVars appear as zVars. See p.24 of the CDF user's guide; pyCDF uses zMode 2. .. autosummary:: ~Var.attrs ~Var.compress ~Var.copy ~Var.dtype ~Var.dv ~Var.insert ~Var.name ~Var.rename ~Var.rv ~Var.shape ~Var.type .. attribute:: Var.attrs zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. .. automethod:: compress .. automethod:: copy .. autoattribute:: dtype .. automethod:: dv .. automethod:: insert .. automethod:: name .. automethod:: rename .. automethod:: rv .. autoattribute:: shape .. 
automethod:: type """ def __init__(self, cdf_file, var_name, *args): """Create or locate a variable Parameters ========== cdf_file : :py:class:`pycdf.CDF` CDF file containing this variable var_name : string name of this variable Other Parameters ================ args additional arguments passed to :py:meth:`_create`. If none, opens an existing variable. If provided, creates a new one. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning """ self.cdf_file = cdf_file #This is the definitive "identify" of variable self._name = None self._type = None #CDF type (long) self._raw = False #Raw access (skip all conversions) if len(args) == 0: self._get(var_name) else: self._create(var_name, *args) #Weak reference to attribute list (use attrs instead) #This avoids a reference loop self._attrlistref = weakref.ref(zAttrList(self)) def __getitem__(self, key): """Returns a slice from the data array. Details under :py:class:`pycdf.Var`. @return: The data from this variable @rtype: list-of-lists of appropriate type. @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) #Hyperslice mostly catches this sort of thing, but #an empty variable is a special case, since we might want to #WRITE to 0th record (which Hyperslice also supports) but #can't READ from it, and iterating over tries to read from it. if hslice.rv: if hslice.dimsizes[0] == 0 and hslice.degen[0] and \ hslice.starts[0] == 0: raise IndexError('record index out of range') #For NRV, again hslice will assume 0th record exists since we might #want to write. So ANY degenerate dim other than the glued-on 0th #suggests an explicit index that should fail. None degenerate suggests #make an empty array. #Note this is pulling a lot of hyperslice stuff into getitem! elif hslice.dimsizes[0] == 0: if len(hslice.degen) > 1 and max(hslice.degen[1:]): raise IndexError('record index out of range') else: #The zero-length dimension is degenerate so it gets chopped, #and you can't have a zero-length numpy array that still #maintains the size of all other dimensions. So just force #a zero-dim array and the rest will follow hslice.counts[...] = 0 #If this is a scalar, need to make a single non-degenerate #dimension so it can be empty. if len(hslice.counts) == 1: hslice.degen[0] = False result = hslice.create_array() if hslice.counts[0] != 0: hslice.select() lib.call(const.GET_, const.zVAR_HYPERDATA_, result.ctypes.data_as(ctypes.c_void_p)) return hslice.convert_input_array(result) def __delitem__(self, key): """Removes a record (or set of records) from the CDF Only whole records can be deleted, so the del call must either specify only one dimension or it must specify all elements of the non-record dimensions. This is *not* a way to resize a variable! Deleting records from the middle of a variable may be very slow in some circumstances. To work around a bug in CDF library versions 3.4.0 and before, all the data must be read in, the requested deletions done, and then all written back out. 
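For example (names illustrative), with ``Flux`` record-varying and dimensioned [2, 3]:

>>> del Flux[0]        #removes the entire first record
>>> del Flux[0, :, :]  #equivalent; the slice covers whole records
>>> del Flux[0, 1]     #TypeError: cannot delete below the record level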
@param key: index or slice to delete @type key: int or slice @raise TypeError: if an attempt is made to delete from a non record-varying variable, or to delete below the record level """ if not self.rv(): raise TypeError('Cannot delete records from non-record-varying ' 'variable.') hslice = _Hyperslice(self, key) if hslice.dims > 1 and (hslice.counts[1:] != hslice.dimsizes[1:]).any(): raise TypeError('Can only delete entire records.') if hslice.counts[0] == 0: return start = hslice.starts[0] count = hslice.counts[0] interval = hslice.intervals[0] dimsize = hslice.dimsizes[0] self._call() dangerous_delete = False if lib._del_middle_rec_bug and \ (interval != 1 or (start != 0 and start + count < dimsize)): #delete from middle is dangerous if only have one index entry entries = ctypes.c_long(0) lib.call(const.GET_, const.zVAR_nINDEXENTRIES_, ctypes.byref(entries)) dangerous_delete = (entries.value == 1) if dangerous_delete: data = self[...] data = numpy.delete( data, numpy.arange(start, start + count * interval, interval), 0) self[0:dimsize - count] = data first_rec = dimsize - count last_rec = dimsize - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif interval == 1: first_rec = ctypes.c_long(start) last_rec = ctypes.c_long(start + count - 1) lib.call(const.DELETE_, const.zVAR_RECORDS_, first_rec, last_rec) else: self._call() #delete from end to avoid renumbering of records for recno in range(start + (count - 1) * interval, start - 1, -1 * interval): lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(recno), ctypes.c_long(recno)) def __setitem__(self, key, data): """Puts a slice into the data array. Details under :py:class:`pycdf.Var`. @param key: index or slice to store @type key: int or slice @param data: data to store @type data: numpy.array @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. 
IndexError will @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) n_recs = hslice.counts[0] hslice.expand(data) cdf_type = self.type() if cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) else: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=self._np_type()) if cdf_type == const.CDF_EPOCH16.value: datashape = data.shape[:-1] else: datashape = data.shape #Check data sizes if datashape != tuple(hslice.expected_dims()): raise ValueError('attempt to assign data of dimensions ' + str(datashape) + ' to slice of dimensions ' + str(tuple(hslice.expected_dims()))) #Flip majority and reversed dimensions, see convert_input_array data = hslice.convert_output_array(data) #Handle insertions and similar weirdness if hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Specified slice ends before last record, so insert in middle saved_data = self[hslice.starts[0] + n_recs:] if hslice.counts[0] > 0: hslice.select() lib.call(const.PUT_, const.zVAR_HYPERDATA_, data.ctypes.data_as(ctypes.c_void_p)) if hslice.counts[0] < n_recs: first_rec = hslice.starts[0] + hslice.counts[0] last_rec = hslice.dimsizes[0] - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Put saved data in after inserted data self[hslice.starts[0] + hslice.counts[0]:] = saved_data def extend(self, data): """ Append multiple values to the end of this variable This is an efficiency function which overrides the base implementation in MutableSequence. Parameters ---------- data : the data to append """ self[len(self):] = data def insert(self, index, data): """ Inserts a *single* record before an index Parameters ---------- index : int index before which to insert the new record data : the record to insert """ self[index:index] = [data] def _create(self, var_name, datatype, n_elements = 1, dims = (), recVary = const.VARY, dimVarys = None): """Creates a new zVariable @param var_name: name of this variable @type var_name: string @param datatype: CDF data type @type datatype: ctypes.c_long @param n_elements: number of elements (should be 1 except for CDF_CHAR variables). @type n_elements: long @param dims: size of each dimension for multi-dimensional variable, or empty for a zero-dimensional @type dims: sequence of long @param recVary: record variance for this variable (VARY/NOVARY) @type recVary: long @param dimVarys: array of VARY or NOVARY, variance for each dimension @type dimVarys: sequence of long @return: new variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.new}. 
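For example (illustrative), the equivalent public call to create a record-varying, three-element float variable in an open, writable CDF ``cdf`` is:

>>> var = cdf.new('Flux', type=pycdf.const.CDF_FLOAT, dims=[3])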
""" dim_array = (ctypes.c_long * len(dims))(*dims) enc_name = var_name.encode('ascii') if dimVarys is None: dim_vary_array = (ctypes.c_long * (len(dims) if len(dims) > 0 else 1))(const.VARY) else: dim_vary_array = (ctypes.c_long * len(dims))(*dimVarys) varNum = ctypes.c_long(0) self.cdf_file._call(const.CREATE_, const.zVAR_, enc_name, datatype, ctypes.c_long(n_elements), ctypes.c_long(len(dims)), dim_array, recVary, dim_vary_array, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) def _delete(self): """Removes this zVariable from the CDF @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ self._call(const.DELETE_, const.zVAR_) self.cdf_file.clear_from_cache(self._name) self._name = None def _get(self, var_name): """Gets an existing zVariable @param var_name: name of this variable @type var_name: string @return: variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.__getitem__}. """ if isinstance(var_name, str_classes): try: enc_name = var_name.encode('ascii').rstrip() except AttributeError: enc_name = var_name.rstrip() #already in ASCII #'touch' CDF to cause an error if the name isn't there; get number varNum = ctypes.c_long(0) self.cdf_file._call(const.GET_, const.zVAR_NUMBER_, enc_name, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) else: #Looking up by number name = ctypes.create_string_buffer(const.CDF_VAR_NAME_LEN256+1) self.cdf_file._call(const.SELECT_, const.zVAR_, ctypes.c_long(var_name), const.GET_, const.zVAR_NAME_, name) self._name = name.value.rstrip() self.cdf_file.add_to_cache(self._name, var_name) def _num(self): """Returns the zVar number for this variable @return: number of this zVar @rtype: int """ return self.cdf_file.var_num(self._name) def __len__(self): """Get number of records for this variable in this file @return: Number of records @rtype: long @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ count = ctypes.c_long(0) self._call(const.GET_, const.zVAR_MAXREC_, ctypes.byref(count)) return (count.value + 1) def __repr__(self): """Returns representation of the variable Cannot return anything that can be eval'd to create a copy, so just wrap the informal representation in angle brackets. @return: info on this zVar @rtype: str """ return '<Var:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the variable This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.Var`. 
@return: info on this zVar, CDFTYPE [dimensions] NRV (if not record-varying) @rtype: str """ if self.cdf_file._opened: cdftype = self.type() chartypes = (const.CDF_CHAR.value, const.CDF_UCHAR.value) rv = self.rv() typestr = lib.cdftypenames[cdftype] + \ ('*' + str(self._nelems()) if cdftype in chartypes else '' ) if rv: sizestr = str([len(self)] + self._dim_sizes()) else: sizestr = str(self._dim_sizes()) return typestr + ' ' + sizestr + ('' if rv else ' NRV') else: if isinstance(self._name, str): return 'zVar "{0}" in closed CDF {1}'.format( self._name, self.cdf_file.pathname) else: return 'zVar "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self.cdf_file.pathname.decode('ascii')) def _n_dims(self): """Get number of dimensions for this variable @return: the number of dimensions @rtype: long """ n_dims = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMDIMS_, ctypes.byref(n_dims)) return n_dims.value def _dim_sizes(self): """Get the dimension sizes for this variable @return: sequence of sizes @rtype: sequence of long @note: This will always be in Python order (i.e. row major, last index iterates most quickly), *regardless* of the majority of the CDF. """ sizes = (ctypes.c_long * const.CDF_MAX_DIMS)(0) self._call(const.GET_, const.zVAR_DIMSIZES_, sizes) sizes = sizes[0:self._n_dims()] return sizes def rv(self, new_rv=None): """ Gets or sets whether this variable has record variance If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Other Parameters ================ new_rv : boolean True to change to record variance, False to change to NRV, unspecified to simply check variance. Returns ======= out : Boolean True if record varying, False if NRV """ if new_rv != None: self._call(const.PUT_, const.zVAR_RECVARY_, const.VARY if new_rv else const.NOVARY) vary = ctypes.c_long(0) self._call(const.GET_, const.zVAR_RECVARY_, ctypes.byref(vary)) return vary.value != const.NOVARY.value def dv(self, new_dv=None): """ Gets or sets dimension variance of each dimension of variable. If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Parameters ========== new_dv : list of boolean Each element True to change that dimension to dimension variance, False to change to not dimension variance. (Unspecified to simply check variance.) Returns ======= out : list of boolean True if that dimension has variance, else false. """ ndims = self._n_dims() if new_dv != None: if len(new_dv) != ndims: raise ValueError('Must specify variance for ' + str(ndims) + 'dimensions.') varies = (ctypes.c_long * ndims)( *[const.VARY if dv else const.NOVARY for dv in new_dv]) self._call(const.PUT_, const.zVAR_DIMVARYS_, varies) if ndims == 0: return [] varies = (ctypes.c_long * const.CDF_MAX_DIMS)() self._call(const.GET_, const.zVAR_DIMVARYS_, varies) return [dv != const.NOVARY.value for dv in varies[0:ndims]] def _call(self, *args, **kwargs): """Select this CDF and variable and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. @param args: Passed directly to the CDF library interface. Useful constants are defined in the :py:mod:`pycdf.const` module of this package. @type args: various, see :py:mod:`ctypes`. 
@return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self.cdf_file._call( const.SELECT_, const.zVAR_, ctypes.c_long(self.cdf_file.var_num(self._name)), *args, **kwargs) def _np_type(self): """Returns the numpy type of this variable This is the numpy type that will come directly out of the CDF; see :meth:`dtype` for the representation post-conversion. Raises ====== CDFError : for library-reported error or failure to find numpy type Returns ======= out : dtype numpy dtype that will hold value from this variable """ cdftype = self.type() if cdftype == const.CDF_CHAR.value or cdftype == const.CDF_UCHAR.value: return numpy.dtype('S' + str(self._nelems())) try: return lib.numpytypedict[cdftype] except KeyError: raise CDFError(const.BAD_DATA_TYPE) def type(self, new_type=None): """ Returns or sets the CDF type of this variable Parameters ========== new_type : ctypes.c_long the new type from :mod:`~pycdf.const` Returns ======= out : int CDF type """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) n_elements = ctypes.c_long(self._nelems()) self._call(const.PUT_, const.zVAR_DATASPEC_, new_type, n_elements) self._type = None if self._type is None: cdftype = ctypes.c_long(0) self._call(const.GET_, const.zVAR_DATATYPE_, ctypes.byref(cdftype)) self._type = cdftype.value return self._type def _nelems(self): """Number of elements for each value in this variable This is the length of strings for CHAR and UCHAR, should be 1 otherwise. @return: length of strings @rtype: int """ nelems = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMELEMS_, ctypes.byref(nelems)) return nelems.value def name(self): """ Returns the name of this variable Returns ======= out : str variable's name """ if isinstance(self._name, str): return self._name elif isinstance(self._name, bytes): return self._name.decode() def compress(self, comptype=None, param=None): """ Set or check the compression of this variable Compression may not be changeable on variables with data already written; even deleting the data may not permit the change. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
Returns ======= out : tuple the (comptype, param) currently in effect """ return _compress(self, comptype, param) def copy(self): """ Copies all data and attributes from this variable Returns ======= out : :class:`VarCopy` list of all data in record order """ return VarCopy(self) def rename(self, new_name): """ Renames this variable Parameters ========== new_name : str the new name for this variable """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_VAR_NAME_LEN256: raise CDFError(const.BAD_VAR_NAME) self._call(const.PUT_, const.zVAR_NAME_, enc_name) self.cdf_file.add_to_cache( enc_name, self.cdf_file.var_num(self._name)) #Still in cache del self.cdf_file._var_nums[self._name] self._name = enc_name @property def shape(self): """ Provides the numpy array-like shape of this variable. Returns a tuple; first element is number of records (RV variable only) And the rest provide the dimensionality of the variable. .. note:: Assigning to this attribute will not change the shape. """ if self.rv(): return tuple([len(self)] + self._dim_sizes()) else: return tuple(self._dim_sizes()) @property def dtype(self): """ Provide the numpy dtype equivalent to the CDF type of this variable. Data from this variable will be returned in numpy arrays of this type. See Also -------- type """ cdftype = self.type() if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str is not bytes and not self._raw: return numpy.dtype('U' + str(self._nelems())) if cdftype in (const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value) and not self._raw: return numpy.dtype('O') return self._np_type() def _get_attrs(self): """Get attribute list Provide access to the zVar's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = zAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. """) class _Hyperslice(object): """Represents a CDF 'slice' used for the hyper CDF functions For internal module use only. @ivar dims: number of dimensions to this slice, usually number of dimensions to the variable plus one for the record, which represents the 0th (least rapidly varying) dimension. @type dims: int @ivar dimsizes: size of each dimension (0th is number of records) @type dimsizes: list of int @ivar starts: index of the start value for each dimension ('dimension indices' in CDF speak) @type starts: list of int @ivar counts: number of values to get from each dimension. Final result will be the product of everything in counts. ('dimension counts' in CDF speak) @type counts: numpy.array @ivar intervals: interval between successive indices to use for each dimension. ('dimension invervals' in CDF speak) @type intervals: list of int @ivar degen: is this dimension degenerate, i.e. should be removed in the returned dataset. A 3D array with one dimension degenerate will be returned as a 2D array (i.e. list-of-lists.) @type degen: numpy.array @ivar rev: should this dimension be returned in reverse order? 
@type rev: numpy.array @ivar column: is this slice in column-major mode (if false, row-major) @type column: boolean @ivar zvar: what CDF variable this object slices on @type zvar: :py:class:`pycdf.Var` @ivar expanded_key: fully-expanded version of the key passed to the constructor (all dimensions filled in) @type expanded_key: tuple @note: All dimension-related variables are stored row-major (Python order) """ def __init__(self, zvar, key): """Create a Hyperslice @param zvar: zVariable that this slices @type zvar: :py:class:`pycdf.Var` @param key: Python multi-dimensional slice as passed to __getitem__ @type key: tuple of slice and/or int @raise IndexError: if slice is out of range, mismatches dimensions, or otherwise unparsable. @raise ValueError: if slice has invalid values """ self.zvar = zvar self.rv = self.zvar.rv() #dim of records, + 1 record dim (NRV always is record 0) self.dims = zvar._n_dims() + 1 self.dimsizes = [len(zvar)] + \ zvar._dim_sizes() self.starts = [0] * self.dims self.counts = numpy.empty((self.dims,), dtype=numpy.int32) self.counts.fill(1) self.intervals = [1] * self.dims self.degen = numpy.zeros(self.dims, dtype=numpy.bool) self.rev = numpy.zeros(self.dims, dtype=numpy.bool) #key is: #1. a single value (integer or slice object) if called 1D #2. a tuple (of integers and/or slice objects) if called nD #3. Each item is either a single value (degenerate dim) # or a slice object. if not hasattr(key, '__len__'): #Not a container object, pack in tuple key = (key, ) if not self.rv: key = (0, ) + key #NRV, so always get 0th record (degenerate) key = self.expand_ellipsis(key, self.dims) if self.rv: #special-cases for RV variables if len(key) == 1: #get all data for this record(s) key = self.expand_ellipsis(key + (Ellipsis, ), self.dims) elif len(key) == self.dims - 1: #get same slice from each record key = (slice(None, None, None), ) + key if len(key) == self.dims: self.expanded_key = key for i in range(self.dims): idx = key[i] if hasattr(idx, 'start'): #slice (self.starts[i], self.counts[i], self.intervals[i], self.rev[i]) = \ self.convert_range(idx.start, idx.stop, idx.step, self.dimsizes[i]) else: #Single degenerate value if idx < 0: idx += self.dimsizes[i] if idx != 0 and (idx >= self.dimsizes[i] or idx < 0): raise IndexError('list index out of range') self.starts[i] = idx self.degen[i] = True else: raise IndexError('Slice does not match dimensions for zVar ' + str(zvar._name)) self.column = zvar.cdf_file.col_major() def expected_dims(self, data=None): """Calculate size of non-degenerate dimensions Figures out size, in each dimension, of expected input data @return: size of each dimension for this slice, excluding degenerate @rtype: list of int """ return [self.counts[i] for i in range(self.dims) if not self.degen[i]] def expand(self, data): """Expands the record dimension of this slice to hold a set of data If the length of data (outermost dimension) is larger than the record count (counts[0]) for this slice, expand the slice to hold all the data. This requires that the record dimension of the slice not be degenerate, and also that it not have been completely specified when the hyperslice was created (i.e. record dimension either ellipsis or no specified stop.) Does *not* expand any other dimension, since that's Very Hard in CDF. 
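For example (illustrative), if a record-varying variable currently has 5 records, assigning ``var[0:] = data`` with 8 records of data expands this slice to cover all 8; with only 3 records of data, the slice is truncated to 3.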
@param data: the data which are intended to be stored in this slice @type data: list """ rec_slice = self.expanded_key[0] if not self.rv or isinstance(data, str_classes) or self.degen[0] or \ not hasattr(rec_slice, 'stop'): return if len(data) < self.counts[0]: #Truncate to fit data if rec_slice.stop is None and rec_slice.step in (None, 1): self.counts[0] = len(data) elif len(data) > self.counts[0]: #Expand to fit data if rec_slice.step in (None, 1): self.counts[0] = len(data) def create_array(self): """Creates a numpy array to hold the data from this slice Returns ======= out : numpy.array array sized, typed, and dimensioned to hold data from this slice """ counts = self.counts degen = self.degen if self.column: counts = self.reorder(counts) degen = self.reorder(degen) #TODO: Forcing C order for now, revert to using self.column later array = numpy.empty( [counts[i] for i in range(len(counts)) if not degen[i]], self.zvar._np_type(), order='C') return numpy.require(array, requirements=('C', 'A', 'W')) def convert_input_array(self, buffer): """Converts a buffer of raw data from this slice EPOCH(16) variables always need to be converted. CHAR need converted to Unicode if py3k Parameters ========== buffer : numpy.array data as read from the CDF file Returns ======= out : numpy.array converted data """ result = self._flip_array(buffer) #Convert to derived types cdftype = self.zvar.type() if not self.zvar._raw: if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str != bytes: dt = numpy.dtype('U{0}'.format(result.dtype.itemsize)) result = numpy.require(numpy.char.array(result).decode(), dtype=dt) elif cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(result) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(result) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(result) return result def convert_output_array(self, buffer): """Convert a buffer of data that will go into this slice Parameters ========== buffer : numpy.array data to go into the CDF file Returns ======= out : numpy.array input with majority flipped and dimensions reversed to be suitable to pass directly to CDF library. """ buffer = self._flip_array(buffer) return numpy.require(buffer, requirements=('C', 'A', 'W')) def _flip_array(self, data): """ Operations for majority, etc. common between convert_input and _output """ cdftype = self.zvar.type() #Flip majority if any non-degenerate dimensions exist if self.column and not min(self.degen): #Record-number dim degen, swap whole thing if self.degen[0]: if cdftype == const.CDF_EPOCH16.value: #Maintain last dimension data = data.transpose( list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose() #Record-number dimension is not degenerate, so keep it first else: if cdftype == const.CDF_EPOCH16.value: data = data.transpose( [0] + list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose( [0] + list(range(len(data.shape) - 1, 0, -1))) #Reverse non-degenerate dimensions in rev #Remember that the degenerate indices are already gone! 
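#Descriptive note: rev[i] was set by a negative-step slice (e.g. var[..., ::-1]);
#those axes of the already-collapsed result array are flipped below by
#indexing with slice(None, None, -1).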
if self.rev.any(): sliced = [(slice(None, None, -1) if self.rev[i] else slice(None)) for i in range(self.dims) if not self.degen[i]] if cdftype == const.CDF_EPOCH16.value: #don't reverse last dim sliced.extend(slice(None)) data = operator.getitem(data, tuple(sliced)) return data def select(self): """Selects this hyperslice in the CDF Calls the CDF library to select the CDF, variable, records, and array elements corresponding to this slice. """ args = (const.SELECT_, const.zVAR_RECNUMBER_, ctypes.c_long(self.starts[0]), const.SELECT_, const.zVAR_RECCOUNT_, ctypes.c_long(self.counts[0]), const.SELECT_, const.zVAR_RECINTERVAL_, ctypes.c_long(self.intervals[0])) if self.dims > 1: dims = self.dims - 1 args += (const.SELECT_, const.zVAR_DIMINDICES_, (ctypes.c_long * dims)(*self.starts[1:]), const.SELECT_, const.zVAR_DIMCOUNTS_, (ctypes.c_long * dims)(*self.counts[1:]), const.SELECT_, const.zVAR_DIMINTERVALS_, (ctypes.c_long * dims)(*self.intervals[1:])) self.zvar._call(*args) @staticmethod def expand_ellipsis(slices, n_dims): """Expands any ellipses into correct number of full-size slices @param slices: tuple of slices, integers, or ellipse objects @type slices: tuple @param n_dims: number of dimensions this slice is over @type n_dims: int @return: L{slices} with ellipses replaced by appropriate number of full-dimension slices @rtype: tuple @raise IndexError: if ellipses specified when already have enough dimensions """ if slices is Ellipsis: return tuple([slice(None, None, None) for i in range(n_dims)]) #Elements might be numpy arrays, so can't use in/index idx = [i for i, v in enumerate(slices) if v is Ellipsis] if not idx: #no ellipsis return slices if len(idx) > 1: #multiples! raise IndexError('Ellipses can only be used once per slice.') idx = idx[0] #how many dims to expand ellipsis to #remember the ellipsis is in len(slices) and must be replaced! extra = n_dims - len(slices) + 1 if extra < 0: raise IndexError('too many indices') result = slices[0:idx] + (slice(None), ) * extra + slices[idx+1:] return result @staticmethod def check_well_formed(data): """Checks if input data is well-formed, regular array""" d = numpy.asanyarray(data) if d.dtype == numpy.object: #this is probably going to be bad try: len(d.flat[0]) except TypeError: #at least it's not a list pass else: raise ValueError( 'Data must be well-formed, regular array of number, ' 'string, or datetime') @staticmethod def dimensions(data): """Finds the dimensions of a nested list-of-lists @param data: data of which dimensions are desired @type data: list (of lists) @return: dimensions of L{data}, in order outside-in @rtype: list of int @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) _Hyperslice.check_well_formed(d) return d.shape @staticmethod def types(data, backward=False): """Find dimensions and valid types of a nested list-of-lists Any given data may be representable by a range of CDF types; infer the CDF types which can represent this data. This breaks down to: 1. Proper kind (numerical, string, time) 2. Proper range (stores highest and lowest number) 3. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: 1. Type that matches precision of data first, then 2. integer type before float type, then 3. Smallest type first, then 4. signed type first, then 5. specifically-named (CDF_BYTE) vs. 
generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if L{data} specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: 1. absolute values between 0 and 3e-39 2. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. @param data: data for which dimensions and CDF types are desired @type data: list (of lists) @param backward: limit to pre-CDF3 types @type backward: bool @return: dimensions of L{data}, in order outside-in; CDF types which can represent this data; number of elements required (i.e. length of longest string) @rtype: 3-tuple of lists ([int], [ctypes.c_long], [int]) @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) dims = d.shape elements = 1 types = [] _Hyperslice.check_well_formed(d) if d.dtype.kind in ('S', 'U'): #it's a string types = [const.CDF_CHAR, const.CDF_UCHAR] elements = d.dtype.itemsize if d.dtype.kind == 'U': #UTF-8 uses 4 bytes per elements //= 4 elif d.size and hasattr(numpy.ma.getdata(d).flat[0], 'microsecond'): if max((dt.microsecond % 1000 for dt in d.flat)) > 0: types = [const.CDF_EPOCH16, const.CDF_EPOCH, const.CDF_TIME_TT2000] else: types = [const.CDF_EPOCH, const.CDF_EPOCH16, const.CDF_TIME_TT2000] if backward: del types[types.index(const.CDF_EPOCH16)] del types[-1] elif not lib.supports_int8: del types[-1] elif d is data or isinstance(data, numpy.generic): #numpy array came in, use its type (or byte-swapped) types = [k for k in lib.numpytypedict if (lib.numpytypedict[k] == d.dtype or lib.numpytypedict[k] == d.dtype.newbyteorder()) and not k in lib.timetypes] if (not lib.supports_int8 or backward) \ and const.CDF_INT8.value in types: del types[types.index(const.CDF_INT8.value)] #Maintain priority to match the ordered lists below: #float/double (44, 45) before real (21/22), and #byte (41) before int (1) before char (51). So hack. #Consider making typedict an ordered dict once 2.6 is dead. types.sort(key=lambda x: x % 50, reverse=True) if not types: #not a numpy array, or can't parse its type if d.dtype.kind == 'O': #Object. 
Try to make it numeric #Can't do safe casting from Object, so try and compare #Basically try most restrictive to least restrictive trytypes = (numpy.uint64, numpy.int64, numpy.float64) for t in trytypes: try: newd = d.astype(dtype=t) except: #Failure to cast, try next type continue if (newd == d).all(): #Values preserved, use this type d = newd #Continue with normal guessing, as if a list break else: #fell through without a match raise ValueError( 'Cannot convert generic objects to CDF type.') if d.dtype.kind in ('i', 'u'): #integer minval = numpy.min(d) maxval = numpy.max(d) if minval < 0: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_INT2, const.CDF_INT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 15, 2 ** 31, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] else: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_UINT1, const.CDF_INT2, const.CDF_UINT2, const.CDF_INT4, const.CDF_UINT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 8, 2 ** 15, 2 ** 16, 2 ** 31, 2 ** 32, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] types = [t for (t, c) in zip(types, cutoffs) if c > maxval and (minval >= 0 or minval >= -c)] if (not lib.supports_int8 or backward) \ and const.CDF_INT8 in types: del types[types.index(const.CDF_INT8)] else: #float if dims is (): if d != 0 and (abs(d) > 1.7e38 or abs(d) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] else: absolutes = numpy.abs(d[d != 0]) if len(absolutes) > 0 and \ (numpy.max(absolutes) > 1.7e38 or numpy.min(absolutes) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] types = [t.value if hasattr(t, 'value') else t for t in types] return (dims, types, elements) @staticmethod def reorder(seq): """Reorders seq to switch array majority Used to take an array of subscripts between row and column majority. First element is not touched, being the record number. @param seq: a sequence of *subscripts* @type seq: sequence of integers @return: seq with all but element 0 reversed in order @rtype: sequence of integers """ return numpy.concatenate((seq[0:1], numpy.flipud(seq)[:-1])) @staticmethod def convert_range(start, stop, step, size): """Converts a start/stop/step range to start/count/interval (i.e. changes from Python-style slice to CDF-style) @param start: index to start a slice at, may be none or negative @type start: int @param stop: index at end of slice (one-past, standard Python), may be none or negative @type stop: int @param step: interval for stepping through stlice @type step: int @param size: size of list to slice @type size: int @return: (start, count, interval, rev) where: 1. start is the start index, normalized to be within the size of the list and negatives handled 2. count is the number of records in the slice, guaranteed to stop before the end 3. interval is the skip between records 4. rev indicates whether the sequence should be reversed @rtype: (int, int, int, boolean) """ (start, stop, step) = slice(start, stop, step).indices(size) if step < 0: step *= -1 count = int((start - stop + step - 1) / step) start = start - (count - 1) * step rev = True else: count = int((stop - start + step - 1) / step) rev = False if count < 0: count = 0 start = 0 return (start, count, step, rev) class Attr(MutableSequence): """An attribute, g or z, for a CDF .. 
warning:: This class should not be used directly, but only in its subclasses, :class:`gAttr` and :class:`zAttr`. The methods listed here are safe to use in the subclasses. Represents a CDF attribute, providing access to the Entries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. An introduction to CDF attributes can be found in section 2.4 of the CDF user's guide. Each element of the list is a single Entry of the appropriate type. The index to the elements is the Entry number. Multi-dimensional slicing is *not* supported; an Entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry .. autosummary:: ~Attr.append ~Attr.has_entry ~Attr.insert ~Attr.max_idx ~Attr.new ~Attr.number ~Attr.rename ~Attr.type .. automethod:: append .. automethod:: has_entry .. automethod:: insert .. automethod:: max_idx .. automethod:: new .. automethod:: number .. automethod:: rename .. automethod:: type """ def __init__(self, cdf_file, attr_name, create=False): """Initialize this attribute @param cdf_file: CDF file containing this attribute @type cdf_file: :py:class:`pycdf.CDF` @param attr_name: Name of this attribute @type attr_name: str @param create: True to create attribute, False to look up existing. @type create: bool """ self._cdf_file = cdf_file self._raw = False if isinstance(attr_name, str_classes): try: self._name = attr_name.encode('ascii') except AttributeError: self._name = attr_name attrno = ctypes.c_long() if create: self._cdf_file._call(const.CREATE_, const.ATTR_, self._name, self.SCOPE, ctypes.byref(attrno)) self._cdf_file.add_attr_to_cache( self._name, attrno.value, self.SCOPE == const.GLOBAL_SCOPE) else: #Ensure exists, and populate cache. See scope note below attrno, scope = self._cdf_file.attr_num(self._name) else: name = ctypes.create_string_buffer(const.CDF_ATTR_NAME_LEN256 + 1) scope = ctypes.c_long(0) self._cdf_file._call(const.SELECT_, const.ATTR_, ctypes.c_long(attr_name)) #Because it's possible to create a gAttr Python objecting #referencing an Attribute with variable scope, and vice-versa, #do NOT assume the scope matches #(Higher level code checks for that being a bad thing.) self._cdf_file._call( const.GET_, const.ATTR_NAME_, name, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) self._name = name.value.rstrip() if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) self._cdf_file.add_attr_to_cache(self._name, attr_name, scope) def __getitem__(self, key): """Return a slice of Entries. Because Attributes may be sparse, a multi-element slice will return None for those elements which do not have associated Entries. @param key: index or range of Entry number to return @type key: slice or int @return: a list of entries, appropriate type. @raise IndexError: if L{key} is an int and that Entry number does not exist. 
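@note: Purely illustrative example, assuming C{attribute} is an existing
       L{Attr} with Entries 0 and 2 defined but not 1:
       >>> attribute[0:3]   #gaps in the slice come back as None
       [1, None, 3]
       >>> attribute[1]     #a missing single Entry raises IndexError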
""" if key is Ellipsis: key = slice(None, None, None) if hasattr(key, 'indices'): idx = range(*key.indices(self.max_idx() + 1)) return [self._get_entry(i) if self.has_entry(i) else None for i in idx] else: if self.has_entry(key): return self._get_entry(key) else: raise IndexError('list index ' + str(key) + ' out of range.') def _check_other_entries(self, types): """Try to get the type of this entry from others in the Attribute For zAttrs, checks if all other Entries are the same type, and at least one doesn't match its zVar, i.e. Entry type dominates (otherwise assumption is the Var type dominates). For gAttrs, checks all other Entries, and gives priority to the one that's earliest in the possible type list and exists in other Entries. This is only one component of Entry type guessing! :param list types: CDF types that are candidates (match the data) :return: The type discerned from other Entries, or None """ if self.ENTRY_ == const.zENTRY_: #If everything else is the same entry type, #and one is not the same as its var, probably #all entries should be of that type cand_et = None #The Entry type that might work one_var_diff = False #One Var has a type different from Entry for num in range(self.max_idx() + 1): if not self.has_entry(num): continue vartype = self._cdf_file[num].type() entrytype = self.type(num) if vartype != entrytype: one_var_diff = True if cand_et is None: if not entrytype in types: return None #One var has Entry with "impossible" type cand_et = entrytype elif cand_et != entrytype: return None #Two vars have Entries with different types if one_var_diff and cand_et is not None: return cand_et else: # Of those types which exist in other entries, # find the one which is earliest # in types, i.e. the preferred type entrytypes = [self.type(num) for num in range(self.max_idx() + 1) if self.has_entry(num)] entrytypes = [et for et in entrytypes if et in types] if entrytypes: return types[ min([types.index(et) for et in entrytypes])] return None def __setitem__(self, key, data): """Set a slice of Entries. @param key: index or range of Entry numbers to set @type key: slice or int @param data: the data to set these entries to. Normally each entry should be a sequence; if a scalar is provided, it is treated as a single-element list. @type data: scalar or list @raise ValueError: if size of {data} does not match size of L{key} @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. 
""" if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): #Single value, promote everything a dimension idx = (key, key + 1, 1) data = [data] else: idx = key.indices(self.max_idx() + 1) if key.step is None or key.step > 0: #Iterating forward, extend slice to match data if len(data) > len(range(*idx)): idx = (idx[0], idx[0] + idx[2] * len(data), idx[2]) #get, and check, types and sizes for all data #checks first so don't have error after changing half the Entries data_idx = -1 typelist = [] for i in range(*idx): data_idx += 1 if data_idx >= len(data): continue datum = data[data_idx] if datum is None: typelist[i] = (None, None, None) continue (dims, types, elements) = _Hyperslice.types( datum, backward=self._cdf_file.backward) if len(types) <= 0: raise ValueError('Cannot find a matching CDF type.') if len(dims) > 1: raise ValueError('Entries must be scalar or 1D.') elif len(dims) == 1 and isinstance(datum[0], str_classes): raise ValueError('Entry strings must be scalar.') entry_type = None if self.has_entry(i): #If the entry already exists, match its type entry_type = self.type(i) if not entry_type in types: entry_type = None if entry_type is None: #Check other entries for this attribute entry_type = self._check_other_entries(types) if entry_type is None and self.ENTRY_ == const.zENTRY_: #Fall back to zVar type vartype = self._cdf_file[i].type() if vartype in types: entry_type = vartype else: entry_type = types[0] elif entry_type is None: entry_type = types[0] if not entry_type in lib.numpytypedict: raise ValueError('Cannot find a matching numpy type.') typelist.append((dims, entry_type, elements)) data_idx = -1 for i in range(*idx): data_idx += 1 if data_idx >= len(data) or data[data_idx] is None: if self.has_entry(i): del self[i] continue datum = data[data_idx] (dims, entry_type, elements) = typelist[data_idx] self._write_entry(i, datum, entry_type, dims, elements) def __delitem__(self, key): """Delete a slice of Entries. @param key: index or range of Entry numbers to delete @type key: slice or int @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. """ if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): idx = (key, key + 1, 1) else: idx = key.indices(self.max_idx() + 1) for i in range(*idx): self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(i), const.DELETE_, self.ENTRY_) def __iter__(self, current=0): """Iterates over all entries in this Attribute Returns data from one entry at a time until reaches the end. @note: Returned in entry-number order. """ while current <= self.max_idx(): if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current += 1 def __reversed__(self, current=None): """Iterates over all entries in this Attribute Returns data from one entry at a time, starting at end and going to beginning. @note: Returned in entry-number order. """ if current is None: current = self.max_idx() while current >= 0: if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current -= 1 def __len__(self): """Number of Entries for this Attr. NOT same as max Entry number. 
@return: Number of Entries @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_NUMENTRIES_, ctypes.byref(count)) return count.value def __repr__(self): """Returns representation of an attribute Cannot return anything that can be eval'd to create a copy of the attribtute, so just wrap the informal representation in angle brackets. @return: all the data in this attribute @rtype: str """ return '<\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute This is an 'informal' representation in that it cannot be evaluated directly to create an L{Attr}. @return: all the data in this attribute @rtype: str """ if self._cdf_file._opened: return '\n'.join([str(item) for item in self]) else: if isinstance(self._name, str): return 'Attribute "{0}" in closed CDF {1}'.format( self._name, self._cdf_file.pathname) else: return 'Attribute "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self._cdf_file.pathname.decode('ascii')) def insert(self, index, data): """Insert an entry at a particular number Inserts entry at particular number while moving all subsequent entries to one entry number later. Does not close gaps. Parameters ========== index : int index where to put the new entry data : data for the new entry """ max_entry = self.max_idx() if index > max_entry: #Easy case self[index] = data return for i in range(max_entry, index - 1, -1): if self.has_entry(i+1): self.__delitem__(i+1) if self.has_entry(i): self.new(self.__getitem__(i), type=self.type(i), number=i+1) self[index] = data def append(self, data): """Add an entry to end of attribute Puts entry after last defined entry (does not fill gaps) Parameters ========== data : data for the new entry """ self[self.max_idx() + 1] = data def _call(self, *args, **kwargs): """Select this CDF and Attr and call the CDF internal interface @param args: Passed directly to the CDF library interface. @type args: various, see :py:mod:`ctypes`. @return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self._cdf_file._call( const.SELECT_, const.ATTR_, ctypes.c_long(self._cdf_file.attr_num(self._name)[0]), *args, **kwargs) def _entry_len(self, number): """Number of elements in an Entry @param number: number of Entry @type number: int @return: number of elements @rtype: int """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') count = ctypes.c_long(0) self._call( const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_NUMELEMS_, ctypes.byref(count)) return count.value def type(self, number, new_type=None): """Find or change the CDF type of a particular Entry number Parameters ========== number : int number of Entry to check or change Other Parameters ================ new_type type to change this Entry to, from :mod:`~pycdf.const`. Omit to only check type. Returns ======= out : int CDF variable type, see :mod:`~pycdf.const` Notes ===== If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) size = ctypes.c_long(self._entry_len(number)) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATASPEC_, new_type, size, ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') cdftype = ctypes.c_long(0) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATATYPE_, ctypes.byref(cdftype), ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') return cdftype.value def has_entry(self, number): """Check if this attribute has a particular Entry number Parameters ========== number : int number of Entry to check or change Returns ======= out : bool True if ``number`` is a valid entry number; False if not """ status = self._call(const.CONFIRM_, self.ENTRY_EXISTENCE_, ctypes.c_long(number), ignore=(const.NO_SUCH_ENTRY, )) return not status == const.NO_SUCH_ENTRY def max_idx(self): """Maximum index of Entries for this Attr Returns ======= out : int maximum Entry number """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_MAXENTRY_, ctypes.byref(count)) return count.value def new(self, data, type=None, number=None): """Create a new Entry in this Attribute .. note:: If ``number`` is provided and an Entry with that number already exists, it will be overwritten. Parameters ========== data data to put in the Entry Other Parameters ================ type : int type of the new Entry, from :mod:`~pycdf.const` (otherwise guessed from ``data``) number : int Entry number to write, default is lowest available number. """ if number is None: number = 0 while self.has_entry(number): number += 1 (dims, types, elements) = _Hyperslice.types( data, backward=self._cdf_file.backward) if type is None: #Guess based on other entries type = self._check_other_entries(types) if type is None and self.ENTRY_ == const.zENTRY_: #Try to match variable type vartype = self._cdf_file[number].type() if vartype in types: type = vartype if type is None: type = types[0] elif hasattr(type, 'value'): type = type.value self._write_entry(number, data, type, dims, elements) def number(self): """Find the attribute number for this attribute Returns ======= out : int attribute number """ no = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.ATTR_NUMBER_, self._name, ctypes.byref(no)) return no.value def global_scope(self): """Determine scope of this attribute. Returns ======= out : bool True if global (i.e. gAttr), False if zAttr """ return self._cdf_file.attr_num(self._name)[1] def rename(self, new_name): """Rename this attribute Renaming a zAttribute renames it for *all* zVariables in this CDF! 
Parameters ========== new_name : str the new name of the attribute """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_ATTR_NAME_LEN256: raise CDFError(const.BAD_ATTR_NAME) self._call(const.PUT_, const.ATTR_NAME_, enc_name) self._cdf_file.add_attr_to_cache( enc_name, *self._cdf_file.attr_num(self._name)) #still in cache del self._cdf_file._attr_info[self._name] self._name = enc_name def _get_entry(self, number): """Read an Entry associated with this L{Attr} @param number: number of Entry to return @type number: int @return: data from entry numbered L{number} @rtype: list or str """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') #Make a big enough buffer length = self._entry_len(number) cdftype = self.type(number) if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): buff = numpy.empty((), 'S{0}'.format(length), order='C') else: if not cdftype in lib.numpytypedict: raise CDFError(const.BAD_DATA_TYPE) buff = numpy.empty((length,), lib.numpytypedict[cdftype], order='C') buff = numpy.require(buff, requirements=('C', 'A', 'W')) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATA_, buff.ctypes.data_as(ctypes.c_void_p)) #decode if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): if str == bytes or self._raw: #Py2k, leave as bytes result = bytes(buff) else: #Py3k, make unicode result = str(numpy.char.array(buff).decode()) else: if not self._raw: if cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(buff) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(buff) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(buff) else: result = buff else: result = buff if length == 1: result = result[0] return result def _write_entry(self, number, data, cdf_type, dims, elements): """Write an Entry to this Attr. @param number: number of Entry to write @type number: int @param data: data to write @param cdf_type: the CDF type to write, from :py:mod:`pycdf.const` @param dims: dimensions of L{data} @type dims: list @param elements: number of elements in L{data}, 1 unless it is a string @type elements: int """ if len(dims) == 0: n_write = 1 else: n_write = dims[0] if cdf_type in (const.CDF_CHAR.value, const.CDF_UCHAR.value): data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.dtype('S' + str(elements))) n_write = elements elif cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data), except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) elif cdf_type in lib.numpytypedict: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=lib.numpytypedict[cdf_type]) else: raise CDFError(const.BAD_DATA_TYPE) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATA_, ctypes.c_long(cdf_type), ctypes.c_long(n_write), data.ctypes.data_as(ctypes.c_void_p)) def _delete(self): """Delete this Attribute Also deletes all Entries associated with it. 
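@note: Usually reached indirectly, e.g. (illustrative)
       C{del cdffile.attrs['TEXT']}, where C{cdffile} is an open,
       writable CDF.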
""" self._call(const.DELETE_, const.ATTR_) self._cdf_file.clear_attr_from_cache(self._name) self._name = None class zAttr(Attr): """zAttribute for zVariables within a CDF. .. warning:: Because zAttributes are shared across all variables in a CDF, directly manipulating them may have unexpected consequences. It is safest to operate on zEntries via :class:`zAttrList`. .. note:: When accessing a zAttr, pyCDF exposes only the zEntry corresponding to the associated zVariable. See Also ======== :class:`Attr` """ ENTRY_ = const.zENTRY_ ENTRY_DATA_ = const.zENTRY_DATA_ SCOPE = const.VARIABLE_SCOPE ENTRY_EXISTENCE_ = const.zENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMzENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXzENTRY_ ENTRY_NUMELEMS_ = const.zENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.zENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.zENTRY_DATASPEC_ def insert(self, index, data): """Insert entry at particular index number Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError def append(self, index, data): """Add entry to end of attribute list Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError class gAttr(Attr): """Global Attribute for a CDF Represents a CDF attribute, providing access to the gEntries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. Normally accessed by providing a key to a :class:`gAttrList`: >>> attribute = cdffile.attrs['attribute_name'] >>> first_gentry = attribute[0] Each element of the list is a single gEntry of the appropriate type. The index to the elements is the gEntry number. A gEntry may be either a single string or a 1D array of numerical type. Entries of numerical type (everything but CDF_CHAR and CDF_UCHAR) with a single element are returned as scalars; multiple-element entries are returned as a list. No provision is made for accessing below the entry level; the whole list is returned at once (but Python's slicing syntax can be used to extract individual items from that list.) Multi-dimensional slicing is *not* supported; an entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry gEntries are *not* necessarily contiguous; a gAttribute may have an entry 0 and entry 2 without an entry 1. :meth:`~Attr.len` will return the *number* of gEntries; use :meth:`~Attr.max_idx` to find the highest defined gEntry number and :meth:`~Attr.has_entry` to determine if a particular gEntry number exists. Iterating over all entries is also supported:: >>> entrylist = [entry for entry in attribute] Deleting gEntries will leave a "hole": >>> attribute[0:3] = [1, 2, 3] >>> del attribute[1] >>> attribute.has_entry(1) False >>> attribute.has_entry(2) True >>> print attribute[0:3] [1, None, 3] Multi-element slices over nonexistent gEntries will return ``None`` where no entry exists. Single-element indices for nonexistent gEntries will raise ``IndexError``. Assigning ``None`` to a gEntry will delete it. 
When assigning to a gEntry, the type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing gEntry of the same number in this gAttribute #. other gEntries in this gAttribute #. data-matching constraints described in :meth:`CDF.new`. See Also ======== :class:`Attr` """ ENTRY_ = const.gENTRY_ ENTRY_DATA_ = const.gENTRY_DATA_ SCOPE = const.GLOBAL_SCOPE ENTRY_EXISTENCE_ = const.gENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMgENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXgENTRY_ ENTRY_NUMELEMS_ = const.gENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.gENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.gENTRY_DATASPEC_ class AttrList(MutableMapping): """Object representing a list of attributes. .. warning:: This class should not be used directly, but only via its subclasses, :class:`gAttrList` and :class:`zAttrList`. Methods listed here are safe to use from the subclasses. .. autosummary:: ~AttrList.clone ~AttrList.copy ~AttrList.from_dict ~AttrList.new ~AttrList.rename .. automethod:: clone .. automethod:: copy .. automethod:: from_dict .. automethod:: new .. automethod:: rename """ def __init__(self, cdf_file, special_entry=None): """Initialize the attribute collection @param cdf_file: CDF these attributes are in @type cdf_file: :py:class:`pycdf.CDF` @param special_entry: callable which returns a "special" entry number, used to limit results for zAttrs to those which match the zVar (i.e. the var number) @type special_entry: callable """ self._cdf_file = cdf_file self.special_entry = special_entry def __getitem__(self, name): """Find an Attribute by name @param name: name of the Attribute to return @type name: str @return: attribute named L{name} @rtype: L{Attr} @raise KeyError: if there is no attribute named L{name} @raise CDFError: other errors in CDF library """ try: attrib = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attrib.global_scope() != self.global_scope: raise KeyError(name + ': no ' + self.attr_name + ' by that name.') return attrib def __setitem__(self, name, data): """Create an Attribute or change its entries @param name: name of Attribute to change @type name: str @param data: Entries to populate this Attribute with. Any existing Entries will be deleted! Another C{Attr} may be specified, in which case all its entries are copied. @type data: scalar, list, or L{Attr} """ if isinstance(data, AttrList): if name in self: del self[name] attr = self._get_or_create(name) for entryno in range(data.max_idx()): if data.has_entry(entryno): attr.new(data[entryno], data.type(entryno), entryno) else: attr = self._get_or_create(name) if isinstance(data, str_classes): data = [data] else: try: junk = len(data) except TypeError: data = [data] attr[:] = data del attr[len(data):] def __delitem__(self, name): """Delete an Attribute (and all its entries) @param name: name of Attribute to delete @type name: str """ try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) attr._delete() def __iter__(self, current=0): """Iterates over all Attr in this CDF or variable Returns name of one L{Attr} at a time until reaches the end. @note: Returned in number order. 
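@note: Illustrative usage (C{cdffile} is an open CDF); iteration yields
       the attribute names:
       >>> names = [name for name in cdffile.attrs]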
""" count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) while current < count.value: candidate = self.AttrType(self._cdf_file, current) if candidate.global_scope() == self.global_scope: if self.special_entry is None or \ candidate.has_entry(self.special_entry()): if str == bytes: value = yield(candidate._name) else: value = yield(candidate._name.decode()) if value != None: current = self[value].number() current += 1 def __repr__(self): """Returns representation of attribute list Cannot return anything that can be eval'd to create a copy of the list, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<' + self.__class__.__name__ + ':\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute list This is an 'informal' representation in that it cannot be evaluated directly to create an L{AttrList}. @return: all the data in this list of attributes @rtype: str """ if self._cdf_file._opened: return '\n'.join([key + ': ' + ( ('\n' + ' ' * (len(key) + 2)).join( [str(value[i]) + ' [' + lib.cdftypenames[value.type(i)] + ']' for i in range(value.max_idx() + 1) if value.has_entry(i)]) if isinstance(value, Attr) else str(value) + ' [' + lib.cdftypenames[self.type(key)] + ']' ) for (key, value) in sorted(self.items())]) else: if isinstance(self._cdf_file.pathname, str): return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname) else: return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname.decode('ascii')) def clone(self, master, name=None, new_name=None): """ Clones another attribute list, or one attribute from it, into this list. Parameters ========== master : AttrList the attribute list to copy from. This can be any dict-like object. Other Parameters ================ name : str (optional) name of attribute to clone (default: clone entire list) new_name : str (optional) name of the new attribute, default ``name`` """ if name is None: self._clone_list(master) else: self._clone_attr(master, name, new_name) def copy(self): """ Create a copy of this attribute list Returns ======= out : dict copy of the entries for all attributes in this list """ return dict((key, value[:] if isinstance(value, Attr) else value) for (key, value) in self.items()) def new(self, name, data=None, type=None): """ Create a new Attr in this AttrList Parameters ========== name : str name of the new Attribute Other Parameters ================ data data to put into the first entry in the new Attribute type CDF type of the first entry from :mod:`~pycdf.const`. Only used if data are specified. Raises ====== KeyError : if the name already exists in this list """ if name in self: raise KeyError(name + ' already exists.') #A zAttr without an Entry in this zVar will be a "get" not "create" attr = self._get_or_create(name) if data is not None: if self.special_entry is None: attr.new(data, type) else: attr.new(data, type, self.special_entry()) def rename(self, old_name, new_name): """ Rename an attribute in this list Renaming a zAttribute renames it for *all* zVariables in this CDF! Parameters ========== old_name : str the current name of the attribute new_name : str the new name of the attribute """ AttrList.__getitem__(self, old_name).rename(new_name) def from_dict(self, in_dict): """ Fill this list of attributes from a dictionary .. deprecated:: 0.1.5 Use :meth:`~pycdf.AttrList.clone` instead; it supports cloning from dictionaries. 
Parameters ========== in_dict : dict Attribute list is populated entirely from this dictionary; all existing attributes are deleted. """ warnings.warn("from_dict is deprecated and will be removed. Use clone.", DeprecationWarning) for k in in_dict: self[k] = in_dict[k] for k in list(self): if not k in in_dict: del self[k] def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{AttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name self[new_name] = master[name] def _clone_list(self, master): """Clones this attribute list from another @param master: the attribute list to copy from @type master: L{AttrList} """ for name in master: self._clone_attr(master, name) for name in list(self): #Can't iterate over a list we're changing if not name in master: del self[name] def _get_or_create(self, name): """Retrieve L{Attr} or create it if it doesn't exist @param name: name of the attribute to look up or create @type name: str @return: attribute with this name @rtype: L{Attr} """ attr = None try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status != const.NO_SUCH_ATTR: raise if attr is None: attr = self.AttrType(self._cdf_file, name, True) elif attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) return attr class gAttrList(AttrList): """ Object representing *all* the gAttributes in a CDF. Normally accessed as an attribute of an open :class:`CDF`: >>> global_attribs = cdffile.attrs Appears as a dictionary: keys are attribute names; each value is an attribute represented by a :class:`gAttr` object. To access the global attribute TEXT: >>> text_attr = cdffile.attrs['TEXT'] See Also ======== :class:`AttrList` """ AttrType = gAttr attr_name = 'gAttribute' global_scope = True def __len__(self): """ Number of gAttributes in this CDF Returns ======= out : int number of gAttributes in the CDF """ count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMgATTRS_, ctypes.byref(count)) return count.value class zAttrList(AttrList): """Object representing *all* the zAttributes in a zVariable. Normally accessed as an attribute of a :class:`Var` in an open CDF: >>> epoch_attribs = cdffile['Epoch'].attrs Appears as a dictionary: keys are attribute names, values are the value of the zEntry associated with the appropriate zVariable. Each vAttribute in a CDF may only have a *single* entry associated with each variable. The entry may be a string, a single numerical value, or a series of numerical values. Entries with multiple values are returned as an entire list; direct access to the individual elements is not possible. Example: finding the first dependency of (ISTP-compliant) variable ``Flux``: >>> print cdffile['Flux'].attrs['DEPEND_0'] zAttributes are shared among zVariables, one zEntry allowed per zVariable. (pyCDF hides this detail.) Deleting the last zEntry for a zAttribute will delete the underlying zAttribute. zEntries are created and destroyed by the usual dict methods on the zAttrlist: >>> epoch_attribs['new_entry'] = [1, 2, 4] #assign a list to new zEntry >>> del epoch_attribs['new_entry'] #delete the zEntry The type of the zEntry is guessed from data provided. 
The type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing zEntry corresponding to this zVar #. other zEntries in this zAttribute #. the type of this zVar #. data-matching constraints described in :py:meth:`CDF.new` See Also ======== :class:`AttrList` """ AttrType = zAttr attr_name = 'zAttribute' global_scope = False def __init__(self, zvar): """Initialize the attribute collection @param zvar: zVariable these attributes are in @param zvar: :py:class:`pycdf.Var` """ super(zAttrList, self).__init__(zvar.cdf_file, zvar._num) self._zvar = zvar def __getitem__(self, name): """Find an zEntry by name @param name: name of the zAttribute to return @type name: str @return: attribute named L{name} @rtype: L{zAttr} @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if attrib.has_entry(zvar_num): attrib._raw = self._zvar._raw return attrib[zvar_num] else: raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) def __delitem__(self, name): """Delete an zEntry by name @param name: name of the zEntry to delete @type name: str @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library @note: If this is the only remaining entry, the Attribute will be deleted. """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(str(name) + ': no such attribute for variable ' + str(self._zvar._name)) del attrib[zvar_num] if len(attrib) == 0: attrib._delete() def __setitem__(self, name, data): """Sets a zEntry by name The type of the zEntry is guessed from L{data}. The type is chosen to match the data; subject to that constraint, it will try to match (in order): 1. existing zEntry corresponding to this zVar 2. other zEntries in this zAttribute 3. the type of this zVar 4. data-matching constraints described in L{_Hyperslice.types} @param name: name of zAttribute; zEntry for this zVariable will be set in zAttribute by this name @type name: str @raise CDFError: errors in CDF library @raise ValueError: if unable to find a valid CDF type matching L{data}, or if L{data} is the wrong dimensions. """ try: attr = super(zAttrList, self).__getitem__(name) except KeyError: attr = zAttr(self._cdf_file, name, True) attr._raw = self._zvar._raw attr[self._zvar._num()] = data def __len__(self): """Number of zAttributes in this variable @return: number of zAttributes in the CDF which have entries for this variable. @rtype: int """ length = 0 count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) current = 0 while current < count.value: candidate = zAttr(self._cdf_file, current) if not candidate.global_scope(): if candidate.has_entry(self._zvar._num()): length += 1 current += 1 return length def type(self, name, new_type=None): """Find or change the CDF type of a zEntry in this zVar @param name: name of the zAttr to check or change @type name: str @param new_type: type to change it to, see :py:mod:`pycdf.const` @type new_type: ctypes.c_long @return: CDF variable type, see :py:mod:`pycdf.const` @rtype: int @note: If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) return attrib.type(zvar_num, new_type) def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{zAttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name if new_name in self: del self[new_name] self.new(new_name, master[name], master.type(name) if hasattr(master, 'type') else None)
def _datetime_to_tt2000_typepunned(self, dt):
        """
        Converts a Python datetime to a CDF TT2000 value

        Typepunned version that passes doubles as longlongs, to get
        around ARM calling convention oddness.

        Parameters
        ==========
        dt :  :class:`datetime.datetime`
            date and time to convert

        Returns
        =======
        out : int
            tt2000 corresponding to dt

        See Also
        ========
        v_datetime_to_tt2000
        """
        c_ll_p = ctypes.POINTER(ctypes.c_longlong)
        if dt.tzinfo != None and dt.utcoffset() != None:
            dt = dt - dt.utcoffset()
        dt = dt.replace(tzinfo=None)
        if dt == datetime.datetime.max:
            return -2**63
        return self._library.computeTT2000(
            ctypes.cast(ctypes.pointer(ctypes.c_double(
                dt.year)), c_ll_p).contents,
            ctypes.cast(ctypes.pointer(ctypes.c_double(
                dt.month)), c_ll_p).contents,
            ctypes.cast(ctypes.pointer(ctypes.c_double(
                dt.day)), c_ll_p).contents,
            ctypes.cast(ctypes.pointer(ctypes.c_double(
                dt.hour)), c_ll_p).contents,
            ctypes.cast(ctypes.pointer(ctypes.c_double(
                dt.minute)), c_ll_p).contents,
            ctypes.cast(ctypes.pointer(ctypes.c_double(
                dt.second)), c_ll_p).contents,
            ctypes.cast(ctypes.pointer(ctypes.c_double(
                dt.microsecond // 1000)), c_ll_p).contents,
            ctypes.cast(ctypes.pointer(ctypes.c_double(
                dt.microsecond % 1000)), c_ll_p).contents,
            ctypes.cast(ctypes.pointer(ctypes.c_double(
                0)), c_ll_p).contents)
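A quick sanity check of the type-punned path (illustrative; ``lib`` is the module-level Library instance created at import time, and the expected value is the same constant the constructor itself tests against):

    import datetime
    #2010-01-01 00:00:00 UTC corresponds to TT2000 315576066184000000
    assert lib.datetime_to_tt2000(datetime.datetime(2010, 1, 1)) == 315576066184000000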
969
1014
#!/usr/bin/env python # -*- coding: utf-8 -*- """ das developers note: This a is modification of the original SpacePy pycdf package. All refereneces to the greater spacepy package have been removed to create a small standalone module. --cwp 2018-10-18 The libcdf.so location code has been changed to find the version installed in anaconda. --cwp 2020-04-06 This package provides a Python interface to the Common Data Format (CDF) library used for many NASA missions, available at http://cdf.gsfc.nasa.gov/. It is targeted at Python 2.6+ and should work without change on either Python 2 or Python 3. The interface is intended to be 'pythonic' rather than reproducing the C interface. To open or close a CDF and access its variables, see the :class:`CDF` class. Accessing data within the variables is via the :class:`Var` class. The :data:`lib` object provides access to some routines that affect the functionality of the library in general. The :mod:`~pycdf.const` module contains constants useful for accessing the underlying library. Authors: Jon Niehof Institution: University of New Hampshire Contact: [email protected] Copyright 2010-2015 Los Alamos National Security, LLC. """ __contact__ = 'Jon Niehof, [email protected]' try: from collections.abc import MutableMapping, MutableSequence except ImportError: from collections import MutableMapping, MutableSequence import ctypes import ctypes.util import datetime import operator import os import os.path import shutil import sys import tempfile import warnings import weakref import numpy import numpy.ma #Import const AFTER library loaded, so failed load doesn't leave half-imported #from . import const try: str_classes = (str, bytes, unicode) except NameError: str_classes = (str, bytes) class Library(object): """ Abstraction of the base CDF C library and its state. Not normally intended for end-user use. An instance of this class is created at package load time as the :data:`~pycdf.lib` variable, providing access to the underlying C library if necessary. The CDF library itself is described in section 2.1 of the CDF user's guide, as well as the CDF C reference manual. Calling the C library directly requires knowledge of :mod:`ctypes`. Instantiating this object loads the C library, see :doc:`/pycdf` docs for details. .. autosummary:: ~Library.call ~Library.check_status ~Library.datetime_to_epoch ~Library.datetime_to_epoch16 ~Library.datetime_to_tt2000 ~Library.epoch_to_datetime ~Library.epoch_to_epoch16 ~Library.epoch_to_num ~Library.epoch_to_tt2000 ~Library.epoch16_to_datetime ~Library.epoch16_to_epoch ~Library.epoch16_to_tt2000 ~Library.set_backward supports_int8 ~Library.tt2000_to_datetime ~Library.tt2000_to_epoch ~Library.tt2000_to_epoch16 v_datetime_to_epoch v_datetime_to_epoch16 v_datetime_to_tt2000 v_epoch_to_datetime v_epoch_to_tt2000 v_epoch16_to_datetime v_epoch16_to_tt2000 v_tt2000_to_datetime v_tt2000_to_epoch v_tt2000_to_epoch16 libpath version .. automethod:: call .. automethod:: check_status .. automethod:: datetime_to_epoch .. automethod:: datetime_to_epoch16 .. automethod:: datetime_to_tt2000 .. automethod:: epoch_to_datetime .. automethod:: epoch_to_epoch16 .. automethod:: epoch_to_num .. automethod:: epoch_to_tt2000 .. automethod:: epoch16_to_datetime .. automethod:: epoch16_to_epoch .. automethod:: epoch16_to_tt2000 .. automethod:: set_backward .. attribute:: supports_int8 True if this library supports INT8 and TIME_TT2000 types; else False. .. automethod:: tt2000_to_datetime .. automethod:: tt2000_to_epoch .. 
automethod:: tt2000_to_epoch16 .. method:: v_datetime_to_epoch(datetime) A vectorized version of :meth:`datetime_to_epoch` which takes a numpy array of datetimes as input and returns an array of epochs. .. method:: v_datetime_to_epoch16(datetime) A vectorized version of :meth:`datetime_to_epoch16` which takes a numpy array of datetimes as input and returns an array of epoch16. .. method:: v_datetime_to_tt2000(datetime) A vectorized version of :meth:`datetime_to_tt2000` which takes a numpy array of datetimes as input and returns an array of TT2000. .. method:: v_epoch_to_datetime(epoch) A vectorized version of :meth:`epoch_to_datetime` which takes a numpy array of epochs as input and returns an array of datetimes. .. method:: v_epoch_to_tt2000(epoch) A vectorized version of :meth:`epoch_to_tt2000` which takes a numpy array of epochs as input and returns an array of tt2000s. .. method:: v_epoch16_to_datetime(epoch0, epoch1) A vectorized version of :meth:`epoch16_to_datetime` which takes a numpy array of epoch16 as input and returns an array of datetimes. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_epoch16_to_tt2000(epoch16) A vectorized version of :meth:`epoch16_to_tt2000` which takes a numpy array of epoch16 as input and returns an array of tt2000s. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_tt2000_to_datetime(tt2000) A vectorized version of :meth:`tt2000_to_datetime` which takes a numpy array of tt2000 as input and returns an array of datetimes. .. method:: v_tt2000_to_epoch(tt2000) A vectorized version of :meth:`tt2000_to_epoch` which takes a numpy array of tt2000 as input and returns an array of epochs. .. method:: v_tt2000_to_epoch16(tt2000) A vectorized version of :meth:`tt2000_to_epoch16` which takes a numpy array of tt2000 as input and returns an array of epoch16. .. attribute:: libpath The path where pycdf found the CDF C library, potentially useful in debugging. If this contains just the name of a file (with no path information), then the system linker found the library for pycdf. On Linux, ``ldconfig -p`` may be useful for displaying the system's library resolution. .. attribute:: version Version of the CDF library, (version, release, increment, subincrement) """ def __init__(self, libpath=None, library=None): """Load the CDF C library. Searches for the library in the order: 1. Appropriately-named file in CDF_LIB 2. Appropriately-named file in CDF_BASE 3. Standard library search path @raise CDFError: BAD_DATA_TYPE if can't map types properly """ if not 'CDF_TMP' in os.environ: os.environ['CDF_TMP'] = tempfile.gettempdir() if not library: if not libpath: self.libpath, self._library = self._find_lib() if self._library is None: raise Exception(( 'Cannot load CDF C library; checked {0}. 
' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(self.libpath))) else: self._library = ctypes.CDLL(libpath) self.libpath = libpath else: self._library = library self.libpath = libpath self._library.CDFlib.restype = ctypes.c_long #commonly used, so set it up here self._library.EPOCHbreakdown.restype = ctypes.c_long self._library.computeEPOCH.restype = ctypes.c_double self._library.computeEPOCH.argtypes = [ctypes.c_long] * 7 self._library.computeEPOCH16.restype = ctypes.c_double self._library.computeEPOCH16.argtypes = [ctypes.c_long] * 10 + \ [ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDFsetFileBackward'): self._library.CDFsetFileBackward.restype = None self._library.CDFsetFileBackward.argtypes = [ctypes.c_long] #Map old name to the 3.7.1+ name if not hasattr(self._library, 'computeTT2000') \ and hasattr(self._library, 'CDF_TT2000_from_UTC_parts'): self._library.computeTT2000 \ = self._library.CDF_TT2000_from_UTC_parts if hasattr(self._library, 'computeTT2000'): self._library.computeTT2000.restype = ctypes.c_longlong self._library.computeTT2000.argtypes = \ [ctypes.c_double] *9 #Map old name to the 3.7.1+ name if not hasattr(self._library, 'breakdownTT2000') \ and hasattr(self._library, 'CDF_TT2000_to_UTC_parts'): self._library.breakdownTT2000 \ = self._library.CDF_TT2000_to_UTC_parts if hasattr(self._library, 'breakdownTT2000'): self._library.breakdownTT2000.restype = None self._library.breakdownTT2000.argtypes = \ [ctypes.c_longlong] + [ctypes.POINTER(ctypes.c_double)] * 9 if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH'): self._library.CDF_TT2000_to_UTC_EPOCH.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH.argtypes = [ctypes.c_longlong] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH'): self._library.CDF_TT2000_from_UTC_EPOCH.restype = ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH.argtypes = [ctypes.c_double] if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH16'): self._library.CDF_TT2000_to_UTC_EPOCH16.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH16.argtypes = \ [ctypes.c_longlong, ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH16'): self._library.CDF_TT2000_from_UTC_EPOCH16.restype = \ ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH16.argtypes = \ [ctypes.POINTER(ctypes.c_double * 2)] #Get CDF version information ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) sub = ctypes.c_char(b' ') self.call(const.GET_, const.LIB_VERSION_, ctypes.byref(ver), const.GET_, const.LIB_RELEASE_, ctypes.byref(rel), const.GET_, const.LIB_INCREMENT_, ctypes.byref(inc), const.GET_, const.LIB_subINCREMENT_, ctypes.byref(sub)) ver = ver.value rel = rel.value inc = inc.value sub = sub.value self.version = (ver, rel, inc, sub) self._del_middle_rec_bug = ver < 3 or (ver == 3 and (rel < 4 or (rel == 4 and inc < 1))) self.supports_int8 = (ver > 3 or (ver == 3 and rel >=4)) self.cdftypenames = {const.CDF_BYTE.value: 'CDF_BYTE', const.CDF_CHAR.value: 'CDF_CHAR', const.CDF_INT1.value: 'CDF_INT1', const.CDF_UCHAR.value: 'CDF_UCHAR', const.CDF_UINT1.value: 'CDF_UINT1', const.CDF_INT2.value: 'CDF_INT2', const.CDF_UINT2.value: 'CDF_UINT2', const.CDF_INT4.value: 'CDF_INT4', const.CDF_UINT4.value: 'CDF_UINT4', const.CDF_INT8.value: 'CDF_INT8', const.CDF_FLOAT.value: 'CDF_FLOAT', const.CDF_REAL4.value: 'CDF_REAL4', const.CDF_DOUBLE.value: 'CDF_DOUBLE', const.CDF_REAL8.value: 'CDF_REAL8', const.CDF_EPOCH.value: 'CDF_EPOCH', 
const.CDF_EPOCH16.value: 'CDF_EPOCH16', const.CDF_TIME_TT2000.value: 'CDF_TIME_TT2000', } self.numpytypedict = {const.CDF_BYTE.value: numpy.int8, const.CDF_CHAR.value: numpy.int8, const.CDF_INT1.value: numpy.int8, const.CDF_UCHAR.value: numpy.uint8, const.CDF_UINT1.value: numpy.uint8, const.CDF_INT2.value: numpy.int16, const.CDF_UINT2.value: numpy.uint16, const.CDF_INT4.value: numpy.int32, const.CDF_UINT4.value: numpy.uint32, const.CDF_INT8.value: numpy.int64, const.CDF_FLOAT.value: numpy.float32, const.CDF_REAL4.value: numpy.float32, const.CDF_DOUBLE.value: numpy.float64, const.CDF_REAL8.value: numpy.float64, const.CDF_EPOCH.value: numpy.float64, const.CDF_EPOCH16.value: numpy.dtype((numpy.float64, 2)), const.CDF_TIME_TT2000.value: numpy.int64, } self.timetypes = [const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value] if not self.supports_int8: del self.cdftypenames[const.CDF_INT8.value] del self.numpytypedict[const.CDF_INT8.value] del self.cdftypenames[const.CDF_TIME_TT2000.value] del self.numpytypedict[const.CDF_TIME_TT2000.value] elif sys.platform.startswith('linux') \ and os.uname()[4].startswith('arm') \ and hasattr(self._library, 'computeTT2000') \ and self._library.computeTT2000( 2010, 1, 1, 0, 0, 0, 0, 0, 0) != 315576066184000000: #TT2000 call failed, so probably need to type-pun #double arguments to variadic functions. #Calling convention for non-variadic functions with floats #is unique, but convention for ints is same as variadic. #So type-pun arguments to integers to force that calling #convention. if ctypes.sizeof(ctypes.c_longlong) != \ ctypes.sizeof(ctypes.c_double): warnings.warn('ARM with unknown type sizes; ' 'TT2000 functions will not work.') else: self._library.computeTT2000.argtypes = \ [ctypes.c_longlong] * 9 c_ll_p = ctypes.POINTER(ctypes.c_longlong) if self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( 2010)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) != 315576066184000000: warnings.warn('ARM with unknown calling convention; ' 'TT2000 functions will not work.') self.datetime_to_tt2000 = self._datetime_to_tt2000_typepunned v_epoch16_to_datetime = numpy.frompyfunc( self.epoch16_to_datetime, 2, 1) self.v_epoch16_to_datetime = \ lambda x: v_epoch16_to_datetime(x[..., 0], x[..., 1]) self.v_epoch_to_datetime = numpy.frompyfunc( self.epoch_to_datetime, 1, 1) self.v_tt2000_to_datetime = numpy.frompyfunc( self.tt2000_to_datetime, 1, 1) self.v_datetime_to_epoch = numpy.vectorize( self.datetime_to_epoch, otypes=[numpy.float64]) v_datetime_to_epoch16 = numpy.frompyfunc( self.datetime_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. 
We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_datetime_to_epoch16(x): retval = numpy.require(v_datetime_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_datetime_to_epoch16 = _v_datetime_to_epoch16 self.v_datetime_to_tt2000 = numpy.vectorize( self.datetime_to_tt2000, otypes=[numpy.int64]) self.v_epoch_to_tt2000 = numpy.vectorize( self.epoch_to_tt2000, otypes=[numpy.int64]) self.v_tt2000_to_epoch = numpy.vectorize( self.tt2000_to_epoch, otypes=[numpy.float64]) v_epoch16_to_tt2000 = numpy.frompyfunc( self.epoch16_to_tt2000, 2, 1) self.v_epoch16_to_tt2000 = \ lambda x: v_epoch16_to_tt2000(x[..., 0], x[..., 1]) v_tt2000_to_epoch16 = numpy.frompyfunc( self.tt2000_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_tt2000_to_epoch16(x): retval = numpy.require(v_tt2000_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_tt2000_to_epoch16 = _v_tt2000_to_epoch16 if not self.supports_int8: self.datetime_to_tt2000 = self._bad_tt2000 self.tt2000_to_datetime = self._bad_tt2000 self.v_datetime_to_tt2000 = self._bad_tt2000 self.v_tt2000_to_datetime = self._bad_tt2000 self.epoch_to_tt2000 = self._bad_tt2000 self.v_epoch_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch = self._bad_tt2000 self.v_tt2000_to_epoch = self._bad_tt2000 self.epoch_16_to_tt2000 = self._bad_tt2000 self.v_epoch16_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch16 = self._bad_tt2000 self.v_tt2000_to_epoch16 = self._bad_tt2000 #Default to V2 CDF self.set_backward(True) @staticmethod def _find_lib(): """ Search for the CDF library Searches in likely locations for CDF libraries and attempts to load them. Stops at first successful load and, if fails, reports all the files that were tried as libraries. Returns ======= out : tuple This is either (path to library, loaded library) or, in the event of failure, (None, list of libraries tried) """ failed = [] for libpath in Library._lib_paths(): try: lib = ctypes.CDLL(libpath) except: failed.append(libpath) else: return libpath, lib return (failed, None) @staticmethod def _lib_paths(): """Find candidate paths for the CDF library Does not check that the library is actually in any particular directory, just returns a list of possible locations, in priority order. Returns ======= out : generator of str paths that look like the CDF library """ #What the library might be named names = { 'win32': ['cdf.dll'], 'darwin': ['libcdf.dylib', 'cdf.dylib', 'libcdf.so'], 'linux2': ['libcdf.so'], 'linux': ['libcdf.so'], } names = names.get(sys.platform, ['libcdf.so']) #All existing CDF-library-like paths within a directory search_dir = lambda x: \ [os.path.join(x, fname) for fname in names if os.path.exists(os.path.join(x, fname))] # Only use anaconda locations... # Defined during builds ... if 'PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['PREFIX'], 'lib')): yield p # defined when conda is activated ... 
if 'CONDA_PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'lib')): yield p # Special subdirectory for anaconda unix packages on windows if 'LIBRARY_BIN' in os.environ: for p in search_dir(os.environ['LIBRARY_BIN']): yield p ctypespath = ctypes.util.find_library( 'cdf.dll' if sys.platform == 'win32' else 'cdf') if ctypespath: yield ctypespath def check_status(self, status, ignore=()): """ Raise exception or warning based on return status of CDF call Parameters ========== status : int status returned by the C library Other Parameters ================ ignore : sequence of ctypes.c_long CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. (Default none). Raises ====== CDFError : if status < CDF_WARN, indicating an error Warns ===== CDFWarning : if CDF_WARN <= status < CDF_OK, indicating a warning. Returns ======= out : int status (unchanged) """ if status == const.CDF_OK or status in ignore: return status if status < const.CDF_WARN: raise CDFError(status) else: warning = CDFWarning(status) warning.warn() return status def call(self, *args, **kwargs): """ Call the CDF internal interface Passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with :meth:`check_status`. Terminal NULL is automatically added to args. Parameters ========== args : various, see :mod:`ctypes` Passed directly to the CDF library interface. Useful constants are defined in the :mod:`~pycdf.const` module. Other Parameters ================ ignore : sequence of CDF statuses sequence of CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. Returns ======= out : int CDF status from the library Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ if 'ignore' in kwargs: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) ), kwargs['ignore']) else: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) )) def set_backward(self, backward=True): """ Set backward compatibility mode for new CDFs Unless backward compatible mode is set, CDF files created by the version 3 library can not be read by V2. Parameters ========== backward : boolean Set backward compatible mode if True; clear it if False. Raises ====== ValueError : if backward=False and underlying CDF library is V2 """ if self.version[0] < 3: if not backward: raise ValueError( 'Cannot disable backward-compatible mode for CDF version 2.') else: return self._library.CDFsetFileBackward(const.BACKWARDFILEon if backward else const.BACKWARDFILEoff) def epoch_to_datetime(self, epoch): """ Converts a CDF epoch value to a datetime Parameters ========== epoch : float epoch value from CDF Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
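For instance (illustrative; the epoch value of 0001-01-01 00:00 UT is the
same constant used by :meth:`epoch_to_num`):

>>> lib.epoch_to_datetime(31622400000.0)
datetime.datetime(1, 1, 1, 0, 0)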
See Also ======== v_epoch_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) self._library.EPOCHbreakdown(ctypes.c_double(epoch), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999000) else: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, msec.value * 1000) def datetime_to_epoch(self, dt): """ Converts a Python datetime to a CDF Epoch value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : float epoch corresponding to dt See Also ======== v_datetime_to_epoch """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) micro = dt.microsecond % 1000 if micro >= 500 and dt.year < 9999: dt += datetime.timedelta(0, 0, 1000) return self._library.computeEPOCH(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000)) def epoch16_to_datetime(self, epoch0, epoch1): """ Converts a CDF epoch16 value to a datetime .. note:: The call signature has changed since SpacePy 0.1.2. Formerly this method took a single argument with two values; now it requires two arguments (one for each value). To convert existing code, replace ``epoch16_to_datetime(epoch)`` with ``epoch16_to_datetime(*epoch)``. Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. See Also ======== v_epoch16_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) usec = ctypes.c_long(0) nsec = ctypes.c_long(0) psec = ctypes.c_long(0) self._library.EPOCH16breakdown((ctypes.c_double * 2)(epoch0, epoch1), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec), ctypes.byref(psec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) micro = int(float(msec.value) * 1000 + float(usec.value) + float(nsec.value) / 1000 + float(psec.value) / 1e6 + 0.5) if micro < 1000000: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_epoch16(self, dt): """ Converts a Python datetime to a CDF Epoch16 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : list of float epoch16 corresponding to dt See Also ======== v_datetime_to_epoch16 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) #Default to "illegal epoch" epoch16 = (ctypes.c_double * 2)(-1., -1.) 
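        #computeEPOCH16 returns a nonzero status on failure; in that case
        #report the illegal epoch value rather than raising an exception.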
if self._library.computeEPOCH16(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0, 0, epoch16): return (-1., -1.) #Failure, so illegal epoch return (epoch16[0], epoch16[1]) def epoch_to_epoch16(self, epoch): """ Converts a CDF EPOCH to a CDF EPOCH16 value Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : (double, double) EPOCH16 corresponding to epoch """ e = numpy.require(epoch, numpy.float64) s = numpy.trunc(e / 1000.0) #ugly numpy stuff, probably a better way.... res = numpy.hstack((s, (e - s * 1000.0) * 1e9)) if len(res) <= 2: return res newshape = list(res.shape[0:-2]) newshape.append(res.shape[-1] // 2) newshape.append(2) return numpy.rollaxis(res.reshape(newshape), -1, -2) def epoch_to_num(self, epoch): """ Convert CDF EPOCH to matplotlib number. Same output as :func:`~matplotlib.dates.date2num` and useful for plotting large data sets without converting the times through datetime. Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : double Floating point number representing days since 0001-01-01. """ #date2num day 1 is 1/1/1 00UT #epoch 1/1/1 00UT is 31622400000.0 (millisecond) return (epoch - 31622400000.0) / (24 * 60 * 60 * 1000.0) + 1.0 def epoch16_to_epoch(self, epoch16): """ Converts a CDF EPOCH16 to a CDF EPOCH value Parameters ========== epoch16 : (double, double) EPOCH16 to convert. Lists and numpy arrays are acceptable. LAST dimension should be 2: the two pairs of EPOCH16 Returns ======= out : double EPOCH corresponding to epoch16 """ e = numpy.require(epoch16, numpy.float64) return e[..., 0] * 1000.0 + numpy.round(e[..., 1] / 1e9) def tt2000_to_datetime(self, tt2000): """ Converts a CDF TT2000 value to a datetime .. note:: Although TT2000 values support leapseconds, Python's datetime object does not. Any times after 23:59:59.999999 will be truncated to 23:59:59.999999. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
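
        For example, a minimal round trip through TT2000 (a sketch; requires
        CDF library 3.4.0 or later and assumes the module-level ``lib`` object):

        >>> lib.tt2000_to_datetime(lib.datetime_to_tt2000(datetime.datetime(2010, 1, 1)))
        datetime.datetime(2010, 1, 1, 0, 0)
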
See Also ======== v_tt2000_to_datetime """ yyyy = ctypes.c_double(0) mm = ctypes.c_double(0) dd = ctypes.c_double(0) hh = ctypes.c_double(0) min = ctypes.c_double(0) sec = ctypes.c_double(0) msec = ctypes.c_double(0) usec = ctypes.c_double(0) nsec = ctypes.c_double(0) self._library.breakdownTT2000( ctypes.c_longlong(tt2000), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) sec = int(sec.value) if sec >= 60: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), 59, 999999) micro = int(msec.value * 1000 + usec.value + nsec.value / 1000 + 0.5) if micro < 1000000: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_tt2000(self, dt): """ Converts a Python datetime to a CDF TT2000 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0) def _datetime_to_tt2000_typepunned(self, dt): """ Converts a Python datetime to a CDF TT2000 value Typepunned version that passes doubles as longlongs, to get around ARM calling convention oddness. Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ c_ll_p = ctypes.POINTER(ctypes.c_longlong) if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( dt.year)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.month)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.day)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.hour)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.minute)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.second)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond // 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond % 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) def epoch_to_tt2000(self, epoch): """ Converts a CDF EPOCH to a CDF TT2000 value Parameters ========== epoch : double EPOCH to convert Returns ======= out : int tt2000 corresponding to epoch See Also ======== v_epoch_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH(epoch) def tt2000_to_epoch(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH .. note:: Although TT2000 values support leapseconds, CDF EPOCH values do not. 
Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double EPOCH corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch """ return self._library.CDF_TT2000_to_UTC_EPOCH(tt2000) def epoch16_to_tt2000(self, epoch0, epoch1): """ Converts a CDF epoch16 value to TT2000 .. note:: Because TT2000 does not support picoseconds, the picoseconds value in epoch is ignored (i.e., truncated.) Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : long TT2000 corresponding to epoch. See Also ======== v_epoch16_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH16( (ctypes.c_double * 2)(epoch0, epoch1)) def tt2000_to_epoch16(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH16 .. note:: Although TT2000 values support leapseconds, CDF EPOCH16 values do not. Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double, double EPOCH16 corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch16 """ #Default to "illegal epoch" if isn't populated epoch16 = (ctypes.c_double * 2)(-1., -1.) if self._library.CDF_TT2000_to_UTC_EPOCH16(tt2000, epoch16): return (-1., -1.) #Failure; illegal epoch return (epoch16[0], epoch16[1]) def _bad_tt2000(*args, **kwargs): """Convenience function for complaining that TT2000 not supported""" raise NotImplementedError( 'TT2000 functions require CDF library 3.4.0 or later') def download_library(): """Download and install the CDF library""" if sys.platform != 'win32': raise NotImplementedError( 'CDF library install only supported on Windows') try: import html.parser as HTMLParser except ImportError: import HTMLParser #https://stackoverflow.com/questions/1713038/super-fails-with-error-typeerror-argument-1-must-be-type-not-classobj class LinkParser(HTMLParser.HTMLParser, object): def __init__(self, *args, **kwargs): self.links_found = [] super(LinkParser, self).__init__(*args, **kwargs) def handle_starttag(self, tag, attrs): if tag != 'a' or attrs[0][0] != 'href': return self.links_found.append(attrs[0][1]) import re import subprocess try: import urllib.request as u except ImportError: import urllib as u # Removed reference to spacepy #import spacepy #if spacepy.config.get('user_agent', None): # class AppURLopener(u.FancyURLopener): # version = spacepy.config['user_agent'] # u._urlopener = AppURLopener() baseurl = 'https://spdf.sci.gsfc.nasa.gov/pub/software/cdf/dist/' url = u.urlopen(baseurl) listing = url.read() url.close() p = LinkParser() p.feed(listing) cdfdist = [l for l in p.links_found if re.match('^cdf3\d_\d(?:_\d)?/$', l)] if not cdfdist: raise RuntimeError( "Couldn't find CDF distribution directory to download") cdfdist.sort(key=lambda x: x.rstrip('/').split('_')) cdfverbase = cdfdist[-1].rstrip('/') instfname = cdfverbase + ('_0' if cdfverbase.count('_') == 1 else '') + \ '-setup-{0}.exe'.format(len('%x' % sys.maxsize)*4) insturl = baseurl + cdfverbase + '/windows/' + instfname tmpdir = tempfile.mkdtemp() try: fname, status = u.urlretrieve(insturl, os.path.join(tmpdir, instfname)) subprocess.check_call([fname, '/install', '/q1'], shell=False) finally: shutil.rmtree(tmpdir) _libpath, _library = Library._find_lib() 
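#Locate and load the CDF C library once, at import time. On failure _library
#is None and _libpath holds the list of candidate paths that were tried.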
if _library is None: raise Exception(('Cannot load CDF C library; checked {0}. ' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(_libpath))) from . import const lib = Library(_libpath, _library) """Module global library object. Initalized at module load time so all classes have ready access to the CDF library and a common state. E.g: >>> import pycdf >>> pycdf.lib.version (3, 3, 0, ' ') """ class CDFException(Exception): """ Base class for errors or warnings in the CDF library. Not normally used directly, but in subclasses :class:`CDFError` and :class:`CDFWarning`. Error messages provided by this class are looked up from the underlying C library. """ def __init__(self, status): """ Create a CDF Exception Uses CDF C library to look up an appropriate error message. Parameters ========== status : ctypes.c_long CDF status """ self.status = status self.string = 'CDF error ' + repr(status) + ', unable to get details.' message = ctypes.create_string_buffer(const.CDF_STATUSTEXT_LEN + 1) try: retval = lib._library.CDFlib(const.SELECT_, const.CDF_STATUS_, ctypes.c_long(status), const.GET_, const.STATUS_TEXT_, message, const.NULL_) if retval == const.CDF_OK: if isinstance(message.value, str): self.string = message.value elif isinstance(message.value, bytes): self.string = message.value.decode() except: pass def __str__(self): """ Error string associated with the library error. Returns ======= out : str Error message from the CDF library. """ return self.string class CDFError(CDFException): """Raised for an error in the CDF library.""" pass class CDFWarning(CDFException, UserWarning): """Used for a warning in the CDF library.""" def warn(self, level=4): """ Issues a warning based on the information stored in my exception Intended for use in check_status or similar wrapper function. Other Parameters ================ level : int optional (default 3), how far up the stack the warning should be reported. Passed directly to :class:`warnings.warn`. """ warnings.warn(self, self.__class__, level) class EpochError(Exception): """Used for errors in epoch routines""" pass def _compress(obj, comptype=None, param=None): """Set or check the compression of a :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param obj: object on which to set or check compression @type obj: :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param comptype: type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :py:mod:`pycdf.const`. If not specified, will not change compression. @type comptype: ctypes.c_long @param param: Compression parameter, see CDF CRM 4.10 and :py:mod:`pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
@type param: ctypes.c_long @return: (comptype, param) currently in effect @rtype: tuple """ if isinstance(obj, CDF): COMPRESSION_ = const.CDF_COMPRESSION_ elif isinstance(obj, Var): COMPRESSION_ = const.zVAR_COMPRESSION_ else: raise ValueError('Must specify a CDF or Var type.') validparams = {const.NO_COMPRESSION.value: [ctypes.c_long(0)], const.RLE_COMPRESSION.value: [const.RLE_OF_ZEROs], const.HUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.AHUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.GZIP_COMPRESSION.value: [ctypes.c_long(5), ctypes.c_long(1), ctypes.c_long(2), ctypes.c_long(3), ctypes.c_long(4), ctypes.c_long(6), ctypes.c_long(7), ctypes.c_long(8), ctypes.c_long(9), ], } comptypes = [const.NO_COMPRESSION, const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION, const.GZIP_COMPRESSION] comptypevalues = [i.value for i in comptypes] if comptype != None: if not hasattr(comptype, 'value'): comptype = ctypes.c_long(comptype) if param is None: if not comptype.value in validparams: raise CDFError(const.BAD_COMPRESSION) param = validparams[comptype.value][0] paramlist = (ctypes.c_long * 1)(param) obj._call(const.PUT_, COMPRESSION_, comptype, paramlist) params = (ctypes.c_long * const.CDF_MAX_PARMS)(*([0] * const.CDF_MAX_PARMS)) comptype = ctypes.c_long(0) percent = ctypes.c_long(0) obj._call(const.GET_, COMPRESSION_, ctypes.byref(comptype), ctypes.byref(params), ctypes.byref(percent)) param = params[0] if not comptype.value in comptypevalues: raise CDFError(const.BAD_COMPRESSION) validparamvalues = [i.value for i in validparams[comptype.value]] if not param in validparamvalues: raise CDFError(const.BAD_COMPRESSION_PARM) comptype = comptypes[comptypevalues.index(comptype.value)] if comptype in (const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION): param = validparams[comptype.value][validparamvalues.index(param)] return (comptype, param) class CDF(MutableMapping): """ Python object representing a CDF file. Open or create a CDF file by creating an object of this class. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. A readonly CDF with many variables may be slow to close. See :meth:`readonly`. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :meth:`close` or :meth:`save` when done. .. note:: Existing CDF files are opened read-only by default, see :meth:`readonly` to change. CDF supports the `with <http://docs.python.org/tutorial/inputoutput.html#methods-of-file-objects>`_ keyword, like other file objects, so: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... #do brilliant things with the CDF will open the CDF, execute the indented statements, and close the CDF when finished or when an error occurs. The `python docs <http://docs.python.org/reference/compound_stmts.html#with>`_ include more detail on this 'context manager' ability. 
CDF objects behave like a python `dictionary <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_, where the keys are names of variables in the CDF, and the values, :class:`Var` objects. As a dictionary, they are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_ and it is easy to loop over all of the variables in a file. Some examples: #. List the names of all variables in the open CDF ``cdffile``: >>> cdffile.keys() >>> for k in cdffile: #Alternate ... print(k) #. Get a :class:`Var` object for the variable named ``Epoch``: >>> epoch = cdffile['Epoch'] #. Determine if a CDF contains a variable named ``B_GSE``: >>> if 'B_GSE' in cdffile: ... print('B_GSE is in the file') ... else: ... print('B_GSE is not in the file') #. Find how many variables are in the file: >>> print(len(cdffile)) #. Delete the variable ``Epoch`` from the open CDF file ``cdffile``: >>> del cdffile['Epoch'] #. Display a summary of variables and types in open CDF file ``cdffile``: >>> print(cdffile) #. Open the CDF named ``cdf_filename.cdf``, read *all* the data from all variables into dictionary ``data``, and close it when done or if an error occurs: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... data = cdffile.copy() This last example can be very inefficient as it reads the entire CDF. Normally it's better to treat the CDF as a dictionary and access only the data needed, which will be pulled transparently from disc. See :class:`Var` for more subtle examples. Potentially useful dictionary methods and related functions: - `in <http://docs.python.org/reference/expressions.html#in>`_ - `keys <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_ - :py:func:`len` - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - :py:func:`sorted` - :py:func:`~spacepy.toolbox.dictree` The CDF user's guide section 2.2 has more background information on CDF files. The :attr:`~CDF.attrs` Python attribute acts as a dictionary referencing CDF attributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`gAttrList` for more on the dictionary of global attributes. Creating a new CDF from a master (skeleton) CDF has similar syntax to opening one: >>> cdffile = pycdf.CDF('cdf_filename.cdf', 'master_cdf_filename.cdf') This creates and opens ``cdf_filename.cdf`` as a copy of ``master_cdf_filename.cdf``. Using a skeleton CDF is recommended over making a CDF entirely from scratch, but this is possible by specifying a blank master: >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') When CDFs are created in this way, they are opened read-write, see :py:meth:`readonly` to change. By default, new CDFs (without a master) are created in version 2 (backward-compatible) format. To create a version 3 CDF, use :meth:`Library.set_backward`: >>> pycdf.lib.set_backward(False) >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') Add variables by direct assignment, which will automatically set type and dimension based on the data provided: >>> cdffile['new_variable_name'] = [1, 2, 3, 4] or, if more control is needed over the type and dimensions, use :py:meth:`new`. Although it is supported to assign Var objects to Python variables for convenience, there are some minor pitfalls that can arise when changing a CDF that will not affect most users. 
This is only a concern when assigning a zVar object to a Python variable, changing the CDF through some other variable, and then trying to use the zVar object via the originally assigned variable. Deleting a variable: >>> var = cdffile['Var1'] >>> del cdffile['Var1'] >>> var[0] #fail, no such variable Renaming a variable: >>> var = cdffile['Var1'] >>> cdffile['Var1'].rename('Var2') >>> var[0] #fail, no such variable Renaming via the same variable works: >>> var = cdffile['Var1'] >>> var.rename('Var2') >>> var[0] #succeeds, aware of new name Deleting a variable and then creating another variable with the same name may lead to some surprises: >>> var = cdffile['Var1'] >>> var[...] = [1, 2, 3, 4] >>> del cdffile['Var1'] >>> cdffile.new('Var1', data=[5, 6, 7, 8] >>> var[...] [5, 6, 7, 8] .. autosummary:: ~CDF.attr_num ~CDF.attrs ~CDF.add_attr_to_cache ~CDF.add_to_cache ~CDF.backward ~CDF.checksum ~CDF.clear_attr_from_cache ~CDF.clear_from_cache ~CDF.clone ~CDF.close ~CDF.col_major ~CDF.compress ~CDF.copy ~CDF.from_data ~CDF.new ~CDF.raw_var ~CDF.readonly ~CDF.save ~CDF.var_num ~CDF.version .. attribute:: CDF.attrs Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. .. attribute:: CDF.backward True if this CDF was created in backward-compatible mode (for opening with CDF library before 3.x) .. automethod:: add_to_cache .. automethod:: add_attr_to_cache .. automethod:: attr_num .. automethod:: checksum .. automethod:: clear_from_cache .. automethod:: clear_attr_from_cache .. automethod:: clone .. automethod:: close .. automethod:: col_major .. automethod:: compress .. automethod:: copy .. automethod:: from_data .. automethod:: new .. automethod:: raw_var .. automethod:: readonly .. automethod:: save .. automethod:: var_num .. automethod:: version """ def __init__(self, pathname, masterpath=None, create=None, readonly=None): """Open or create a CDF file. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. Raises ====== CDFError if CDF library reports an error CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save` when done. 
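
        Create a new, empty CDF instead (a sketch; the filename is illustrative
        and the file must not already exist):

        >>> cdffile = pycdf.CDF('new_filename.cdf', '')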
""" if masterpath is not None: #Looks like we want to create if create is False: raise ValueError('Cannot specify a master CDF without creating a CDF') if readonly is True: raise ValueError('Cannot create a CDF in readonly mode') if create and readonly: raise ValueError('Cannot create a CDF in readonly mode') try: self.pathname = pathname.encode() except AttributeError: raise ValueError( 'pathname must be string-like: {0}'.format(pathname)) self._handle = ctypes.c_void_p(None) self._opened = False if masterpath is None and not create: self._open(True if readonly is None else readonly) elif masterpath: self._from_master(masterpath.encode()) else: self._create() lib.call(const.SELECT_, const.CDF_zMODE_, ctypes.c_long(2)) self._attrlistref = weakref.ref(gAttrList(self)) self.backward = self.version()[0] < 3 self._var_nums = {} """Cache of name-to-number mappings for variables in this CDF""" self._attr_info = {} """Cache of name-to-(number, global) mappings for attributes in this CDF""" def __del__(self): """Destructor; called when CDF object is destroyed. Close CDF file if there is still a valid handle. .. note:: To avoid data loss, explicitly call :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save`. """ if self._opened: self.close() def __delitem__(self, name): """Delete a zVariable in this CDF, by name or number Parameters ========== name : string or int Name or number of the CDF variable .. note: Variable numbers may change if variables are added or removed. Examples ======== Delete the variable ``Epoch`` from the open CDF file ``cdffile``. >>> del cdffile['Epoch'] """ self[name]._delete() def __enter__(self): """Context manager entrance function.""" return self def __exit__(self, type, value, traceback): """Context manager exit function. Close CDF file. """ self.close() def __getitem__(self, name): """Gets a zVariable in this CDF, by name or number The CDF acts like a dict @param name: Name or number of the CDF variable @type name: string or int @return: CDF variable named or numbered L{name} @rtype: :py:class:`pycdf.Var` @raise KeyError: for pretty much any problem in lookup @note: variable numbers may change if variables are added or removed. """ try: return Var(self, name) except CDFException as e: raise KeyError('{0}: {1}'.format(name, e)) def __setitem__(self, name, data): """Writes data to a zVariable in this CDF If the zVariable does not exist, will create one matching L{data}. If it does exist, will attempt to write L{data} to it without changing the type or dimensions. @param name: name or number of the variable to write @type name: str or int @param data: data to write, or a :py:class:`pycdf.Var` to copy """ if isinstance(data, Var): self.clone(data, name) elif name in self: self[name][...] 
= data if hasattr(data, 'attrs'): self[name].attrs.clone(data.attrs) else: self.new(name, data) def __iter__(self, current = 0): """Iterates over zVars in CDF Iterators for dicts return keys @note: Returned in variable-number order """ while current < self.__len__(): name = self[current].name() value = (yield name) if value is None: current += 1 else: current = self[value]._num() current += 1 def __len__(self): """Implements 'length' of CDF (number of zVars) @return: number of zVars in the CDF @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, const.CDF_NUMzVARS_, ctypes.byref(count)) return count.value def __contains__(self, key): """Determines whether a particular variable name is in the CDF @note: Essentially an efficiency function; L{__iter__} is called if this isn't defined @param key: key/variable name to check @type key: string @return: True if L{key} is the name of a variable in CDF, else False @rtype: Boolean """ try: foo = self[key] return True except KeyError as e: expected = str(key) + \ ": NO_SUCH_VAR: Named variable not found in this CDF." if expected in e.args: return False raise def __repr__(self): """Returns representation of CDF Cannot return anything that can be eval'd to create a copy of the CDF, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<CDF:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the CDF This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.CDF`, just the names, types, and sizes of all variables. (Attributes are not listed.) @return: description of the variables in the CDF @rtype: str """ if self._opened: return '\n'.join([key + ': ' + str(value) for (key, value) in sorted(self.items())]) #can get away with this sort because second value in tuple isn't #compared unless first are different, and variable name is unique. else: if isinstance(self.pathname, str): return 'Closed CDF {0}'.format(self.pathname) else: return 'Closed CDF {0}'.format(self.pathname.decode('ascii')) def _open(self, readonly=True): """Opens the CDF file (called on init) Will open an existing CDF file read/write. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.OPEN_, const.CDF_, self.pathname, ctypes.byref(self._handle)) self._opened = True if readonly: #Default is RW self.readonly(readonly) def _create(self): """Creates (and opens) a new CDF file Created at ``pathname``. Assumes zero-dimension r variables Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.CREATE_, const.CDF_, self.pathname, ctypes.c_long(0), (ctypes.c_long * 1)(0), ctypes.byref(self._handle)) self._opened = True def _from_master(self, master_path): """Creates a new CDF from a master CDF file ``master_path`` is copied to ``pathname`` and opened. Parameters ========== master_path : string location of the master CDF file Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. 
note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ if os.path.exists(self.pathname): raise CDFError(const.CDF_EXISTS) shutil.copy2(master_path, self.pathname) self._open(False) def _call(self, *args, **kwargs): """Select this CDF as current and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. Parameters ========== args : various, see :py:mod:`ctypes`. Passed directly to the CDF library interface. Useful constants are defined in the :doc:`const <pycdf_const>` module of this package. Returns ======= out : ctypes.c_long CDF status from the library .. note: Terminal NULL_ is automatically added to ``args``. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. """ return lib.call(const.SELECT_, const.CDF_, self._handle, *args, **kwargs) def clone(self, zVar, name=None, data=True): """ Clone a zVariable (from another CDF or this) into this CDF Parameters ========== zVar : :py:class:`Var` variable to clone Other Parameters ================ name : str Name of the new variable (default: name of the original) data : boolean (optional) Copy data, or only type, dimensions, variance, attributes? (default: True, copy data as well) Returns ======= out : :py:class:`Var` The newly-created zVar in this CDF """ if name is None: name = zVar.name() if name in self: del self[name] self.new(name, type=zVar.type(), recVary=zVar.rv(), dimVarys=zVar.dv(), dims=zVar._dim_sizes(), n_elements=zVar._nelems()) self[name].compress(*zVar.compress()) self[name].attrs.clone(zVar.attrs) if data: r = zVar._raw zVar._raw = True self.raw_var(name)[...] = zVar[...] zVar._raw = r return zVar def col_major(self, new_col=None): """ Finds the majority of this CDF file Other Parameters ================ new_col : boolean Specify True to change to column-major, False to change to row major, or do not specify to check the majority rather than changing it. (default is check only) Returns ======= out : boolean True if column-major, false if row-major """ if new_col != None: new_maj = const.COLUMN_MAJOR if new_col else const.ROW_MAJOR self._call(const.PUT_, const.CDF_MAJORITY_, new_maj) maj = ctypes.c_long(0) self._call(const.GET_, const.CDF_MAJORITY_, ctypes.byref(maj)) if not maj.value in (const.ROW_MAJOR.value, const.COLUMN_MAJOR.value): raise CDFError(const.BAD_MAJORITY) return maj.value == const.COLUMN_MAJOR.value def readonly(self, ro=None): """ Sets or check the readonly status of this CDF If the CDF has been changed since opening, setting readonly mode will have no effect. .. note:: Closing a CDF that has been opened readonly, or setting readonly False, may take a substantial amount of time if there are many variables in the CDF, as a (potentially large) cache needs to be cleared. Consider specifying ``readonly=False`` when opening the file if this is an issue. However, this may make some reading operations slower. Other Parameters ================ ro : Boolean True to set the CDF readonly, False to set it read/write, or leave out to check only. 
Returns ======= out : Boolean True if CDF is read-only, else False Raises ====== CDFError : if bad mode is set """ if ro == True: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYon) elif ro == False: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYoff) mode = ctypes.c_long(0) self._call(const.CONFIRM_, const.CDF_READONLY_MODE_, ctypes.byref(mode)) if mode.value == const.READONLYon.value: return True elif mode.value == const.READONLYoff.value: return False else: raise CDFError(const.BAD_READONLY_MODE.value) def checksum(self, new_val=None): """ Set or check the checksum status of this CDF. If checksums are enabled, the checksum will be verified every time the file is opened. Other Parameters ================ new_val : boolean True to enable checksum, False to disable, or leave out to simply check. Returns ======= out : boolean True if the checksum is enabled or False if disabled """ if new_val != None: self._call(const.PUT_, const.CDF_CHECKSUM_, const.MD5_CHECKSUM if new_val else const.NO_CHECKSUM) chk = ctypes.c_long(0) self._call(const.GET_, const.CDF_CHECKSUM_, ctypes.byref(chk)) if not chk.value in (const.MD5_CHECKSUM.value, const.NO_CHECKSUM.value): raise CDFError(const.BAD_CHECKSUM) return chk.value == const.MD5_CHECKSUM.value def close(self): """ Closes the CDF file Although called on object destruction (:meth:`~CDF.__del__`), to ensure all data are saved, the user should explicitly call :meth:`~CDF.close` or :meth:`~CDF.save`. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.CLOSE_, const.CDF_) self._opened = False def compress(self, comptype=None, param=None): """ Set or check the compression of this CDF Sets compression on entire *file*, not per-variable. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) Returns ======= out : tuple (comptype, param) currently in effect See Also ======== :meth:`Var.compress` Examples ======== Set file ``cdffile`` to gzip compression, compression level 9: >>> cdffile.compress(pycdf.const.GZIP_COMPRESSION, 9) """ return _compress(self, comptype, param) def new(self, name, data=None, type=None, recVary=True, dimVarys=None, dims=None, n_elements=None, compress=None, compress_param=None): """ Create a new zVariable in this CDF .. note:: Either ``data`` or ``type`` must be specified. If type is not specified, it is guessed from ``data``. Parameters ========== name : str name of the new variable Other Parameters ================ data data to store in the new variable. If this has a an ``attrs`` attribute (e.g., :class:`~spacepy.datamodel.dmarray`), it will be used to populate attributes of the new variable. type : ctypes.c_long CDF type of the variable, from :mod:`~pycdf.const`. See section 2.5 of the CDF user's guide for more information on CDF data types. recVary : boolean record variance of the variable (default True) dimVarys : list of boolean dimension variance of each dimension, default True for all dimensions. 
dims : list of int size of each dimension of this variable, default zero-dimensional. Note this is the dimensionality as defined by CDF, i.e., for record-varying variables it excludes the leading record dimension. See :py:class:`Var`. n_elements : int number of elements, should be 1 except for CDF_CHAR, for which it's the length of the string. compress : ctypes.c_long Compression to apply to this variable, default None. See :py:meth:`Var.compress`. compress_param : ctypes.c_long Compression parameter if compression used; reasonable default is chosen. See :py:meth:`Var.compress`. Returns ======= out : :py:class:`Var` the newly-created zVariable Raises ====== ValueError : if neither data nor sufficient typing information is provided. Notes ===== Any given data may be representable by a range of CDF types; if the type is not specified, pycdf will guess which the CDF types which can represent this data. This breaks down to: #. If input data is a numpy array, match the type of that array #. Proper kind (numerical, string, time) #. Proper range (stores highest and lowest number provided) #. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: #. Type that matches precision of data first, then #. integer type before float type, then #. Smallest type first, then #. signed type first, then #. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if ``data`` specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: #. absolute values between 0 and 3e-39 #. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. """ if type in (const.CDF_EPOCH16, const.CDF_INT8, const.CDF_TIME_TT2000) \ and self.backward: raise ValueError('Cannot use EPOCH16, INT8, or TIME_TT2000 ' 'in backward-compatible CDF') if not lib.supports_int8 and \ type in (const.CDF_INT8, const.CDF_TIME_TT2000): raise ValueError('INT8 and TIME_TT2000 require CDF library 3.4.0') if data is None: if type is None: raise ValueError('Must provide either data or a CDF type.') if dims is None: dims = [] if n_elements is None: n_elements = 1 else: (guess_dims, guess_types, guess_elements) = _Hyperslice.types(data) if dims is None: if recVary: if guess_dims == (): raise ValueError( 'Record-varying data cannot be scalar. 
' 'Specify NRV with CDF.new() or put data in array.') dims = guess_dims[1:] else: dims = guess_dims if type is None: type = guess_types[0] if type == const.CDF_EPOCH16.value and self.backward: type = const.CDF_EPOCH if n_elements is None: n_elements = guess_elements if dimVarys is None: dimVarys = [True for i in dims] recVary = const.VARY if recVary else const.NOVARY dimVarys = [const.VARY if dimVary else const.NOVARY for dimVary in dimVarys] if not hasattr(type, 'value'): type = ctypes.c_long(type) if type.value == const.CDF_INT8.value and not lib.supports_int8: raise ValueError( '64-bit integer support require CDF library 3.4.0') if type.value in (const.CDF_EPOCH16.value, const.CDF_INT8.value, const.CDF_TIME_TT2000.value) \ and self.backward: raise ValueError('Data requires EPOCH16, INT8, or TIME_TT2000; ' 'incompatible with backward-compatible CDF') new_var = Var(self, name, type, n_elements, dims, recVary, dimVarys) if compress != None: new_var.compress(compress, compress_param) if data is not None: new_var[...] = data if hasattr(data, 'attrs'): new_var.attrs.clone(data.attrs) return new_var def raw_var(self, name): """ Get a "raw" :class:`Var` object. Normally a :class:`Var` will perform translation of values for certain types (to/from Unicode for CHAR variables on Py3k, and to/from datetime for all time types). A "raw" object does not perform this translation, on read or write. This does *not* affect the data on disk, and in fact it is possible to maintain multiple Python objects with access to the same zVariable. Parameters ========== name : str name or number of the zVariable """ v = self[name] v._raw = True return v def save(self): """ Saves the CDF file but leaves it open. If closing the CDF, :meth:`close` is sufficient; there is no need to call :meth:`save` before :meth:`close`. .. note:: Relies on an undocumented call of the CDF C library, which is also used in the Java interface. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.SAVE_, const.CDF_) def copy(self): """ Make a copy of all data and attributes in this CDF Returns ======= out : :py:class:`CDFCopy` :class:`~spacepy.datamodel.SpaceData`-like object of all data """ return CDFCopy(self) def version(self): """ Get version of library that created this CDF Returns ======= out : tuple version of CDF library, in form (version, release, increment) """ ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) self._call(const.GET_, const.CDF_VERSION_, ctypes.byref(ver), const.GET_, const.CDF_RELEASE_, ctypes.byref(rel), const.GET_, const.CDF_INCREMENT_, ctypes.byref(inc)) return (ver.value, rel.value, inc.value) def _get_attrs(self): """Get attribute list Provide access to the CDF's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = gAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. 
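
        For example, setting a global attribute on an open, writable CDF
        (the attribute name and value are illustrative):

        >>> cdffile.attrs['Project'] = 'My project'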
""") def var_num(self, varname): """Get the variable number of a particular variable name This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : int Variable number of this zvariable. """ num = self._var_nums.get(varname, None) if num is None: #Copied from Var._get, which can hopefully be thinned varNum = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMBER_, varname, ctypes.byref(varNum)) num = varNum.value self._var_nums[varname] = num return num def attr_num(self, attrname): """Get the attribute number and scope by attribute name This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : tuple attribute number, scope (True for global) of this attribute """ res = self._attr_info.get(attrname, None) if res is None: #Copied from Var._get, which can hopefully be thinned attrNum = ctypes.c_long(0) self._call(const.GET_, const.ATTR_NUMBER_, attrname, ctypes.byref(attrNum)) scope = ctypes.c_long(0) self._call(const.SELECT_, const.ATTR_, attrNum, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) res = (attrNum.value, scope) self._attr_info[attrname] = res return res def clear_attr_from_cache(self, attrname): """Mark an attribute deleted in the name-to-number cache Will remove an attribute, and all attributes with higher numbers, from the attribute cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the attribute. Not this is NOT a string in Python 3! """ num, scope = self.attr_num(attrname) #All numbers higher than this are renumbered for a, n in list(self._attr_info.items()): if n[0] >= num: del self._attr_info[a] def clear_from_cache(self, varname): """Mark a variable deleted in the name-to-number cache Will remove a variable, and all variables with higher numbers, from the variable cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! """ num = self.var_num(varname) #All numbers higher than this are renumbered for v, n in list(self._var_nums.items()): if n >= num: del self._var_nums[v] def add_attr_to_cache(self, attrname, num, scope): """Add an attribute to the name-to-number cache This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable scope : bool True if global scope; False if variable scope. 
""" self._attr_info[attrname] = (num, scope) def add_to_cache(self, varname, num): """Add a variable to the name-to-number cache This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable """ self._var_nums[varname] = num #Note there is no function for delete, currently handled in Var.rename #and Attr.rename by just deleting from the dict directly. Maybe this #should be differen (maybe should be possible to follow a variable across #a rename...) class Var(MutableSequence): """ A CDF variable. This object does not directly store the data from the CDF; rather, it provides access to the data in a format that much like a Python list or numpy :class:`~numpy.ndarray`. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. The CDF user's guide, section 2.3, provides background on variables. .. note:: Not intended to be created directly; use methods of :class:`CDF` to gain access to a variable. A record-varying variable's data are viewed as a hypercube of dimensions n_dims+1 (the extra dimension is the record number). They are indexed in row-major fashion, i.e. the last index changes most frequently / is contiguous in memory. If the CDF is column-major, the data are transformed to row-major before return. Non record-varying variables are similar, but do not have the extra dimension of record number. Variables can be subscripted by a multidimensional index to return the data. Indices are in row-major order with the first dimension representing the record number. If the CDF is column major, the data are reordered to row major. Each dimension is specified by standard Python `slice <http://docs.python.org/tutorial/introduction.html#strings>`_ notation, with dimensions separated by commas. The ellipsis fills in any missing dimensions with full slices. The returned data are lists; Python represents multidimensional arrays as nested lists. The innermost set of lists represents contiguous data. .. note:: numpy 'fancy indexing' is *not* supported. Degenerate dimensions are 'collapsed', i.e. no list of only one element will be returned if a single subscript is specified instead of a range. (To avoid this, specify a slice like 1:2, which starts with 1 and ends before 2). Two special cases: 1. requesting a single-dimension slice for a record-varying variable will return all data for that record number (or those record numbers) for that variable. 2. Requests for multi-dimensional variables may skip the record-number dimension and simply specify the slice on the array itself. In that case, the slice of the array will be returned for all records. In the event of ambiguity (e.g., single-dimension slice on a one-dimensional variable), case 1 takes priority. Otherwise, mismatch between the number of dimensions specified in the slice and the number of dimensions in the variable will cause an :exc:`~exceptions.IndexError` to be thrown. This all sounds very complicated but it is essentially attempting to do the 'right thing' for a range of slices. An unusual case is scalar (zero-dimensional) non-record-varying variables. Clearly they cannot be subscripted normally. 
In this case, use the ``[...]`` syntax meaning 'access all data.': >>> import pycdf >>> testcdf = pycdf.CDF('test.cdf', '') >>> variable = testcdf.new('variable', recVary=False, ... type=pycdf.const.CDF_INT4) >>> variable[...] = 10 >>> variable <Var: CDF_INT4 [] NRV > >>> variable[...] 10 Reading any empty non-record-varying variable will return an empty with the same *number* of dimensions, but all dimensions will be of zero length. The scalar is, again, a special case: due to the inability to have a numpy array which is both zero-dimensional and empty, reading an NRV scalar variable with no data will return an empty one-dimensional array. This is really not recommended. As a list type, variables are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_; iterating over a variable returns a single complete record at a time. This is all clearer with examples. Consider a variable ``B_GSM``, with three elements per record (x, y, z components) and fifty records in the CDF. Then: 1. ``B_GSM[0, 1]`` is the y component of the first record. 2. ``B_GSM[10, :]`` is a three-element list, containing x, y, and z components of the 11th record. As a shortcut, if only one dimension is specified, it is assumed to be the record number, so this could also be written ``B_GSM[10]``. 3. ``B_GSM[...]`` reads all data for ``B_GSM`` and returns it as a fifty-element list, each element itself being a three-element list of x, y, z components. Multidimensional example: consider fluxes stored as a function of pitch angle and energy. Such a variable may be called Flux and stored as a two-dimensional array, with the first dimension representing (say) ten energy steps and the second, eighteen pitch angle bins (ten degrees wide, centered from 5 to 175 degrees). Assume 100 records stored in the CDF (i.e. 100 different times). 1. ``Flux[4]`` is a list of ten elements, one per energy step, each element being a list of 18 fluxes, one per pitch bin. All are taken from the fifth record in the CDF. 2. ``Flux[4, :, 0:4]`` is the same record, all energies, but only the first four pitch bins (roughly, field-aligned). 3. ``Flux[..., 0:4]`` is a 100-element list (one per record), each element being a ten-element list (one per energy step), each containing fluxes for the first four pitch bins. This slicing notation is very flexible and allows reading specifically the desired data from the CDF. All data are, on read, converted to appropriate Python data types; EPOCH, EPOCH16, and TIME_TT2000 types are converted to :class:`~datetime.datetime`. Data are returned in numpy arrays. .. note:: Although pycdf supports TIME_TT2000 variables, the Python :class:`~datetime.datetime` object does not support leap seconds. Thus, on read, any seconds past 59 are truncated to 59.999999 (59 seconds, 999 milliseconds, 999 microseconds). Potentially useful list methods and related functions: - `count <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `in <http://docs.python.org/reference/expressions.html#in>`_ - `index <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `len <http://docs.python.org/library/functions.html#len>`_ - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - `sorted <http://docs.python.org/library/functions.html#sorted>`_ The topic of array majority can be very confusing; good background material is available at `IDL Array Storage and Indexing <http://www.idlcoyote.com/misc_tips/colrow_major.html>`_. 
In brief, *regardless of the majority stored in the CDF*, pycdf will always present the data in the native Python majority, row-major order, also known as C order. This is the default order in `NumPy <http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html #internal-memory-layout-of-an-ndarray>`_. However, packages that render image data may expect it in column-major order. If the axes seem 'swapped' this is likely the reason. The :attr:`~Var.attrs` Python attribute acts as a dictionary referencing zAttributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`zAttrList` for more on the dictionary of attributes. With writing, as with reading, every attempt has been made to match the behavior of Python lists. You can write one record, many records, or even certain elements of all records. There is one restriction: only the record dimension (i.e. dimension 0) can be resized by write, as all records in a variable must have the same dimensions. Similarly, only whole records can be deleted. .. note:: Unusual error messages on writing data usually mean that pycdf is unable to interpret the data as a regular array of a single type matching the type and shape of the variable being written. A 5x4 array is supported; an irregular array where one row has five columns and a different row has six columns is not. Error messages of this type include: - ``Data must be well-formed, regular array of number, string, or datetime`` - ``setting an array element with a sequence.`` - ``shape mismatch: objects cannot be broadcast to a single shape`` For these examples, assume Flux has 100 records and dimensions [2, 3]. Rewrite the first record without changing the rest: >>> Flux[0] = [[1, 2, 3], [4, 5, 6]] Writes a new first record and delete all the rest: >>> Flux[...] = [[1, 2, 3], [4, 5, 6]] Write a new record in the last position and add a new record after: >>> Flux[99:] = [[[1, 2, 3], [4, 5, 6]], ... [[11, 12, 13], [14, 15, 16]]] Insert two new records between the current number 5 and 6: >>> Flux[5:6] = [[[1, 2, 3], [4, 5, 6]], [[11, 12, 13], ... [14, 15, 16]]] This operation can be quite slow, as it requires reading and rewriting the entire variable. (CDF does not directly support record insertion.) Change the first element of the first two records but leave other elements alone: >>> Flux[0:2, 0, 0] = [1, 2] Remove the first record: >>> del Flux[0] Removes record 5 (the sixth): >>> del Flux[5] Due to the need to work around a bug in the CDF library, this operation can be quite slow. Delete *all data* from ``Flux``, but leave the variable definition intact: >>> del Flux[...] .. note:: Although this interface only directly supports zVariables, zMode is set on opening the CDF so rVars appear as zVars. See p.24 of the CDF user's guide; pyCDF uses zMode 2. .. autosummary:: ~Var.attrs ~Var.compress ~Var.copy ~Var.dtype ~Var.dv ~Var.insert ~Var.name ~Var.rename ~Var.rv ~Var.shape ~Var.type .. attribute:: Var.attrs zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. .. automethod:: compress .. automethod:: copy .. autoattribute:: dtype .. automethod:: dv .. automethod:: insert .. automethod:: name .. automethod:: rename .. automethod:: rv .. autoattribute:: shape .. 
automethod:: type """ def __init__(self, cdf_file, var_name, *args): """Create or locate a variable Parameters ========== cdf_file : :py:class:`pycdf.CDF` CDF file containing this variable var_name : string name of this variable Other Parameters ================ args additional arguments passed to :py:meth:`_create`. If none, opens an existing variable. If provided, creates a new one. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning """ self.cdf_file = cdf_file #This is the definitive "identify" of variable self._name = None self._type = None #CDF type (long) self._raw = False #Raw access (skip all conversions) if len(args) == 0: self._get(var_name) else: self._create(var_name, *args) #Weak reference to attribute list (use attrs instead) #This avoids a reference loop self._attrlistref = weakref.ref(zAttrList(self)) def __getitem__(self, key): """Returns a slice from the data array. Details under :py:class:`pycdf.Var`. @return: The data from this variable @rtype: list-of-lists of appropriate type. @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) #Hyperslice mostly catches this sort of thing, but #an empty variable is a special case, since we might want to #WRITE to 0th record (which Hyperslice also supports) but #can't READ from it, and iterating over tries to read from it. if hslice.rv: if hslice.dimsizes[0] == 0 and hslice.degen[0] and \ hslice.starts[0] == 0: raise IndexError('record index out of range') #For NRV, again hslice will assume 0th record exists since we might #want to write. So ANY degenerate dim other than the glued-on 0th #suggests an explicit index that should fail. None degenerate suggests #make an empty array. #Note this is pulling a lot of hyperslice stuff into getitem! elif hslice.dimsizes[0] == 0: if len(hslice.degen) > 1 and max(hslice.degen[1:]): raise IndexError('record index out of range') else: #The zero-length dimension is degenerate so it gets chopped, #and you can't have a zero-length numpy array that still #maintains the size of all other dimensions. So just force #a zero-dim array and the rest will follow hslice.counts[...] = 0 #If this is a scalar, need to make a single non-degenerate #dimension so it can be empty. if len(hslice.counts) == 1: hslice.degen[0] = False result = hslice.create_array() if hslice.counts[0] != 0: hslice.select() lib.call(const.GET_, const.zVAR_HYPERDATA_, result.ctypes.data_as(ctypes.c_void_p)) return hslice.convert_input_array(result) def __delitem__(self, key): """Removes a record (or set of records) from the CDF Only whole records can be deleted, so the del call must either specify only one dimension or it must specify all elements of the non-record dimensions. This is *not* a way to resize a variable! Deleting records from the middle of a variable may be very slow in some circumstances. To work around a bug in CDF library versions 3.4.0 and before, all the data must be read in, the requested deletions done, and then all written back out. 
@param key: index or slice to delete @type key: int or slice @raise TypeError: if an attempt is made to delete from a non record-varying variable, or to delete below the record level """ if not self.rv(): raise TypeError('Cannot delete records from non-record-varying ' 'variable.') hslice = _Hyperslice(self, key) if hslice.dims > 1 and (hslice.counts[1:] != hslice.dimsizes[1:]).any(): raise TypeError('Can only delete entire records.') if hslice.counts[0] == 0: return start = hslice.starts[0] count = hslice.counts[0] interval = hslice.intervals[0] dimsize = hslice.dimsizes[0] self._call() dangerous_delete = False if lib._del_middle_rec_bug and \ (interval != 1 or (start != 0 and start + count < dimsize)): #delete from middle is dangerous if only have one index entry entries = ctypes.c_long(0) lib.call(const.GET_, const.zVAR_nINDEXENTRIES_, ctypes.byref(entries)) dangerous_delete = (entries.value == 1) if dangerous_delete: data = self[...] data = numpy.delete( data, numpy.arange(start, start + count * interval, interval), 0) self[0:dimsize - count] = data first_rec = dimsize - count last_rec = dimsize - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif interval == 1: first_rec = ctypes.c_long(start) last_rec = ctypes.c_long(start + count - 1) lib.call(const.DELETE_, const.zVAR_RECORDS_, first_rec, last_rec) else: self._call() #delete from end to avoid renumbering of records for recno in range(start + (count - 1) * interval, start - 1, -1 * interval): lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(recno), ctypes.c_long(recno)) def __setitem__(self, key, data): """Puts a slice into the data array. Details under :py:class:`pycdf.Var`. @param key: index or slice to store @type key: int or slice @param data: data to store @type data: numpy.array @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. 
IndexError will @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) n_recs = hslice.counts[0] hslice.expand(data) cdf_type = self.type() if cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) else: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=self._np_type()) if cdf_type == const.CDF_EPOCH16.value: datashape = data.shape[:-1] else: datashape = data.shape #Check data sizes if datashape != tuple(hslice.expected_dims()): raise ValueError('attempt to assign data of dimensions ' + str(datashape) + ' to slice of dimensions ' + str(tuple(hslice.expected_dims()))) #Flip majority and reversed dimensions, see convert_input_array data = hslice.convert_output_array(data) #Handle insertions and similar weirdness if hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Specified slice ends before last record, so insert in middle saved_data = self[hslice.starts[0] + n_recs:] if hslice.counts[0] > 0: hslice.select() lib.call(const.PUT_, const.zVAR_HYPERDATA_, data.ctypes.data_as(ctypes.c_void_p)) if hslice.counts[0] < n_recs: first_rec = hslice.starts[0] + hslice.counts[0] last_rec = hslice.dimsizes[0] - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Put saved data in after inserted data self[hslice.starts[0] + hslice.counts[0]:] = saved_data def extend(self, data): """ Append multiple values to the end of this variable This is an efficiency function which overrides the base implementation in MutableSequence. Parameters ---------- data : the data to append """ self[len(self):] = data def insert(self, index, data): """ Inserts a *single* record before an index Parameters ---------- index : int index before which to insert the new record data : the record to insert """ self[index:index] = [data] def _create(self, var_name, datatype, n_elements = 1, dims = (), recVary = const.VARY, dimVarys = None): """Creates a new zVariable @param var_name: name of this variable @type var_name: string @param datatype: CDF data type @type datatype: ctypes.c_long @param n_elements: number of elements (should be 1 except for CDF_CHAR variables). @type n_elements: long @param dims: size of each dimension for multi-dimensional variable, or empty for a zero-dimensional @type dims: sequence of long @param recVary: record variance for this variable (VARY/NOVARY) @type recVary: long @param dimVarys: array of VARY or NOVARY, variance for each dimension @type dimVarys: sequence of long @return: new variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.new}. 
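A sketch of that public route (the variable name, type, and dimensions are illustrative; the ``dims`` keyword is assumed here):

>>> import pycdf
>>> cdf = pycdf.CDF('new.cdf', '')  # '' as master creates a new, empty CDF
>>> flux = cdf.new('Flux', type=pycdf.const.CDF_FLOAT, dims=[10, 18])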
""" dim_array = (ctypes.c_long * len(dims))(*dims) enc_name = var_name.encode('ascii') if dimVarys is None: dim_vary_array = (ctypes.c_long * (len(dims) if len(dims) > 0 else 1))(const.VARY) else: dim_vary_array = (ctypes.c_long * len(dims))(*dimVarys) varNum = ctypes.c_long(0) self.cdf_file._call(const.CREATE_, const.zVAR_, enc_name, datatype, ctypes.c_long(n_elements), ctypes.c_long(len(dims)), dim_array, recVary, dim_vary_array, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) def _delete(self): """Removes this zVariable from the CDF @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ self._call(const.DELETE_, const.zVAR_) self.cdf_file.clear_from_cache(self._name) self._name = None def _get(self, var_name): """Gets an existing zVariable @param var_name: name of this variable @type var_name: string @return: variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.__getitem__}. """ if isinstance(var_name, str_classes): try: enc_name = var_name.encode('ascii').rstrip() except AttributeError: enc_name = var_name.rstrip() #already in ASCII #'touch' CDF to cause an error if the name isn't there; get number varNum = ctypes.c_long(0) self.cdf_file._call(const.GET_, const.zVAR_NUMBER_, enc_name, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) else: #Looking up by number name = ctypes.create_string_buffer(const.CDF_VAR_NAME_LEN256+1) self.cdf_file._call(const.SELECT_, const.zVAR_, ctypes.c_long(var_name), const.GET_, const.zVAR_NAME_, name) self._name = name.value.rstrip() self.cdf_file.add_to_cache(self._name, var_name) def _num(self): """Returns the zVar number for this variable @return: number of this zVar @rtype: int """ return self.cdf_file.var_num(self._name) def __len__(self): """Get number of records for this variable in this file @return: Number of records @rtype: long @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ count = ctypes.c_long(0) self._call(const.GET_, const.zVAR_MAXREC_, ctypes.byref(count)) return (count.value + 1) def __repr__(self): """Returns representation of the variable Cannot return anything that can be eval'd to create a copy, so just wrap the informal representation in angle brackets. @return: info on this zVar @rtype: str """ return '<Var:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the variable This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.Var`. 
@return: info on this zVar, CDFTYPE [dimensions] NRV (if not record-varying) @rtype: str """ if self.cdf_file._opened: cdftype = self.type() chartypes = (const.CDF_CHAR.value, const.CDF_UCHAR.value) rv = self.rv() typestr = lib.cdftypenames[cdftype] + \ ('*' + str(self._nelems()) if cdftype in chartypes else '' ) if rv: sizestr = str([len(self)] + self._dim_sizes()) else: sizestr = str(self._dim_sizes()) return typestr + ' ' + sizestr + ('' if rv else ' NRV') else: if isinstance(self._name, str): return 'zVar "{0}" in closed CDF {1}'.format( self._name, self.cdf_file.pathname) else: return 'zVar "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self.cdf_file.pathname.decode('ascii')) def _n_dims(self): """Get number of dimensions for this variable @return: the number of dimensions @rtype: long """ n_dims = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMDIMS_, ctypes.byref(n_dims)) return n_dims.value def _dim_sizes(self): """Get the dimension sizes for this variable @return: sequence of sizes @rtype: sequence of long @note: This will always be in Python order (i.e. row major, last index iterates most quickly), *regardless* of the majority of the CDF. """ sizes = (ctypes.c_long * const.CDF_MAX_DIMS)(0) self._call(const.GET_, const.zVAR_DIMSIZES_, sizes) sizes = sizes[0:self._n_dims()] return sizes def rv(self, new_rv=None): """ Gets or sets whether this variable has record variance If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Other Parameters ================ new_rv : boolean True to change to record variance, False to change to NRV, unspecified to simply check variance. Returns ======= out : Boolean True if record varying, False if NRV """ if new_rv != None: self._call(const.PUT_, const.zVAR_RECVARY_, const.VARY if new_rv else const.NOVARY) vary = ctypes.c_long(0) self._call(const.GET_, const.zVAR_RECVARY_, ctypes.byref(vary)) return vary.value != const.NOVARY.value def dv(self, new_dv=None): """ Gets or sets dimension variance of each dimension of variable. If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Parameters ========== new_dv : list of boolean Each element True to change that dimension to dimension variance, False to change to not dimension variance. (Unspecified to simply check variance.) Returns ======= out : list of boolean True if that dimension has variance, else false. """ ndims = self._n_dims() if new_dv != None: if len(new_dv) != ndims: raise ValueError('Must specify variance for ' + str(ndims) + 'dimensions.') varies = (ctypes.c_long * ndims)( *[const.VARY if dv else const.NOVARY for dv in new_dv]) self._call(const.PUT_, const.zVAR_DIMVARYS_, varies) if ndims == 0: return [] varies = (ctypes.c_long * const.CDF_MAX_DIMS)() self._call(const.GET_, const.zVAR_DIMVARYS_, varies) return [dv != const.NOVARY.value for dv in varies[0:ndims]] def _call(self, *args, **kwargs): """Select this CDF and variable and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. @param args: Passed directly to the CDF library interface. Useful constants are defined in the :py:mod:`pycdf.const` module of this package. @type args: various, see :py:mod:`ctypes`. 
@return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self.cdf_file._call( const.SELECT_, const.zVAR_, ctypes.c_long(self.cdf_file.var_num(self._name)), *args, **kwargs) def _np_type(self): """Returns the numpy type of this variable This is the numpy type that will come directly out of the CDF; see :meth:`dtype` for the representation post-conversion. Raises ====== CDFError : for library-reported error or failure to find numpy type Returns ======= out : dtype numpy dtype that will hold value from this variable """ cdftype = self.type() if cdftype == const.CDF_CHAR.value or cdftype == const.CDF_UCHAR.value: return numpy.dtype('S' + str(self._nelems())) try: return lib.numpytypedict[cdftype] except KeyError: raise CDFError(const.BAD_DATA_TYPE) def type(self, new_type=None): """ Returns or sets the CDF type of this variable Parameters ========== new_type : ctypes.c_long the new type from :mod:`~pycdf.const` Returns ======= out : int CDF type """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) n_elements = ctypes.c_long(self._nelems()) self._call(const.PUT_, const.zVAR_DATASPEC_, new_type, n_elements) self._type = None if self._type is None: cdftype = ctypes.c_long(0) self._call(const.GET_, const.zVAR_DATATYPE_, ctypes.byref(cdftype)) self._type = cdftype.value return self._type def _nelems(self): """Number of elements for each value in this variable This is the length of strings for CHAR and UCHAR, should be 1 otherwise. @return: length of strings @rtype: int """ nelems = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMELEMS_, ctypes.byref(nelems)) return nelems.value def name(self): """ Returns the name of this variable Returns ======= out : str variable's name """ if isinstance(self._name, str): return self._name elif isinstance(self._name, bytes): return self._name.decode() def compress(self, comptype=None, param=None): """ Set or check the compression of this variable Compression may not be changeable on variables with data already written; even deleting the data may not permit the change. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
Returns ======= out : tuple the (comptype, param) currently in effect """ return _compress(self, comptype, param) def copy(self): """ Copies all data and attributes from this variable Returns ======= out : :class:`VarCopy` list of all data in record order """ return VarCopy(self) def rename(self, new_name): """ Renames this variable Parameters ========== new_name : str the new name for this variable """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_VAR_NAME_LEN256: raise CDFError(const.BAD_VAR_NAME) self._call(const.PUT_, const.zVAR_NAME_, enc_name) self.cdf_file.add_to_cache( enc_name, self.cdf_file.var_num(self._name)) #Still in cache del self.cdf_file._var_nums[self._name] self._name = enc_name @property def shape(self): """ Provides the numpy array-like shape of this variable. Returns a tuple; first element is number of records (RV variable only) And the rest provide the dimensionality of the variable. .. note:: Assigning to this attribute will not change the shape. """ if self.rv(): return tuple([len(self)] + self._dim_sizes()) else: return tuple(self._dim_sizes()) @property def dtype(self): """ Provide the numpy dtype equivalent to the CDF type of this variable. Data from this variable will be returned in numpy arrays of this type. See Also -------- type """ cdftype = self.type() if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str is not bytes and not self._raw: return numpy.dtype('U' + str(self._nelems())) if cdftype in (const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value) and not self._raw: return numpy.dtype('O') return self._np_type() def _get_attrs(self): """Get attribute list Provide access to the zVar's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = zAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. """) class _Hyperslice(object): """Represents a CDF 'slice' used for the hyper CDF functions For internal module use only. @ivar dims: number of dimensions to this slice, usually number of dimensions to the variable plus one for the record, which represents the 0th (least rapidly varying) dimension. @type dims: int @ivar dimsizes: size of each dimension (0th is number of records) @type dimsizes: list of int @ivar starts: index of the start value for each dimension ('dimension indices' in CDF speak) @type starts: list of int @ivar counts: number of values to get from each dimension. Final result will be the product of everything in counts. ('dimension counts' in CDF speak) @type counts: numpy.array @ivar intervals: interval between successive indices to use for each dimension. ('dimension invervals' in CDF speak) @type intervals: list of int @ivar degen: is this dimension degenerate, i.e. should be removed in the returned dataset. A 3D array with one dimension degenerate will be returned as a 2D array (i.e. list-of-lists.) @type degen: numpy.array @ivar rev: should this dimension be returned in reverse order? 
@type rev: numpy.array @ivar column: is this slice in column-major mode (if false, row-major) @type column: boolean @ivar zvar: what CDF variable this object slices on @type zvar: :py:class:`pycdf.Var` @ivar expanded_key: fully-expanded version of the key passed to the constructor (all dimensions filled in) @type expanded_key: tuple @note: All dimension-related variables are stored row-major (Python order) """ def __init__(self, zvar, key): """Create a Hyperslice @param zvar: zVariable that this slices @type zvar: :py:class:`pycdf.Var` @param key: Python multi-dimensional slice as passed to __getitem__ @type key: tuple of slice and/or int @raise IndexError: if slice is out of range, mismatches dimensions, or otherwise unparsable. @raise ValueError: if slice has invalid values """ self.zvar = zvar self.rv = self.zvar.rv() #dim of records, + 1 record dim (NRV always is record 0) self.dims = zvar._n_dims() + 1 self.dimsizes = [len(zvar)] + \ zvar._dim_sizes() self.starts = [0] * self.dims self.counts = numpy.empty((self.dims,), dtype=numpy.int32) self.counts.fill(1) self.intervals = [1] * self.dims self.degen = numpy.zeros(self.dims, dtype=numpy.bool) self.rev = numpy.zeros(self.dims, dtype=numpy.bool) #key is: #1. a single value (integer or slice object) if called 1D #2. a tuple (of integers and/or slice objects) if called nD #3. Each item is either a single value (degenerate dim) # or a slice object. if not hasattr(key, '__len__'): #Not a container object, pack in tuple key = (key, ) if not self.rv: key = (0, ) + key #NRV, so always get 0th record (degenerate) key = self.expand_ellipsis(key, self.dims) if self.rv: #special-cases for RV variables if len(key) == 1: #get all data for this record(s) key = self.expand_ellipsis(key + (Ellipsis, ), self.dims) elif len(key) == self.dims - 1: #get same slice from each record key = (slice(None, None, None), ) + key if len(key) == self.dims: self.expanded_key = key for i in range(self.dims): idx = key[i] if hasattr(idx, 'start'): #slice (self.starts[i], self.counts[i], self.intervals[i], self.rev[i]) = \ self.convert_range(idx.start, idx.stop, idx.step, self.dimsizes[i]) else: #Single degenerate value if idx < 0: idx += self.dimsizes[i] if idx != 0 and (idx >= self.dimsizes[i] or idx < 0): raise IndexError('list index out of range') self.starts[i] = idx self.degen[i] = True else: raise IndexError('Slice does not match dimensions for zVar ' + str(zvar._name)) self.column = zvar.cdf_file.col_major() def expected_dims(self, data=None): """Calculate size of non-degenerate dimensions Figures out size, in each dimension, of expected input data @return: size of each dimension for this slice, excluding degenerate @rtype: list of int """ return [self.counts[i] for i in range(self.dims) if not self.degen[i]] def expand(self, data): """Expands the record dimension of this slice to hold a set of data If the length of data (outermost dimension) is larger than the record count (counts[0]) for this slice, expand the slice to hold all the data. This requires that the record dimension of the slice not be degenerate, and also that it not have been completely specified when the hyperslice was created (i.e. record dimension either ellipsis or no specified stop.) Does *not* expand any other dimension, since that's Very Hard in CDF. 
@param data: the data which are intended to be stored in this slice @type data: list """ rec_slice = self.expanded_key[0] if not self.rv or isinstance(data, str_classes) or self.degen[0] or \ not hasattr(rec_slice, 'stop'): return if len(data) < self.counts[0]: #Truncate to fit data if rec_slice.stop is None and rec_slice.step in (None, 1): self.counts[0] = len(data) elif len(data) > self.counts[0]: #Expand to fit data if rec_slice.step in (None, 1): self.counts[0] = len(data) def create_array(self): """Creates a numpy array to hold the data from this slice Returns ======= out : numpy.array array sized, typed, and dimensioned to hold data from this slice """ counts = self.counts degen = self.degen if self.column: counts = self.reorder(counts) degen = self.reorder(degen) #TODO: Forcing C order for now, revert to using self.column later array = numpy.empty( [counts[i] for i in range(len(counts)) if not degen[i]], self.zvar._np_type(), order='C') return numpy.require(array, requirements=('C', 'A', 'W')) def convert_input_array(self, buffer): """Converts a buffer of raw data from this slice EPOCH(16) variables always need to be converted. CHAR need converted to Unicode if py3k Parameters ========== buffer : numpy.array data as read from the CDF file Returns ======= out : numpy.array converted data """ result = self._flip_array(buffer) #Convert to derived types cdftype = self.zvar.type() if not self.zvar._raw: if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str != bytes: dt = numpy.dtype('U{0}'.format(result.dtype.itemsize)) result = numpy.require(numpy.char.array(result).decode(), dtype=dt) elif cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(result) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(result) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(result) return result def convert_output_array(self, buffer): """Convert a buffer of data that will go into this slice Parameters ========== buffer : numpy.array data to go into the CDF file Returns ======= out : numpy.array input with majority flipped and dimensions reversed to be suitable to pass directly to CDF library. """ buffer = self._flip_array(buffer) return numpy.require(buffer, requirements=('C', 'A', 'W')) def _flip_array(self, data): """ Operations for majority, etc. common between convert_input and _output """ cdftype = self.zvar.type() #Flip majority if any non-degenerate dimensions exist if self.column and not min(self.degen): #Record-number dim degen, swap whole thing if self.degen[0]: if cdftype == const.CDF_EPOCH16.value: #Maintain last dimension data = data.transpose( list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose() #Record-number dimension is not degenerate, so keep it first else: if cdftype == const.CDF_EPOCH16.value: data = data.transpose( [0] + list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose( [0] + list(range(len(data.shape) - 1, 0, -1))) #Reverse non-degenerate dimensions in rev #Remember that the degenerate indices are already gone! 
if self.rev.any(): sliced = [(slice(None, None, -1) if self.rev[i] else slice(None)) for i in range(self.dims) if not self.degen[i]] if cdftype == const.CDF_EPOCH16.value: #don't reverse last dim sliced.extend(slice(None)) data = operator.getitem(data, tuple(sliced)) return data def select(self): """Selects this hyperslice in the CDF Calls the CDF library to select the CDF, variable, records, and array elements corresponding to this slice. """ args = (const.SELECT_, const.zVAR_RECNUMBER_, ctypes.c_long(self.starts[0]), const.SELECT_, const.zVAR_RECCOUNT_, ctypes.c_long(self.counts[0]), const.SELECT_, const.zVAR_RECINTERVAL_, ctypes.c_long(self.intervals[0])) if self.dims > 1: dims = self.dims - 1 args += (const.SELECT_, const.zVAR_DIMINDICES_, (ctypes.c_long * dims)(*self.starts[1:]), const.SELECT_, const.zVAR_DIMCOUNTS_, (ctypes.c_long * dims)(*self.counts[1:]), const.SELECT_, const.zVAR_DIMINTERVALS_, (ctypes.c_long * dims)(*self.intervals[1:])) self.zvar._call(*args) @staticmethod def expand_ellipsis(slices, n_dims): """Expands any ellipses into correct number of full-size slices @param slices: tuple of slices, integers, or ellipse objects @type slices: tuple @param n_dims: number of dimensions this slice is over @type n_dims: int @return: L{slices} with ellipses replaced by appropriate number of full-dimension slices @rtype: tuple @raise IndexError: if ellipses specified when already have enough dimensions """ if slices is Ellipsis: return tuple([slice(None, None, None) for i in range(n_dims)]) #Elements might be numpy arrays, so can't use in/index idx = [i for i, v in enumerate(slices) if v is Ellipsis] if not idx: #no ellipsis return slices if len(idx) > 1: #multiples! raise IndexError('Ellipses can only be used once per slice.') idx = idx[0] #how many dims to expand ellipsis to #remember the ellipsis is in len(slices) and must be replaced! extra = n_dims - len(slices) + 1 if extra < 0: raise IndexError('too many indices') result = slices[0:idx] + (slice(None), ) * extra + slices[idx+1:] return result @staticmethod def check_well_formed(data): """Checks if input data is well-formed, regular array""" d = numpy.asanyarray(data) if d.dtype == numpy.object: #this is probably going to be bad try: len(d.flat[0]) except TypeError: #at least it's not a list pass else: raise ValueError( 'Data must be well-formed, regular array of number, ' 'string, or datetime') @staticmethod def dimensions(data): """Finds the dimensions of a nested list-of-lists @param data: data of which dimensions are desired @type data: list (of lists) @return: dimensions of L{data}, in order outside-in @rtype: list of int @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) _Hyperslice.check_well_formed(d) return d.shape @staticmethod def types(data, backward=False): """Find dimensions and valid types of a nested list-of-lists Any given data may be representable by a range of CDF types; infer the CDF types which can represent this data. This breaks down to: 1. Proper kind (numerical, string, time) 2. Proper range (stores highest and lowest number) 3. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: 1. Type that matches precision of data first, then 2. integer type before float type, then 3. Smallest type first, then 4. signed type first, then 5. specifically-named (CDF_BYTE) vs. 
generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if L{data} specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: 1. absolute values between 0 and 3e-39 2. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. @param data: data for which dimensions and CDF types are desired @type data: list (of lists) @param backward: limit to pre-CDF3 types @type backward: bool @return: dimensions of L{data}, in order outside-in; CDF types which can represent this data; number of elements required (i.e. length of longest string) @rtype: 3-tuple of lists ([int], [ctypes.c_long], [int]) @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) dims = d.shape elements = 1 types = [] _Hyperslice.check_well_formed(d) if d.dtype.kind in ('S', 'U'): #it's a string types = [const.CDF_CHAR, const.CDF_UCHAR] elements = d.dtype.itemsize if d.dtype.kind == 'U': #UTF-8 uses 4 bytes per elements //= 4 elif d.size and hasattr(numpy.ma.getdata(d).flat[0], 'microsecond'): if max((dt.microsecond % 1000 for dt in d.flat)) > 0: types = [const.CDF_EPOCH16, const.CDF_EPOCH, const.CDF_TIME_TT2000] else: types = [const.CDF_EPOCH, const.CDF_EPOCH16, const.CDF_TIME_TT2000] if backward: del types[types.index(const.CDF_EPOCH16)] del types[-1] elif not lib.supports_int8: del types[-1] elif d is data or isinstance(data, numpy.generic): #numpy array came in, use its type (or byte-swapped) types = [k for k in lib.numpytypedict if (lib.numpytypedict[k] == d.dtype or lib.numpytypedict[k] == d.dtype.newbyteorder()) and not k in lib.timetypes] if (not lib.supports_int8 or backward) \ and const.CDF_INT8.value in types: del types[types.index(const.CDF_INT8.value)] #Maintain priority to match the ordered lists below: #float/double (44, 45) before real (21/22), and #byte (41) before int (1) before char (51). So hack. #Consider making typedict an ordered dict once 2.6 is dead. types.sort(key=lambda x: x % 50, reverse=True) if not types: #not a numpy array, or can't parse its type if d.dtype.kind == 'O': #Object. 
Try to make it numeric #Can't do safe casting from Object, so try and compare #Basically try most restrictive to least restrictive trytypes = (numpy.uint64, numpy.int64, numpy.float64) for t in trytypes: try: newd = d.astype(dtype=t) except: #Failure to cast, try next type continue if (newd == d).all(): #Values preserved, use this type d = newd #Continue with normal guessing, as if a list break else: #fell through without a match raise ValueError( 'Cannot convert generic objects to CDF type.') if d.dtype.kind in ('i', 'u'): #integer minval = numpy.min(d) maxval = numpy.max(d) if minval < 0: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_INT2, const.CDF_INT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 15, 2 ** 31, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] else: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_UINT1, const.CDF_INT2, const.CDF_UINT2, const.CDF_INT4, const.CDF_UINT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 8, 2 ** 15, 2 ** 16, 2 ** 31, 2 ** 32, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] types = [t for (t, c) in zip(types, cutoffs) if c > maxval and (minval >= 0 or minval >= -c)] if (not lib.supports_int8 or backward) \ and const.CDF_INT8 in types: del types[types.index(const.CDF_INT8)] else: #float if dims is (): if d != 0 and (abs(d) > 1.7e38 or abs(d) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] else: absolutes = numpy.abs(d[d != 0]) if len(absolutes) > 0 and \ (numpy.max(absolutes) > 1.7e38 or numpy.min(absolutes) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] types = [t.value if hasattr(t, 'value') else t for t in types] return (dims, types, elements) @staticmethod def reorder(seq): """Reorders seq to switch array majority Used to take an array of subscripts between row and column majority. First element is not touched, being the record number. @param seq: a sequence of *subscripts* @type seq: sequence of integers @return: seq with all but element 0 reversed in order @rtype: sequence of integers """ return numpy.concatenate((seq[0:1], numpy.flipud(seq)[:-1])) @staticmethod def convert_range(start, stop, step, size): """Converts a start/stop/step range to start/count/interval (i.e. changes from Python-style slice to CDF-style) @param start: index to start a slice at, may be none or negative @type start: int @param stop: index at end of slice (one-past, standard Python), may be none or negative @type stop: int @param step: interval for stepping through stlice @type step: int @param size: size of list to slice @type size: int @return: (start, count, interval, rev) where: 1. start is the start index, normalized to be within the size of the list and negatives handled 2. count is the number of records in the slice, guaranteed to stop before the end 3. interval is the skip between records 4. rev indicates whether the sequence should be reversed @rtype: (int, int, int, boolean) """ (start, stop, step) = slice(start, stop, step).indices(size) if step < 0: step *= -1 count = int((start - stop + step - 1) / step) start = start - (count - 1) * step rev = True else: count = int((stop - start + step - 1) / step) rev = False if count < 0: count = 0 start = 0 return (start, count, step, rev) class Attr(MutableSequence): """An attribute, g or z, for a CDF .. 
warning:: This class should not be used directly, but only in its subclasses, :class:`gAttr` and :class:`zAttr`. The methods listed here are safe to use in the subclasses. Represents a CDF attribute, providing access to the Entries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. An introduction to CDF attributes can be found in section 2.4 of the CDF user's guide. Each element of the list is a single Entry of the appropriate type. The index to the elements is the Entry number. Multi-dimensional slicing is *not* supported; an Entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry .. autosummary:: ~Attr.append ~Attr.has_entry ~Attr.insert ~Attr.max_idx ~Attr.new ~Attr.number ~Attr.rename ~Attr.type .. automethod:: append .. automethod:: has_entry .. automethod:: insert .. automethod:: max_idx .. automethod:: new .. automethod:: number .. automethod:: rename .. automethod:: type """ def __init__(self, cdf_file, attr_name, create=False): """Initialize this attribute @param cdf_file: CDF file containing this attribute @type cdf_file: :py:class:`pycdf.CDF` @param attr_name: Name of this attribute @type attr_name: str @param create: True to create attribute, False to look up existing. @type create: bool """ self._cdf_file = cdf_file self._raw = False if isinstance(attr_name, str_classes): try: self._name = attr_name.encode('ascii') except AttributeError: self._name = attr_name attrno = ctypes.c_long() if create: self._cdf_file._call(const.CREATE_, const.ATTR_, self._name, self.SCOPE, ctypes.byref(attrno)) self._cdf_file.add_attr_to_cache( self._name, attrno.value, self.SCOPE == const.GLOBAL_SCOPE) else: #Ensure exists, and populate cache. See scope note below attrno, scope = self._cdf_file.attr_num(self._name) else: name = ctypes.create_string_buffer(const.CDF_ATTR_NAME_LEN256 + 1) scope = ctypes.c_long(0) self._cdf_file._call(const.SELECT_, const.ATTR_, ctypes.c_long(attr_name)) #Because it's possible to create a gAttr Python objecting #referencing an Attribute with variable scope, and vice-versa, #do NOT assume the scope matches #(Higher level code checks for that being a bad thing.) self._cdf_file._call( const.GET_, const.ATTR_NAME_, name, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) self._name = name.value.rstrip() if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) self._cdf_file.add_attr_to_cache(self._name, attr_name, scope) def __getitem__(self, key): """Return a slice of Entries. Because Attributes may be sparse, a multi-element slice will return None for those elements which do not have associated Entries. @param key: index or range of Entry number to return @type key: slice or int @return: a list of entries, appropriate type. @raise IndexError: if L{key} is an int and that Entry number does not exist. 
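A sketch (values are illustrative): for an Attribute with Entries defined only at numbers 0 and 2,

>>> attr[0:3]   # the missing Entry 1 comes back as None
[1.0, None, 3.0]
>>> attr[1]     # a single index to a missing Entry raises
Traceback (most recent call last):
    ...
IndexError: list index 1 out of range.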
""" if key is Ellipsis: key = slice(None, None, None) if hasattr(key, 'indices'): idx = range(*key.indices(self.max_idx() + 1)) return [self._get_entry(i) if self.has_entry(i) else None for i in idx] else: if self.has_entry(key): return self._get_entry(key) else: raise IndexError('list index ' + str(key) + ' out of range.') def _check_other_entries(self, types): """Try to get the type of this entry from others in the Attribute For zAttrs, checks if all other Entries are the same type, and at least one doesn't match its zVar, i.e. Entry type dominates (otherwise assumption is the Var type dominates). For gAttrs, checks all other Entries, and gives priority to the one that's earliest in the possible type list and exists in other Entries. This is only one component of Entry type guessing! :param list types: CDF types that are candidates (match the data) :return: The type discerned from other Entries, or None """ if self.ENTRY_ == const.zENTRY_: #If everything else is the same entry type, #and one is not the same as its var, probably #all entries should be of that type cand_et = None #The Entry type that might work one_var_diff = False #One Var has a type different from Entry for num in range(self.max_idx() + 1): if not self.has_entry(num): continue vartype = self._cdf_file[num].type() entrytype = self.type(num) if vartype != entrytype: one_var_diff = True if cand_et is None: if not entrytype in types: return None #One var has Entry with "impossible" type cand_et = entrytype elif cand_et != entrytype: return None #Two vars have Entries with different types if one_var_diff and cand_et is not None: return cand_et else: # Of those types which exist in other entries, # find the one which is earliest # in types, i.e. the preferred type entrytypes = [self.type(num) for num in range(self.max_idx() + 1) if self.has_entry(num)] entrytypes = [et for et in entrytypes if et in types] if entrytypes: return types[ min([types.index(et) for et in entrytypes])] return None def __setitem__(self, key, data): """Set a slice of Entries. @param key: index or range of Entry numbers to set @type key: slice or int @param data: the data to set these entries to. Normally each entry should be a sequence; if a scalar is provided, it is treated as a single-element list. @type data: scalar or list @raise ValueError: if size of {data} does not match size of L{key} @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. 
""" if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): #Single value, promote everything a dimension idx = (key, key + 1, 1) data = [data] else: idx = key.indices(self.max_idx() + 1) if key.step is None or key.step > 0: #Iterating forward, extend slice to match data if len(data) > len(range(*idx)): idx = (idx[0], idx[0] + idx[2] * len(data), idx[2]) #get, and check, types and sizes for all data #checks first so don't have error after changing half the Entries data_idx = -1 typelist = [] for i in range(*idx): data_idx += 1 if data_idx >= len(data): continue datum = data[data_idx] if datum is None: typelist[i] = (None, None, None) continue (dims, types, elements) = _Hyperslice.types( datum, backward=self._cdf_file.backward) if len(types) <= 0: raise ValueError('Cannot find a matching CDF type.') if len(dims) > 1: raise ValueError('Entries must be scalar or 1D.') elif len(dims) == 1 and isinstance(datum[0], str_classes): raise ValueError('Entry strings must be scalar.') entry_type = None if self.has_entry(i): #If the entry already exists, match its type entry_type = self.type(i) if not entry_type in types: entry_type = None if entry_type is None: #Check other entries for this attribute entry_type = self._check_other_entries(types) if entry_type is None and self.ENTRY_ == const.zENTRY_: #Fall back to zVar type vartype = self._cdf_file[i].type() if vartype in types: entry_type = vartype else: entry_type = types[0] elif entry_type is None: entry_type = types[0] if not entry_type in lib.numpytypedict: raise ValueError('Cannot find a matching numpy type.') typelist.append((dims, entry_type, elements)) data_idx = -1 for i in range(*idx): data_idx += 1 if data_idx >= len(data) or data[data_idx] is None: if self.has_entry(i): del self[i] continue datum = data[data_idx] (dims, entry_type, elements) = typelist[data_idx] self._write_entry(i, datum, entry_type, dims, elements) def __delitem__(self, key): """Delete a slice of Entries. @param key: index or range of Entry numbers to delete @type key: slice or int @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. """ if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): idx = (key, key + 1, 1) else: idx = key.indices(self.max_idx() + 1) for i in range(*idx): self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(i), const.DELETE_, self.ENTRY_) def __iter__(self, current=0): """Iterates over all entries in this Attribute Returns data from one entry at a time until reaches the end. @note: Returned in entry-number order. """ while current <= self.max_idx(): if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current += 1 def __reversed__(self, current=None): """Iterates over all entries in this Attribute Returns data from one entry at a time, starting at end and going to beginning. @note: Returned in entry-number order. """ if current is None: current = self.max_idx() while current >= 0: if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current -= 1 def __len__(self): """Number of Entries for this Attr. NOT same as max Entry number. 
@return: Number of Entries @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_NUMENTRIES_, ctypes.byref(count)) return count.value def __repr__(self): """Returns representation of an attribute Cannot return anything that can be eval'd to create a copy of the attribtute, so just wrap the informal representation in angle brackets. @return: all the data in this attribute @rtype: str """ return '<\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute This is an 'informal' representation in that it cannot be evaluated directly to create an L{Attr}. @return: all the data in this attribute @rtype: str """ if self._cdf_file._opened: return '\n'.join([str(item) for item in self]) else: if isinstance(self._name, str): return 'Attribute "{0}" in closed CDF {1}'.format( self._name, self._cdf_file.pathname) else: return 'Attribute "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self._cdf_file.pathname.decode('ascii')) def insert(self, index, data): """Insert an entry at a particular number Inserts entry at particular number while moving all subsequent entries to one entry number later. Does not close gaps. Parameters ========== index : int index where to put the new entry data : data for the new entry """ max_entry = self.max_idx() if index > max_entry: #Easy case self[index] = data return for i in range(max_entry, index - 1, -1): if self.has_entry(i+1): self.__delitem__(i+1) if self.has_entry(i): self.new(self.__getitem__(i), type=self.type(i), number=i+1) self[index] = data def append(self, data): """Add an entry to end of attribute Puts entry after last defined entry (does not fill gaps) Parameters ========== data : data for the new entry """ self[self.max_idx() + 1] = data def _call(self, *args, **kwargs): """Select this CDF and Attr and call the CDF internal interface @param args: Passed directly to the CDF library interface. @type args: various, see :py:mod:`ctypes`. @return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self._cdf_file._call( const.SELECT_, const.ATTR_, ctypes.c_long(self._cdf_file.attr_num(self._name)[0]), *args, **kwargs) def _entry_len(self, number): """Number of elements in an Entry @param number: number of Entry @type number: int @return: number of elements @rtype: int """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') count = ctypes.c_long(0) self._call( const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_NUMELEMS_, ctypes.byref(count)) return count.value def type(self, number, new_type=None): """Find or change the CDF type of a particular Entry number Parameters ========== number : int number of Entry to check or change Other Parameters ================ new_type type to change this Entry to, from :mod:`~pycdf.const`. Omit to only check type. Returns ======= out : int CDF variable type, see :mod:`~pycdf.const` Notes ===== If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) size = ctypes.c_long(self._entry_len(number)) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATASPEC_, new_type, size, ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') cdftype = ctypes.c_long(0) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATATYPE_, ctypes.byref(cdftype), ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') return cdftype.value def has_entry(self, number): """Check if this attribute has a particular Entry number Parameters ========== number : int number of Entry to check or change Returns ======= out : bool True if ``number`` is a valid entry number; False if not """ status = self._call(const.CONFIRM_, self.ENTRY_EXISTENCE_, ctypes.c_long(number), ignore=(const.NO_SUCH_ENTRY, )) return not status == const.NO_SUCH_ENTRY def max_idx(self): """Maximum index of Entries for this Attr Returns ======= out : int maximum Entry number """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_MAXENTRY_, ctypes.byref(count)) return count.value def new(self, data, type=None, number=None): """Create a new Entry in this Attribute .. note:: If ``number`` is provided and an Entry with that number already exists, it will be overwritten. Parameters ========== data data to put in the Entry Other Parameters ================ type : int type of the new Entry, from :mod:`~pycdf.const` (otherwise guessed from ``data``) number : int Entry number to write, default is lowest available number. """ if number is None: number = 0 while self.has_entry(number): number += 1 (dims, types, elements) = _Hyperslice.types( data, backward=self._cdf_file.backward) if type is None: #Guess based on other entries type = self._check_other_entries(types) if type is None and self.ENTRY_ == const.zENTRY_: #Try to match variable type vartype = self._cdf_file[number].type() if vartype in types: type = vartype if type is None: type = types[0] elif hasattr(type, 'value'): type = type.value self._write_entry(number, data, type, dims, elements) def number(self): """Find the attribute number for this attribute Returns ======= out : int attribute number """ no = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.ATTR_NUMBER_, self._name, ctypes.byref(no)) return no.value def global_scope(self): """Determine scope of this attribute. Returns ======= out : bool True if global (i.e. gAttr), False if zAttr """ return self._cdf_file.attr_num(self._name)[1] def rename(self, new_name): """Rename this attribute Renaming a zAttribute renames it for *all* zVariables in this CDF! 
Parameters ========== new_name : str the new name of the attribute """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_ATTR_NAME_LEN256: raise CDFError(const.BAD_ATTR_NAME) self._call(const.PUT_, const.ATTR_NAME_, enc_name) self._cdf_file.add_attr_to_cache( enc_name, *self._cdf_file.attr_num(self._name)) #still in cache del self._cdf_file._attr_info[self._name] self._name = enc_name def _get_entry(self, number): """Read an Entry associated with this L{Attr} @param number: number of Entry to return @type number: int @return: data from entry numbered L{number} @rtype: list or str """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') #Make a big enough buffer length = self._entry_len(number) cdftype = self.type(number) if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): buff = numpy.empty((), 'S{0}'.format(length), order='C') else: if not cdftype in lib.numpytypedict: raise CDFError(const.BAD_DATA_TYPE) buff = numpy.empty((length,), lib.numpytypedict[cdftype], order='C') buff = numpy.require(buff, requirements=('C', 'A', 'W')) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATA_, buff.ctypes.data_as(ctypes.c_void_p)) #decode if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): if str == bytes or self._raw: #Py2k, leave as bytes result = bytes(buff) else: #Py3k, make unicode result = str(numpy.char.array(buff).decode()) else: if not self._raw: if cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(buff) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(buff) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(buff) else: result = buff else: result = buff if length == 1: result = result[0] return result def _write_entry(self, number, data, cdf_type, dims, elements): """Write an Entry to this Attr. @param number: number of Entry to write @type number: int @param data: data to write @param cdf_type: the CDF type to write, from :py:mod:`pycdf.const` @param dims: dimensions of L{data} @type dims: list @param elements: number of elements in L{data}, 1 unless it is a string @type elements: int """ if len(dims) == 0: n_write = 1 else: n_write = dims[0] if cdf_type in (const.CDF_CHAR.value, const.CDF_UCHAR.value): data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.dtype('S' + str(elements))) n_write = elements elif cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data), except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) elif cdf_type in lib.numpytypedict: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=lib.numpytypedict[cdf_type]) else: raise CDFError(const.BAD_DATA_TYPE) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATA_, ctypes.c_long(cdf_type), ctypes.c_long(n_write), data.ctypes.data_as(ctypes.c_void_p)) def _delete(self): """Delete this Attribute Also deletes all Entries associated with it. 
""" self._call(const.DELETE_, const.ATTR_) self._cdf_file.clear_attr_from_cache(self._name) self._name = None class zAttr(Attr): """zAttribute for zVariables within a CDF. .. warning:: Because zAttributes are shared across all variables in a CDF, directly manipulating them may have unexpected consequences. It is safest to operate on zEntries via :class:`zAttrList`. .. note:: When accessing a zAttr, pyCDF exposes only the zEntry corresponding to the associated zVariable. See Also ======== :class:`Attr` """ ENTRY_ = const.zENTRY_ ENTRY_DATA_ = const.zENTRY_DATA_ SCOPE = const.VARIABLE_SCOPE ENTRY_EXISTENCE_ = const.zENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMzENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXzENTRY_ ENTRY_NUMELEMS_ = const.zENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.zENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.zENTRY_DATASPEC_ def insert(self, index, data): """Insert entry at particular index number Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError def append(self, index, data): """Add entry to end of attribute list Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError class gAttr(Attr): """Global Attribute for a CDF Represents a CDF attribute, providing access to the gEntries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. Normally accessed by providing a key to a :class:`gAttrList`: >>> attribute = cdffile.attrs['attribute_name'] >>> first_gentry = attribute[0] Each element of the list is a single gEntry of the appropriate type. The index to the elements is the gEntry number. A gEntry may be either a single string or a 1D array of numerical type. Entries of numerical type (everything but CDF_CHAR and CDF_UCHAR) with a single element are returned as scalars; multiple-element entries are returned as a list. No provision is made for accessing below the entry level; the whole list is returned at once (but Python's slicing syntax can be used to extract individual items from that list.) Multi-dimensional slicing is *not* supported; an entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry gEntries are *not* necessarily contiguous; a gAttribute may have an entry 0 and entry 2 without an entry 1. :meth:`~Attr.len` will return the *number* of gEntries; use :meth:`~Attr.max_idx` to find the highest defined gEntry number and :meth:`~Attr.has_entry` to determine if a particular gEntry number exists. Iterating over all entries is also supported:: >>> entrylist = [entry for entry in attribute] Deleting gEntries will leave a "hole": >>> attribute[0:3] = [1, 2, 3] >>> del attribute[1] >>> attribute.has_entry(1) False >>> attribute.has_entry(2) True >>> print attribute[0:3] [1, None, 3] Multi-element slices over nonexistent gEntries will return ``None`` where no entry exists. Single-element indices for nonexistent gEntries will raise ``IndexError``. Assigning ``None`` to a gEntry will delete it. 
When assigning to a gEntry, the type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing gEntry of the same number in this gAttribute #. other gEntries in this gAttribute #. data-matching constraints described in :meth:`CDF.new`. See Also ======== :class:`Attr` """ ENTRY_ = const.gENTRY_ ENTRY_DATA_ = const.gENTRY_DATA_ SCOPE = const.GLOBAL_SCOPE ENTRY_EXISTENCE_ = const.gENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMgENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXgENTRY_ ENTRY_NUMELEMS_ = const.gENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.gENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.gENTRY_DATASPEC_ class AttrList(MutableMapping): """Object representing a list of attributes. .. warning:: This class should not be used directly, but only via its subclasses, :class:`gAttrList` and :class:`zAttrList`. Methods listed here are safe to use from the subclasses. .. autosummary:: ~AttrList.clone ~AttrList.copy ~AttrList.from_dict ~AttrList.new ~AttrList.rename .. automethod:: clone .. automethod:: copy .. automethod:: from_dict .. automethod:: new .. automethod:: rename """ def __init__(self, cdf_file, special_entry=None): """Initialize the attribute collection @param cdf_file: CDF these attributes are in @type cdf_file: :py:class:`pycdf.CDF` @param special_entry: callable which returns a "special" entry number, used to limit results for zAttrs to those which match the zVar (i.e. the var number) @type special_entry: callable """ self._cdf_file = cdf_file self.special_entry = special_entry def __getitem__(self, name): """Find an Attribute by name @param name: name of the Attribute to return @type name: str @return: attribute named L{name} @rtype: L{Attr} @raise KeyError: if there is no attribute named L{name} @raise CDFError: other errors in CDF library """ try: attrib = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attrib.global_scope() != self.global_scope: raise KeyError(name + ': no ' + self.attr_name + ' by that name.') return attrib def __setitem__(self, name, data): """Create an Attribute or change its entries @param name: name of Attribute to change @type name: str @param data: Entries to populate this Attribute with. Any existing Entries will be deleted! Another C{Attr} may be specified, in which case all its entries are copied. @type data: scalar, list, or L{Attr} """ if isinstance(data, AttrList): if name in self: del self[name] attr = self._get_or_create(name) for entryno in range(data.max_idx()): if data.has_entry(entryno): attr.new(data[entryno], data.type(entryno), entryno) else: attr = self._get_or_create(name) if isinstance(data, str_classes): data = [data] else: try: junk = len(data) except TypeError: data = [data] attr[:] = data del attr[len(data):] def __delitem__(self, name): """Delete an Attribute (and all its entries) @param name: name of Attribute to delete @type name: str """ try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) attr._delete() def __iter__(self, current=0): """Iterates over all Attr in this CDF or variable Returns name of one L{Attr} at a time until reaches the end. @note: Returned in number order. 
""" count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) while current < count.value: candidate = self.AttrType(self._cdf_file, current) if candidate.global_scope() == self.global_scope: if self.special_entry is None or \ candidate.has_entry(self.special_entry()): if str == bytes: value = yield(candidate._name) else: value = yield(candidate._name.decode()) if value != None: current = self[value].number() current += 1 def __repr__(self): """Returns representation of attribute list Cannot return anything that can be eval'd to create a copy of the list, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<' + self.__class__.__name__ + ':\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute list This is an 'informal' representation in that it cannot be evaluated directly to create an L{AttrList}. @return: all the data in this list of attributes @rtype: str """ if self._cdf_file._opened: return '\n'.join([key + ': ' + ( ('\n' + ' ' * (len(key) + 2)).join( [str(value[i]) + ' [' + lib.cdftypenames[value.type(i)] + ']' for i in range(value.max_idx() + 1) if value.has_entry(i)]) if isinstance(value, Attr) else str(value) + ' [' + lib.cdftypenames[self.type(key)] + ']' ) for (key, value) in sorted(self.items())]) else: if isinstance(self._cdf_file.pathname, str): return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname) else: return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname.decode('ascii')) def clone(self, master, name=None, new_name=None): """ Clones another attribute list, or one attribute from it, into this list. Parameters ========== master : AttrList the attribute list to copy from. This can be any dict-like object. Other Parameters ================ name : str (optional) name of attribute to clone (default: clone entire list) new_name : str (optional) name of the new attribute, default ``name`` """ if name is None: self._clone_list(master) else: self._clone_attr(master, name, new_name) def copy(self): """ Create a copy of this attribute list Returns ======= out : dict copy of the entries for all attributes in this list """ return dict((key, value[:] if isinstance(value, Attr) else value) for (key, value) in self.items()) def new(self, name, data=None, type=None): """ Create a new Attr in this AttrList Parameters ========== name : str name of the new Attribute Other Parameters ================ data data to put into the first entry in the new Attribute type CDF type of the first entry from :mod:`~pycdf.const`. Only used if data are specified. Raises ====== KeyError : if the name already exists in this list """ if name in self: raise KeyError(name + ' already exists.') #A zAttr without an Entry in this zVar will be a "get" not "create" attr = self._get_or_create(name) if data is not None: if self.special_entry is None: attr.new(data, type) else: attr.new(data, type, self.special_entry()) def rename(self, old_name, new_name): """ Rename an attribute in this list Renaming a zAttribute renames it for *all* zVariables in this CDF! Parameters ========== old_name : str the current name of the attribute new_name : str the new name of the attribute """ AttrList.__getitem__(self, old_name).rename(new_name) def from_dict(self, in_dict): """ Fill this list of attributes from a dictionary .. deprecated:: 0.1.5 Use :meth:`~pycdf.AttrList.clone` instead; it supports cloning from dictionaries. 
Parameters ========== in_dict : dict Attribute list is populated entirely from this dictionary; all existing attributes are deleted. """ warnings.warn("from_dict is deprecated and will be removed. Use clone.", DeprecationWarning) for k in in_dict: self[k] = in_dict[k] for k in list(self): if not k in in_dict: del self[k] def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{AttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name self[new_name] = master[name] def _clone_list(self, master): """Clones this attribute list from another @param master: the attribute list to copy from @type master: L{AttrList} """ for name in master: self._clone_attr(master, name) for name in list(self): #Can't iterate over a list we're changing if not name in master: del self[name] def _get_or_create(self, name): """Retrieve L{Attr} or create it if it doesn't exist @param name: name of the attribute to look up or create @type name: str @return: attribute with this name @rtype: L{Attr} """ attr = None try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status != const.NO_SUCH_ATTR: raise if attr is None: attr = self.AttrType(self._cdf_file, name, True) elif attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) return attr class gAttrList(AttrList): """ Object representing *all* the gAttributes in a CDF. Normally accessed as an attribute of an open :class:`CDF`: >>> global_attribs = cdffile.attrs Appears as a dictionary: keys are attribute names; each value is an attribute represented by a :class:`gAttr` object. To access the global attribute TEXT: >>> text_attr = cdffile.attrs['TEXT'] See Also ======== :class:`AttrList` """ AttrType = gAttr attr_name = 'gAttribute' global_scope = True def __len__(self): """ Number of gAttributes in this CDF Returns ======= out : int number of gAttributes in the CDF """ count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMgATTRS_, ctypes.byref(count)) return count.value class zAttrList(AttrList): """Object representing *all* the zAttributes in a zVariable. Normally accessed as an attribute of a :class:`Var` in an open CDF: >>> epoch_attribs = cdffile['Epoch'].attrs Appears as a dictionary: keys are attribute names, values are the value of the zEntry associated with the appropriate zVariable. Each vAttribute in a CDF may only have a *single* entry associated with each variable. The entry may be a string, a single numerical value, or a series of numerical values. Entries with multiple values are returned as an entire list; direct access to the individual elements is not possible. Example: finding the first dependency of (ISTP-compliant) variable ``Flux``: >>> print cdffile['Flux'].attrs['DEPEND_0'] zAttributes are shared among zVariables, one zEntry allowed per zVariable. (pyCDF hides this detail.) Deleting the last zEntry for a zAttribute will delete the underlying zAttribute. zEntries are created and destroyed by the usual dict methods on the zAttrlist: >>> epoch_attribs['new_entry'] = [1, 2, 4] #assign a list to new zEntry >>> del epoch_attribs['new_entry'] #delete the zEntry The type of the zEntry is guessed from data provided. 
The type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing zEntry corresponding to this zVar #. other zEntries in this zAttribute #. the type of this zVar #. data-matching constraints described in :py:meth:`CDF.new` See Also ======== :class:`AttrList` """ AttrType = zAttr attr_name = 'zAttribute' global_scope = False def __init__(self, zvar): """Initialize the attribute collection @param zvar: zVariable these attributes are in @param zvar: :py:class:`pycdf.Var` """ super(zAttrList, self).__init__(zvar.cdf_file, zvar._num) self._zvar = zvar def __getitem__(self, name): """Find an zEntry by name @param name: name of the zAttribute to return @type name: str @return: attribute named L{name} @rtype: L{zAttr} @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if attrib.has_entry(zvar_num): attrib._raw = self._zvar._raw return attrib[zvar_num] else: raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) def __delitem__(self, name): """Delete an zEntry by name @param name: name of the zEntry to delete @type name: str @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library @note: If this is the only remaining entry, the Attribute will be deleted. """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(str(name) + ': no such attribute for variable ' + str(self._zvar._name)) del attrib[zvar_num] if len(attrib) == 0: attrib._delete() def __setitem__(self, name, data): """Sets a zEntry by name The type of the zEntry is guessed from L{data}. The type is chosen to match the data; subject to that constraint, it will try to match (in order): 1. existing zEntry corresponding to this zVar 2. other zEntries in this zAttribute 3. the type of this zVar 4. data-matching constraints described in L{_Hyperslice.types} @param name: name of zAttribute; zEntry for this zVariable will be set in zAttribute by this name @type name: str @raise CDFError: errors in CDF library @raise ValueError: if unable to find a valid CDF type matching L{data}, or if L{data} is the wrong dimensions. """ try: attr = super(zAttrList, self).__getitem__(name) except KeyError: attr = zAttr(self._cdf_file, name, True) attr._raw = self._zvar._raw attr[self._zvar._num()] = data def __len__(self): """Number of zAttributes in this variable @return: number of zAttributes in the CDF which have entries for this variable. @rtype: int """ length = 0 count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) current = 0 while current < count.value: candidate = zAttr(self._cdf_file, current) if not candidate.global_scope(): if candidate.has_entry(self._zvar._num()): length += 1 current += 1 return length def type(self, name, new_type=None): """Find or change the CDF type of a zEntry in this zVar @param name: name of the zAttr to check or change @type name: str @param new_type: type to change it to, see :py:mod:`pycdf.const` @type new_type: ctypes.c_long @return: CDF variable type, see :py:mod:`pycdf.const` @rtype: int @note: If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) return attrib.type(zvar_num, new_type) def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{zAttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name if new_name in self: del self[new_name] self.new(new_name, master[name], master.type(name) if hasattr(master, 'type') else None)
__init__
Create a CDF Exception

Uses CDF C library to look up an appropriate error message.

Parameters
==========
status : ctypes.c_long
    CDF status
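A minimal sketch of the masked constructor, inferred only from the docstring above and from how ``check_status`` and ``__str__`` use the exception elsewhere in this module. It assumes the module-level ``lib`` and ``const`` objects; the status-text constants (``CDF_STATUS_``, ``STATUS_TEXT_``, ``CDF_STATUSTEXT_LEN``) are taken from the CDF C reference manual and are not confirmed by this file.

def __init__(self, status):
    #Store the raw status and a fallback message in case lookup fails
    self.status = status
    self.string = 'CDF error ' + repr(status) + ', unable to get details.'
    message = ctypes.create_string_buffer(const.CDF_STATUSTEXT_LEN + 1)
    try:
        retval = lib._library.CDFlib(const.SELECT_, const.CDF_STATUS_,
                                     ctypes.c_long(status),
                                     const.GET_, const.STATUS_TEXT_, message,
                                     const.NULL_)
        if retval == const.CDF_OK:
            value = message.value
            self.string = value.decode() if isinstance(value, bytes) else value
    except Exception:
        pass  #keep the fallback message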
#!/usr/bin/env python # -*- coding: utf-8 -*- """ das developers note: This a is modification of the original SpacePy pycdf package. All refereneces to the greater spacepy package have been removed to create a small standalone module. --cwp 2018-10-18 The libcdf.so location code has been changed to find the version installed in anaconda. --cwp 2020-04-06 This package provides a Python interface to the Common Data Format (CDF) library used for many NASA missions, available at http://cdf.gsfc.nasa.gov/. It is targeted at Python 2.6+ and should work without change on either Python 2 or Python 3. The interface is intended to be 'pythonic' rather than reproducing the C interface. To open or close a CDF and access its variables, see the :class:`CDF` class. Accessing data within the variables is via the :class:`Var` class. The :data:`lib` object provides access to some routines that affect the functionality of the library in general. The :mod:`~pycdf.const` module contains constants useful for accessing the underlying library. Authors: Jon Niehof Institution: University of New Hampshire Contact: [email protected] Copyright 2010-2015 Los Alamos National Security, LLC. """ __contact__ = 'Jon Niehof, [email protected]' try: from collections.abc import MutableMapping, MutableSequence except ImportError: from collections import MutableMapping, MutableSequence import ctypes import ctypes.util import datetime import operator import os import os.path import shutil import sys import tempfile import warnings import weakref import numpy import numpy.ma #Import const AFTER library loaded, so failed load doesn't leave half-imported #from . import const try: str_classes = (str, bytes, unicode) except NameError: str_classes = (str, bytes) class Library(object): """ Abstraction of the base CDF C library and its state. Not normally intended for end-user use. An instance of this class is created at package load time as the :data:`~pycdf.lib` variable, providing access to the underlying C library if necessary. The CDF library itself is described in section 2.1 of the CDF user's guide, as well as the CDF C reference manual. Calling the C library directly requires knowledge of :mod:`ctypes`. Instantiating this object loads the C library, see :doc:`/pycdf` docs for details. .. autosummary:: ~Library.call ~Library.check_status ~Library.datetime_to_epoch ~Library.datetime_to_epoch16 ~Library.datetime_to_tt2000 ~Library.epoch_to_datetime ~Library.epoch_to_epoch16 ~Library.epoch_to_num ~Library.epoch_to_tt2000 ~Library.epoch16_to_datetime ~Library.epoch16_to_epoch ~Library.epoch16_to_tt2000 ~Library.set_backward supports_int8 ~Library.tt2000_to_datetime ~Library.tt2000_to_epoch ~Library.tt2000_to_epoch16 v_datetime_to_epoch v_datetime_to_epoch16 v_datetime_to_tt2000 v_epoch_to_datetime v_epoch_to_tt2000 v_epoch16_to_datetime v_epoch16_to_tt2000 v_tt2000_to_datetime v_tt2000_to_epoch v_tt2000_to_epoch16 libpath version .. automethod:: call .. automethod:: check_status .. automethod:: datetime_to_epoch .. automethod:: datetime_to_epoch16 .. automethod:: datetime_to_tt2000 .. automethod:: epoch_to_datetime .. automethod:: epoch_to_epoch16 .. automethod:: epoch_to_num .. automethod:: epoch_to_tt2000 .. automethod:: epoch16_to_datetime .. automethod:: epoch16_to_epoch .. automethod:: epoch16_to_tt2000 .. automethod:: set_backward .. attribute:: supports_int8 True if this library supports INT8 and TIME_TT2000 types; else False. .. automethod:: tt2000_to_datetime .. automethod:: tt2000_to_epoch .. 
automethod:: tt2000_to_epoch16 .. method:: v_datetime_to_epoch(datetime) A vectorized version of :meth:`datetime_to_epoch` which takes a numpy array of datetimes as input and returns an array of epochs. .. method:: v_datetime_to_epoch16(datetime) A vectorized version of :meth:`datetime_to_epoch16` which takes a numpy array of datetimes as input and returns an array of epoch16. .. method:: v_datetime_to_tt2000(datetime) A vectorized version of :meth:`datetime_to_tt2000` which takes a numpy array of datetimes as input and returns an array of TT2000. .. method:: v_epoch_to_datetime(epoch) A vectorized version of :meth:`epoch_to_datetime` which takes a numpy array of epochs as input and returns an array of datetimes. .. method:: v_epoch_to_tt2000(epoch) A vectorized version of :meth:`epoch_to_tt2000` which takes a numpy array of epochs as input and returns an array of tt2000s. .. method:: v_epoch16_to_datetime(epoch0, epoch1) A vectorized version of :meth:`epoch16_to_datetime` which takes a numpy array of epoch16 as input and returns an array of datetimes. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_epoch16_to_tt2000(epoch16) A vectorized version of :meth:`epoch16_to_tt2000` which takes a numpy array of epoch16 as input and returns an array of tt2000s. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_tt2000_to_datetime(tt2000) A vectorized version of :meth:`tt2000_to_datetime` which takes a numpy array of tt2000 as input and returns an array of datetimes. .. method:: v_tt2000_to_epoch(tt2000) A vectorized version of :meth:`tt2000_to_epoch` which takes a numpy array of tt2000 as input and returns an array of epochs. .. method:: v_tt2000_to_epoch16(tt2000) A vectorized version of :meth:`tt2000_to_epoch16` which takes a numpy array of tt2000 as input and returns an array of epoch16. .. attribute:: libpath The path where pycdf found the CDF C library, potentially useful in debugging. If this contains just the name of a file (with no path information), then the system linker found the library for pycdf. On Linux, ``ldconfig -p`` may be useful for displaying the system's library resolution. .. attribute:: version Version of the CDF library, (version, release, increment, subincrement) """ def __init__(self, libpath=None, library=None): """Load the CDF C library. Searches for the library in the order: 1. Appropriately-named file in CDF_LIB 2. Appropriately-named file in CDF_BASE 3. Standard library search path @raise CDFError: BAD_DATA_TYPE if can't map types properly """ if not 'CDF_TMP' in os.environ: os.environ['CDF_TMP'] = tempfile.gettempdir() if not library: if not libpath: self.libpath, self._library = self._find_lib() if self._library is None: raise Exception(( 'Cannot load CDF C library; checked {0}. 
' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(self.libpath))) else: self._library = ctypes.CDLL(libpath) self.libpath = libpath else: self._library = library self.libpath = libpath self._library.CDFlib.restype = ctypes.c_long #commonly used, so set it up here self._library.EPOCHbreakdown.restype = ctypes.c_long self._library.computeEPOCH.restype = ctypes.c_double self._library.computeEPOCH.argtypes = [ctypes.c_long] * 7 self._library.computeEPOCH16.restype = ctypes.c_double self._library.computeEPOCH16.argtypes = [ctypes.c_long] * 10 + \ [ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDFsetFileBackward'): self._library.CDFsetFileBackward.restype = None self._library.CDFsetFileBackward.argtypes = [ctypes.c_long] #Map old name to the 3.7.1+ name if not hasattr(self._library, 'computeTT2000') \ and hasattr(self._library, 'CDF_TT2000_from_UTC_parts'): self._library.computeTT2000 \ = self._library.CDF_TT2000_from_UTC_parts if hasattr(self._library, 'computeTT2000'): self._library.computeTT2000.restype = ctypes.c_longlong self._library.computeTT2000.argtypes = \ [ctypes.c_double] *9 #Map old name to the 3.7.1+ name if not hasattr(self._library, 'breakdownTT2000') \ and hasattr(self._library, 'CDF_TT2000_to_UTC_parts'): self._library.breakdownTT2000 \ = self._library.CDF_TT2000_to_UTC_parts if hasattr(self._library, 'breakdownTT2000'): self._library.breakdownTT2000.restype = None self._library.breakdownTT2000.argtypes = \ [ctypes.c_longlong] + [ctypes.POINTER(ctypes.c_double)] * 9 if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH'): self._library.CDF_TT2000_to_UTC_EPOCH.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH.argtypes = [ctypes.c_longlong] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH'): self._library.CDF_TT2000_from_UTC_EPOCH.restype = ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH.argtypes = [ctypes.c_double] if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH16'): self._library.CDF_TT2000_to_UTC_EPOCH16.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH16.argtypes = \ [ctypes.c_longlong, ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH16'): self._library.CDF_TT2000_from_UTC_EPOCH16.restype = \ ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH16.argtypes = \ [ctypes.POINTER(ctypes.c_double * 2)] #Get CDF version information ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) sub = ctypes.c_char(b' ') self.call(const.GET_, const.LIB_VERSION_, ctypes.byref(ver), const.GET_, const.LIB_RELEASE_, ctypes.byref(rel), const.GET_, const.LIB_INCREMENT_, ctypes.byref(inc), const.GET_, const.LIB_subINCREMENT_, ctypes.byref(sub)) ver = ver.value rel = rel.value inc = inc.value sub = sub.value self.version = (ver, rel, inc, sub) self._del_middle_rec_bug = ver < 3 or (ver == 3 and (rel < 4 or (rel == 4 and inc < 1))) self.supports_int8 = (ver > 3 or (ver == 3 and rel >=4)) self.cdftypenames = {const.CDF_BYTE.value: 'CDF_BYTE', const.CDF_CHAR.value: 'CDF_CHAR', const.CDF_INT1.value: 'CDF_INT1', const.CDF_UCHAR.value: 'CDF_UCHAR', const.CDF_UINT1.value: 'CDF_UINT1', const.CDF_INT2.value: 'CDF_INT2', const.CDF_UINT2.value: 'CDF_UINT2', const.CDF_INT4.value: 'CDF_INT4', const.CDF_UINT4.value: 'CDF_UINT4', const.CDF_INT8.value: 'CDF_INT8', const.CDF_FLOAT.value: 'CDF_FLOAT', const.CDF_REAL4.value: 'CDF_REAL4', const.CDF_DOUBLE.value: 'CDF_DOUBLE', const.CDF_REAL8.value: 'CDF_REAL8', const.CDF_EPOCH.value: 'CDF_EPOCH', 
const.CDF_EPOCH16.value: 'CDF_EPOCH16', const.CDF_TIME_TT2000.value: 'CDF_TIME_TT2000', } self.numpytypedict = {const.CDF_BYTE.value: numpy.int8, const.CDF_CHAR.value: numpy.int8, const.CDF_INT1.value: numpy.int8, const.CDF_UCHAR.value: numpy.uint8, const.CDF_UINT1.value: numpy.uint8, const.CDF_INT2.value: numpy.int16, const.CDF_UINT2.value: numpy.uint16, const.CDF_INT4.value: numpy.int32, const.CDF_UINT4.value: numpy.uint32, const.CDF_INT8.value: numpy.int64, const.CDF_FLOAT.value: numpy.float32, const.CDF_REAL4.value: numpy.float32, const.CDF_DOUBLE.value: numpy.float64, const.CDF_REAL8.value: numpy.float64, const.CDF_EPOCH.value: numpy.float64, const.CDF_EPOCH16.value: numpy.dtype((numpy.float64, 2)), const.CDF_TIME_TT2000.value: numpy.int64, } self.timetypes = [const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value] if not self.supports_int8: del self.cdftypenames[const.CDF_INT8.value] del self.numpytypedict[const.CDF_INT8.value] del self.cdftypenames[const.CDF_TIME_TT2000.value] del self.numpytypedict[const.CDF_TIME_TT2000.value] elif sys.platform.startswith('linux') \ and os.uname()[4].startswith('arm') \ and hasattr(self._library, 'computeTT2000') \ and self._library.computeTT2000( 2010, 1, 1, 0, 0, 0, 0, 0, 0) != 315576066184000000: #TT2000 call failed, so probably need to type-pun #double arguments to variadic functions. #Calling convention for non-variadic functions with floats #is unique, but convention for ints is same as variadic. #So type-pun arguments to integers to force that calling #convention. if ctypes.sizeof(ctypes.c_longlong) != \ ctypes.sizeof(ctypes.c_double): warnings.warn('ARM with unknown type sizes; ' 'TT2000 functions will not work.') else: self._library.computeTT2000.argtypes = \ [ctypes.c_longlong] * 9 c_ll_p = ctypes.POINTER(ctypes.c_longlong) if self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( 2010)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) != 315576066184000000: warnings.warn('ARM with unknown calling convention; ' 'TT2000 functions will not work.') self.datetime_to_tt2000 = self._datetime_to_tt2000_typepunned v_epoch16_to_datetime = numpy.frompyfunc( self.epoch16_to_datetime, 2, 1) self.v_epoch16_to_datetime = \ lambda x: v_epoch16_to_datetime(x[..., 0], x[..., 1]) self.v_epoch_to_datetime = numpy.frompyfunc( self.epoch_to_datetime, 1, 1) self.v_tt2000_to_datetime = numpy.frompyfunc( self.tt2000_to_datetime, 1, 1) self.v_datetime_to_epoch = numpy.vectorize( self.datetime_to_epoch, otypes=[numpy.float64]) v_datetime_to_epoch16 = numpy.frompyfunc( self.datetime_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. 
We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_datetime_to_epoch16(x): retval = numpy.require(v_datetime_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_datetime_to_epoch16 = _v_datetime_to_epoch16 self.v_datetime_to_tt2000 = numpy.vectorize( self.datetime_to_tt2000, otypes=[numpy.int64]) self.v_epoch_to_tt2000 = numpy.vectorize( self.epoch_to_tt2000, otypes=[numpy.int64]) self.v_tt2000_to_epoch = numpy.vectorize( self.tt2000_to_epoch, otypes=[numpy.float64]) v_epoch16_to_tt2000 = numpy.frompyfunc( self.epoch16_to_tt2000, 2, 1) self.v_epoch16_to_tt2000 = \ lambda x: v_epoch16_to_tt2000(x[..., 0], x[..., 1]) v_tt2000_to_epoch16 = numpy.frompyfunc( self.tt2000_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_tt2000_to_epoch16(x): retval = numpy.require(v_tt2000_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_tt2000_to_epoch16 = _v_tt2000_to_epoch16 if not self.supports_int8: self.datetime_to_tt2000 = self._bad_tt2000 self.tt2000_to_datetime = self._bad_tt2000 self.v_datetime_to_tt2000 = self._bad_tt2000 self.v_tt2000_to_datetime = self._bad_tt2000 self.epoch_to_tt2000 = self._bad_tt2000 self.v_epoch_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch = self._bad_tt2000 self.v_tt2000_to_epoch = self._bad_tt2000 self.epoch_16_to_tt2000 = self._bad_tt2000 self.v_epoch16_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch16 = self._bad_tt2000 self.v_tt2000_to_epoch16 = self._bad_tt2000 #Default to V2 CDF self.set_backward(True) @staticmethod def _find_lib(): """ Search for the CDF library Searches in likely locations for CDF libraries and attempts to load them. Stops at first successful load and, if fails, reports all the files that were tried as libraries. Returns ======= out : tuple This is either (path to library, loaded library) or, in the event of failure, (None, list of libraries tried) """ failed = [] for libpath in Library._lib_paths(): try: lib = ctypes.CDLL(libpath) except: failed.append(libpath) else: return libpath, lib return (failed, None) @staticmethod def _lib_paths(): """Find candidate paths for the CDF library Does not check that the library is actually in any particular directory, just returns a list of possible locations, in priority order. Returns ======= out : generator of str paths that look like the CDF library """ #What the library might be named names = { 'win32': ['cdf.dll'], 'darwin': ['libcdf.dylib', 'cdf.dylib', 'libcdf.so'], 'linux2': ['libcdf.so'], 'linux': ['libcdf.so'], } names = names.get(sys.platform, ['libcdf.so']) #All existing CDF-library-like paths within a directory search_dir = lambda x: \ [os.path.join(x, fname) for fname in names if os.path.exists(os.path.join(x, fname))] # Only use anaconda locations... # Defined during builds ... if 'PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['PREFIX'], 'lib')): yield p # defined when conda is activated ... 
if 'CONDA_PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'lib')): yield p # Special subdirectory for anaconda unix packages on windows if 'LIBRARY_BIN' in os.environ: for p in search_dir(os.environ['LIBRARY_BIN']): yield p ctypespath = ctypes.util.find_library( 'cdf.dll' if sys.platform == 'win32' else 'cdf') if ctypespath: yield ctypespath def check_status(self, status, ignore=()): """ Raise exception or warning based on return status of CDF call Parameters ========== status : int status returned by the C library Other Parameters ================ ignore : sequence of ctypes.c_long CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. (Default none). Raises ====== CDFError : if status < CDF_WARN, indicating an error Warns ===== CDFWarning : if CDF_WARN <= status < CDF_OK, indicating a warning. Returns ======= out : int status (unchanged) """ if status == const.CDF_OK or status in ignore: return status if status < const.CDF_WARN: raise CDFError(status) else: warning = CDFWarning(status) warning.warn() return status def call(self, *args, **kwargs): """ Call the CDF internal interface Passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with :meth:`check_status`. Terminal NULL is automatically added to args. Parameters ========== args : various, see :mod:`ctypes` Passed directly to the CDF library interface. Useful constants are defined in the :mod:`~pycdf.const` module. Other Parameters ================ ignore : sequence of CDF statuses sequence of CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. Returns ======= out : int CDF status from the library Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ if 'ignore' in kwargs: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) ), kwargs['ignore']) else: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) )) def set_backward(self, backward=True): """ Set backward compatibility mode for new CDFs Unless backward compatible mode is set, CDF files created by the version 3 library can not be read by V2. Parameters ========== backward : boolean Set backward compatible mode if True; clear it if False. Raises ====== ValueError : if backward=False and underlying CDF library is V2 """ if self.version[0] < 3: if not backward: raise ValueError( 'Cannot disable backward-compatible mode for CDF version 2.') else: return self._library.CDFsetFileBackward(const.BACKWARDFILEon if backward else const.BACKWARDFILEoff) def epoch_to_datetime(self, epoch): """ Converts a CDF epoch value to a datetime Parameters ========== epoch : float epoch value from CDF Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
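A quick round-trip check against :meth:`datetime_to_epoch` (illustrative; assumes ``import pycdf``):

>>> import datetime
>>> dt = datetime.datetime(2010, 1, 1)
>>> pycdf.lib.epoch_to_datetime(pycdf.lib.datetime_to_epoch(dt))
datetime.datetime(2010, 1, 1, 0, 0)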
See Also ======== v_epoch_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) self._library.EPOCHbreakdown(ctypes.c_double(epoch), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999000) else: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, msec.value * 1000) def datetime_to_epoch(self, dt): """ Converts a Python datetime to a CDF Epoch value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : float epoch corresponding to dt See Also ======== v_datetime_to_epoch """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) micro = dt.microsecond % 1000 if micro >= 500 and dt.year < 9999: dt += datetime.timedelta(0, 0, 1000) return self._library.computeEPOCH(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000)) def epoch16_to_datetime(self, epoch0, epoch1): """ Converts a CDF epoch16 value to a datetime .. note:: The call signature has changed since SpacePy 0.1.2. Formerly this method took a single argument with two values; now it requires two arguments (one for each value). To convert existing code, replace ``epoch16_to_datetime(epoch)`` with ``epoch16_to_datetime(*epoch)``. Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. See Also ======== v_epoch16_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) usec = ctypes.c_long(0) nsec = ctypes.c_long(0) psec = ctypes.c_long(0) self._library.EPOCH16breakdown((ctypes.c_double * 2)(epoch0, epoch1), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec), ctypes.byref(psec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) micro = int(float(msec.value) * 1000 + float(usec.value) + float(nsec.value) / 1000 + float(psec.value) / 1e6 + 0.5) if micro < 1000000: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_epoch16(self, dt): """ Converts a Python datetime to a CDF Epoch16 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : list of float epoch16 corresponding to dt See Also ======== v_datetime_to_epoch16 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) #Default to "illegal epoch" epoch16 = (ctypes.c_double * 2)(-1., -1.) 
if self._library.computeEPOCH16(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0, 0, epoch16): return (-1., -1.) #Failure, so illegal epoch return (epoch16[0], epoch16[1]) def epoch_to_epoch16(self, epoch): """ Converts a CDF EPOCH to a CDF EPOCH16 value Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : (double, double) EPOCH16 corresponding to epoch """ e = numpy.require(epoch, numpy.float64) s = numpy.trunc(e / 1000.0) #ugly numpy stuff, probably a better way.... res = numpy.hstack((s, (e - s * 1000.0) * 1e9)) if len(res) <= 2: return res newshape = list(res.shape[0:-2]) newshape.append(res.shape[-1] // 2) newshape.append(2) return numpy.rollaxis(res.reshape(newshape), -1, -2) def epoch_to_num(self, epoch): """ Convert CDF EPOCH to matplotlib number. Same output as :func:`~matplotlib.dates.date2num` and useful for plotting large data sets without converting the times through datetime. Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : double Floating point number representing days since 0001-01-01. """ #date2num day 1 is 1/1/1 00UT #epoch 1/1/1 00UT is 31622400000.0 (millisecond) return (epoch - 31622400000.0) / (24 * 60 * 60 * 1000.0) + 1.0 def epoch16_to_epoch(self, epoch16): """ Converts a CDF EPOCH16 to a CDF EPOCH value Parameters ========== epoch16 : (double, double) EPOCH16 to convert. Lists and numpy arrays are acceptable. LAST dimension should be 2: the two pairs of EPOCH16 Returns ======= out : double EPOCH corresponding to epoch16 """ e = numpy.require(epoch16, numpy.float64) return e[..., 0] * 1000.0 + numpy.round(e[..., 1] / 1e9) def tt2000_to_datetime(self, tt2000): """ Converts a CDF TT2000 value to a datetime .. note:: Although TT2000 values support leapseconds, Python's datetime object does not. Any times after 23:59:59.999999 will be truncated to 23:59:59.999999. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
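For example (illustrative; assumes ``import pycdf``), TT2000 zero is the J2000 epoch, which falls 64.184 seconds before noon UTC on 2000-01-01:

>>> pycdf.lib.tt2000_to_datetime(0)
datetime.datetime(2000, 1, 1, 11, 58, 55, 816000)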
See Also ======== v_tt2000_to_datetime """ yyyy = ctypes.c_double(0) mm = ctypes.c_double(0) dd = ctypes.c_double(0) hh = ctypes.c_double(0) min = ctypes.c_double(0) sec = ctypes.c_double(0) msec = ctypes.c_double(0) usec = ctypes.c_double(0) nsec = ctypes.c_double(0) self._library.breakdownTT2000( ctypes.c_longlong(tt2000), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) sec = int(sec.value) if sec >= 60: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), 59, 999999) micro = int(msec.value * 1000 + usec.value + nsec.value / 1000 + 0.5) if micro < 1000000: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_tt2000(self, dt): """ Converts a Python datetime to a CDF TT2000 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0) def _datetime_to_tt2000_typepunned(self, dt): """ Converts a Python datetime to a CDF TT2000 value Typepunned version that passes doubles as longlongs, to get around ARM calling convention oddness. Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ c_ll_p = ctypes.POINTER(ctypes.c_longlong) if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( dt.year)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.month)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.day)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.hour)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.minute)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.second)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond // 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond % 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) def epoch_to_tt2000(self, epoch): """ Converts a CDF EPOCH to a CDF TT2000 value Parameters ========== epoch : double EPOCH to convert Returns ======= out : int tt2000 corresponding to epoch See Also ======== v_epoch_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH(epoch) def tt2000_to_epoch(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH .. note:: Although TT2000 values support leapseconds, CDF EPOCH values do not. 
Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double EPOCH corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch """ return self._library.CDF_TT2000_to_UTC_EPOCH(tt2000) def epoch16_to_tt2000(self, epoch0, epoch1): """ Converts a CDF epoch16 value to TT2000 .. note:: Because TT2000 does not support picoseconds, the picoseconds value in epoch is ignored (i.e., truncated.) Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : long TT2000 corresponding to epoch. See Also ======== v_epoch16_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH16( (ctypes.c_double * 2)(epoch0, epoch1)) def tt2000_to_epoch16(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH16 .. note:: Although TT2000 values support leapseconds, CDF EPOCH16 values do not. Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double, double EPOCH16 corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch16 """ #Default to "illegal epoch" if isn't populated epoch16 = (ctypes.c_double * 2)(-1., -1.) if self._library.CDF_TT2000_to_UTC_EPOCH16(tt2000, epoch16): return (-1., -1.) #Failure; illegal epoch return (epoch16[0], epoch16[1]) def _bad_tt2000(*args, **kwargs): """Convenience function for complaining that TT2000 not supported""" raise NotImplementedError( 'TT2000 functions require CDF library 3.4.0 or later') def download_library(): """Download and install the CDF library""" if sys.platform != 'win32': raise NotImplementedError( 'CDF library install only supported on Windows') try: import html.parser as HTMLParser except ImportError: import HTMLParser #https://stackoverflow.com/questions/1713038/super-fails-with-error-typeerror-argument-1-must-be-type-not-classobj class LinkParser(HTMLParser.HTMLParser, object): def __init__(self, *args, **kwargs): self.links_found = [] super(LinkParser, self).__init__(*args, **kwargs) def handle_starttag(self, tag, attrs): if tag != 'a' or attrs[0][0] != 'href': return self.links_found.append(attrs[0][1]) import re import subprocess try: import urllib.request as u except ImportError: import urllib as u # Removed reference to spacepy #import spacepy #if spacepy.config.get('user_agent', None): # class AppURLopener(u.FancyURLopener): # version = spacepy.config['user_agent'] # u._urlopener = AppURLopener() baseurl = 'https://spdf.sci.gsfc.nasa.gov/pub/software/cdf/dist/' url = u.urlopen(baseurl) listing = url.read() url.close() p = LinkParser() p.feed(listing) cdfdist = [l for l in p.links_found if re.match('^cdf3\d_\d(?:_\d)?/$', l)] if not cdfdist: raise RuntimeError( "Couldn't find CDF distribution directory to download") cdfdist.sort(key=lambda x: x.rstrip('/').split('_')) cdfverbase = cdfdist[-1].rstrip('/') instfname = cdfverbase + ('_0' if cdfverbase.count('_') == 1 else '') + \ '-setup-{0}.exe'.format(len('%x' % sys.maxsize)*4) insturl = baseurl + cdfverbase + '/windows/' + instfname tmpdir = tempfile.mkdtemp() try: fname, status = u.urlretrieve(insturl, os.path.join(tmpdir, instfname)) subprocess.check_call([fname, '/install', '/q1'], shell=False) finally: shutil.rmtree(tmpdir) _libpath, _library = Library._find_lib() 
if _library is None: raise Exception(('Cannot load CDF C library; checked {0}. ' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(_libpath))) from . import const lib = Library(_libpath, _library) """Module global library object. Initalized at module load time so all classes have ready access to the CDF library and a common state. E.g: >>> import pycdf >>> pycdf.lib.version (3, 3, 0, ' ') """ class CDFException(Exception): """ Base class for errors or warnings in the CDF library. Not normally used directly, but in subclasses :class:`CDFError` and :class:`CDFWarning`. Error messages provided by this class are looked up from the underlying C library. """ # MASKED: __init__ function (lines 1216-1241) def __str__(self): """ Error string associated with the library error. Returns ======= out : str Error message from the CDF library. """ return self.string class CDFError(CDFException): """Raised for an error in the CDF library.""" pass class CDFWarning(CDFException, UserWarning): """Used for a warning in the CDF library.""" def warn(self, level=4): """ Issues a warning based on the information stored in my exception Intended for use in check_status or similar wrapper function. Other Parameters ================ level : int optional (default 3), how far up the stack the warning should be reported. Passed directly to :class:`warnings.warn`. """ warnings.warn(self, self.__class__, level) class EpochError(Exception): """Used for errors in epoch routines""" pass def _compress(obj, comptype=None, param=None): """Set or check the compression of a :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param obj: object on which to set or check compression @type obj: :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param comptype: type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :py:mod:`pycdf.const`. If not specified, will not change compression. @type comptype: ctypes.c_long @param param: Compression parameter, see CDF CRM 4.10 and :py:mod:`pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
@type param: ctypes.c_long @return: (comptype, param) currently in effect @rtype: tuple """ if isinstance(obj, CDF): COMPRESSION_ = const.CDF_COMPRESSION_ elif isinstance(obj, Var): COMPRESSION_ = const.zVAR_COMPRESSION_ else: raise ValueError('Must specify a CDF or Var type.') validparams = {const.NO_COMPRESSION.value: [ctypes.c_long(0)], const.RLE_COMPRESSION.value: [const.RLE_OF_ZEROs], const.HUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.AHUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.GZIP_COMPRESSION.value: [ctypes.c_long(5), ctypes.c_long(1), ctypes.c_long(2), ctypes.c_long(3), ctypes.c_long(4), ctypes.c_long(6), ctypes.c_long(7), ctypes.c_long(8), ctypes.c_long(9), ], } comptypes = [const.NO_COMPRESSION, const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION, const.GZIP_COMPRESSION] comptypevalues = [i.value for i in comptypes] if comptype != None: if not hasattr(comptype, 'value'): comptype = ctypes.c_long(comptype) if param is None: if not comptype.value in validparams: raise CDFError(const.BAD_COMPRESSION) param = validparams[comptype.value][0] paramlist = (ctypes.c_long * 1)(param) obj._call(const.PUT_, COMPRESSION_, comptype, paramlist) params = (ctypes.c_long * const.CDF_MAX_PARMS)(*([0] * const.CDF_MAX_PARMS)) comptype = ctypes.c_long(0) percent = ctypes.c_long(0) obj._call(const.GET_, COMPRESSION_, ctypes.byref(comptype), ctypes.byref(params), ctypes.byref(percent)) param = params[0] if not comptype.value in comptypevalues: raise CDFError(const.BAD_COMPRESSION) validparamvalues = [i.value for i in validparams[comptype.value]] if not param in validparamvalues: raise CDFError(const.BAD_COMPRESSION_PARM) comptype = comptypes[comptypevalues.index(comptype.value)] if comptype in (const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION): param = validparams[comptype.value][validparamvalues.index(param)] return (comptype, param) class CDF(MutableMapping): """ Python object representing a CDF file. Open or create a CDF file by creating an object of this class. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. A readonly CDF with many variables may be slow to close. See :meth:`readonly`. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :meth:`close` or :meth:`save` when done. .. note:: Existing CDF files are opened read-only by default, see :meth:`readonly` to change. CDF supports the `with <http://docs.python.org/tutorial/inputoutput.html#methods-of-file-objects>`_ keyword, like other file objects, so: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... #do brilliant things with the CDF will open the CDF, execute the indented statements, and close the CDF when finished or when an error occurs. The `python docs <http://docs.python.org/reference/compound_stmts.html#with>`_ include more detail on this 'context manager' ability. 
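Failures reported by the underlying C library are raised as :class:`CDFError` (illustrative; the filename is hypothetical):

>>> try:
...     cdffile = pycdf.CDF('no_such_file.cdf')
... except pycdf.CDFError as e:
...     print(e)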
CDF objects behave like a python `dictionary <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_, where the keys are names of variables in the CDF, and the values, :class:`Var` objects. As a dictionary, they are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_ and it is easy to loop over all of the variables in a file. Some examples: #. List the names of all variables in the open CDF ``cdffile``: >>> cdffile.keys() >>> for k in cdffile: #Alternate ... print(k) #. Get a :class:`Var` object for the variable named ``Epoch``: >>> epoch = cdffile['Epoch'] #. Determine if a CDF contains a variable named ``B_GSE``: >>> if 'B_GSE' in cdffile: ... print('B_GSE is in the file') ... else: ... print('B_GSE is not in the file') #. Find how many variables are in the file: >>> print(len(cdffile)) #. Delete the variable ``Epoch`` from the open CDF file ``cdffile``: >>> del cdffile['Epoch'] #. Display a summary of variables and types in open CDF file ``cdffile``: >>> print(cdffile) #. Open the CDF named ``cdf_filename.cdf``, read *all* the data from all variables into dictionary ``data``, and close it when done or if an error occurs: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... data = cdffile.copy() This last example can be very inefficient as it reads the entire CDF. Normally it's better to treat the CDF as a dictionary and access only the data needed, which will be pulled transparently from disc. See :class:`Var` for more subtle examples. Potentially useful dictionary methods and related functions: - `in <http://docs.python.org/reference/expressions.html#in>`_ - `keys <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_ - :py:func:`len` - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - :py:func:`sorted` - :py:func:`~spacepy.toolbox.dictree` The CDF user's guide section 2.2 has more background information on CDF files. The :attr:`~CDF.attrs` Python attribute acts as a dictionary referencing CDF attributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`gAttrList` for more on the dictionary of global attributes. Creating a new CDF from a master (skeleton) CDF has similar syntax to opening one: >>> cdffile = pycdf.CDF('cdf_filename.cdf', 'master_cdf_filename.cdf') This creates and opens ``cdf_filename.cdf`` as a copy of ``master_cdf_filename.cdf``. Using a skeleton CDF is recommended over making a CDF entirely from scratch, but this is possible by specifying a blank master: >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') When CDFs are created in this way, they are opened read-write, see :py:meth:`readonly` to change. By default, new CDFs (without a master) are created in version 2 (backward-compatible) format. To create a version 3 CDF, use :meth:`Library.set_backward`: >>> pycdf.lib.set_backward(False) >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') Add variables by direct assignment, which will automatically set type and dimension based on the data provided: >>> cdffile['new_variable_name'] = [1, 2, 3, 4] or, if more control is needed over the type and dimensions, use :py:meth:`new`. Although it is supported to assign Var objects to Python variables for convenience, there are some minor pitfalls that can arise when changing a CDF that will not affect most users. 
This is only a concern when assigning a zVar object to a Python variable, changing the CDF through some other variable, and then trying to use the zVar object via the originally assigned variable. Deleting a variable: >>> var = cdffile['Var1'] >>> del cdffile['Var1'] >>> var[0] #fail, no such variable Renaming a variable: >>> var = cdffile['Var1'] >>> cdffile['Var1'].rename('Var2') >>> var[0] #fail, no such variable Renaming via the same variable works: >>> var = cdffile['Var1'] >>> var.rename('Var2') >>> var[0] #succeeds, aware of new name Deleting a variable and then creating another variable with the same name may lead to some surprises: >>> var = cdffile['Var1'] >>> var[...] = [1, 2, 3, 4] >>> del cdffile['Var1'] >>> cdffile.new('Var1', data=[5, 6, 7, 8] >>> var[...] [5, 6, 7, 8] .. autosummary:: ~CDF.attr_num ~CDF.attrs ~CDF.add_attr_to_cache ~CDF.add_to_cache ~CDF.backward ~CDF.checksum ~CDF.clear_attr_from_cache ~CDF.clear_from_cache ~CDF.clone ~CDF.close ~CDF.col_major ~CDF.compress ~CDF.copy ~CDF.from_data ~CDF.new ~CDF.raw_var ~CDF.readonly ~CDF.save ~CDF.var_num ~CDF.version .. attribute:: CDF.attrs Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. .. attribute:: CDF.backward True if this CDF was created in backward-compatible mode (for opening with CDF library before 3.x) .. automethod:: add_to_cache .. automethod:: add_attr_to_cache .. automethod:: attr_num .. automethod:: checksum .. automethod:: clear_from_cache .. automethod:: clear_attr_from_cache .. automethod:: clone .. automethod:: close .. automethod:: col_major .. automethod:: compress .. automethod:: copy .. automethod:: from_data .. automethod:: new .. automethod:: raw_var .. automethod:: readonly .. automethod:: save .. automethod:: var_num .. automethod:: version """ def __init__(self, pathname, masterpath=None, create=None, readonly=None): """Open or create a CDF file. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. Raises ====== CDFError if CDF library reports an error CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save` when done. 
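To create a new, empty CDF rather than opening an existing one (illustrative; the filename is arbitrary):

>>> cdffile = pycdf.CDF('new_filename.cdf', create=True)

which is equivalent to passing an empty master, as described in the class documentation:

>>> cdffile = pycdf.CDF('new_filename.cdf', '')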
""" if masterpath is not None: #Looks like we want to create if create is False: raise ValueError('Cannot specify a master CDF without creating a CDF') if readonly is True: raise ValueError('Cannot create a CDF in readonly mode') if create and readonly: raise ValueError('Cannot create a CDF in readonly mode') try: self.pathname = pathname.encode() except AttributeError: raise ValueError( 'pathname must be string-like: {0}'.format(pathname)) self._handle = ctypes.c_void_p(None) self._opened = False if masterpath is None and not create: self._open(True if readonly is None else readonly) elif masterpath: self._from_master(masterpath.encode()) else: self._create() lib.call(const.SELECT_, const.CDF_zMODE_, ctypes.c_long(2)) self._attrlistref = weakref.ref(gAttrList(self)) self.backward = self.version()[0] < 3 self._var_nums = {} """Cache of name-to-number mappings for variables in this CDF""" self._attr_info = {} """Cache of name-to-(number, global) mappings for attributes in this CDF""" def __del__(self): """Destructor; called when CDF object is destroyed. Close CDF file if there is still a valid handle. .. note:: To avoid data loss, explicitly call :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save`. """ if self._opened: self.close() def __delitem__(self, name): """Delete a zVariable in this CDF, by name or number Parameters ========== name : string or int Name or number of the CDF variable .. note: Variable numbers may change if variables are added or removed. Examples ======== Delete the variable ``Epoch`` from the open CDF file ``cdffile``. >>> del cdffile['Epoch'] """ self[name]._delete() def __enter__(self): """Context manager entrance function.""" return self def __exit__(self, type, value, traceback): """Context manager exit function. Close CDF file. """ self.close() def __getitem__(self, name): """Gets a zVariable in this CDF, by name or number The CDF acts like a dict @param name: Name or number of the CDF variable @type name: string or int @return: CDF variable named or numbered L{name} @rtype: :py:class:`pycdf.Var` @raise KeyError: for pretty much any problem in lookup @note: variable numbers may change if variables are added or removed. """ try: return Var(self, name) except CDFException as e: raise KeyError('{0}: {1}'.format(name, e)) def __setitem__(self, name, data): """Writes data to a zVariable in this CDF If the zVariable does not exist, will create one matching L{data}. If it does exist, will attempt to write L{data} to it without changing the type or dimensions. @param name: name or number of the variable to write @type name: str or int @param data: data to write, or a :py:class:`pycdf.Var` to copy """ if isinstance(data, Var): self.clone(data, name) elif name in self: self[name][...] 
= data if hasattr(data, 'attrs'): self[name].attrs.clone(data.attrs) else: self.new(name, data) def __iter__(self, current = 0): """Iterates over zVars in CDF Iterators for dicts return keys @note: Returned in variable-number order """ while current < self.__len__(): name = self[current].name() value = (yield name) if value is None: current += 1 else: current = self[value]._num() current += 1 def __len__(self): """Implements 'length' of CDF (number of zVars) @return: number of zVars in the CDF @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, const.CDF_NUMzVARS_, ctypes.byref(count)) return count.value def __contains__(self, key): """Determines whether a particular variable name is in the CDF @note: Essentially an efficiency function; L{__iter__} is called if this isn't defined @param key: key/variable name to check @type key: string @return: True if L{key} is the name of a variable in CDF, else False @rtype: Boolean """ try: foo = self[key] return True except KeyError as e: expected = str(key) + \ ": NO_SUCH_VAR: Named variable not found in this CDF." if expected in e.args: return False raise def __repr__(self): """Returns representation of CDF Cannot return anything that can be eval'd to create a copy of the CDF, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<CDF:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the CDF This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.CDF`, just the names, types, and sizes of all variables. (Attributes are not listed.) @return: description of the variables in the CDF @rtype: str """ if self._opened: return '\n'.join([key + ': ' + str(value) for (key, value) in sorted(self.items())]) #can get away with this sort because second value in tuple isn't #compared unless first are different, and variable name is unique. else: if isinstance(self.pathname, str): return 'Closed CDF {0}'.format(self.pathname) else: return 'Closed CDF {0}'.format(self.pathname.decode('ascii')) def _open(self, readonly=True): """Opens the CDF file (called on init) Will open an existing CDF file read/write. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.OPEN_, const.CDF_, self.pathname, ctypes.byref(self._handle)) self._opened = True if readonly: #Default is RW self.readonly(readonly) def _create(self): """Creates (and opens) a new CDF file Created at ``pathname``. Assumes zero-dimension r variables Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.CREATE_, const.CDF_, self.pathname, ctypes.c_long(0), (ctypes.c_long * 1)(0), ctypes.byref(self._handle)) self._opened = True def _from_master(self, master_path): """Creates a new CDF from a master CDF file ``master_path`` is copied to ``pathname`` and opened. Parameters ========== master_path : string location of the master CDF file Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. 
note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ if os.path.exists(self.pathname): raise CDFError(const.CDF_EXISTS) shutil.copy2(master_path, self.pathname) self._open(False) def _call(self, *args, **kwargs): """Select this CDF as current and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. Parameters ========== args : various, see :py:mod:`ctypes`. Passed directly to the CDF library interface. Useful constants are defined in the :doc:`const <pycdf_const>` module of this package. Returns ======= out : ctypes.c_long CDF status from the library .. note: Terminal NULL_ is automatically added to ``args``. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. """ return lib.call(const.SELECT_, const.CDF_, self._handle, *args, **kwargs) def clone(self, zVar, name=None, data=True): """ Clone a zVariable (from another CDF or this) into this CDF Parameters ========== zVar : :py:class:`Var` variable to clone Other Parameters ================ name : str Name of the new variable (default: name of the original) data : boolean (optional) Copy data, or only type, dimensions, variance, attributes? (default: True, copy data as well) Returns ======= out : :py:class:`Var` The newly-created zVar in this CDF """ if name is None: name = zVar.name() if name in self: del self[name] self.new(name, type=zVar.type(), recVary=zVar.rv(), dimVarys=zVar.dv(), dims=zVar._dim_sizes(), n_elements=zVar._nelems()) self[name].compress(*zVar.compress()) self[name].attrs.clone(zVar.attrs) if data: r = zVar._raw zVar._raw = True self.raw_var(name)[...] = zVar[...] zVar._raw = r return zVar def col_major(self, new_col=None): """ Finds the majority of this CDF file Other Parameters ================ new_col : boolean Specify True to change to column-major, False to change to row major, or do not specify to check the majority rather than changing it. (default is check only) Returns ======= out : boolean True if column-major, false if row-major """ if new_col != None: new_maj = const.COLUMN_MAJOR if new_col else const.ROW_MAJOR self._call(const.PUT_, const.CDF_MAJORITY_, new_maj) maj = ctypes.c_long(0) self._call(const.GET_, const.CDF_MAJORITY_, ctypes.byref(maj)) if not maj.value in (const.ROW_MAJOR.value, const.COLUMN_MAJOR.value): raise CDFError(const.BAD_MAJORITY) return maj.value == const.COLUMN_MAJOR.value def readonly(self, ro=None): """ Sets or check the readonly status of this CDF If the CDF has been changed since opening, setting readonly mode will have no effect. .. note:: Closing a CDF that has been opened readonly, or setting readonly False, may take a substantial amount of time if there are many variables in the CDF, as a (potentially large) cache needs to be cleared. Consider specifying ``readonly=False`` when opening the file if this is an issue. However, this may make some reading operations slower. Other Parameters ================ ro : Boolean True to set the CDF readonly, False to set it read/write, or leave out to check only. 
Returns ======= out : Boolean True if CDF is read-only, else False Raises ====== CDFError : if bad mode is set """ if ro == True: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYon) elif ro == False: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYoff) mode = ctypes.c_long(0) self._call(const.CONFIRM_, const.CDF_READONLY_MODE_, ctypes.byref(mode)) if mode.value == const.READONLYon.value: return True elif mode.value == const.READONLYoff.value: return False else: raise CDFError(const.BAD_READONLY_MODE.value) def checksum(self, new_val=None): """ Set or check the checksum status of this CDF. If checksums are enabled, the checksum will be verified every time the file is opened. Other Parameters ================ new_val : boolean True to enable checksum, False to disable, or leave out to simply check. Returns ======= out : boolean True if the checksum is enabled or False if disabled """ if new_val != None: self._call(const.PUT_, const.CDF_CHECKSUM_, const.MD5_CHECKSUM if new_val else const.NO_CHECKSUM) chk = ctypes.c_long(0) self._call(const.GET_, const.CDF_CHECKSUM_, ctypes.byref(chk)) if not chk.value in (const.MD5_CHECKSUM.value, const.NO_CHECKSUM.value): raise CDFError(const.BAD_CHECKSUM) return chk.value == const.MD5_CHECKSUM.value def close(self): """ Closes the CDF file Although called on object destruction (:meth:`~CDF.__del__`), to ensure all data are saved, the user should explicitly call :meth:`~CDF.close` or :meth:`~CDF.save`. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.CLOSE_, const.CDF_) self._opened = False def compress(self, comptype=None, param=None): """ Set or check the compression of this CDF Sets compression on entire *file*, not per-variable. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) Returns ======= out : tuple (comptype, param) currently in effect See Also ======== :meth:`Var.compress` Examples ======== Set file ``cdffile`` to gzip compression, compression level 9: >>> cdffile.compress(pycdf.const.GZIP_COMPRESSION, 9) """ return _compress(self, comptype, param) def new(self, name, data=None, type=None, recVary=True, dimVarys=None, dims=None, n_elements=None, compress=None, compress_param=None): """ Create a new zVariable in this CDF .. note:: Either ``data`` or ``type`` must be specified. If type is not specified, it is guessed from ``data``. Parameters ========== name : str name of the new variable Other Parameters ================ data data to store in the new variable. If this has a an ``attrs`` attribute (e.g., :class:`~spacepy.datamodel.dmarray`), it will be used to populate attributes of the new variable. type : ctypes.c_long CDF type of the variable, from :mod:`~pycdf.const`. See section 2.5 of the CDF user's guide for more information on CDF data types. recVary : boolean record variance of the variable (default True) dimVarys : list of boolean dimension variance of each dimension, default True for all dimensions. 
dims : list of int size of each dimension of this variable, default zero-dimensional. Note this is the dimensionality as defined by CDF, i.e., for record-varying variables it excludes the leading record dimension. See :py:class:`Var`. n_elements : int number of elements, should be 1 except for CDF_CHAR, for which it's the length of the string. compress : ctypes.c_long Compression to apply to this variable, default None. See :py:meth:`Var.compress`. compress_param : ctypes.c_long Compression parameter if compression used; reasonable default is chosen. See :py:meth:`Var.compress`. Returns ======= out : :py:class:`Var` the newly-created zVariable Raises ====== ValueError : if neither data nor sufficient typing information is provided. Notes ===== Any given data may be representable by a range of CDF types; if the type is not specified, pycdf will guess which the CDF types which can represent this data. This breaks down to: #. If input data is a numpy array, match the type of that array #. Proper kind (numerical, string, time) #. Proper range (stores highest and lowest number provided) #. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: #. Type that matches precision of data first, then #. integer type before float type, then #. Smallest type first, then #. signed type first, then #. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if ``data`` specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: #. absolute values between 0 and 3e-39 #. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. """ if type in (const.CDF_EPOCH16, const.CDF_INT8, const.CDF_TIME_TT2000) \ and self.backward: raise ValueError('Cannot use EPOCH16, INT8, or TIME_TT2000 ' 'in backward-compatible CDF') if not lib.supports_int8 and \ type in (const.CDF_INT8, const.CDF_TIME_TT2000): raise ValueError('INT8 and TIME_TT2000 require CDF library 3.4.0') if data is None: if type is None: raise ValueError('Must provide either data or a CDF type.') if dims is None: dims = [] if n_elements is None: n_elements = 1 else: (guess_dims, guess_types, guess_elements) = _Hyperslice.types(data) if dims is None: if recVary: if guess_dims == (): raise ValueError( 'Record-varying data cannot be scalar. 
' 'Specify NRV with CDF.new() or put data in array.') dims = guess_dims[1:] else: dims = guess_dims if type is None: type = guess_types[0] if type == const.CDF_EPOCH16.value and self.backward: type = const.CDF_EPOCH if n_elements is None: n_elements = guess_elements if dimVarys is None: dimVarys = [True for i in dims] recVary = const.VARY if recVary else const.NOVARY dimVarys = [const.VARY if dimVary else const.NOVARY for dimVary in dimVarys] if not hasattr(type, 'value'): type = ctypes.c_long(type) if type.value == const.CDF_INT8.value and not lib.supports_int8: raise ValueError( '64-bit integer support require CDF library 3.4.0') if type.value in (const.CDF_EPOCH16.value, const.CDF_INT8.value, const.CDF_TIME_TT2000.value) \ and self.backward: raise ValueError('Data requires EPOCH16, INT8, or TIME_TT2000; ' 'incompatible with backward-compatible CDF') new_var = Var(self, name, type, n_elements, dims, recVary, dimVarys) if compress != None: new_var.compress(compress, compress_param) if data is not None: new_var[...] = data if hasattr(data, 'attrs'): new_var.attrs.clone(data.attrs) return new_var def raw_var(self, name): """ Get a "raw" :class:`Var` object. Normally a :class:`Var` will perform translation of values for certain types (to/from Unicode for CHAR variables on Py3k, and to/from datetime for all time types). A "raw" object does not perform this translation, on read or write. This does *not* affect the data on disk, and in fact it is possible to maintain multiple Python objects with access to the same zVariable. Parameters ========== name : str name or number of the zVariable """ v = self[name] v._raw = True return v def save(self): """ Saves the CDF file but leaves it open. If closing the CDF, :meth:`close` is sufficient; there is no need to call :meth:`save` before :meth:`close`. .. note:: Relies on an undocumented call of the CDF C library, which is also used in the Java interface. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.SAVE_, const.CDF_) def copy(self): """ Make a copy of all data and attributes in this CDF Returns ======= out : :py:class:`CDFCopy` :class:`~spacepy.datamodel.SpaceData`-like object of all data """ return CDFCopy(self) def version(self): """ Get version of library that created this CDF Returns ======= out : tuple version of CDF library, in form (version, release, increment) """ ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) self._call(const.GET_, const.CDF_VERSION_, ctypes.byref(ver), const.GET_, const.CDF_RELEASE_, ctypes.byref(rel), const.GET_, const.CDF_INCREMENT_, ctypes.byref(inc)) return (ver.value, rel.value, inc.value) def _get_attrs(self): """Get attribute list Provide access to the CDF's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = gAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. 
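A brief, hypothetical example of the dict-like access (the attribute name and value are illustrative only): >>> cdffile.attrs['Project'] = 'Sample project' >>> attrnames = list(cdffile.attrs.keys()) 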
""") def var_num(self, varname): """Get the variable number of a particular variable name This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : int Variable number of this zvariable. """ num = self._var_nums.get(varname, None) if num is None: #Copied from Var._get, which can hopefully be thinned varNum = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMBER_, varname, ctypes.byref(varNum)) num = varNum.value self._var_nums[varname] = num return num def attr_num(self, attrname): """Get the attribute number and scope by attribute name This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : tuple attribute number, scope (True for global) of this attribute """ res = self._attr_info.get(attrname, None) if res is None: #Copied from Var._get, which can hopefully be thinned attrNum = ctypes.c_long(0) self._call(const.GET_, const.ATTR_NUMBER_, attrname, ctypes.byref(attrNum)) scope = ctypes.c_long(0) self._call(const.SELECT_, const.ATTR_, attrNum, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) res = (attrNum.value, scope) self._attr_info[attrname] = res return res def clear_attr_from_cache(self, attrname): """Mark an attribute deleted in the name-to-number cache Will remove an attribute, and all attributes with higher numbers, from the attribute cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the attribute. Not this is NOT a string in Python 3! """ num, scope = self.attr_num(attrname) #All numbers higher than this are renumbered for a, n in list(self._attr_info.items()): if n[0] >= num: del self._attr_info[a] def clear_from_cache(self, varname): """Mark a variable deleted in the name-to-number cache Will remove a variable, and all variables with higher numbers, from the variable cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! """ num = self.var_num(varname) #All numbers higher than this are renumbered for v, n in list(self._var_nums.items()): if n >= num: del self._var_nums[v] def add_attr_to_cache(self, attrname, num, scope): """Add an attribute to the name-to-number cache This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable scope : bool True if global scope; False if variable scope. 
""" self._attr_info[attrname] = (num, scope) def add_to_cache(self, varname, num): """Add a variable to the name-to-number cache This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable """ self._var_nums[varname] = num #Note there is no function for delete, currently handled in Var.rename #and Attr.rename by just deleting from the dict directly. Maybe this #should be differen (maybe should be possible to follow a variable across #a rename...) class Var(MutableSequence): """ A CDF variable. This object does not directly store the data from the CDF; rather, it provides access to the data in a format that much like a Python list or numpy :class:`~numpy.ndarray`. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. The CDF user's guide, section 2.3, provides background on variables. .. note:: Not intended to be created directly; use methods of :class:`CDF` to gain access to a variable. A record-varying variable's data are viewed as a hypercube of dimensions n_dims+1 (the extra dimension is the record number). They are indexed in row-major fashion, i.e. the last index changes most frequently / is contiguous in memory. If the CDF is column-major, the data are transformed to row-major before return. Non record-varying variables are similar, but do not have the extra dimension of record number. Variables can be subscripted by a multidimensional index to return the data. Indices are in row-major order with the first dimension representing the record number. If the CDF is column major, the data are reordered to row major. Each dimension is specified by standard Python `slice <http://docs.python.org/tutorial/introduction.html#strings>`_ notation, with dimensions separated by commas. The ellipsis fills in any missing dimensions with full slices. The returned data are lists; Python represents multidimensional arrays as nested lists. The innermost set of lists represents contiguous data. .. note:: numpy 'fancy indexing' is *not* supported. Degenerate dimensions are 'collapsed', i.e. no list of only one element will be returned if a single subscript is specified instead of a range. (To avoid this, specify a slice like 1:2, which starts with 1 and ends before 2). Two special cases: 1. requesting a single-dimension slice for a record-varying variable will return all data for that record number (or those record numbers) for that variable. 2. Requests for multi-dimensional variables may skip the record-number dimension and simply specify the slice on the array itself. In that case, the slice of the array will be returned for all records. In the event of ambiguity (e.g., single-dimension slice on a one-dimensional variable), case 1 takes priority. Otherwise, mismatch between the number of dimensions specified in the slice and the number of dimensions in the variable will cause an :exc:`~exceptions.IndexError` to be thrown. This all sounds very complicated but it is essentially attempting to do the 'right thing' for a range of slices. An unusual case is scalar (zero-dimensional) non-record-varying variables. Clearly they cannot be subscripted normally. 
In this case, use the ``[...]`` syntax meaning 'access all data.': >>> import pycdf >>> testcdf = pycdf.CDF('test.cdf', '') >>> variable = testcdf.new('variable', recVary=False, ... type=pycdf.const.CDF_INT4) >>> variable[...] = 10 >>> variable <Var: CDF_INT4 [] NRV > >>> variable[...] 10 Reading any empty non-record-varying variable will return an empty with the same *number* of dimensions, but all dimensions will be of zero length. The scalar is, again, a special case: due to the inability to have a numpy array which is both zero-dimensional and empty, reading an NRV scalar variable with no data will return an empty one-dimensional array. This is really not recommended. As a list type, variables are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_; iterating over a variable returns a single complete record at a time. This is all clearer with examples. Consider a variable ``B_GSM``, with three elements per record (x, y, z components) and fifty records in the CDF. Then: 1. ``B_GSM[0, 1]`` is the y component of the first record. 2. ``B_GSM[10, :]`` is a three-element list, containing x, y, and z components of the 11th record. As a shortcut, if only one dimension is specified, it is assumed to be the record number, so this could also be written ``B_GSM[10]``. 3. ``B_GSM[...]`` reads all data for ``B_GSM`` and returns it as a fifty-element list, each element itself being a three-element list of x, y, z components. Multidimensional example: consider fluxes stored as a function of pitch angle and energy. Such a variable may be called Flux and stored as a two-dimensional array, with the first dimension representing (say) ten energy steps and the second, eighteen pitch angle bins (ten degrees wide, centered from 5 to 175 degrees). Assume 100 records stored in the CDF (i.e. 100 different times). 1. ``Flux[4]`` is a list of ten elements, one per energy step, each element being a list of 18 fluxes, one per pitch bin. All are taken from the fifth record in the CDF. 2. ``Flux[4, :, 0:4]`` is the same record, all energies, but only the first four pitch bins (roughly, field-aligned). 3. ``Flux[..., 0:4]`` is a 100-element list (one per record), each element being a ten-element list (one per energy step), each containing fluxes for the first four pitch bins. This slicing notation is very flexible and allows reading specifically the desired data from the CDF. All data are, on read, converted to appropriate Python data types; EPOCH, EPOCH16, and TIME_TT2000 types are converted to :class:`~datetime.datetime`. Data are returned in numpy arrays. .. note:: Although pycdf supports TIME_TT2000 variables, the Python :class:`~datetime.datetime` object does not support leap seconds. Thus, on read, any seconds past 59 are truncated to 59.999999 (59 seconds, 999 milliseconds, 999 microseconds). Potentially useful list methods and related functions: - `count <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `in <http://docs.python.org/reference/expressions.html#in>`_ - `index <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `len <http://docs.python.org/library/functions.html#len>`_ - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - `sorted <http://docs.python.org/library/functions.html#sorted>`_ The topic of array majority can be very confusing; good background material is available at `IDL Array Storage and Indexing <http://www.idlcoyote.com/misc_tips/colrow_major.html>`_. 
In brief, *regardless of the majority stored in the CDF*, pycdf will always present the data in the native Python majority, row-major order, also known as C order. This is the default order in `NumPy <http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html #internal-memory-layout-of-an-ndarray>`_. However, packages that render image data may expect it in column-major order. If the axes seem 'swapped' this is likely the reason. The :attr:`~Var.attrs` Python attribute acts as a dictionary referencing zAttributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`zAttrList` for more on the dictionary of attributes. With writing, as with reading, every attempt has been made to match the behavior of Python lists. You can write one record, many records, or even certain elements of all records. There is one restriction: only the record dimension (i.e. dimension 0) can be resized by write, as all records in a variable must have the same dimensions. Similarly, only whole records can be deleted. .. note:: Unusual error messages on writing data usually mean that pycdf is unable to interpret the data as a regular array of a single type matching the type and shape of the variable being written. A 5x4 array is supported; an irregular array where one row has five columns and a different row has six columns is not. Error messages of this type include: - ``Data must be well-formed, regular array of number, string, or datetime`` - ``setting an array element with a sequence.`` - ``shape mismatch: objects cannot be broadcast to a single shape`` For these examples, assume Flux has 100 records and dimensions [2, 3]. Rewrite the first record without changing the rest: >>> Flux[0] = [[1, 2, 3], [4, 5, 6]] Writes a new first record and delete all the rest: >>> Flux[...] = [[1, 2, 3], [4, 5, 6]] Write a new record in the last position and add a new record after: >>> Flux[99:] = [[[1, 2, 3], [4, 5, 6]], ... [[11, 12, 13], [14, 15, 16]]] Insert two new records between the current number 5 and 6: >>> Flux[5:6] = [[[1, 2, 3], [4, 5, 6]], [[11, 12, 13], ... [14, 15, 16]]] This operation can be quite slow, as it requires reading and rewriting the entire variable. (CDF does not directly support record insertion.) Change the first element of the first two records but leave other elements alone: >>> Flux[0:2, 0, 0] = [1, 2] Remove the first record: >>> del Flux[0] Removes record 5 (the sixth): >>> del Flux[5] Due to the need to work around a bug in the CDF library, this operation can be quite slow. Delete *all data* from ``Flux``, but leave the variable definition intact: >>> del Flux[...] .. note:: Although this interface only directly supports zVariables, zMode is set on opening the CDF so rVars appear as zVars. See p.24 of the CDF user's guide; pyCDF uses zMode 2. .. autosummary:: ~Var.attrs ~Var.compress ~Var.copy ~Var.dtype ~Var.dv ~Var.insert ~Var.name ~Var.rename ~Var.rv ~Var.shape ~Var.type .. attribute:: Var.attrs zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. .. automethod:: compress .. automethod:: copy .. autoattribute:: dtype .. automethod:: dv .. automethod:: insert .. automethod:: name .. automethod:: rename .. automethod:: rv .. autoattribute:: shape .. 
automethod:: type """ def __init__(self, cdf_file, var_name, *args): """Create or locate a variable Parameters ========== cdf_file : :py:class:`pycdf.CDF` CDF file containing this variable var_name : string name of this variable Other Parameters ================ args additional arguments passed to :py:meth:`_create`. If none, opens an existing variable. If provided, creates a new one. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning """ self.cdf_file = cdf_file #This is the definitive "identify" of variable self._name = None self._type = None #CDF type (long) self._raw = False #Raw access (skip all conversions) if len(args) == 0: self._get(var_name) else: self._create(var_name, *args) #Weak reference to attribute list (use attrs instead) #This avoids a reference loop self._attrlistref = weakref.ref(zAttrList(self)) def __getitem__(self, key): """Returns a slice from the data array. Details under :py:class:`pycdf.Var`. @return: The data from this variable @rtype: list-of-lists of appropriate type. @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) #Hyperslice mostly catches this sort of thing, but #an empty variable is a special case, since we might want to #WRITE to 0th record (which Hyperslice also supports) but #can't READ from it, and iterating over tries to read from it. if hslice.rv: if hslice.dimsizes[0] == 0 and hslice.degen[0] and \ hslice.starts[0] == 0: raise IndexError('record index out of range') #For NRV, again hslice will assume 0th record exists since we might #want to write. So ANY degenerate dim other than the glued-on 0th #suggests an explicit index that should fail. None degenerate suggests #make an empty array. #Note this is pulling a lot of hyperslice stuff into getitem! elif hslice.dimsizes[0] == 0: if len(hslice.degen) > 1 and max(hslice.degen[1:]): raise IndexError('record index out of range') else: #The zero-length dimension is degenerate so it gets chopped, #and you can't have a zero-length numpy array that still #maintains the size of all other dimensions. So just force #a zero-dim array and the rest will follow hslice.counts[...] = 0 #If this is a scalar, need to make a single non-degenerate #dimension so it can be empty. if len(hslice.counts) == 1: hslice.degen[0] = False result = hslice.create_array() if hslice.counts[0] != 0: hslice.select() lib.call(const.GET_, const.zVAR_HYPERDATA_, result.ctypes.data_as(ctypes.c_void_p)) return hslice.convert_input_array(result) def __delitem__(self, key): """Removes a record (or set of records) from the CDF Only whole records can be deleted, so the del call must either specify only one dimension or it must specify all elements of the non-record dimensions. This is *not* a way to resize a variable! Deleting records from the middle of a variable may be very slow in some circumstances. To work around a bug in CDF library versions 3.4.0 and before, all the data must be read in, the requested deletions done, and then all written back out. 
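For example, with a hypothetical record-varying variable ``Flux``: >>> del Flux[0] #remove the first record >>> del Flux[0:10] #remove the first ten records 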
@param key: index or slice to delete @type key: int or slice @raise TypeError: if an attempt is made to delete from a non record-varying variable, or to delete below the record level """ if not self.rv(): raise TypeError('Cannot delete records from non-record-varying ' 'variable.') hslice = _Hyperslice(self, key) if hslice.dims > 1 and (hslice.counts[1:] != hslice.dimsizes[1:]).any(): raise TypeError('Can only delete entire records.') if hslice.counts[0] == 0: return start = hslice.starts[0] count = hslice.counts[0] interval = hslice.intervals[0] dimsize = hslice.dimsizes[0] self._call() dangerous_delete = False if lib._del_middle_rec_bug and \ (interval != 1 or (start != 0 and start + count < dimsize)): #delete from middle is dangerous if only have one index entry entries = ctypes.c_long(0) lib.call(const.GET_, const.zVAR_nINDEXENTRIES_, ctypes.byref(entries)) dangerous_delete = (entries.value == 1) if dangerous_delete: data = self[...] data = numpy.delete( data, numpy.arange(start, start + count * interval, interval), 0) self[0:dimsize - count] = data first_rec = dimsize - count last_rec = dimsize - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif interval == 1: first_rec = ctypes.c_long(start) last_rec = ctypes.c_long(start + count - 1) lib.call(const.DELETE_, const.zVAR_RECORDS_, first_rec, last_rec) else: self._call() #delete from end to avoid renumbering of records for recno in range(start + (count - 1) * interval, start - 1, -1 * interval): lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(recno), ctypes.c_long(recno)) def __setitem__(self, key, data): """Puts a slice into the data array. Details under :py:class:`pycdf.Var`. @param key: index or slice to store @type key: int or slice @param data: data to store @type data: numpy.array @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. 
IndexError will @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) n_recs = hslice.counts[0] hslice.expand(data) cdf_type = self.type() if cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) else: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=self._np_type()) if cdf_type == const.CDF_EPOCH16.value: datashape = data.shape[:-1] else: datashape = data.shape #Check data sizes if datashape != tuple(hslice.expected_dims()): raise ValueError('attempt to assign data of dimensions ' + str(datashape) + ' to slice of dimensions ' + str(tuple(hslice.expected_dims()))) #Flip majority and reversed dimensions, see convert_input_array data = hslice.convert_output_array(data) #Handle insertions and similar weirdness if hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Specified slice ends before last record, so insert in middle saved_data = self[hslice.starts[0] + n_recs:] if hslice.counts[0] > 0: hslice.select() lib.call(const.PUT_, const.zVAR_HYPERDATA_, data.ctypes.data_as(ctypes.c_void_p)) if hslice.counts[0] < n_recs: first_rec = hslice.starts[0] + hslice.counts[0] last_rec = hslice.dimsizes[0] - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Put saved data in after inserted data self[hslice.starts[0] + hslice.counts[0]:] = saved_data def extend(self, data): """ Append multiple values to the end of this variable This is an efficiency function which overrides the base implementation in MutableSequence. Parameters ---------- data : the data to append """ self[len(self):] = data def insert(self, index, data): """ Inserts a *single* record before an index Parameters ---------- index : int index before which to insert the new record data : the record to insert """ self[index:index] = [data] def _create(self, var_name, datatype, n_elements = 1, dims = (), recVary = const.VARY, dimVarys = None): """Creates a new zVariable @param var_name: name of this variable @type var_name: string @param datatype: CDF data type @type datatype: ctypes.c_long @param n_elements: number of elements (should be 1 except for CDF_CHAR variables). @type n_elements: long @param dims: size of each dimension for multi-dimensional variable, or empty for a zero-dimensional @type dims: sequence of long @param recVary: record variance for this variable (VARY/NOVARY) @type recVary: long @param dimVarys: array of VARY or NOVARY, variance for each dimension @type dimVarys: sequence of long @return: new variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.new}. 
""" dim_array = (ctypes.c_long * len(dims))(*dims) enc_name = var_name.encode('ascii') if dimVarys is None: dim_vary_array = (ctypes.c_long * (len(dims) if len(dims) > 0 else 1))(const.VARY) else: dim_vary_array = (ctypes.c_long * len(dims))(*dimVarys) varNum = ctypes.c_long(0) self.cdf_file._call(const.CREATE_, const.zVAR_, enc_name, datatype, ctypes.c_long(n_elements), ctypes.c_long(len(dims)), dim_array, recVary, dim_vary_array, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) def _delete(self): """Removes this zVariable from the CDF @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ self._call(const.DELETE_, const.zVAR_) self.cdf_file.clear_from_cache(self._name) self._name = None def _get(self, var_name): """Gets an existing zVariable @param var_name: name of this variable @type var_name: string @return: variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.__getitem__}. """ if isinstance(var_name, str_classes): try: enc_name = var_name.encode('ascii').rstrip() except AttributeError: enc_name = var_name.rstrip() #already in ASCII #'touch' CDF to cause an error if the name isn't there; get number varNum = ctypes.c_long(0) self.cdf_file._call(const.GET_, const.zVAR_NUMBER_, enc_name, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) else: #Looking up by number name = ctypes.create_string_buffer(const.CDF_VAR_NAME_LEN256+1) self.cdf_file._call(const.SELECT_, const.zVAR_, ctypes.c_long(var_name), const.GET_, const.zVAR_NAME_, name) self._name = name.value.rstrip() self.cdf_file.add_to_cache(self._name, var_name) def _num(self): """Returns the zVar number for this variable @return: number of this zVar @rtype: int """ return self.cdf_file.var_num(self._name) def __len__(self): """Get number of records for this variable in this file @return: Number of records @rtype: long @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ count = ctypes.c_long(0) self._call(const.GET_, const.zVAR_MAXREC_, ctypes.byref(count)) return (count.value + 1) def __repr__(self): """Returns representation of the variable Cannot return anything that can be eval'd to create a copy, so just wrap the informal representation in angle brackets. @return: info on this zVar @rtype: str """ return '<Var:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the variable This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.Var`. 
@return: info on this zVar, CDFTYPE [dimensions] NRV (if not record-varying) @rtype: str """ if self.cdf_file._opened: cdftype = self.type() chartypes = (const.CDF_CHAR.value, const.CDF_UCHAR.value) rv = self.rv() typestr = lib.cdftypenames[cdftype] + \ ('*' + str(self._nelems()) if cdftype in chartypes else '' ) if rv: sizestr = str([len(self)] + self._dim_sizes()) else: sizestr = str(self._dim_sizes()) return typestr + ' ' + sizestr + ('' if rv else ' NRV') else: if isinstance(self._name, str): return 'zVar "{0}" in closed CDF {1}'.format( self._name, self.cdf_file.pathname) else: return 'zVar "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self.cdf_file.pathname.decode('ascii')) def _n_dims(self): """Get number of dimensions for this variable @return: the number of dimensions @rtype: long """ n_dims = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMDIMS_, ctypes.byref(n_dims)) return n_dims.value def _dim_sizes(self): """Get the dimension sizes for this variable @return: sequence of sizes @rtype: sequence of long @note: This will always be in Python order (i.e. row major, last index iterates most quickly), *regardless* of the majority of the CDF. """ sizes = (ctypes.c_long * const.CDF_MAX_DIMS)(0) self._call(const.GET_, const.zVAR_DIMSIZES_, sizes) sizes = sizes[0:self._n_dims()] return sizes def rv(self, new_rv=None): """ Gets or sets whether this variable has record variance If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Other Parameters ================ new_rv : boolean True to change to record variance, False to change to NRV, unspecified to simply check variance. Returns ======= out : Boolean True if record varying, False if NRV """ if new_rv != None: self._call(const.PUT_, const.zVAR_RECVARY_, const.VARY if new_rv else const.NOVARY) vary = ctypes.c_long(0) self._call(const.GET_, const.zVAR_RECVARY_, ctypes.byref(vary)) return vary.value != const.NOVARY.value def dv(self, new_dv=None): """ Gets or sets dimension variance of each dimension of variable. If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Parameters ========== new_dv : list of boolean Each element True to change that dimension to dimension variance, False to change to not dimension variance. (Unspecified to simply check variance.) Returns ======= out : list of boolean True if that dimension has variance, else false. """ ndims = self._n_dims() if new_dv != None: if len(new_dv) != ndims: raise ValueError('Must specify variance for ' + str(ndims) + 'dimensions.') varies = (ctypes.c_long * ndims)( *[const.VARY if dv else const.NOVARY for dv in new_dv]) self._call(const.PUT_, const.zVAR_DIMVARYS_, varies) if ndims == 0: return [] varies = (ctypes.c_long * const.CDF_MAX_DIMS)() self._call(const.GET_, const.zVAR_DIMVARYS_, varies) return [dv != const.NOVARY.value for dv in varies[0:ndims]] def _call(self, *args, **kwargs): """Select this CDF and variable and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. @param args: Passed directly to the CDF library interface. Useful constants are defined in the :py:mod:`pycdf.const` module of this package. @type args: various, see :py:mod:`ctypes`. 
@return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self.cdf_file._call( const.SELECT_, const.zVAR_, ctypes.c_long(self.cdf_file.var_num(self._name)), *args, **kwargs) def _np_type(self): """Returns the numpy type of this variable This is the numpy type that will come directly out of the CDF; see :meth:`dtype` for the representation post-conversion. Raises ====== CDFError : for library-reported error or failure to find numpy type Returns ======= out : dtype numpy dtype that will hold value from this variable """ cdftype = self.type() if cdftype == const.CDF_CHAR.value or cdftype == const.CDF_UCHAR.value: return numpy.dtype('S' + str(self._nelems())) try: return lib.numpytypedict[cdftype] except KeyError: raise CDFError(const.BAD_DATA_TYPE) def type(self, new_type=None): """ Returns or sets the CDF type of this variable Parameters ========== new_type : ctypes.c_long the new type from :mod:`~pycdf.const` Returns ======= out : int CDF type """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) n_elements = ctypes.c_long(self._nelems()) self._call(const.PUT_, const.zVAR_DATASPEC_, new_type, n_elements) self._type = None if self._type is None: cdftype = ctypes.c_long(0) self._call(const.GET_, const.zVAR_DATATYPE_, ctypes.byref(cdftype)) self._type = cdftype.value return self._type def _nelems(self): """Number of elements for each value in this variable This is the length of strings for CHAR and UCHAR, should be 1 otherwise. @return: length of strings @rtype: int """ nelems = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMELEMS_, ctypes.byref(nelems)) return nelems.value def name(self): """ Returns the name of this variable Returns ======= out : str variable's name """ if isinstance(self._name, str): return self._name elif isinstance(self._name, bytes): return self._name.decode() def compress(self, comptype=None, param=None): """ Set or check the compression of this variable Compression may not be changeable on variables with data already written; even deleting the data may not permit the change. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
Returns ======= out : tuple the (comptype, param) currently in effect """ return _compress(self, comptype, param) def copy(self): """ Copies all data and attributes from this variable Returns ======= out : :class:`VarCopy` list of all data in record order """ return VarCopy(self) def rename(self, new_name): """ Renames this variable Parameters ========== new_name : str the new name for this variable """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_VAR_NAME_LEN256: raise CDFError(const.BAD_VAR_NAME) self._call(const.PUT_, const.zVAR_NAME_, enc_name) self.cdf_file.add_to_cache( enc_name, self.cdf_file.var_num(self._name)) #Still in cache del self.cdf_file._var_nums[self._name] self._name = enc_name @property def shape(self): """ Provides the numpy array-like shape of this variable. Returns a tuple; first element is number of records (RV variable only) And the rest provide the dimensionality of the variable. .. note:: Assigning to this attribute will not change the shape. """ if self.rv(): return tuple([len(self)] + self._dim_sizes()) else: return tuple(self._dim_sizes()) @property def dtype(self): """ Provide the numpy dtype equivalent to the CDF type of this variable. Data from this variable will be returned in numpy arrays of this type. See Also -------- type """ cdftype = self.type() if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str is not bytes and not self._raw: return numpy.dtype('U' + str(self._nelems())) if cdftype in (const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value) and not self._raw: return numpy.dtype('O') return self._np_type() def _get_attrs(self): """Get attribute list Provide access to the zVar's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = zAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. """) class _Hyperslice(object): """Represents a CDF 'slice' used for the hyper CDF functions For internal module use only. @ivar dims: number of dimensions to this slice, usually number of dimensions to the variable plus one for the record, which represents the 0th (least rapidly varying) dimension. @type dims: int @ivar dimsizes: size of each dimension (0th is number of records) @type dimsizes: list of int @ivar starts: index of the start value for each dimension ('dimension indices' in CDF speak) @type starts: list of int @ivar counts: number of values to get from each dimension. Final result will be the product of everything in counts. ('dimension counts' in CDF speak) @type counts: numpy.array @ivar intervals: interval between successive indices to use for each dimension. ('dimension invervals' in CDF speak) @type intervals: list of int @ivar degen: is this dimension degenerate, i.e. should be removed in the returned dataset. A 3D array with one dimension degenerate will be returned as a 2D array (i.e. list-of-lists.) @type degen: numpy.array @ivar rev: should this dimension be returned in reverse order? 
@type rev: numpy.array @ivar column: is this slice in column-major mode (if false, row-major) @type column: boolean @ivar zvar: what CDF variable this object slices on @type zvar: :py:class:`pycdf.Var` @ivar expanded_key: fully-expanded version of the key passed to the constructor (all dimensions filled in) @type expanded_key: tuple @note: All dimension-related variables are stored row-major (Python order) """ def __init__(self, zvar, key): """Create a Hyperslice @param zvar: zVariable that this slices @type zvar: :py:class:`pycdf.Var` @param key: Python multi-dimensional slice as passed to __getitem__ @type key: tuple of slice and/or int @raise IndexError: if slice is out of range, mismatches dimensions, or otherwise unparsable. @raise ValueError: if slice has invalid values """ self.zvar = zvar self.rv = self.zvar.rv() #dim of records, + 1 record dim (NRV always is record 0) self.dims = zvar._n_dims() + 1 self.dimsizes = [len(zvar)] + \ zvar._dim_sizes() self.starts = [0] * self.dims self.counts = numpy.empty((self.dims,), dtype=numpy.int32) self.counts.fill(1) self.intervals = [1] * self.dims self.degen = numpy.zeros(self.dims, dtype=numpy.bool) self.rev = numpy.zeros(self.dims, dtype=numpy.bool) #key is: #1. a single value (integer or slice object) if called 1D #2. a tuple (of integers and/or slice objects) if called nD #3. Each item is either a single value (degenerate dim) # or a slice object. if not hasattr(key, '__len__'): #Not a container object, pack in tuple key = (key, ) if not self.rv: key = (0, ) + key #NRV, so always get 0th record (degenerate) key = self.expand_ellipsis(key, self.dims) if self.rv: #special-cases for RV variables if len(key) == 1: #get all data for this record(s) key = self.expand_ellipsis(key + (Ellipsis, ), self.dims) elif len(key) == self.dims - 1: #get same slice from each record key = (slice(None, None, None), ) + key if len(key) == self.dims: self.expanded_key = key for i in range(self.dims): idx = key[i] if hasattr(idx, 'start'): #slice (self.starts[i], self.counts[i], self.intervals[i], self.rev[i]) = \ self.convert_range(idx.start, idx.stop, idx.step, self.dimsizes[i]) else: #Single degenerate value if idx < 0: idx += self.dimsizes[i] if idx != 0 and (idx >= self.dimsizes[i] or idx < 0): raise IndexError('list index out of range') self.starts[i] = idx self.degen[i] = True else: raise IndexError('Slice does not match dimensions for zVar ' + str(zvar._name)) self.column = zvar.cdf_file.col_major() def expected_dims(self, data=None): """Calculate size of non-degenerate dimensions Figures out size, in each dimension, of expected input data @return: size of each dimension for this slice, excluding degenerate @rtype: list of int """ return [self.counts[i] for i in range(self.dims) if not self.degen[i]] def expand(self, data): """Expands the record dimension of this slice to hold a set of data If the length of data (outermost dimension) is larger than the record count (counts[0]) for this slice, expand the slice to hold all the data. This requires that the record dimension of the slice not be degenerate, and also that it not have been completely specified when the hyperslice was created (i.e. record dimension either ellipsis or no specified stop.) Does *not* expand any other dimension, since that's Very Hard in CDF. 
@param data: the data which are intended to be stored in this slice @type data: list """ rec_slice = self.expanded_key[0] if not self.rv or isinstance(data, str_classes) or self.degen[0] or \ not hasattr(rec_slice, 'stop'): return if len(data) < self.counts[0]: #Truncate to fit data if rec_slice.stop is None and rec_slice.step in (None, 1): self.counts[0] = len(data) elif len(data) > self.counts[0]: #Expand to fit data if rec_slice.step in (None, 1): self.counts[0] = len(data) def create_array(self): """Creates a numpy array to hold the data from this slice Returns ======= out : numpy.array array sized, typed, and dimensioned to hold data from this slice """ counts = self.counts degen = self.degen if self.column: counts = self.reorder(counts) degen = self.reorder(degen) #TODO: Forcing C order for now, revert to using self.column later array = numpy.empty( [counts[i] for i in range(len(counts)) if not degen[i]], self.zvar._np_type(), order='C') return numpy.require(array, requirements=('C', 'A', 'W')) def convert_input_array(self, buffer): """Converts a buffer of raw data from this slice EPOCH(16) variables always need to be converted. CHAR need converted to Unicode if py3k Parameters ========== buffer : numpy.array data as read from the CDF file Returns ======= out : numpy.array converted data """ result = self._flip_array(buffer) #Convert to derived types cdftype = self.zvar.type() if not self.zvar._raw: if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str != bytes: dt = numpy.dtype('U{0}'.format(result.dtype.itemsize)) result = numpy.require(numpy.char.array(result).decode(), dtype=dt) elif cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(result) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(result) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(result) return result def convert_output_array(self, buffer): """Convert a buffer of data that will go into this slice Parameters ========== buffer : numpy.array data to go into the CDF file Returns ======= out : numpy.array input with majority flipped and dimensions reversed to be suitable to pass directly to CDF library. """ buffer = self._flip_array(buffer) return numpy.require(buffer, requirements=('C', 'A', 'W')) def _flip_array(self, data): """ Operations for majority, etc. common between convert_input and _output """ cdftype = self.zvar.type() #Flip majority if any non-degenerate dimensions exist if self.column and not min(self.degen): #Record-number dim degen, swap whole thing if self.degen[0]: if cdftype == const.CDF_EPOCH16.value: #Maintain last dimension data = data.transpose( list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose() #Record-number dimension is not degenerate, so keep it first else: if cdftype == const.CDF_EPOCH16.value: data = data.transpose( [0] + list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose( [0] + list(range(len(data.shape) - 1, 0, -1))) #Reverse non-degenerate dimensions in rev #Remember that the degenerate indices are already gone! 
if self.rev.any(): sliced = [(slice(None, None, -1) if self.rev[i] else slice(None)) for i in range(self.dims) if not self.degen[i]] if cdftype == const.CDF_EPOCH16.value: #don't reverse last dim sliced.extend(slice(None)) data = operator.getitem(data, tuple(sliced)) return data def select(self): """Selects this hyperslice in the CDF Calls the CDF library to select the CDF, variable, records, and array elements corresponding to this slice. """ args = (const.SELECT_, const.zVAR_RECNUMBER_, ctypes.c_long(self.starts[0]), const.SELECT_, const.zVAR_RECCOUNT_, ctypes.c_long(self.counts[0]), const.SELECT_, const.zVAR_RECINTERVAL_, ctypes.c_long(self.intervals[0])) if self.dims > 1: dims = self.dims - 1 args += (const.SELECT_, const.zVAR_DIMINDICES_, (ctypes.c_long * dims)(*self.starts[1:]), const.SELECT_, const.zVAR_DIMCOUNTS_, (ctypes.c_long * dims)(*self.counts[1:]), const.SELECT_, const.zVAR_DIMINTERVALS_, (ctypes.c_long * dims)(*self.intervals[1:])) self.zvar._call(*args) @staticmethod def expand_ellipsis(slices, n_dims): """Expands any ellipses into correct number of full-size slices @param slices: tuple of slices, integers, or ellipse objects @type slices: tuple @param n_dims: number of dimensions this slice is over @type n_dims: int @return: L{slices} with ellipses replaced by appropriate number of full-dimension slices @rtype: tuple @raise IndexError: if ellipses specified when already have enough dimensions """ if slices is Ellipsis: return tuple([slice(None, None, None) for i in range(n_dims)]) #Elements might be numpy arrays, so can't use in/index idx = [i for i, v in enumerate(slices) if v is Ellipsis] if not idx: #no ellipsis return slices if len(idx) > 1: #multiples! raise IndexError('Ellipses can only be used once per slice.') idx = idx[0] #how many dims to expand ellipsis to #remember the ellipsis is in len(slices) and must be replaced! extra = n_dims - len(slices) + 1 if extra < 0: raise IndexError('too many indices') result = slices[0:idx] + (slice(None), ) * extra + slices[idx+1:] return result @staticmethod def check_well_formed(data): """Checks if input data is well-formed, regular array""" d = numpy.asanyarray(data) if d.dtype == numpy.object: #this is probably going to be bad try: len(d.flat[0]) except TypeError: #at least it's not a list pass else: raise ValueError( 'Data must be well-formed, regular array of number, ' 'string, or datetime') @staticmethod def dimensions(data): """Finds the dimensions of a nested list-of-lists @param data: data of which dimensions are desired @type data: list (of lists) @return: dimensions of L{data}, in order outside-in @rtype: list of int @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) _Hyperslice.check_well_formed(d) return d.shape @staticmethod def types(data, backward=False): """Find dimensions and valid types of a nested list-of-lists Any given data may be representable by a range of CDF types; infer the CDF types which can represent this data. This breaks down to: 1. Proper kind (numerical, string, time) 2. Proper range (stores highest and lowest number) 3. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: 1. Type that matches precision of data first, then 2. integer type before float type, then 3. Smallest type first, then 4. signed type first, then 5. specifically-named (CDF_BYTE) vs. 
generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if L{data} specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: 1. absolute values between 0 and 3e-39 2. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. @param data: data for which dimensions and CDF types are desired @type data: list (of lists) @param backward: limit to pre-CDF3 types @type backward: bool @return: dimensions of L{data}, in order outside-in; CDF types which can represent this data; number of elements required (i.e. length of longest string) @rtype: 3-tuple of lists ([int], [ctypes.c_long], [int]) @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) dims = d.shape elements = 1 types = [] _Hyperslice.check_well_formed(d) if d.dtype.kind in ('S', 'U'): #it's a string types = [const.CDF_CHAR, const.CDF_UCHAR] elements = d.dtype.itemsize if d.dtype.kind == 'U': #UTF-8 uses 4 bytes per elements //= 4 elif d.size and hasattr(numpy.ma.getdata(d).flat[0], 'microsecond'): if max((dt.microsecond % 1000 for dt in d.flat)) > 0: types = [const.CDF_EPOCH16, const.CDF_EPOCH, const.CDF_TIME_TT2000] else: types = [const.CDF_EPOCH, const.CDF_EPOCH16, const.CDF_TIME_TT2000] if backward: del types[types.index(const.CDF_EPOCH16)] del types[-1] elif not lib.supports_int8: del types[-1] elif d is data or isinstance(data, numpy.generic): #numpy array came in, use its type (or byte-swapped) types = [k for k in lib.numpytypedict if (lib.numpytypedict[k] == d.dtype or lib.numpytypedict[k] == d.dtype.newbyteorder()) and not k in lib.timetypes] if (not lib.supports_int8 or backward) \ and const.CDF_INT8.value in types: del types[types.index(const.CDF_INT8.value)] #Maintain priority to match the ordered lists below: #float/double (44, 45) before real (21/22), and #byte (41) before int (1) before char (51). So hack. #Consider making typedict an ordered dict once 2.6 is dead. types.sort(key=lambda x: x % 50, reverse=True) if not types: #not a numpy array, or can't parse its type if d.dtype.kind == 'O': #Object. 
Try to make it numeric #Can't do safe casting from Object, so try and compare #Basically try most restrictive to least restrictive trytypes = (numpy.uint64, numpy.int64, numpy.float64) for t in trytypes: try: newd = d.astype(dtype=t) except: #Failure to cast, try next type continue if (newd == d).all(): #Values preserved, use this type d = newd #Continue with normal guessing, as if a list break else: #fell through without a match raise ValueError( 'Cannot convert generic objects to CDF type.') if d.dtype.kind in ('i', 'u'): #integer minval = numpy.min(d) maxval = numpy.max(d) if minval < 0: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_INT2, const.CDF_INT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 15, 2 ** 31, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] else: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_UINT1, const.CDF_INT2, const.CDF_UINT2, const.CDF_INT4, const.CDF_UINT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 8, 2 ** 15, 2 ** 16, 2 ** 31, 2 ** 32, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] types = [t for (t, c) in zip(types, cutoffs) if c > maxval and (minval >= 0 or minval >= -c)] if (not lib.supports_int8 or backward) \ and const.CDF_INT8 in types: del types[types.index(const.CDF_INT8)] else: #float if dims is (): if d != 0 and (abs(d) > 1.7e38 or abs(d) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] else: absolutes = numpy.abs(d[d != 0]) if len(absolutes) > 0 and \ (numpy.max(absolutes) > 1.7e38 or numpy.min(absolutes) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] types = [t.value if hasattr(t, 'value') else t for t in types] return (dims, types, elements) @staticmethod def reorder(seq): """Reorders seq to switch array majority Used to take an array of subscripts between row and column majority. First element is not touched, being the record number. @param seq: a sequence of *subscripts* @type seq: sequence of integers @return: seq with all but element 0 reversed in order @rtype: sequence of integers """ return numpy.concatenate((seq[0:1], numpy.flipud(seq)[:-1])) @staticmethod def convert_range(start, stop, step, size): """Converts a start/stop/step range to start/count/interval (i.e. changes from Python-style slice to CDF-style) @param start: index to start a slice at, may be none or negative @type start: int @param stop: index at end of slice (one-past, standard Python), may be none or negative @type stop: int @param step: interval for stepping through stlice @type step: int @param size: size of list to slice @type size: int @return: (start, count, interval, rev) where: 1. start is the start index, normalized to be within the size of the list and negatives handled 2. count is the number of records in the slice, guaranteed to stop before the end 3. interval is the skip between records 4. rev indicates whether the sequence should be reversed @rtype: (int, int, int, boolean) """ (start, stop, step) = slice(start, stop, step).indices(size) if step < 0: step *= -1 count = int((start - stop + step - 1) / step) start = start - (count - 1) * step rev = True else: count = int((stop - start + step - 1) / step) rev = False if count < 0: count = 0 start = 0 return (start, count, step, rev) class Attr(MutableSequence): """An attribute, g or z, for a CDF .. 
warning:: This class should not be used directly, but only in its subclasses, :class:`gAttr` and :class:`zAttr`. The methods listed here are safe to use in the subclasses. Represents a CDF attribute, providing access to the Entries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. An introduction to CDF attributes can be found in section 2.4 of the CDF user's guide. Each element of the list is a single Entry of the appropriate type. The index to the elements is the Entry number. Multi-dimensional slicing is *not* supported; an Entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry .. autosummary:: ~Attr.append ~Attr.has_entry ~Attr.insert ~Attr.max_idx ~Attr.new ~Attr.number ~Attr.rename ~Attr.type .. automethod:: append .. automethod:: has_entry .. automethod:: insert .. automethod:: max_idx .. automethod:: new .. automethod:: number .. automethod:: rename .. automethod:: type """ def __init__(self, cdf_file, attr_name, create=False): """Initialize this attribute @param cdf_file: CDF file containing this attribute @type cdf_file: :py:class:`pycdf.CDF` @param attr_name: Name of this attribute @type attr_name: str @param create: True to create attribute, False to look up existing. @type create: bool """ self._cdf_file = cdf_file self._raw = False if isinstance(attr_name, str_classes): try: self._name = attr_name.encode('ascii') except AttributeError: self._name = attr_name attrno = ctypes.c_long() if create: self._cdf_file._call(const.CREATE_, const.ATTR_, self._name, self.SCOPE, ctypes.byref(attrno)) self._cdf_file.add_attr_to_cache( self._name, attrno.value, self.SCOPE == const.GLOBAL_SCOPE) else: #Ensure exists, and populate cache. See scope note below attrno, scope = self._cdf_file.attr_num(self._name) else: name = ctypes.create_string_buffer(const.CDF_ATTR_NAME_LEN256 + 1) scope = ctypes.c_long(0) self._cdf_file._call(const.SELECT_, const.ATTR_, ctypes.c_long(attr_name)) #Because it's possible to create a gAttr Python objecting #referencing an Attribute with variable scope, and vice-versa, #do NOT assume the scope matches #(Higher level code checks for that being a bad thing.) self._cdf_file._call( const.GET_, const.ATTR_NAME_, name, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) self._name = name.value.rstrip() if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) self._cdf_file.add_attr_to_cache(self._name, attr_name, scope) def __getitem__(self, key): """Return a slice of Entries. Because Attributes may be sparse, a multi-element slice will return None for those elements which do not have associated Entries. @param key: index or range of Entry number to return @type key: slice or int @return: a list of entries, appropriate type. @raise IndexError: if L{key} is an int and that Entry number does not exist. 
""" if key is Ellipsis: key = slice(None, None, None) if hasattr(key, 'indices'): idx = range(*key.indices(self.max_idx() + 1)) return [self._get_entry(i) if self.has_entry(i) else None for i in idx] else: if self.has_entry(key): return self._get_entry(key) else: raise IndexError('list index ' + str(key) + ' out of range.') def _check_other_entries(self, types): """Try to get the type of this entry from others in the Attribute For zAttrs, checks if all other Entries are the same type, and at least one doesn't match its zVar, i.e. Entry type dominates (otherwise assumption is the Var type dominates). For gAttrs, checks all other Entries, and gives priority to the one that's earliest in the possible type list and exists in other Entries. This is only one component of Entry type guessing! :param list types: CDF types that are candidates (match the data) :return: The type discerned from other Entries, or None """ if self.ENTRY_ == const.zENTRY_: #If everything else is the same entry type, #and one is not the same as its var, probably #all entries should be of that type cand_et = None #The Entry type that might work one_var_diff = False #One Var has a type different from Entry for num in range(self.max_idx() + 1): if not self.has_entry(num): continue vartype = self._cdf_file[num].type() entrytype = self.type(num) if vartype != entrytype: one_var_diff = True if cand_et is None: if not entrytype in types: return None #One var has Entry with "impossible" type cand_et = entrytype elif cand_et != entrytype: return None #Two vars have Entries with different types if one_var_diff and cand_et is not None: return cand_et else: # Of those types which exist in other entries, # find the one which is earliest # in types, i.e. the preferred type entrytypes = [self.type(num) for num in range(self.max_idx() + 1) if self.has_entry(num)] entrytypes = [et for et in entrytypes if et in types] if entrytypes: return types[ min([types.index(et) for et in entrytypes])] return None def __setitem__(self, key, data): """Set a slice of Entries. @param key: index or range of Entry numbers to set @type key: slice or int @param data: the data to set these entries to. Normally each entry should be a sequence; if a scalar is provided, it is treated as a single-element list. @type data: scalar or list @raise ValueError: if size of {data} does not match size of L{key} @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. 
""" if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): #Single value, promote everything a dimension idx = (key, key + 1, 1) data = [data] else: idx = key.indices(self.max_idx() + 1) if key.step is None or key.step > 0: #Iterating forward, extend slice to match data if len(data) > len(range(*idx)): idx = (idx[0], idx[0] + idx[2] * len(data), idx[2]) #get, and check, types and sizes for all data #checks first so don't have error after changing half the Entries data_idx = -1 typelist = [] for i in range(*idx): data_idx += 1 if data_idx >= len(data): continue datum = data[data_idx] if datum is None: typelist[i] = (None, None, None) continue (dims, types, elements) = _Hyperslice.types( datum, backward=self._cdf_file.backward) if len(types) <= 0: raise ValueError('Cannot find a matching CDF type.') if len(dims) > 1: raise ValueError('Entries must be scalar or 1D.') elif len(dims) == 1 and isinstance(datum[0], str_classes): raise ValueError('Entry strings must be scalar.') entry_type = None if self.has_entry(i): #If the entry already exists, match its type entry_type = self.type(i) if not entry_type in types: entry_type = None if entry_type is None: #Check other entries for this attribute entry_type = self._check_other_entries(types) if entry_type is None and self.ENTRY_ == const.zENTRY_: #Fall back to zVar type vartype = self._cdf_file[i].type() if vartype in types: entry_type = vartype else: entry_type = types[0] elif entry_type is None: entry_type = types[0] if not entry_type in lib.numpytypedict: raise ValueError('Cannot find a matching numpy type.') typelist.append((dims, entry_type, elements)) data_idx = -1 for i in range(*idx): data_idx += 1 if data_idx >= len(data) or data[data_idx] is None: if self.has_entry(i): del self[i] continue datum = data[data_idx] (dims, entry_type, elements) = typelist[data_idx] self._write_entry(i, datum, entry_type, dims, elements) def __delitem__(self, key): """Delete a slice of Entries. @param key: index or range of Entry numbers to delete @type key: slice or int @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. """ if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): idx = (key, key + 1, 1) else: idx = key.indices(self.max_idx() + 1) for i in range(*idx): self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(i), const.DELETE_, self.ENTRY_) def __iter__(self, current=0): """Iterates over all entries in this Attribute Returns data from one entry at a time until reaches the end. @note: Returned in entry-number order. """ while current <= self.max_idx(): if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current += 1 def __reversed__(self, current=None): """Iterates over all entries in this Attribute Returns data from one entry at a time, starting at end and going to beginning. @note: Returned in entry-number order. """ if current is None: current = self.max_idx() while current >= 0: if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current -= 1 def __len__(self): """Number of Entries for this Attr. NOT same as max Entry number. 
@return: Number of Entries @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_NUMENTRIES_, ctypes.byref(count)) return count.value def __repr__(self): """Returns representation of an attribute Cannot return anything that can be eval'd to create a copy of the attribtute, so just wrap the informal representation in angle brackets. @return: all the data in this attribute @rtype: str """ return '<\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute This is an 'informal' representation in that it cannot be evaluated directly to create an L{Attr}. @return: all the data in this attribute @rtype: str """ if self._cdf_file._opened: return '\n'.join([str(item) for item in self]) else: if isinstance(self._name, str): return 'Attribute "{0}" in closed CDF {1}'.format( self._name, self._cdf_file.pathname) else: return 'Attribute "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self._cdf_file.pathname.decode('ascii')) def insert(self, index, data): """Insert an entry at a particular number Inserts entry at particular number while moving all subsequent entries to one entry number later. Does not close gaps. Parameters ========== index : int index where to put the new entry data : data for the new entry """ max_entry = self.max_idx() if index > max_entry: #Easy case self[index] = data return for i in range(max_entry, index - 1, -1): if self.has_entry(i+1): self.__delitem__(i+1) if self.has_entry(i): self.new(self.__getitem__(i), type=self.type(i), number=i+1) self[index] = data def append(self, data): """Add an entry to end of attribute Puts entry after last defined entry (does not fill gaps) Parameters ========== data : data for the new entry """ self[self.max_idx() + 1] = data def _call(self, *args, **kwargs): """Select this CDF and Attr and call the CDF internal interface @param args: Passed directly to the CDF library interface. @type args: various, see :py:mod:`ctypes`. @return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self._cdf_file._call( const.SELECT_, const.ATTR_, ctypes.c_long(self._cdf_file.attr_num(self._name)[0]), *args, **kwargs) def _entry_len(self, number): """Number of elements in an Entry @param number: number of Entry @type number: int @return: number of elements @rtype: int """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') count = ctypes.c_long(0) self._call( const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_NUMELEMS_, ctypes.byref(count)) return count.value def type(self, number, new_type=None): """Find or change the CDF type of a particular Entry number Parameters ========== number : int number of Entry to check or change Other Parameters ================ new_type type to change this Entry to, from :mod:`~pycdf.const`. Omit to only check type. Returns ======= out : int CDF variable type, see :mod:`~pycdf.const` Notes ===== If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) size = ctypes.c_long(self._entry_len(number)) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATASPEC_, new_type, size, ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') cdftype = ctypes.c_long(0) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATATYPE_, ctypes.byref(cdftype), ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') return cdftype.value def has_entry(self, number): """Check if this attribute has a particular Entry number Parameters ========== number : int number of Entry to check or change Returns ======= out : bool True if ``number`` is a valid entry number; False if not """ status = self._call(const.CONFIRM_, self.ENTRY_EXISTENCE_, ctypes.c_long(number), ignore=(const.NO_SUCH_ENTRY, )) return not status == const.NO_SUCH_ENTRY def max_idx(self): """Maximum index of Entries for this Attr Returns ======= out : int maximum Entry number """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_MAXENTRY_, ctypes.byref(count)) return count.value def new(self, data, type=None, number=None): """Create a new Entry in this Attribute .. note:: If ``number`` is provided and an Entry with that number already exists, it will be overwritten. Parameters ========== data data to put in the Entry Other Parameters ================ type : int type of the new Entry, from :mod:`~pycdf.const` (otherwise guessed from ``data``) number : int Entry number to write, default is lowest available number. """ if number is None: number = 0 while self.has_entry(number): number += 1 (dims, types, elements) = _Hyperslice.types( data, backward=self._cdf_file.backward) if type is None: #Guess based on other entries type = self._check_other_entries(types) if type is None and self.ENTRY_ == const.zENTRY_: #Try to match variable type vartype = self._cdf_file[number].type() if vartype in types: type = vartype if type is None: type = types[0] elif hasattr(type, 'value'): type = type.value self._write_entry(number, data, type, dims, elements) def number(self): """Find the attribute number for this attribute Returns ======= out : int attribute number """ no = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.ATTR_NUMBER_, self._name, ctypes.byref(no)) return no.value def global_scope(self): """Determine scope of this attribute. Returns ======= out : bool True if global (i.e. gAttr), False if zAttr """ return self._cdf_file.attr_num(self._name)[1] def rename(self, new_name): """Rename this attribute Renaming a zAttribute renames it for *all* zVariables in this CDF! 
Parameters ========== new_name : str the new name of the attribute """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_ATTR_NAME_LEN256: raise CDFError(const.BAD_ATTR_NAME) self._call(const.PUT_, const.ATTR_NAME_, enc_name) self._cdf_file.add_attr_to_cache( enc_name, *self._cdf_file.attr_num(self._name)) #still in cache del self._cdf_file._attr_info[self._name] self._name = enc_name def _get_entry(self, number): """Read an Entry associated with this L{Attr} @param number: number of Entry to return @type number: int @return: data from entry numbered L{number} @rtype: list or str """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') #Make a big enough buffer length = self._entry_len(number) cdftype = self.type(number) if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): buff = numpy.empty((), 'S{0}'.format(length), order='C') else: if not cdftype in lib.numpytypedict: raise CDFError(const.BAD_DATA_TYPE) buff = numpy.empty((length,), lib.numpytypedict[cdftype], order='C') buff = numpy.require(buff, requirements=('C', 'A', 'W')) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATA_, buff.ctypes.data_as(ctypes.c_void_p)) #decode if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): if str == bytes or self._raw: #Py2k, leave as bytes result = bytes(buff) else: #Py3k, make unicode result = str(numpy.char.array(buff).decode()) else: if not self._raw: if cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(buff) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(buff) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(buff) else: result = buff else: result = buff if length == 1: result = result[0] return result def _write_entry(self, number, data, cdf_type, dims, elements): """Write an Entry to this Attr. @param number: number of Entry to write @type number: int @param data: data to write @param cdf_type: the CDF type to write, from :py:mod:`pycdf.const` @param dims: dimensions of L{data} @type dims: list @param elements: number of elements in L{data}, 1 unless it is a string @type elements: int """ if len(dims) == 0: n_write = 1 else: n_write = dims[0] if cdf_type in (const.CDF_CHAR.value, const.CDF_UCHAR.value): data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.dtype('S' + str(elements))) n_write = elements elif cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data), except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) elif cdf_type in lib.numpytypedict: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=lib.numpytypedict[cdf_type]) else: raise CDFError(const.BAD_DATA_TYPE) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATA_, ctypes.c_long(cdf_type), ctypes.c_long(n_write), data.ctypes.data_as(ctypes.c_void_p)) def _delete(self): """Delete this Attribute Also deletes all Entries associated with it. 
""" self._call(const.DELETE_, const.ATTR_) self._cdf_file.clear_attr_from_cache(self._name) self._name = None class zAttr(Attr): """zAttribute for zVariables within a CDF. .. warning:: Because zAttributes are shared across all variables in a CDF, directly manipulating them may have unexpected consequences. It is safest to operate on zEntries via :class:`zAttrList`. .. note:: When accessing a zAttr, pyCDF exposes only the zEntry corresponding to the associated zVariable. See Also ======== :class:`Attr` """ ENTRY_ = const.zENTRY_ ENTRY_DATA_ = const.zENTRY_DATA_ SCOPE = const.VARIABLE_SCOPE ENTRY_EXISTENCE_ = const.zENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMzENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXzENTRY_ ENTRY_NUMELEMS_ = const.zENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.zENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.zENTRY_DATASPEC_ def insert(self, index, data): """Insert entry at particular index number Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError def append(self, index, data): """Add entry to end of attribute list Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError class gAttr(Attr): """Global Attribute for a CDF Represents a CDF attribute, providing access to the gEntries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. Normally accessed by providing a key to a :class:`gAttrList`: >>> attribute = cdffile.attrs['attribute_name'] >>> first_gentry = attribute[0] Each element of the list is a single gEntry of the appropriate type. The index to the elements is the gEntry number. A gEntry may be either a single string or a 1D array of numerical type. Entries of numerical type (everything but CDF_CHAR and CDF_UCHAR) with a single element are returned as scalars; multiple-element entries are returned as a list. No provision is made for accessing below the entry level; the whole list is returned at once (but Python's slicing syntax can be used to extract individual items from that list.) Multi-dimensional slicing is *not* supported; an entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry gEntries are *not* necessarily contiguous; a gAttribute may have an entry 0 and entry 2 without an entry 1. :meth:`~Attr.len` will return the *number* of gEntries; use :meth:`~Attr.max_idx` to find the highest defined gEntry number and :meth:`~Attr.has_entry` to determine if a particular gEntry number exists. Iterating over all entries is also supported:: >>> entrylist = [entry for entry in attribute] Deleting gEntries will leave a "hole": >>> attribute[0:3] = [1, 2, 3] >>> del attribute[1] >>> attribute.has_entry(1) False >>> attribute.has_entry(2) True >>> print attribute[0:3] [1, None, 3] Multi-element slices over nonexistent gEntries will return ``None`` where no entry exists. Single-element indices for nonexistent gEntries will raise ``IndexError``. Assigning ``None`` to a gEntry will delete it. 
When assigning to a gEntry, the type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing gEntry of the same number in this gAttribute #. other gEntries in this gAttribute #. data-matching constraints described in :meth:`CDF.new`. See Also ======== :class:`Attr` """ ENTRY_ = const.gENTRY_ ENTRY_DATA_ = const.gENTRY_DATA_ SCOPE = const.GLOBAL_SCOPE ENTRY_EXISTENCE_ = const.gENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMgENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXgENTRY_ ENTRY_NUMELEMS_ = const.gENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.gENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.gENTRY_DATASPEC_ class AttrList(MutableMapping): """Object representing a list of attributes. .. warning:: This class should not be used directly, but only via its subclasses, :class:`gAttrList` and :class:`zAttrList`. Methods listed here are safe to use from the subclasses. .. autosummary:: ~AttrList.clone ~AttrList.copy ~AttrList.from_dict ~AttrList.new ~AttrList.rename .. automethod:: clone .. automethod:: copy .. automethod:: from_dict .. automethod:: new .. automethod:: rename """ def __init__(self, cdf_file, special_entry=None): """Initialize the attribute collection @param cdf_file: CDF these attributes are in @type cdf_file: :py:class:`pycdf.CDF` @param special_entry: callable which returns a "special" entry number, used to limit results for zAttrs to those which match the zVar (i.e. the var number) @type special_entry: callable """ self._cdf_file = cdf_file self.special_entry = special_entry def __getitem__(self, name): """Find an Attribute by name @param name: name of the Attribute to return @type name: str @return: attribute named L{name} @rtype: L{Attr} @raise KeyError: if there is no attribute named L{name} @raise CDFError: other errors in CDF library """ try: attrib = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attrib.global_scope() != self.global_scope: raise KeyError(name + ': no ' + self.attr_name + ' by that name.') return attrib def __setitem__(self, name, data): """Create an Attribute or change its entries @param name: name of Attribute to change @type name: str @param data: Entries to populate this Attribute with. Any existing Entries will be deleted! Another C{Attr} may be specified, in which case all its entries are copied. @type data: scalar, list, or L{Attr} """ if isinstance(data, AttrList): if name in self: del self[name] attr = self._get_or_create(name) for entryno in range(data.max_idx()): if data.has_entry(entryno): attr.new(data[entryno], data.type(entryno), entryno) else: attr = self._get_or_create(name) if isinstance(data, str_classes): data = [data] else: try: junk = len(data) except TypeError: data = [data] attr[:] = data del attr[len(data):] def __delitem__(self, name): """Delete an Attribute (and all its entries) @param name: name of Attribute to delete @type name: str """ try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) attr._delete() def __iter__(self, current=0): """Iterates over all Attr in this CDF or variable Returns name of one L{Attr} at a time until reaches the end. @note: Returned in number order. 
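Example (illustrative): ``[name for name in cdffile.attrs]`` collects every gAttribute name, in number order.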
""" count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) while current < count.value: candidate = self.AttrType(self._cdf_file, current) if candidate.global_scope() == self.global_scope: if self.special_entry is None or \ candidate.has_entry(self.special_entry()): if str == bytes: value = yield(candidate._name) else: value = yield(candidate._name.decode()) if value != None: current = self[value].number() current += 1 def __repr__(self): """Returns representation of attribute list Cannot return anything that can be eval'd to create a copy of the list, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<' + self.__class__.__name__ + ':\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute list This is an 'informal' representation in that it cannot be evaluated directly to create an L{AttrList}. @return: all the data in this list of attributes @rtype: str """ if self._cdf_file._opened: return '\n'.join([key + ': ' + ( ('\n' + ' ' * (len(key) + 2)).join( [str(value[i]) + ' [' + lib.cdftypenames[value.type(i)] + ']' for i in range(value.max_idx() + 1) if value.has_entry(i)]) if isinstance(value, Attr) else str(value) + ' [' + lib.cdftypenames[self.type(key)] + ']' ) for (key, value) in sorted(self.items())]) else: if isinstance(self._cdf_file.pathname, str): return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname) else: return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname.decode('ascii')) def clone(self, master, name=None, new_name=None): """ Clones another attribute list, or one attribute from it, into this list. Parameters ========== master : AttrList the attribute list to copy from. This can be any dict-like object. Other Parameters ================ name : str (optional) name of attribute to clone (default: clone entire list) new_name : str (optional) name of the new attribute, default ``name`` """ if name is None: self._clone_list(master) else: self._clone_attr(master, name, new_name) def copy(self): """ Create a copy of this attribute list Returns ======= out : dict copy of the entries for all attributes in this list """ return dict((key, value[:] if isinstance(value, Attr) else value) for (key, value) in self.items()) def new(self, name, data=None, type=None): """ Create a new Attr in this AttrList Parameters ========== name : str name of the new Attribute Other Parameters ================ data data to put into the first entry in the new Attribute type CDF type of the first entry from :mod:`~pycdf.const`. Only used if data are specified. Raises ====== KeyError : if the name already exists in this list """ if name in self: raise KeyError(name + ' already exists.') #A zAttr without an Entry in this zVar will be a "get" not "create" attr = self._get_or_create(name) if data is not None: if self.special_entry is None: attr.new(data, type) else: attr.new(data, type, self.special_entry()) def rename(self, old_name, new_name): """ Rename an attribute in this list Renaming a zAttribute renames it for *all* zVariables in this CDF! Parameters ========== old_name : str the current name of the attribute new_name : str the new name of the attribute """ AttrList.__getitem__(self, old_name).rename(new_name) def from_dict(self, in_dict): """ Fill this list of attributes from a dictionary .. deprecated:: 0.1.5 Use :meth:`~pycdf.AttrList.clone` instead; it supports cloning from dictionaries. 
Parameters ========== in_dict : dict Attribute list is populated entirely from this dictionary; all existing attributes are deleted. """ warnings.warn("from_dict is deprecated and will be removed. Use clone.", DeprecationWarning) for k in in_dict: self[k] = in_dict[k] for k in list(self): if not k in in_dict: del self[k] def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{AttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name self[new_name] = master[name] def _clone_list(self, master): """Clones this attribute list from another @param master: the attribute list to copy from @type master: L{AttrList} """ for name in master: self._clone_attr(master, name) for name in list(self): #Can't iterate over a list we're changing if not name in master: del self[name] def _get_or_create(self, name): """Retrieve L{Attr} or create it if it doesn't exist @param name: name of the attribute to look up or create @type name: str @return: attribute with this name @rtype: L{Attr} """ attr = None try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status != const.NO_SUCH_ATTR: raise if attr is None: attr = self.AttrType(self._cdf_file, name, True) elif attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) return attr class gAttrList(AttrList): """ Object representing *all* the gAttributes in a CDF. Normally accessed as an attribute of an open :class:`CDF`: >>> global_attribs = cdffile.attrs Appears as a dictionary: keys are attribute names; each value is an attribute represented by a :class:`gAttr` object. To access the global attribute TEXT: >>> text_attr = cdffile.attrs['TEXT'] See Also ======== :class:`AttrList` """ AttrType = gAttr attr_name = 'gAttribute' global_scope = True def __len__(self): """ Number of gAttributes in this CDF Returns ======= out : int number of gAttributes in the CDF """ count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMgATTRS_, ctypes.byref(count)) return count.value class zAttrList(AttrList): """Object representing *all* the zAttributes in a zVariable. Normally accessed as an attribute of a :class:`Var` in an open CDF: >>> epoch_attribs = cdffile['Epoch'].attrs Appears as a dictionary: keys are attribute names, values are the value of the zEntry associated with the appropriate zVariable. Each vAttribute in a CDF may only have a *single* entry associated with each variable. The entry may be a string, a single numerical value, or a series of numerical values. Entries with multiple values are returned as an entire list; direct access to the individual elements is not possible. Example: finding the first dependency of (ISTP-compliant) variable ``Flux``: >>> print cdffile['Flux'].attrs['DEPEND_0'] zAttributes are shared among zVariables, one zEntry allowed per zVariable. (pyCDF hides this detail.) Deleting the last zEntry for a zAttribute will delete the underlying zAttribute. zEntries are created and destroyed by the usual dict methods on the zAttrlist: >>> epoch_attribs['new_entry'] = [1, 2, 4] #assign a list to new zEntry >>> del epoch_attribs['new_entry'] #delete the zEntry The type of the zEntry is guessed from data provided. 
The type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing zEntry corresponding to this zVar #. other zEntries in this zAttribute #. the type of this zVar #. data-matching constraints described in :py:meth:`CDF.new` See Also ======== :class:`AttrList` """ AttrType = zAttr attr_name = 'zAttribute' global_scope = False def __init__(self, zvar): """Initialize the attribute collection @param zvar: zVariable these attributes are in @param zvar: :py:class:`pycdf.Var` """ super(zAttrList, self).__init__(zvar.cdf_file, zvar._num) self._zvar = zvar def __getitem__(self, name): """Find an zEntry by name @param name: name of the zAttribute to return @type name: str @return: attribute named L{name} @rtype: L{zAttr} @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if attrib.has_entry(zvar_num): attrib._raw = self._zvar._raw return attrib[zvar_num] else: raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) def __delitem__(self, name): """Delete an zEntry by name @param name: name of the zEntry to delete @type name: str @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library @note: If this is the only remaining entry, the Attribute will be deleted. """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(str(name) + ': no such attribute for variable ' + str(self._zvar._name)) del attrib[zvar_num] if len(attrib) == 0: attrib._delete() def __setitem__(self, name, data): """Sets a zEntry by name The type of the zEntry is guessed from L{data}. The type is chosen to match the data; subject to that constraint, it will try to match (in order): 1. existing zEntry corresponding to this zVar 2. other zEntries in this zAttribute 3. the type of this zVar 4. data-matching constraints described in L{_Hyperslice.types} @param name: name of zAttribute; zEntry for this zVariable will be set in zAttribute by this name @type name: str @raise CDFError: errors in CDF library @raise ValueError: if unable to find a valid CDF type matching L{data}, or if L{data} is the wrong dimensions. """ try: attr = super(zAttrList, self).__getitem__(name) except KeyError: attr = zAttr(self._cdf_file, name, True) attr._raw = self._zvar._raw attr[self._zvar._num()] = data def __len__(self): """Number of zAttributes in this variable @return: number of zAttributes in the CDF which have entries for this variable. @rtype: int """ length = 0 count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) current = 0 while current < count.value: candidate = zAttr(self._cdf_file, current) if not candidate.global_scope(): if candidate.has_entry(self._zvar._num()): length += 1 current += 1 return length def type(self, name, new_type=None): """Find or change the CDF type of a zEntry in this zVar @param name: name of the zAttr to check or change @type name: str @param new_type: type to change it to, see :py:mod:`pycdf.const` @type new_type: ctypes.c_long @return: CDF variable type, see :py:mod:`pycdf.const` @rtype: int @note: If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) return attrib.type(zvar_num, new_type) def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{zAttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name if new_name in self: del self[new_name] self.new(new_name, master[name], master.type(name) if hasattr(master, 'type') else None)
def __init__(self, status):
    """
    Create a CDF Exception

    Uses CDF C library to look up an appropriate error message.

    Parameters
    ==========
    status : ctypes.c_long
        CDF status
    """
    self.status = status
    self.string = 'CDF error ' + repr(status) + ', unable to get details.'
    message = ctypes.create_string_buffer(const.CDF_STATUSTEXT_LEN + 1)
    try:
        retval = lib._library.CDFlib(const.SELECT_, const.CDF_STATUS_,
                                     ctypes.c_long(status),
                                     const.GET_, const.STATUS_TEXT_, message,
                                     const.NULL_)
        if retval == const.CDF_OK:
            if isinstance(message.value, str):
                self.string = message.value
            elif isinstance(message.value, bytes):
                self.string = message.value.decode()
    except:
        pass
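# Illustrative usage note (an assumption, not stated in the source above):
# this constructor appears to belong to the module's CDF exception classes
# (e.g. CDFError/CDFWarning, which are raised elsewhere in the module), so a
# status constant can be turned into a readable message roughly like:
#     err = CDFError(const.BAD_DATA_TYPE)   #any CDF status value
#     err.status, err.string                #raw status and library-supplied text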
1216
1241
#!/usr/bin/env python # -*- coding: utf-8 -*- """ das developers note: This a is modification of the original SpacePy pycdf package. All refereneces to the greater spacepy package have been removed to create a small standalone module. --cwp 2018-10-18 The libcdf.so location code has been changed to find the version installed in anaconda. --cwp 2020-04-06 This package provides a Python interface to the Common Data Format (CDF) library used for many NASA missions, available at http://cdf.gsfc.nasa.gov/. It is targeted at Python 2.6+ and should work without change on either Python 2 or Python 3. The interface is intended to be 'pythonic' rather than reproducing the C interface. To open or close a CDF and access its variables, see the :class:`CDF` class. Accessing data within the variables is via the :class:`Var` class. The :data:`lib` object provides access to some routines that affect the functionality of the library in general. The :mod:`~pycdf.const` module contains constants useful for accessing the underlying library. Authors: Jon Niehof Institution: University of New Hampshire Contact: [email protected] Copyright 2010-2015 Los Alamos National Security, LLC. """ __contact__ = 'Jon Niehof, [email protected]' try: from collections.abc import MutableMapping, MutableSequence except ImportError: from collections import MutableMapping, MutableSequence import ctypes import ctypes.util import datetime import operator import os import os.path import shutil import sys import tempfile import warnings import weakref import numpy import numpy.ma #Import const AFTER library loaded, so failed load doesn't leave half-imported #from . import const try: str_classes = (str, bytes, unicode) except NameError: str_classes = (str, bytes) class Library(object): """ Abstraction of the base CDF C library and its state. Not normally intended for end-user use. An instance of this class is created at package load time as the :data:`~pycdf.lib` variable, providing access to the underlying C library if necessary. The CDF library itself is described in section 2.1 of the CDF user's guide, as well as the CDF C reference manual. Calling the C library directly requires knowledge of :mod:`ctypes`. Instantiating this object loads the C library, see :doc:`/pycdf` docs for details. .. autosummary:: ~Library.call ~Library.check_status ~Library.datetime_to_epoch ~Library.datetime_to_epoch16 ~Library.datetime_to_tt2000 ~Library.epoch_to_datetime ~Library.epoch_to_epoch16 ~Library.epoch_to_num ~Library.epoch_to_tt2000 ~Library.epoch16_to_datetime ~Library.epoch16_to_epoch ~Library.epoch16_to_tt2000 ~Library.set_backward supports_int8 ~Library.tt2000_to_datetime ~Library.tt2000_to_epoch ~Library.tt2000_to_epoch16 v_datetime_to_epoch v_datetime_to_epoch16 v_datetime_to_tt2000 v_epoch_to_datetime v_epoch_to_tt2000 v_epoch16_to_datetime v_epoch16_to_tt2000 v_tt2000_to_datetime v_tt2000_to_epoch v_tt2000_to_epoch16 libpath version .. automethod:: call .. automethod:: check_status .. automethod:: datetime_to_epoch .. automethod:: datetime_to_epoch16 .. automethod:: datetime_to_tt2000 .. automethod:: epoch_to_datetime .. automethod:: epoch_to_epoch16 .. automethod:: epoch_to_num .. automethod:: epoch_to_tt2000 .. automethod:: epoch16_to_datetime .. automethod:: epoch16_to_epoch .. automethod:: epoch16_to_tt2000 .. automethod:: set_backward .. attribute:: supports_int8 True if this library supports INT8 and TIME_TT2000 types; else False. .. automethod:: tt2000_to_datetime .. automethod:: tt2000_to_epoch .. 
automethod:: tt2000_to_epoch16 .. method:: v_datetime_to_epoch(datetime) A vectorized version of :meth:`datetime_to_epoch` which takes a numpy array of datetimes as input and returns an array of epochs. .. method:: v_datetime_to_epoch16(datetime) A vectorized version of :meth:`datetime_to_epoch16` which takes a numpy array of datetimes as input and returns an array of epoch16. .. method:: v_datetime_to_tt2000(datetime) A vectorized version of :meth:`datetime_to_tt2000` which takes a numpy array of datetimes as input and returns an array of TT2000. .. method:: v_epoch_to_datetime(epoch) A vectorized version of :meth:`epoch_to_datetime` which takes a numpy array of epochs as input and returns an array of datetimes. .. method:: v_epoch_to_tt2000(epoch) A vectorized version of :meth:`epoch_to_tt2000` which takes a numpy array of epochs as input and returns an array of tt2000s. .. method:: v_epoch16_to_datetime(epoch0, epoch1) A vectorized version of :meth:`epoch16_to_datetime` which takes a numpy array of epoch16 as input and returns an array of datetimes. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_epoch16_to_tt2000(epoch16) A vectorized version of :meth:`epoch16_to_tt2000` which takes a numpy array of epoch16 as input and returns an array of tt2000s. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_tt2000_to_datetime(tt2000) A vectorized version of :meth:`tt2000_to_datetime` which takes a numpy array of tt2000 as input and returns an array of datetimes. .. method:: v_tt2000_to_epoch(tt2000) A vectorized version of :meth:`tt2000_to_epoch` which takes a numpy array of tt2000 as input and returns an array of epochs. .. method:: v_tt2000_to_epoch16(tt2000) A vectorized version of :meth:`tt2000_to_epoch16` which takes a numpy array of tt2000 as input and returns an array of epoch16. .. attribute:: libpath The path where pycdf found the CDF C library, potentially useful in debugging. If this contains just the name of a file (with no path information), then the system linker found the library for pycdf. On Linux, ``ldconfig -p`` may be useful for displaying the system's library resolution. .. attribute:: version Version of the CDF library, (version, release, increment, subincrement) """ def __init__(self, libpath=None, library=None): """Load the CDF C library. Searches for the library in the order: 1. Appropriately-named file in CDF_LIB 2. Appropriately-named file in CDF_BASE 3. Standard library search path @raise CDFError: BAD_DATA_TYPE if can't map types properly """ if not 'CDF_TMP' in os.environ: os.environ['CDF_TMP'] = tempfile.gettempdir() if not library: if not libpath: self.libpath, self._library = self._find_lib() if self._library is None: raise Exception(( 'Cannot load CDF C library; checked {0}. 
' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(self.libpath))) else: self._library = ctypes.CDLL(libpath) self.libpath = libpath else: self._library = library self.libpath = libpath self._library.CDFlib.restype = ctypes.c_long #commonly used, so set it up here self._library.EPOCHbreakdown.restype = ctypes.c_long self._library.computeEPOCH.restype = ctypes.c_double self._library.computeEPOCH.argtypes = [ctypes.c_long] * 7 self._library.computeEPOCH16.restype = ctypes.c_double self._library.computeEPOCH16.argtypes = [ctypes.c_long] * 10 + \ [ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDFsetFileBackward'): self._library.CDFsetFileBackward.restype = None self._library.CDFsetFileBackward.argtypes = [ctypes.c_long] #Map old name to the 3.7.1+ name if not hasattr(self._library, 'computeTT2000') \ and hasattr(self._library, 'CDF_TT2000_from_UTC_parts'): self._library.computeTT2000 \ = self._library.CDF_TT2000_from_UTC_parts if hasattr(self._library, 'computeTT2000'): self._library.computeTT2000.restype = ctypes.c_longlong self._library.computeTT2000.argtypes = \ [ctypes.c_double] *9 #Map old name to the 3.7.1+ name if not hasattr(self._library, 'breakdownTT2000') \ and hasattr(self._library, 'CDF_TT2000_to_UTC_parts'): self._library.breakdownTT2000 \ = self._library.CDF_TT2000_to_UTC_parts if hasattr(self._library, 'breakdownTT2000'): self._library.breakdownTT2000.restype = None self._library.breakdownTT2000.argtypes = \ [ctypes.c_longlong] + [ctypes.POINTER(ctypes.c_double)] * 9 if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH'): self._library.CDF_TT2000_to_UTC_EPOCH.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH.argtypes = [ctypes.c_longlong] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH'): self._library.CDF_TT2000_from_UTC_EPOCH.restype = ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH.argtypes = [ctypes.c_double] if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH16'): self._library.CDF_TT2000_to_UTC_EPOCH16.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH16.argtypes = \ [ctypes.c_longlong, ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH16'): self._library.CDF_TT2000_from_UTC_EPOCH16.restype = \ ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH16.argtypes = \ [ctypes.POINTER(ctypes.c_double * 2)] #Get CDF version information ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) sub = ctypes.c_char(b' ') self.call(const.GET_, const.LIB_VERSION_, ctypes.byref(ver), const.GET_, const.LIB_RELEASE_, ctypes.byref(rel), const.GET_, const.LIB_INCREMENT_, ctypes.byref(inc), const.GET_, const.LIB_subINCREMENT_, ctypes.byref(sub)) ver = ver.value rel = rel.value inc = inc.value sub = sub.value self.version = (ver, rel, inc, sub) self._del_middle_rec_bug = ver < 3 or (ver == 3 and (rel < 4 or (rel == 4 and inc < 1))) self.supports_int8 = (ver > 3 or (ver == 3 and rel >=4)) self.cdftypenames = {const.CDF_BYTE.value: 'CDF_BYTE', const.CDF_CHAR.value: 'CDF_CHAR', const.CDF_INT1.value: 'CDF_INT1', const.CDF_UCHAR.value: 'CDF_UCHAR', const.CDF_UINT1.value: 'CDF_UINT1', const.CDF_INT2.value: 'CDF_INT2', const.CDF_UINT2.value: 'CDF_UINT2', const.CDF_INT4.value: 'CDF_INT4', const.CDF_UINT4.value: 'CDF_UINT4', const.CDF_INT8.value: 'CDF_INT8', const.CDF_FLOAT.value: 'CDF_FLOAT', const.CDF_REAL4.value: 'CDF_REAL4', const.CDF_DOUBLE.value: 'CDF_DOUBLE', const.CDF_REAL8.value: 'CDF_REAL8', const.CDF_EPOCH.value: 'CDF_EPOCH', 
const.CDF_EPOCH16.value: 'CDF_EPOCH16', const.CDF_TIME_TT2000.value: 'CDF_TIME_TT2000', } self.numpytypedict = {const.CDF_BYTE.value: numpy.int8, const.CDF_CHAR.value: numpy.int8, const.CDF_INT1.value: numpy.int8, const.CDF_UCHAR.value: numpy.uint8, const.CDF_UINT1.value: numpy.uint8, const.CDF_INT2.value: numpy.int16, const.CDF_UINT2.value: numpy.uint16, const.CDF_INT4.value: numpy.int32, const.CDF_UINT4.value: numpy.uint32, const.CDF_INT8.value: numpy.int64, const.CDF_FLOAT.value: numpy.float32, const.CDF_REAL4.value: numpy.float32, const.CDF_DOUBLE.value: numpy.float64, const.CDF_REAL8.value: numpy.float64, const.CDF_EPOCH.value: numpy.float64, const.CDF_EPOCH16.value: numpy.dtype((numpy.float64, 2)), const.CDF_TIME_TT2000.value: numpy.int64, } self.timetypes = [const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value] if not self.supports_int8: del self.cdftypenames[const.CDF_INT8.value] del self.numpytypedict[const.CDF_INT8.value] del self.cdftypenames[const.CDF_TIME_TT2000.value] del self.numpytypedict[const.CDF_TIME_TT2000.value] elif sys.platform.startswith('linux') \ and os.uname()[4].startswith('arm') \ and hasattr(self._library, 'computeTT2000') \ and self._library.computeTT2000( 2010, 1, 1, 0, 0, 0, 0, 0, 0) != 315576066184000000: #TT2000 call failed, so probably need to type-pun #double arguments to variadic functions. #Calling convention for non-variadic functions with floats #is unique, but convention for ints is same as variadic. #So type-pun arguments to integers to force that calling #convention. if ctypes.sizeof(ctypes.c_longlong) != \ ctypes.sizeof(ctypes.c_double): warnings.warn('ARM with unknown type sizes; ' 'TT2000 functions will not work.') else: self._library.computeTT2000.argtypes = \ [ctypes.c_longlong] * 9 c_ll_p = ctypes.POINTER(ctypes.c_longlong) if self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( 2010)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) != 315576066184000000: warnings.warn('ARM with unknown calling convention; ' 'TT2000 functions will not work.') self.datetime_to_tt2000 = self._datetime_to_tt2000_typepunned v_epoch16_to_datetime = numpy.frompyfunc( self.epoch16_to_datetime, 2, 1) self.v_epoch16_to_datetime = \ lambda x: v_epoch16_to_datetime(x[..., 0], x[..., 1]) self.v_epoch_to_datetime = numpy.frompyfunc( self.epoch_to_datetime, 1, 1) self.v_tt2000_to_datetime = numpy.frompyfunc( self.tt2000_to_datetime, 1, 1) self.v_datetime_to_epoch = numpy.vectorize( self.datetime_to_epoch, otypes=[numpy.float64]) v_datetime_to_epoch16 = numpy.frompyfunc( self.datetime_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. 
We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_datetime_to_epoch16(x): retval = numpy.require(v_datetime_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_datetime_to_epoch16 = _v_datetime_to_epoch16 self.v_datetime_to_tt2000 = numpy.vectorize( self.datetime_to_tt2000, otypes=[numpy.int64]) self.v_epoch_to_tt2000 = numpy.vectorize( self.epoch_to_tt2000, otypes=[numpy.int64]) self.v_tt2000_to_epoch = numpy.vectorize( self.tt2000_to_epoch, otypes=[numpy.float64]) v_epoch16_to_tt2000 = numpy.frompyfunc( self.epoch16_to_tt2000, 2, 1) self.v_epoch16_to_tt2000 = \ lambda x: v_epoch16_to_tt2000(x[..., 0], x[..., 1]) v_tt2000_to_epoch16 = numpy.frompyfunc( self.tt2000_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_tt2000_to_epoch16(x): retval = numpy.require(v_tt2000_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_tt2000_to_epoch16 = _v_tt2000_to_epoch16 if not self.supports_int8: self.datetime_to_tt2000 = self._bad_tt2000 self.tt2000_to_datetime = self._bad_tt2000 self.v_datetime_to_tt2000 = self._bad_tt2000 self.v_tt2000_to_datetime = self._bad_tt2000 self.epoch_to_tt2000 = self._bad_tt2000 self.v_epoch_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch = self._bad_tt2000 self.v_tt2000_to_epoch = self._bad_tt2000 self.epoch_16_to_tt2000 = self._bad_tt2000 self.v_epoch16_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch16 = self._bad_tt2000 self.v_tt2000_to_epoch16 = self._bad_tt2000 #Default to V2 CDF self.set_backward(True) @staticmethod def _find_lib(): """ Search for the CDF library Searches in likely locations for CDF libraries and attempts to load them. Stops at first successful load and, if fails, reports all the files that were tried as libraries. Returns ======= out : tuple This is either (path to library, loaded library) or, in the event of failure, (None, list of libraries tried) """ failed = [] for libpath in Library._lib_paths(): try: lib = ctypes.CDLL(libpath) except: failed.append(libpath) else: return libpath, lib return (failed, None) @staticmethod def _lib_paths(): """Find candidate paths for the CDF library Does not check that the library is actually in any particular directory, just returns a list of possible locations, in priority order. Returns ======= out : generator of str paths that look like the CDF library """ #What the library might be named names = { 'win32': ['cdf.dll'], 'darwin': ['libcdf.dylib', 'cdf.dylib', 'libcdf.so'], 'linux2': ['libcdf.so'], 'linux': ['libcdf.so'], } names = names.get(sys.platform, ['libcdf.so']) #All existing CDF-library-like paths within a directory search_dir = lambda x: \ [os.path.join(x, fname) for fname in names if os.path.exists(os.path.join(x, fname))] # Only use anaconda locations... # Defined during builds ... if 'PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['PREFIX'], 'lib')): yield p # defined when conda is activated ... 
if 'CONDA_PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'lib')): yield p # Special subdirectory for anaconda unix packages on windows if 'LIBRARY_BIN' in os.environ: for p in search_dir(os.environ['LIBRARY_BIN']): yield p ctypespath = ctypes.util.find_library( 'cdf.dll' if sys.platform == 'win32' else 'cdf') if ctypespath: yield ctypespath def check_status(self, status, ignore=()): """ Raise exception or warning based on return status of CDF call Parameters ========== status : int status returned by the C library Other Parameters ================ ignore : sequence of ctypes.c_long CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. (Default none). Raises ====== CDFError : if status < CDF_WARN, indicating an error Warns ===== CDFWarning : if CDF_WARN <= status < CDF_OK, indicating a warning. Returns ======= out : int status (unchanged) """ if status == const.CDF_OK or status in ignore: return status if status < const.CDF_WARN: raise CDFError(status) else: warning = CDFWarning(status) warning.warn() return status def call(self, *args, **kwargs): """ Call the CDF internal interface Passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with :meth:`check_status`. Terminal NULL is automatically added to args. Parameters ========== args : various, see :mod:`ctypes` Passed directly to the CDF library interface. Useful constants are defined in the :mod:`~pycdf.const` module. Other Parameters ================ ignore : sequence of CDF statuses sequence of CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. Returns ======= out : int CDF status from the library Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ if 'ignore' in kwargs: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) ), kwargs['ignore']) else: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) )) def set_backward(self, backward=True): """ Set backward compatibility mode for new CDFs Unless backward compatible mode is set, CDF files created by the version 3 library can not be read by V2. Parameters ========== backward : boolean Set backward compatible mode if True; clear it if False. Raises ====== ValueError : if backward=False and underlying CDF library is V2 """ if self.version[0] < 3: if not backward: raise ValueError( 'Cannot disable backward-compatible mode for CDF version 2.') else: return self._library.CDFsetFileBackward(const.BACKWARDFILEon if backward else const.BACKWARDFILEoff) def epoch_to_datetime(self, epoch): """ Converts a CDF epoch value to a datetime Parameters ========== epoch : float epoch value from CDF Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
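Examples
========
A minimal round-trip sketch, assuming the module-level library
instance ``lib`` is in scope; the intermediate epoch value is
library-dependent and is not shown:

>>> import datetime
>>> ep = lib.datetime_to_epoch(datetime.datetime(2010, 1, 1))
>>> lib.epoch_to_datetime(ep)
datetime.datetime(2010, 1, 1, 0, 0)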
See Also ======== v_epoch_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) self._library.EPOCHbreakdown(ctypes.c_double(epoch), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999000) else: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, msec.value * 1000) def datetime_to_epoch(self, dt): """ Converts a Python datetime to a CDF Epoch value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : float epoch corresponding to dt See Also ======== v_datetime_to_epoch """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) micro = dt.microsecond % 1000 if micro >= 500 and dt.year < 9999: dt += datetime.timedelta(0, 0, 1000) return self._library.computeEPOCH(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000)) def epoch16_to_datetime(self, epoch0, epoch1): """ Converts a CDF epoch16 value to a datetime .. note:: The call signature has changed since SpacePy 0.1.2. Formerly this method took a single argument with two values; now it requires two arguments (one for each value). To convert existing code, replace ``epoch16_to_datetime(epoch)`` with ``epoch16_to_datetime(*epoch)``. Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. See Also ======== v_epoch16_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) usec = ctypes.c_long(0) nsec = ctypes.c_long(0) psec = ctypes.c_long(0) self._library.EPOCH16breakdown((ctypes.c_double * 2)(epoch0, epoch1), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec), ctypes.byref(psec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) micro = int(float(msec.value) * 1000 + float(usec.value) + float(nsec.value) / 1000 + float(psec.value) / 1e6 + 0.5) if micro < 1000000: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_epoch16(self, dt): """ Converts a Python datetime to a CDF Epoch16 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : list of float epoch16 corresponding to dt See Also ======== v_datetime_to_epoch16 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) #Default to "illegal epoch" epoch16 = (ctypes.c_double * 2)(-1., -1.) 
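#computeEPOCH16 fills the epoch16 buffer in place; the call below
#treats a nonzero return as failure and returns the illegal-epoch pair.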
if self._library.computeEPOCH16(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0, 0, epoch16): return (-1., -1.) #Failure, so illegal epoch return (epoch16[0], epoch16[1]) def epoch_to_epoch16(self, epoch): """ Converts a CDF EPOCH to a CDF EPOCH16 value Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : (double, double) EPOCH16 corresponding to epoch """ e = numpy.require(epoch, numpy.float64) s = numpy.trunc(e / 1000.0) #ugly numpy stuff, probably a better way.... res = numpy.hstack((s, (e - s * 1000.0) * 1e9)) if len(res) <= 2: return res newshape = list(res.shape[0:-2]) newshape.append(res.shape[-1] // 2) newshape.append(2) return numpy.rollaxis(res.reshape(newshape), -1, -2) def epoch_to_num(self, epoch): """ Convert CDF EPOCH to matplotlib number. Same output as :func:`~matplotlib.dates.date2num` and useful for plotting large data sets without converting the times through datetime. Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : double Floating point number representing days since 0001-01-01. """ #date2num day 1 is 1/1/1 00UT #epoch 1/1/1 00UT is 31622400000.0 (millisecond) return (epoch - 31622400000.0) / (24 * 60 * 60 * 1000.0) + 1.0 def epoch16_to_epoch(self, epoch16): """ Converts a CDF EPOCH16 to a CDF EPOCH value Parameters ========== epoch16 : (double, double) EPOCH16 to convert. Lists and numpy arrays are acceptable. LAST dimension should be 2: the two pairs of EPOCH16 Returns ======= out : double EPOCH corresponding to epoch16 """ e = numpy.require(epoch16, numpy.float64) return e[..., 0] * 1000.0 + numpy.round(e[..., 1] / 1e9) def tt2000_to_datetime(self, tt2000): """ Converts a CDF TT2000 value to a datetime .. note:: Although TT2000 values support leapseconds, Python's datetime object does not. Any times after 23:59:59.999999 will be truncated to 23:59:59.999999. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
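Examples
========
A round-trip sketch, assuming the module-level library instance
``lib`` is in scope and the underlying CDF library supports TT2000
(3.4.0 or later):

>>> import datetime
>>> tt = lib.datetime_to_tt2000(datetime.datetime(2010, 1, 1))
>>> tt
315576066184000000
>>> lib.tt2000_to_datetime(tt)
datetime.datetime(2010, 1, 1, 0, 0)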
See Also ======== v_tt2000_to_datetime """ yyyy = ctypes.c_double(0) mm = ctypes.c_double(0) dd = ctypes.c_double(0) hh = ctypes.c_double(0) min = ctypes.c_double(0) sec = ctypes.c_double(0) msec = ctypes.c_double(0) usec = ctypes.c_double(0) nsec = ctypes.c_double(0) self._library.breakdownTT2000( ctypes.c_longlong(tt2000), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) sec = int(sec.value) if sec >= 60: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), 59, 999999) micro = int(msec.value * 1000 + usec.value + nsec.value / 1000 + 0.5) if micro < 1000000: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_tt2000(self, dt): """ Converts a Python datetime to a CDF TT2000 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0) def _datetime_to_tt2000_typepunned(self, dt): """ Converts a Python datetime to a CDF TT2000 value Typepunned version that passes doubles as longlongs, to get around ARM calling convention oddness. Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ c_ll_p = ctypes.POINTER(ctypes.c_longlong) if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( dt.year)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.month)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.day)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.hour)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.minute)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.second)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond // 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond % 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) def epoch_to_tt2000(self, epoch): """ Converts a CDF EPOCH to a CDF TT2000 value Parameters ========== epoch : double EPOCH to convert Returns ======= out : int tt2000 corresponding to epoch See Also ======== v_epoch_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH(epoch) def tt2000_to_epoch(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH .. note:: Although TT2000 values support leapseconds, CDF EPOCH values do not. 
Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double EPOCH corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch """ return self._library.CDF_TT2000_to_UTC_EPOCH(tt2000) def epoch16_to_tt2000(self, epoch0, epoch1): """ Converts a CDF epoch16 value to TT2000 .. note:: Because TT2000 does not support picoseconds, the picoseconds value in epoch is ignored (i.e., truncated.) Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : long TT2000 corresponding to epoch. See Also ======== v_epoch16_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH16( (ctypes.c_double * 2)(epoch0, epoch1)) def tt2000_to_epoch16(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH16 .. note:: Although TT2000 values support leapseconds, CDF EPOCH16 values do not. Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double, double EPOCH16 corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch16 """ #Default to "illegal epoch" if isn't populated epoch16 = (ctypes.c_double * 2)(-1., -1.) if self._library.CDF_TT2000_to_UTC_EPOCH16(tt2000, epoch16): return (-1., -1.) #Failure; illegal epoch return (epoch16[0], epoch16[1]) def _bad_tt2000(*args, **kwargs): """Convenience function for complaining that TT2000 not supported""" raise NotImplementedError( 'TT2000 functions require CDF library 3.4.0 or later') def download_library(): """Download and install the CDF library""" if sys.platform != 'win32': raise NotImplementedError( 'CDF library install only supported on Windows') try: import html.parser as HTMLParser except ImportError: import HTMLParser #https://stackoverflow.com/questions/1713038/super-fails-with-error-typeerror-argument-1-must-be-type-not-classobj class LinkParser(HTMLParser.HTMLParser, object): def __init__(self, *args, **kwargs): self.links_found = [] super(LinkParser, self).__init__(*args, **kwargs) def handle_starttag(self, tag, attrs): if tag != 'a' or attrs[0][0] != 'href': return self.links_found.append(attrs[0][1]) import re import subprocess try: import urllib.request as u except ImportError: import urllib as u # Removed reference to spacepy #import spacepy #if spacepy.config.get('user_agent', None): # class AppURLopener(u.FancyURLopener): # version = spacepy.config['user_agent'] # u._urlopener = AppURLopener() baseurl = 'https://spdf.sci.gsfc.nasa.gov/pub/software/cdf/dist/' url = u.urlopen(baseurl) listing = url.read() url.close() p = LinkParser() p.feed(listing) cdfdist = [l for l in p.links_found if re.match('^cdf3\d_\d(?:_\d)?/$', l)] if not cdfdist: raise RuntimeError( "Couldn't find CDF distribution directory to download") cdfdist.sort(key=lambda x: x.rstrip('/').split('_')) cdfverbase = cdfdist[-1].rstrip('/') instfname = cdfverbase + ('_0' if cdfverbase.count('_') == 1 else '') + \ '-setup-{0}.exe'.format(len('%x' % sys.maxsize)*4) insturl = baseurl + cdfverbase + '/windows/' + instfname tmpdir = tempfile.mkdtemp() try: fname, status = u.urlretrieve(insturl, os.path.join(tmpdir, instfname)) subprocess.check_call([fname, '/install', '/q1'], shell=False) finally: shutil.rmtree(tmpdir) _libpath, _library = Library._find_lib() 
if _library is None: raise Exception(('Cannot load CDF C library; checked {0}. ' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(_libpath))) from . import const lib = Library(_libpath, _library) """Module global library object. Initalized at module load time so all classes have ready access to the CDF library and a common state. E.g: >>> import pycdf >>> pycdf.lib.version (3, 3, 0, ' ') """ class CDFException(Exception): """ Base class for errors or warnings in the CDF library. Not normally used directly, but in subclasses :class:`CDFError` and :class:`CDFWarning`. Error messages provided by this class are looked up from the underlying C library. """ def __init__(self, status): """ Create a CDF Exception Uses CDF C library to look up an appropriate error message. Parameters ========== status : ctypes.c_long CDF status """ self.status = status self.string = 'CDF error ' + repr(status) + ', unable to get details.' message = ctypes.create_string_buffer(const.CDF_STATUSTEXT_LEN + 1) try: retval = lib._library.CDFlib(const.SELECT_, const.CDF_STATUS_, ctypes.c_long(status), const.GET_, const.STATUS_TEXT_, message, const.NULL_) if retval == const.CDF_OK: if isinstance(message.value, str): self.string = message.value elif isinstance(message.value, bytes): self.string = message.value.decode() except: pass def __str__(self): """ Error string associated with the library error. Returns ======= out : str Error message from the CDF library. """ return self.string class CDFError(CDFException): """Raised for an error in the CDF library.""" pass class CDFWarning(CDFException, UserWarning): """Used for a warning in the CDF library.""" def warn(self, level=4): """ Issues a warning based on the information stored in my exception Intended for use in check_status or similar wrapper function. Other Parameters ================ level : int optional (default 3), how far up the stack the warning should be reported. Passed directly to :class:`warnings.warn`. """ warnings.warn(self, self.__class__, level) class EpochError(Exception): """Used for errors in epoch routines""" pass def _compress(obj, comptype=None, param=None): """Set or check the compression of a :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param obj: object on which to set or check compression @type obj: :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param comptype: type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :py:mod:`pycdf.const`. If not specified, will not change compression. @type comptype: ctypes.c_long @param param: Compression parameter, see CDF CRM 4.10 and :py:mod:`pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
@type param: ctypes.c_long @return: (comptype, param) currently in effect @rtype: tuple """ if isinstance(obj, CDF): COMPRESSION_ = const.CDF_COMPRESSION_ elif isinstance(obj, Var): COMPRESSION_ = const.zVAR_COMPRESSION_ else: raise ValueError('Must specify a CDF or Var type.') validparams = {const.NO_COMPRESSION.value: [ctypes.c_long(0)], const.RLE_COMPRESSION.value: [const.RLE_OF_ZEROs], const.HUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.AHUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.GZIP_COMPRESSION.value: [ctypes.c_long(5), ctypes.c_long(1), ctypes.c_long(2), ctypes.c_long(3), ctypes.c_long(4), ctypes.c_long(6), ctypes.c_long(7), ctypes.c_long(8), ctypes.c_long(9), ], } comptypes = [const.NO_COMPRESSION, const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION, const.GZIP_COMPRESSION] comptypevalues = [i.value for i in comptypes] if comptype != None: if not hasattr(comptype, 'value'): comptype = ctypes.c_long(comptype) if param is None: if not comptype.value in validparams: raise CDFError(const.BAD_COMPRESSION) param = validparams[comptype.value][0] paramlist = (ctypes.c_long * 1)(param) obj._call(const.PUT_, COMPRESSION_, comptype, paramlist) params = (ctypes.c_long * const.CDF_MAX_PARMS)(*([0] * const.CDF_MAX_PARMS)) comptype = ctypes.c_long(0) percent = ctypes.c_long(0) obj._call(const.GET_, COMPRESSION_, ctypes.byref(comptype), ctypes.byref(params), ctypes.byref(percent)) param = params[0] if not comptype.value in comptypevalues: raise CDFError(const.BAD_COMPRESSION) validparamvalues = [i.value for i in validparams[comptype.value]] if not param in validparamvalues: raise CDFError(const.BAD_COMPRESSION_PARM) comptype = comptypes[comptypevalues.index(comptype.value)] if comptype in (const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION): param = validparams[comptype.value][validparamvalues.index(param)] return (comptype, param) class CDF(MutableMapping): """ Python object representing a CDF file. Open or create a CDF file by creating an object of this class. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. A readonly CDF with many variables may be slow to close. See :meth:`readonly`. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :meth:`close` or :meth:`save` when done. .. note:: Existing CDF files are opened read-only by default, see :meth:`readonly` to change. CDF supports the `with <http://docs.python.org/tutorial/inputoutput.html#methods-of-file-objects>`_ keyword, like other file objects, so: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... #do brilliant things with the CDF will open the CDF, execute the indented statements, and close the CDF when finished or when an error occurs. The `python docs <http://docs.python.org/reference/compound_stmts.html#with>`_ include more detail on this 'context manager' ability. 
CDF objects behave like a python `dictionary <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_, where the keys are names of variables in the CDF, and the values, :class:`Var` objects. As a dictionary, they are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_ and it is easy to loop over all of the variables in a file. Some examples: #. List the names of all variables in the open CDF ``cdffile``: >>> cdffile.keys() >>> for k in cdffile: #Alternate ... print(k) #. Get a :class:`Var` object for the variable named ``Epoch``: >>> epoch = cdffile['Epoch'] #. Determine if a CDF contains a variable named ``B_GSE``: >>> if 'B_GSE' in cdffile: ... print('B_GSE is in the file') ... else: ... print('B_GSE is not in the file') #. Find how many variables are in the file: >>> print(len(cdffile)) #. Delete the variable ``Epoch`` from the open CDF file ``cdffile``: >>> del cdffile['Epoch'] #. Display a summary of variables and types in open CDF file ``cdffile``: >>> print(cdffile) #. Open the CDF named ``cdf_filename.cdf``, read *all* the data from all variables into dictionary ``data``, and close it when done or if an error occurs: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... data = cdffile.copy() This last example can be very inefficient as it reads the entire CDF. Normally it's better to treat the CDF as a dictionary and access only the data needed, which will be pulled transparently from disc. See :class:`Var` for more subtle examples. Potentially useful dictionary methods and related functions: - `in <http://docs.python.org/reference/expressions.html#in>`_ - `keys <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_ - :py:func:`len` - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - :py:func:`sorted` - :py:func:`~spacepy.toolbox.dictree` The CDF user's guide section 2.2 has more background information on CDF files. The :attr:`~CDF.attrs` Python attribute acts as a dictionary referencing CDF attributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`gAttrList` for more on the dictionary of global attributes. Creating a new CDF from a master (skeleton) CDF has similar syntax to opening one: >>> cdffile = pycdf.CDF('cdf_filename.cdf', 'master_cdf_filename.cdf') This creates and opens ``cdf_filename.cdf`` as a copy of ``master_cdf_filename.cdf``. Using a skeleton CDF is recommended over making a CDF entirely from scratch, but this is possible by specifying a blank master: >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') When CDFs are created in this way, they are opened read-write, see :py:meth:`readonly` to change. By default, new CDFs (without a master) are created in version 2 (backward-compatible) format. To create a version 3 CDF, use :meth:`Library.set_backward`: >>> pycdf.lib.set_backward(False) >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') Add variables by direct assignment, which will automatically set type and dimension based on the data provided: >>> cdffile['new_variable_name'] = [1, 2, 3, 4] or, if more control is needed over the type and dimensions, use :py:meth:`new`. Although it is supported to assign Var objects to Python variables for convenience, there are some minor pitfalls that can arise when changing a CDF that will not affect most users. 
This is only a concern when assigning a zVar object to a Python variable, changing the CDF through some other variable, and then trying to use the zVar object via the originally assigned variable. Deleting a variable: >>> var = cdffile['Var1'] >>> del cdffile['Var1'] >>> var[0] #fail, no such variable Renaming a variable: >>> var = cdffile['Var1'] >>> cdffile['Var1'].rename('Var2') >>> var[0] #fail, no such variable Renaming via the same variable works: >>> var = cdffile['Var1'] >>> var.rename('Var2') >>> var[0] #succeeds, aware of new name Deleting a variable and then creating another variable with the same name may lead to some surprises: >>> var = cdffile['Var1'] >>> var[...] = [1, 2, 3, 4] >>> del cdffile['Var1'] >>> cdffile.new('Var1', data=[5, 6, 7, 8] >>> var[...] [5, 6, 7, 8] .. autosummary:: ~CDF.attr_num ~CDF.attrs ~CDF.add_attr_to_cache ~CDF.add_to_cache ~CDF.backward ~CDF.checksum ~CDF.clear_attr_from_cache ~CDF.clear_from_cache ~CDF.clone ~CDF.close ~CDF.col_major ~CDF.compress ~CDF.copy ~CDF.from_data ~CDF.new ~CDF.raw_var ~CDF.readonly ~CDF.save ~CDF.var_num ~CDF.version .. attribute:: CDF.attrs Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. .. attribute:: CDF.backward True if this CDF was created in backward-compatible mode (for opening with CDF library before 3.x) .. automethod:: add_to_cache .. automethod:: add_attr_to_cache .. automethod:: attr_num .. automethod:: checksum .. automethod:: clear_from_cache .. automethod:: clear_attr_from_cache .. automethod:: clone .. automethod:: close .. automethod:: col_major .. automethod:: compress .. automethod:: copy .. automethod:: from_data .. automethod:: new .. automethod:: raw_var .. automethod:: readonly .. automethod:: save .. automethod:: var_num .. automethod:: version """ def __init__(self, pathname, masterpath=None, create=None, readonly=None): """Open or create a CDF file. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. Raises ====== CDFError if CDF library reports an error CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save` when done. 
""" if masterpath is not None: #Looks like we want to create if create is False: raise ValueError('Cannot specify a master CDF without creating a CDF') if readonly is True: raise ValueError('Cannot create a CDF in readonly mode') if create and readonly: raise ValueError('Cannot create a CDF in readonly mode') try: self.pathname = pathname.encode() except AttributeError: raise ValueError( 'pathname must be string-like: {0}'.format(pathname)) self._handle = ctypes.c_void_p(None) self._opened = False if masterpath is None and not create: self._open(True if readonly is None else readonly) elif masterpath: self._from_master(masterpath.encode()) else: self._create() lib.call(const.SELECT_, const.CDF_zMODE_, ctypes.c_long(2)) self._attrlistref = weakref.ref(gAttrList(self)) self.backward = self.version()[0] < 3 self._var_nums = {} """Cache of name-to-number mappings for variables in this CDF""" self._attr_info = {} """Cache of name-to-(number, global) mappings for attributes in this CDF""" def __del__(self): """Destructor; called when CDF object is destroyed. Close CDF file if there is still a valid handle. .. note:: To avoid data loss, explicitly call :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save`. """ if self._opened: self.close() def __delitem__(self, name): """Delete a zVariable in this CDF, by name or number Parameters ========== name : string or int Name or number of the CDF variable .. note: Variable numbers may change if variables are added or removed. Examples ======== Delete the variable ``Epoch`` from the open CDF file ``cdffile``. >>> del cdffile['Epoch'] """ self[name]._delete() def __enter__(self): """Context manager entrance function.""" return self def __exit__(self, type, value, traceback): """Context manager exit function. Close CDF file. """ self.close() def __getitem__(self, name): """Gets a zVariable in this CDF, by name or number The CDF acts like a dict @param name: Name or number of the CDF variable @type name: string or int @return: CDF variable named or numbered L{name} @rtype: :py:class:`pycdf.Var` @raise KeyError: for pretty much any problem in lookup @note: variable numbers may change if variables are added or removed. """ try: return Var(self, name) except CDFException as e: raise KeyError('{0}: {1}'.format(name, e)) def __setitem__(self, name, data): """Writes data to a zVariable in this CDF If the zVariable does not exist, will create one matching L{data}. If it does exist, will attempt to write L{data} to it without changing the type or dimensions. @param name: name or number of the variable to write @type name: str or int @param data: data to write, or a :py:class:`pycdf.Var` to copy """ if isinstance(data, Var): self.clone(data, name) elif name in self: self[name][...] 
= data if hasattr(data, 'attrs'): self[name].attrs.clone(data.attrs) else: self.new(name, data) def __iter__(self, current = 0): """Iterates over zVars in CDF Iterators for dicts return keys @note: Returned in variable-number order """ while current < self.__len__(): name = self[current].name() value = (yield name) if value is None: current += 1 else: current = self[value]._num() current += 1 def __len__(self): """Implements 'length' of CDF (number of zVars) @return: number of zVars in the CDF @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, const.CDF_NUMzVARS_, ctypes.byref(count)) return count.value def __contains__(self, key): """Determines whether a particular variable name is in the CDF @note: Essentially an efficiency function; L{__iter__} is called if this isn't defined @param key: key/variable name to check @type key: string @return: True if L{key} is the name of a variable in CDF, else False @rtype: Boolean """ try: foo = self[key] return True except KeyError as e: expected = str(key) + \ ": NO_SUCH_VAR: Named variable not found in this CDF." if expected in e.args: return False raise def __repr__(self): """Returns representation of CDF Cannot return anything that can be eval'd to create a copy of the CDF, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<CDF:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the CDF This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.CDF`, just the names, types, and sizes of all variables. (Attributes are not listed.) @return: description of the variables in the CDF @rtype: str """ if self._opened: return '\n'.join([key + ': ' + str(value) for (key, value) in sorted(self.items())]) #can get away with this sort because second value in tuple isn't #compared unless first are different, and variable name is unique. else: if isinstance(self.pathname, str): return 'Closed CDF {0}'.format(self.pathname) else: return 'Closed CDF {0}'.format(self.pathname.decode('ascii')) def _open(self, readonly=True): """Opens the CDF file (called on init) Will open an existing CDF file read/write. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.OPEN_, const.CDF_, self.pathname, ctypes.byref(self._handle)) self._opened = True if readonly: #Default is RW self.readonly(readonly) def _create(self): """Creates (and opens) a new CDF file Created at ``pathname``. Assumes zero-dimension r variables Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.CREATE_, const.CDF_, self.pathname, ctypes.c_long(0), (ctypes.c_long * 1)(0), ctypes.byref(self._handle)) self._opened = True def _from_master(self, master_path): """Creates a new CDF from a master CDF file ``master_path`` is copied to ``pathname`` and opened. Parameters ========== master_path : string location of the master CDF file Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. 
note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ if os.path.exists(self.pathname): raise CDFError(const.CDF_EXISTS) shutil.copy2(master_path, self.pathname) self._open(False) def _call(self, *args, **kwargs): """Select this CDF as current and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. Parameters ========== args : various, see :py:mod:`ctypes`. Passed directly to the CDF library interface. Useful constants are defined in the :doc:`const <pycdf_const>` module of this package. Returns ======= out : ctypes.c_long CDF status from the library .. note: Terminal NULL_ is automatically added to ``args``. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. """ return lib.call(const.SELECT_, const.CDF_, self._handle, *args, **kwargs) def clone(self, zVar, name=None, data=True): """ Clone a zVariable (from another CDF or this) into this CDF Parameters ========== zVar : :py:class:`Var` variable to clone Other Parameters ================ name : str Name of the new variable (default: name of the original) data : boolean (optional) Copy data, or only type, dimensions, variance, attributes? (default: True, copy data as well) Returns ======= out : :py:class:`Var` The newly-created zVar in this CDF """ if name is None: name = zVar.name() if name in self: del self[name] self.new(name, type=zVar.type(), recVary=zVar.rv(), dimVarys=zVar.dv(), dims=zVar._dim_sizes(), n_elements=zVar._nelems()) self[name].compress(*zVar.compress()) self[name].attrs.clone(zVar.attrs) if data: r = zVar._raw zVar._raw = True self.raw_var(name)[...] = zVar[...] zVar._raw = r return zVar def col_major(self, new_col=None): """ Finds the majority of this CDF file Other Parameters ================ new_col : boolean Specify True to change to column-major, False to change to row major, or do not specify to check the majority rather than changing it. (default is check only) Returns ======= out : boolean True if column-major, false if row-major """ if new_col != None: new_maj = const.COLUMN_MAJOR if new_col else const.ROW_MAJOR self._call(const.PUT_, const.CDF_MAJORITY_, new_maj) maj = ctypes.c_long(0) self._call(const.GET_, const.CDF_MAJORITY_, ctypes.byref(maj)) if not maj.value in (const.ROW_MAJOR.value, const.COLUMN_MAJOR.value): raise CDFError(const.BAD_MAJORITY) return maj.value == const.COLUMN_MAJOR.value def readonly(self, ro=None): """ Sets or check the readonly status of this CDF If the CDF has been changed since opening, setting readonly mode will have no effect. .. note:: Closing a CDF that has been opened readonly, or setting readonly False, may take a substantial amount of time if there are many variables in the CDF, as a (potentially large) cache needs to be cleared. Consider specifying ``readonly=False`` when opening the file if this is an issue. However, this may make some reading operations slower. Other Parameters ================ ro : Boolean True to set the CDF readonly, False to set it read/write, or leave out to check only. 
Returns ======= out : Boolean True if CDF is read-only, else False Raises ====== CDFError : if bad mode is set """ if ro == True: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYon) elif ro == False: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYoff) mode = ctypes.c_long(0) self._call(const.CONFIRM_, const.CDF_READONLY_MODE_, ctypes.byref(mode)) if mode.value == const.READONLYon.value: return True elif mode.value == const.READONLYoff.value: return False else: raise CDFError(const.BAD_READONLY_MODE.value) def checksum(self, new_val=None): """ Set or check the checksum status of this CDF. If checksums are enabled, the checksum will be verified every time the file is opened. Other Parameters ================ new_val : boolean True to enable checksum, False to disable, or leave out to simply check. Returns ======= out : boolean True if the checksum is enabled or False if disabled """ if new_val != None: self._call(const.PUT_, const.CDF_CHECKSUM_, const.MD5_CHECKSUM if new_val else const.NO_CHECKSUM) chk = ctypes.c_long(0) self._call(const.GET_, const.CDF_CHECKSUM_, ctypes.byref(chk)) if not chk.value in (const.MD5_CHECKSUM.value, const.NO_CHECKSUM.value): raise CDFError(const.BAD_CHECKSUM) return chk.value == const.MD5_CHECKSUM.value def close(self): """ Closes the CDF file Although called on object destruction (:meth:`~CDF.__del__`), to ensure all data are saved, the user should explicitly call :meth:`~CDF.close` or :meth:`~CDF.save`. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.CLOSE_, const.CDF_) self._opened = False def compress(self, comptype=None, param=None): """ Set or check the compression of this CDF Sets compression on entire *file*, not per-variable. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) Returns ======= out : tuple (comptype, param) currently in effect See Also ======== :meth:`Var.compress` Examples ======== Set file ``cdffile`` to gzip compression, compression level 9: >>> cdffile.compress(pycdf.const.GZIP_COMPRESSION, 9) """ return _compress(self, comptype, param) def new(self, name, data=None, type=None, recVary=True, dimVarys=None, dims=None, n_elements=None, compress=None, compress_param=None): """ Create a new zVariable in this CDF .. note:: Either ``data`` or ``type`` must be specified. If type is not specified, it is guessed from ``data``. Parameters ========== name : str name of the new variable Other Parameters ================ data data to store in the new variable. If this has a an ``attrs`` attribute (e.g., :class:`~spacepy.datamodel.dmarray`), it will be used to populate attributes of the new variable. type : ctypes.c_long CDF type of the variable, from :mod:`~pycdf.const`. See section 2.5 of the CDF user's guide for more information on CDF data types. recVary : boolean record variance of the variable (default True) dimVarys : list of boolean dimension variance of each dimension, default True for all dimensions. 
dims : list of int size of each dimension of this variable, default zero-dimensional. Note this is the dimensionality as defined by CDF, i.e., for record-varying variables it excludes the leading record dimension. See :py:class:`Var`. n_elements : int number of elements, should be 1 except for CDF_CHAR, for which it's the length of the string. compress : ctypes.c_long Compression to apply to this variable, default None. See :py:meth:`Var.compress`. compress_param : ctypes.c_long Compression parameter if compression used; reasonable default is chosen. See :py:meth:`Var.compress`. Returns ======= out : :py:class:`Var` the newly-created zVariable Raises ====== ValueError : if neither data nor sufficient typing information is provided. Notes ===== Any given data may be representable by a range of CDF types; if the type is not specified, pycdf will guess which the CDF types which can represent this data. This breaks down to: #. If input data is a numpy array, match the type of that array #. Proper kind (numerical, string, time) #. Proper range (stores highest and lowest number provided) #. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: #. Type that matches precision of data first, then #. integer type before float type, then #. Smallest type first, then #. signed type first, then #. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if ``data`` specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: #. absolute values between 0 and 3e-39 #. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. """ if type in (const.CDF_EPOCH16, const.CDF_INT8, const.CDF_TIME_TT2000) \ and self.backward: raise ValueError('Cannot use EPOCH16, INT8, or TIME_TT2000 ' 'in backward-compatible CDF') if not lib.supports_int8 and \ type in (const.CDF_INT8, const.CDF_TIME_TT2000): raise ValueError('INT8 and TIME_TT2000 require CDF library 3.4.0') if data is None: if type is None: raise ValueError('Must provide either data or a CDF type.') if dims is None: dims = [] if n_elements is None: n_elements = 1 else: (guess_dims, guess_types, guess_elements) = _Hyperslice.types(data) if dims is None: if recVary: if guess_dims == (): raise ValueError( 'Record-varying data cannot be scalar. 
' 'Specify NRV with CDF.new() or put data in array.') dims = guess_dims[1:] else: dims = guess_dims if type is None: type = guess_types[0] if type == const.CDF_EPOCH16.value and self.backward: type = const.CDF_EPOCH if n_elements is None: n_elements = guess_elements if dimVarys is None: dimVarys = [True for i in dims] recVary = const.VARY if recVary else const.NOVARY dimVarys = [const.VARY if dimVary else const.NOVARY for dimVary in dimVarys] if not hasattr(type, 'value'): type = ctypes.c_long(type) if type.value == const.CDF_INT8.value and not lib.supports_int8: raise ValueError( '64-bit integer support require CDF library 3.4.0') if type.value in (const.CDF_EPOCH16.value, const.CDF_INT8.value, const.CDF_TIME_TT2000.value) \ and self.backward: raise ValueError('Data requires EPOCH16, INT8, or TIME_TT2000; ' 'incompatible with backward-compatible CDF') new_var = Var(self, name, type, n_elements, dims, recVary, dimVarys) if compress != None: new_var.compress(compress, compress_param) if data is not None: new_var[...] = data if hasattr(data, 'attrs'): new_var.attrs.clone(data.attrs) return new_var def raw_var(self, name): """ Get a "raw" :class:`Var` object. Normally a :class:`Var` will perform translation of values for certain types (to/from Unicode for CHAR variables on Py3k, and to/from datetime for all time types). A "raw" object does not perform this translation, on read or write. This does *not* affect the data on disk, and in fact it is possible to maintain multiple Python objects with access to the same zVariable. Parameters ========== name : str name or number of the zVariable """ v = self[name] v._raw = True return v def save(self): """ Saves the CDF file but leaves it open. If closing the CDF, :meth:`close` is sufficient; there is no need to call :meth:`save` before :meth:`close`. .. note:: Relies on an undocumented call of the CDF C library, which is also used in the Java interface. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.SAVE_, const.CDF_) def copy(self): """ Make a copy of all data and attributes in this CDF Returns ======= out : :py:class:`CDFCopy` :class:`~spacepy.datamodel.SpaceData`-like object of all data """ return CDFCopy(self) def version(self): """ Get version of library that created this CDF Returns ======= out : tuple version of CDF library, in form (version, release, increment) """ ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) self._call(const.GET_, const.CDF_VERSION_, ctypes.byref(ver), const.GET_, const.CDF_RELEASE_, ctypes.byref(rel), const.GET_, const.CDF_INCREMENT_, ctypes.byref(inc)) return (ver.value, rel.value, inc.value) def _get_attrs(self): """Get attribute list Provide access to the CDF's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = gAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. 
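A short sketch, assuming an open, writeable CDF ``cdffile``; the
attribute name used here is purely illustrative:

>>> for name in cdffile.attrs: #global attribute names
...     print(name)
>>> cdffile.attrs['TITLE'] = 'My mission data' #new global attribute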
""") def var_num(self, varname): """Get the variable number of a particular variable name This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : int Variable number of this zvariable. """ num = self._var_nums.get(varname, None) if num is None: #Copied from Var._get, which can hopefully be thinned varNum = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMBER_, varname, ctypes.byref(varNum)) num = varNum.value self._var_nums[varname] = num return num def attr_num(self, attrname): """Get the attribute number and scope by attribute name This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : tuple attribute number, scope (True for global) of this attribute """ res = self._attr_info.get(attrname, None) if res is None: #Copied from Var._get, which can hopefully be thinned attrNum = ctypes.c_long(0) self._call(const.GET_, const.ATTR_NUMBER_, attrname, ctypes.byref(attrNum)) scope = ctypes.c_long(0) self._call(const.SELECT_, const.ATTR_, attrNum, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) res = (attrNum.value, scope) self._attr_info[attrname] = res return res def clear_attr_from_cache(self, attrname): """Mark an attribute deleted in the name-to-number cache Will remove an attribute, and all attributes with higher numbers, from the attribute cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the attribute. Not this is NOT a string in Python 3! """ num, scope = self.attr_num(attrname) #All numbers higher than this are renumbered for a, n in list(self._attr_info.items()): if n[0] >= num: del self._attr_info[a] def clear_from_cache(self, varname): """Mark a variable deleted in the name-to-number cache Will remove a variable, and all variables with higher numbers, from the variable cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! """ num = self.var_num(varname) #All numbers higher than this are renumbered for v, n in list(self._var_nums.items()): if n >= num: del self._var_nums[v] def add_attr_to_cache(self, attrname, num, scope): """Add an attribute to the name-to-number cache This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable scope : bool True if global scope; False if variable scope. 
""" self._attr_info[attrname] = (num, scope) def add_to_cache(self, varname, num): """Add a variable to the name-to-number cache This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable """ self._var_nums[varname] = num #Note there is no function for delete, currently handled in Var.rename #and Attr.rename by just deleting from the dict directly. Maybe this #should be differen (maybe should be possible to follow a variable across #a rename...) class Var(MutableSequence): """ A CDF variable. This object does not directly store the data from the CDF; rather, it provides access to the data in a format that much like a Python list or numpy :class:`~numpy.ndarray`. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. The CDF user's guide, section 2.3, provides background on variables. .. note:: Not intended to be created directly; use methods of :class:`CDF` to gain access to a variable. A record-varying variable's data are viewed as a hypercube of dimensions n_dims+1 (the extra dimension is the record number). They are indexed in row-major fashion, i.e. the last index changes most frequently / is contiguous in memory. If the CDF is column-major, the data are transformed to row-major before return. Non record-varying variables are similar, but do not have the extra dimension of record number. Variables can be subscripted by a multidimensional index to return the data. Indices are in row-major order with the first dimension representing the record number. If the CDF is column major, the data are reordered to row major. Each dimension is specified by standard Python `slice <http://docs.python.org/tutorial/introduction.html#strings>`_ notation, with dimensions separated by commas. The ellipsis fills in any missing dimensions with full slices. The returned data are lists; Python represents multidimensional arrays as nested lists. The innermost set of lists represents contiguous data. .. note:: numpy 'fancy indexing' is *not* supported. Degenerate dimensions are 'collapsed', i.e. no list of only one element will be returned if a single subscript is specified instead of a range. (To avoid this, specify a slice like 1:2, which starts with 1 and ends before 2). Two special cases: 1. requesting a single-dimension slice for a record-varying variable will return all data for that record number (or those record numbers) for that variable. 2. Requests for multi-dimensional variables may skip the record-number dimension and simply specify the slice on the array itself. In that case, the slice of the array will be returned for all records. In the event of ambiguity (e.g., single-dimension slice on a one-dimensional variable), case 1 takes priority. Otherwise, mismatch between the number of dimensions specified in the slice and the number of dimensions in the variable will cause an :exc:`~exceptions.IndexError` to be thrown. This all sounds very complicated but it is essentially attempting to do the 'right thing' for a range of slices. An unusual case is scalar (zero-dimensional) non-record-varying variables. Clearly they cannot be subscripted normally. 
In this case, use the ``[...]`` syntax meaning 'access all data.': >>> import pycdf >>> testcdf = pycdf.CDF('test.cdf', '') >>> variable = testcdf.new('variable', recVary=False, ... type=pycdf.const.CDF_INT4) >>> variable[...] = 10 >>> variable <Var: CDF_INT4 [] NRV > >>> variable[...] 10 Reading any empty non-record-varying variable will return an empty with the same *number* of dimensions, but all dimensions will be of zero length. The scalar is, again, a special case: due to the inability to have a numpy array which is both zero-dimensional and empty, reading an NRV scalar variable with no data will return an empty one-dimensional array. This is really not recommended. As a list type, variables are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_; iterating over a variable returns a single complete record at a time. This is all clearer with examples. Consider a variable ``B_GSM``, with three elements per record (x, y, z components) and fifty records in the CDF. Then: 1. ``B_GSM[0, 1]`` is the y component of the first record. 2. ``B_GSM[10, :]`` is a three-element list, containing x, y, and z components of the 11th record. As a shortcut, if only one dimension is specified, it is assumed to be the record number, so this could also be written ``B_GSM[10]``. 3. ``B_GSM[...]`` reads all data for ``B_GSM`` and returns it as a fifty-element list, each element itself being a three-element list of x, y, z components. Multidimensional example: consider fluxes stored as a function of pitch angle and energy. Such a variable may be called Flux and stored as a two-dimensional array, with the first dimension representing (say) ten energy steps and the second, eighteen pitch angle bins (ten degrees wide, centered from 5 to 175 degrees). Assume 100 records stored in the CDF (i.e. 100 different times). 1. ``Flux[4]`` is a list of ten elements, one per energy step, each element being a list of 18 fluxes, one per pitch bin. All are taken from the fifth record in the CDF. 2. ``Flux[4, :, 0:4]`` is the same record, all energies, but only the first four pitch bins (roughly, field-aligned). 3. ``Flux[..., 0:4]`` is a 100-element list (one per record), each element being a ten-element list (one per energy step), each containing fluxes for the first four pitch bins. This slicing notation is very flexible and allows reading specifically the desired data from the CDF. All data are, on read, converted to appropriate Python data types; EPOCH, EPOCH16, and TIME_TT2000 types are converted to :class:`~datetime.datetime`. Data are returned in numpy arrays. .. note:: Although pycdf supports TIME_TT2000 variables, the Python :class:`~datetime.datetime` object does not support leap seconds. Thus, on read, any seconds past 59 are truncated to 59.999999 (59 seconds, 999 milliseconds, 999 microseconds). Potentially useful list methods and related functions: - `count <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `in <http://docs.python.org/reference/expressions.html#in>`_ - `index <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `len <http://docs.python.org/library/functions.html#len>`_ - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - `sorted <http://docs.python.org/library/functions.html#sorted>`_ The topic of array majority can be very confusing; good background material is available at `IDL Array Storage and Indexing <http://www.idlcoyote.com/misc_tips/colrow_major.html>`_. 
In brief, *regardless of the majority stored in the CDF*, pycdf will always present the data in the native Python majority, row-major order, also known as C order. This is the default order in `NumPy <http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html #internal-memory-layout-of-an-ndarray>`_. However, packages that render image data may expect it in column-major order. If the axes seem 'swapped' this is likely the reason. The :attr:`~Var.attrs` Python attribute acts as a dictionary referencing zAttributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`zAttrList` for more on the dictionary of attributes. With writing, as with reading, every attempt has been made to match the behavior of Python lists. You can write one record, many records, or even certain elements of all records. There is one restriction: only the record dimension (i.e. dimension 0) can be resized by write, as all records in a variable must have the same dimensions. Similarly, only whole records can be deleted. .. note:: Unusual error messages on writing data usually mean that pycdf is unable to interpret the data as a regular array of a single type matching the type and shape of the variable being written. A 5x4 array is supported; an irregular array where one row has five columns and a different row has six columns is not. Error messages of this type include: - ``Data must be well-formed, regular array of number, string, or datetime`` - ``setting an array element with a sequence.`` - ``shape mismatch: objects cannot be broadcast to a single shape`` For these examples, assume Flux has 100 records and dimensions [2, 3]. Rewrite the first record without changing the rest: >>> Flux[0] = [[1, 2, 3], [4, 5, 6]] Writes a new first record and delete all the rest: >>> Flux[...] = [[1, 2, 3], [4, 5, 6]] Write a new record in the last position and add a new record after: >>> Flux[99:] = [[[1, 2, 3], [4, 5, 6]], ... [[11, 12, 13], [14, 15, 16]]] Insert two new records between the current number 5 and 6: >>> Flux[5:6] = [[[1, 2, 3], [4, 5, 6]], [[11, 12, 13], ... [14, 15, 16]]] This operation can be quite slow, as it requires reading and rewriting the entire variable. (CDF does not directly support record insertion.) Change the first element of the first two records but leave other elements alone: >>> Flux[0:2, 0, 0] = [1, 2] Remove the first record: >>> del Flux[0] Removes record 5 (the sixth): >>> del Flux[5] Due to the need to work around a bug in the CDF library, this operation can be quite slow. Delete *all data* from ``Flux``, but leave the variable definition intact: >>> del Flux[...] .. note:: Although this interface only directly supports zVariables, zMode is set on opening the CDF so rVars appear as zVars. See p.24 of the CDF user's guide; pyCDF uses zMode 2. .. autosummary:: ~Var.attrs ~Var.compress ~Var.copy ~Var.dtype ~Var.dv ~Var.insert ~Var.name ~Var.rename ~Var.rv ~Var.shape ~Var.type .. attribute:: Var.attrs zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. .. automethod:: compress .. automethod:: copy .. autoattribute:: dtype .. automethod:: dv .. automethod:: insert .. automethod:: name .. automethod:: rename .. automethod:: rv .. autoattribute:: shape .. 
automethod:: type """ def __init__(self, cdf_file, var_name, *args): """Create or locate a variable Parameters ========== cdf_file : :py:class:`pycdf.CDF` CDF file containing this variable var_name : string name of this variable Other Parameters ================ args additional arguments passed to :py:meth:`_create`. If none, opens an existing variable. If provided, creates a new one. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning """ self.cdf_file = cdf_file #This is the definitive "identify" of variable self._name = None self._type = None #CDF type (long) self._raw = False #Raw access (skip all conversions) if len(args) == 0: self._get(var_name) else: self._create(var_name, *args) #Weak reference to attribute list (use attrs instead) #This avoids a reference loop self._attrlistref = weakref.ref(zAttrList(self)) def __getitem__(self, key): """Returns a slice from the data array. Details under :py:class:`pycdf.Var`. @return: The data from this variable @rtype: list-of-lists of appropriate type. @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) #Hyperslice mostly catches this sort of thing, but #an empty variable is a special case, since we might want to #WRITE to 0th record (which Hyperslice also supports) but #can't READ from it, and iterating over tries to read from it. if hslice.rv: if hslice.dimsizes[0] == 0 and hslice.degen[0] and \ hslice.starts[0] == 0: raise IndexError('record index out of range') #For NRV, again hslice will assume 0th record exists since we might #want to write. So ANY degenerate dim other than the glued-on 0th #suggests an explicit index that should fail. None degenerate suggests #make an empty array. #Note this is pulling a lot of hyperslice stuff into getitem! elif hslice.dimsizes[0] == 0: if len(hslice.degen) > 1 and max(hslice.degen[1:]): raise IndexError('record index out of range') else: #The zero-length dimension is degenerate so it gets chopped, #and you can't have a zero-length numpy array that still #maintains the size of all other dimensions. So just force #a zero-dim array and the rest will follow hslice.counts[...] = 0 #If this is a scalar, need to make a single non-degenerate #dimension so it can be empty. if len(hslice.counts) == 1: hslice.degen[0] = False result = hslice.create_array() if hslice.counts[0] != 0: hslice.select() lib.call(const.GET_, const.zVAR_HYPERDATA_, result.ctypes.data_as(ctypes.c_void_p)) return hslice.convert_input_array(result) def __delitem__(self, key): """Removes a record (or set of records) from the CDF Only whole records can be deleted, so the del call must either specify only one dimension or it must specify all elements of the non-record dimensions. This is *not* a way to resize a variable! Deleting records from the middle of a variable may be very slow in some circumstances. To work around a bug in CDF library versions 3.4.0 and before, all the data must be read in, the requested deletions done, and then all written back out. 
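A sketch of the supported forms, assuming a record-varying variable
``Flux`` with enough records:

>>> del Flux[0]      #delete the first record
>>> del Flux[5:8]    #delete records 5, 6, and 7
>>> del Flux[...]    #delete all records but keep the variable definition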
@param key: index or slice to delete @type key: int or slice @raise TypeError: if an attempt is made to delete from a non record-varying variable, or to delete below the record level """ if not self.rv(): raise TypeError('Cannot delete records from non-record-varying ' 'variable.') hslice = _Hyperslice(self, key) if hslice.dims > 1 and (hslice.counts[1:] != hslice.dimsizes[1:]).any(): raise TypeError('Can only delete entire records.') if hslice.counts[0] == 0: return start = hslice.starts[0] count = hslice.counts[0] interval = hslice.intervals[0] dimsize = hslice.dimsizes[0] self._call() dangerous_delete = False if lib._del_middle_rec_bug and \ (interval != 1 or (start != 0 and start + count < dimsize)): #delete from middle is dangerous if only have one index entry entries = ctypes.c_long(0) lib.call(const.GET_, const.zVAR_nINDEXENTRIES_, ctypes.byref(entries)) dangerous_delete = (entries.value == 1) if dangerous_delete: data = self[...] data = numpy.delete( data, numpy.arange(start, start + count * interval, interval), 0) self[0:dimsize - count] = data first_rec = dimsize - count last_rec = dimsize - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif interval == 1: first_rec = ctypes.c_long(start) last_rec = ctypes.c_long(start + count - 1) lib.call(const.DELETE_, const.zVAR_RECORDS_, first_rec, last_rec) else: self._call() #delete from end to avoid renumbering of records for recno in range(start + (count - 1) * interval, start - 1, -1 * interval): lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(recno), ctypes.c_long(recno)) def __setitem__(self, key, data): """Puts a slice into the data array. Details under :py:class:`pycdf.Var`. @param key: index or slice to store @type key: int or slice @param data: data to store @type data: numpy.array @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. 
IndexError will @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) n_recs = hslice.counts[0] hslice.expand(data) cdf_type = self.type() if cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) else: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=self._np_type()) if cdf_type == const.CDF_EPOCH16.value: datashape = data.shape[:-1] else: datashape = data.shape #Check data sizes if datashape != tuple(hslice.expected_dims()): raise ValueError('attempt to assign data of dimensions ' + str(datashape) + ' to slice of dimensions ' + str(tuple(hslice.expected_dims()))) #Flip majority and reversed dimensions, see convert_input_array data = hslice.convert_output_array(data) #Handle insertions and similar weirdness if hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Specified slice ends before last record, so insert in middle saved_data = self[hslice.starts[0] + n_recs:] if hslice.counts[0] > 0: hslice.select() lib.call(const.PUT_, const.zVAR_HYPERDATA_, data.ctypes.data_as(ctypes.c_void_p)) if hslice.counts[0] < n_recs: first_rec = hslice.starts[0] + hslice.counts[0] last_rec = hslice.dimsizes[0] - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Put saved data in after inserted data self[hslice.starts[0] + hslice.counts[0]:] = saved_data def extend(self, data): """ Append multiple values to the end of this variable This is an efficiency function which overrides the base implementation in MutableSequence. Parameters ---------- data : the data to append """ self[len(self):] = data def insert(self, index, data): """ Inserts a *single* record before an index Parameters ---------- index : int index before which to insert the new record data : the record to insert """ self[index:index] = [data] def _create(self, var_name, datatype, n_elements = 1, dims = (), recVary = const.VARY, dimVarys = None): """Creates a new zVariable @param var_name: name of this variable @type var_name: string @param datatype: CDF data type @type datatype: ctypes.c_long @param n_elements: number of elements (should be 1 except for CDF_CHAR variables). @type n_elements: long @param dims: size of each dimension for multi-dimensional variable, or empty for a zero-dimensional @type dims: sequence of long @param recVary: record variance for this variable (VARY/NOVARY) @type recVary: long @param dimVarys: array of VARY or NOVARY, variance for each dimension @type dimVarys: sequence of long @return: new variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.new}. 
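A sketch of the public route (use L{CDF.new} rather than calling this
method directly; the variable name and dimensions are illustrative):

>>> newvar = cdf.new('Flux', type=pycdf.const.CDF_FLOAT, dims=[2, 3])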
""" dim_array = (ctypes.c_long * len(dims))(*dims) enc_name = var_name.encode('ascii') if dimVarys is None: dim_vary_array = (ctypes.c_long * (len(dims) if len(dims) > 0 else 1))(const.VARY) else: dim_vary_array = (ctypes.c_long * len(dims))(*dimVarys) varNum = ctypes.c_long(0) self.cdf_file._call(const.CREATE_, const.zVAR_, enc_name, datatype, ctypes.c_long(n_elements), ctypes.c_long(len(dims)), dim_array, recVary, dim_vary_array, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) def _delete(self): """Removes this zVariable from the CDF @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ self._call(const.DELETE_, const.zVAR_) self.cdf_file.clear_from_cache(self._name) self._name = None def _get(self, var_name): """Gets an existing zVariable @param var_name: name of this variable @type var_name: string @return: variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.__getitem__}. """ if isinstance(var_name, str_classes): try: enc_name = var_name.encode('ascii').rstrip() except AttributeError: enc_name = var_name.rstrip() #already in ASCII #'touch' CDF to cause an error if the name isn't there; get number varNum = ctypes.c_long(0) self.cdf_file._call(const.GET_, const.zVAR_NUMBER_, enc_name, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) else: #Looking up by number name = ctypes.create_string_buffer(const.CDF_VAR_NAME_LEN256+1) self.cdf_file._call(const.SELECT_, const.zVAR_, ctypes.c_long(var_name), const.GET_, const.zVAR_NAME_, name) self._name = name.value.rstrip() self.cdf_file.add_to_cache(self._name, var_name) def _num(self): """Returns the zVar number for this variable @return: number of this zVar @rtype: int """ return self.cdf_file.var_num(self._name) def __len__(self): """Get number of records for this variable in this file @return: Number of records @rtype: long @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ count = ctypes.c_long(0) self._call(const.GET_, const.zVAR_MAXREC_, ctypes.byref(count)) return (count.value + 1) def __repr__(self): """Returns representation of the variable Cannot return anything that can be eval'd to create a copy, so just wrap the informal representation in angle brackets. @return: info on this zVar @rtype: str """ return '<Var:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the variable This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.Var`. 
@return: info on this zVar, CDFTYPE [dimensions] NRV (if not record-varying) @rtype: str """ if self.cdf_file._opened: cdftype = self.type() chartypes = (const.CDF_CHAR.value, const.CDF_UCHAR.value) rv = self.rv() typestr = lib.cdftypenames[cdftype] + \ ('*' + str(self._nelems()) if cdftype in chartypes else '' ) if rv: sizestr = str([len(self)] + self._dim_sizes()) else: sizestr = str(self._dim_sizes()) return typestr + ' ' + sizestr + ('' if rv else ' NRV') else: if isinstance(self._name, str): return 'zVar "{0}" in closed CDF {1}'.format( self._name, self.cdf_file.pathname) else: return 'zVar "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self.cdf_file.pathname.decode('ascii')) def _n_dims(self): """Get number of dimensions for this variable @return: the number of dimensions @rtype: long """ n_dims = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMDIMS_, ctypes.byref(n_dims)) return n_dims.value def _dim_sizes(self): """Get the dimension sizes for this variable @return: sequence of sizes @rtype: sequence of long @note: This will always be in Python order (i.e. row major, last index iterates most quickly), *regardless* of the majority of the CDF. """ sizes = (ctypes.c_long * const.CDF_MAX_DIMS)(0) self._call(const.GET_, const.zVAR_DIMSIZES_, sizes) sizes = sizes[0:self._n_dims()] return sizes def rv(self, new_rv=None): """ Gets or sets whether this variable has record variance If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Other Parameters ================ new_rv : boolean True to change to record variance, False to change to NRV, unspecified to simply check variance. Returns ======= out : Boolean True if record varying, False if NRV """ if new_rv != None: self._call(const.PUT_, const.zVAR_RECVARY_, const.VARY if new_rv else const.NOVARY) vary = ctypes.c_long(0) self._call(const.GET_, const.zVAR_RECVARY_, ctypes.byref(vary)) return vary.value != const.NOVARY.value def dv(self, new_dv=None): """ Gets or sets dimension variance of each dimension of variable. If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Parameters ========== new_dv : list of boolean Each element True to change that dimension to dimension variance, False to change to not dimension variance. (Unspecified to simply check variance.) Returns ======= out : list of boolean True if that dimension has variance, else false. """ ndims = self._n_dims() if new_dv != None: if len(new_dv) != ndims: raise ValueError('Must specify variance for ' + str(ndims) + 'dimensions.') varies = (ctypes.c_long * ndims)( *[const.VARY if dv else const.NOVARY for dv in new_dv]) self._call(const.PUT_, const.zVAR_DIMVARYS_, varies) if ndims == 0: return [] varies = (ctypes.c_long * const.CDF_MAX_DIMS)() self._call(const.GET_, const.zVAR_DIMVARYS_, varies) return [dv != const.NOVARY.value for dv in varies[0:ndims]] def _call(self, *args, **kwargs): """Select this CDF and variable and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. @param args: Passed directly to the CDF library interface. Useful constants are defined in the :py:mod:`pycdf.const` module of this package. @type args: various, see :py:mod:`ctypes`. 
@return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self.cdf_file._call( const.SELECT_, const.zVAR_, ctypes.c_long(self.cdf_file.var_num(self._name)), *args, **kwargs) def _np_type(self): """Returns the numpy type of this variable This is the numpy type that will come directly out of the CDF; see :meth:`dtype` for the representation post-conversion. Raises ====== CDFError : for library-reported error or failure to find numpy type Returns ======= out : dtype numpy dtype that will hold value from this variable """ cdftype = self.type() if cdftype == const.CDF_CHAR.value or cdftype == const.CDF_UCHAR.value: return numpy.dtype('S' + str(self._nelems())) try: return lib.numpytypedict[cdftype] except KeyError: raise CDFError(const.BAD_DATA_TYPE) def type(self, new_type=None): """ Returns or sets the CDF type of this variable Parameters ========== new_type : ctypes.c_long the new type from :mod:`~pycdf.const` Returns ======= out : int CDF type """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) n_elements = ctypes.c_long(self._nelems()) self._call(const.PUT_, const.zVAR_DATASPEC_, new_type, n_elements) self._type = None if self._type is None: cdftype = ctypes.c_long(0) self._call(const.GET_, const.zVAR_DATATYPE_, ctypes.byref(cdftype)) self._type = cdftype.value return self._type def _nelems(self): """Number of elements for each value in this variable This is the length of strings for CHAR and UCHAR, should be 1 otherwise. @return: length of strings @rtype: int """ nelems = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMELEMS_, ctypes.byref(nelems)) return nelems.value def name(self): """ Returns the name of this variable Returns ======= out : str variable's name """ if isinstance(self._name, str): return self._name elif isinstance(self._name, bytes): return self._name.decode() def compress(self, comptype=None, param=None): """ Set or check the compression of this variable Compression may not be changeable on variables with data already written; even deleting the data may not permit the change. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
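A usage sketch (assuming gzip compression is wanted; the constant is taken
from :mod:`~pycdf.const`):

>>> variable.compress(pycdf.const.GZIP_COMPRESSION)  #default gzip level
>>> variable.compress()                              #report current settings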
Returns ======= out : tuple the (comptype, param) currently in effect """ return _compress(self, comptype, param) def copy(self): """ Copies all data and attributes from this variable Returns ======= out : :class:`VarCopy` list of all data in record order """ return VarCopy(self) def rename(self, new_name): """ Renames this variable Parameters ========== new_name : str the new name for this variable """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_VAR_NAME_LEN256: raise CDFError(const.BAD_VAR_NAME) self._call(const.PUT_, const.zVAR_NAME_, enc_name) self.cdf_file.add_to_cache( enc_name, self.cdf_file.var_num(self._name)) #Still in cache del self.cdf_file._var_nums[self._name] self._name = enc_name @property def shape(self): """ Provides the numpy array-like shape of this variable. Returns a tuple; first element is number of records (RV variable only) And the rest provide the dimensionality of the variable. .. note:: Assigning to this attribute will not change the shape. """ if self.rv(): return tuple([len(self)] + self._dim_sizes()) else: return tuple(self._dim_sizes()) @property def dtype(self): """ Provide the numpy dtype equivalent to the CDF type of this variable. Data from this variable will be returned in numpy arrays of this type. See Also -------- type """ cdftype = self.type() if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str is not bytes and not self._raw: return numpy.dtype('U' + str(self._nelems())) if cdftype in (const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value) and not self._raw: return numpy.dtype('O') return self._np_type() def _get_attrs(self): """Get attribute list Provide access to the zVar's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = zAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. """) class _Hyperslice(object): """Represents a CDF 'slice' used for the hyper CDF functions For internal module use only. @ivar dims: number of dimensions to this slice, usually number of dimensions to the variable plus one for the record, which represents the 0th (least rapidly varying) dimension. @type dims: int @ivar dimsizes: size of each dimension (0th is number of records) @type dimsizes: list of int @ivar starts: index of the start value for each dimension ('dimension indices' in CDF speak) @type starts: list of int @ivar counts: number of values to get from each dimension. Final result will be the product of everything in counts. ('dimension counts' in CDF speak) @type counts: numpy.array @ivar intervals: interval between successive indices to use for each dimension. ('dimension invervals' in CDF speak) @type intervals: list of int @ivar degen: is this dimension degenerate, i.e. should be removed in the returned dataset. A 3D array with one dimension degenerate will be returned as a 2D array (i.e. list-of-lists.) @type degen: numpy.array @ivar rev: should this dimension be returned in reverse order? 
@type rev: numpy.array @ivar column: is this slice in column-major mode (if false, row-major) @type column: boolean @ivar zvar: what CDF variable this object slices on @type zvar: :py:class:`pycdf.Var` @ivar expanded_key: fully-expanded version of the key passed to the constructor (all dimensions filled in) @type expanded_key: tuple @note: All dimension-related variables are stored row-major (Python order) """ def __init__(self, zvar, key): """Create a Hyperslice @param zvar: zVariable that this slices @type zvar: :py:class:`pycdf.Var` @param key: Python multi-dimensional slice as passed to __getitem__ @type key: tuple of slice and/or int @raise IndexError: if slice is out of range, mismatches dimensions, or otherwise unparsable. @raise ValueError: if slice has invalid values """ self.zvar = zvar self.rv = self.zvar.rv() #dim of records, + 1 record dim (NRV always is record 0) self.dims = zvar._n_dims() + 1 self.dimsizes = [len(zvar)] + \ zvar._dim_sizes() self.starts = [0] * self.dims self.counts = numpy.empty((self.dims,), dtype=numpy.int32) self.counts.fill(1) self.intervals = [1] * self.dims self.degen = numpy.zeros(self.dims, dtype=numpy.bool) self.rev = numpy.zeros(self.dims, dtype=numpy.bool) #key is: #1. a single value (integer or slice object) if called 1D #2. a tuple (of integers and/or slice objects) if called nD #3. Each item is either a single value (degenerate dim) # or a slice object. if not hasattr(key, '__len__'): #Not a container object, pack in tuple key = (key, ) if not self.rv: key = (0, ) + key #NRV, so always get 0th record (degenerate) key = self.expand_ellipsis(key, self.dims) if self.rv: #special-cases for RV variables if len(key) == 1: #get all data for this record(s) key = self.expand_ellipsis(key + (Ellipsis, ), self.dims) elif len(key) == self.dims - 1: #get same slice from each record key = (slice(None, None, None), ) + key if len(key) == self.dims: self.expanded_key = key for i in range(self.dims): idx = key[i] if hasattr(idx, 'start'): #slice (self.starts[i], self.counts[i], self.intervals[i], self.rev[i]) = \ self.convert_range(idx.start, idx.stop, idx.step, self.dimsizes[i]) else: #Single degenerate value if idx < 0: idx += self.dimsizes[i] if idx != 0 and (idx >= self.dimsizes[i] or idx < 0): raise IndexError('list index out of range') self.starts[i] = idx self.degen[i] = True else: raise IndexError('Slice does not match dimensions for zVar ' + str(zvar._name)) self.column = zvar.cdf_file.col_major() def expected_dims(self, data=None): """Calculate size of non-degenerate dimensions Figures out size, in each dimension, of expected input data @return: size of each dimension for this slice, excluding degenerate @rtype: list of int """ return [self.counts[i] for i in range(self.dims) if not self.degen[i]] def expand(self, data): """Expands the record dimension of this slice to hold a set of data If the length of data (outermost dimension) is larger than the record count (counts[0]) for this slice, expand the slice to hold all the data. This requires that the record dimension of the slice not be degenerate, and also that it not have been completely specified when the hyperslice was created (i.e. record dimension either ellipsis or no specified stop.) Does *not* expand any other dimension, since that's Very Hard in CDF. 
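(For instance, assigning ten records of data to ``var[5:]`` expands this
slice to cover records 5 through 14, whereas ``var[5:8]`` keeps its
explicit three-record count.)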
@param data: the data which are intended to be stored in this slice @type data: list """ rec_slice = self.expanded_key[0] if not self.rv or isinstance(data, str_classes) or self.degen[0] or \ not hasattr(rec_slice, 'stop'): return if len(data) < self.counts[0]: #Truncate to fit data if rec_slice.stop is None and rec_slice.step in (None, 1): self.counts[0] = len(data) elif len(data) > self.counts[0]: #Expand to fit data if rec_slice.step in (None, 1): self.counts[0] = len(data) def create_array(self): """Creates a numpy array to hold the data from this slice Returns ======= out : numpy.array array sized, typed, and dimensioned to hold data from this slice """ counts = self.counts degen = self.degen if self.column: counts = self.reorder(counts) degen = self.reorder(degen) #TODO: Forcing C order for now, revert to using self.column later array = numpy.empty( [counts[i] for i in range(len(counts)) if not degen[i]], self.zvar._np_type(), order='C') return numpy.require(array, requirements=('C', 'A', 'W')) def convert_input_array(self, buffer): """Converts a buffer of raw data from this slice EPOCH(16) variables always need to be converted. CHAR need converted to Unicode if py3k Parameters ========== buffer : numpy.array data as read from the CDF file Returns ======= out : numpy.array converted data """ result = self._flip_array(buffer) #Convert to derived types cdftype = self.zvar.type() if not self.zvar._raw: if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str != bytes: dt = numpy.dtype('U{0}'.format(result.dtype.itemsize)) result = numpy.require(numpy.char.array(result).decode(), dtype=dt) elif cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(result) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(result) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(result) return result def convert_output_array(self, buffer): """Convert a buffer of data that will go into this slice Parameters ========== buffer : numpy.array data to go into the CDF file Returns ======= out : numpy.array input with majority flipped and dimensions reversed to be suitable to pass directly to CDF library. """ buffer = self._flip_array(buffer) return numpy.require(buffer, requirements=('C', 'A', 'W')) def _flip_array(self, data): """ Operations for majority, etc. common between convert_input and _output """ cdftype = self.zvar.type() #Flip majority if any non-degenerate dimensions exist if self.column and not min(self.degen): #Record-number dim degen, swap whole thing if self.degen[0]: if cdftype == const.CDF_EPOCH16.value: #Maintain last dimension data = data.transpose( list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose() #Record-number dimension is not degenerate, so keep it first else: if cdftype == const.CDF_EPOCH16.value: data = data.transpose( [0] + list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose( [0] + list(range(len(data.shape) - 1, 0, -1))) #Reverse non-degenerate dimensions in rev #Remember that the degenerate indices are already gone! 
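#(The hyper-read itself always runs forward: a negative-step slice is
# recorded in self.rev via convert_range and serviced here with an
# in-memory slice(None, None, -1) rather than a reversed read from disk.)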
if self.rev.any(): sliced = [(slice(None, None, -1) if self.rev[i] else slice(None)) for i in range(self.dims) if not self.degen[i]] if cdftype == const.CDF_EPOCH16.value: #don't reverse last dim sliced.extend(slice(None)) data = operator.getitem(data, tuple(sliced)) return data def select(self): """Selects this hyperslice in the CDF Calls the CDF library to select the CDF, variable, records, and array elements corresponding to this slice. """ args = (const.SELECT_, const.zVAR_RECNUMBER_, ctypes.c_long(self.starts[0]), const.SELECT_, const.zVAR_RECCOUNT_, ctypes.c_long(self.counts[0]), const.SELECT_, const.zVAR_RECINTERVAL_, ctypes.c_long(self.intervals[0])) if self.dims > 1: dims = self.dims - 1 args += (const.SELECT_, const.zVAR_DIMINDICES_, (ctypes.c_long * dims)(*self.starts[1:]), const.SELECT_, const.zVAR_DIMCOUNTS_, (ctypes.c_long * dims)(*self.counts[1:]), const.SELECT_, const.zVAR_DIMINTERVALS_, (ctypes.c_long * dims)(*self.intervals[1:])) self.zvar._call(*args) @staticmethod def expand_ellipsis(slices, n_dims): """Expands any ellipses into correct number of full-size slices @param slices: tuple of slices, integers, or ellipse objects @type slices: tuple @param n_dims: number of dimensions this slice is over @type n_dims: int @return: L{slices} with ellipses replaced by appropriate number of full-dimension slices @rtype: tuple @raise IndexError: if ellipses specified when already have enough dimensions """ if slices is Ellipsis: return tuple([slice(None, None, None) for i in range(n_dims)]) #Elements might be numpy arrays, so can't use in/index idx = [i for i, v in enumerate(slices) if v is Ellipsis] if not idx: #no ellipsis return slices if len(idx) > 1: #multiples! raise IndexError('Ellipses can only be used once per slice.') idx = idx[0] #how many dims to expand ellipsis to #remember the ellipsis is in len(slices) and must be replaced! extra = n_dims - len(slices) + 1 if extra < 0: raise IndexError('too many indices') result = slices[0:idx] + (slice(None), ) * extra + slices[idx+1:] return result @staticmethod def check_well_formed(data): """Checks if input data is well-formed, regular array""" d = numpy.asanyarray(data) if d.dtype == numpy.object: #this is probably going to be bad try: len(d.flat[0]) except TypeError: #at least it's not a list pass else: raise ValueError( 'Data must be well-formed, regular array of number, ' 'string, or datetime') @staticmethod def dimensions(data): """Finds the dimensions of a nested list-of-lists @param data: data of which dimensions are desired @type data: list (of lists) @return: dimensions of L{data}, in order outside-in @rtype: list of int @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) _Hyperslice.check_well_formed(d) return d.shape @staticmethod def types(data, backward=False): """Find dimensions and valid types of a nested list-of-lists Any given data may be representable by a range of CDF types; infer the CDF types which can represent this data. This breaks down to: 1. Proper kind (numerical, string, time) 2. Proper range (stores highest and lowest number) 3. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: 1. Type that matches precision of data first, then 2. integer type before float type, then 3. Smallest type first, then 4. signed type first, then 5. specifically-named (CDF_BYTE) vs. 
generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if L{data} specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: 1. absolute values between 0 and 3e-39 2. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. @param data: data for which dimensions and CDF types are desired @type data: list (of lists) @param backward: limit to pre-CDF3 types @type backward: bool @return: dimensions of L{data}, in order outside-in; CDF types which can represent this data; number of elements required (i.e. length of longest string) @rtype: 3-tuple of lists ([int], [ctypes.c_long], [int]) @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) dims = d.shape elements = 1 types = [] _Hyperslice.check_well_formed(d) if d.dtype.kind in ('S', 'U'): #it's a string types = [const.CDF_CHAR, const.CDF_UCHAR] elements = d.dtype.itemsize if d.dtype.kind == 'U': #UTF-8 uses 4 bytes per elements //= 4 elif d.size and hasattr(numpy.ma.getdata(d).flat[0], 'microsecond'): if max((dt.microsecond % 1000 for dt in d.flat)) > 0: types = [const.CDF_EPOCH16, const.CDF_EPOCH, const.CDF_TIME_TT2000] else: types = [const.CDF_EPOCH, const.CDF_EPOCH16, const.CDF_TIME_TT2000] if backward: del types[types.index(const.CDF_EPOCH16)] del types[-1] elif not lib.supports_int8: del types[-1] elif d is data or isinstance(data, numpy.generic): #numpy array came in, use its type (or byte-swapped) types = [k for k in lib.numpytypedict if (lib.numpytypedict[k] == d.dtype or lib.numpytypedict[k] == d.dtype.newbyteorder()) and not k in lib.timetypes] if (not lib.supports_int8 or backward) \ and const.CDF_INT8.value in types: del types[types.index(const.CDF_INT8.value)] #Maintain priority to match the ordered lists below: #float/double (44, 45) before real (21/22), and #byte (41) before int (1) before char (51). So hack. #Consider making typedict an ordered dict once 2.6 is dead. types.sort(key=lambda x: x % 50, reverse=True) if not types: #not a numpy array, or can't parse its type if d.dtype.kind == 'O': #Object. 
Try to make it numeric #Can't do safe casting from Object, so try and compare #Basically try most restrictive to least restrictive trytypes = (numpy.uint64, numpy.int64, numpy.float64) for t in trytypes: try: newd = d.astype(dtype=t) except: #Failure to cast, try next type continue if (newd == d).all(): #Values preserved, use this type d = newd #Continue with normal guessing, as if a list break else: #fell through without a match raise ValueError( 'Cannot convert generic objects to CDF type.') if d.dtype.kind in ('i', 'u'): #integer minval = numpy.min(d) maxval = numpy.max(d) if minval < 0: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_INT2, const.CDF_INT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 15, 2 ** 31, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] else: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_UINT1, const.CDF_INT2, const.CDF_UINT2, const.CDF_INT4, const.CDF_UINT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 8, 2 ** 15, 2 ** 16, 2 ** 31, 2 ** 32, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] types = [t for (t, c) in zip(types, cutoffs) if c > maxval and (minval >= 0 or minval >= -c)] if (not lib.supports_int8 or backward) \ and const.CDF_INT8 in types: del types[types.index(const.CDF_INT8)] else: #float if dims is (): if d != 0 and (abs(d) > 1.7e38 or abs(d) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] else: absolutes = numpy.abs(d[d != 0]) if len(absolutes) > 0 and \ (numpy.max(absolutes) > 1.7e38 or numpy.min(absolutes) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] types = [t.value if hasattr(t, 'value') else t for t in types] return (dims, types, elements) @staticmethod def reorder(seq): """Reorders seq to switch array majority Used to take an array of subscripts between row and column majority. First element is not touched, being the record number. @param seq: a sequence of *subscripts* @type seq: sequence of integers @return: seq with all but element 0 reversed in order @rtype: sequence of integers """ return numpy.concatenate((seq[0:1], numpy.flipud(seq)[:-1])) @staticmethod def convert_range(start, stop, step, size): """Converts a start/stop/step range to start/count/interval (i.e. changes from Python-style slice to CDF-style) @param start: index to start a slice at, may be none or negative @type start: int @param stop: index at end of slice (one-past, standard Python), may be none or negative @type stop: int @param step: interval for stepping through stlice @type step: int @param size: size of list to slice @type size: int @return: (start, count, interval, rev) where: 1. start is the start index, normalized to be within the size of the list and negatives handled 2. count is the number of records in the slice, guaranteed to stop before the end 3. interval is the skip between records 4. rev indicates whether the sequence should be reversed @rtype: (int, int, int, boolean) """ (start, stop, step) = slice(start, stop, step).indices(size) if step < 0: step *= -1 count = int((start - stop + step - 1) / step) start = start - (count - 1) * step rev = True else: count = int((stop - start + step - 1) / step) rev = False if count < 0: count = 0 start = 0 return (start, count, step, rev) class Attr(MutableSequence): """An attribute, g or z, for a CDF .. 
warning:: This class should not be used directly, but only in its subclasses, :class:`gAttr` and :class:`zAttr`. The methods listed here are safe to use in the subclasses. Represents a CDF attribute, providing access to the Entries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. An introduction to CDF attributes can be found in section 2.4 of the CDF user's guide. Each element of the list is a single Entry of the appropriate type. The index to the elements is the Entry number. Multi-dimensional slicing is *not* supported; an Entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry .. autosummary:: ~Attr.append ~Attr.has_entry ~Attr.insert ~Attr.max_idx ~Attr.new ~Attr.number ~Attr.rename ~Attr.type .. automethod:: append .. automethod:: has_entry .. automethod:: insert .. automethod:: max_idx .. automethod:: new .. automethod:: number .. automethod:: rename .. automethod:: type """ def __init__(self, cdf_file, attr_name, create=False): """Initialize this attribute @param cdf_file: CDF file containing this attribute @type cdf_file: :py:class:`pycdf.CDF` @param attr_name: Name of this attribute @type attr_name: str @param create: True to create attribute, False to look up existing. @type create: bool """ self._cdf_file = cdf_file self._raw = False if isinstance(attr_name, str_classes): try: self._name = attr_name.encode('ascii') except AttributeError: self._name = attr_name attrno = ctypes.c_long() if create: self._cdf_file._call(const.CREATE_, const.ATTR_, self._name, self.SCOPE, ctypes.byref(attrno)) self._cdf_file.add_attr_to_cache( self._name, attrno.value, self.SCOPE == const.GLOBAL_SCOPE) else: #Ensure exists, and populate cache. See scope note below attrno, scope = self._cdf_file.attr_num(self._name) else: name = ctypes.create_string_buffer(const.CDF_ATTR_NAME_LEN256 + 1) scope = ctypes.c_long(0) self._cdf_file._call(const.SELECT_, const.ATTR_, ctypes.c_long(attr_name)) #Because it's possible to create a gAttr Python objecting #referencing an Attribute with variable scope, and vice-versa, #do NOT assume the scope matches #(Higher level code checks for that being a bad thing.) self._cdf_file._call( const.GET_, const.ATTR_NAME_, name, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) self._name = name.value.rstrip() if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) self._cdf_file.add_attr_to_cache(self._name, attr_name, scope) def __getitem__(self, key): """Return a slice of Entries. Because Attributes may be sparse, a multi-element slice will return None for those elements which do not have associated Entries. @param key: index or range of Entry number to return @type key: slice or int @return: a list of entries, appropriate type. @raise IndexError: if L{key} is an int and that Entry number does not exist. 
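Sketch of the two access forms (``attr`` is an attribute with sparse
Entries):

>>> attr[0:5]    #five-element list; None where no Entry exists
>>> attr[2]      #single Entry value, or IndexError if Entry 2 is absent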
""" if key is Ellipsis: key = slice(None, None, None) if hasattr(key, 'indices'): idx = range(*key.indices(self.max_idx() + 1)) return [self._get_entry(i) if self.has_entry(i) else None for i in idx] else: if self.has_entry(key): return self._get_entry(key) else: raise IndexError('list index ' + str(key) + ' out of range.') def _check_other_entries(self, types): """Try to get the type of this entry from others in the Attribute For zAttrs, checks if all other Entries are the same type, and at least one doesn't match its zVar, i.e. Entry type dominates (otherwise assumption is the Var type dominates). For gAttrs, checks all other Entries, and gives priority to the one that's earliest in the possible type list and exists in other Entries. This is only one component of Entry type guessing! :param list types: CDF types that are candidates (match the data) :return: The type discerned from other Entries, or None """ if self.ENTRY_ == const.zENTRY_: #If everything else is the same entry type, #and one is not the same as its var, probably #all entries should be of that type cand_et = None #The Entry type that might work one_var_diff = False #One Var has a type different from Entry for num in range(self.max_idx() + 1): if not self.has_entry(num): continue vartype = self._cdf_file[num].type() entrytype = self.type(num) if vartype != entrytype: one_var_diff = True if cand_et is None: if not entrytype in types: return None #One var has Entry with "impossible" type cand_et = entrytype elif cand_et != entrytype: return None #Two vars have Entries with different types if one_var_diff and cand_et is not None: return cand_et else: # Of those types which exist in other entries, # find the one which is earliest # in types, i.e. the preferred type entrytypes = [self.type(num) for num in range(self.max_idx() + 1) if self.has_entry(num)] entrytypes = [et for et in entrytypes if et in types] if entrytypes: return types[ min([types.index(et) for et in entrytypes])] return None def __setitem__(self, key, data): """Set a slice of Entries. @param key: index or range of Entry numbers to set @type key: slice or int @param data: the data to set these entries to. Normally each entry should be a sequence; if a scalar is provided, it is treated as a single-element list. @type data: scalar or list @raise ValueError: if size of {data} does not match size of L{key} @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. 
""" if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): #Single value, promote everything a dimension idx = (key, key + 1, 1) data = [data] else: idx = key.indices(self.max_idx() + 1) if key.step is None or key.step > 0: #Iterating forward, extend slice to match data if len(data) > len(range(*idx)): idx = (idx[0], idx[0] + idx[2] * len(data), idx[2]) #get, and check, types and sizes for all data #checks first so don't have error after changing half the Entries data_idx = -1 typelist = [] for i in range(*idx): data_idx += 1 if data_idx >= len(data): continue datum = data[data_idx] if datum is None: typelist[i] = (None, None, None) continue (dims, types, elements) = _Hyperslice.types( datum, backward=self._cdf_file.backward) if len(types) <= 0: raise ValueError('Cannot find a matching CDF type.') if len(dims) > 1: raise ValueError('Entries must be scalar or 1D.') elif len(dims) == 1 and isinstance(datum[0], str_classes): raise ValueError('Entry strings must be scalar.') entry_type = None if self.has_entry(i): #If the entry already exists, match its type entry_type = self.type(i) if not entry_type in types: entry_type = None if entry_type is None: #Check other entries for this attribute entry_type = self._check_other_entries(types) if entry_type is None and self.ENTRY_ == const.zENTRY_: #Fall back to zVar type vartype = self._cdf_file[i].type() if vartype in types: entry_type = vartype else: entry_type = types[0] elif entry_type is None: entry_type = types[0] if not entry_type in lib.numpytypedict: raise ValueError('Cannot find a matching numpy type.') typelist.append((dims, entry_type, elements)) data_idx = -1 for i in range(*idx): data_idx += 1 if data_idx >= len(data) or data[data_idx] is None: if self.has_entry(i): del self[i] continue datum = data[data_idx] (dims, entry_type, elements) = typelist[data_idx] self._write_entry(i, datum, entry_type, dims, elements) def __delitem__(self, key): """Delete a slice of Entries. @param key: index or range of Entry numbers to delete @type key: slice or int @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. """ if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): idx = (key, key + 1, 1) else: idx = key.indices(self.max_idx() + 1) for i in range(*idx): self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(i), const.DELETE_, self.ENTRY_) def __iter__(self, current=0): """Iterates over all entries in this Attribute Returns data from one entry at a time until reaches the end. @note: Returned in entry-number order. """ while current <= self.max_idx(): if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current += 1 def __reversed__(self, current=None): """Iterates over all entries in this Attribute Returns data from one entry at a time, starting at end and going to beginning. @note: Returned in entry-number order. """ if current is None: current = self.max_idx() while current >= 0: if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current -= 1 def __len__(self): """Number of Entries for this Attr. NOT same as max Entry number. 
@return: Number of Entries @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_NUMENTRIES_, ctypes.byref(count)) return count.value def __repr__(self): """Returns representation of an attribute Cannot return anything that can be eval'd to create a copy of the attribtute, so just wrap the informal representation in angle brackets. @return: all the data in this attribute @rtype: str """ return '<\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute This is an 'informal' representation in that it cannot be evaluated directly to create an L{Attr}. @return: all the data in this attribute @rtype: str """ if self._cdf_file._opened: return '\n'.join([str(item) for item in self]) else: if isinstance(self._name, str): return 'Attribute "{0}" in closed CDF {1}'.format( self._name, self._cdf_file.pathname) else: return 'Attribute "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self._cdf_file.pathname.decode('ascii')) def insert(self, index, data): """Insert an entry at a particular number Inserts entry at particular number while moving all subsequent entries to one entry number later. Does not close gaps. Parameters ========== index : int index where to put the new entry data : data for the new entry """ max_entry = self.max_idx() if index > max_entry: #Easy case self[index] = data return for i in range(max_entry, index - 1, -1): if self.has_entry(i+1): self.__delitem__(i+1) if self.has_entry(i): self.new(self.__getitem__(i), type=self.type(i), number=i+1) self[index] = data def append(self, data): """Add an entry to end of attribute Puts entry after last defined entry (does not fill gaps) Parameters ========== data : data for the new entry """ self[self.max_idx() + 1] = data def _call(self, *args, **kwargs): """Select this CDF and Attr and call the CDF internal interface @param args: Passed directly to the CDF library interface. @type args: various, see :py:mod:`ctypes`. @return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self._cdf_file._call( const.SELECT_, const.ATTR_, ctypes.c_long(self._cdf_file.attr_num(self._name)[0]), *args, **kwargs) def _entry_len(self, number): """Number of elements in an Entry @param number: number of Entry @type number: int @return: number of elements @rtype: int """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') count = ctypes.c_long(0) self._call( const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_NUMELEMS_, ctypes.byref(count)) return count.value def type(self, number, new_type=None): """Find or change the CDF type of a particular Entry number Parameters ========== number : int number of Entry to check or change Other Parameters ================ new_type type to change this Entry to, from :mod:`~pycdf.const`. Omit to only check type. Returns ======= out : int CDF variable type, see :mod:`~pycdf.const` Notes ===== If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) size = ctypes.c_long(self._entry_len(number)) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATASPEC_, new_type, size, ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') cdftype = ctypes.c_long(0) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATATYPE_, ctypes.byref(cdftype), ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') return cdftype.value def has_entry(self, number): """Check if this attribute has a particular Entry number Parameters ========== number : int number of Entry to check or change Returns ======= out : bool True if ``number`` is a valid entry number; False if not """ status = self._call(const.CONFIRM_, self.ENTRY_EXISTENCE_, ctypes.c_long(number), ignore=(const.NO_SUCH_ENTRY, )) return not status == const.NO_SUCH_ENTRY def max_idx(self): """Maximum index of Entries for this Attr Returns ======= out : int maximum Entry number """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_MAXENTRY_, ctypes.byref(count)) return count.value def new(self, data, type=None, number=None): """Create a new Entry in this Attribute .. note:: If ``number`` is provided and an Entry with that number already exists, it will be overwritten. Parameters ========== data data to put in the Entry Other Parameters ================ type : int type of the new Entry, from :mod:`~pycdf.const` (otherwise guessed from ``data``) number : int Entry number to write, default is lowest available number. """ if number is None: number = 0 while self.has_entry(number): number += 1 (dims, types, elements) = _Hyperslice.types( data, backward=self._cdf_file.backward) if type is None: #Guess based on other entries type = self._check_other_entries(types) if type is None and self.ENTRY_ == const.zENTRY_: #Try to match variable type vartype = self._cdf_file[number].type() if vartype in types: type = vartype if type is None: type = types[0] elif hasattr(type, 'value'): type = type.value self._write_entry(number, data, type, dims, elements) def number(self): """Find the attribute number for this attribute Returns ======= out : int attribute number """ no = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.ATTR_NUMBER_, self._name, ctypes.byref(no)) return no.value def global_scope(self): """Determine scope of this attribute. Returns ======= out : bool True if global (i.e. gAttr), False if zAttr """ return self._cdf_file.attr_num(self._name)[1] def rename(self, new_name): """Rename this attribute Renaming a zAttribute renames it for *all* zVariables in this CDF! 
Parameters ========== new_name : str the new name of the attribute """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_ATTR_NAME_LEN256: raise CDFError(const.BAD_ATTR_NAME) self._call(const.PUT_, const.ATTR_NAME_, enc_name) self._cdf_file.add_attr_to_cache( enc_name, *self._cdf_file.attr_num(self._name)) #still in cache del self._cdf_file._attr_info[self._name] self._name = enc_name def _get_entry(self, number): """Read an Entry associated with this L{Attr} @param number: number of Entry to return @type number: int @return: data from entry numbered L{number} @rtype: list or str """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') #Make a big enough buffer length = self._entry_len(number) cdftype = self.type(number) if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): buff = numpy.empty((), 'S{0}'.format(length), order='C') else: if not cdftype in lib.numpytypedict: raise CDFError(const.BAD_DATA_TYPE) buff = numpy.empty((length,), lib.numpytypedict[cdftype], order='C') buff = numpy.require(buff, requirements=('C', 'A', 'W')) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATA_, buff.ctypes.data_as(ctypes.c_void_p)) #decode if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): if str == bytes or self._raw: #Py2k, leave as bytes result = bytes(buff) else: #Py3k, make unicode result = str(numpy.char.array(buff).decode()) else: if not self._raw: if cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(buff) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(buff) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(buff) else: result = buff else: result = buff if length == 1: result = result[0] return result def _write_entry(self, number, data, cdf_type, dims, elements): """Write an Entry to this Attr. @param number: number of Entry to write @type number: int @param data: data to write @param cdf_type: the CDF type to write, from :py:mod:`pycdf.const` @param dims: dimensions of L{data} @type dims: list @param elements: number of elements in L{data}, 1 unless it is a string @type elements: int """ if len(dims) == 0: n_write = 1 else: n_write = dims[0] if cdf_type in (const.CDF_CHAR.value, const.CDF_UCHAR.value): data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.dtype('S' + str(elements))) n_write = elements elif cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data), except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) elif cdf_type in lib.numpytypedict: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=lib.numpytypedict[cdf_type]) else: raise CDFError(const.BAD_DATA_TYPE) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATA_, ctypes.c_long(cdf_type), ctypes.c_long(n_write), data.ctypes.data_as(ctypes.c_void_p)) def _delete(self): """Delete this Attribute Also deletes all Entries associated with it. 
""" self._call(const.DELETE_, const.ATTR_) self._cdf_file.clear_attr_from_cache(self._name) self._name = None class zAttr(Attr): """zAttribute for zVariables within a CDF. .. warning:: Because zAttributes are shared across all variables in a CDF, directly manipulating them may have unexpected consequences. It is safest to operate on zEntries via :class:`zAttrList`. .. note:: When accessing a zAttr, pyCDF exposes only the zEntry corresponding to the associated zVariable. See Also ======== :class:`Attr` """ ENTRY_ = const.zENTRY_ ENTRY_DATA_ = const.zENTRY_DATA_ SCOPE = const.VARIABLE_SCOPE ENTRY_EXISTENCE_ = const.zENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMzENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXzENTRY_ ENTRY_NUMELEMS_ = const.zENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.zENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.zENTRY_DATASPEC_ def insert(self, index, data): """Insert entry at particular index number Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError def append(self, index, data): """Add entry to end of attribute list Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError class gAttr(Attr): """Global Attribute for a CDF Represents a CDF attribute, providing access to the gEntries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. Normally accessed by providing a key to a :class:`gAttrList`: >>> attribute = cdffile.attrs['attribute_name'] >>> first_gentry = attribute[0] Each element of the list is a single gEntry of the appropriate type. The index to the elements is the gEntry number. A gEntry may be either a single string or a 1D array of numerical type. Entries of numerical type (everything but CDF_CHAR and CDF_UCHAR) with a single element are returned as scalars; multiple-element entries are returned as a list. No provision is made for accessing below the entry level; the whole list is returned at once (but Python's slicing syntax can be used to extract individual items from that list.) Multi-dimensional slicing is *not* supported; an entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry gEntries are *not* necessarily contiguous; a gAttribute may have an entry 0 and entry 2 without an entry 1. :meth:`~Attr.len` will return the *number* of gEntries; use :meth:`~Attr.max_idx` to find the highest defined gEntry number and :meth:`~Attr.has_entry` to determine if a particular gEntry number exists. Iterating over all entries is also supported:: >>> entrylist = [entry for entry in attribute] Deleting gEntries will leave a "hole": >>> attribute[0:3] = [1, 2, 3] >>> del attribute[1] >>> attribute.has_entry(1) False >>> attribute.has_entry(2) True >>> print attribute[0:3] [1, None, 3] Multi-element slices over nonexistent gEntries will return ``None`` where no entry exists. Single-element indices for nonexistent gEntries will raise ``IndexError``. Assigning ``None`` to a gEntry will delete it. 
When assigning to a gEntry, the type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing gEntry of the same number in this gAttribute #. other gEntries in this gAttribute #. data-matching constraints described in :meth:`CDF.new`. See Also ======== :class:`Attr` """ ENTRY_ = const.gENTRY_ ENTRY_DATA_ = const.gENTRY_DATA_ SCOPE = const.GLOBAL_SCOPE ENTRY_EXISTENCE_ = const.gENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMgENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXgENTRY_ ENTRY_NUMELEMS_ = const.gENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.gENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.gENTRY_DATASPEC_ class AttrList(MutableMapping): """Object representing a list of attributes. .. warning:: This class should not be used directly, but only via its subclasses, :class:`gAttrList` and :class:`zAttrList`. Methods listed here are safe to use from the subclasses. .. autosummary:: ~AttrList.clone ~AttrList.copy ~AttrList.from_dict ~AttrList.new ~AttrList.rename .. automethod:: clone .. automethod:: copy .. automethod:: from_dict .. automethod:: new .. automethod:: rename """ def __init__(self, cdf_file, special_entry=None): """Initialize the attribute collection @param cdf_file: CDF these attributes are in @type cdf_file: :py:class:`pycdf.CDF` @param special_entry: callable which returns a "special" entry number, used to limit results for zAttrs to those which match the zVar (i.e. the var number) @type special_entry: callable """ self._cdf_file = cdf_file self.special_entry = special_entry def __getitem__(self, name): """Find an Attribute by name @param name: name of the Attribute to return @type name: str @return: attribute named L{name} @rtype: L{Attr} @raise KeyError: if there is no attribute named L{name} @raise CDFError: other errors in CDF library """ try: attrib = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attrib.global_scope() != self.global_scope: raise KeyError(name + ': no ' + self.attr_name + ' by that name.') return attrib def __setitem__(self, name, data): """Create an Attribute or change its entries @param name: name of Attribute to change @type name: str @param data: Entries to populate this Attribute with. Any existing Entries will be deleted! Another C{Attr} may be specified, in which case all its entries are copied. @type data: scalar, list, or L{Attr} """ if isinstance(data, AttrList): if name in self: del self[name] attr = self._get_or_create(name) for entryno in range(data.max_idx()): if data.has_entry(entryno): attr.new(data[entryno], data.type(entryno), entryno) else: attr = self._get_or_create(name) if isinstance(data, str_classes): data = [data] else: try: junk = len(data) except TypeError: data = [data] attr[:] = data del attr[len(data):] def __delitem__(self, name): """Delete an Attribute (and all its entries) @param name: name of Attribute to delete @type name: str """ try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) attr._delete() def __iter__(self, current=0): """Iterates over all Attr in this CDF or variable Returns name of one L{Attr} at a time until reaches the end. @note: Returned in number order. 
""" count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) while current < count.value: candidate = self.AttrType(self._cdf_file, current) if candidate.global_scope() == self.global_scope: if self.special_entry is None or \ candidate.has_entry(self.special_entry()): if str == bytes: value = yield(candidate._name) else: value = yield(candidate._name.decode()) if value != None: current = self[value].number() current += 1 def __repr__(self): """Returns representation of attribute list Cannot return anything that can be eval'd to create a copy of the list, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<' + self.__class__.__name__ + ':\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute list This is an 'informal' representation in that it cannot be evaluated directly to create an L{AttrList}. @return: all the data in this list of attributes @rtype: str """ if self._cdf_file._opened: return '\n'.join([key + ': ' + ( ('\n' + ' ' * (len(key) + 2)).join( [str(value[i]) + ' [' + lib.cdftypenames[value.type(i)] + ']' for i in range(value.max_idx() + 1) if value.has_entry(i)]) if isinstance(value, Attr) else str(value) + ' [' + lib.cdftypenames[self.type(key)] + ']' ) for (key, value) in sorted(self.items())]) else: if isinstance(self._cdf_file.pathname, str): return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname) else: return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname.decode('ascii')) def clone(self, master, name=None, new_name=None): """ Clones another attribute list, or one attribute from it, into this list. Parameters ========== master : AttrList the attribute list to copy from. This can be any dict-like object. Other Parameters ================ name : str (optional) name of attribute to clone (default: clone entire list) new_name : str (optional) name of the new attribute, default ``name`` """ if name is None: self._clone_list(master) else: self._clone_attr(master, name, new_name) def copy(self): """ Create a copy of this attribute list Returns ======= out : dict copy of the entries for all attributes in this list """ return dict((key, value[:] if isinstance(value, Attr) else value) for (key, value) in self.items()) def new(self, name, data=None, type=None): """ Create a new Attr in this AttrList Parameters ========== name : str name of the new Attribute Other Parameters ================ data data to put into the first entry in the new Attribute type CDF type of the first entry from :mod:`~pycdf.const`. Only used if data are specified. Raises ====== KeyError : if the name already exists in this list """ if name in self: raise KeyError(name + ' already exists.') #A zAttr without an Entry in this zVar will be a "get" not "create" attr = self._get_or_create(name) if data is not None: if self.special_entry is None: attr.new(data, type) else: attr.new(data, type, self.special_entry()) def rename(self, old_name, new_name): """ Rename an attribute in this list Renaming a zAttribute renames it for *all* zVariables in this CDF! Parameters ========== old_name : str the current name of the attribute new_name : str the new name of the attribute """ AttrList.__getitem__(self, old_name).rename(new_name) def from_dict(self, in_dict): """ Fill this list of attributes from a dictionary .. deprecated:: 0.1.5 Use :meth:`~pycdf.AttrList.clone` instead; it supports cloning from dictionaries. 
Parameters ========== in_dict : dict Attribute list is populated entirely from this dictionary; all existing attributes are deleted. """ warnings.warn("from_dict is deprecated and will be removed. Use clone.", DeprecationWarning) for k in in_dict: self[k] = in_dict[k] for k in list(self): if not k in in_dict: del self[k] def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{AttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name self[new_name] = master[name] def _clone_list(self, master): """Clones this attribute list from another @param master: the attribute list to copy from @type master: L{AttrList} """ for name in master: self._clone_attr(master, name) for name in list(self): #Can't iterate over a list we're changing if not name in master: del self[name] def _get_or_create(self, name): """Retrieve L{Attr} or create it if it doesn't exist @param name: name of the attribute to look up or create @type name: str @return: attribute with this name @rtype: L{Attr} """ attr = None try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status != const.NO_SUCH_ATTR: raise if attr is None: attr = self.AttrType(self._cdf_file, name, True) elif attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) return attr class gAttrList(AttrList): """ Object representing *all* the gAttributes in a CDF. Normally accessed as an attribute of an open :class:`CDF`: >>> global_attribs = cdffile.attrs Appears as a dictionary: keys are attribute names; each value is an attribute represented by a :class:`gAttr` object. To access the global attribute TEXT: >>> text_attr = cdffile.attrs['TEXT'] See Also ======== :class:`AttrList` """ AttrType = gAttr attr_name = 'gAttribute' global_scope = True def __len__(self): """ Number of gAttributes in this CDF Returns ======= out : int number of gAttributes in the CDF """ count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMgATTRS_, ctypes.byref(count)) return count.value class zAttrList(AttrList): """Object representing *all* the zAttributes in a zVariable. Normally accessed as an attribute of a :class:`Var` in an open CDF: >>> epoch_attribs = cdffile['Epoch'].attrs Appears as a dictionary: keys are attribute names, values are the value of the zEntry associated with the appropriate zVariable. Each vAttribute in a CDF may only have a *single* entry associated with each variable. The entry may be a string, a single numerical value, or a series of numerical values. Entries with multiple values are returned as an entire list; direct access to the individual elements is not possible. Example: finding the first dependency of (ISTP-compliant) variable ``Flux``: >>> print cdffile['Flux'].attrs['DEPEND_0'] zAttributes are shared among zVariables, one zEntry allowed per zVariable. (pyCDF hides this detail.) Deleting the last zEntry for a zAttribute will delete the underlying zAttribute. zEntries are created and destroyed by the usual dict methods on the zAttrlist: >>> epoch_attribs['new_entry'] = [1, 2, 4] #assign a list to new zEntry >>> del epoch_attribs['new_entry'] #delete the zEntry The type of the zEntry is guessed from data provided. 
The type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing zEntry corresponding to this zVar #. other zEntries in this zAttribute #. the type of this zVar #. data-matching constraints described in :py:meth:`CDF.new` See Also ======== :class:`AttrList` """ AttrType = zAttr attr_name = 'zAttribute' global_scope = False def __init__(self, zvar): """Initialize the attribute collection @param zvar: zVariable these attributes are in @param zvar: :py:class:`pycdf.Var` """ super(zAttrList, self).__init__(zvar.cdf_file, zvar._num) self._zvar = zvar def __getitem__(self, name): """Find an zEntry by name @param name: name of the zAttribute to return @type name: str @return: attribute named L{name} @rtype: L{zAttr} @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if attrib.has_entry(zvar_num): attrib._raw = self._zvar._raw return attrib[zvar_num] else: raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) def __delitem__(self, name): """Delete an zEntry by name @param name: name of the zEntry to delete @type name: str @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library @note: If this is the only remaining entry, the Attribute will be deleted. """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(str(name) + ': no such attribute for variable ' + str(self._zvar._name)) del attrib[zvar_num] if len(attrib) == 0: attrib._delete() def __setitem__(self, name, data): """Sets a zEntry by name The type of the zEntry is guessed from L{data}. The type is chosen to match the data; subject to that constraint, it will try to match (in order): 1. existing zEntry corresponding to this zVar 2. other zEntries in this zAttribute 3. the type of this zVar 4. data-matching constraints described in L{_Hyperslice.types} @param name: name of zAttribute; zEntry for this zVariable will be set in zAttribute by this name @type name: str @raise CDFError: errors in CDF library @raise ValueError: if unable to find a valid CDF type matching L{data}, or if L{data} is the wrong dimensions. """ try: attr = super(zAttrList, self).__getitem__(name) except KeyError: attr = zAttr(self._cdf_file, name, True) attr._raw = self._zvar._raw attr[self._zvar._num()] = data def __len__(self): """Number of zAttributes in this variable @return: number of zAttributes in the CDF which have entries for this variable. @rtype: int """ length = 0 count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) current = 0 while current < count.value: candidate = zAttr(self._cdf_file, current) if not candidate.global_scope(): if candidate.has_entry(self._zvar._num()): length += 1 current += 1 return length def type(self, name, new_type=None): """Find or change the CDF type of a zEntry in this zVar @param name: name of the zAttr to check or change @type name: str @param new_type: type to change it to, see :py:mod:`pycdf.const` @type new_type: ctypes.c_long @return: CDF variable type, see :py:mod:`pycdf.const` @rtype: int @note: If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) return attrib.type(zvar_num, new_type) def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{zAttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name if new_name in self: del self[new_name] self.new(new_name, master[name], master.type(name) if hasattr(master, 'type') else None)
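The zEntry handling above can be exercised through ordinary dict-style operations on a variable's ``attrs``. A minimal sketch, assuming an open writable CDF ``cdffile`` containing a zVariable ``'Flux'`` (both hypothetical names):

>>> attrs = cdffile['Flux'].attrs          # zAttrList for this zVariable
>>> attrs['DEPEND_0'] = 'Epoch'            # set (or overwrite) the zEntry for Flux
>>> attrs.type('DEPEND_0')                 # CDF type chosen per the matching rules above
>>> del attrs['DEPEND_0']                  # deleting the last zEntry deletes the zAttribute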
__init__
Open or create a CDF file.

Parameters
==========
pathname : string
    name of the file to open or create
masterpath : string
    name of the master CDF file to use in creating a new file. If not
    provided, an existing file is opened; if provided but evaluates to
    ``False`` (e.g., ``''``), an empty new CDF is created.
create : bool
    Create a new CDF even if masterpath isn't provided
readonly : bool
    Open the CDF read-only. Default True if opening an existing CDF;
    False if creating a new one.

Raises
======
CDFError
    if CDF library reports an error
CDFWarning
    if CDF library reports a warning and interpreter is set to error
    on warnings.

Examples
========
Open a CDF by creating a CDF object, e.g.:

>>> cdffile = pycdf.CDF('cdf_filename.cdf')

Be sure to :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save` when done.
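A minimal sketch of the open/create combinations these parameters describe (all file names hypothetical):

>>> existing = pycdf.CDF('existing_file.cdf')               # open existing file, read-only by default
>>> from_master = pycdf.CDF('new_file.cdf', 'master.cdf')   # create a new CDF from a master file
>>> empty = pycdf.CDF('empty_file.cdf', '')                 # masterpath evaluates False: empty new CDF
>>> empty.close()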
#!/usr/bin/env python # -*- coding: utf-8 -*- """ das developers note: This a is modification of the original SpacePy pycdf package. All refereneces to the greater spacepy package have been removed to create a small standalone module. --cwp 2018-10-18 The libcdf.so location code has been changed to find the version installed in anaconda. --cwp 2020-04-06 This package provides a Python interface to the Common Data Format (CDF) library used for many NASA missions, available at http://cdf.gsfc.nasa.gov/. It is targeted at Python 2.6+ and should work without change on either Python 2 or Python 3. The interface is intended to be 'pythonic' rather than reproducing the C interface. To open or close a CDF and access its variables, see the :class:`CDF` class. Accessing data within the variables is via the :class:`Var` class. The :data:`lib` object provides access to some routines that affect the functionality of the library in general. The :mod:`~pycdf.const` module contains constants useful for accessing the underlying library. Authors: Jon Niehof Institution: University of New Hampshire Contact: [email protected] Copyright 2010-2015 Los Alamos National Security, LLC. """ __contact__ = 'Jon Niehof, [email protected]' try: from collections.abc import MutableMapping, MutableSequence except ImportError: from collections import MutableMapping, MutableSequence import ctypes import ctypes.util import datetime import operator import os import os.path import shutil import sys import tempfile import warnings import weakref import numpy import numpy.ma #Import const AFTER library loaded, so failed load doesn't leave half-imported #from . import const try: str_classes = (str, bytes, unicode) except NameError: str_classes = (str, bytes) class Library(object): """ Abstraction of the base CDF C library and its state. Not normally intended for end-user use. An instance of this class is created at package load time as the :data:`~pycdf.lib` variable, providing access to the underlying C library if necessary. The CDF library itself is described in section 2.1 of the CDF user's guide, as well as the CDF C reference manual. Calling the C library directly requires knowledge of :mod:`ctypes`. Instantiating this object loads the C library, see :doc:`/pycdf` docs for details. .. autosummary:: ~Library.call ~Library.check_status ~Library.datetime_to_epoch ~Library.datetime_to_epoch16 ~Library.datetime_to_tt2000 ~Library.epoch_to_datetime ~Library.epoch_to_epoch16 ~Library.epoch_to_num ~Library.epoch_to_tt2000 ~Library.epoch16_to_datetime ~Library.epoch16_to_epoch ~Library.epoch16_to_tt2000 ~Library.set_backward supports_int8 ~Library.tt2000_to_datetime ~Library.tt2000_to_epoch ~Library.tt2000_to_epoch16 v_datetime_to_epoch v_datetime_to_epoch16 v_datetime_to_tt2000 v_epoch_to_datetime v_epoch_to_tt2000 v_epoch16_to_datetime v_epoch16_to_tt2000 v_tt2000_to_datetime v_tt2000_to_epoch v_tt2000_to_epoch16 libpath version .. automethod:: call .. automethod:: check_status .. automethod:: datetime_to_epoch .. automethod:: datetime_to_epoch16 .. automethod:: datetime_to_tt2000 .. automethod:: epoch_to_datetime .. automethod:: epoch_to_epoch16 .. automethod:: epoch_to_num .. automethod:: epoch_to_tt2000 .. automethod:: epoch16_to_datetime .. automethod:: epoch16_to_epoch .. automethod:: epoch16_to_tt2000 .. automethod:: set_backward .. attribute:: supports_int8 True if this library supports INT8 and TIME_TT2000 types; else False. .. automethod:: tt2000_to_datetime .. automethod:: tt2000_to_epoch .. 
automethod:: tt2000_to_epoch16 .. method:: v_datetime_to_epoch(datetime) A vectorized version of :meth:`datetime_to_epoch` which takes a numpy array of datetimes as input and returns an array of epochs. .. method:: v_datetime_to_epoch16(datetime) A vectorized version of :meth:`datetime_to_epoch16` which takes a numpy array of datetimes as input and returns an array of epoch16. .. method:: v_datetime_to_tt2000(datetime) A vectorized version of :meth:`datetime_to_tt2000` which takes a numpy array of datetimes as input and returns an array of TT2000. .. method:: v_epoch_to_datetime(epoch) A vectorized version of :meth:`epoch_to_datetime` which takes a numpy array of epochs as input and returns an array of datetimes. .. method:: v_epoch_to_tt2000(epoch) A vectorized version of :meth:`epoch_to_tt2000` which takes a numpy array of epochs as input and returns an array of tt2000s. .. method:: v_epoch16_to_datetime(epoch0, epoch1) A vectorized version of :meth:`epoch16_to_datetime` which takes a numpy array of epoch16 as input and returns an array of datetimes. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_epoch16_to_tt2000(epoch16) A vectorized version of :meth:`epoch16_to_tt2000` which takes a numpy array of epoch16 as input and returns an array of tt2000s. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_tt2000_to_datetime(tt2000) A vectorized version of :meth:`tt2000_to_datetime` which takes a numpy array of tt2000 as input and returns an array of datetimes. .. method:: v_tt2000_to_epoch(tt2000) A vectorized version of :meth:`tt2000_to_epoch` which takes a numpy array of tt2000 as input and returns an array of epochs. .. method:: v_tt2000_to_epoch16(tt2000) A vectorized version of :meth:`tt2000_to_epoch16` which takes a numpy array of tt2000 as input and returns an array of epoch16. .. attribute:: libpath The path where pycdf found the CDF C library, potentially useful in debugging. If this contains just the name of a file (with no path information), then the system linker found the library for pycdf. On Linux, ``ldconfig -p`` may be useful for displaying the system's library resolution. .. attribute:: version Version of the CDF library, (version, release, increment, subincrement) """ def __init__(self, libpath=None, library=None): """Load the CDF C library. Searches for the library in the order: 1. Appropriately-named file in CDF_LIB 2. Appropriately-named file in CDF_BASE 3. Standard library search path @raise CDFError: BAD_DATA_TYPE if can't map types properly """ if not 'CDF_TMP' in os.environ: os.environ['CDF_TMP'] = tempfile.gettempdir() if not library: if not libpath: self.libpath, self._library = self._find_lib() if self._library is None: raise Exception(( 'Cannot load CDF C library; checked {0}. 
' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(self.libpath))) else: self._library = ctypes.CDLL(libpath) self.libpath = libpath else: self._library = library self.libpath = libpath self._library.CDFlib.restype = ctypes.c_long #commonly used, so set it up here self._library.EPOCHbreakdown.restype = ctypes.c_long self._library.computeEPOCH.restype = ctypes.c_double self._library.computeEPOCH.argtypes = [ctypes.c_long] * 7 self._library.computeEPOCH16.restype = ctypes.c_double self._library.computeEPOCH16.argtypes = [ctypes.c_long] * 10 + \ [ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDFsetFileBackward'): self._library.CDFsetFileBackward.restype = None self._library.CDFsetFileBackward.argtypes = [ctypes.c_long] #Map old name to the 3.7.1+ name if not hasattr(self._library, 'computeTT2000') \ and hasattr(self._library, 'CDF_TT2000_from_UTC_parts'): self._library.computeTT2000 \ = self._library.CDF_TT2000_from_UTC_parts if hasattr(self._library, 'computeTT2000'): self._library.computeTT2000.restype = ctypes.c_longlong self._library.computeTT2000.argtypes = \ [ctypes.c_double] *9 #Map old name to the 3.7.1+ name if not hasattr(self._library, 'breakdownTT2000') \ and hasattr(self._library, 'CDF_TT2000_to_UTC_parts'): self._library.breakdownTT2000 \ = self._library.CDF_TT2000_to_UTC_parts if hasattr(self._library, 'breakdownTT2000'): self._library.breakdownTT2000.restype = None self._library.breakdownTT2000.argtypes = \ [ctypes.c_longlong] + [ctypes.POINTER(ctypes.c_double)] * 9 if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH'): self._library.CDF_TT2000_to_UTC_EPOCH.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH.argtypes = [ctypes.c_longlong] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH'): self._library.CDF_TT2000_from_UTC_EPOCH.restype = ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH.argtypes = [ctypes.c_double] if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH16'): self._library.CDF_TT2000_to_UTC_EPOCH16.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH16.argtypes = \ [ctypes.c_longlong, ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH16'): self._library.CDF_TT2000_from_UTC_EPOCH16.restype = \ ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH16.argtypes = \ [ctypes.POINTER(ctypes.c_double * 2)] #Get CDF version information ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) sub = ctypes.c_char(b' ') self.call(const.GET_, const.LIB_VERSION_, ctypes.byref(ver), const.GET_, const.LIB_RELEASE_, ctypes.byref(rel), const.GET_, const.LIB_INCREMENT_, ctypes.byref(inc), const.GET_, const.LIB_subINCREMENT_, ctypes.byref(sub)) ver = ver.value rel = rel.value inc = inc.value sub = sub.value self.version = (ver, rel, inc, sub) self._del_middle_rec_bug = ver < 3 or (ver == 3 and (rel < 4 or (rel == 4 and inc < 1))) self.supports_int8 = (ver > 3 or (ver == 3 and rel >=4)) self.cdftypenames = {const.CDF_BYTE.value: 'CDF_BYTE', const.CDF_CHAR.value: 'CDF_CHAR', const.CDF_INT1.value: 'CDF_INT1', const.CDF_UCHAR.value: 'CDF_UCHAR', const.CDF_UINT1.value: 'CDF_UINT1', const.CDF_INT2.value: 'CDF_INT2', const.CDF_UINT2.value: 'CDF_UINT2', const.CDF_INT4.value: 'CDF_INT4', const.CDF_UINT4.value: 'CDF_UINT4', const.CDF_INT8.value: 'CDF_INT8', const.CDF_FLOAT.value: 'CDF_FLOAT', const.CDF_REAL4.value: 'CDF_REAL4', const.CDF_DOUBLE.value: 'CDF_DOUBLE', const.CDF_REAL8.value: 'CDF_REAL8', const.CDF_EPOCH.value: 'CDF_EPOCH', 
const.CDF_EPOCH16.value: 'CDF_EPOCH16', const.CDF_TIME_TT2000.value: 'CDF_TIME_TT2000', } self.numpytypedict = {const.CDF_BYTE.value: numpy.int8, const.CDF_CHAR.value: numpy.int8, const.CDF_INT1.value: numpy.int8, const.CDF_UCHAR.value: numpy.uint8, const.CDF_UINT1.value: numpy.uint8, const.CDF_INT2.value: numpy.int16, const.CDF_UINT2.value: numpy.uint16, const.CDF_INT4.value: numpy.int32, const.CDF_UINT4.value: numpy.uint32, const.CDF_INT8.value: numpy.int64, const.CDF_FLOAT.value: numpy.float32, const.CDF_REAL4.value: numpy.float32, const.CDF_DOUBLE.value: numpy.float64, const.CDF_REAL8.value: numpy.float64, const.CDF_EPOCH.value: numpy.float64, const.CDF_EPOCH16.value: numpy.dtype((numpy.float64, 2)), const.CDF_TIME_TT2000.value: numpy.int64, } self.timetypes = [const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value] if not self.supports_int8: del self.cdftypenames[const.CDF_INT8.value] del self.numpytypedict[const.CDF_INT8.value] del self.cdftypenames[const.CDF_TIME_TT2000.value] del self.numpytypedict[const.CDF_TIME_TT2000.value] elif sys.platform.startswith('linux') \ and os.uname()[4].startswith('arm') \ and hasattr(self._library, 'computeTT2000') \ and self._library.computeTT2000( 2010, 1, 1, 0, 0, 0, 0, 0, 0) != 315576066184000000: #TT2000 call failed, so probably need to type-pun #double arguments to variadic functions. #Calling convention for non-variadic functions with floats #is unique, but convention for ints is same as variadic. #So type-pun arguments to integers to force that calling #convention. if ctypes.sizeof(ctypes.c_longlong) != \ ctypes.sizeof(ctypes.c_double): warnings.warn('ARM with unknown type sizes; ' 'TT2000 functions will not work.') else: self._library.computeTT2000.argtypes = \ [ctypes.c_longlong] * 9 c_ll_p = ctypes.POINTER(ctypes.c_longlong) if self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( 2010)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) != 315576066184000000: warnings.warn('ARM with unknown calling convention; ' 'TT2000 functions will not work.') self.datetime_to_tt2000 = self._datetime_to_tt2000_typepunned v_epoch16_to_datetime = numpy.frompyfunc( self.epoch16_to_datetime, 2, 1) self.v_epoch16_to_datetime = \ lambda x: v_epoch16_to_datetime(x[..., 0], x[..., 1]) self.v_epoch_to_datetime = numpy.frompyfunc( self.epoch_to_datetime, 1, 1) self.v_tt2000_to_datetime = numpy.frompyfunc( self.tt2000_to_datetime, 1, 1) self.v_datetime_to_epoch = numpy.vectorize( self.datetime_to_epoch, otypes=[numpy.float64]) v_datetime_to_epoch16 = numpy.frompyfunc( self.datetime_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. 
We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_datetime_to_epoch16(x): retval = numpy.require(v_datetime_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_datetime_to_epoch16 = _v_datetime_to_epoch16 self.v_datetime_to_tt2000 = numpy.vectorize( self.datetime_to_tt2000, otypes=[numpy.int64]) self.v_epoch_to_tt2000 = numpy.vectorize( self.epoch_to_tt2000, otypes=[numpy.int64]) self.v_tt2000_to_epoch = numpy.vectorize( self.tt2000_to_epoch, otypes=[numpy.float64]) v_epoch16_to_tt2000 = numpy.frompyfunc( self.epoch16_to_tt2000, 2, 1) self.v_epoch16_to_tt2000 = \ lambda x: v_epoch16_to_tt2000(x[..., 0], x[..., 1]) v_tt2000_to_epoch16 = numpy.frompyfunc( self.tt2000_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_tt2000_to_epoch16(x): retval = numpy.require(v_tt2000_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_tt2000_to_epoch16 = _v_tt2000_to_epoch16 if not self.supports_int8: self.datetime_to_tt2000 = self._bad_tt2000 self.tt2000_to_datetime = self._bad_tt2000 self.v_datetime_to_tt2000 = self._bad_tt2000 self.v_tt2000_to_datetime = self._bad_tt2000 self.epoch_to_tt2000 = self._bad_tt2000 self.v_epoch_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch = self._bad_tt2000 self.v_tt2000_to_epoch = self._bad_tt2000 self.epoch_16_to_tt2000 = self._bad_tt2000 self.v_epoch16_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch16 = self._bad_tt2000 self.v_tt2000_to_epoch16 = self._bad_tt2000 #Default to V2 CDF self.set_backward(True) @staticmethod def _find_lib(): """ Search for the CDF library Searches in likely locations for CDF libraries and attempts to load them. Stops at first successful load and, if fails, reports all the files that were tried as libraries. Returns ======= out : tuple This is either (path to library, loaded library) or, in the event of failure, (None, list of libraries tried) """ failed = [] for libpath in Library._lib_paths(): try: lib = ctypes.CDLL(libpath) except: failed.append(libpath) else: return libpath, lib return (failed, None) @staticmethod def _lib_paths(): """Find candidate paths for the CDF library Does not check that the library is actually in any particular directory, just returns a list of possible locations, in priority order. Returns ======= out : generator of str paths that look like the CDF library """ #What the library might be named names = { 'win32': ['cdf.dll'], 'darwin': ['libcdf.dylib', 'cdf.dylib', 'libcdf.so'], 'linux2': ['libcdf.so'], 'linux': ['libcdf.so'], } names = names.get(sys.platform, ['libcdf.so']) #All existing CDF-library-like paths within a directory search_dir = lambda x: \ [os.path.join(x, fname) for fname in names if os.path.exists(os.path.join(x, fname))] # Only use anaconda locations... # Defined during builds ... if 'PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['PREFIX'], 'lib')): yield p # defined when conda is activated ... 
if 'CONDA_PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'lib')): yield p # Special subdirectory for anaconda unix packages on windows if 'LIBRARY_BIN' in os.environ: for p in search_dir(os.environ['LIBRARY_BIN']): yield p ctypespath = ctypes.util.find_library( 'cdf.dll' if sys.platform == 'win32' else 'cdf') if ctypespath: yield ctypespath def check_status(self, status, ignore=()): """ Raise exception or warning based on return status of CDF call Parameters ========== status : int status returned by the C library Other Parameters ================ ignore : sequence of ctypes.c_long CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. (Default none). Raises ====== CDFError : if status < CDF_WARN, indicating an error Warns ===== CDFWarning : if CDF_WARN <= status < CDF_OK, indicating a warning. Returns ======= out : int status (unchanged) """ if status == const.CDF_OK or status in ignore: return status if status < const.CDF_WARN: raise CDFError(status) else: warning = CDFWarning(status) warning.warn() return status def call(self, *args, **kwargs): """ Call the CDF internal interface Passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with :meth:`check_status`. Terminal NULL is automatically added to args. Parameters ========== args : various, see :mod:`ctypes` Passed directly to the CDF library interface. Useful constants are defined in the :mod:`~pycdf.const` module. Other Parameters ================ ignore : sequence of CDF statuses sequence of CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. Returns ======= out : int CDF status from the library Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ if 'ignore' in kwargs: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) ), kwargs['ignore']) else: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) )) def set_backward(self, backward=True): """ Set backward compatibility mode for new CDFs Unless backward compatible mode is set, CDF files created by the version 3 library can not be read by V2. Parameters ========== backward : boolean Set backward compatible mode if True; clear it if False. Raises ====== ValueError : if backward=False and underlying CDF library is V2 """ if self.version[0] < 3: if not backward: raise ValueError( 'Cannot disable backward-compatible mode for CDF version 2.') else: return self._library.CDFsetFileBackward(const.BACKWARDFILEon if backward else const.BACKWARDFILEoff) def epoch_to_datetime(self, epoch): """ Converts a CDF epoch value to a datetime Parameters ========== epoch : float epoch value from CDF Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
See Also ======== v_epoch_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) self._library.EPOCHbreakdown(ctypes.c_double(epoch), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999000) else: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, msec.value * 1000) def datetime_to_epoch(self, dt): """ Converts a Python datetime to a CDF Epoch value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : float epoch corresponding to dt See Also ======== v_datetime_to_epoch """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) micro = dt.microsecond % 1000 if micro >= 500 and dt.year < 9999: dt += datetime.timedelta(0, 0, 1000) return self._library.computeEPOCH(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000)) def epoch16_to_datetime(self, epoch0, epoch1): """ Converts a CDF epoch16 value to a datetime .. note:: The call signature has changed since SpacePy 0.1.2. Formerly this method took a single argument with two values; now it requires two arguments (one for each value). To convert existing code, replace ``epoch16_to_datetime(epoch)`` with ``epoch16_to_datetime(*epoch)``. Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. See Also ======== v_epoch16_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) usec = ctypes.c_long(0) nsec = ctypes.c_long(0) psec = ctypes.c_long(0) self._library.EPOCH16breakdown((ctypes.c_double * 2)(epoch0, epoch1), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec), ctypes.byref(psec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) micro = int(float(msec.value) * 1000 + float(usec.value) + float(nsec.value) / 1000 + float(psec.value) / 1e6 + 0.5) if micro < 1000000: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_epoch16(self, dt): """ Converts a Python datetime to a CDF Epoch16 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : list of float epoch16 corresponding to dt See Also ======== v_datetime_to_epoch16 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) #Default to "illegal epoch" epoch16 = (ctypes.c_double * 2)(-1., -1.) 
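        # Sketch of what the two halves hold, inferred from epoch_to_epoch16 and
        # epoch16_to_epoch below: epoch16[0] is whole seconds of the encoded moment,
        # epoch16[1] the remaining picoseconds. computeEPOCH16 fills the buffer in
        # place; a truthy return value is treated as failure, in which case the
        # illegal-epoch pair (-1., -1.) is returned to the caller.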
if self._library.computeEPOCH16(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0, 0, epoch16): return (-1., -1.) #Failure, so illegal epoch return (epoch16[0], epoch16[1]) def epoch_to_epoch16(self, epoch): """ Converts a CDF EPOCH to a CDF EPOCH16 value Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : (double, double) EPOCH16 corresponding to epoch """ e = numpy.require(epoch, numpy.float64) s = numpy.trunc(e / 1000.0) #ugly numpy stuff, probably a better way.... res = numpy.hstack((s, (e - s * 1000.0) * 1e9)) if len(res) <= 2: return res newshape = list(res.shape[0:-2]) newshape.append(res.shape[-1] // 2) newshape.append(2) return numpy.rollaxis(res.reshape(newshape), -1, -2) def epoch_to_num(self, epoch): """ Convert CDF EPOCH to matplotlib number. Same output as :func:`~matplotlib.dates.date2num` and useful for plotting large data sets without converting the times through datetime. Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : double Floating point number representing days since 0001-01-01. """ #date2num day 1 is 1/1/1 00UT #epoch 1/1/1 00UT is 31622400000.0 (millisecond) return (epoch - 31622400000.0) / (24 * 60 * 60 * 1000.0) + 1.0 def epoch16_to_epoch(self, epoch16): """ Converts a CDF EPOCH16 to a CDF EPOCH value Parameters ========== epoch16 : (double, double) EPOCH16 to convert. Lists and numpy arrays are acceptable. LAST dimension should be 2: the two pairs of EPOCH16 Returns ======= out : double EPOCH corresponding to epoch16 """ e = numpy.require(epoch16, numpy.float64) return e[..., 0] * 1000.0 + numpy.round(e[..., 1] / 1e9) def tt2000_to_datetime(self, tt2000): """ Converts a CDF TT2000 value to a datetime .. note:: Although TT2000 values support leapseconds, Python's datetime object does not. Any times after 23:59:59.999999 will be truncated to 23:59:59.999999. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
See Also ======== v_tt2000_to_datetime """ yyyy = ctypes.c_double(0) mm = ctypes.c_double(0) dd = ctypes.c_double(0) hh = ctypes.c_double(0) min = ctypes.c_double(0) sec = ctypes.c_double(0) msec = ctypes.c_double(0) usec = ctypes.c_double(0) nsec = ctypes.c_double(0) self._library.breakdownTT2000( ctypes.c_longlong(tt2000), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) sec = int(sec.value) if sec >= 60: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), 59, 999999) micro = int(msec.value * 1000 + usec.value + nsec.value / 1000 + 0.5) if micro < 1000000: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_tt2000(self, dt): """ Converts a Python datetime to a CDF TT2000 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0) def _datetime_to_tt2000_typepunned(self, dt): """ Converts a Python datetime to a CDF TT2000 value Typepunned version that passes doubles as longlongs, to get around ARM calling convention oddness. Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ c_ll_p = ctypes.POINTER(ctypes.c_longlong) if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( dt.year)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.month)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.day)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.hour)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.minute)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.second)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond // 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond % 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) def epoch_to_tt2000(self, epoch): """ Converts a CDF EPOCH to a CDF TT2000 value Parameters ========== epoch : double EPOCH to convert Returns ======= out : int tt2000 corresponding to epoch See Also ======== v_epoch_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH(epoch) def tt2000_to_epoch(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH .. note:: Although TT2000 values support leapseconds, CDF EPOCH values do not. 
Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double EPOCH corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch """ return self._library.CDF_TT2000_to_UTC_EPOCH(tt2000) def epoch16_to_tt2000(self, epoch0, epoch1): """ Converts a CDF epoch16 value to TT2000 .. note:: Because TT2000 does not support picoseconds, the picoseconds value in epoch is ignored (i.e., truncated.) Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : long TT2000 corresponding to epoch. See Also ======== v_epoch16_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH16( (ctypes.c_double * 2)(epoch0, epoch1)) def tt2000_to_epoch16(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH16 .. note:: Although TT2000 values support leapseconds, CDF EPOCH16 values do not. Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double, double EPOCH16 corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch16 """ #Default to "illegal epoch" if isn't populated epoch16 = (ctypes.c_double * 2)(-1., -1.) if self._library.CDF_TT2000_to_UTC_EPOCH16(tt2000, epoch16): return (-1., -1.) #Failure; illegal epoch return (epoch16[0], epoch16[1]) def _bad_tt2000(*args, **kwargs): """Convenience function for complaining that TT2000 not supported""" raise NotImplementedError( 'TT2000 functions require CDF library 3.4.0 or later') def download_library(): """Download and install the CDF library""" if sys.platform != 'win32': raise NotImplementedError( 'CDF library install only supported on Windows') try: import html.parser as HTMLParser except ImportError: import HTMLParser #https://stackoverflow.com/questions/1713038/super-fails-with-error-typeerror-argument-1-must-be-type-not-classobj class LinkParser(HTMLParser.HTMLParser, object): def __init__(self, *args, **kwargs): self.links_found = [] super(LinkParser, self).__init__(*args, **kwargs) def handle_starttag(self, tag, attrs): if tag != 'a' or attrs[0][0] != 'href': return self.links_found.append(attrs[0][1]) import re import subprocess try: import urllib.request as u except ImportError: import urllib as u # Removed reference to spacepy #import spacepy #if spacepy.config.get('user_agent', None): # class AppURLopener(u.FancyURLopener): # version = spacepy.config['user_agent'] # u._urlopener = AppURLopener() baseurl = 'https://spdf.sci.gsfc.nasa.gov/pub/software/cdf/dist/' url = u.urlopen(baseurl) listing = url.read() url.close() p = LinkParser() p.feed(listing) cdfdist = [l for l in p.links_found if re.match('^cdf3\d_\d(?:_\d)?/$', l)] if not cdfdist: raise RuntimeError( "Couldn't find CDF distribution directory to download") cdfdist.sort(key=lambda x: x.rstrip('/').split('_')) cdfverbase = cdfdist[-1].rstrip('/') instfname = cdfverbase + ('_0' if cdfverbase.count('_') == 1 else '') + \ '-setup-{0}.exe'.format(len('%x' % sys.maxsize)*4) insturl = baseurl + cdfverbase + '/windows/' + instfname tmpdir = tempfile.mkdtemp() try: fname, status = u.urlretrieve(insturl, os.path.join(tmpdir, instfname)) subprocess.check_call([fname, '/install', '/q1'], shell=False) finally: shutil.rmtree(tmpdir) _libpath, _library = Library._find_lib() 
if _library is None: raise Exception(('Cannot load CDF C library; checked {0}. ' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(_libpath))) from . import const lib = Library(_libpath, _library) """Module global library object. Initialized at module load time so all classes have ready access to the CDF library and a common state. E.g.: >>> import pycdf >>> pycdf.lib.version (3, 3, 0, ' ') """ class CDFException(Exception): """ Base class for errors or warnings in the CDF library. Not normally used directly, but in subclasses :class:`CDFError` and :class:`CDFWarning`. Error messages provided by this class are looked up from the underlying C library. """ def __init__(self, status): """ Create a CDF Exception Uses CDF C library to look up an appropriate error message. Parameters ========== status : ctypes.c_long CDF status """ self.status = status self.string = 'CDF error ' + repr(status) + ', unable to get details.' message = ctypes.create_string_buffer(const.CDF_STATUSTEXT_LEN + 1) try: retval = lib._library.CDFlib(const.SELECT_, const.CDF_STATUS_, ctypes.c_long(status), const.GET_, const.STATUS_TEXT_, message, const.NULL_) if retval == const.CDF_OK: if isinstance(message.value, str): self.string = message.value elif isinstance(message.value, bytes): self.string = message.value.decode() except: pass def __str__(self): """ Error string associated with the library error. Returns ======= out : str Error message from the CDF library. """ return self.string class CDFError(CDFException): """Raised for an error in the CDF library.""" pass class CDFWarning(CDFException, UserWarning): """Used for a warning in the CDF library.""" def warn(self, level=4): """ Issues a warning based on the information stored in this exception Intended for use in check_status or similar wrapper function. Other Parameters ================ level : int optional (default 4), how far up the stack the warning should be reported. Passed directly to :func:`warnings.warn`. """ warnings.warn(self, self.__class__, level) class EpochError(Exception): """Used for errors in epoch routines""" pass def _compress(obj, comptype=None, param=None): """Set or check the compression of a :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param obj: object on which to set or check compression @type obj: :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param comptype: type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :py:mod:`pycdf.const`. If not specified, will not change compression. @type comptype: ctypes.c_long @param param: Compression parameter, see CDF CRM 4.10 and :py:mod:`pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
@type param: ctypes.c_long @return: (comptype, param) currently in effect @rtype: tuple """ if isinstance(obj, CDF): COMPRESSION_ = const.CDF_COMPRESSION_ elif isinstance(obj, Var): COMPRESSION_ = const.zVAR_COMPRESSION_ else: raise ValueError('Must specify a CDF or Var type.') validparams = {const.NO_COMPRESSION.value: [ctypes.c_long(0)], const.RLE_COMPRESSION.value: [const.RLE_OF_ZEROs], const.HUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.AHUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.GZIP_COMPRESSION.value: [ctypes.c_long(5), ctypes.c_long(1), ctypes.c_long(2), ctypes.c_long(3), ctypes.c_long(4), ctypes.c_long(6), ctypes.c_long(7), ctypes.c_long(8), ctypes.c_long(9), ], } comptypes = [const.NO_COMPRESSION, const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION, const.GZIP_COMPRESSION] comptypevalues = [i.value for i in comptypes] if comptype != None: if not hasattr(comptype, 'value'): comptype = ctypes.c_long(comptype) if param is None: if not comptype.value in validparams: raise CDFError(const.BAD_COMPRESSION) param = validparams[comptype.value][0] paramlist = (ctypes.c_long * 1)(param) obj._call(const.PUT_, COMPRESSION_, comptype, paramlist) params = (ctypes.c_long * const.CDF_MAX_PARMS)(*([0] * const.CDF_MAX_PARMS)) comptype = ctypes.c_long(0) percent = ctypes.c_long(0) obj._call(const.GET_, COMPRESSION_, ctypes.byref(comptype), ctypes.byref(params), ctypes.byref(percent)) param = params[0] if not comptype.value in comptypevalues: raise CDFError(const.BAD_COMPRESSION) validparamvalues = [i.value for i in validparams[comptype.value]] if not param in validparamvalues: raise CDFError(const.BAD_COMPRESSION_PARM) comptype = comptypes[comptypevalues.index(comptype.value)] if comptype in (const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION): param = validparams[comptype.value][validparamvalues.index(param)] return (comptype, param) class CDF(MutableMapping): """ Python object representing a CDF file. Open or create a CDF file by creating an object of this class. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. A readonly CDF with many variables may be slow to close. See :meth:`readonly`. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :meth:`close` or :meth:`save` when done. .. note:: Existing CDF files are opened read-only by default, see :meth:`readonly` to change. CDF supports the `with <http://docs.python.org/tutorial/inputoutput.html#methods-of-file-objects>`_ keyword, like other file objects, so: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... #do brilliant things with the CDF will open the CDF, execute the indented statements, and close the CDF when finished or when an error occurs. The `python docs <http://docs.python.org/reference/compound_stmts.html#with>`_ include more detail on this 'context manager' ability. 
CDF objects behave like a python `dictionary <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_, where the keys are names of variables in the CDF, and the values, :class:`Var` objects. As a dictionary, they are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_ and it is easy to loop over all of the variables in a file. Some examples: #. List the names of all variables in the open CDF ``cdffile``: >>> cdffile.keys() >>> for k in cdffile: #Alternate ... print(k) #. Get a :class:`Var` object for the variable named ``Epoch``: >>> epoch = cdffile['Epoch'] #. Determine if a CDF contains a variable named ``B_GSE``: >>> if 'B_GSE' in cdffile: ... print('B_GSE is in the file') ... else: ... print('B_GSE is not in the file') #. Find how many variables are in the file: >>> print(len(cdffile)) #. Delete the variable ``Epoch`` from the open CDF file ``cdffile``: >>> del cdffile['Epoch'] #. Display a summary of variables and types in open CDF file ``cdffile``: >>> print(cdffile) #. Open the CDF named ``cdf_filename.cdf``, read *all* the data from all variables into dictionary ``data``, and close it when done or if an error occurs: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... data = cdffile.copy() This last example can be very inefficient as it reads the entire CDF. Normally it's better to treat the CDF as a dictionary and access only the data needed, which will be pulled transparently from disc. See :class:`Var` for more subtle examples. Potentially useful dictionary methods and related functions: - `in <http://docs.python.org/reference/expressions.html#in>`_ - `keys <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_ - :py:func:`len` - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - :py:func:`sorted` - :py:func:`~spacepy.toolbox.dictree` The CDF user's guide section 2.2 has more background information on CDF files. The :attr:`~CDF.attrs` Python attribute acts as a dictionary referencing CDF attributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`gAttrList` for more on the dictionary of global attributes. Creating a new CDF from a master (skeleton) CDF has similar syntax to opening one: >>> cdffile = pycdf.CDF('cdf_filename.cdf', 'master_cdf_filename.cdf') This creates and opens ``cdf_filename.cdf`` as a copy of ``master_cdf_filename.cdf``. Using a skeleton CDF is recommended over making a CDF entirely from scratch, but this is possible by specifying a blank master: >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') When CDFs are created in this way, they are opened read-write, see :py:meth:`readonly` to change. By default, new CDFs (without a master) are created in version 2 (backward-compatible) format. To create a version 3 CDF, use :meth:`Library.set_backward`: >>> pycdf.lib.set_backward(False) >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') Add variables by direct assignment, which will automatically set type and dimension based on the data provided: >>> cdffile['new_variable_name'] = [1, 2, 3, 4] or, if more control is needed over the type and dimensions, use :py:meth:`new`. Although it is supported to assign Var objects to Python variables for convenience, there are some minor pitfalls that can arise when changing a CDF that will not affect most users. 
This is only a concern when assigning a zVar object to a Python variable, changing the CDF through some other variable, and then trying to use the zVar object via the originally assigned variable. Deleting a variable: >>> var = cdffile['Var1'] >>> del cdffile['Var1'] >>> var[0] #fail, no such variable Renaming a variable: >>> var = cdffile['Var1'] >>> cdffile['Var1'].rename('Var2') >>> var[0] #fail, no such variable Renaming via the same variable works: >>> var = cdffile['Var1'] >>> var.rename('Var2') >>> var[0] #succeeds, aware of new name Deleting a variable and then creating another variable with the same name may lead to some surprises: >>> var = cdffile['Var1'] >>> var[...] = [1, 2, 3, 4] >>> del cdffile['Var1'] >>> cdffile.new('Var1', data=[5, 6, 7, 8] >>> var[...] [5, 6, 7, 8] .. autosummary:: ~CDF.attr_num ~CDF.attrs ~CDF.add_attr_to_cache ~CDF.add_to_cache ~CDF.backward ~CDF.checksum ~CDF.clear_attr_from_cache ~CDF.clear_from_cache ~CDF.clone ~CDF.close ~CDF.col_major ~CDF.compress ~CDF.copy ~CDF.from_data ~CDF.new ~CDF.raw_var ~CDF.readonly ~CDF.save ~CDF.var_num ~CDF.version .. attribute:: CDF.attrs Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. .. attribute:: CDF.backward True if this CDF was created in backward-compatible mode (for opening with CDF library before 3.x) .. automethod:: add_to_cache .. automethod:: add_attr_to_cache .. automethod:: attr_num .. automethod:: checksum .. automethod:: clear_from_cache .. automethod:: clear_attr_from_cache .. automethod:: clone .. automethod:: close .. automethod:: col_major .. automethod:: compress .. automethod:: copy .. automethod:: from_data .. automethod:: new .. automethod:: raw_var .. automethod:: readonly .. automethod:: save .. automethod:: var_num .. automethod:: version """ # MASKED: __init__ function (lines 1605-1665) def __del__(self): """Destructor; called when CDF object is destroyed. Close CDF file if there is still a valid handle. .. note:: To avoid data loss, explicitly call :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save`. """ if self._opened: self.close() def __delitem__(self, name): """Delete a zVariable in this CDF, by name or number Parameters ========== name : string or int Name or number of the CDF variable .. note: Variable numbers may change if variables are added or removed. Examples ======== Delete the variable ``Epoch`` from the open CDF file ``cdffile``. >>> del cdffile['Epoch'] """ self[name]._delete() def __enter__(self): """Context manager entrance function.""" return self def __exit__(self, type, value, traceback): """Context manager exit function. Close CDF file. """ self.close() def __getitem__(self, name): """Gets a zVariable in this CDF, by name or number The CDF acts like a dict @param name: Name or number of the CDF variable @type name: string or int @return: CDF variable named or numbered L{name} @rtype: :py:class:`pycdf.Var` @raise KeyError: for pretty much any problem in lookup @note: variable numbers may change if variables are added or removed. """ try: return Var(self, name) except CDFException as e: raise KeyError('{0}: {1}'.format(name, e)) def __setitem__(self, name, data): """Writes data to a zVariable in this CDF If the zVariable does not exist, will create one matching L{data}. If it does exist, will attempt to write L{data} to it without changing the type or dimensions. 
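A minimal sketch (assumes an open, writable CDF ``cdffile``; the variable name is illustrative): >>> cdffile['B_GSM'] = [[1., 2., 3.], [4., 5., 6.]] #creates the zVariable if it does not exist, otherwise writes the data to it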
@param name: name or number of the variable to write @type name: str or int @param data: data to write, or a :py:class:`pycdf.Var` to copy """ if isinstance(data, Var): self.clone(data, name) elif name in self: self[name][...] = data if hasattr(data, 'attrs'): self[name].attrs.clone(data.attrs) else: self.new(name, data) def __iter__(self, current = 0): """Iterates over zVars in CDF Iterators for dicts return keys @note: Returned in variable-number order """ while current < self.__len__(): name = self[current].name() value = (yield name) if value is None: current += 1 else: current = self[value]._num() current += 1 def __len__(self): """Implements 'length' of CDF (number of zVars) @return: number of zVars in the CDF @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, const.CDF_NUMzVARS_, ctypes.byref(count)) return count.value def __contains__(self, key): """Determines whether a particular variable name is in the CDF @note: Essentially an efficiency function; L{__iter__} is called if this isn't defined @param key: key/variable name to check @type key: string @return: True if L{key} is the name of a variable in CDF, else False @rtype: Boolean """ try: foo = self[key] return True except KeyError as e: expected = str(key) + \ ": NO_SUCH_VAR: Named variable not found in this CDF." if expected in e.args: return False raise def __repr__(self): """Returns representation of CDF Cannot return anything that can be eval'd to create a copy of the CDF, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<CDF:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the CDF This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.CDF`, just the names, types, and sizes of all variables. (Attributes are not listed.) @return: description of the variables in the CDF @rtype: str """ if self._opened: return '\n'.join([key + ': ' + str(value) for (key, value) in sorted(self.items())]) #can get away with this sort because second value in tuple isn't #compared unless first are different, and variable name is unique. else: if isinstance(self.pathname, str): return 'Closed CDF {0}'.format(self.pathname) else: return 'Closed CDF {0}'.format(self.pathname.decode('ascii')) def _open(self, readonly=True): """Opens the CDF file (called on init) Will open an existing CDF file read/write. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.OPEN_, const.CDF_, self.pathname, ctypes.byref(self._handle)) self._opened = True if readonly: #Default is RW self.readonly(readonly) def _create(self): """Creates (and opens) a new CDF file Created at ``pathname``. Assumes zero-dimension r variables Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.CREATE_, const.CDF_, self.pathname, ctypes.c_long(0), (ctypes.c_long * 1)(0), ctypes.byref(self._handle)) self._opened = True def _from_master(self, master_path): """Creates a new CDF from a master CDF file ``master_path`` is copied to ``pathname`` and opened. 
Parameters ========== master_path : string location of the master CDF file Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ if os.path.exists(self.pathname): raise CDFError(const.CDF_EXISTS) shutil.copy2(master_path, self.pathname) self._open(False) def _call(self, *args, **kwargs): """Select this CDF as current and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. Parameters ========== args : various, see :py:mod:`ctypes`. Passed directly to the CDF library interface. Useful constants are defined in the :doc:`const <pycdf_const>` module of this package. Returns ======= out : ctypes.c_long CDF status from the library .. note: Terminal NULL_ is automatically added to ``args``. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. """ return lib.call(const.SELECT_, const.CDF_, self._handle, *args, **kwargs) def clone(self, zVar, name=None, data=True): """ Clone a zVariable (from another CDF or this) into this CDF Parameters ========== zVar : :py:class:`Var` variable to clone Other Parameters ================ name : str Name of the new variable (default: name of the original) data : boolean (optional) Copy data, or only type, dimensions, variance, attributes? (default: True, copy data as well) Returns ======= out : :py:class:`Var` The newly-created zVar in this CDF """ if name is None: name = zVar.name() if name in self: del self[name] self.new(name, type=zVar.type(), recVary=zVar.rv(), dimVarys=zVar.dv(), dims=zVar._dim_sizes(), n_elements=zVar._nelems()) self[name].compress(*zVar.compress()) self[name].attrs.clone(zVar.attrs) if data: r = zVar._raw zVar._raw = True self.raw_var(name)[...] = zVar[...] zVar._raw = r return zVar def col_major(self, new_col=None): """ Finds the majority of this CDF file Other Parameters ================ new_col : boolean Specify True to change to column-major, False to change to row major, or do not specify to check the majority rather than changing it. (default is check only) Returns ======= out : boolean True if column-major, false if row-major """ if new_col != None: new_maj = const.COLUMN_MAJOR if new_col else const.ROW_MAJOR self._call(const.PUT_, const.CDF_MAJORITY_, new_maj) maj = ctypes.c_long(0) self._call(const.GET_, const.CDF_MAJORITY_, ctypes.byref(maj)) if not maj.value in (const.ROW_MAJOR.value, const.COLUMN_MAJOR.value): raise CDFError(const.BAD_MAJORITY) return maj.value == const.COLUMN_MAJOR.value def readonly(self, ro=None): """ Sets or check the readonly status of this CDF If the CDF has been changed since opening, setting readonly mode will have no effect. .. note:: Closing a CDF that has been opened readonly, or setting readonly False, may take a substantial amount of time if there are many variables in the CDF, as a (potentially large) cache needs to be cleared. Consider specifying ``readonly=False`` when opening the file if this is an issue. However, this may make some reading operations slower. Other Parameters ================ ro : Boolean True to set the CDF readonly, False to set it read/write, or leave out to check only. 
Returns ======= out : Boolean True if CDF is read-only, else False Raises ====== CDFError : if bad mode is set """ if ro == True: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYon) elif ro == False: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYoff) mode = ctypes.c_long(0) self._call(const.CONFIRM_, const.CDF_READONLY_MODE_, ctypes.byref(mode)) if mode.value == const.READONLYon.value: return True elif mode.value == const.READONLYoff.value: return False else: raise CDFError(const.BAD_READONLY_MODE.value) def checksum(self, new_val=None): """ Set or check the checksum status of this CDF. If checksums are enabled, the checksum will be verified every time the file is opened. Other Parameters ================ new_val : boolean True to enable checksum, False to disable, or leave out to simply check. Returns ======= out : boolean True if the checksum is enabled or False if disabled """ if new_val != None: self._call(const.PUT_, const.CDF_CHECKSUM_, const.MD5_CHECKSUM if new_val else const.NO_CHECKSUM) chk = ctypes.c_long(0) self._call(const.GET_, const.CDF_CHECKSUM_, ctypes.byref(chk)) if not chk.value in (const.MD5_CHECKSUM.value, const.NO_CHECKSUM.value): raise CDFError(const.BAD_CHECKSUM) return chk.value == const.MD5_CHECKSUM.value def close(self): """ Closes the CDF file Although called on object destruction (:meth:`~CDF.__del__`), to ensure all data are saved, the user should explicitly call :meth:`~CDF.close` or :meth:`~CDF.save`. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.CLOSE_, const.CDF_) self._opened = False def compress(self, comptype=None, param=None): """ Set or check the compression of this CDF Sets compression on entire *file*, not per-variable. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) Returns ======= out : tuple (comptype, param) currently in effect See Also ======== :meth:`Var.compress` Examples ======== Set file ``cdffile`` to gzip compression, compression level 9: >>> cdffile.compress(pycdf.const.GZIP_COMPRESSION, 9) """ return _compress(self, comptype, param) def new(self, name, data=None, type=None, recVary=True, dimVarys=None, dims=None, n_elements=None, compress=None, compress_param=None): """ Create a new zVariable in this CDF .. note:: Either ``data`` or ``type`` must be specified. If type is not specified, it is guessed from ``data``. Parameters ========== name : str name of the new variable Other Parameters ================ data data to store in the new variable. If this has a an ``attrs`` attribute (e.g., :class:`~spacepy.datamodel.dmarray`), it will be used to populate attributes of the new variable. type : ctypes.c_long CDF type of the variable, from :mod:`~pycdf.const`. See section 2.5 of the CDF user's guide for more information on CDF data types. recVary : boolean record variance of the variable (default True) dimVarys : list of boolean dimension variance of each dimension, default True for all dimensions. 
dims : list of int size of each dimension of this variable, default zero-dimensional. Note this is the dimensionality as defined by CDF, i.e., for record-varying variables it excludes the leading record dimension. See :py:class:`Var`. n_elements : int number of elements, should be 1 except for CDF_CHAR, for which it's the length of the string. compress : ctypes.c_long Compression to apply to this variable, default None. See :py:meth:`Var.compress`. compress_param : ctypes.c_long Compression parameter if compression used; reasonable default is chosen. See :py:meth:`Var.compress`. Returns ======= out : :py:class:`Var` the newly-created zVariable Raises ====== ValueError : if neither data nor sufficient typing information is provided. Notes ===== Any given data may be representable by a range of CDF types; if the type is not specified, pycdf will guess which the CDF types which can represent this data. This breaks down to: #. If input data is a numpy array, match the type of that array #. Proper kind (numerical, string, time) #. Proper range (stores highest and lowest number provided) #. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: #. Type that matches precision of data first, then #. integer type before float type, then #. Smallest type first, then #. signed type first, then #. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if ``data`` specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: #. absolute values between 0 and 3e-39 #. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. """ if type in (const.CDF_EPOCH16, const.CDF_INT8, const.CDF_TIME_TT2000) \ and self.backward: raise ValueError('Cannot use EPOCH16, INT8, or TIME_TT2000 ' 'in backward-compatible CDF') if not lib.supports_int8 and \ type in (const.CDF_INT8, const.CDF_TIME_TT2000): raise ValueError('INT8 and TIME_TT2000 require CDF library 3.4.0') if data is None: if type is None: raise ValueError('Must provide either data or a CDF type.') if dims is None: dims = [] if n_elements is None: n_elements = 1 else: (guess_dims, guess_types, guess_elements) = _Hyperslice.types(data) if dims is None: if recVary: if guess_dims == (): raise ValueError( 'Record-varying data cannot be scalar. 
' 'Specify NRV with CDF.new() or put data in array.') dims = guess_dims[1:] else: dims = guess_dims if type is None: type = guess_types[0] if type == const.CDF_EPOCH16.value and self.backward: type = const.CDF_EPOCH if n_elements is None: n_elements = guess_elements if dimVarys is None: dimVarys = [True for i in dims] recVary = const.VARY if recVary else const.NOVARY dimVarys = [const.VARY if dimVary else const.NOVARY for dimVary in dimVarys] if not hasattr(type, 'value'): type = ctypes.c_long(type) if type.value == const.CDF_INT8.value and not lib.supports_int8: raise ValueError( '64-bit integer support require CDF library 3.4.0') if type.value in (const.CDF_EPOCH16.value, const.CDF_INT8.value, const.CDF_TIME_TT2000.value) \ and self.backward: raise ValueError('Data requires EPOCH16, INT8, or TIME_TT2000; ' 'incompatible with backward-compatible CDF') new_var = Var(self, name, type, n_elements, dims, recVary, dimVarys) if compress != None: new_var.compress(compress, compress_param) if data is not None: new_var[...] = data if hasattr(data, 'attrs'): new_var.attrs.clone(data.attrs) return new_var def raw_var(self, name): """ Get a "raw" :class:`Var` object. Normally a :class:`Var` will perform translation of values for certain types (to/from Unicode for CHAR variables on Py3k, and to/from datetime for all time types). A "raw" object does not perform this translation, on read or write. This does *not* affect the data on disk, and in fact it is possible to maintain multiple Python objects with access to the same zVariable. Parameters ========== name : str name or number of the zVariable """ v = self[name] v._raw = True return v def save(self): """ Saves the CDF file but leaves it open. If closing the CDF, :meth:`close` is sufficient; there is no need to call :meth:`save` before :meth:`close`. .. note:: Relies on an undocumented call of the CDF C library, which is also used in the Java interface. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.SAVE_, const.CDF_) def copy(self): """ Make a copy of all data and attributes in this CDF Returns ======= out : :py:class:`CDFCopy` :class:`~spacepy.datamodel.SpaceData`-like object of all data """ return CDFCopy(self) def version(self): """ Get version of library that created this CDF Returns ======= out : tuple version of CDF library, in form (version, release, increment) """ ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) self._call(const.GET_, const.CDF_VERSION_, ctypes.byref(ver), const.GET_, const.CDF_RELEASE_, ctypes.byref(rel), const.GET_, const.CDF_INCREMENT_, ctypes.byref(inc)) return (ver.value, rel.value, inc.value) def _get_attrs(self): """Get attribute list Provide access to the CDF's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = gAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. 
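A short sketch (the attribute name ``Project`` is illustrative; any global attribute in the file behaves the same way): >>> cdffile.attrs['Project'] #dict-style lookup of one global attribute >>> 'Project' in cdffile.attrs #membership test works as for any dict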
""") def var_num(self, varname): """Get the variable number of a particular variable name This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : int Variable number of this zvariable. """ num = self._var_nums.get(varname, None) if num is None: #Copied from Var._get, which can hopefully be thinned varNum = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMBER_, varname, ctypes.byref(varNum)) num = varNum.value self._var_nums[varname] = num return num def attr_num(self, attrname): """Get the attribute number and scope by attribute name This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : tuple attribute number, scope (True for global) of this attribute """ res = self._attr_info.get(attrname, None) if res is None: #Copied from Var._get, which can hopefully be thinned attrNum = ctypes.c_long(0) self._call(const.GET_, const.ATTR_NUMBER_, attrname, ctypes.byref(attrNum)) scope = ctypes.c_long(0) self._call(const.SELECT_, const.ATTR_, attrNum, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) res = (attrNum.value, scope) self._attr_info[attrname] = res return res def clear_attr_from_cache(self, attrname): """Mark an attribute deleted in the name-to-number cache Will remove an attribute, and all attributes with higher numbers, from the attribute cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the attribute. Not this is NOT a string in Python 3! """ num, scope = self.attr_num(attrname) #All numbers higher than this are renumbered for a, n in list(self._attr_info.items()): if n[0] >= num: del self._attr_info[a] def clear_from_cache(self, varname): """Mark a variable deleted in the name-to-number cache Will remove a variable, and all variables with higher numbers, from the variable cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! """ num = self.var_num(varname) #All numbers higher than this are renumbered for v, n in list(self._var_nums.items()): if n >= num: del self._var_nums[v] def add_attr_to_cache(self, attrname, num, scope): """Add an attribute to the name-to-number cache This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable scope : bool True if global scope; False if variable scope. 
""" self._attr_info[attrname] = (num, scope) def add_to_cache(self, varname, num): """Add a variable to the name-to-number cache This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable """ self._var_nums[varname] = num #Note there is no function for delete, currently handled in Var.rename #and Attr.rename by just deleting from the dict directly. Maybe this #should be differen (maybe should be possible to follow a variable across #a rename...) class Var(MutableSequence): """ A CDF variable. This object does not directly store the data from the CDF; rather, it provides access to the data in a format that much like a Python list or numpy :class:`~numpy.ndarray`. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. The CDF user's guide, section 2.3, provides background on variables. .. note:: Not intended to be created directly; use methods of :class:`CDF` to gain access to a variable. A record-varying variable's data are viewed as a hypercube of dimensions n_dims+1 (the extra dimension is the record number). They are indexed in row-major fashion, i.e. the last index changes most frequently / is contiguous in memory. If the CDF is column-major, the data are transformed to row-major before return. Non record-varying variables are similar, but do not have the extra dimension of record number. Variables can be subscripted by a multidimensional index to return the data. Indices are in row-major order with the first dimension representing the record number. If the CDF is column major, the data are reordered to row major. Each dimension is specified by standard Python `slice <http://docs.python.org/tutorial/introduction.html#strings>`_ notation, with dimensions separated by commas. The ellipsis fills in any missing dimensions with full slices. The returned data are lists; Python represents multidimensional arrays as nested lists. The innermost set of lists represents contiguous data. .. note:: numpy 'fancy indexing' is *not* supported. Degenerate dimensions are 'collapsed', i.e. no list of only one element will be returned if a single subscript is specified instead of a range. (To avoid this, specify a slice like 1:2, which starts with 1 and ends before 2). Two special cases: 1. requesting a single-dimension slice for a record-varying variable will return all data for that record number (or those record numbers) for that variable. 2. Requests for multi-dimensional variables may skip the record-number dimension and simply specify the slice on the array itself. In that case, the slice of the array will be returned for all records. In the event of ambiguity (e.g., single-dimension slice on a one-dimensional variable), case 1 takes priority. Otherwise, mismatch between the number of dimensions specified in the slice and the number of dimensions in the variable will cause an :exc:`~exceptions.IndexError` to be thrown. This all sounds very complicated but it is essentially attempting to do the 'right thing' for a range of slices. An unusual case is scalar (zero-dimensional) non-record-varying variables. Clearly they cannot be subscripted normally. 
In this case, use the ``[...]`` syntax meaning 'access all data.': >>> import pycdf >>> testcdf = pycdf.CDF('test.cdf', '') >>> variable = testcdf.new('variable', recVary=False, ... type=pycdf.const.CDF_INT4) >>> variable[...] = 10 >>> variable <Var: CDF_INT4 [] NRV > >>> variable[...] 10 Reading any empty non-record-varying variable will return an empty with the same *number* of dimensions, but all dimensions will be of zero length. The scalar is, again, a special case: due to the inability to have a numpy array which is both zero-dimensional and empty, reading an NRV scalar variable with no data will return an empty one-dimensional array. This is really not recommended. As a list type, variables are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_; iterating over a variable returns a single complete record at a time. This is all clearer with examples. Consider a variable ``B_GSM``, with three elements per record (x, y, z components) and fifty records in the CDF. Then: 1. ``B_GSM[0, 1]`` is the y component of the first record. 2. ``B_GSM[10, :]`` is a three-element list, containing x, y, and z components of the 11th record. As a shortcut, if only one dimension is specified, it is assumed to be the record number, so this could also be written ``B_GSM[10]``. 3. ``B_GSM[...]`` reads all data for ``B_GSM`` and returns it as a fifty-element list, each element itself being a three-element list of x, y, z components. Multidimensional example: consider fluxes stored as a function of pitch angle and energy. Such a variable may be called Flux and stored as a two-dimensional array, with the first dimension representing (say) ten energy steps and the second, eighteen pitch angle bins (ten degrees wide, centered from 5 to 175 degrees). Assume 100 records stored in the CDF (i.e. 100 different times). 1. ``Flux[4]`` is a list of ten elements, one per energy step, each element being a list of 18 fluxes, one per pitch bin. All are taken from the fifth record in the CDF. 2. ``Flux[4, :, 0:4]`` is the same record, all energies, but only the first four pitch bins (roughly, field-aligned). 3. ``Flux[..., 0:4]`` is a 100-element list (one per record), each element being a ten-element list (one per energy step), each containing fluxes for the first four pitch bins. This slicing notation is very flexible and allows reading specifically the desired data from the CDF. All data are, on read, converted to appropriate Python data types; EPOCH, EPOCH16, and TIME_TT2000 types are converted to :class:`~datetime.datetime`. Data are returned in numpy arrays. .. note:: Although pycdf supports TIME_TT2000 variables, the Python :class:`~datetime.datetime` object does not support leap seconds. Thus, on read, any seconds past 59 are truncated to 59.999999 (59 seconds, 999 milliseconds, 999 microseconds). Potentially useful list methods and related functions: - `count <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `in <http://docs.python.org/reference/expressions.html#in>`_ - `index <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `len <http://docs.python.org/library/functions.html#len>`_ - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - `sorted <http://docs.python.org/library/functions.html#sorted>`_ The topic of array majority can be very confusing; good background material is available at `IDL Array Storage and Indexing <http://www.idlcoyote.com/misc_tips/colrow_major.html>`_. 
In brief, *regardless of the majority stored in the CDF*, pycdf will always present the data in the native Python majority, row-major order, also known as C order. This is the default order in `NumPy <http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html #internal-memory-layout-of-an-ndarray>`_. However, packages that render image data may expect it in column-major order. If the axes seem 'swapped' this is likely the reason. The :attr:`~Var.attrs` Python attribute acts as a dictionary referencing zAttributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`zAttrList` for more on the dictionary of attributes. With writing, as with reading, every attempt has been made to match the behavior of Python lists. You can write one record, many records, or even certain elements of all records. There is one restriction: only the record dimension (i.e. dimension 0) can be resized by write, as all records in a variable must have the same dimensions. Similarly, only whole records can be deleted. .. note:: Unusual error messages on writing data usually mean that pycdf is unable to interpret the data as a regular array of a single type matching the type and shape of the variable being written. A 5x4 array is supported; an irregular array where one row has five columns and a different row has six columns is not. Error messages of this type include: - ``Data must be well-formed, regular array of number, string, or datetime`` - ``setting an array element with a sequence.`` - ``shape mismatch: objects cannot be broadcast to a single shape`` For these examples, assume Flux has 100 records and dimensions [2, 3]. Rewrite the first record without changing the rest: >>> Flux[0] = [[1, 2, 3], [4, 5, 6]] Writes a new first record and delete all the rest: >>> Flux[...] = [[1, 2, 3], [4, 5, 6]] Write a new record in the last position and add a new record after: >>> Flux[99:] = [[[1, 2, 3], [4, 5, 6]], ... [[11, 12, 13], [14, 15, 16]]] Insert two new records between the current number 5 and 6: >>> Flux[5:6] = [[[1, 2, 3], [4, 5, 6]], [[11, 12, 13], ... [14, 15, 16]]] This operation can be quite slow, as it requires reading and rewriting the entire variable. (CDF does not directly support record insertion.) Change the first element of the first two records but leave other elements alone: >>> Flux[0:2, 0, 0] = [1, 2] Remove the first record: >>> del Flux[0] Removes record 5 (the sixth): >>> del Flux[5] Due to the need to work around a bug in the CDF library, this operation can be quite slow. Delete *all data* from ``Flux``, but leave the variable definition intact: >>> del Flux[...] .. note:: Although this interface only directly supports zVariables, zMode is set on opening the CDF so rVars appear as zVars. See p.24 of the CDF user's guide; pyCDF uses zMode 2. .. autosummary:: ~Var.attrs ~Var.compress ~Var.copy ~Var.dtype ~Var.dv ~Var.insert ~Var.name ~Var.rename ~Var.rv ~Var.shape ~Var.type .. attribute:: Var.attrs zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. .. automethod:: compress .. automethod:: copy .. autoattribute:: dtype .. automethod:: dv .. automethod:: insert .. automethod:: name .. automethod:: rename .. automethod:: rv .. autoattribute:: shape .. 
automethod:: type """ def __init__(self, cdf_file, var_name, *args): """Create or locate a variable Parameters ========== cdf_file : :py:class:`pycdf.CDF` CDF file containing this variable var_name : string name of this variable Other Parameters ================ args additional arguments passed to :py:meth:`_create`. If none, opens an existing variable. If provided, creates a new one. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning """ self.cdf_file = cdf_file #This is the definitive "identify" of variable self._name = None self._type = None #CDF type (long) self._raw = False #Raw access (skip all conversions) if len(args) == 0: self._get(var_name) else: self._create(var_name, *args) #Weak reference to attribute list (use attrs instead) #This avoids a reference loop self._attrlistref = weakref.ref(zAttrList(self)) def __getitem__(self, key): """Returns a slice from the data array. Details under :py:class:`pycdf.Var`. @return: The data from this variable @rtype: list-of-lists of appropriate type. @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) #Hyperslice mostly catches this sort of thing, but #an empty variable is a special case, since we might want to #WRITE to 0th record (which Hyperslice also supports) but #can't READ from it, and iterating over tries to read from it. if hslice.rv: if hslice.dimsizes[0] == 0 and hslice.degen[0] and \ hslice.starts[0] == 0: raise IndexError('record index out of range') #For NRV, again hslice will assume 0th record exists since we might #want to write. So ANY degenerate dim other than the glued-on 0th #suggests an explicit index that should fail. None degenerate suggests #make an empty array. #Note this is pulling a lot of hyperslice stuff into getitem! elif hslice.dimsizes[0] == 0: if len(hslice.degen) > 1 and max(hslice.degen[1:]): raise IndexError('record index out of range') else: #The zero-length dimension is degenerate so it gets chopped, #and you can't have a zero-length numpy array that still #maintains the size of all other dimensions. So just force #a zero-dim array and the rest will follow hslice.counts[...] = 0 #If this is a scalar, need to make a single non-degenerate #dimension so it can be empty. if len(hslice.counts) == 1: hslice.degen[0] = False result = hslice.create_array() if hslice.counts[0] != 0: hslice.select() lib.call(const.GET_, const.zVAR_HYPERDATA_, result.ctypes.data_as(ctypes.c_void_p)) return hslice.convert_input_array(result) def __delitem__(self, key): """Removes a record (or set of records) from the CDF Only whole records can be deleted, so the del call must either specify only one dimension or it must specify all elements of the non-record dimensions. This is *not* a way to resize a variable! Deleting records from the middle of a variable may be very slow in some circumstances. To work around a bug in CDF library versions 3.4.0 and before, all the data must be read in, the requested deletions done, and then all written back out. 
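A minimal sketch (assumes a record-varying variable ``Flux`` as in the class docstring above): >>> del Flux[0] #remove the first record >>> del Flux[5:10] #remove records 5 through 9 as a whole-record slice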
@param key: index or slice to delete @type key: int or slice @raise TypeError: if an attempt is made to delete from a non record-varying variable, or to delete below the record level """ if not self.rv(): raise TypeError('Cannot delete records from non-record-varying ' 'variable.') hslice = _Hyperslice(self, key) if hslice.dims > 1 and (hslice.counts[1:] != hslice.dimsizes[1:]).any(): raise TypeError('Can only delete entire records.') if hslice.counts[0] == 0: return start = hslice.starts[0] count = hslice.counts[0] interval = hslice.intervals[0] dimsize = hslice.dimsizes[0] self._call() dangerous_delete = False if lib._del_middle_rec_bug and \ (interval != 1 or (start != 0 and start + count < dimsize)): #delete from middle is dangerous if only have one index entry entries = ctypes.c_long(0) lib.call(const.GET_, const.zVAR_nINDEXENTRIES_, ctypes.byref(entries)) dangerous_delete = (entries.value == 1) if dangerous_delete: data = self[...] data = numpy.delete( data, numpy.arange(start, start + count * interval, interval), 0) self[0:dimsize - count] = data first_rec = dimsize - count last_rec = dimsize - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif interval == 1: first_rec = ctypes.c_long(start) last_rec = ctypes.c_long(start + count - 1) lib.call(const.DELETE_, const.zVAR_RECORDS_, first_rec, last_rec) else: self._call() #delete from end to avoid renumbering of records for recno in range(start + (count - 1) * interval, start - 1, -1 * interval): lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(recno), ctypes.c_long(recno)) def __setitem__(self, key, data): """Puts a slice into the data array. Details under :py:class:`pycdf.Var`. @param key: index or slice to store @type key: int or slice @param data: data to store @type data: numpy.array @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. 
IndexError will @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) n_recs = hslice.counts[0] hslice.expand(data) cdf_type = self.type() if cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) else: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=self._np_type()) if cdf_type == const.CDF_EPOCH16.value: datashape = data.shape[:-1] else: datashape = data.shape #Check data sizes if datashape != tuple(hslice.expected_dims()): raise ValueError('attempt to assign data of dimensions ' + str(datashape) + ' to slice of dimensions ' + str(tuple(hslice.expected_dims()))) #Flip majority and reversed dimensions, see convert_input_array data = hslice.convert_output_array(data) #Handle insertions and similar weirdness if hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Specified slice ends before last record, so insert in middle saved_data = self[hslice.starts[0] + n_recs:] if hslice.counts[0] > 0: hslice.select() lib.call(const.PUT_, const.zVAR_HYPERDATA_, data.ctypes.data_as(ctypes.c_void_p)) if hslice.counts[0] < n_recs: first_rec = hslice.starts[0] + hslice.counts[0] last_rec = hslice.dimsizes[0] - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Put saved data in after inserted data self[hslice.starts[0] + hslice.counts[0]:] = saved_data def extend(self, data): """ Append multiple values to the end of this variable This is an efficiency function which overrides the base implementation in MutableSequence. Parameters ---------- data : the data to append """ self[len(self):] = data def insert(self, index, data): """ Inserts a *single* record before an index Parameters ---------- index : int index before which to insert the new record data : the record to insert """ self[index:index] = [data] def _create(self, var_name, datatype, n_elements = 1, dims = (), recVary = const.VARY, dimVarys = None): """Creates a new zVariable @param var_name: name of this variable @type var_name: string @param datatype: CDF data type @type datatype: ctypes.c_long @param n_elements: number of elements (should be 1 except for CDF_CHAR variables). @type n_elements: long @param dims: size of each dimension for multi-dimensional variable, or empty for a zero-dimensional @type dims: sequence of long @param recVary: record variance for this variable (VARY/NOVARY) @type recVary: long @param dimVarys: array of VARY or NOVARY, variance for each dimension @type dimVarys: sequence of long @return: new variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.new}. 
""" dim_array = (ctypes.c_long * len(dims))(*dims) enc_name = var_name.encode('ascii') if dimVarys is None: dim_vary_array = (ctypes.c_long * (len(dims) if len(dims) > 0 else 1))(const.VARY) else: dim_vary_array = (ctypes.c_long * len(dims))(*dimVarys) varNum = ctypes.c_long(0) self.cdf_file._call(const.CREATE_, const.zVAR_, enc_name, datatype, ctypes.c_long(n_elements), ctypes.c_long(len(dims)), dim_array, recVary, dim_vary_array, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) def _delete(self): """Removes this zVariable from the CDF @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ self._call(const.DELETE_, const.zVAR_) self.cdf_file.clear_from_cache(self._name) self._name = None def _get(self, var_name): """Gets an existing zVariable @param var_name: name of this variable @type var_name: string @return: variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.__getitem__}. """ if isinstance(var_name, str_classes): try: enc_name = var_name.encode('ascii').rstrip() except AttributeError: enc_name = var_name.rstrip() #already in ASCII #'touch' CDF to cause an error if the name isn't there; get number varNum = ctypes.c_long(0) self.cdf_file._call(const.GET_, const.zVAR_NUMBER_, enc_name, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) else: #Looking up by number name = ctypes.create_string_buffer(const.CDF_VAR_NAME_LEN256+1) self.cdf_file._call(const.SELECT_, const.zVAR_, ctypes.c_long(var_name), const.GET_, const.zVAR_NAME_, name) self._name = name.value.rstrip() self.cdf_file.add_to_cache(self._name, var_name) def _num(self): """Returns the zVar number for this variable @return: number of this zVar @rtype: int """ return self.cdf_file.var_num(self._name) def __len__(self): """Get number of records for this variable in this file @return: Number of records @rtype: long @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ count = ctypes.c_long(0) self._call(const.GET_, const.zVAR_MAXREC_, ctypes.byref(count)) return (count.value + 1) def __repr__(self): """Returns representation of the variable Cannot return anything that can be eval'd to create a copy, so just wrap the informal representation in angle brackets. @return: info on this zVar @rtype: str """ return '<Var:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the variable This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.Var`. 
@return: info on this zVar, CDFTYPE [dimensions] NRV (if not record-varying) @rtype: str """ if self.cdf_file._opened: cdftype = self.type() chartypes = (const.CDF_CHAR.value, const.CDF_UCHAR.value) rv = self.rv() typestr = lib.cdftypenames[cdftype] + \ ('*' + str(self._nelems()) if cdftype in chartypes else '' ) if rv: sizestr = str([len(self)] + self._dim_sizes()) else: sizestr = str(self._dim_sizes()) return typestr + ' ' + sizestr + ('' if rv else ' NRV') else: if isinstance(self._name, str): return 'zVar "{0}" in closed CDF {1}'.format( self._name, self.cdf_file.pathname) else: return 'zVar "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self.cdf_file.pathname.decode('ascii')) def _n_dims(self): """Get number of dimensions for this variable @return: the number of dimensions @rtype: long """ n_dims = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMDIMS_, ctypes.byref(n_dims)) return n_dims.value def _dim_sizes(self): """Get the dimension sizes for this variable @return: sequence of sizes @rtype: sequence of long @note: This will always be in Python order (i.e. row major, last index iterates most quickly), *regardless* of the majority of the CDF. """ sizes = (ctypes.c_long * const.CDF_MAX_DIMS)(0) self._call(const.GET_, const.zVAR_DIMSIZES_, sizes) sizes = sizes[0:self._n_dims()] return sizes def rv(self, new_rv=None): """ Gets or sets whether this variable has record variance If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Other Parameters ================ new_rv : boolean True to change to record variance, False to change to NRV, unspecified to simply check variance. Returns ======= out : Boolean True if record varying, False if NRV """ if new_rv != None: self._call(const.PUT_, const.zVAR_RECVARY_, const.VARY if new_rv else const.NOVARY) vary = ctypes.c_long(0) self._call(const.GET_, const.zVAR_RECVARY_, ctypes.byref(vary)) return vary.value != const.NOVARY.value def dv(self, new_dv=None): """ Gets or sets dimension variance of each dimension of variable. If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Parameters ========== new_dv : list of boolean Each element True to change that dimension to dimension variance, False to change to not dimension variance. (Unspecified to simply check variance.) Returns ======= out : list of boolean True if that dimension has variance, else false. """ ndims = self._n_dims() if new_dv != None: if len(new_dv) != ndims: raise ValueError('Must specify variance for ' + str(ndims) + 'dimensions.') varies = (ctypes.c_long * ndims)( *[const.VARY if dv else const.NOVARY for dv in new_dv]) self._call(const.PUT_, const.zVAR_DIMVARYS_, varies) if ndims == 0: return [] varies = (ctypes.c_long * const.CDF_MAX_DIMS)() self._call(const.GET_, const.zVAR_DIMVARYS_, varies) return [dv != const.NOVARY.value for dv in varies[0:ndims]] def _call(self, *args, **kwargs): """Select this CDF and variable and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. @param args: Passed directly to the CDF library interface. Useful constants are defined in the :py:mod:`pycdf.const` module of this package. @type args: various, see :py:mod:`ctypes`. 
@return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self.cdf_file._call( const.SELECT_, const.zVAR_, ctypes.c_long(self.cdf_file.var_num(self._name)), *args, **kwargs) def _np_type(self): """Returns the numpy type of this variable This is the numpy type that will come directly out of the CDF; see :meth:`dtype` for the representation post-conversion. Raises ====== CDFError : for library-reported error or failure to find numpy type Returns ======= out : dtype numpy dtype that will hold value from this variable """ cdftype = self.type() if cdftype == const.CDF_CHAR.value or cdftype == const.CDF_UCHAR.value: return numpy.dtype('S' + str(self._nelems())) try: return lib.numpytypedict[cdftype] except KeyError: raise CDFError(const.BAD_DATA_TYPE) def type(self, new_type=None): """ Returns or sets the CDF type of this variable Parameters ========== new_type : ctypes.c_long the new type from :mod:`~pycdf.const` Returns ======= out : int CDF type """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) n_elements = ctypes.c_long(self._nelems()) self._call(const.PUT_, const.zVAR_DATASPEC_, new_type, n_elements) self._type = None if self._type is None: cdftype = ctypes.c_long(0) self._call(const.GET_, const.zVAR_DATATYPE_, ctypes.byref(cdftype)) self._type = cdftype.value return self._type def _nelems(self): """Number of elements for each value in this variable This is the length of strings for CHAR and UCHAR, should be 1 otherwise. @return: length of strings @rtype: int """ nelems = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMELEMS_, ctypes.byref(nelems)) return nelems.value def name(self): """ Returns the name of this variable Returns ======= out : str variable's name """ if isinstance(self._name, str): return self._name elif isinstance(self._name, bytes): return self._name.decode() def compress(self, comptype=None, param=None): """ Set or check the compression of this variable Compression may not be changeable on variables with data already written; even deleting the data may not permit the change. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
Returns ======= out : tuple the (comptype, param) currently in effect """ return _compress(self, comptype, param) def copy(self): """ Copies all data and attributes from this variable Returns ======= out : :class:`VarCopy` list of all data in record order """ return VarCopy(self) def rename(self, new_name): """ Renames this variable Parameters ========== new_name : str the new name for this variable """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_VAR_NAME_LEN256: raise CDFError(const.BAD_VAR_NAME) self._call(const.PUT_, const.zVAR_NAME_, enc_name) self.cdf_file.add_to_cache( enc_name, self.cdf_file.var_num(self._name)) #Still in cache del self.cdf_file._var_nums[self._name] self._name = enc_name @property def shape(self): """ Provides the numpy array-like shape of this variable. Returns a tuple; first element is number of records (RV variable only) And the rest provide the dimensionality of the variable. .. note:: Assigning to this attribute will not change the shape. """ if self.rv(): return tuple([len(self)] + self._dim_sizes()) else: return tuple(self._dim_sizes()) @property def dtype(self): """ Provide the numpy dtype equivalent to the CDF type of this variable. Data from this variable will be returned in numpy arrays of this type. See Also -------- type """ cdftype = self.type() if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str is not bytes and not self._raw: return numpy.dtype('U' + str(self._nelems())) if cdftype in (const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value) and not self._raw: return numpy.dtype('O') return self._np_type() def _get_attrs(self): """Get attribute list Provide access to the zVar's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = zAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. """) class _Hyperslice(object): """Represents a CDF 'slice' used for the hyper CDF functions For internal module use only. @ivar dims: number of dimensions to this slice, usually number of dimensions to the variable plus one for the record, which represents the 0th (least rapidly varying) dimension. @type dims: int @ivar dimsizes: size of each dimension (0th is number of records) @type dimsizes: list of int @ivar starts: index of the start value for each dimension ('dimension indices' in CDF speak) @type starts: list of int @ivar counts: number of values to get from each dimension. Final result will be the product of everything in counts. ('dimension counts' in CDF speak) @type counts: numpy.array @ivar intervals: interval between successive indices to use for each dimension. ('dimension invervals' in CDF speak) @type intervals: list of int @ivar degen: is this dimension degenerate, i.e. should be removed in the returned dataset. A 3D array with one dimension degenerate will be returned as a 2D array (i.e. list-of-lists.) @type degen: numpy.array @ivar rev: should this dimension be returned in reverse order? 
@type rev: numpy.array @ivar column: is this slice in column-major mode (if false, row-major) @type column: boolean @ivar zvar: what CDF variable this object slices on @type zvar: :py:class:`pycdf.Var` @ivar expanded_key: fully-expanded version of the key passed to the constructor (all dimensions filled in) @type expanded_key: tuple @note: All dimension-related variables are stored row-major (Python order) """ def __init__(self, zvar, key): """Create a Hyperslice @param zvar: zVariable that this slices @type zvar: :py:class:`pycdf.Var` @param key: Python multi-dimensional slice as passed to __getitem__ @type key: tuple of slice and/or int @raise IndexError: if slice is out of range, mismatches dimensions, or otherwise unparsable. @raise ValueError: if slice has invalid values """ self.zvar = zvar self.rv = self.zvar.rv() #dim of records, + 1 record dim (NRV always is record 0) self.dims = zvar._n_dims() + 1 self.dimsizes = [len(zvar)] + \ zvar._dim_sizes() self.starts = [0] * self.dims self.counts = numpy.empty((self.dims,), dtype=numpy.int32) self.counts.fill(1) self.intervals = [1] * self.dims self.degen = numpy.zeros(self.dims, dtype=numpy.bool) self.rev = numpy.zeros(self.dims, dtype=numpy.bool) #key is: #1. a single value (integer or slice object) if called 1D #2. a tuple (of integers and/or slice objects) if called nD #3. Each item is either a single value (degenerate dim) # or a slice object. if not hasattr(key, '__len__'): #Not a container object, pack in tuple key = (key, ) if not self.rv: key = (0, ) + key #NRV, so always get 0th record (degenerate) key = self.expand_ellipsis(key, self.dims) if self.rv: #special-cases for RV variables if len(key) == 1: #get all data for this record(s) key = self.expand_ellipsis(key + (Ellipsis, ), self.dims) elif len(key) == self.dims - 1: #get same slice from each record key = (slice(None, None, None), ) + key if len(key) == self.dims: self.expanded_key = key for i in range(self.dims): idx = key[i] if hasattr(idx, 'start'): #slice (self.starts[i], self.counts[i], self.intervals[i], self.rev[i]) = \ self.convert_range(idx.start, idx.stop, idx.step, self.dimsizes[i]) else: #Single degenerate value if idx < 0: idx += self.dimsizes[i] if idx != 0 and (idx >= self.dimsizes[i] or idx < 0): raise IndexError('list index out of range') self.starts[i] = idx self.degen[i] = True else: raise IndexError('Slice does not match dimensions for zVar ' + str(zvar._name)) self.column = zvar.cdf_file.col_major() def expected_dims(self, data=None): """Calculate size of non-degenerate dimensions Figures out size, in each dimension, of expected input data @return: size of each dimension for this slice, excluding degenerate @rtype: list of int """ return [self.counts[i] for i in range(self.dims) if not self.degen[i]] def expand(self, data): """Expands the record dimension of this slice to hold a set of data If the length of data (outermost dimension) is larger than the record count (counts[0]) for this slice, expand the slice to hold all the data. This requires that the record dimension of the slice not be degenerate, and also that it not have been completely specified when the hyperslice was created (i.e. record dimension either ellipsis or no specified stop.) Does *not* expand any other dimension, since that's Very Hard in CDF. 
@param data: the data which are intended to be stored in this slice @type data: list """ rec_slice = self.expanded_key[0] if not self.rv or isinstance(data, str_classes) or self.degen[0] or \ not hasattr(rec_slice, 'stop'): return if len(data) < self.counts[0]: #Truncate to fit data if rec_slice.stop is None and rec_slice.step in (None, 1): self.counts[0] = len(data) elif len(data) > self.counts[0]: #Expand to fit data if rec_slice.step in (None, 1): self.counts[0] = len(data) def create_array(self): """Creates a numpy array to hold the data from this slice Returns ======= out : numpy.array array sized, typed, and dimensioned to hold data from this slice """ counts = self.counts degen = self.degen if self.column: counts = self.reorder(counts) degen = self.reorder(degen) #TODO: Forcing C order for now, revert to using self.column later array = numpy.empty( [counts[i] for i in range(len(counts)) if not degen[i]], self.zvar._np_type(), order='C') return numpy.require(array, requirements=('C', 'A', 'W')) def convert_input_array(self, buffer): """Converts a buffer of raw data from this slice EPOCH(16) variables always need to be converted. CHAR need converted to Unicode if py3k Parameters ========== buffer : numpy.array data as read from the CDF file Returns ======= out : numpy.array converted data """ result = self._flip_array(buffer) #Convert to derived types cdftype = self.zvar.type() if not self.zvar._raw: if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str != bytes: dt = numpy.dtype('U{0}'.format(result.dtype.itemsize)) result = numpy.require(numpy.char.array(result).decode(), dtype=dt) elif cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(result) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(result) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(result) return result def convert_output_array(self, buffer): """Convert a buffer of data that will go into this slice Parameters ========== buffer : numpy.array data to go into the CDF file Returns ======= out : numpy.array input with majority flipped and dimensions reversed to be suitable to pass directly to CDF library. """ buffer = self._flip_array(buffer) return numpy.require(buffer, requirements=('C', 'A', 'W')) def _flip_array(self, data): """ Operations for majority, etc. common between convert_input and _output """ cdftype = self.zvar.type() #Flip majority if any non-degenerate dimensions exist if self.column and not min(self.degen): #Record-number dim degen, swap whole thing if self.degen[0]: if cdftype == const.CDF_EPOCH16.value: #Maintain last dimension data = data.transpose( list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose() #Record-number dimension is not degenerate, so keep it first else: if cdftype == const.CDF_EPOCH16.value: data = data.transpose( [0] + list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose( [0] + list(range(len(data.shape) - 1, 0, -1))) #Reverse non-degenerate dimensions in rev #Remember that the degenerate indices are already gone! 
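        # For instance, a request like var[4::-2] on a 10-record variable is
        # converted by convert_range() to start=0, count=3, interval=2 with
        # rev=True for the record dimension; the library therefore reads
        # records 0, 2, 4 in ascending order, and the reversing slice built
        # below flips them back to 4, 2, 0, the order the caller asked for.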
if self.rev.any(): sliced = [(slice(None, None, -1) if self.rev[i] else slice(None)) for i in range(self.dims) if not self.degen[i]] if cdftype == const.CDF_EPOCH16.value: #don't reverse last dim sliced.extend(slice(None)) data = operator.getitem(data, tuple(sliced)) return data def select(self): """Selects this hyperslice in the CDF Calls the CDF library to select the CDF, variable, records, and array elements corresponding to this slice. """ args = (const.SELECT_, const.zVAR_RECNUMBER_, ctypes.c_long(self.starts[0]), const.SELECT_, const.zVAR_RECCOUNT_, ctypes.c_long(self.counts[0]), const.SELECT_, const.zVAR_RECINTERVAL_, ctypes.c_long(self.intervals[0])) if self.dims > 1: dims = self.dims - 1 args += (const.SELECT_, const.zVAR_DIMINDICES_, (ctypes.c_long * dims)(*self.starts[1:]), const.SELECT_, const.zVAR_DIMCOUNTS_, (ctypes.c_long * dims)(*self.counts[1:]), const.SELECT_, const.zVAR_DIMINTERVALS_, (ctypes.c_long * dims)(*self.intervals[1:])) self.zvar._call(*args) @staticmethod def expand_ellipsis(slices, n_dims): """Expands any ellipses into correct number of full-size slices @param slices: tuple of slices, integers, or ellipse objects @type slices: tuple @param n_dims: number of dimensions this slice is over @type n_dims: int @return: L{slices} with ellipses replaced by appropriate number of full-dimension slices @rtype: tuple @raise IndexError: if ellipses specified when already have enough dimensions """ if slices is Ellipsis: return tuple([slice(None, None, None) for i in range(n_dims)]) #Elements might be numpy arrays, so can't use in/index idx = [i for i, v in enumerate(slices) if v is Ellipsis] if not idx: #no ellipsis return slices if len(idx) > 1: #multiples! raise IndexError('Ellipses can only be used once per slice.') idx = idx[0] #how many dims to expand ellipsis to #remember the ellipsis is in len(slices) and must be replaced! extra = n_dims - len(slices) + 1 if extra < 0: raise IndexError('too many indices') result = slices[0:idx] + (slice(None), ) * extra + slices[idx+1:] return result @staticmethod def check_well_formed(data): """Checks if input data is well-formed, regular array""" d = numpy.asanyarray(data) if d.dtype == numpy.object: #this is probably going to be bad try: len(d.flat[0]) except TypeError: #at least it's not a list pass else: raise ValueError( 'Data must be well-formed, regular array of number, ' 'string, or datetime') @staticmethod def dimensions(data): """Finds the dimensions of a nested list-of-lists @param data: data of which dimensions are desired @type data: list (of lists) @return: dimensions of L{data}, in order outside-in @rtype: list of int @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) _Hyperslice.check_well_formed(d) return d.shape @staticmethod def types(data, backward=False): """Find dimensions and valid types of a nested list-of-lists Any given data may be representable by a range of CDF types; infer the CDF types which can represent this data. This breaks down to: 1. Proper kind (numerical, string, time) 2. Proper range (stores highest and lowest number) 3. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: 1. Type that matches precision of data first, then 2. integer type before float type, then 3. Smallest type first, then 4. signed type first, then 5. specifically-named (CDF_BYTE) vs. 
generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if L{data} specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: 1. absolute values between 0 and 3e-39 2. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. @param data: data for which dimensions and CDF types are desired @type data: list (of lists) @param backward: limit to pre-CDF3 types @type backward: bool @return: dimensions of L{data}, in order outside-in; CDF types which can represent this data; number of elements required (i.e. length of longest string) @rtype: 3-tuple of lists ([int], [ctypes.c_long], [int]) @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) dims = d.shape elements = 1 types = [] _Hyperslice.check_well_formed(d) if d.dtype.kind in ('S', 'U'): #it's a string types = [const.CDF_CHAR, const.CDF_UCHAR] elements = d.dtype.itemsize if d.dtype.kind == 'U': #UTF-8 uses 4 bytes per elements //= 4 elif d.size and hasattr(numpy.ma.getdata(d).flat[0], 'microsecond'): if max((dt.microsecond % 1000 for dt in d.flat)) > 0: types = [const.CDF_EPOCH16, const.CDF_EPOCH, const.CDF_TIME_TT2000] else: types = [const.CDF_EPOCH, const.CDF_EPOCH16, const.CDF_TIME_TT2000] if backward: del types[types.index(const.CDF_EPOCH16)] del types[-1] elif not lib.supports_int8: del types[-1] elif d is data or isinstance(data, numpy.generic): #numpy array came in, use its type (or byte-swapped) types = [k for k in lib.numpytypedict if (lib.numpytypedict[k] == d.dtype or lib.numpytypedict[k] == d.dtype.newbyteorder()) and not k in lib.timetypes] if (not lib.supports_int8 or backward) \ and const.CDF_INT8.value in types: del types[types.index(const.CDF_INT8.value)] #Maintain priority to match the ordered lists below: #float/double (44, 45) before real (21/22), and #byte (41) before int (1) before char (51). So hack. #Consider making typedict an ordered dict once 2.6 is dead. types.sort(key=lambda x: x % 50, reverse=True) if not types: #not a numpy array, or can't parse its type if d.dtype.kind == 'O': #Object. 
Try to make it numeric #Can't do safe casting from Object, so try and compare #Basically try most restrictive to least restrictive trytypes = (numpy.uint64, numpy.int64, numpy.float64) for t in trytypes: try: newd = d.astype(dtype=t) except: #Failure to cast, try next type continue if (newd == d).all(): #Values preserved, use this type d = newd #Continue with normal guessing, as if a list break else: #fell through without a match raise ValueError( 'Cannot convert generic objects to CDF type.') if d.dtype.kind in ('i', 'u'): #integer minval = numpy.min(d) maxval = numpy.max(d) if minval < 0: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_INT2, const.CDF_INT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 15, 2 ** 31, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] else: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_UINT1, const.CDF_INT2, const.CDF_UINT2, const.CDF_INT4, const.CDF_UINT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 8, 2 ** 15, 2 ** 16, 2 ** 31, 2 ** 32, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] types = [t for (t, c) in zip(types, cutoffs) if c > maxval and (minval >= 0 or minval >= -c)] if (not lib.supports_int8 or backward) \ and const.CDF_INT8 in types: del types[types.index(const.CDF_INT8)] else: #float if dims is (): if d != 0 and (abs(d) > 1.7e38 or abs(d) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] else: absolutes = numpy.abs(d[d != 0]) if len(absolutes) > 0 and \ (numpy.max(absolutes) > 1.7e38 or numpy.min(absolutes) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] types = [t.value if hasattr(t, 'value') else t for t in types] return (dims, types, elements) @staticmethod def reorder(seq): """Reorders seq to switch array majority Used to take an array of subscripts between row and column majority. First element is not touched, being the record number. @param seq: a sequence of *subscripts* @type seq: sequence of integers @return: seq with all but element 0 reversed in order @rtype: sequence of integers """ return numpy.concatenate((seq[0:1], numpy.flipud(seq)[:-1])) @staticmethod def convert_range(start, stop, step, size): """Converts a start/stop/step range to start/count/interval (i.e. changes from Python-style slice to CDF-style) @param start: index to start a slice at, may be none or negative @type start: int @param stop: index at end of slice (one-past, standard Python), may be none or negative @type stop: int @param step: interval for stepping through stlice @type step: int @param size: size of list to slice @type size: int @return: (start, count, interval, rev) where: 1. start is the start index, normalized to be within the size of the list and negatives handled 2. count is the number of records in the slice, guaranteed to stop before the end 3. interval is the skip between records 4. rev indicates whether the sequence should be reversed @rtype: (int, int, int, boolean) """ (start, stop, step) = slice(start, stop, step).indices(size) if step < 0: step *= -1 count = int((start - stop + step - 1) / step) start = start - (count - 1) * step rev = True else: count = int((stop - start + step - 1) / step) rev = False if count < 0: count = 0 start = 0 return (start, count, step, rev) class Attr(MutableSequence): """An attribute, g or z, for a CDF .. 
warning:: This class should not be used directly, but only in its subclasses, :class:`gAttr` and :class:`zAttr`. The methods listed here are safe to use in the subclasses. Represents a CDF attribute, providing access to the Entries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. An introduction to CDF attributes can be found in section 2.4 of the CDF user's guide. Each element of the list is a single Entry of the appropriate type. The index to the elements is the Entry number. Multi-dimensional slicing is *not* supported; an Entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry .. autosummary:: ~Attr.append ~Attr.has_entry ~Attr.insert ~Attr.max_idx ~Attr.new ~Attr.number ~Attr.rename ~Attr.type .. automethod:: append .. automethod:: has_entry .. automethod:: insert .. automethod:: max_idx .. automethod:: new .. automethod:: number .. automethod:: rename .. automethod:: type """ def __init__(self, cdf_file, attr_name, create=False): """Initialize this attribute @param cdf_file: CDF file containing this attribute @type cdf_file: :py:class:`pycdf.CDF` @param attr_name: Name of this attribute @type attr_name: str @param create: True to create attribute, False to look up existing. @type create: bool """ self._cdf_file = cdf_file self._raw = False if isinstance(attr_name, str_classes): try: self._name = attr_name.encode('ascii') except AttributeError: self._name = attr_name attrno = ctypes.c_long() if create: self._cdf_file._call(const.CREATE_, const.ATTR_, self._name, self.SCOPE, ctypes.byref(attrno)) self._cdf_file.add_attr_to_cache( self._name, attrno.value, self.SCOPE == const.GLOBAL_SCOPE) else: #Ensure exists, and populate cache. See scope note below attrno, scope = self._cdf_file.attr_num(self._name) else: name = ctypes.create_string_buffer(const.CDF_ATTR_NAME_LEN256 + 1) scope = ctypes.c_long(0) self._cdf_file._call(const.SELECT_, const.ATTR_, ctypes.c_long(attr_name)) #Because it's possible to create a gAttr Python objecting #referencing an Attribute with variable scope, and vice-versa, #do NOT assume the scope matches #(Higher level code checks for that being a bad thing.) self._cdf_file._call( const.GET_, const.ATTR_NAME_, name, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) self._name = name.value.rstrip() if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) self._cdf_file.add_attr_to_cache(self._name, attr_name, scope) def __getitem__(self, key): """Return a slice of Entries. Because Attributes may be sparse, a multi-element slice will return None for those elements which do not have associated Entries. @param key: index or range of Entry number to return @type key: slice or int @return: a list of entries, appropriate type. @raise IndexError: if L{key} is an int and that Entry number does not exist. 
""" if key is Ellipsis: key = slice(None, None, None) if hasattr(key, 'indices'): idx = range(*key.indices(self.max_idx() + 1)) return [self._get_entry(i) if self.has_entry(i) else None for i in idx] else: if self.has_entry(key): return self._get_entry(key) else: raise IndexError('list index ' + str(key) + ' out of range.') def _check_other_entries(self, types): """Try to get the type of this entry from others in the Attribute For zAttrs, checks if all other Entries are the same type, and at least one doesn't match its zVar, i.e. Entry type dominates (otherwise assumption is the Var type dominates). For gAttrs, checks all other Entries, and gives priority to the one that's earliest in the possible type list and exists in other Entries. This is only one component of Entry type guessing! :param list types: CDF types that are candidates (match the data) :return: The type discerned from other Entries, or None """ if self.ENTRY_ == const.zENTRY_: #If everything else is the same entry type, #and one is not the same as its var, probably #all entries should be of that type cand_et = None #The Entry type that might work one_var_diff = False #One Var has a type different from Entry for num in range(self.max_idx() + 1): if not self.has_entry(num): continue vartype = self._cdf_file[num].type() entrytype = self.type(num) if vartype != entrytype: one_var_diff = True if cand_et is None: if not entrytype in types: return None #One var has Entry with "impossible" type cand_et = entrytype elif cand_et != entrytype: return None #Two vars have Entries with different types if one_var_diff and cand_et is not None: return cand_et else: # Of those types which exist in other entries, # find the one which is earliest # in types, i.e. the preferred type entrytypes = [self.type(num) for num in range(self.max_idx() + 1) if self.has_entry(num)] entrytypes = [et for et in entrytypes if et in types] if entrytypes: return types[ min([types.index(et) for et in entrytypes])] return None def __setitem__(self, key, data): """Set a slice of Entries. @param key: index or range of Entry numbers to set @type key: slice or int @param data: the data to set these entries to. Normally each entry should be a sequence; if a scalar is provided, it is treated as a single-element list. @type data: scalar or list @raise ValueError: if size of {data} does not match size of L{key} @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. 
""" if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): #Single value, promote everything a dimension idx = (key, key + 1, 1) data = [data] else: idx = key.indices(self.max_idx() + 1) if key.step is None or key.step > 0: #Iterating forward, extend slice to match data if len(data) > len(range(*idx)): idx = (idx[0], idx[0] + idx[2] * len(data), idx[2]) #get, and check, types and sizes for all data #checks first so don't have error after changing half the Entries data_idx = -1 typelist = [] for i in range(*idx): data_idx += 1 if data_idx >= len(data): continue datum = data[data_idx] if datum is None: typelist[i] = (None, None, None) continue (dims, types, elements) = _Hyperslice.types( datum, backward=self._cdf_file.backward) if len(types) <= 0: raise ValueError('Cannot find a matching CDF type.') if len(dims) > 1: raise ValueError('Entries must be scalar or 1D.') elif len(dims) == 1 and isinstance(datum[0], str_classes): raise ValueError('Entry strings must be scalar.') entry_type = None if self.has_entry(i): #If the entry already exists, match its type entry_type = self.type(i) if not entry_type in types: entry_type = None if entry_type is None: #Check other entries for this attribute entry_type = self._check_other_entries(types) if entry_type is None and self.ENTRY_ == const.zENTRY_: #Fall back to zVar type vartype = self._cdf_file[i].type() if vartype in types: entry_type = vartype else: entry_type = types[0] elif entry_type is None: entry_type = types[0] if not entry_type in lib.numpytypedict: raise ValueError('Cannot find a matching numpy type.') typelist.append((dims, entry_type, elements)) data_idx = -1 for i in range(*idx): data_idx += 1 if data_idx >= len(data) or data[data_idx] is None: if self.has_entry(i): del self[i] continue datum = data[data_idx] (dims, entry_type, elements) = typelist[data_idx] self._write_entry(i, datum, entry_type, dims, elements) def __delitem__(self, key): """Delete a slice of Entries. @param key: index or range of Entry numbers to delete @type key: slice or int @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. """ if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): idx = (key, key + 1, 1) else: idx = key.indices(self.max_idx() + 1) for i in range(*idx): self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(i), const.DELETE_, self.ENTRY_) def __iter__(self, current=0): """Iterates over all entries in this Attribute Returns data from one entry at a time until reaches the end. @note: Returned in entry-number order. """ while current <= self.max_idx(): if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current += 1 def __reversed__(self, current=None): """Iterates over all entries in this Attribute Returns data from one entry at a time, starting at end and going to beginning. @note: Returned in entry-number order. """ if current is None: current = self.max_idx() while current >= 0: if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current -= 1 def __len__(self): """Number of Entries for this Attr. NOT same as max Entry number. 
@return: Number of Entries @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_NUMENTRIES_, ctypes.byref(count)) return count.value def __repr__(self): """Returns representation of an attribute Cannot return anything that can be eval'd to create a copy of the attribtute, so just wrap the informal representation in angle brackets. @return: all the data in this attribute @rtype: str """ return '<\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute This is an 'informal' representation in that it cannot be evaluated directly to create an L{Attr}. @return: all the data in this attribute @rtype: str """ if self._cdf_file._opened: return '\n'.join([str(item) for item in self]) else: if isinstance(self._name, str): return 'Attribute "{0}" in closed CDF {1}'.format( self._name, self._cdf_file.pathname) else: return 'Attribute "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self._cdf_file.pathname.decode('ascii')) def insert(self, index, data): """Insert an entry at a particular number Inserts entry at particular number while moving all subsequent entries to one entry number later. Does not close gaps. Parameters ========== index : int index where to put the new entry data : data for the new entry """ max_entry = self.max_idx() if index > max_entry: #Easy case self[index] = data return for i in range(max_entry, index - 1, -1): if self.has_entry(i+1): self.__delitem__(i+1) if self.has_entry(i): self.new(self.__getitem__(i), type=self.type(i), number=i+1) self[index] = data def append(self, data): """Add an entry to end of attribute Puts entry after last defined entry (does not fill gaps) Parameters ========== data : data for the new entry """ self[self.max_idx() + 1] = data def _call(self, *args, **kwargs): """Select this CDF and Attr and call the CDF internal interface @param args: Passed directly to the CDF library interface. @type args: various, see :py:mod:`ctypes`. @return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self._cdf_file._call( const.SELECT_, const.ATTR_, ctypes.c_long(self._cdf_file.attr_num(self._name)[0]), *args, **kwargs) def _entry_len(self, number): """Number of elements in an Entry @param number: number of Entry @type number: int @return: number of elements @rtype: int """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') count = ctypes.c_long(0) self._call( const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_NUMELEMS_, ctypes.byref(count)) return count.value def type(self, number, new_type=None): """Find or change the CDF type of a particular Entry number Parameters ========== number : int number of Entry to check or change Other Parameters ================ new_type type to change this Entry to, from :mod:`~pycdf.const`. Omit to only check type. Returns ======= out : int CDF variable type, see :mod:`~pycdf.const` Notes ===== If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) size = ctypes.c_long(self._entry_len(number)) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATASPEC_, new_type, size, ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') cdftype = ctypes.c_long(0) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATATYPE_, ctypes.byref(cdftype), ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') return cdftype.value def has_entry(self, number): """Check if this attribute has a particular Entry number Parameters ========== number : int number of Entry to check or change Returns ======= out : bool True if ``number`` is a valid entry number; False if not """ status = self._call(const.CONFIRM_, self.ENTRY_EXISTENCE_, ctypes.c_long(number), ignore=(const.NO_SUCH_ENTRY, )) return not status == const.NO_SUCH_ENTRY def max_idx(self): """Maximum index of Entries for this Attr Returns ======= out : int maximum Entry number """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_MAXENTRY_, ctypes.byref(count)) return count.value def new(self, data, type=None, number=None): """Create a new Entry in this Attribute .. note:: If ``number`` is provided and an Entry with that number already exists, it will be overwritten. Parameters ========== data data to put in the Entry Other Parameters ================ type : int type of the new Entry, from :mod:`~pycdf.const` (otherwise guessed from ``data``) number : int Entry number to write, default is lowest available number. """ if number is None: number = 0 while self.has_entry(number): number += 1 (dims, types, elements) = _Hyperslice.types( data, backward=self._cdf_file.backward) if type is None: #Guess based on other entries type = self._check_other_entries(types) if type is None and self.ENTRY_ == const.zENTRY_: #Try to match variable type vartype = self._cdf_file[number].type() if vartype in types: type = vartype if type is None: type = types[0] elif hasattr(type, 'value'): type = type.value self._write_entry(number, data, type, dims, elements) def number(self): """Find the attribute number for this attribute Returns ======= out : int attribute number """ no = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.ATTR_NUMBER_, self._name, ctypes.byref(no)) return no.value def global_scope(self): """Determine scope of this attribute. Returns ======= out : bool True if global (i.e. gAttr), False if zAttr """ return self._cdf_file.attr_num(self._name)[1] def rename(self, new_name): """Rename this attribute Renaming a zAttribute renames it for *all* zVariables in this CDF! 
Parameters ========== new_name : str the new name of the attribute """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_ATTR_NAME_LEN256: raise CDFError(const.BAD_ATTR_NAME) self._call(const.PUT_, const.ATTR_NAME_, enc_name) self._cdf_file.add_attr_to_cache( enc_name, *self._cdf_file.attr_num(self._name)) #still in cache del self._cdf_file._attr_info[self._name] self._name = enc_name def _get_entry(self, number): """Read an Entry associated with this L{Attr} @param number: number of Entry to return @type number: int @return: data from entry numbered L{number} @rtype: list or str """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') #Make a big enough buffer length = self._entry_len(number) cdftype = self.type(number) if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): buff = numpy.empty((), 'S{0}'.format(length), order='C') else: if not cdftype in lib.numpytypedict: raise CDFError(const.BAD_DATA_TYPE) buff = numpy.empty((length,), lib.numpytypedict[cdftype], order='C') buff = numpy.require(buff, requirements=('C', 'A', 'W')) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATA_, buff.ctypes.data_as(ctypes.c_void_p)) #decode if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): if str == bytes or self._raw: #Py2k, leave as bytes result = bytes(buff) else: #Py3k, make unicode result = str(numpy.char.array(buff).decode()) else: if not self._raw: if cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(buff) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(buff) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(buff) else: result = buff else: result = buff if length == 1: result = result[0] return result def _write_entry(self, number, data, cdf_type, dims, elements): """Write an Entry to this Attr. @param number: number of Entry to write @type number: int @param data: data to write @param cdf_type: the CDF type to write, from :py:mod:`pycdf.const` @param dims: dimensions of L{data} @type dims: list @param elements: number of elements in L{data}, 1 unless it is a string @type elements: int """ if len(dims) == 0: n_write = 1 else: n_write = dims[0] if cdf_type in (const.CDF_CHAR.value, const.CDF_UCHAR.value): data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.dtype('S' + str(elements))) n_write = elements elif cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data), except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) elif cdf_type in lib.numpytypedict: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=lib.numpytypedict[cdf_type]) else: raise CDFError(const.BAD_DATA_TYPE) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATA_, ctypes.c_long(cdf_type), ctypes.c_long(n_write), data.ctypes.data_as(ctypes.c_void_p)) def _delete(self): """Delete this Attribute Also deletes all Entries associated with it. 
""" self._call(const.DELETE_, const.ATTR_) self._cdf_file.clear_attr_from_cache(self._name) self._name = None class zAttr(Attr): """zAttribute for zVariables within a CDF. .. warning:: Because zAttributes are shared across all variables in a CDF, directly manipulating them may have unexpected consequences. It is safest to operate on zEntries via :class:`zAttrList`. .. note:: When accessing a zAttr, pyCDF exposes only the zEntry corresponding to the associated zVariable. See Also ======== :class:`Attr` """ ENTRY_ = const.zENTRY_ ENTRY_DATA_ = const.zENTRY_DATA_ SCOPE = const.VARIABLE_SCOPE ENTRY_EXISTENCE_ = const.zENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMzENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXzENTRY_ ENTRY_NUMELEMS_ = const.zENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.zENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.zENTRY_DATASPEC_ def insert(self, index, data): """Insert entry at particular index number Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError def append(self, index, data): """Add entry to end of attribute list Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError class gAttr(Attr): """Global Attribute for a CDF Represents a CDF attribute, providing access to the gEntries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. Normally accessed by providing a key to a :class:`gAttrList`: >>> attribute = cdffile.attrs['attribute_name'] >>> first_gentry = attribute[0] Each element of the list is a single gEntry of the appropriate type. The index to the elements is the gEntry number. A gEntry may be either a single string or a 1D array of numerical type. Entries of numerical type (everything but CDF_CHAR and CDF_UCHAR) with a single element are returned as scalars; multiple-element entries are returned as a list. No provision is made for accessing below the entry level; the whole list is returned at once (but Python's slicing syntax can be used to extract individual items from that list.) Multi-dimensional slicing is *not* supported; an entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry gEntries are *not* necessarily contiguous; a gAttribute may have an entry 0 and entry 2 without an entry 1. :meth:`~Attr.len` will return the *number* of gEntries; use :meth:`~Attr.max_idx` to find the highest defined gEntry number and :meth:`~Attr.has_entry` to determine if a particular gEntry number exists. Iterating over all entries is also supported:: >>> entrylist = [entry for entry in attribute] Deleting gEntries will leave a "hole": >>> attribute[0:3] = [1, 2, 3] >>> del attribute[1] >>> attribute.has_entry(1) False >>> attribute.has_entry(2) True >>> print attribute[0:3] [1, None, 3] Multi-element slices over nonexistent gEntries will return ``None`` where no entry exists. Single-element indices for nonexistent gEntries will raise ``IndexError``. Assigning ``None`` to a gEntry will delete it. 
When assigning to a gEntry, the type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing gEntry of the same number in this gAttribute #. other gEntries in this gAttribute #. data-matching constraints described in :meth:`CDF.new`. See Also ======== :class:`Attr` """ ENTRY_ = const.gENTRY_ ENTRY_DATA_ = const.gENTRY_DATA_ SCOPE = const.GLOBAL_SCOPE ENTRY_EXISTENCE_ = const.gENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMgENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXgENTRY_ ENTRY_NUMELEMS_ = const.gENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.gENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.gENTRY_DATASPEC_ class AttrList(MutableMapping): """Object representing a list of attributes. .. warning:: This class should not be used directly, but only via its subclasses, :class:`gAttrList` and :class:`zAttrList`. Methods listed here are safe to use from the subclasses. .. autosummary:: ~AttrList.clone ~AttrList.copy ~AttrList.from_dict ~AttrList.new ~AttrList.rename .. automethod:: clone .. automethod:: copy .. automethod:: from_dict .. automethod:: new .. automethod:: rename """ def __init__(self, cdf_file, special_entry=None): """Initialize the attribute collection @param cdf_file: CDF these attributes are in @type cdf_file: :py:class:`pycdf.CDF` @param special_entry: callable which returns a "special" entry number, used to limit results for zAttrs to those which match the zVar (i.e. the var number) @type special_entry: callable """ self._cdf_file = cdf_file self.special_entry = special_entry def __getitem__(self, name): """Find an Attribute by name @param name: name of the Attribute to return @type name: str @return: attribute named L{name} @rtype: L{Attr} @raise KeyError: if there is no attribute named L{name} @raise CDFError: other errors in CDF library """ try: attrib = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attrib.global_scope() != self.global_scope: raise KeyError(name + ': no ' + self.attr_name + ' by that name.') return attrib def __setitem__(self, name, data): """Create an Attribute or change its entries @param name: name of Attribute to change @type name: str @param data: Entries to populate this Attribute with. Any existing Entries will be deleted! Another C{Attr} may be specified, in which case all its entries are copied. @type data: scalar, list, or L{Attr} """ if isinstance(data, AttrList): if name in self: del self[name] attr = self._get_or_create(name) for entryno in range(data.max_idx()): if data.has_entry(entryno): attr.new(data[entryno], data.type(entryno), entryno) else: attr = self._get_or_create(name) if isinstance(data, str_classes): data = [data] else: try: junk = len(data) except TypeError: data = [data] attr[:] = data del attr[len(data):] def __delitem__(self, name): """Delete an Attribute (and all its entries) @param name: name of Attribute to delete @type name: str """ try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) attr._delete() def __iter__(self, current=0): """Iterates over all Attr in this CDF or variable Returns name of one L{Attr} at a time until reaches the end. @note: Returned in number order. 
""" count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) while current < count.value: candidate = self.AttrType(self._cdf_file, current) if candidate.global_scope() == self.global_scope: if self.special_entry is None or \ candidate.has_entry(self.special_entry()): if str == bytes: value = yield(candidate._name) else: value = yield(candidate._name.decode()) if value != None: current = self[value].number() current += 1 def __repr__(self): """Returns representation of attribute list Cannot return anything that can be eval'd to create a copy of the list, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<' + self.__class__.__name__ + ':\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute list This is an 'informal' representation in that it cannot be evaluated directly to create an L{AttrList}. @return: all the data in this list of attributes @rtype: str """ if self._cdf_file._opened: return '\n'.join([key + ': ' + ( ('\n' + ' ' * (len(key) + 2)).join( [str(value[i]) + ' [' + lib.cdftypenames[value.type(i)] + ']' for i in range(value.max_idx() + 1) if value.has_entry(i)]) if isinstance(value, Attr) else str(value) + ' [' + lib.cdftypenames[self.type(key)] + ']' ) for (key, value) in sorted(self.items())]) else: if isinstance(self._cdf_file.pathname, str): return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname) else: return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname.decode('ascii')) def clone(self, master, name=None, new_name=None): """ Clones another attribute list, or one attribute from it, into this list. Parameters ========== master : AttrList the attribute list to copy from. This can be any dict-like object. Other Parameters ================ name : str (optional) name of attribute to clone (default: clone entire list) new_name : str (optional) name of the new attribute, default ``name`` """ if name is None: self._clone_list(master) else: self._clone_attr(master, name, new_name) def copy(self): """ Create a copy of this attribute list Returns ======= out : dict copy of the entries for all attributes in this list """ return dict((key, value[:] if isinstance(value, Attr) else value) for (key, value) in self.items()) def new(self, name, data=None, type=None): """ Create a new Attr in this AttrList Parameters ========== name : str name of the new Attribute Other Parameters ================ data data to put into the first entry in the new Attribute type CDF type of the first entry from :mod:`~pycdf.const`. Only used if data are specified. Raises ====== KeyError : if the name already exists in this list """ if name in self: raise KeyError(name + ' already exists.') #A zAttr without an Entry in this zVar will be a "get" not "create" attr = self._get_or_create(name) if data is not None: if self.special_entry is None: attr.new(data, type) else: attr.new(data, type, self.special_entry()) def rename(self, old_name, new_name): """ Rename an attribute in this list Renaming a zAttribute renames it for *all* zVariables in this CDF! Parameters ========== old_name : str the current name of the attribute new_name : str the new name of the attribute """ AttrList.__getitem__(self, old_name).rename(new_name) def from_dict(self, in_dict): """ Fill this list of attributes from a dictionary .. deprecated:: 0.1.5 Use :meth:`~pycdf.AttrList.clone` instead; it supports cloning from dictionaries. 
Parameters ========== in_dict : dict Attribute list is populated entirely from this dictionary; all existing attributes are deleted. """ warnings.warn("from_dict is deprecated and will be removed. Use clone.", DeprecationWarning) for k in in_dict: self[k] = in_dict[k] for k in list(self): if not k in in_dict: del self[k] def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{AttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name self[new_name] = master[name] def _clone_list(self, master): """Clones this attribute list from another @param master: the attribute list to copy from @type master: L{AttrList} """ for name in master: self._clone_attr(master, name) for name in list(self): #Can't iterate over a list we're changing if not name in master: del self[name] def _get_or_create(self, name): """Retrieve L{Attr} or create it if it doesn't exist @param name: name of the attribute to look up or create @type name: str @return: attribute with this name @rtype: L{Attr} """ attr = None try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status != const.NO_SUCH_ATTR: raise if attr is None: attr = self.AttrType(self._cdf_file, name, True) elif attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) return attr class gAttrList(AttrList): """ Object representing *all* the gAttributes in a CDF. Normally accessed as an attribute of an open :class:`CDF`: >>> global_attribs = cdffile.attrs Appears as a dictionary: keys are attribute names; each value is an attribute represented by a :class:`gAttr` object. To access the global attribute TEXT: >>> text_attr = cdffile.attrs['TEXT'] See Also ======== :class:`AttrList` """ AttrType = gAttr attr_name = 'gAttribute' global_scope = True def __len__(self): """ Number of gAttributes in this CDF Returns ======= out : int number of gAttributes in the CDF """ count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMgATTRS_, ctypes.byref(count)) return count.value class zAttrList(AttrList): """Object representing *all* the zAttributes in a zVariable. Normally accessed as an attribute of a :class:`Var` in an open CDF: >>> epoch_attribs = cdffile['Epoch'].attrs Appears as a dictionary: keys are attribute names, values are the value of the zEntry associated with the appropriate zVariable. Each vAttribute in a CDF may only have a *single* entry associated with each variable. The entry may be a string, a single numerical value, or a series of numerical values. Entries with multiple values are returned as an entire list; direct access to the individual elements is not possible. Example: finding the first dependency of (ISTP-compliant) variable ``Flux``: >>> print cdffile['Flux'].attrs['DEPEND_0'] zAttributes are shared among zVariables, one zEntry allowed per zVariable. (pyCDF hides this detail.) Deleting the last zEntry for a zAttribute will delete the underlying zAttribute. zEntries are created and destroyed by the usual dict methods on the zAttrlist: >>> epoch_attribs['new_entry'] = [1, 2, 4] #assign a list to new zEntry >>> del epoch_attribs['new_entry'] #delete the zEntry The type of the zEntry is guessed from data provided. 
The type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing zEntry corresponding to this zVar #. other zEntries in this zAttribute #. the type of this zVar #. data-matching constraints described in :py:meth:`CDF.new` See Also ======== :class:`AttrList` """ AttrType = zAttr attr_name = 'zAttribute' global_scope = False def __init__(self, zvar): """Initialize the attribute collection @param zvar: zVariable these attributes are in @param zvar: :py:class:`pycdf.Var` """ super(zAttrList, self).__init__(zvar.cdf_file, zvar._num) self._zvar = zvar def __getitem__(self, name): """Find an zEntry by name @param name: name of the zAttribute to return @type name: str @return: attribute named L{name} @rtype: L{zAttr} @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if attrib.has_entry(zvar_num): attrib._raw = self._zvar._raw return attrib[zvar_num] else: raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) def __delitem__(self, name): """Delete an zEntry by name @param name: name of the zEntry to delete @type name: str @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library @note: If this is the only remaining entry, the Attribute will be deleted. """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(str(name) + ': no such attribute for variable ' + str(self._zvar._name)) del attrib[zvar_num] if len(attrib) == 0: attrib._delete() def __setitem__(self, name, data): """Sets a zEntry by name The type of the zEntry is guessed from L{data}. The type is chosen to match the data; subject to that constraint, it will try to match (in order): 1. existing zEntry corresponding to this zVar 2. other zEntries in this zAttribute 3. the type of this zVar 4. data-matching constraints described in L{_Hyperslice.types} @param name: name of zAttribute; zEntry for this zVariable will be set in zAttribute by this name @type name: str @raise CDFError: errors in CDF library @raise ValueError: if unable to find a valid CDF type matching L{data}, or if L{data} is the wrong dimensions. """ try: attr = super(zAttrList, self).__getitem__(name) except KeyError: attr = zAttr(self._cdf_file, name, True) attr._raw = self._zvar._raw attr[self._zvar._num()] = data def __len__(self): """Number of zAttributes in this variable @return: number of zAttributes in the CDF which have entries for this variable. @rtype: int """ length = 0 count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) current = 0 while current < count.value: candidate = zAttr(self._cdf_file, current) if not candidate.global_scope(): if candidate.has_entry(self._zvar._num()): length += 1 current += 1 return length def type(self, name, new_type=None): """Find or change the CDF type of a zEntry in this zVar @param name: name of the zAttr to check or change @type name: str @param new_type: type to change it to, see :py:mod:`pycdf.const` @type new_type: ctypes.c_long @return: CDF variable type, see :py:mod:`pycdf.const` @rtype: int @note: If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) return attrib.type(zvar_num, new_type) def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{zAttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name if new_name in self: del self[new_name] self.new(new_name, master[name], master.type(name) if hasattr(master, 'type') else None)
def __init__(self, pathname, masterpath=None, create=None, readonly=None): """Open or create a CDF file. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. Raises ====== CDFError if CDF library reports an error CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save` when done. """ if masterpath is not None: #Looks like we want to create if create is False: raise ValueError('Cannot specify a master CDF without creating a CDF') if readonly is True: raise ValueError('Cannot create a CDF in readonly mode') if create and readonly: raise ValueError('Cannot create a CDF in readonly mode') try: self.pathname = pathname.encode() except AttributeError: raise ValueError( 'pathname must be string-like: {0}'.format(pathname)) self._handle = ctypes.c_void_p(None) self._opened = False if masterpath is None and not create: self._open(True if readonly is None else readonly) elif masterpath: self._from_master(masterpath.encode()) else: self._create() lib.call(const.SELECT_, const.CDF_zMODE_, ctypes.c_long(2)) self._attrlistref = weakref.ref(gAttrList(self)) self.backward = self.version()[0] < 3 self._var_nums = {} """Cache of name-to-number mappings for variables in this CDF""" self._attr_info = {} """Cache of name-to-(number, global) mappings for attributes in this CDF"""
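# --- Hedged usage sketch (not part of the original pycdf source) ---
# The constructor documented above supports three modes; all filenames here
# are hypothetical:
#
#     cdf_read = pycdf.CDF('existing.cdf')            # open existing, read-only
#     cdf_new  = pycdf.CDF('brand_new.cdf', '')       # create an empty new CDF
#     cdf_copy = pycdf.CDF('copy.cdf', 'master.cdf')  # create from a master CDF
#     for f in (cdf_read, cdf_new, cdf_copy):
#         f.close()                                   # or save(); see docstring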
1605
1665
#!/usr/bin/env python # -*- coding: utf-8 -*- """ das developers note: This a is modification of the original SpacePy pycdf package. All refereneces to the greater spacepy package have been removed to create a small standalone module. --cwp 2018-10-18 The libcdf.so location code has been changed to find the version installed in anaconda. --cwp 2020-04-06 This package provides a Python interface to the Common Data Format (CDF) library used for many NASA missions, available at http://cdf.gsfc.nasa.gov/. It is targeted at Python 2.6+ and should work without change on either Python 2 or Python 3. The interface is intended to be 'pythonic' rather than reproducing the C interface. To open or close a CDF and access its variables, see the :class:`CDF` class. Accessing data within the variables is via the :class:`Var` class. The :data:`lib` object provides access to some routines that affect the functionality of the library in general. The :mod:`~pycdf.const` module contains constants useful for accessing the underlying library. Authors: Jon Niehof Institution: University of New Hampshire Contact: [email protected] Copyright 2010-2015 Los Alamos National Security, LLC. """ __contact__ = 'Jon Niehof, [email protected]' try: from collections.abc import MutableMapping, MutableSequence except ImportError: from collections import MutableMapping, MutableSequence import ctypes import ctypes.util import datetime import operator import os import os.path import shutil import sys import tempfile import warnings import weakref import numpy import numpy.ma #Import const AFTER library loaded, so failed load doesn't leave half-imported #from . import const try: str_classes = (str, bytes, unicode) except NameError: str_classes = (str, bytes) class Library(object): """ Abstraction of the base CDF C library and its state. Not normally intended for end-user use. An instance of this class is created at package load time as the :data:`~pycdf.lib` variable, providing access to the underlying C library if necessary. The CDF library itself is described in section 2.1 of the CDF user's guide, as well as the CDF C reference manual. Calling the C library directly requires knowledge of :mod:`ctypes`. Instantiating this object loads the C library, see :doc:`/pycdf` docs for details. .. autosummary:: ~Library.call ~Library.check_status ~Library.datetime_to_epoch ~Library.datetime_to_epoch16 ~Library.datetime_to_tt2000 ~Library.epoch_to_datetime ~Library.epoch_to_epoch16 ~Library.epoch_to_num ~Library.epoch_to_tt2000 ~Library.epoch16_to_datetime ~Library.epoch16_to_epoch ~Library.epoch16_to_tt2000 ~Library.set_backward supports_int8 ~Library.tt2000_to_datetime ~Library.tt2000_to_epoch ~Library.tt2000_to_epoch16 v_datetime_to_epoch v_datetime_to_epoch16 v_datetime_to_tt2000 v_epoch_to_datetime v_epoch_to_tt2000 v_epoch16_to_datetime v_epoch16_to_tt2000 v_tt2000_to_datetime v_tt2000_to_epoch v_tt2000_to_epoch16 libpath version .. automethod:: call .. automethod:: check_status .. automethod:: datetime_to_epoch .. automethod:: datetime_to_epoch16 .. automethod:: datetime_to_tt2000 .. automethod:: epoch_to_datetime .. automethod:: epoch_to_epoch16 .. automethod:: epoch_to_num .. automethod:: epoch_to_tt2000 .. automethod:: epoch16_to_datetime .. automethod:: epoch16_to_epoch .. automethod:: epoch16_to_tt2000 .. automethod:: set_backward .. attribute:: supports_int8 True if this library supports INT8 and TIME_TT2000 types; else False. .. automethod:: tt2000_to_datetime .. automethod:: tt2000_to_epoch .. 
automethod:: tt2000_to_epoch16 .. method:: v_datetime_to_epoch(datetime) A vectorized version of :meth:`datetime_to_epoch` which takes a numpy array of datetimes as input and returns an array of epochs. .. method:: v_datetime_to_epoch16(datetime) A vectorized version of :meth:`datetime_to_epoch16` which takes a numpy array of datetimes as input and returns an array of epoch16. .. method:: v_datetime_to_tt2000(datetime) A vectorized version of :meth:`datetime_to_tt2000` which takes a numpy array of datetimes as input and returns an array of TT2000. .. method:: v_epoch_to_datetime(epoch) A vectorized version of :meth:`epoch_to_datetime` which takes a numpy array of epochs as input and returns an array of datetimes. .. method:: v_epoch_to_tt2000(epoch) A vectorized version of :meth:`epoch_to_tt2000` which takes a numpy array of epochs as input and returns an array of tt2000s. .. method:: v_epoch16_to_datetime(epoch0, epoch1) A vectorized version of :meth:`epoch16_to_datetime` which takes a numpy array of epoch16 as input and returns an array of datetimes. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_epoch16_to_tt2000(epoch16) A vectorized version of :meth:`epoch16_to_tt2000` which takes a numpy array of epoch16 as input and returns an array of tt2000s. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_tt2000_to_datetime(tt2000) A vectorized version of :meth:`tt2000_to_datetime` which takes a numpy array of tt2000 as input and returns an array of datetimes. .. method:: v_tt2000_to_epoch(tt2000) A vectorized version of :meth:`tt2000_to_epoch` which takes a numpy array of tt2000 as input and returns an array of epochs. .. method:: v_tt2000_to_epoch16(tt2000) A vectorized version of :meth:`tt2000_to_epoch16` which takes a numpy array of tt2000 as input and returns an array of epoch16. .. attribute:: libpath The path where pycdf found the CDF C library, potentially useful in debugging. If this contains just the name of a file (with no path information), then the system linker found the library for pycdf. On Linux, ``ldconfig -p`` may be useful for displaying the system's library resolution. .. attribute:: version Version of the CDF library, (version, release, increment, subincrement) """ def __init__(self, libpath=None, library=None): """Load the CDF C library. Searches for the library in the order: 1. Appropriately-named file in CDF_LIB 2. Appropriately-named file in CDF_BASE 3. Standard library search path @raise CDFError: BAD_DATA_TYPE if can't map types properly """ if not 'CDF_TMP' in os.environ: os.environ['CDF_TMP'] = tempfile.gettempdir() if not library: if not libpath: self.libpath, self._library = self._find_lib() if self._library is None: raise Exception(( 'Cannot load CDF C library; checked {0}. 
' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(self.libpath))) else: self._library = ctypes.CDLL(libpath) self.libpath = libpath else: self._library = library self.libpath = libpath self._library.CDFlib.restype = ctypes.c_long #commonly used, so set it up here self._library.EPOCHbreakdown.restype = ctypes.c_long self._library.computeEPOCH.restype = ctypes.c_double self._library.computeEPOCH.argtypes = [ctypes.c_long] * 7 self._library.computeEPOCH16.restype = ctypes.c_double self._library.computeEPOCH16.argtypes = [ctypes.c_long] * 10 + \ [ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDFsetFileBackward'): self._library.CDFsetFileBackward.restype = None self._library.CDFsetFileBackward.argtypes = [ctypes.c_long] #Map old name to the 3.7.1+ name if not hasattr(self._library, 'computeTT2000') \ and hasattr(self._library, 'CDF_TT2000_from_UTC_parts'): self._library.computeTT2000 \ = self._library.CDF_TT2000_from_UTC_parts if hasattr(self._library, 'computeTT2000'): self._library.computeTT2000.restype = ctypes.c_longlong self._library.computeTT2000.argtypes = \ [ctypes.c_double] *9 #Map old name to the 3.7.1+ name if not hasattr(self._library, 'breakdownTT2000') \ and hasattr(self._library, 'CDF_TT2000_to_UTC_parts'): self._library.breakdownTT2000 \ = self._library.CDF_TT2000_to_UTC_parts if hasattr(self._library, 'breakdownTT2000'): self._library.breakdownTT2000.restype = None self._library.breakdownTT2000.argtypes = \ [ctypes.c_longlong] + [ctypes.POINTER(ctypes.c_double)] * 9 if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH'): self._library.CDF_TT2000_to_UTC_EPOCH.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH.argtypes = [ctypes.c_longlong] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH'): self._library.CDF_TT2000_from_UTC_EPOCH.restype = ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH.argtypes = [ctypes.c_double] if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH16'): self._library.CDF_TT2000_to_UTC_EPOCH16.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH16.argtypes = \ [ctypes.c_longlong, ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH16'): self._library.CDF_TT2000_from_UTC_EPOCH16.restype = \ ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH16.argtypes = \ [ctypes.POINTER(ctypes.c_double * 2)] #Get CDF version information ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) sub = ctypes.c_char(b' ') self.call(const.GET_, const.LIB_VERSION_, ctypes.byref(ver), const.GET_, const.LIB_RELEASE_, ctypes.byref(rel), const.GET_, const.LIB_INCREMENT_, ctypes.byref(inc), const.GET_, const.LIB_subINCREMENT_, ctypes.byref(sub)) ver = ver.value rel = rel.value inc = inc.value sub = sub.value self.version = (ver, rel, inc, sub) self._del_middle_rec_bug = ver < 3 or (ver == 3 and (rel < 4 or (rel == 4 and inc < 1))) self.supports_int8 = (ver > 3 or (ver == 3 and rel >=4)) self.cdftypenames = {const.CDF_BYTE.value: 'CDF_BYTE', const.CDF_CHAR.value: 'CDF_CHAR', const.CDF_INT1.value: 'CDF_INT1', const.CDF_UCHAR.value: 'CDF_UCHAR', const.CDF_UINT1.value: 'CDF_UINT1', const.CDF_INT2.value: 'CDF_INT2', const.CDF_UINT2.value: 'CDF_UINT2', const.CDF_INT4.value: 'CDF_INT4', const.CDF_UINT4.value: 'CDF_UINT4', const.CDF_INT8.value: 'CDF_INT8', const.CDF_FLOAT.value: 'CDF_FLOAT', const.CDF_REAL4.value: 'CDF_REAL4', const.CDF_DOUBLE.value: 'CDF_DOUBLE', const.CDF_REAL8.value: 'CDF_REAL8', const.CDF_EPOCH.value: 'CDF_EPOCH', 
const.CDF_EPOCH16.value: 'CDF_EPOCH16', const.CDF_TIME_TT2000.value: 'CDF_TIME_TT2000', } self.numpytypedict = {const.CDF_BYTE.value: numpy.int8, const.CDF_CHAR.value: numpy.int8, const.CDF_INT1.value: numpy.int8, const.CDF_UCHAR.value: numpy.uint8, const.CDF_UINT1.value: numpy.uint8, const.CDF_INT2.value: numpy.int16, const.CDF_UINT2.value: numpy.uint16, const.CDF_INT4.value: numpy.int32, const.CDF_UINT4.value: numpy.uint32, const.CDF_INT8.value: numpy.int64, const.CDF_FLOAT.value: numpy.float32, const.CDF_REAL4.value: numpy.float32, const.CDF_DOUBLE.value: numpy.float64, const.CDF_REAL8.value: numpy.float64, const.CDF_EPOCH.value: numpy.float64, const.CDF_EPOCH16.value: numpy.dtype((numpy.float64, 2)), const.CDF_TIME_TT2000.value: numpy.int64, } self.timetypes = [const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value] if not self.supports_int8: del self.cdftypenames[const.CDF_INT8.value] del self.numpytypedict[const.CDF_INT8.value] del self.cdftypenames[const.CDF_TIME_TT2000.value] del self.numpytypedict[const.CDF_TIME_TT2000.value] elif sys.platform.startswith('linux') \ and os.uname()[4].startswith('arm') \ and hasattr(self._library, 'computeTT2000') \ and self._library.computeTT2000( 2010, 1, 1, 0, 0, 0, 0, 0, 0) != 315576066184000000: #TT2000 call failed, so probably need to type-pun #double arguments to variadic functions. #Calling convention for non-variadic functions with floats #is unique, but convention for ints is same as variadic. #So type-pun arguments to integers to force that calling #convention. if ctypes.sizeof(ctypes.c_longlong) != \ ctypes.sizeof(ctypes.c_double): warnings.warn('ARM with unknown type sizes; ' 'TT2000 functions will not work.') else: self._library.computeTT2000.argtypes = \ [ctypes.c_longlong] * 9 c_ll_p = ctypes.POINTER(ctypes.c_longlong) if self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( 2010)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) != 315576066184000000: warnings.warn('ARM with unknown calling convention; ' 'TT2000 functions will not work.') self.datetime_to_tt2000 = self._datetime_to_tt2000_typepunned v_epoch16_to_datetime = numpy.frompyfunc( self.epoch16_to_datetime, 2, 1) self.v_epoch16_to_datetime = \ lambda x: v_epoch16_to_datetime(x[..., 0], x[..., 1]) self.v_epoch_to_datetime = numpy.frompyfunc( self.epoch_to_datetime, 1, 1) self.v_tt2000_to_datetime = numpy.frompyfunc( self.tt2000_to_datetime, 1, 1) self.v_datetime_to_epoch = numpy.vectorize( self.datetime_to_epoch, otypes=[numpy.float64]) v_datetime_to_epoch16 = numpy.frompyfunc( self.datetime_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. 
We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_datetime_to_epoch16(x): retval = numpy.require(v_datetime_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_datetime_to_epoch16 = _v_datetime_to_epoch16 self.v_datetime_to_tt2000 = numpy.vectorize( self.datetime_to_tt2000, otypes=[numpy.int64]) self.v_epoch_to_tt2000 = numpy.vectorize( self.epoch_to_tt2000, otypes=[numpy.int64]) self.v_tt2000_to_epoch = numpy.vectorize( self.tt2000_to_epoch, otypes=[numpy.float64]) v_epoch16_to_tt2000 = numpy.frompyfunc( self.epoch16_to_tt2000, 2, 1) self.v_epoch16_to_tt2000 = \ lambda x: v_epoch16_to_tt2000(x[..., 0], x[..., 1]) v_tt2000_to_epoch16 = numpy.frompyfunc( self.tt2000_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_tt2000_to_epoch16(x): retval = numpy.require(v_tt2000_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_tt2000_to_epoch16 = _v_tt2000_to_epoch16 if not self.supports_int8: self.datetime_to_tt2000 = self._bad_tt2000 self.tt2000_to_datetime = self._bad_tt2000 self.v_datetime_to_tt2000 = self._bad_tt2000 self.v_tt2000_to_datetime = self._bad_tt2000 self.epoch_to_tt2000 = self._bad_tt2000 self.v_epoch_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch = self._bad_tt2000 self.v_tt2000_to_epoch = self._bad_tt2000 self.epoch_16_to_tt2000 = self._bad_tt2000 self.v_epoch16_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch16 = self._bad_tt2000 self.v_tt2000_to_epoch16 = self._bad_tt2000 #Default to V2 CDF self.set_backward(True) @staticmethod def _find_lib(): """ Search for the CDF library Searches in likely locations for CDF libraries and attempts to load them. Stops at first successful load and, if fails, reports all the files that were tried as libraries. Returns ======= out : tuple This is either (path to library, loaded library) or, in the event of failure, (None, list of libraries tried) """ failed = [] for libpath in Library._lib_paths(): try: lib = ctypes.CDLL(libpath) except: failed.append(libpath) else: return libpath, lib return (failed, None) @staticmethod def _lib_paths(): """Find candidate paths for the CDF library Does not check that the library is actually in any particular directory, just returns a list of possible locations, in priority order. Returns ======= out : generator of str paths that look like the CDF library """ #What the library might be named names = { 'win32': ['cdf.dll'], 'darwin': ['libcdf.dylib', 'cdf.dylib', 'libcdf.so'], 'linux2': ['libcdf.so'], 'linux': ['libcdf.so'], } names = names.get(sys.platform, ['libcdf.so']) #All existing CDF-library-like paths within a directory search_dir = lambda x: \ [os.path.join(x, fname) for fname in names if os.path.exists(os.path.join(x, fname))] # Only use anaconda locations... # Defined during builds ... if 'PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['PREFIX'], 'lib')): yield p # defined when conda is activated ... 
if 'CONDA_PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'lib')): yield p # Special subdirectory for anaconda unix packages on windows if 'LIBRARY_BIN' in os.environ: for p in search_dir(os.environ['LIBRARY_BIN']): yield p ctypespath = ctypes.util.find_library( 'cdf.dll' if sys.platform == 'win32' else 'cdf') if ctypespath: yield ctypespath def check_status(self, status, ignore=()): """ Raise exception or warning based on return status of CDF call Parameters ========== status : int status returned by the C library Other Parameters ================ ignore : sequence of ctypes.c_long CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. (Default none). Raises ====== CDFError : if status < CDF_WARN, indicating an error Warns ===== CDFWarning : if CDF_WARN <= status < CDF_OK, indicating a warning. Returns ======= out : int status (unchanged) """ if status == const.CDF_OK or status in ignore: return status if status < const.CDF_WARN: raise CDFError(status) else: warning = CDFWarning(status) warning.warn() return status def call(self, *args, **kwargs): """ Call the CDF internal interface Passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with :meth:`check_status`. Terminal NULL is automatically added to args. Parameters ========== args : various, see :mod:`ctypes` Passed directly to the CDF library interface. Useful constants are defined in the :mod:`~pycdf.const` module. Other Parameters ================ ignore : sequence of CDF statuses sequence of CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. Returns ======= out : int CDF status from the library Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ if 'ignore' in kwargs: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) ), kwargs['ignore']) else: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) )) def set_backward(self, backward=True): """ Set backward compatibility mode for new CDFs Unless backward compatible mode is set, CDF files created by the version 3 library can not be read by V2. Parameters ========== backward : boolean Set backward compatible mode if True; clear it if False. Raises ====== ValueError : if backward=False and underlying CDF library is V2 """ if self.version[0] < 3: if not backward: raise ValueError( 'Cannot disable backward-compatible mode for CDF version 2.') else: return self._library.CDFsetFileBackward(const.BACKWARDFILEon if backward else const.BACKWARDFILEoff) def epoch_to_datetime(self, epoch): """ Converts a CDF epoch value to a datetime Parameters ========== epoch : float epoch value from CDF Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
See Also ======== v_epoch_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) self._library.EPOCHbreakdown(ctypes.c_double(epoch), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999000) else: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, msec.value * 1000) def datetime_to_epoch(self, dt): """ Converts a Python datetime to a CDF Epoch value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : float epoch corresponding to dt See Also ======== v_datetime_to_epoch """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) micro = dt.microsecond % 1000 if micro >= 500 and dt.year < 9999: dt += datetime.timedelta(0, 0, 1000) return self._library.computeEPOCH(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000)) def epoch16_to_datetime(self, epoch0, epoch1): """ Converts a CDF epoch16 value to a datetime .. note:: The call signature has changed since SpacePy 0.1.2. Formerly this method took a single argument with two values; now it requires two arguments (one for each value). To convert existing code, replace ``epoch16_to_datetime(epoch)`` with ``epoch16_to_datetime(*epoch)``. Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. See Also ======== v_epoch16_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) usec = ctypes.c_long(0) nsec = ctypes.c_long(0) psec = ctypes.c_long(0) self._library.EPOCH16breakdown((ctypes.c_double * 2)(epoch0, epoch1), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec), ctypes.byref(psec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) micro = int(float(msec.value) * 1000 + float(usec.value) + float(nsec.value) / 1000 + float(psec.value) / 1e6 + 0.5) if micro < 1000000: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_epoch16(self, dt): """ Converts a Python datetime to a CDF Epoch16 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : list of float epoch16 corresponding to dt See Also ======== v_datetime_to_epoch16 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) #Default to "illegal epoch" epoch16 = (ctypes.c_double * 2)(-1., -1.) 
if self._library.computeEPOCH16(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0, 0, epoch16): return (-1., -1.) #Failure, so illegal epoch return (epoch16[0], epoch16[1]) def epoch_to_epoch16(self, epoch): """ Converts a CDF EPOCH to a CDF EPOCH16 value Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : (double, double) EPOCH16 corresponding to epoch """ e = numpy.require(epoch, numpy.float64) s = numpy.trunc(e / 1000.0) #ugly numpy stuff, probably a better way.... res = numpy.hstack((s, (e - s * 1000.0) * 1e9)) if len(res) <= 2: return res newshape = list(res.shape[0:-2]) newshape.append(res.shape[-1] // 2) newshape.append(2) return numpy.rollaxis(res.reshape(newshape), -1, -2) def epoch_to_num(self, epoch): """ Convert CDF EPOCH to matplotlib number. Same output as :func:`~matplotlib.dates.date2num` and useful for plotting large data sets without converting the times through datetime. Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : double Floating point number representing days since 0001-01-01. """ #date2num day 1 is 1/1/1 00UT #epoch 1/1/1 00UT is 31622400000.0 (millisecond) return (epoch - 31622400000.0) / (24 * 60 * 60 * 1000.0) + 1.0 def epoch16_to_epoch(self, epoch16): """ Converts a CDF EPOCH16 to a CDF EPOCH value Parameters ========== epoch16 : (double, double) EPOCH16 to convert. Lists and numpy arrays are acceptable. LAST dimension should be 2: the two pairs of EPOCH16 Returns ======= out : double EPOCH corresponding to epoch16 """ e = numpy.require(epoch16, numpy.float64) return e[..., 0] * 1000.0 + numpy.round(e[..., 1] / 1e9) def tt2000_to_datetime(self, tt2000): """ Converts a CDF TT2000 value to a datetime .. note:: Although TT2000 values support leapseconds, Python's datetime object does not. Any times after 23:59:59.999999 will be truncated to 23:59:59.999999. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
See Also ======== v_tt2000_to_datetime """ yyyy = ctypes.c_double(0) mm = ctypes.c_double(0) dd = ctypes.c_double(0) hh = ctypes.c_double(0) min = ctypes.c_double(0) sec = ctypes.c_double(0) msec = ctypes.c_double(0) usec = ctypes.c_double(0) nsec = ctypes.c_double(0) self._library.breakdownTT2000( ctypes.c_longlong(tt2000), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) sec = int(sec.value) if sec >= 60: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), 59, 999999) micro = int(msec.value * 1000 + usec.value + nsec.value / 1000 + 0.5) if micro < 1000000: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_tt2000(self, dt): """ Converts a Python datetime to a CDF TT2000 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0) def _datetime_to_tt2000_typepunned(self, dt): """ Converts a Python datetime to a CDF TT2000 value Typepunned version that passes doubles as longlongs, to get around ARM calling convention oddness. Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ c_ll_p = ctypes.POINTER(ctypes.c_longlong) if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( dt.year)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.month)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.day)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.hour)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.minute)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.second)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond // 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond % 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) def epoch_to_tt2000(self, epoch): """ Converts a CDF EPOCH to a CDF TT2000 value Parameters ========== epoch : double EPOCH to convert Returns ======= out : int tt2000 corresponding to epoch See Also ======== v_epoch_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH(epoch) def tt2000_to_epoch(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH .. note:: Although TT2000 values support leapseconds, CDF EPOCH values do not. 
Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double EPOCH corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch """ return self._library.CDF_TT2000_to_UTC_EPOCH(tt2000) def epoch16_to_tt2000(self, epoch0, epoch1): """ Converts a CDF epoch16 value to TT2000 .. note:: Because TT2000 does not support picoseconds, the picoseconds value in epoch is ignored (i.e., truncated.) Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : long TT2000 corresponding to epoch. See Also ======== v_epoch16_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH16( (ctypes.c_double * 2)(epoch0, epoch1)) def tt2000_to_epoch16(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH16 .. note:: Although TT2000 values support leapseconds, CDF EPOCH16 values do not. Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double, double EPOCH16 corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch16 """ #Default to "illegal epoch" if isn't populated epoch16 = (ctypes.c_double * 2)(-1., -1.) if self._library.CDF_TT2000_to_UTC_EPOCH16(tt2000, epoch16): return (-1., -1.) #Failure; illegal epoch return (epoch16[0], epoch16[1]) def _bad_tt2000(*args, **kwargs): """Convenience function for complaining that TT2000 not supported""" raise NotImplementedError( 'TT2000 functions require CDF library 3.4.0 or later') def download_library(): """Download and install the CDF library""" if sys.platform != 'win32': raise NotImplementedError( 'CDF library install only supported on Windows') try: import html.parser as HTMLParser except ImportError: import HTMLParser #https://stackoverflow.com/questions/1713038/super-fails-with-error-typeerror-argument-1-must-be-type-not-classobj class LinkParser(HTMLParser.HTMLParser, object): def __init__(self, *args, **kwargs): self.links_found = [] super(LinkParser, self).__init__(*args, **kwargs) def handle_starttag(self, tag, attrs): if tag != 'a' or attrs[0][0] != 'href': return self.links_found.append(attrs[0][1]) import re import subprocess try: import urllib.request as u except ImportError: import urllib as u # Removed reference to spacepy #import spacepy #if spacepy.config.get('user_agent', None): # class AppURLopener(u.FancyURLopener): # version = spacepy.config['user_agent'] # u._urlopener = AppURLopener() baseurl = 'https://spdf.sci.gsfc.nasa.gov/pub/software/cdf/dist/' url = u.urlopen(baseurl) listing = url.read() url.close() p = LinkParser() p.feed(listing) cdfdist = [l for l in p.links_found if re.match('^cdf3\d_\d(?:_\d)?/$', l)] if not cdfdist: raise RuntimeError( "Couldn't find CDF distribution directory to download") cdfdist.sort(key=lambda x: x.rstrip('/').split('_')) cdfverbase = cdfdist[-1].rstrip('/') instfname = cdfverbase + ('_0' if cdfverbase.count('_') == 1 else '') + \ '-setup-{0}.exe'.format(len('%x' % sys.maxsize)*4) insturl = baseurl + cdfverbase + '/windows/' + instfname tmpdir = tempfile.mkdtemp() try: fname, status = u.urlretrieve(insturl, os.path.join(tmpdir, instfname)) subprocess.check_call([fname, '/install', '/q1'], shell=False) finally: shutil.rmtree(tmpdir) _libpath, _library = Library._find_lib() 
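# --- Hedged usage sketch (not part of the original pycdf source) ---
# The Library time-conversion helpers documented above are normally reached
# through the module-level 'lib' object created just below, e.g. after
# 'import pycdf' (return values shown are illustrative round trips, assuming
# the CDF C library loaded and supports TT2000):
#
#     import datetime
#     dt = datetime.datetime(2010, 1, 1)
#     ep = pycdf.lib.datetime_to_epoch(dt)     # CDF_EPOCH (milliseconds)
#     tt = pycdf.lib.datetime_to_tt2000(dt)    # CDF_TIME_TT2000 (nanoseconds)
#     pycdf.lib.epoch_to_datetime(ep)          # -> datetime(2010, 1, 1, 0, 0)
#     pycdf.lib.tt2000_to_datetime(tt)         # -> datetime(2010, 1, 1, 0, 0)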
if _library is None: raise Exception(('Cannot load CDF C library; checked {0}. ' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(_libpath))) from . import const lib = Library(_libpath, _library) """Module global library object. Initalized at module load time so all classes have ready access to the CDF library and a common state. E.g: >>> import pycdf >>> pycdf.lib.version (3, 3, 0, ' ') """ class CDFException(Exception): """ Base class for errors or warnings in the CDF library. Not normally used directly, but in subclasses :class:`CDFError` and :class:`CDFWarning`. Error messages provided by this class are looked up from the underlying C library. """ def __init__(self, status): """ Create a CDF Exception Uses CDF C library to look up an appropriate error message. Parameters ========== status : ctypes.c_long CDF status """ self.status = status self.string = 'CDF error ' + repr(status) + ', unable to get details.' message = ctypes.create_string_buffer(const.CDF_STATUSTEXT_LEN + 1) try: retval = lib._library.CDFlib(const.SELECT_, const.CDF_STATUS_, ctypes.c_long(status), const.GET_, const.STATUS_TEXT_, message, const.NULL_) if retval == const.CDF_OK: if isinstance(message.value, str): self.string = message.value elif isinstance(message.value, bytes): self.string = message.value.decode() except: pass def __str__(self): """ Error string associated with the library error. Returns ======= out : str Error message from the CDF library. """ return self.string class CDFError(CDFException): """Raised for an error in the CDF library.""" pass class CDFWarning(CDFException, UserWarning): """Used for a warning in the CDF library.""" def warn(self, level=4): """ Issues a warning based on the information stored in my exception Intended for use in check_status or similar wrapper function. Other Parameters ================ level : int optional (default 3), how far up the stack the warning should be reported. Passed directly to :class:`warnings.warn`. """ warnings.warn(self, self.__class__, level) class EpochError(Exception): """Used for errors in epoch routines""" pass def _compress(obj, comptype=None, param=None): """Set or check the compression of a :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param obj: object on which to set or check compression @type obj: :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param comptype: type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :py:mod:`pycdf.const`. If not specified, will not change compression. @type comptype: ctypes.c_long @param param: Compression parameter, see CDF CRM 4.10 and :py:mod:`pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
@type param: ctypes.c_long @return: (comptype, param) currently in effect @rtype: tuple """ if isinstance(obj, CDF): COMPRESSION_ = const.CDF_COMPRESSION_ elif isinstance(obj, Var): COMPRESSION_ = const.zVAR_COMPRESSION_ else: raise ValueError('Must specify a CDF or Var type.') validparams = {const.NO_COMPRESSION.value: [ctypes.c_long(0)], const.RLE_COMPRESSION.value: [const.RLE_OF_ZEROs], const.HUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.AHUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.GZIP_COMPRESSION.value: [ctypes.c_long(5), ctypes.c_long(1), ctypes.c_long(2), ctypes.c_long(3), ctypes.c_long(4), ctypes.c_long(6), ctypes.c_long(7), ctypes.c_long(8), ctypes.c_long(9), ], } comptypes = [const.NO_COMPRESSION, const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION, const.GZIP_COMPRESSION] comptypevalues = [i.value for i in comptypes] if comptype != None: if not hasattr(comptype, 'value'): comptype = ctypes.c_long(comptype) if param is None: if not comptype.value in validparams: raise CDFError(const.BAD_COMPRESSION) param = validparams[comptype.value][0] paramlist = (ctypes.c_long * 1)(param) obj._call(const.PUT_, COMPRESSION_, comptype, paramlist) params = (ctypes.c_long * const.CDF_MAX_PARMS)(*([0] * const.CDF_MAX_PARMS)) comptype = ctypes.c_long(0) percent = ctypes.c_long(0) obj._call(const.GET_, COMPRESSION_, ctypes.byref(comptype), ctypes.byref(params), ctypes.byref(percent)) param = params[0] if not comptype.value in comptypevalues: raise CDFError(const.BAD_COMPRESSION) validparamvalues = [i.value for i in validparams[comptype.value]] if not param in validparamvalues: raise CDFError(const.BAD_COMPRESSION_PARM) comptype = comptypes[comptypevalues.index(comptype.value)] if comptype in (const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION): param = validparams[comptype.value][validparamvalues.index(param)] return (comptype, param) class CDF(MutableMapping): """ Python object representing a CDF file. Open or create a CDF file by creating an object of this class. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. A readonly CDF with many variables may be slow to close. See :meth:`readonly`. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :meth:`close` or :meth:`save` when done. .. note:: Existing CDF files are opened read-only by default, see :meth:`readonly` to change. CDF supports the `with <http://docs.python.org/tutorial/inputoutput.html#methods-of-file-objects>`_ keyword, like other file objects, so: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... #do brilliant things with the CDF will open the CDF, execute the indented statements, and close the CDF when finished or when an error occurs. The `python docs <http://docs.python.org/reference/compound_stmts.html#with>`_ include more detail on this 'context manager' ability. 
CDF objects behave like a python `dictionary <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_, where the keys are names of variables in the CDF, and the values, :class:`Var` objects. As a dictionary, they are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_ and it is easy to loop over all of the variables in a file. Some examples: #. List the names of all variables in the open CDF ``cdffile``: >>> cdffile.keys() >>> for k in cdffile: #Alternate ... print(k) #. Get a :class:`Var` object for the variable named ``Epoch``: >>> epoch = cdffile['Epoch'] #. Determine if a CDF contains a variable named ``B_GSE``: >>> if 'B_GSE' in cdffile: ... print('B_GSE is in the file') ... else: ... print('B_GSE is not in the file') #. Find how many variables are in the file: >>> print(len(cdffile)) #. Delete the variable ``Epoch`` from the open CDF file ``cdffile``: >>> del cdffile['Epoch'] #. Display a summary of variables and types in open CDF file ``cdffile``: >>> print(cdffile) #. Open the CDF named ``cdf_filename.cdf``, read *all* the data from all variables into dictionary ``data``, and close it when done or if an error occurs: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... data = cdffile.copy() This last example can be very inefficient as it reads the entire CDF. Normally it's better to treat the CDF as a dictionary and access only the data needed, which will be pulled transparently from disc. See :class:`Var` for more subtle examples. Potentially useful dictionary methods and related functions: - `in <http://docs.python.org/reference/expressions.html#in>`_ - `keys <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_ - :py:func:`len` - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - :py:func:`sorted` - :py:func:`~spacepy.toolbox.dictree` The CDF user's guide section 2.2 has more background information on CDF files. The :attr:`~CDF.attrs` Python attribute acts as a dictionary referencing CDF attributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`gAttrList` for more on the dictionary of global attributes. Creating a new CDF from a master (skeleton) CDF has similar syntax to opening one: >>> cdffile = pycdf.CDF('cdf_filename.cdf', 'master_cdf_filename.cdf') This creates and opens ``cdf_filename.cdf`` as a copy of ``master_cdf_filename.cdf``. Using a skeleton CDF is recommended over making a CDF entirely from scratch, but this is possible by specifying a blank master: >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') When CDFs are created in this way, they are opened read-write, see :py:meth:`readonly` to change. By default, new CDFs (without a master) are created in version 2 (backward-compatible) format. To create a version 3 CDF, use :meth:`Library.set_backward`: >>> pycdf.lib.set_backward(False) >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') Add variables by direct assignment, which will automatically set type and dimension based on the data provided: >>> cdffile['new_variable_name'] = [1, 2, 3, 4] or, if more control is needed over the type and dimensions, use :py:meth:`new`. Although it is supported to assign Var objects to Python variables for convenience, there are some minor pitfalls that can arise when changing a CDF that will not affect most users. 
This is only a concern when assigning a zVar object to a Python variable, changing the CDF through some other variable, and then trying to use the zVar object via the originally assigned variable. Deleting a variable: >>> var = cdffile['Var1'] >>> del cdffile['Var1'] >>> var[0] #fail, no such variable Renaming a variable: >>> var = cdffile['Var1'] >>> cdffile['Var1'].rename('Var2') >>> var[0] #fail, no such variable Renaming via the same variable works: >>> var = cdffile['Var1'] >>> var.rename('Var2') >>> var[0] #succeeds, aware of new name Deleting a variable and then creating another variable with the same name may lead to some surprises: >>> var = cdffile['Var1'] >>> var[...] = [1, 2, 3, 4] >>> del cdffile['Var1'] >>> cdffile.new('Var1', data=[5, 6, 7, 8] >>> var[...] [5, 6, 7, 8] .. autosummary:: ~CDF.attr_num ~CDF.attrs ~CDF.add_attr_to_cache ~CDF.add_to_cache ~CDF.backward ~CDF.checksum ~CDF.clear_attr_from_cache ~CDF.clear_from_cache ~CDF.clone ~CDF.close ~CDF.col_major ~CDF.compress ~CDF.copy ~CDF.from_data ~CDF.new ~CDF.raw_var ~CDF.readonly ~CDF.save ~CDF.var_num ~CDF.version .. attribute:: CDF.attrs Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. .. attribute:: CDF.backward True if this CDF was created in backward-compatible mode (for opening with CDF library before 3.x) .. automethod:: add_to_cache .. automethod:: add_attr_to_cache .. automethod:: attr_num .. automethod:: checksum .. automethod:: clear_from_cache .. automethod:: clear_attr_from_cache .. automethod:: clone .. automethod:: close .. automethod:: col_major .. automethod:: compress .. automethod:: copy .. automethod:: from_data .. automethod:: new .. automethod:: raw_var .. automethod:: readonly .. automethod:: save .. automethod:: var_num .. automethod:: version """ def __init__(self, pathname, masterpath=None, create=None, readonly=None): """Open or create a CDF file. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. Raises ====== CDFError if CDF library reports an error CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save` when done. 
""" if masterpath is not None: #Looks like we want to create if create is False: raise ValueError('Cannot specify a master CDF without creating a CDF') if readonly is True: raise ValueError('Cannot create a CDF in readonly mode') if create and readonly: raise ValueError('Cannot create a CDF in readonly mode') try: self.pathname = pathname.encode() except AttributeError: raise ValueError( 'pathname must be string-like: {0}'.format(pathname)) self._handle = ctypes.c_void_p(None) self._opened = False if masterpath is None and not create: self._open(True if readonly is None else readonly) elif masterpath: self._from_master(masterpath.encode()) else: self._create() lib.call(const.SELECT_, const.CDF_zMODE_, ctypes.c_long(2)) self._attrlistref = weakref.ref(gAttrList(self)) self.backward = self.version()[0] < 3 self._var_nums = {} """Cache of name-to-number mappings for variables in this CDF""" self._attr_info = {} """Cache of name-to-(number, global) mappings for attributes in this CDF""" def __del__(self): """Destructor; called when CDF object is destroyed. Close CDF file if there is still a valid handle. .. note:: To avoid data loss, explicitly call :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save`. """ if self._opened: self.close() def __delitem__(self, name): """Delete a zVariable in this CDF, by name or number Parameters ========== name : string or int Name or number of the CDF variable .. note: Variable numbers may change if variables are added or removed. Examples ======== Delete the variable ``Epoch`` from the open CDF file ``cdffile``. >>> del cdffile['Epoch'] """ self[name]._delete() def __enter__(self): """Context manager entrance function.""" return self def __exit__(self, type, value, traceback): """Context manager exit function. Close CDF file. """ self.close() def __getitem__(self, name): """Gets a zVariable in this CDF, by name or number The CDF acts like a dict @param name: Name or number of the CDF variable @type name: string or int @return: CDF variable named or numbered L{name} @rtype: :py:class:`pycdf.Var` @raise KeyError: for pretty much any problem in lookup @note: variable numbers may change if variables are added or removed. """ try: return Var(self, name) except CDFException as e: raise KeyError('{0}: {1}'.format(name, e)) def __setitem__(self, name, data): """Writes data to a zVariable in this CDF If the zVariable does not exist, will create one matching L{data}. If it does exist, will attempt to write L{data} to it without changing the type or dimensions. @param name: name or number of the variable to write @type name: str or int @param data: data to write, or a :py:class:`pycdf.Var` to copy """ if isinstance(data, Var): self.clone(data, name) elif name in self: self[name][...] 
= data if hasattr(data, 'attrs'): self[name].attrs.clone(data.attrs) else: self.new(name, data) def __iter__(self, current = 0): """Iterates over zVars in CDF Iterators for dicts return keys @note: Returned in variable-number order """ while current < self.__len__(): name = self[current].name() value = (yield name) if value is None: current += 1 else: current = self[value]._num() current += 1 def __len__(self): """Implements 'length' of CDF (number of zVars) @return: number of zVars in the CDF @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, const.CDF_NUMzVARS_, ctypes.byref(count)) return count.value def __contains__(self, key): """Determines whether a particular variable name is in the CDF @note: Essentially an efficiency function; L{__iter__} is called if this isn't defined @param key: key/variable name to check @type key: string @return: True if L{key} is the name of a variable in CDF, else False @rtype: Boolean """ try: foo = self[key] return True except KeyError as e: expected = str(key) + \ ": NO_SUCH_VAR: Named variable not found in this CDF." if expected in e.args: return False raise def __repr__(self): """Returns representation of CDF Cannot return anything that can be eval'd to create a copy of the CDF, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<CDF:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the CDF This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.CDF`, just the names, types, and sizes of all variables. (Attributes are not listed.) @return: description of the variables in the CDF @rtype: str """ if self._opened: return '\n'.join([key + ': ' + str(value) for (key, value) in sorted(self.items())]) #can get away with this sort because second value in tuple isn't #compared unless first are different, and variable name is unique. else: if isinstance(self.pathname, str): return 'Closed CDF {0}'.format(self.pathname) else: return 'Closed CDF {0}'.format(self.pathname.decode('ascii')) def _open(self, readonly=True): """Opens the CDF file (called on init) Will open an existing CDF file read/write. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.OPEN_, const.CDF_, self.pathname, ctypes.byref(self._handle)) self._opened = True if readonly: #Default is RW self.readonly(readonly) def _create(self): """Creates (and opens) a new CDF file Created at ``pathname``. Assumes zero-dimension r variables Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.CREATE_, const.CDF_, self.pathname, ctypes.c_long(0), (ctypes.c_long * 1)(0), ctypes.byref(self._handle)) self._opened = True def _from_master(self, master_path): """Creates a new CDF from a master CDF file ``master_path`` is copied to ``pathname`` and opened. Parameters ========== master_path : string location of the master CDF file Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. 
note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ if os.path.exists(self.pathname): raise CDFError(const.CDF_EXISTS) shutil.copy2(master_path, self.pathname) self._open(False) def _call(self, *args, **kwargs): """Select this CDF as current and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. Parameters ========== args : various, see :py:mod:`ctypes`. Passed directly to the CDF library interface. Useful constants are defined in the :doc:`const <pycdf_const>` module of this package. Returns ======= out : ctypes.c_long CDF status from the library .. note: Terminal NULL_ is automatically added to ``args``. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. """ return lib.call(const.SELECT_, const.CDF_, self._handle, *args, **kwargs) def clone(self, zVar, name=None, data=True): """ Clone a zVariable (from another CDF or this) into this CDF Parameters ========== zVar : :py:class:`Var` variable to clone Other Parameters ================ name : str Name of the new variable (default: name of the original) data : boolean (optional) Copy data, or only type, dimensions, variance, attributes? (default: True, copy data as well) Returns ======= out : :py:class:`Var` The newly-created zVar in this CDF """ if name is None: name = zVar.name() if name in self: del self[name] self.new(name, type=zVar.type(), recVary=zVar.rv(), dimVarys=zVar.dv(), dims=zVar._dim_sizes(), n_elements=zVar._nelems()) self[name].compress(*zVar.compress()) self[name].attrs.clone(zVar.attrs) if data: r = zVar._raw zVar._raw = True self.raw_var(name)[...] = zVar[...] zVar._raw = r return zVar def col_major(self, new_col=None): """ Finds the majority of this CDF file Other Parameters ================ new_col : boolean Specify True to change to column-major, False to change to row major, or do not specify to check the majority rather than changing it. (default is check only) Returns ======= out : boolean True if column-major, false if row-major """ if new_col != None: new_maj = const.COLUMN_MAJOR if new_col else const.ROW_MAJOR self._call(const.PUT_, const.CDF_MAJORITY_, new_maj) maj = ctypes.c_long(0) self._call(const.GET_, const.CDF_MAJORITY_, ctypes.byref(maj)) if not maj.value in (const.ROW_MAJOR.value, const.COLUMN_MAJOR.value): raise CDFError(const.BAD_MAJORITY) return maj.value == const.COLUMN_MAJOR.value def readonly(self, ro=None): """ Sets or check the readonly status of this CDF If the CDF has been changed since opening, setting readonly mode will have no effect. .. note:: Closing a CDF that has been opened readonly, or setting readonly False, may take a substantial amount of time if there are many variables in the CDF, as a (potentially large) cache needs to be cleared. Consider specifying ``readonly=False`` when opening the file if this is an issue. However, this may make some reading operations slower. Other Parameters ================ ro : Boolean True to set the CDF readonly, False to set it read/write, or leave out to check only. 
Returns ======= out : Boolean True if CDF is read-only, else False Raises ====== CDFError : if bad mode is set """ if ro == True: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYon) elif ro == False: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYoff) mode = ctypes.c_long(0) self._call(const.CONFIRM_, const.CDF_READONLY_MODE_, ctypes.byref(mode)) if mode.value == const.READONLYon.value: return True elif mode.value == const.READONLYoff.value: return False else: raise CDFError(const.BAD_READONLY_MODE.value) def checksum(self, new_val=None): """ Set or check the checksum status of this CDF. If checksums are enabled, the checksum will be verified every time the file is opened. Other Parameters ================ new_val : boolean True to enable checksum, False to disable, or leave out to simply check. Returns ======= out : boolean True if the checksum is enabled or False if disabled """ if new_val != None: self._call(const.PUT_, const.CDF_CHECKSUM_, const.MD5_CHECKSUM if new_val else const.NO_CHECKSUM) chk = ctypes.c_long(0) self._call(const.GET_, const.CDF_CHECKSUM_, ctypes.byref(chk)) if not chk.value in (const.MD5_CHECKSUM.value, const.NO_CHECKSUM.value): raise CDFError(const.BAD_CHECKSUM) return chk.value == const.MD5_CHECKSUM.value def close(self): """ Closes the CDF file Although called on object destruction (:meth:`~CDF.__del__`), to ensure all data are saved, the user should explicitly call :meth:`~CDF.close` or :meth:`~CDF.save`. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.CLOSE_, const.CDF_) self._opened = False def compress(self, comptype=None, param=None): """ Set or check the compression of this CDF Sets compression on entire *file*, not per-variable. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) Returns ======= out : tuple (comptype, param) currently in effect See Also ======== :meth:`Var.compress` Examples ======== Set file ``cdffile`` to gzip compression, compression level 9: >>> cdffile.compress(pycdf.const.GZIP_COMPRESSION, 9) """ return _compress(self, comptype, param) def new(self, name, data=None, type=None, recVary=True, dimVarys=None, dims=None, n_elements=None, compress=None, compress_param=None): """ Create a new zVariable in this CDF .. note:: Either ``data`` or ``type`` must be specified. If type is not specified, it is guessed from ``data``. Parameters ========== name : str name of the new variable Other Parameters ================ data data to store in the new variable. If this has a an ``attrs`` attribute (e.g., :class:`~spacepy.datamodel.dmarray`), it will be used to populate attributes of the new variable. type : ctypes.c_long CDF type of the variable, from :mod:`~pycdf.const`. See section 2.5 of the CDF user's guide for more information on CDF data types. recVary : boolean record variance of the variable (default True) dimVarys : list of boolean dimension variance of each dimension, default True for all dimensions. 
dims : list of int size of each dimension of this variable, default zero-dimensional. Note this is the dimensionality as defined by CDF, i.e., for record-varying variables it excludes the leading record dimension. See :py:class:`Var`. n_elements : int number of elements, should be 1 except for CDF_CHAR, for which it's the length of the string. compress : ctypes.c_long Compression to apply to this variable, default None. See :py:meth:`Var.compress`. compress_param : ctypes.c_long Compression parameter if compression used; reasonable default is chosen. See :py:meth:`Var.compress`. Returns ======= out : :py:class:`Var` the newly-created zVariable Raises ====== ValueError : if neither data nor sufficient typing information is provided. Notes ===== Any given data may be representable by a range of CDF types; if the type is not specified, pycdf will guess which the CDF types which can represent this data. This breaks down to: #. If input data is a numpy array, match the type of that array #. Proper kind (numerical, string, time) #. Proper range (stores highest and lowest number provided) #. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: #. Type that matches precision of data first, then #. integer type before float type, then #. Smallest type first, then #. signed type first, then #. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if ``data`` specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: #. absolute values between 0 and 3e-39 #. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. """ if type in (const.CDF_EPOCH16, const.CDF_INT8, const.CDF_TIME_TT2000) \ and self.backward: raise ValueError('Cannot use EPOCH16, INT8, or TIME_TT2000 ' 'in backward-compatible CDF') if not lib.supports_int8 and \ type in (const.CDF_INT8, const.CDF_TIME_TT2000): raise ValueError('INT8 and TIME_TT2000 require CDF library 3.4.0') if data is None: if type is None: raise ValueError('Must provide either data or a CDF type.') if dims is None: dims = [] if n_elements is None: n_elements = 1 else: (guess_dims, guess_types, guess_elements) = _Hyperslice.types(data) if dims is None: if recVary: if guess_dims == (): raise ValueError( 'Record-varying data cannot be scalar. 
' 'Specify NRV with CDF.new() or put data in array.') dims = guess_dims[1:] else: dims = guess_dims if type is None: type = guess_types[0] if type == const.CDF_EPOCH16.value and self.backward: type = const.CDF_EPOCH if n_elements is None: n_elements = guess_elements if dimVarys is None: dimVarys = [True for i in dims] recVary = const.VARY if recVary else const.NOVARY dimVarys = [const.VARY if dimVary else const.NOVARY for dimVary in dimVarys] if not hasattr(type, 'value'): type = ctypes.c_long(type) if type.value == const.CDF_INT8.value and not lib.supports_int8: raise ValueError( '64-bit integer support require CDF library 3.4.0') if type.value in (const.CDF_EPOCH16.value, const.CDF_INT8.value, const.CDF_TIME_TT2000.value) \ and self.backward: raise ValueError('Data requires EPOCH16, INT8, or TIME_TT2000; ' 'incompatible with backward-compatible CDF') new_var = Var(self, name, type, n_elements, dims, recVary, dimVarys) if compress != None: new_var.compress(compress, compress_param) if data is not None: new_var[...] = data if hasattr(data, 'attrs'): new_var.attrs.clone(data.attrs) return new_var def raw_var(self, name): """ Get a "raw" :class:`Var` object. Normally a :class:`Var` will perform translation of values for certain types (to/from Unicode for CHAR variables on Py3k, and to/from datetime for all time types). A "raw" object does not perform this translation, on read or write. This does *not* affect the data on disk, and in fact it is possible to maintain multiple Python objects with access to the same zVariable. Parameters ========== name : str name or number of the zVariable """ v = self[name] v._raw = True return v def save(self): """ Saves the CDF file but leaves it open. If closing the CDF, :meth:`close` is sufficient; there is no need to call :meth:`save` before :meth:`close`. .. note:: Relies on an undocumented call of the CDF C library, which is also used in the Java interface. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.SAVE_, const.CDF_) def copy(self): """ Make a copy of all data and attributes in this CDF Returns ======= out : :py:class:`CDFCopy` :class:`~spacepy.datamodel.SpaceData`-like object of all data """ return CDFCopy(self) def version(self): """ Get version of library that created this CDF Returns ======= out : tuple version of CDF library, in form (version, release, increment) """ ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) self._call(const.GET_, const.CDF_VERSION_, ctypes.byref(ver), const.GET_, const.CDF_RELEASE_, ctypes.byref(rel), const.GET_, const.CDF_INCREMENT_, ctypes.byref(inc)) return (ver.value, rel.value, inc.value) def _get_attrs(self): """Get attribute list Provide access to the CDF's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = gAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. 
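A short sketch of dict-like access (the attribute name and value are illustrative):

>>> cdffile.attrs['TITLE'] = 'Sample mission data'
>>> cdffile.attrs['TITLE'][0]
'Sample mission data'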
""") def var_num(self, varname): """Get the variable number of a particular variable name This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : int Variable number of this zvariable. """ num = self._var_nums.get(varname, None) if num is None: #Copied from Var._get, which can hopefully be thinned varNum = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMBER_, varname, ctypes.byref(varNum)) num = varNum.value self._var_nums[varname] = num return num def attr_num(self, attrname): """Get the attribute number and scope by attribute name This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : tuple attribute number, scope (True for global) of this attribute """ res = self._attr_info.get(attrname, None) if res is None: #Copied from Var._get, which can hopefully be thinned attrNum = ctypes.c_long(0) self._call(const.GET_, const.ATTR_NUMBER_, attrname, ctypes.byref(attrNum)) scope = ctypes.c_long(0) self._call(const.SELECT_, const.ATTR_, attrNum, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) res = (attrNum.value, scope) self._attr_info[attrname] = res return res def clear_attr_from_cache(self, attrname): """Mark an attribute deleted in the name-to-number cache Will remove an attribute, and all attributes with higher numbers, from the attribute cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the attribute. Not this is NOT a string in Python 3! """ num, scope = self.attr_num(attrname) #All numbers higher than this are renumbered for a, n in list(self._attr_info.items()): if n[0] >= num: del self._attr_info[a] def clear_from_cache(self, varname): """Mark a variable deleted in the name-to-number cache Will remove a variable, and all variables with higher numbers, from the variable cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! """ num = self.var_num(varname) #All numbers higher than this are renumbered for v, n in list(self._var_nums.items()): if n >= num: del self._var_nums[v] def add_attr_to_cache(self, attrname, num, scope): """Add an attribute to the name-to-number cache This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable scope : bool True if global scope; False if variable scope. 
""" self._attr_info[attrname] = (num, scope) def add_to_cache(self, varname, num): """Add a variable to the name-to-number cache This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable """ self._var_nums[varname] = num #Note there is no function for delete, currently handled in Var.rename #and Attr.rename by just deleting from the dict directly. Maybe this #should be differen (maybe should be possible to follow a variable across #a rename...) class Var(MutableSequence): """ A CDF variable. This object does not directly store the data from the CDF; rather, it provides access to the data in a format that much like a Python list or numpy :class:`~numpy.ndarray`. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. The CDF user's guide, section 2.3, provides background on variables. .. note:: Not intended to be created directly; use methods of :class:`CDF` to gain access to a variable. A record-varying variable's data are viewed as a hypercube of dimensions n_dims+1 (the extra dimension is the record number). They are indexed in row-major fashion, i.e. the last index changes most frequently / is contiguous in memory. If the CDF is column-major, the data are transformed to row-major before return. Non record-varying variables are similar, but do not have the extra dimension of record number. Variables can be subscripted by a multidimensional index to return the data. Indices are in row-major order with the first dimension representing the record number. If the CDF is column major, the data are reordered to row major. Each dimension is specified by standard Python `slice <http://docs.python.org/tutorial/introduction.html#strings>`_ notation, with dimensions separated by commas. The ellipsis fills in any missing dimensions with full slices. The returned data are lists; Python represents multidimensional arrays as nested lists. The innermost set of lists represents contiguous data. .. note:: numpy 'fancy indexing' is *not* supported. Degenerate dimensions are 'collapsed', i.e. no list of only one element will be returned if a single subscript is specified instead of a range. (To avoid this, specify a slice like 1:2, which starts with 1 and ends before 2). Two special cases: 1. requesting a single-dimension slice for a record-varying variable will return all data for that record number (or those record numbers) for that variable. 2. Requests for multi-dimensional variables may skip the record-number dimension and simply specify the slice on the array itself. In that case, the slice of the array will be returned for all records. In the event of ambiguity (e.g., single-dimension slice on a one-dimensional variable), case 1 takes priority. Otherwise, mismatch between the number of dimensions specified in the slice and the number of dimensions in the variable will cause an :exc:`~exceptions.IndexError` to be thrown. This all sounds very complicated but it is essentially attempting to do the 'right thing' for a range of slices. An unusual case is scalar (zero-dimensional) non-record-varying variables. Clearly they cannot be subscripted normally. 
In this case, use the ``[...]`` syntax meaning 'access all data.': >>> import pycdf >>> testcdf = pycdf.CDF('test.cdf', '') >>> variable = testcdf.new('variable', recVary=False, ... type=pycdf.const.CDF_INT4) >>> variable[...] = 10 >>> variable <Var: CDF_INT4 [] NRV > >>> variable[...] 10 Reading any empty non-record-varying variable will return an empty with the same *number* of dimensions, but all dimensions will be of zero length. The scalar is, again, a special case: due to the inability to have a numpy array which is both zero-dimensional and empty, reading an NRV scalar variable with no data will return an empty one-dimensional array. This is really not recommended. As a list type, variables are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_; iterating over a variable returns a single complete record at a time. This is all clearer with examples. Consider a variable ``B_GSM``, with three elements per record (x, y, z components) and fifty records in the CDF. Then: 1. ``B_GSM[0, 1]`` is the y component of the first record. 2. ``B_GSM[10, :]`` is a three-element list, containing x, y, and z components of the 11th record. As a shortcut, if only one dimension is specified, it is assumed to be the record number, so this could also be written ``B_GSM[10]``. 3. ``B_GSM[...]`` reads all data for ``B_GSM`` and returns it as a fifty-element list, each element itself being a three-element list of x, y, z components. Multidimensional example: consider fluxes stored as a function of pitch angle and energy. Such a variable may be called Flux and stored as a two-dimensional array, with the first dimension representing (say) ten energy steps and the second, eighteen pitch angle bins (ten degrees wide, centered from 5 to 175 degrees). Assume 100 records stored in the CDF (i.e. 100 different times). 1. ``Flux[4]`` is a list of ten elements, one per energy step, each element being a list of 18 fluxes, one per pitch bin. All are taken from the fifth record in the CDF. 2. ``Flux[4, :, 0:4]`` is the same record, all energies, but only the first four pitch bins (roughly, field-aligned). 3. ``Flux[..., 0:4]`` is a 100-element list (one per record), each element being a ten-element list (one per energy step), each containing fluxes for the first four pitch bins. This slicing notation is very flexible and allows reading specifically the desired data from the CDF. All data are, on read, converted to appropriate Python data types; EPOCH, EPOCH16, and TIME_TT2000 types are converted to :class:`~datetime.datetime`. Data are returned in numpy arrays. .. note:: Although pycdf supports TIME_TT2000 variables, the Python :class:`~datetime.datetime` object does not support leap seconds. Thus, on read, any seconds past 59 are truncated to 59.999999 (59 seconds, 999 milliseconds, 999 microseconds). Potentially useful list methods and related functions: - `count <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `in <http://docs.python.org/reference/expressions.html#in>`_ - `index <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `len <http://docs.python.org/library/functions.html#len>`_ - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - `sorted <http://docs.python.org/library/functions.html#sorted>`_ The topic of array majority can be very confusing; good background material is available at `IDL Array Storage and Indexing <http://www.idlcoyote.com/misc_tips/colrow_major.html>`_. 
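Continuing the ``Flux`` sketch above, a single record comes back with energy varying along the first axis and pitch angle along the second, whatever majority the file itself uses:

>>> Flux[0].shape
(10, 18)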
In brief, *regardless of the majority stored in the CDF*, pycdf will always present the data in the native Python majority, row-major order, also known as C order. This is the default order in `NumPy <http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html #internal-memory-layout-of-an-ndarray>`_. However, packages that render image data may expect it in column-major order. If the axes seem 'swapped' this is likely the reason. The :attr:`~Var.attrs` Python attribute acts as a dictionary referencing zAttributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`zAttrList` for more on the dictionary of attributes. With writing, as with reading, every attempt has been made to match the behavior of Python lists. You can write one record, many records, or even certain elements of all records. There is one restriction: only the record dimension (i.e. dimension 0) can be resized by write, as all records in a variable must have the same dimensions. Similarly, only whole records can be deleted. .. note:: Unusual error messages on writing data usually mean that pycdf is unable to interpret the data as a regular array of a single type matching the type and shape of the variable being written. A 5x4 array is supported; an irregular array where one row has five columns and a different row has six columns is not. Error messages of this type include: - ``Data must be well-formed, regular array of number, string, or datetime`` - ``setting an array element with a sequence.`` - ``shape mismatch: objects cannot be broadcast to a single shape`` For these examples, assume Flux has 100 records and dimensions [2, 3]. Rewrite the first record without changing the rest: >>> Flux[0] = [[1, 2, 3], [4, 5, 6]] Writes a new first record and delete all the rest: >>> Flux[...] = [[1, 2, 3], [4, 5, 6]] Write a new record in the last position and add a new record after: >>> Flux[99:] = [[[1, 2, 3], [4, 5, 6]], ... [[11, 12, 13], [14, 15, 16]]] Insert two new records between the current number 5 and 6: >>> Flux[5:6] = [[[1, 2, 3], [4, 5, 6]], [[11, 12, 13], ... [14, 15, 16]]] This operation can be quite slow, as it requires reading and rewriting the entire variable. (CDF does not directly support record insertion.) Change the first element of the first two records but leave other elements alone: >>> Flux[0:2, 0, 0] = [1, 2] Remove the first record: >>> del Flux[0] Removes record 5 (the sixth): >>> del Flux[5] Due to the need to work around a bug in the CDF library, this operation can be quite slow. Delete *all data* from ``Flux``, but leave the variable definition intact: >>> del Flux[...] .. note:: Although this interface only directly supports zVariables, zMode is set on opening the CDF so rVars appear as zVars. See p.24 of the CDF user's guide; pyCDF uses zMode 2. .. autosummary:: ~Var.attrs ~Var.compress ~Var.copy ~Var.dtype ~Var.dv ~Var.insert ~Var.name ~Var.rename ~Var.rv ~Var.shape ~Var.type .. attribute:: Var.attrs zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. .. automethod:: compress .. automethod:: copy .. autoattribute:: dtype .. automethod:: dv .. automethod:: insert .. automethod:: name .. automethod:: rename .. automethod:: rv .. autoattribute:: shape .. 
automethod:: type """ def __init__(self, cdf_file, var_name, *args): """Create or locate a variable Parameters ========== cdf_file : :py:class:`pycdf.CDF` CDF file containing this variable var_name : string name of this variable Other Parameters ================ args additional arguments passed to :py:meth:`_create`. If none, opens an existing variable. If provided, creates a new one. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning """ self.cdf_file = cdf_file #This is the definitive "identify" of variable self._name = None self._type = None #CDF type (long) self._raw = False #Raw access (skip all conversions) if len(args) == 0: self._get(var_name) else: self._create(var_name, *args) #Weak reference to attribute list (use attrs instead) #This avoids a reference loop self._attrlistref = weakref.ref(zAttrList(self)) def __getitem__(self, key): """Returns a slice from the data array. Details under :py:class:`pycdf.Var`. @return: The data from this variable @rtype: list-of-lists of appropriate type. @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) #Hyperslice mostly catches this sort of thing, but #an empty variable is a special case, since we might want to #WRITE to 0th record (which Hyperslice also supports) but #can't READ from it, and iterating over tries to read from it. if hslice.rv: if hslice.dimsizes[0] == 0 and hslice.degen[0] and \ hslice.starts[0] == 0: raise IndexError('record index out of range') #For NRV, again hslice will assume 0th record exists since we might #want to write. So ANY degenerate dim other than the glued-on 0th #suggests an explicit index that should fail. None degenerate suggests #make an empty array. #Note this is pulling a lot of hyperslice stuff into getitem! elif hslice.dimsizes[0] == 0: if len(hslice.degen) > 1 and max(hslice.degen[1:]): raise IndexError('record index out of range') else: #The zero-length dimension is degenerate so it gets chopped, #and you can't have a zero-length numpy array that still #maintains the size of all other dimensions. So just force #a zero-dim array and the rest will follow hslice.counts[...] = 0 #If this is a scalar, need to make a single non-degenerate #dimension so it can be empty. if len(hslice.counts) == 1: hslice.degen[0] = False result = hslice.create_array() if hslice.counts[0] != 0: hslice.select() lib.call(const.GET_, const.zVAR_HYPERDATA_, result.ctypes.data_as(ctypes.c_void_p)) return hslice.convert_input_array(result) def __delitem__(self, key): """Removes a record (or set of records) from the CDF Only whole records can be deleted, so the del call must either specify only one dimension or it must specify all elements of the non-record dimensions. This is *not* a way to resize a variable! Deleting records from the middle of a variable may be very slow in some circumstances. To work around a bug in CDF library versions 3.4.0 and before, all the data must be read in, the requested deletions done, and then all written back out. 
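For example, assuming ``Flux`` is a record-varying zVariable in an open, writable CDF:

>>> del Flux[5]      # remove record 5 (the sixth)
>>> del Flux[0:10]   # remove the first ten records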
@param key: index or slice to delete @type key: int or slice @raise TypeError: if an attempt is made to delete from a non record-varying variable, or to delete below the record level """ if not self.rv(): raise TypeError('Cannot delete records from non-record-varying ' 'variable.') hslice = _Hyperslice(self, key) if hslice.dims > 1 and (hslice.counts[1:] != hslice.dimsizes[1:]).any(): raise TypeError('Can only delete entire records.') if hslice.counts[0] == 0: return start = hslice.starts[0] count = hslice.counts[0] interval = hslice.intervals[0] dimsize = hslice.dimsizes[0] self._call() dangerous_delete = False if lib._del_middle_rec_bug and \ (interval != 1 or (start != 0 and start + count < dimsize)): #delete from middle is dangerous if only have one index entry entries = ctypes.c_long(0) lib.call(const.GET_, const.zVAR_nINDEXENTRIES_, ctypes.byref(entries)) dangerous_delete = (entries.value == 1) if dangerous_delete: data = self[...] data = numpy.delete( data, numpy.arange(start, start + count * interval, interval), 0) self[0:dimsize - count] = data first_rec = dimsize - count last_rec = dimsize - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif interval == 1: first_rec = ctypes.c_long(start) last_rec = ctypes.c_long(start + count - 1) lib.call(const.DELETE_, const.zVAR_RECORDS_, first_rec, last_rec) else: self._call() #delete from end to avoid renumbering of records for recno in range(start + (count - 1) * interval, start - 1, -1 * interval): lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(recno), ctypes.c_long(recno)) def __setitem__(self, key, data): """Puts a slice into the data array. Details under :py:class:`pycdf.Var`. @param key: index or slice to store @type key: int or slice @param data: data to store @type data: numpy.array @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. 
IndexError will @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) n_recs = hslice.counts[0] hslice.expand(data) cdf_type = self.type() if cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) else: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=self._np_type()) if cdf_type == const.CDF_EPOCH16.value: datashape = data.shape[:-1] else: datashape = data.shape #Check data sizes if datashape != tuple(hslice.expected_dims()): raise ValueError('attempt to assign data of dimensions ' + str(datashape) + ' to slice of dimensions ' + str(tuple(hslice.expected_dims()))) #Flip majority and reversed dimensions, see convert_input_array data = hslice.convert_output_array(data) #Handle insertions and similar weirdness if hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Specified slice ends before last record, so insert in middle saved_data = self[hslice.starts[0] + n_recs:] if hslice.counts[0] > 0: hslice.select() lib.call(const.PUT_, const.zVAR_HYPERDATA_, data.ctypes.data_as(ctypes.c_void_p)) if hslice.counts[0] < n_recs: first_rec = hslice.starts[0] + hslice.counts[0] last_rec = hslice.dimsizes[0] - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Put saved data in after inserted data self[hslice.starts[0] + hslice.counts[0]:] = saved_data def extend(self, data): """ Append multiple values to the end of this variable This is an efficiency function which overrides the base implementation in MutableSequence. Parameters ---------- data : the data to append """ self[len(self):] = data def insert(self, index, data): """ Inserts a *single* record before an index Parameters ---------- index : int index before which to insert the new record data : the record to insert """ self[index:index] = [data] def _create(self, var_name, datatype, n_elements = 1, dims = (), recVary = const.VARY, dimVarys = None): """Creates a new zVariable @param var_name: name of this variable @type var_name: string @param datatype: CDF data type @type datatype: ctypes.c_long @param n_elements: number of elements (should be 1 except for CDF_CHAR variables). @type n_elements: long @param dims: size of each dimension for multi-dimensional variable, or empty for a zero-dimensional @type dims: sequence of long @param recVary: record variance for this variable (VARY/NOVARY) @type recVary: long @param dimVarys: array of VARY or NOVARY, variance for each dimension @type dimVarys: sequence of long @return: new variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.new}. 
""" dim_array = (ctypes.c_long * len(dims))(*dims) enc_name = var_name.encode('ascii') if dimVarys is None: dim_vary_array = (ctypes.c_long * (len(dims) if len(dims) > 0 else 1))(const.VARY) else: dim_vary_array = (ctypes.c_long * len(dims))(*dimVarys) varNum = ctypes.c_long(0) self.cdf_file._call(const.CREATE_, const.zVAR_, enc_name, datatype, ctypes.c_long(n_elements), ctypes.c_long(len(dims)), dim_array, recVary, dim_vary_array, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) def _delete(self): """Removes this zVariable from the CDF @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ self._call(const.DELETE_, const.zVAR_) self.cdf_file.clear_from_cache(self._name) self._name = None def _get(self, var_name): """Gets an existing zVariable @param var_name: name of this variable @type var_name: string @return: variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.__getitem__}. """ if isinstance(var_name, str_classes): try: enc_name = var_name.encode('ascii').rstrip() except AttributeError: enc_name = var_name.rstrip() #already in ASCII #'touch' CDF to cause an error if the name isn't there; get number varNum = ctypes.c_long(0) self.cdf_file._call(const.GET_, const.zVAR_NUMBER_, enc_name, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) else: #Looking up by number name = ctypes.create_string_buffer(const.CDF_VAR_NAME_LEN256+1) self.cdf_file._call(const.SELECT_, const.zVAR_, ctypes.c_long(var_name), const.GET_, const.zVAR_NAME_, name) self._name = name.value.rstrip() self.cdf_file.add_to_cache(self._name, var_name) def _num(self): """Returns the zVar number for this variable @return: number of this zVar @rtype: int """ return self.cdf_file.var_num(self._name) def __len__(self): """Get number of records for this variable in this file @return: Number of records @rtype: long @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ count = ctypes.c_long(0) self._call(const.GET_, const.zVAR_MAXREC_, ctypes.byref(count)) return (count.value + 1) def __repr__(self): """Returns representation of the variable Cannot return anything that can be eval'd to create a copy, so just wrap the informal representation in angle brackets. @return: info on this zVar @rtype: str """ return '<Var:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the variable This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.Var`. 
@return: info on this zVar, CDFTYPE [dimensions] NRV (if not record-varying) @rtype: str """ if self.cdf_file._opened: cdftype = self.type() chartypes = (const.CDF_CHAR.value, const.CDF_UCHAR.value) rv = self.rv() typestr = lib.cdftypenames[cdftype] + \ ('*' + str(self._nelems()) if cdftype in chartypes else '' ) if rv: sizestr = str([len(self)] + self._dim_sizes()) else: sizestr = str(self._dim_sizes()) return typestr + ' ' + sizestr + ('' if rv else ' NRV') else: if isinstance(self._name, str): return 'zVar "{0}" in closed CDF {1}'.format( self._name, self.cdf_file.pathname) else: return 'zVar "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self.cdf_file.pathname.decode('ascii')) def _n_dims(self): """Get number of dimensions for this variable @return: the number of dimensions @rtype: long """ n_dims = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMDIMS_, ctypes.byref(n_dims)) return n_dims.value def _dim_sizes(self): """Get the dimension sizes for this variable @return: sequence of sizes @rtype: sequence of long @note: This will always be in Python order (i.e. row major, last index iterates most quickly), *regardless* of the majority of the CDF. """ sizes = (ctypes.c_long * const.CDF_MAX_DIMS)(0) self._call(const.GET_, const.zVAR_DIMSIZES_, sizes) sizes = sizes[0:self._n_dims()] return sizes def rv(self, new_rv=None): """ Gets or sets whether this variable has record variance If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Other Parameters ================ new_rv : boolean True to change to record variance, False to change to NRV, unspecified to simply check variance. Returns ======= out : Boolean True if record varying, False if NRV """ if new_rv != None: self._call(const.PUT_, const.zVAR_RECVARY_, const.VARY if new_rv else const.NOVARY) vary = ctypes.c_long(0) self._call(const.GET_, const.zVAR_RECVARY_, ctypes.byref(vary)) return vary.value != const.NOVARY.value def dv(self, new_dv=None): """ Gets or sets dimension variance of each dimension of variable. If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Parameters ========== new_dv : list of boolean Each element True to change that dimension to dimension variance, False to change to not dimension variance. (Unspecified to simply check variance.) Returns ======= out : list of boolean True if that dimension has variance, else false. """ ndims = self._n_dims() if new_dv != None: if len(new_dv) != ndims: raise ValueError('Must specify variance for ' + str(ndims) + 'dimensions.') varies = (ctypes.c_long * ndims)( *[const.VARY if dv else const.NOVARY for dv in new_dv]) self._call(const.PUT_, const.zVAR_DIMVARYS_, varies) if ndims == 0: return [] varies = (ctypes.c_long * const.CDF_MAX_DIMS)() self._call(const.GET_, const.zVAR_DIMVARYS_, varies) return [dv != const.NOVARY.value for dv in varies[0:ndims]] def _call(self, *args, **kwargs): """Select this CDF and variable and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. @param args: Passed directly to the CDF library interface. Useful constants are defined in the :py:mod:`pycdf.const` module of this package. @type args: various, see :py:mod:`ctypes`. 
@return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self.cdf_file._call( const.SELECT_, const.zVAR_, ctypes.c_long(self.cdf_file.var_num(self._name)), *args, **kwargs) def _np_type(self): """Returns the numpy type of this variable This is the numpy type that will come directly out of the CDF; see :meth:`dtype` for the representation post-conversion. Raises ====== CDFError : for library-reported error or failure to find numpy type Returns ======= out : dtype numpy dtype that will hold value from this variable """ cdftype = self.type() if cdftype == const.CDF_CHAR.value or cdftype == const.CDF_UCHAR.value: return numpy.dtype('S' + str(self._nelems())) try: return lib.numpytypedict[cdftype] except KeyError: raise CDFError(const.BAD_DATA_TYPE) def type(self, new_type=None): """ Returns or sets the CDF type of this variable Parameters ========== new_type : ctypes.c_long the new type from :mod:`~pycdf.const` Returns ======= out : int CDF type """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) n_elements = ctypes.c_long(self._nelems()) self._call(const.PUT_, const.zVAR_DATASPEC_, new_type, n_elements) self._type = None if self._type is None: cdftype = ctypes.c_long(0) self._call(const.GET_, const.zVAR_DATATYPE_, ctypes.byref(cdftype)) self._type = cdftype.value return self._type def _nelems(self): """Number of elements for each value in this variable This is the length of strings for CHAR and UCHAR, should be 1 otherwise. @return: length of strings @rtype: int """ nelems = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMELEMS_, ctypes.byref(nelems)) return nelems.value def name(self): """ Returns the name of this variable Returns ======= out : str variable's name """ if isinstance(self._name, str): return self._name elif isinstance(self._name, bytes): return self._name.decode() def compress(self, comptype=None, param=None): """ Set or check the compression of this variable Compression may not be changeable on variables with data already written; even deleting the data may not permit the change. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
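A small sketch parallel to the file-level example in :meth:`CDF.compress` (variable name and parameters are illustrative; set compression before writing data, since it may not be changeable afterwards):

>>> newvar = cdffile.new('newvar', type=pycdf.const.CDF_DOUBLE)
>>> newvar.compress(pycdf.const.GZIP_COMPRESSION, 9)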
Returns ======= out : tuple the (comptype, param) currently in effect """ return _compress(self, comptype, param) def copy(self): """ Copies all data and attributes from this variable Returns ======= out : :class:`VarCopy` list of all data in record order """ return VarCopy(self) def rename(self, new_name): """ Renames this variable Parameters ========== new_name : str the new name for this variable """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_VAR_NAME_LEN256: raise CDFError(const.BAD_VAR_NAME) self._call(const.PUT_, const.zVAR_NAME_, enc_name) self.cdf_file.add_to_cache( enc_name, self.cdf_file.var_num(self._name)) #Still in cache del self.cdf_file._var_nums[self._name] self._name = enc_name @property def shape(self): """ Provides the numpy array-like shape of this variable. Returns a tuple; first element is number of records (RV variable only) And the rest provide the dimensionality of the variable. .. note:: Assigning to this attribute will not change the shape. """ if self.rv(): return tuple([len(self)] + self._dim_sizes()) else: return tuple(self._dim_sizes()) @property def dtype(self): """ Provide the numpy dtype equivalent to the CDF type of this variable. Data from this variable will be returned in numpy arrays of this type. See Also -------- type """ cdftype = self.type() if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str is not bytes and not self._raw: return numpy.dtype('U' + str(self._nelems())) if cdftype in (const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value) and not self._raw: return numpy.dtype('O') return self._np_type() def _get_attrs(self): """Get attribute list Provide access to the zVar's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = zAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. """) class _Hyperslice(object): """Represents a CDF 'slice' used for the hyper CDF functions For internal module use only. @ivar dims: number of dimensions to this slice, usually number of dimensions to the variable plus one for the record, which represents the 0th (least rapidly varying) dimension. @type dims: int @ivar dimsizes: size of each dimension (0th is number of records) @type dimsizes: list of int @ivar starts: index of the start value for each dimension ('dimension indices' in CDF speak) @type starts: list of int @ivar counts: number of values to get from each dimension. Final result will be the product of everything in counts. ('dimension counts' in CDF speak) @type counts: numpy.array @ivar intervals: interval between successive indices to use for each dimension. ('dimension invervals' in CDF speak) @type intervals: list of int @ivar degen: is this dimension degenerate, i.e. should be removed in the returned dataset. A 3D array with one dimension degenerate will be returned as a 2D array (i.e. list-of-lists.) @type degen: numpy.array @ivar rev: should this dimension be returned in reverse order? 
@type rev: numpy.array @ivar column: is this slice in column-major mode (if false, row-major) @type column: boolean @ivar zvar: what CDF variable this object slices on @type zvar: :py:class:`pycdf.Var` @ivar expanded_key: fully-expanded version of the key passed to the constructor (all dimensions filled in) @type expanded_key: tuple @note: All dimension-related variables are stored row-major (Python order) """ def __init__(self, zvar, key): """Create a Hyperslice @param zvar: zVariable that this slices @type zvar: :py:class:`pycdf.Var` @param key: Python multi-dimensional slice as passed to __getitem__ @type key: tuple of slice and/or int @raise IndexError: if slice is out of range, mismatches dimensions, or otherwise unparsable. @raise ValueError: if slice has invalid values """ self.zvar = zvar self.rv = self.zvar.rv() #dim of records, + 1 record dim (NRV always is record 0) self.dims = zvar._n_dims() + 1 self.dimsizes = [len(zvar)] + \ zvar._dim_sizes() self.starts = [0] * self.dims self.counts = numpy.empty((self.dims,), dtype=numpy.int32) self.counts.fill(1) self.intervals = [1] * self.dims self.degen = numpy.zeros(self.dims, dtype=numpy.bool) self.rev = numpy.zeros(self.dims, dtype=numpy.bool) #key is: #1. a single value (integer or slice object) if called 1D #2. a tuple (of integers and/or slice objects) if called nD #3. Each item is either a single value (degenerate dim) # or a slice object. if not hasattr(key, '__len__'): #Not a container object, pack in tuple key = (key, ) if not self.rv: key = (0, ) + key #NRV, so always get 0th record (degenerate) key = self.expand_ellipsis(key, self.dims) if self.rv: #special-cases for RV variables if len(key) == 1: #get all data for this record(s) key = self.expand_ellipsis(key + (Ellipsis, ), self.dims) elif len(key) == self.dims - 1: #get same slice from each record key = (slice(None, None, None), ) + key if len(key) == self.dims: self.expanded_key = key for i in range(self.dims): idx = key[i] if hasattr(idx, 'start'): #slice (self.starts[i], self.counts[i], self.intervals[i], self.rev[i]) = \ self.convert_range(idx.start, idx.stop, idx.step, self.dimsizes[i]) else: #Single degenerate value if idx < 0: idx += self.dimsizes[i] if idx != 0 and (idx >= self.dimsizes[i] or idx < 0): raise IndexError('list index out of range') self.starts[i] = idx self.degen[i] = True else: raise IndexError('Slice does not match dimensions for zVar ' + str(zvar._name)) self.column = zvar.cdf_file.col_major() def expected_dims(self, data=None): """Calculate size of non-degenerate dimensions Figures out size, in each dimension, of expected input data @return: size of each dimension for this slice, excluding degenerate @rtype: list of int """ return [self.counts[i] for i in range(self.dims) if not self.degen[i]] def expand(self, data): """Expands the record dimension of this slice to hold a set of data If the length of data (outermost dimension) is larger than the record count (counts[0]) for this slice, expand the slice to hold all the data. This requires that the record dimension of the slice not be degenerate, and also that it not have been completely specified when the hyperslice was created (i.e. record dimension either ellipsis or no specified stop.) Does *not* expand any other dimension, since that's Very Hard in CDF. 
@param data: the data which are intended to be stored in this slice @type data: list """ rec_slice = self.expanded_key[0] if not self.rv or isinstance(data, str_classes) or self.degen[0] or \ not hasattr(rec_slice, 'stop'): return if len(data) < self.counts[0]: #Truncate to fit data if rec_slice.stop is None and rec_slice.step in (None, 1): self.counts[0] = len(data) elif len(data) > self.counts[0]: #Expand to fit data if rec_slice.step in (None, 1): self.counts[0] = len(data) def create_array(self): """Creates a numpy array to hold the data from this slice Returns ======= out : numpy.array array sized, typed, and dimensioned to hold data from this slice """ counts = self.counts degen = self.degen if self.column: counts = self.reorder(counts) degen = self.reorder(degen) #TODO: Forcing C order for now, revert to using self.column later array = numpy.empty( [counts[i] for i in range(len(counts)) if not degen[i]], self.zvar._np_type(), order='C') return numpy.require(array, requirements=('C', 'A', 'W')) def convert_input_array(self, buffer): """Converts a buffer of raw data from this slice EPOCH(16) variables always need to be converted. CHAR need converted to Unicode if py3k Parameters ========== buffer : numpy.array data as read from the CDF file Returns ======= out : numpy.array converted data """ result = self._flip_array(buffer) #Convert to derived types cdftype = self.zvar.type() if not self.zvar._raw: if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str != bytes: dt = numpy.dtype('U{0}'.format(result.dtype.itemsize)) result = numpy.require(numpy.char.array(result).decode(), dtype=dt) elif cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(result) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(result) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(result) return result def convert_output_array(self, buffer): """Convert a buffer of data that will go into this slice Parameters ========== buffer : numpy.array data to go into the CDF file Returns ======= out : numpy.array input with majority flipped and dimensions reversed to be suitable to pass directly to CDF library. """ buffer = self._flip_array(buffer) return numpy.require(buffer, requirements=('C', 'A', 'W')) def _flip_array(self, data): """ Operations for majority, etc. common between convert_input and _output """ cdftype = self.zvar.type() #Flip majority if any non-degenerate dimensions exist if self.column and not min(self.degen): #Record-number dim degen, swap whole thing if self.degen[0]: if cdftype == const.CDF_EPOCH16.value: #Maintain last dimension data = data.transpose( list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose() #Record-number dimension is not degenerate, so keep it first else: if cdftype == const.CDF_EPOCH16.value: data = data.transpose( [0] + list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose( [0] + list(range(len(data.shape) - 1, 0, -1))) #Reverse non-degenerate dimensions in rev #Remember that the degenerate indices are already gone! 
if self.rev.any(): sliced = [(slice(None, None, -1) if self.rev[i] else slice(None)) for i in range(self.dims) if not self.degen[i]] if cdftype == const.CDF_EPOCH16.value: #don't reverse last dim sliced.extend(slice(None)) data = operator.getitem(data, tuple(sliced)) return data def select(self): """Selects this hyperslice in the CDF Calls the CDF library to select the CDF, variable, records, and array elements corresponding to this slice. """ args = (const.SELECT_, const.zVAR_RECNUMBER_, ctypes.c_long(self.starts[0]), const.SELECT_, const.zVAR_RECCOUNT_, ctypes.c_long(self.counts[0]), const.SELECT_, const.zVAR_RECINTERVAL_, ctypes.c_long(self.intervals[0])) if self.dims > 1: dims = self.dims - 1 args += (const.SELECT_, const.zVAR_DIMINDICES_, (ctypes.c_long * dims)(*self.starts[1:]), const.SELECT_, const.zVAR_DIMCOUNTS_, (ctypes.c_long * dims)(*self.counts[1:]), const.SELECT_, const.zVAR_DIMINTERVALS_, (ctypes.c_long * dims)(*self.intervals[1:])) self.zvar._call(*args) @staticmethod def expand_ellipsis(slices, n_dims): """Expands any ellipses into correct number of full-size slices @param slices: tuple of slices, integers, or ellipse objects @type slices: tuple @param n_dims: number of dimensions this slice is over @type n_dims: int @return: L{slices} with ellipses replaced by appropriate number of full-dimension slices @rtype: tuple @raise IndexError: if ellipses specified when already have enough dimensions """ if slices is Ellipsis: return tuple([slice(None, None, None) for i in range(n_dims)]) #Elements might be numpy arrays, so can't use in/index idx = [i for i, v in enumerate(slices) if v is Ellipsis] if not idx: #no ellipsis return slices if len(idx) > 1: #multiples! raise IndexError('Ellipses can only be used once per slice.') idx = idx[0] #how many dims to expand ellipsis to #remember the ellipsis is in len(slices) and must be replaced! extra = n_dims - len(slices) + 1 if extra < 0: raise IndexError('too many indices') result = slices[0:idx] + (slice(None), ) * extra + slices[idx+1:] return result @staticmethod def check_well_formed(data): """Checks if input data is well-formed, regular array""" d = numpy.asanyarray(data) if d.dtype == numpy.object: #this is probably going to be bad try: len(d.flat[0]) except TypeError: #at least it's not a list pass else: raise ValueError( 'Data must be well-formed, regular array of number, ' 'string, or datetime') @staticmethod def dimensions(data): """Finds the dimensions of a nested list-of-lists @param data: data of which dimensions are desired @type data: list (of lists) @return: dimensions of L{data}, in order outside-in @rtype: list of int @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) _Hyperslice.check_well_formed(d) return d.shape @staticmethod def types(data, backward=False): """Find dimensions and valid types of a nested list-of-lists Any given data may be representable by a range of CDF types; infer the CDF types which can represent this data. This breaks down to: 1. Proper kind (numerical, string, time) 2. Proper range (stores highest and lowest number) 3. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: 1. Type that matches precision of data first, then 2. integer type before float type, then 3. Smallest type first, then 4. signed type first, then 5. specifically-named (CDF_BYTE) vs. 
generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if L{data} specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: 1. absolute values between 0 and 3e-39 2. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. @param data: data for which dimensions and CDF types are desired @type data: list (of lists) @param backward: limit to pre-CDF3 types @type backward: bool @return: dimensions of L{data}, in order outside-in; CDF types which can represent this data; number of elements required (i.e. length of longest string) @rtype: 3-tuple of lists ([int], [ctypes.c_long], [int]) @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) dims = d.shape elements = 1 types = [] _Hyperslice.check_well_formed(d) if d.dtype.kind in ('S', 'U'): #it's a string types = [const.CDF_CHAR, const.CDF_UCHAR] elements = d.dtype.itemsize if d.dtype.kind == 'U': #UTF-8 uses 4 bytes per elements //= 4 elif d.size and hasattr(numpy.ma.getdata(d).flat[0], 'microsecond'): if max((dt.microsecond % 1000 for dt in d.flat)) > 0: types = [const.CDF_EPOCH16, const.CDF_EPOCH, const.CDF_TIME_TT2000] else: types = [const.CDF_EPOCH, const.CDF_EPOCH16, const.CDF_TIME_TT2000] if backward: del types[types.index(const.CDF_EPOCH16)] del types[-1] elif not lib.supports_int8: del types[-1] elif d is data or isinstance(data, numpy.generic): #numpy array came in, use its type (or byte-swapped) types = [k for k in lib.numpytypedict if (lib.numpytypedict[k] == d.dtype or lib.numpytypedict[k] == d.dtype.newbyteorder()) and not k in lib.timetypes] if (not lib.supports_int8 or backward) \ and const.CDF_INT8.value in types: del types[types.index(const.CDF_INT8.value)] #Maintain priority to match the ordered lists below: #float/double (44, 45) before real (21/22), and #byte (41) before int (1) before char (51). So hack. #Consider making typedict an ordered dict once 2.6 is dead. types.sort(key=lambda x: x % 50, reverse=True) if not types: #not a numpy array, or can't parse its type if d.dtype.kind == 'O': #Object. 
Try to make it numeric #Can't do safe casting from Object, so try and compare #Basically try most restrictive to least restrictive trytypes = (numpy.uint64, numpy.int64, numpy.float64) for t in trytypes: try: newd = d.astype(dtype=t) except: #Failure to cast, try next type continue if (newd == d).all(): #Values preserved, use this type d = newd #Continue with normal guessing, as if a list break else: #fell through without a match raise ValueError( 'Cannot convert generic objects to CDF type.') if d.dtype.kind in ('i', 'u'): #integer minval = numpy.min(d) maxval = numpy.max(d) if minval < 0: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_INT2, const.CDF_INT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 15, 2 ** 31, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] else: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_UINT1, const.CDF_INT2, const.CDF_UINT2, const.CDF_INT4, const.CDF_UINT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 8, 2 ** 15, 2 ** 16, 2 ** 31, 2 ** 32, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] types = [t for (t, c) in zip(types, cutoffs) if c > maxval and (minval >= 0 or minval >= -c)] if (not lib.supports_int8 or backward) \ and const.CDF_INT8 in types: del types[types.index(const.CDF_INT8)] else: #float if dims is (): if d != 0 and (abs(d) > 1.7e38 or abs(d) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] else: absolutes = numpy.abs(d[d != 0]) if len(absolutes) > 0 and \ (numpy.max(absolutes) > 1.7e38 or numpy.min(absolutes) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] types = [t.value if hasattr(t, 'value') else t for t in types] return (dims, types, elements) @staticmethod def reorder(seq): """Reorders seq to switch array majority Used to take an array of subscripts between row and column majority. First element is not touched, being the record number. @param seq: a sequence of *subscripts* @type seq: sequence of integers @return: seq with all but element 0 reversed in order @rtype: sequence of integers """ return numpy.concatenate((seq[0:1], numpy.flipud(seq)[:-1])) @staticmethod def convert_range(start, stop, step, size): """Converts a start/stop/step range to start/count/interval (i.e. changes from Python-style slice to CDF-style) @param start: index to start a slice at, may be none or negative @type start: int @param stop: index at end of slice (one-past, standard Python), may be none or negative @type stop: int @param step: interval for stepping through stlice @type step: int @param size: size of list to slice @type size: int @return: (start, count, interval, rev) where: 1. start is the start index, normalized to be within the size of the list and negatives handled 2. count is the number of records in the slice, guaranteed to stop before the end 3. interval is the skip between records 4. rev indicates whether the sequence should be reversed @rtype: (int, int, int, boolean) """ (start, stop, step) = slice(start, stop, step).indices(size) if step < 0: step *= -1 count = int((start - stop + step - 1) / step) start = start - (count - 1) * step rev = True else: count = int((stop - start + step - 1) / step) rev = False if count < 0: count = 0 start = 0 return (start, count, step, rev) class Attr(MutableSequence): """An attribute, g or z, for a CDF .. 
warning:: This class should not be used directly, but only in its subclasses, :class:`gAttr` and :class:`zAttr`. The methods listed here are safe to use in the subclasses. Represents a CDF attribute, providing access to the Entries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. An introduction to CDF attributes can be found in section 2.4 of the CDF user's guide. Each element of the list is a single Entry of the appropriate type. The index to the elements is the Entry number. Multi-dimensional slicing is *not* supported; an Entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry .. autosummary:: ~Attr.append ~Attr.has_entry ~Attr.insert ~Attr.max_idx ~Attr.new ~Attr.number ~Attr.rename ~Attr.type .. automethod:: append .. automethod:: has_entry .. automethod:: insert .. automethod:: max_idx .. automethod:: new .. automethod:: number .. automethod:: rename .. automethod:: type """ def __init__(self, cdf_file, attr_name, create=False): """Initialize this attribute @param cdf_file: CDF file containing this attribute @type cdf_file: :py:class:`pycdf.CDF` @param attr_name: Name of this attribute @type attr_name: str @param create: True to create attribute, False to look up existing. @type create: bool """ self._cdf_file = cdf_file self._raw = False if isinstance(attr_name, str_classes): try: self._name = attr_name.encode('ascii') except AttributeError: self._name = attr_name attrno = ctypes.c_long() if create: self._cdf_file._call(const.CREATE_, const.ATTR_, self._name, self.SCOPE, ctypes.byref(attrno)) self._cdf_file.add_attr_to_cache( self._name, attrno.value, self.SCOPE == const.GLOBAL_SCOPE) else: #Ensure exists, and populate cache. See scope note below attrno, scope = self._cdf_file.attr_num(self._name) else: name = ctypes.create_string_buffer(const.CDF_ATTR_NAME_LEN256 + 1) scope = ctypes.c_long(0) self._cdf_file._call(const.SELECT_, const.ATTR_, ctypes.c_long(attr_name)) #Because it's possible to create a gAttr Python objecting #referencing an Attribute with variable scope, and vice-versa, #do NOT assume the scope matches #(Higher level code checks for that being a bad thing.) self._cdf_file._call( const.GET_, const.ATTR_NAME_, name, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) self._name = name.value.rstrip() if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) self._cdf_file.add_attr_to_cache(self._name, attr_name, scope) def __getitem__(self, key): """Return a slice of Entries. Because Attributes may be sparse, a multi-element slice will return None for those elements which do not have associated Entries. @param key: index or range of Entry number to return @type key: slice or int @return: a list of entries, appropriate type. @raise IndexError: if L{key} is an int and that Entry number does not exist. 
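Example (a sketch only; assume ``attribute`` is a gAttr holding 1 and 3 in
Entries 0 and 2, with no Entry 1):

>>> attribute[0:3]   #missing Entry returned as None
[1, None, 3]
>>> attribute[1]     #single index of a missing Entry
Traceback (most recent call last):
    ...
IndexError: list index 1 out of range.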
""" if key is Ellipsis: key = slice(None, None, None) if hasattr(key, 'indices'): idx = range(*key.indices(self.max_idx() + 1)) return [self._get_entry(i) if self.has_entry(i) else None for i in idx] else: if self.has_entry(key): return self._get_entry(key) else: raise IndexError('list index ' + str(key) + ' out of range.') def _check_other_entries(self, types): """Try to get the type of this entry from others in the Attribute For zAttrs, checks if all other Entries are the same type, and at least one doesn't match its zVar, i.e. Entry type dominates (otherwise assumption is the Var type dominates). For gAttrs, checks all other Entries, and gives priority to the one that's earliest in the possible type list and exists in other Entries. This is only one component of Entry type guessing! :param list types: CDF types that are candidates (match the data) :return: The type discerned from other Entries, or None """ if self.ENTRY_ == const.zENTRY_: #If everything else is the same entry type, #and one is not the same as its var, probably #all entries should be of that type cand_et = None #The Entry type that might work one_var_diff = False #One Var has a type different from Entry for num in range(self.max_idx() + 1): if not self.has_entry(num): continue vartype = self._cdf_file[num].type() entrytype = self.type(num) if vartype != entrytype: one_var_diff = True if cand_et is None: if not entrytype in types: return None #One var has Entry with "impossible" type cand_et = entrytype elif cand_et != entrytype: return None #Two vars have Entries with different types if one_var_diff and cand_et is not None: return cand_et else: # Of those types which exist in other entries, # find the one which is earliest # in types, i.e. the preferred type entrytypes = [self.type(num) for num in range(self.max_idx() + 1) if self.has_entry(num)] entrytypes = [et for et in entrytypes if et in types] if entrytypes: return types[ min([types.index(et) for et in entrytypes])] return None def __setitem__(self, key, data): """Set a slice of Entries. @param key: index or range of Entry numbers to set @type key: slice or int @param data: the data to set these entries to. Normally each entry should be a sequence; if a scalar is provided, it is treated as a single-element list. @type data: scalar or list @raise ValueError: if size of {data} does not match size of L{key} @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. 
""" if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): #Single value, promote everything a dimension idx = (key, key + 1, 1) data = [data] else: idx = key.indices(self.max_idx() + 1) if key.step is None or key.step > 0: #Iterating forward, extend slice to match data if len(data) > len(range(*idx)): idx = (idx[0], idx[0] + idx[2] * len(data), idx[2]) #get, and check, types and sizes for all data #checks first so don't have error after changing half the Entries data_idx = -1 typelist = [] for i in range(*idx): data_idx += 1 if data_idx >= len(data): continue datum = data[data_idx] if datum is None: typelist[i] = (None, None, None) continue (dims, types, elements) = _Hyperslice.types( datum, backward=self._cdf_file.backward) if len(types) <= 0: raise ValueError('Cannot find a matching CDF type.') if len(dims) > 1: raise ValueError('Entries must be scalar or 1D.') elif len(dims) == 1 and isinstance(datum[0], str_classes): raise ValueError('Entry strings must be scalar.') entry_type = None if self.has_entry(i): #If the entry already exists, match its type entry_type = self.type(i) if not entry_type in types: entry_type = None if entry_type is None: #Check other entries for this attribute entry_type = self._check_other_entries(types) if entry_type is None and self.ENTRY_ == const.zENTRY_: #Fall back to zVar type vartype = self._cdf_file[i].type() if vartype in types: entry_type = vartype else: entry_type = types[0] elif entry_type is None: entry_type = types[0] if not entry_type in lib.numpytypedict: raise ValueError('Cannot find a matching numpy type.') typelist.append((dims, entry_type, elements)) data_idx = -1 for i in range(*idx): data_idx += 1 if data_idx >= len(data) or data[data_idx] is None: if self.has_entry(i): del self[i] continue datum = data[data_idx] (dims, entry_type, elements) = typelist[data_idx] self._write_entry(i, datum, entry_type, dims, elements) def __delitem__(self, key): """Delete a slice of Entries. @param key: index or range of Entry numbers to delete @type key: slice or int @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. """ if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): idx = (key, key + 1, 1) else: idx = key.indices(self.max_idx() + 1) for i in range(*idx): self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(i), const.DELETE_, self.ENTRY_) def __iter__(self, current=0): """Iterates over all entries in this Attribute Returns data from one entry at a time until reaches the end. @note: Returned in entry-number order. """ while current <= self.max_idx(): if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current += 1 def __reversed__(self, current=None): """Iterates over all entries in this Attribute Returns data from one entry at a time, starting at end and going to beginning. @note: Returned in entry-number order. """ if current is None: current = self.max_idx() while current >= 0: if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current -= 1 def __len__(self): """Number of Entries for this Attr. NOT same as max Entry number. 
@return: Number of Entries @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_NUMENTRIES_, ctypes.byref(count)) return count.value def __repr__(self): """Returns representation of an attribute Cannot return anything that can be eval'd to create a copy of the attribtute, so just wrap the informal representation in angle brackets. @return: all the data in this attribute @rtype: str """ return '<\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute This is an 'informal' representation in that it cannot be evaluated directly to create an L{Attr}. @return: all the data in this attribute @rtype: str """ if self._cdf_file._opened: return '\n'.join([str(item) for item in self]) else: if isinstance(self._name, str): return 'Attribute "{0}" in closed CDF {1}'.format( self._name, self._cdf_file.pathname) else: return 'Attribute "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self._cdf_file.pathname.decode('ascii')) def insert(self, index, data): """Insert an entry at a particular number Inserts entry at particular number while moving all subsequent entries to one entry number later. Does not close gaps. Parameters ========== index : int index where to put the new entry data : data for the new entry """ max_entry = self.max_idx() if index > max_entry: #Easy case self[index] = data return for i in range(max_entry, index - 1, -1): if self.has_entry(i+1): self.__delitem__(i+1) if self.has_entry(i): self.new(self.__getitem__(i), type=self.type(i), number=i+1) self[index] = data def append(self, data): """Add an entry to end of attribute Puts entry after last defined entry (does not fill gaps) Parameters ========== data : data for the new entry """ self[self.max_idx() + 1] = data def _call(self, *args, **kwargs): """Select this CDF and Attr and call the CDF internal interface @param args: Passed directly to the CDF library interface. @type args: various, see :py:mod:`ctypes`. @return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self._cdf_file._call( const.SELECT_, const.ATTR_, ctypes.c_long(self._cdf_file.attr_num(self._name)[0]), *args, **kwargs) def _entry_len(self, number): """Number of elements in an Entry @param number: number of Entry @type number: int @return: number of elements @rtype: int """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') count = ctypes.c_long(0) self._call( const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_NUMELEMS_, ctypes.byref(count)) return count.value def type(self, number, new_type=None): """Find or change the CDF type of a particular Entry number Parameters ========== number : int number of Entry to check or change Other Parameters ================ new_type type to change this Entry to, from :mod:`~pycdf.const`. Omit to only check type. Returns ======= out : int CDF variable type, see :mod:`~pycdf.const` Notes ===== If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) size = ctypes.c_long(self._entry_len(number)) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATASPEC_, new_type, size, ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') cdftype = ctypes.c_long(0) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATATYPE_, ctypes.byref(cdftype), ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') return cdftype.value def has_entry(self, number): """Check if this attribute has a particular Entry number Parameters ========== number : int number of Entry to check or change Returns ======= out : bool True if ``number`` is a valid entry number; False if not """ status = self._call(const.CONFIRM_, self.ENTRY_EXISTENCE_, ctypes.c_long(number), ignore=(const.NO_SUCH_ENTRY, )) return not status == const.NO_SUCH_ENTRY def max_idx(self): """Maximum index of Entries for this Attr Returns ======= out : int maximum Entry number """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_MAXENTRY_, ctypes.byref(count)) return count.value def new(self, data, type=None, number=None): """Create a new Entry in this Attribute .. note:: If ``number`` is provided and an Entry with that number already exists, it will be overwritten. Parameters ========== data data to put in the Entry Other Parameters ================ type : int type of the new Entry, from :mod:`~pycdf.const` (otherwise guessed from ``data``) number : int Entry number to write, default is lowest available number. """ if number is None: number = 0 while self.has_entry(number): number += 1 (dims, types, elements) = _Hyperslice.types( data, backward=self._cdf_file.backward) if type is None: #Guess based on other entries type = self._check_other_entries(types) if type is None and self.ENTRY_ == const.zENTRY_: #Try to match variable type vartype = self._cdf_file[number].type() if vartype in types: type = vartype if type is None: type = types[0] elif hasattr(type, 'value'): type = type.value self._write_entry(number, data, type, dims, elements) def number(self): """Find the attribute number for this attribute Returns ======= out : int attribute number """ no = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.ATTR_NUMBER_, self._name, ctypes.byref(no)) return no.value def global_scope(self): """Determine scope of this attribute. Returns ======= out : bool True if global (i.e. gAttr), False if zAttr """ return self._cdf_file.attr_num(self._name)[1] def rename(self, new_name): """Rename this attribute Renaming a zAttribute renames it for *all* zVariables in this CDF! 
Parameters ========== new_name : str the new name of the attribute """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_ATTR_NAME_LEN256: raise CDFError(const.BAD_ATTR_NAME) self._call(const.PUT_, const.ATTR_NAME_, enc_name) self._cdf_file.add_attr_to_cache( enc_name, *self._cdf_file.attr_num(self._name)) #still in cache del self._cdf_file._attr_info[self._name] self._name = enc_name def _get_entry(self, number): """Read an Entry associated with this L{Attr} @param number: number of Entry to return @type number: int @return: data from entry numbered L{number} @rtype: list or str """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') #Make a big enough buffer length = self._entry_len(number) cdftype = self.type(number) if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): buff = numpy.empty((), 'S{0}'.format(length), order='C') else: if not cdftype in lib.numpytypedict: raise CDFError(const.BAD_DATA_TYPE) buff = numpy.empty((length,), lib.numpytypedict[cdftype], order='C') buff = numpy.require(buff, requirements=('C', 'A', 'W')) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATA_, buff.ctypes.data_as(ctypes.c_void_p)) #decode if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): if str == bytes or self._raw: #Py2k, leave as bytes result = bytes(buff) else: #Py3k, make unicode result = str(numpy.char.array(buff).decode()) else: if not self._raw: if cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(buff) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(buff) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(buff) else: result = buff else: result = buff if length == 1: result = result[0] return result def _write_entry(self, number, data, cdf_type, dims, elements): """Write an Entry to this Attr. @param number: number of Entry to write @type number: int @param data: data to write @param cdf_type: the CDF type to write, from :py:mod:`pycdf.const` @param dims: dimensions of L{data} @type dims: list @param elements: number of elements in L{data}, 1 unless it is a string @type elements: int """ if len(dims) == 0: n_write = 1 else: n_write = dims[0] if cdf_type in (const.CDF_CHAR.value, const.CDF_UCHAR.value): data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.dtype('S' + str(elements))) n_write = elements elif cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data), except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) elif cdf_type in lib.numpytypedict: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=lib.numpytypedict[cdf_type]) else: raise CDFError(const.BAD_DATA_TYPE) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATA_, ctypes.c_long(cdf_type), ctypes.c_long(n_write), data.ctypes.data_as(ctypes.c_void_p)) def _delete(self): """Delete this Attribute Also deletes all Entries associated with it. 
""" self._call(const.DELETE_, const.ATTR_) self._cdf_file.clear_attr_from_cache(self._name) self._name = None class zAttr(Attr): """zAttribute for zVariables within a CDF. .. warning:: Because zAttributes are shared across all variables in a CDF, directly manipulating them may have unexpected consequences. It is safest to operate on zEntries via :class:`zAttrList`. .. note:: When accessing a zAttr, pyCDF exposes only the zEntry corresponding to the associated zVariable. See Also ======== :class:`Attr` """ ENTRY_ = const.zENTRY_ ENTRY_DATA_ = const.zENTRY_DATA_ SCOPE = const.VARIABLE_SCOPE ENTRY_EXISTENCE_ = const.zENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMzENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXzENTRY_ ENTRY_NUMELEMS_ = const.zENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.zENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.zENTRY_DATASPEC_ def insert(self, index, data): """Insert entry at particular index number Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError def append(self, index, data): """Add entry to end of attribute list Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError class gAttr(Attr): """Global Attribute for a CDF Represents a CDF attribute, providing access to the gEntries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. Normally accessed by providing a key to a :class:`gAttrList`: >>> attribute = cdffile.attrs['attribute_name'] >>> first_gentry = attribute[0] Each element of the list is a single gEntry of the appropriate type. The index to the elements is the gEntry number. A gEntry may be either a single string or a 1D array of numerical type. Entries of numerical type (everything but CDF_CHAR and CDF_UCHAR) with a single element are returned as scalars; multiple-element entries are returned as a list. No provision is made for accessing below the entry level; the whole list is returned at once (but Python's slicing syntax can be used to extract individual items from that list.) Multi-dimensional slicing is *not* supported; an entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry gEntries are *not* necessarily contiguous; a gAttribute may have an entry 0 and entry 2 without an entry 1. :meth:`~Attr.len` will return the *number* of gEntries; use :meth:`~Attr.max_idx` to find the highest defined gEntry number and :meth:`~Attr.has_entry` to determine if a particular gEntry number exists. Iterating over all entries is also supported:: >>> entrylist = [entry for entry in attribute] Deleting gEntries will leave a "hole": >>> attribute[0:3] = [1, 2, 3] >>> del attribute[1] >>> attribute.has_entry(1) False >>> attribute.has_entry(2) True >>> print attribute[0:3] [1, None, 3] Multi-element slices over nonexistent gEntries will return ``None`` where no entry exists. Single-element indices for nonexistent gEntries will raise ``IndexError``. Assigning ``None`` to a gEntry will delete it. 
When assigning to a gEntry, the type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing gEntry of the same number in this gAttribute #. other gEntries in this gAttribute #. data-matching constraints described in :meth:`CDF.new`. See Also ======== :class:`Attr` """ ENTRY_ = const.gENTRY_ ENTRY_DATA_ = const.gENTRY_DATA_ SCOPE = const.GLOBAL_SCOPE ENTRY_EXISTENCE_ = const.gENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMgENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXgENTRY_ ENTRY_NUMELEMS_ = const.gENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.gENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.gENTRY_DATASPEC_ class AttrList(MutableMapping): """Object representing a list of attributes. .. warning:: This class should not be used directly, but only via its subclasses, :class:`gAttrList` and :class:`zAttrList`. Methods listed here are safe to use from the subclasses. .. autosummary:: ~AttrList.clone ~AttrList.copy ~AttrList.from_dict ~AttrList.new ~AttrList.rename .. automethod:: clone .. automethod:: copy .. automethod:: from_dict .. automethod:: new .. automethod:: rename """ def __init__(self, cdf_file, special_entry=None): """Initialize the attribute collection @param cdf_file: CDF these attributes are in @type cdf_file: :py:class:`pycdf.CDF` @param special_entry: callable which returns a "special" entry number, used to limit results for zAttrs to those which match the zVar (i.e. the var number) @type special_entry: callable """ self._cdf_file = cdf_file self.special_entry = special_entry def __getitem__(self, name): """Find an Attribute by name @param name: name of the Attribute to return @type name: str @return: attribute named L{name} @rtype: L{Attr} @raise KeyError: if there is no attribute named L{name} @raise CDFError: other errors in CDF library """ try: attrib = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attrib.global_scope() != self.global_scope: raise KeyError(name + ': no ' + self.attr_name + ' by that name.') return attrib def __setitem__(self, name, data): """Create an Attribute or change its entries @param name: name of Attribute to change @type name: str @param data: Entries to populate this Attribute with. Any existing Entries will be deleted! Another C{Attr} may be specified, in which case all its entries are copied. @type data: scalar, list, or L{Attr} """ if isinstance(data, AttrList): if name in self: del self[name] attr = self._get_or_create(name) for entryno in range(data.max_idx()): if data.has_entry(entryno): attr.new(data[entryno], data.type(entryno), entryno) else: attr = self._get_or_create(name) if isinstance(data, str_classes): data = [data] else: try: junk = len(data) except TypeError: data = [data] attr[:] = data del attr[len(data):] def __delitem__(self, name): """Delete an Attribute (and all its entries) @param name: name of Attribute to delete @type name: str """ try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) attr._delete() def __iter__(self, current=0): """Iterates over all Attr in this CDF or variable Returns name of one L{Attr} at a time until reaches the end. @note: Returned in number order. 
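Example (a sketch; ``cdffile`` is a hypothetical open CDF):

>>> names = [name for name in cdffile.attrs]   #gAttribute names, in number order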
""" count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) while current < count.value: candidate = self.AttrType(self._cdf_file, current) if candidate.global_scope() == self.global_scope: if self.special_entry is None or \ candidate.has_entry(self.special_entry()): if str == bytes: value = yield(candidate._name) else: value = yield(candidate._name.decode()) if value != None: current = self[value].number() current += 1 def __repr__(self): """Returns representation of attribute list Cannot return anything that can be eval'd to create a copy of the list, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<' + self.__class__.__name__ + ':\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute list This is an 'informal' representation in that it cannot be evaluated directly to create an L{AttrList}. @return: all the data in this list of attributes @rtype: str """ if self._cdf_file._opened: return '\n'.join([key + ': ' + ( ('\n' + ' ' * (len(key) + 2)).join( [str(value[i]) + ' [' + lib.cdftypenames[value.type(i)] + ']' for i in range(value.max_idx() + 1) if value.has_entry(i)]) if isinstance(value, Attr) else str(value) + ' [' + lib.cdftypenames[self.type(key)] + ']' ) for (key, value) in sorted(self.items())]) else: if isinstance(self._cdf_file.pathname, str): return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname) else: return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname.decode('ascii')) def clone(self, master, name=None, new_name=None): """ Clones another attribute list, or one attribute from it, into this list. Parameters ========== master : AttrList the attribute list to copy from. This can be any dict-like object. Other Parameters ================ name : str (optional) name of attribute to clone (default: clone entire list) new_name : str (optional) name of the new attribute, default ``name`` """ if name is None: self._clone_list(master) else: self._clone_attr(master, name, new_name) def copy(self): """ Create a copy of this attribute list Returns ======= out : dict copy of the entries for all attributes in this list """ return dict((key, value[:] if isinstance(value, Attr) else value) for (key, value) in self.items()) def new(self, name, data=None, type=None): """ Create a new Attr in this AttrList Parameters ========== name : str name of the new Attribute Other Parameters ================ data data to put into the first entry in the new Attribute type CDF type of the first entry from :mod:`~pycdf.const`. Only used if data are specified. Raises ====== KeyError : if the name already exists in this list """ if name in self: raise KeyError(name + ' already exists.') #A zAttr without an Entry in this zVar will be a "get" not "create" attr = self._get_or_create(name) if data is not None: if self.special_entry is None: attr.new(data, type) else: attr.new(data, type, self.special_entry()) def rename(self, old_name, new_name): """ Rename an attribute in this list Renaming a zAttribute renames it for *all* zVariables in this CDF! Parameters ========== old_name : str the current name of the attribute new_name : str the new name of the attribute """ AttrList.__getitem__(self, old_name).rename(new_name) def from_dict(self, in_dict): """ Fill this list of attributes from a dictionary .. deprecated:: 0.1.5 Use :meth:`~pycdf.AttrList.clone` instead; it supports cloning from dictionaries. 
Parameters ========== in_dict : dict Attribute list is populated entirely from this dictionary; all existing attributes are deleted. """ warnings.warn("from_dict is deprecated and will be removed. Use clone.", DeprecationWarning) for k in in_dict: self[k] = in_dict[k] for k in list(self): if not k in in_dict: del self[k] def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{AttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name self[new_name] = master[name] def _clone_list(self, master): """Clones this attribute list from another @param master: the attribute list to copy from @type master: L{AttrList} """ for name in master: self._clone_attr(master, name) for name in list(self): #Can't iterate over a list we're changing if not name in master: del self[name] def _get_or_create(self, name): """Retrieve L{Attr} or create it if it doesn't exist @param name: name of the attribute to look up or create @type name: str @return: attribute with this name @rtype: L{Attr} """ attr = None try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status != const.NO_SUCH_ATTR: raise if attr is None: attr = self.AttrType(self._cdf_file, name, True) elif attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) return attr class gAttrList(AttrList): """ Object representing *all* the gAttributes in a CDF. Normally accessed as an attribute of an open :class:`CDF`: >>> global_attribs = cdffile.attrs Appears as a dictionary: keys are attribute names; each value is an attribute represented by a :class:`gAttr` object. To access the global attribute TEXT: >>> text_attr = cdffile.attrs['TEXT'] See Also ======== :class:`AttrList` """ AttrType = gAttr attr_name = 'gAttribute' global_scope = True def __len__(self): """ Number of gAttributes in this CDF Returns ======= out : int number of gAttributes in the CDF """ count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMgATTRS_, ctypes.byref(count)) return count.value class zAttrList(AttrList): """Object representing *all* the zAttributes in a zVariable. Normally accessed as an attribute of a :class:`Var` in an open CDF: >>> epoch_attribs = cdffile['Epoch'].attrs Appears as a dictionary: keys are attribute names, values are the value of the zEntry associated with the appropriate zVariable. Each vAttribute in a CDF may only have a *single* entry associated with each variable. The entry may be a string, a single numerical value, or a series of numerical values. Entries with multiple values are returned as an entire list; direct access to the individual elements is not possible. Example: finding the first dependency of (ISTP-compliant) variable ``Flux``: >>> print cdffile['Flux'].attrs['DEPEND_0'] zAttributes are shared among zVariables, one zEntry allowed per zVariable. (pyCDF hides this detail.) Deleting the last zEntry for a zAttribute will delete the underlying zAttribute. zEntries are created and destroyed by the usual dict methods on the zAttrlist: >>> epoch_attribs['new_entry'] = [1, 2, 4] #assign a list to new zEntry >>> del epoch_attribs['new_entry'] #delete the zEntry The type of the zEntry is guessed from data provided. 
The type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing zEntry corresponding to this zVar #. other zEntries in this zAttribute #. the type of this zVar #. data-matching constraints described in :py:meth:`CDF.new` See Also ======== :class:`AttrList` """ AttrType = zAttr attr_name = 'zAttribute' global_scope = False def __init__(self, zvar): """Initialize the attribute collection @param zvar: zVariable these attributes are in @param zvar: :py:class:`pycdf.Var` """ super(zAttrList, self).__init__(zvar.cdf_file, zvar._num) self._zvar = zvar def __getitem__(self, name): """Find an zEntry by name @param name: name of the zAttribute to return @type name: str @return: attribute named L{name} @rtype: L{zAttr} @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if attrib.has_entry(zvar_num): attrib._raw = self._zvar._raw return attrib[zvar_num] else: raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) def __delitem__(self, name): """Delete an zEntry by name @param name: name of the zEntry to delete @type name: str @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library @note: If this is the only remaining entry, the Attribute will be deleted. """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(str(name) + ': no such attribute for variable ' + str(self._zvar._name)) del attrib[zvar_num] if len(attrib) == 0: attrib._delete() def __setitem__(self, name, data): """Sets a zEntry by name The type of the zEntry is guessed from L{data}. The type is chosen to match the data; subject to that constraint, it will try to match (in order): 1. existing zEntry corresponding to this zVar 2. other zEntries in this zAttribute 3. the type of this zVar 4. data-matching constraints described in L{_Hyperslice.types} @param name: name of zAttribute; zEntry for this zVariable will be set in zAttribute by this name @type name: str @raise CDFError: errors in CDF library @raise ValueError: if unable to find a valid CDF type matching L{data}, or if L{data} is the wrong dimensions. """ try: attr = super(zAttrList, self).__getitem__(name) except KeyError: attr = zAttr(self._cdf_file, name, True) attr._raw = self._zvar._raw attr[self._zvar._num()] = data def __len__(self): """Number of zAttributes in this variable @return: number of zAttributes in the CDF which have entries for this variable. @rtype: int """ length = 0 count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) current = 0 while current < count.value: candidate = zAttr(self._cdf_file, current) if not candidate.global_scope(): if candidate.has_entry(self._zvar._num()): length += 1 current += 1 return length def type(self, name, new_type=None): """Find or change the CDF type of a zEntry in this zVar @param name: name of the zAttr to check or change @type name: str @param new_type: type to change it to, see :py:mod:`pycdf.const` @type new_type: ctypes.c_long @return: CDF variable type, see :py:mod:`pycdf.const` @rtype: int @note: If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) return attrib.type(zvar_num, new_type) def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{zAttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name if new_name in self: del self[new_name] self.new(new_name, master[name], master.type(name) if hasattr(master, 'type') else None)
_open
Opens the CDF file (called on init). Will open an existing CDF file read/write. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note:: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor.
#!/usr/bin/env python # -*- coding: utf-8 -*- """ das developers note: This a is modification of the original SpacePy pycdf package. All refereneces to the greater spacepy package have been removed to create a small standalone module. --cwp 2018-10-18 The libcdf.so location code has been changed to find the version installed in anaconda. --cwp 2020-04-06 This package provides a Python interface to the Common Data Format (CDF) library used for many NASA missions, available at http://cdf.gsfc.nasa.gov/. It is targeted at Python 2.6+ and should work without change on either Python 2 or Python 3. The interface is intended to be 'pythonic' rather than reproducing the C interface. To open or close a CDF and access its variables, see the :class:`CDF` class. Accessing data within the variables is via the :class:`Var` class. The :data:`lib` object provides access to some routines that affect the functionality of the library in general. The :mod:`~pycdf.const` module contains constants useful for accessing the underlying library. Authors: Jon Niehof Institution: University of New Hampshire Contact: [email protected] Copyright 2010-2015 Los Alamos National Security, LLC. """ __contact__ = 'Jon Niehof, [email protected]' try: from collections.abc import MutableMapping, MutableSequence except ImportError: from collections import MutableMapping, MutableSequence import ctypes import ctypes.util import datetime import operator import os import os.path import shutil import sys import tempfile import warnings import weakref import numpy import numpy.ma #Import const AFTER library loaded, so failed load doesn't leave half-imported #from . import const try: str_classes = (str, bytes, unicode) except NameError: str_classes = (str, bytes) class Library(object): """ Abstraction of the base CDF C library and its state. Not normally intended for end-user use. An instance of this class is created at package load time as the :data:`~pycdf.lib` variable, providing access to the underlying C library if necessary. The CDF library itself is described in section 2.1 of the CDF user's guide, as well as the CDF C reference manual. Calling the C library directly requires knowledge of :mod:`ctypes`. Instantiating this object loads the C library, see :doc:`/pycdf` docs for details. .. autosummary:: ~Library.call ~Library.check_status ~Library.datetime_to_epoch ~Library.datetime_to_epoch16 ~Library.datetime_to_tt2000 ~Library.epoch_to_datetime ~Library.epoch_to_epoch16 ~Library.epoch_to_num ~Library.epoch_to_tt2000 ~Library.epoch16_to_datetime ~Library.epoch16_to_epoch ~Library.epoch16_to_tt2000 ~Library.set_backward supports_int8 ~Library.tt2000_to_datetime ~Library.tt2000_to_epoch ~Library.tt2000_to_epoch16 v_datetime_to_epoch v_datetime_to_epoch16 v_datetime_to_tt2000 v_epoch_to_datetime v_epoch_to_tt2000 v_epoch16_to_datetime v_epoch16_to_tt2000 v_tt2000_to_datetime v_tt2000_to_epoch v_tt2000_to_epoch16 libpath version .. automethod:: call .. automethod:: check_status .. automethod:: datetime_to_epoch .. automethod:: datetime_to_epoch16 .. automethod:: datetime_to_tt2000 .. automethod:: epoch_to_datetime .. automethod:: epoch_to_epoch16 .. automethod:: epoch_to_num .. automethod:: epoch_to_tt2000 .. automethod:: epoch16_to_datetime .. automethod:: epoch16_to_epoch .. automethod:: epoch16_to_tt2000 .. automethod:: set_backward .. attribute:: supports_int8 True if this library supports INT8 and TIME_TT2000 types; else False. .. automethod:: tt2000_to_datetime .. automethod:: tt2000_to_epoch .. 
automethod:: tt2000_to_epoch16 .. method:: v_datetime_to_epoch(datetime) A vectorized version of :meth:`datetime_to_epoch` which takes a numpy array of datetimes as input and returns an array of epochs. .. method:: v_datetime_to_epoch16(datetime) A vectorized version of :meth:`datetime_to_epoch16` which takes a numpy array of datetimes as input and returns an array of epoch16. .. method:: v_datetime_to_tt2000(datetime) A vectorized version of :meth:`datetime_to_tt2000` which takes a numpy array of datetimes as input and returns an array of TT2000. .. method:: v_epoch_to_datetime(epoch) A vectorized version of :meth:`epoch_to_datetime` which takes a numpy array of epochs as input and returns an array of datetimes. .. method:: v_epoch_to_tt2000(epoch) A vectorized version of :meth:`epoch_to_tt2000` which takes a numpy array of epochs as input and returns an array of tt2000s. .. method:: v_epoch16_to_datetime(epoch0, epoch1) A vectorized version of :meth:`epoch16_to_datetime` which takes a numpy array of epoch16 as input and returns an array of datetimes. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_epoch16_to_tt2000(epoch16) A vectorized version of :meth:`epoch16_to_tt2000` which takes a numpy array of epoch16 as input and returns an array of tt2000s. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_tt2000_to_datetime(tt2000) A vectorized version of :meth:`tt2000_to_datetime` which takes a numpy array of tt2000 as input and returns an array of datetimes. .. method:: v_tt2000_to_epoch(tt2000) A vectorized version of :meth:`tt2000_to_epoch` which takes a numpy array of tt2000 as input and returns an array of epochs. .. method:: v_tt2000_to_epoch16(tt2000) A vectorized version of :meth:`tt2000_to_epoch16` which takes a numpy array of tt2000 as input and returns an array of epoch16. .. attribute:: libpath The path where pycdf found the CDF C library, potentially useful in debugging. If this contains just the name of a file (with no path information), then the system linker found the library for pycdf. On Linux, ``ldconfig -p`` may be useful for displaying the system's library resolution. .. attribute:: version Version of the CDF library, (version, release, increment, subincrement) """ def __init__(self, libpath=None, library=None): """Load the CDF C library. Searches for the library in the order: 1. Appropriately-named file in CDF_LIB 2. Appropriately-named file in CDF_BASE 3. Standard library search path @raise CDFError: BAD_DATA_TYPE if can't map types properly """ if not 'CDF_TMP' in os.environ: os.environ['CDF_TMP'] = tempfile.gettempdir() if not library: if not libpath: self.libpath, self._library = self._find_lib() if self._library is None: raise Exception(( 'Cannot load CDF C library; checked {0}. 
' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(self.libpath))) else: self._library = ctypes.CDLL(libpath) self.libpath = libpath else: self._library = library self.libpath = libpath self._library.CDFlib.restype = ctypes.c_long #commonly used, so set it up here self._library.EPOCHbreakdown.restype = ctypes.c_long self._library.computeEPOCH.restype = ctypes.c_double self._library.computeEPOCH.argtypes = [ctypes.c_long] * 7 self._library.computeEPOCH16.restype = ctypes.c_double self._library.computeEPOCH16.argtypes = [ctypes.c_long] * 10 + \ [ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDFsetFileBackward'): self._library.CDFsetFileBackward.restype = None self._library.CDFsetFileBackward.argtypes = [ctypes.c_long] #Map old name to the 3.7.1+ name if not hasattr(self._library, 'computeTT2000') \ and hasattr(self._library, 'CDF_TT2000_from_UTC_parts'): self._library.computeTT2000 \ = self._library.CDF_TT2000_from_UTC_parts if hasattr(self._library, 'computeTT2000'): self._library.computeTT2000.restype = ctypes.c_longlong self._library.computeTT2000.argtypes = \ [ctypes.c_double] *9 #Map old name to the 3.7.1+ name if not hasattr(self._library, 'breakdownTT2000') \ and hasattr(self._library, 'CDF_TT2000_to_UTC_parts'): self._library.breakdownTT2000 \ = self._library.CDF_TT2000_to_UTC_parts if hasattr(self._library, 'breakdownTT2000'): self._library.breakdownTT2000.restype = None self._library.breakdownTT2000.argtypes = \ [ctypes.c_longlong] + [ctypes.POINTER(ctypes.c_double)] * 9 if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH'): self._library.CDF_TT2000_to_UTC_EPOCH.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH.argtypes = [ctypes.c_longlong] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH'): self._library.CDF_TT2000_from_UTC_EPOCH.restype = ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH.argtypes = [ctypes.c_double] if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH16'): self._library.CDF_TT2000_to_UTC_EPOCH16.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH16.argtypes = \ [ctypes.c_longlong, ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH16'): self._library.CDF_TT2000_from_UTC_EPOCH16.restype = \ ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH16.argtypes = \ [ctypes.POINTER(ctypes.c_double * 2)] #Get CDF version information ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) sub = ctypes.c_char(b' ') self.call(const.GET_, const.LIB_VERSION_, ctypes.byref(ver), const.GET_, const.LIB_RELEASE_, ctypes.byref(rel), const.GET_, const.LIB_INCREMENT_, ctypes.byref(inc), const.GET_, const.LIB_subINCREMENT_, ctypes.byref(sub)) ver = ver.value rel = rel.value inc = inc.value sub = sub.value self.version = (ver, rel, inc, sub) self._del_middle_rec_bug = ver < 3 or (ver == 3 and (rel < 4 or (rel == 4 and inc < 1))) self.supports_int8 = (ver > 3 or (ver == 3 and rel >=4)) self.cdftypenames = {const.CDF_BYTE.value: 'CDF_BYTE', const.CDF_CHAR.value: 'CDF_CHAR', const.CDF_INT1.value: 'CDF_INT1', const.CDF_UCHAR.value: 'CDF_UCHAR', const.CDF_UINT1.value: 'CDF_UINT1', const.CDF_INT2.value: 'CDF_INT2', const.CDF_UINT2.value: 'CDF_UINT2', const.CDF_INT4.value: 'CDF_INT4', const.CDF_UINT4.value: 'CDF_UINT4', const.CDF_INT8.value: 'CDF_INT8', const.CDF_FLOAT.value: 'CDF_FLOAT', const.CDF_REAL4.value: 'CDF_REAL4', const.CDF_DOUBLE.value: 'CDF_DOUBLE', const.CDF_REAL8.value: 'CDF_REAL8', const.CDF_EPOCH.value: 'CDF_EPOCH', 
const.CDF_EPOCH16.value: 'CDF_EPOCH16', const.CDF_TIME_TT2000.value: 'CDF_TIME_TT2000', } self.numpytypedict = {const.CDF_BYTE.value: numpy.int8, const.CDF_CHAR.value: numpy.int8, const.CDF_INT1.value: numpy.int8, const.CDF_UCHAR.value: numpy.uint8, const.CDF_UINT1.value: numpy.uint8, const.CDF_INT2.value: numpy.int16, const.CDF_UINT2.value: numpy.uint16, const.CDF_INT4.value: numpy.int32, const.CDF_UINT4.value: numpy.uint32, const.CDF_INT8.value: numpy.int64, const.CDF_FLOAT.value: numpy.float32, const.CDF_REAL4.value: numpy.float32, const.CDF_DOUBLE.value: numpy.float64, const.CDF_REAL8.value: numpy.float64, const.CDF_EPOCH.value: numpy.float64, const.CDF_EPOCH16.value: numpy.dtype((numpy.float64, 2)), const.CDF_TIME_TT2000.value: numpy.int64, } self.timetypes = [const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value] if not self.supports_int8: del self.cdftypenames[const.CDF_INT8.value] del self.numpytypedict[const.CDF_INT8.value] del self.cdftypenames[const.CDF_TIME_TT2000.value] del self.numpytypedict[const.CDF_TIME_TT2000.value] elif sys.platform.startswith('linux') \ and os.uname()[4].startswith('arm') \ and hasattr(self._library, 'computeTT2000') \ and self._library.computeTT2000( 2010, 1, 1, 0, 0, 0, 0, 0, 0) != 315576066184000000: #TT2000 call failed, so probably need to type-pun #double arguments to variadic functions. #Calling convention for non-variadic functions with floats #is unique, but convention for ints is same as variadic. #So type-pun arguments to integers to force that calling #convention. if ctypes.sizeof(ctypes.c_longlong) != \ ctypes.sizeof(ctypes.c_double): warnings.warn('ARM with unknown type sizes; ' 'TT2000 functions will not work.') else: self._library.computeTT2000.argtypes = \ [ctypes.c_longlong] * 9 c_ll_p = ctypes.POINTER(ctypes.c_longlong) if self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( 2010)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) != 315576066184000000: warnings.warn('ARM with unknown calling convention; ' 'TT2000 functions will not work.') self.datetime_to_tt2000 = self._datetime_to_tt2000_typepunned v_epoch16_to_datetime = numpy.frompyfunc( self.epoch16_to_datetime, 2, 1) self.v_epoch16_to_datetime = \ lambda x: v_epoch16_to_datetime(x[..., 0], x[..., 1]) self.v_epoch_to_datetime = numpy.frompyfunc( self.epoch_to_datetime, 1, 1) self.v_tt2000_to_datetime = numpy.frompyfunc( self.tt2000_to_datetime, 1, 1) self.v_datetime_to_epoch = numpy.vectorize( self.datetime_to_epoch, otypes=[numpy.float64]) v_datetime_to_epoch16 = numpy.frompyfunc( self.datetime_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. 
We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_datetime_to_epoch16(x): retval = numpy.require(v_datetime_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_datetime_to_epoch16 = _v_datetime_to_epoch16 self.v_datetime_to_tt2000 = numpy.vectorize( self.datetime_to_tt2000, otypes=[numpy.int64]) self.v_epoch_to_tt2000 = numpy.vectorize( self.epoch_to_tt2000, otypes=[numpy.int64]) self.v_tt2000_to_epoch = numpy.vectorize( self.tt2000_to_epoch, otypes=[numpy.float64]) v_epoch16_to_tt2000 = numpy.frompyfunc( self.epoch16_to_tt2000, 2, 1) self.v_epoch16_to_tt2000 = \ lambda x: v_epoch16_to_tt2000(x[..., 0], x[..., 1]) v_tt2000_to_epoch16 = numpy.frompyfunc( self.tt2000_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_tt2000_to_epoch16(x): retval = numpy.require(v_tt2000_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_tt2000_to_epoch16 = _v_tt2000_to_epoch16 if not self.supports_int8: self.datetime_to_tt2000 = self._bad_tt2000 self.tt2000_to_datetime = self._bad_tt2000 self.v_datetime_to_tt2000 = self._bad_tt2000 self.v_tt2000_to_datetime = self._bad_tt2000 self.epoch_to_tt2000 = self._bad_tt2000 self.v_epoch_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch = self._bad_tt2000 self.v_tt2000_to_epoch = self._bad_tt2000 self.epoch_16_to_tt2000 = self._bad_tt2000 self.v_epoch16_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch16 = self._bad_tt2000 self.v_tt2000_to_epoch16 = self._bad_tt2000 #Default to V2 CDF self.set_backward(True) @staticmethod def _find_lib(): """ Search for the CDF library Searches in likely locations for CDF libraries and attempts to load them. Stops at first successful load and, if fails, reports all the files that were tried as libraries. Returns ======= out : tuple This is either (path to library, loaded library) or, in the event of failure, (None, list of libraries tried) """ failed = [] for libpath in Library._lib_paths(): try: lib = ctypes.CDLL(libpath) except: failed.append(libpath) else: return libpath, lib return (failed, None) @staticmethod def _lib_paths(): """Find candidate paths for the CDF library Does not check that the library is actually in any particular directory, just returns a list of possible locations, in priority order. Returns ======= out : generator of str paths that look like the CDF library """ #What the library might be named names = { 'win32': ['cdf.dll'], 'darwin': ['libcdf.dylib', 'cdf.dylib', 'libcdf.so'], 'linux2': ['libcdf.so'], 'linux': ['libcdf.so'], } names = names.get(sys.platform, ['libcdf.so']) #All existing CDF-library-like paths within a directory search_dir = lambda x: \ [os.path.join(x, fname) for fname in names if os.path.exists(os.path.join(x, fname))] # Only use anaconda locations... # Defined during builds ... if 'PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['PREFIX'], 'lib')): yield p # defined when conda is activated ... 
if 'CONDA_PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'lib')): yield p # Special subdirectory for anaconda unix packages on windows if 'LIBRARY_BIN' in os.environ: for p in search_dir(os.environ['LIBRARY_BIN']): yield p ctypespath = ctypes.util.find_library( 'cdf.dll' if sys.platform == 'win32' else 'cdf') if ctypespath: yield ctypespath def check_status(self, status, ignore=()): """ Raise exception or warning based on return status of CDF call Parameters ========== status : int status returned by the C library Other Parameters ================ ignore : sequence of ctypes.c_long CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. (Default none). Raises ====== CDFError : if status < CDF_WARN, indicating an error Warns ===== CDFWarning : if CDF_WARN <= status < CDF_OK, indicating a warning. Returns ======= out : int status (unchanged) """ if status == const.CDF_OK or status in ignore: return status if status < const.CDF_WARN: raise CDFError(status) else: warning = CDFWarning(status) warning.warn() return status def call(self, *args, **kwargs): """ Call the CDF internal interface Passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with :meth:`check_status`. Terminal NULL is automatically added to args. Parameters ========== args : various, see :mod:`ctypes` Passed directly to the CDF library interface. Useful constants are defined in the :mod:`~pycdf.const` module. Other Parameters ================ ignore : sequence of CDF statuses sequence of CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. Returns ======= out : int CDF status from the library Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ if 'ignore' in kwargs: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) ), kwargs['ignore']) else: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) )) def set_backward(self, backward=True): """ Set backward compatibility mode for new CDFs Unless backward compatible mode is set, CDF files created by the version 3 library can not be read by V2. Parameters ========== backward : boolean Set backward compatible mode if True; clear it if False. Raises ====== ValueError : if backward=False and underlying CDF library is V2 """ if self.version[0] < 3: if not backward: raise ValueError( 'Cannot disable backward-compatible mode for CDF version 2.') else: return self._library.CDFsetFileBackward(const.BACKWARDFILEon if backward else const.BACKWARDFILEoff) def epoch_to_datetime(self, epoch): """ Converts a CDF epoch value to a datetime Parameters ========== epoch : float epoch value from CDF Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
See Also ======== v_epoch_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) self._library.EPOCHbreakdown(ctypes.c_double(epoch), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999000) else: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, msec.value * 1000) def datetime_to_epoch(self, dt): """ Converts a Python datetime to a CDF Epoch value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : float epoch corresponding to dt See Also ======== v_datetime_to_epoch """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) micro = dt.microsecond % 1000 if micro >= 500 and dt.year < 9999: dt += datetime.timedelta(0, 0, 1000) return self._library.computeEPOCH(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000)) def epoch16_to_datetime(self, epoch0, epoch1): """ Converts a CDF epoch16 value to a datetime .. note:: The call signature has changed since SpacePy 0.1.2. Formerly this method took a single argument with two values; now it requires two arguments (one for each value). To convert existing code, replace ``epoch16_to_datetime(epoch)`` with ``epoch16_to_datetime(*epoch)``. Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. See Also ======== v_epoch16_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) usec = ctypes.c_long(0) nsec = ctypes.c_long(0) psec = ctypes.c_long(0) self._library.EPOCH16breakdown((ctypes.c_double * 2)(epoch0, epoch1), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec), ctypes.byref(psec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) micro = int(float(msec.value) * 1000 + float(usec.value) + float(nsec.value) / 1000 + float(psec.value) / 1e6 + 0.5) if micro < 1000000: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_epoch16(self, dt): """ Converts a Python datetime to a CDF Epoch16 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : list of float epoch16 corresponding to dt See Also ======== v_datetime_to_epoch16 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) #Default to "illegal epoch" epoch16 = (ctypes.c_double * 2)(-1., -1.) 
if self._library.computeEPOCH16(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0, 0, epoch16): return (-1., -1.) #Failure, so illegal epoch return (epoch16[0], epoch16[1]) def epoch_to_epoch16(self, epoch): """ Converts a CDF EPOCH to a CDF EPOCH16 value Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : (double, double) EPOCH16 corresponding to epoch """ e = numpy.require(epoch, numpy.float64) s = numpy.trunc(e / 1000.0) #ugly numpy stuff, probably a better way.... res = numpy.hstack((s, (e - s * 1000.0) * 1e9)) if len(res) <= 2: return res newshape = list(res.shape[0:-2]) newshape.append(res.shape[-1] // 2) newshape.append(2) return numpy.rollaxis(res.reshape(newshape), -1, -2) def epoch_to_num(self, epoch): """ Convert CDF EPOCH to matplotlib number. Same output as :func:`~matplotlib.dates.date2num` and useful for plotting large data sets without converting the times through datetime. Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : double Floating point number representing days since 0001-01-01. """ #date2num day 1 is 1/1/1 00UT #epoch 1/1/1 00UT is 31622400000.0 (millisecond) return (epoch - 31622400000.0) / (24 * 60 * 60 * 1000.0) + 1.0 def epoch16_to_epoch(self, epoch16): """ Converts a CDF EPOCH16 to a CDF EPOCH value Parameters ========== epoch16 : (double, double) EPOCH16 to convert. Lists and numpy arrays are acceptable. LAST dimension should be 2: the two pairs of EPOCH16 Returns ======= out : double EPOCH corresponding to epoch16 """ e = numpy.require(epoch16, numpy.float64) return e[..., 0] * 1000.0 + numpy.round(e[..., 1] / 1e9) def tt2000_to_datetime(self, tt2000): """ Converts a CDF TT2000 value to a datetime .. note:: Although TT2000 values support leapseconds, Python's datetime object does not. Any times after 23:59:59.999999 will be truncated to 23:59:59.999999. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
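Examples
========
Illustrative sketch (assumes a CDF library of version 3.4.0 or later, so TT2000 support is available, and the module-level ``lib`` object):

>>> t = lib.datetime_to_tt2000(datetime.datetime(2010, 1, 1))
>>> lib.tt2000_to_datetime(t)  # whole dates round-trip exactly
datetime.datetime(2010, 1, 1, 0, 0)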
See Also ======== v_tt2000_to_datetime """ yyyy = ctypes.c_double(0) mm = ctypes.c_double(0) dd = ctypes.c_double(0) hh = ctypes.c_double(0) min = ctypes.c_double(0) sec = ctypes.c_double(0) msec = ctypes.c_double(0) usec = ctypes.c_double(0) nsec = ctypes.c_double(0) self._library.breakdownTT2000( ctypes.c_longlong(tt2000), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) sec = int(sec.value) if sec >= 60: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), 59, 999999) micro = int(msec.value * 1000 + usec.value + nsec.value / 1000 + 0.5) if micro < 1000000: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_tt2000(self, dt): """ Converts a Python datetime to a CDF TT2000 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0) def _datetime_to_tt2000_typepunned(self, dt): """ Converts a Python datetime to a CDF TT2000 value Typepunned version that passes doubles as longlongs, to get around ARM calling convention oddness. Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ c_ll_p = ctypes.POINTER(ctypes.c_longlong) if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( dt.year)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.month)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.day)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.hour)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.minute)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.second)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond // 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond % 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) def epoch_to_tt2000(self, epoch): """ Converts a CDF EPOCH to a CDF TT2000 value Parameters ========== epoch : double EPOCH to convert Returns ======= out : int tt2000 corresponding to epoch See Also ======== v_epoch_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH(epoch) def tt2000_to_epoch(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH .. note:: Although TT2000 values support leapseconds, CDF EPOCH values do not. 
Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double EPOCH corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch """ return self._library.CDF_TT2000_to_UTC_EPOCH(tt2000) def epoch16_to_tt2000(self, epoch0, epoch1): """ Converts a CDF epoch16 value to TT2000 .. note:: Because TT2000 does not support picoseconds, the picoseconds value in epoch is ignored (i.e., truncated.) Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : long TT2000 corresponding to epoch. See Also ======== v_epoch16_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH16( (ctypes.c_double * 2)(epoch0, epoch1)) def tt2000_to_epoch16(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH16 .. note:: Although TT2000 values support leapseconds, CDF EPOCH16 values do not. Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double, double EPOCH16 corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch16 """ #Default to "illegal epoch" if isn't populated epoch16 = (ctypes.c_double * 2)(-1., -1.) if self._library.CDF_TT2000_to_UTC_EPOCH16(tt2000, epoch16): return (-1., -1.) #Failure; illegal epoch return (epoch16[0], epoch16[1]) def _bad_tt2000(*args, **kwargs): """Convenience function for complaining that TT2000 not supported""" raise NotImplementedError( 'TT2000 functions require CDF library 3.4.0 or later') def download_library(): """Download and install the CDF library""" if sys.platform != 'win32': raise NotImplementedError( 'CDF library install only supported on Windows') try: import html.parser as HTMLParser except ImportError: import HTMLParser #https://stackoverflow.com/questions/1713038/super-fails-with-error-typeerror-argument-1-must-be-type-not-classobj class LinkParser(HTMLParser.HTMLParser, object): def __init__(self, *args, **kwargs): self.links_found = [] super(LinkParser, self).__init__(*args, **kwargs) def handle_starttag(self, tag, attrs): if tag != 'a' or attrs[0][0] != 'href': return self.links_found.append(attrs[0][1]) import re import subprocess try: import urllib.request as u except ImportError: import urllib as u # Removed reference to spacepy #import spacepy #if spacepy.config.get('user_agent', None): # class AppURLopener(u.FancyURLopener): # version = spacepy.config['user_agent'] # u._urlopener = AppURLopener() baseurl = 'https://spdf.sci.gsfc.nasa.gov/pub/software/cdf/dist/' url = u.urlopen(baseurl) listing = url.read() url.close() p = LinkParser() p.feed(listing) cdfdist = [l for l in p.links_found if re.match('^cdf3\d_\d(?:_\d)?/$', l)] if not cdfdist: raise RuntimeError( "Couldn't find CDF distribution directory to download") cdfdist.sort(key=lambda x: x.rstrip('/').split('_')) cdfverbase = cdfdist[-1].rstrip('/') instfname = cdfverbase + ('_0' if cdfverbase.count('_') == 1 else '') + \ '-setup-{0}.exe'.format(len('%x' % sys.maxsize)*4) insturl = baseurl + cdfverbase + '/windows/' + instfname tmpdir = tempfile.mkdtemp() try: fname, status = u.urlretrieve(insturl, os.path.join(tmpdir, instfname)) subprocess.check_call([fname, '/install', '/q1'], shell=False) finally: shutil.rmtree(tmpdir) _libpath, _library = Library._find_lib() 
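#Illustrative sketch, not part of the original search logic: if the
#automatic search above comes back empty, one way to experiment (before
#the import-time check below raises) is to load the CDF library from an
#explicit path and wrap it by hand. The path here is hypothetical and
#depends on the local installation.
#
#    _libpath = '/usr/local/cdf/lib/libcdf.so'  #hypothetical location
#    _library = ctypes.CDLL(_libpath)
#    lib = Library(_libpath, _library)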
if _library is None: raise Exception(('Cannot load CDF C library; checked {0}. ' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(_libpath))) from . import const lib = Library(_libpath, _library) """Module global library object. Initalized at module load time so all classes have ready access to the CDF library and a common state. E.g: >>> import pycdf >>> pycdf.lib.version (3, 3, 0, ' ') """ class CDFException(Exception): """ Base class for errors or warnings in the CDF library. Not normally used directly, but in subclasses :class:`CDFError` and :class:`CDFWarning`. Error messages provided by this class are looked up from the underlying C library. """ def __init__(self, status): """ Create a CDF Exception Uses CDF C library to look up an appropriate error message. Parameters ========== status : ctypes.c_long CDF status """ self.status = status self.string = 'CDF error ' + repr(status) + ', unable to get details.' message = ctypes.create_string_buffer(const.CDF_STATUSTEXT_LEN + 1) try: retval = lib._library.CDFlib(const.SELECT_, const.CDF_STATUS_, ctypes.c_long(status), const.GET_, const.STATUS_TEXT_, message, const.NULL_) if retval == const.CDF_OK: if isinstance(message.value, str): self.string = message.value elif isinstance(message.value, bytes): self.string = message.value.decode() except: pass def __str__(self): """ Error string associated with the library error. Returns ======= out : str Error message from the CDF library. """ return self.string class CDFError(CDFException): """Raised for an error in the CDF library.""" pass class CDFWarning(CDFException, UserWarning): """Used for a warning in the CDF library.""" def warn(self, level=4): """ Issues a warning based on the information stored in my exception Intended for use in check_status or similar wrapper function. Other Parameters ================ level : int optional (default 3), how far up the stack the warning should be reported. Passed directly to :class:`warnings.warn`. """ warnings.warn(self, self.__class__, level) class EpochError(Exception): """Used for errors in epoch routines""" pass def _compress(obj, comptype=None, param=None): """Set or check the compression of a :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param obj: object on which to set or check compression @type obj: :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param comptype: type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :py:mod:`pycdf.const`. If not specified, will not change compression. @type comptype: ctypes.c_long @param param: Compression parameter, see CDF CRM 4.10 and :py:mod:`pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
@type param: ctypes.c_long @return: (comptype, param) currently in effect @rtype: tuple """ if isinstance(obj, CDF): COMPRESSION_ = const.CDF_COMPRESSION_ elif isinstance(obj, Var): COMPRESSION_ = const.zVAR_COMPRESSION_ else: raise ValueError('Must specify a CDF or Var type.') validparams = {const.NO_COMPRESSION.value: [ctypes.c_long(0)], const.RLE_COMPRESSION.value: [const.RLE_OF_ZEROs], const.HUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.AHUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.GZIP_COMPRESSION.value: [ctypes.c_long(5), ctypes.c_long(1), ctypes.c_long(2), ctypes.c_long(3), ctypes.c_long(4), ctypes.c_long(6), ctypes.c_long(7), ctypes.c_long(8), ctypes.c_long(9), ], } comptypes = [const.NO_COMPRESSION, const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION, const.GZIP_COMPRESSION] comptypevalues = [i.value for i in comptypes] if comptype != None: if not hasattr(comptype, 'value'): comptype = ctypes.c_long(comptype) if param is None: if not comptype.value in validparams: raise CDFError(const.BAD_COMPRESSION) param = validparams[comptype.value][0] paramlist = (ctypes.c_long * 1)(param) obj._call(const.PUT_, COMPRESSION_, comptype, paramlist) params = (ctypes.c_long * const.CDF_MAX_PARMS)(*([0] * const.CDF_MAX_PARMS)) comptype = ctypes.c_long(0) percent = ctypes.c_long(0) obj._call(const.GET_, COMPRESSION_, ctypes.byref(comptype), ctypes.byref(params), ctypes.byref(percent)) param = params[0] if not comptype.value in comptypevalues: raise CDFError(const.BAD_COMPRESSION) validparamvalues = [i.value for i in validparams[comptype.value]] if not param in validparamvalues: raise CDFError(const.BAD_COMPRESSION_PARM) comptype = comptypes[comptypevalues.index(comptype.value)] if comptype in (const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION): param = validparams[comptype.value][validparamvalues.index(param)] return (comptype, param) class CDF(MutableMapping): """ Python object representing a CDF file. Open or create a CDF file by creating an object of this class. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. A readonly CDF with many variables may be slow to close. See :meth:`readonly`. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :meth:`close` or :meth:`save` when done. .. note:: Existing CDF files are opened read-only by default, see :meth:`readonly` to change. CDF supports the `with <http://docs.python.org/tutorial/inputoutput.html#methods-of-file-objects>`_ keyword, like other file objects, so: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... #do brilliant things with the CDF will open the CDF, execute the indented statements, and close the CDF when finished or when an error occurs. The `python docs <http://docs.python.org/reference/compound_stmts.html#with>`_ include more detail on this 'context manager' ability. 
CDF objects behave like a python `dictionary <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_, where the keys are names of variables in the CDF, and the values, :class:`Var` objects. As a dictionary, they are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_ and it is easy to loop over all of the variables in a file. Some examples: #. List the names of all variables in the open CDF ``cdffile``: >>> cdffile.keys() >>> for k in cdffile: #Alternate ... print(k) #. Get a :class:`Var` object for the variable named ``Epoch``: >>> epoch = cdffile['Epoch'] #. Determine if a CDF contains a variable named ``B_GSE``: >>> if 'B_GSE' in cdffile: ... print('B_GSE is in the file') ... else: ... print('B_GSE is not in the file') #. Find how many variables are in the file: >>> print(len(cdffile)) #. Delete the variable ``Epoch`` from the open CDF file ``cdffile``: >>> del cdffile['Epoch'] #. Display a summary of variables and types in open CDF file ``cdffile``: >>> print(cdffile) #. Open the CDF named ``cdf_filename.cdf``, read *all* the data from all variables into dictionary ``data``, and close it when done or if an error occurs: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... data = cdffile.copy() This last example can be very inefficient as it reads the entire CDF. Normally it's better to treat the CDF as a dictionary and access only the data needed, which will be pulled transparently from disc. See :class:`Var` for more subtle examples. Potentially useful dictionary methods and related functions: - `in <http://docs.python.org/reference/expressions.html#in>`_ - `keys <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_ - :py:func:`len` - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - :py:func:`sorted` - :py:func:`~spacepy.toolbox.dictree` The CDF user's guide section 2.2 has more background information on CDF files. The :attr:`~CDF.attrs` Python attribute acts as a dictionary referencing CDF attributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`gAttrList` for more on the dictionary of global attributes. Creating a new CDF from a master (skeleton) CDF has similar syntax to opening one: >>> cdffile = pycdf.CDF('cdf_filename.cdf', 'master_cdf_filename.cdf') This creates and opens ``cdf_filename.cdf`` as a copy of ``master_cdf_filename.cdf``. Using a skeleton CDF is recommended over making a CDF entirely from scratch, but this is possible by specifying a blank master: >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') When CDFs are created in this way, they are opened read-write, see :py:meth:`readonly` to change. By default, new CDFs (without a master) are created in version 2 (backward-compatible) format. To create a version 3 CDF, use :meth:`Library.set_backward`: >>> pycdf.lib.set_backward(False) >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') Add variables by direct assignment, which will automatically set type and dimension based on the data provided: >>> cdffile['new_variable_name'] = [1, 2, 3, 4] or, if more control is needed over the type and dimensions, use :py:meth:`new`. Although it is supported to assign Var objects to Python variables for convenience, there are some minor pitfalls that can arise when changing a CDF that will not affect most users. 
This is only a concern when assigning a zVar object to a Python variable, changing the CDF through some other variable, and then trying to use the zVar object via the originally assigned variable. Deleting a variable: >>> var = cdffile['Var1'] >>> del cdffile['Var1'] >>> var[0] #fail, no such variable Renaming a variable: >>> var = cdffile['Var1'] >>> cdffile['Var1'].rename('Var2') >>> var[0] #fail, no such variable Renaming via the same variable works: >>> var = cdffile['Var1'] >>> var.rename('Var2') >>> var[0] #succeeds, aware of new name Deleting a variable and then creating another variable with the same name may lead to some surprises: >>> var = cdffile['Var1'] >>> var[...] = [1, 2, 3, 4] >>> del cdffile['Var1'] >>> cdffile.new('Var1', data=[5, 6, 7, 8] >>> var[...] [5, 6, 7, 8] .. autosummary:: ~CDF.attr_num ~CDF.attrs ~CDF.add_attr_to_cache ~CDF.add_to_cache ~CDF.backward ~CDF.checksum ~CDF.clear_attr_from_cache ~CDF.clear_from_cache ~CDF.clone ~CDF.close ~CDF.col_major ~CDF.compress ~CDF.copy ~CDF.from_data ~CDF.new ~CDF.raw_var ~CDF.readonly ~CDF.save ~CDF.var_num ~CDF.version .. attribute:: CDF.attrs Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. .. attribute:: CDF.backward True if this CDF was created in backward-compatible mode (for opening with CDF library before 3.x) .. automethod:: add_to_cache .. automethod:: add_attr_to_cache .. automethod:: attr_num .. automethod:: checksum .. automethod:: clear_from_cache .. automethod:: clear_attr_from_cache .. automethod:: clone .. automethod:: close .. automethod:: col_major .. automethod:: compress .. automethod:: copy .. automethod:: from_data .. automethod:: new .. automethod:: raw_var .. automethod:: readonly .. automethod:: save .. automethod:: var_num .. automethod:: version """ def __init__(self, pathname, masterpath=None, create=None, readonly=None): """Open or create a CDF file. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. Raises ====== CDFError if CDF library reports an error CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save` when done. 
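To create a new, empty CDF instead (an illustrative sketch; the filename is hypothetical), pass an empty master path; the file is then opened read-write:

>>> cdffile = pycdf.CDF('new_filename.cdf', '')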
""" if masterpath is not None: #Looks like we want to create if create is False: raise ValueError('Cannot specify a master CDF without creating a CDF') if readonly is True: raise ValueError('Cannot create a CDF in readonly mode') if create and readonly: raise ValueError('Cannot create a CDF in readonly mode') try: self.pathname = pathname.encode() except AttributeError: raise ValueError( 'pathname must be string-like: {0}'.format(pathname)) self._handle = ctypes.c_void_p(None) self._opened = False if masterpath is None and not create: self._open(True if readonly is None else readonly) elif masterpath: self._from_master(masterpath.encode()) else: self._create() lib.call(const.SELECT_, const.CDF_zMODE_, ctypes.c_long(2)) self._attrlistref = weakref.ref(gAttrList(self)) self.backward = self.version()[0] < 3 self._var_nums = {} """Cache of name-to-number mappings for variables in this CDF""" self._attr_info = {} """Cache of name-to-(number, global) mappings for attributes in this CDF""" def __del__(self): """Destructor; called when CDF object is destroyed. Close CDF file if there is still a valid handle. .. note:: To avoid data loss, explicitly call :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save`. """ if self._opened: self.close() def __delitem__(self, name): """Delete a zVariable in this CDF, by name or number Parameters ========== name : string or int Name or number of the CDF variable .. note: Variable numbers may change if variables are added or removed. Examples ======== Delete the variable ``Epoch`` from the open CDF file ``cdffile``. >>> del cdffile['Epoch'] """ self[name]._delete() def __enter__(self): """Context manager entrance function.""" return self def __exit__(self, type, value, traceback): """Context manager exit function. Close CDF file. """ self.close() def __getitem__(self, name): """Gets a zVariable in this CDF, by name or number The CDF acts like a dict @param name: Name or number of the CDF variable @type name: string or int @return: CDF variable named or numbered L{name} @rtype: :py:class:`pycdf.Var` @raise KeyError: for pretty much any problem in lookup @note: variable numbers may change if variables are added or removed. """ try: return Var(self, name) except CDFException as e: raise KeyError('{0}: {1}'.format(name, e)) def __setitem__(self, name, data): """Writes data to a zVariable in this CDF If the zVariable does not exist, will create one matching L{data}. If it does exist, will attempt to write L{data} to it without changing the type or dimensions. @param name: name or number of the variable to write @type name: str or int @param data: data to write, or a :py:class:`pycdf.Var` to copy """ if isinstance(data, Var): self.clone(data, name) elif name in self: self[name][...] 
= data if hasattr(data, 'attrs'): self[name].attrs.clone(data.attrs) else: self.new(name, data) def __iter__(self, current = 0): """Iterates over zVars in CDF Iterators for dicts return keys @note: Returned in variable-number order """ while current < self.__len__(): name = self[current].name() value = (yield name) if value is None: current += 1 else: current = self[value]._num() current += 1 def __len__(self): """Implements 'length' of CDF (number of zVars) @return: number of zVars in the CDF @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, const.CDF_NUMzVARS_, ctypes.byref(count)) return count.value def __contains__(self, key): """Determines whether a particular variable name is in the CDF @note: Essentially an efficiency function; L{__iter__} is called if this isn't defined @param key: key/variable name to check @type key: string @return: True if L{key} is the name of a variable in CDF, else False @rtype: Boolean """ try: foo = self[key] return True except KeyError as e: expected = str(key) + \ ": NO_SUCH_VAR: Named variable not found in this CDF." if expected in e.args: return False raise def __repr__(self): """Returns representation of CDF Cannot return anything that can be eval'd to create a copy of the CDF, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<CDF:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the CDF This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.CDF`, just the names, types, and sizes of all variables. (Attributes are not listed.) @return: description of the variables in the CDF @rtype: str """ if self._opened: return '\n'.join([key + ': ' + str(value) for (key, value) in sorted(self.items())]) #can get away with this sort because second value in tuple isn't #compared unless first are different, and variable name is unique. else: if isinstance(self.pathname, str): return 'Closed CDF {0}'.format(self.pathname) else: return 'Closed CDF {0}'.format(self.pathname.decode('ascii')) # MASKED: _open function (lines 1819-1837) def _create(self): """Creates (and opens) a new CDF file Created at ``pathname``. Assumes zero-dimension r variables Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.CREATE_, const.CDF_, self.pathname, ctypes.c_long(0), (ctypes.c_long * 1)(0), ctypes.byref(self._handle)) self._opened = True def _from_master(self, master_path): """Creates a new CDF from a master CDF file ``master_path`` is copied to ``pathname`` and opened. Parameters ========== master_path : string location of the master CDF file Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ if os.path.exists(self.pathname): raise CDFError(const.CDF_EXISTS) shutil.copy2(master_path, self.pathname) self._open(False) def _call(self, *args, **kwargs): """Select this CDF as current and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. 
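Illustrative use, mirroring :meth:`__len__` elsewhere in this class (``count`` is just a scratch ctypes value for this sketch):

>>> count = ctypes.c_long(0)
>>> self._call(const.GET_, const.CDF_NUMzVARS_, ctypes.byref(count))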
Parameters ========== args : various, see :py:mod:`ctypes`. Passed directly to the CDF library interface. Useful constants are defined in the :doc:`const <pycdf_const>` module of this package. Returns ======= out : ctypes.c_long CDF status from the library .. note: Terminal NULL_ is automatically added to ``args``. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. """ return lib.call(const.SELECT_, const.CDF_, self._handle, *args, **kwargs) def clone(self, zVar, name=None, data=True): """ Clone a zVariable (from another CDF or this) into this CDF Parameters ========== zVar : :py:class:`Var` variable to clone Other Parameters ================ name : str Name of the new variable (default: name of the original) data : boolean (optional) Copy data, or only type, dimensions, variance, attributes? (default: True, copy data as well) Returns ======= out : :py:class:`Var` The newly-created zVar in this CDF """ if name is None: name = zVar.name() if name in self: del self[name] self.new(name, type=zVar.type(), recVary=zVar.rv(), dimVarys=zVar.dv(), dims=zVar._dim_sizes(), n_elements=zVar._nelems()) self[name].compress(*zVar.compress()) self[name].attrs.clone(zVar.attrs) if data: r = zVar._raw zVar._raw = True self.raw_var(name)[...] = zVar[...] zVar._raw = r return zVar def col_major(self, new_col=None): """ Finds the majority of this CDF file Other Parameters ================ new_col : boolean Specify True to change to column-major, False to change to row major, or do not specify to check the majority rather than changing it. (default is check only) Returns ======= out : boolean True if column-major, false if row-major """ if new_col != None: new_maj = const.COLUMN_MAJOR if new_col else const.ROW_MAJOR self._call(const.PUT_, const.CDF_MAJORITY_, new_maj) maj = ctypes.c_long(0) self._call(const.GET_, const.CDF_MAJORITY_, ctypes.byref(maj)) if not maj.value in (const.ROW_MAJOR.value, const.COLUMN_MAJOR.value): raise CDFError(const.BAD_MAJORITY) return maj.value == const.COLUMN_MAJOR.value def readonly(self, ro=None): """ Sets or check the readonly status of this CDF If the CDF has been changed since opening, setting readonly mode will have no effect. .. note:: Closing a CDF that has been opened readonly, or setting readonly False, may take a substantial amount of time if there are many variables in the CDF, as a (potentially large) cache needs to be cleared. Consider specifying ``readonly=False`` when opening the file if this is an issue. However, this may make some reading operations slower. Other Parameters ================ ro : Boolean True to set the CDF readonly, False to set it read/write, or leave out to check only. Returns ======= out : Boolean True if CDF is read-only, else False Raises ====== CDFError : if bad mode is set """ if ro == True: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYon) elif ro == False: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYoff) mode = ctypes.c_long(0) self._call(const.CONFIRM_, const.CDF_READONLY_MODE_, ctypes.byref(mode)) if mode.value == const.READONLYon.value: return True elif mode.value == const.READONLYoff.value: return False else: raise CDFError(const.BAD_READONLY_MODE.value) def checksum(self, new_val=None): """ Set or check the checksum status of this CDF. If checksums are enabled, the checksum will be verified every time the file is opened. 
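Illustrative sketch (``cdffile`` is a hypothetical open, writable CDF):

>>> cdffile.checksum(True)   # enable MD5 checksums
True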
Other Parameters ================ new_val : boolean True to enable checksum, False to disable, or leave out to simply check. Returns ======= out : boolean True if the checksum is enabled or False if disabled """ if new_val != None: self._call(const.PUT_, const.CDF_CHECKSUM_, const.MD5_CHECKSUM if new_val else const.NO_CHECKSUM) chk = ctypes.c_long(0) self._call(const.GET_, const.CDF_CHECKSUM_, ctypes.byref(chk)) if not chk.value in (const.MD5_CHECKSUM.value, const.NO_CHECKSUM.value): raise CDFError(const.BAD_CHECKSUM) return chk.value == const.MD5_CHECKSUM.value def close(self): """ Closes the CDF file Although called on object destruction (:meth:`~CDF.__del__`), to ensure all data are saved, the user should explicitly call :meth:`~CDF.close` or :meth:`~CDF.save`. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.CLOSE_, const.CDF_) self._opened = False def compress(self, comptype=None, param=None): """ Set or check the compression of this CDF Sets compression on entire *file*, not per-variable. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) Returns ======= out : tuple (comptype, param) currently in effect See Also ======== :meth:`Var.compress` Examples ======== Set file ``cdffile`` to gzip compression, compression level 9: >>> cdffile.compress(pycdf.const.GZIP_COMPRESSION, 9) """ return _compress(self, comptype, param) def new(self, name, data=None, type=None, recVary=True, dimVarys=None, dims=None, n_elements=None, compress=None, compress_param=None): """ Create a new zVariable in this CDF .. note:: Either ``data`` or ``type`` must be specified. If type is not specified, it is guessed from ``data``. Parameters ========== name : str name of the new variable Other Parameters ================ data data to store in the new variable. If this has a an ``attrs`` attribute (e.g., :class:`~spacepy.datamodel.dmarray`), it will be used to populate attributes of the new variable. type : ctypes.c_long CDF type of the variable, from :mod:`~pycdf.const`. See section 2.5 of the CDF user's guide for more information on CDF data types. recVary : boolean record variance of the variable (default True) dimVarys : list of boolean dimension variance of each dimension, default True for all dimensions. dims : list of int size of each dimension of this variable, default zero-dimensional. Note this is the dimensionality as defined by CDF, i.e., for record-varying variables it excludes the leading record dimension. See :py:class:`Var`. n_elements : int number of elements, should be 1 except for CDF_CHAR, for which it's the length of the string. compress : ctypes.c_long Compression to apply to this variable, default None. See :py:meth:`Var.compress`. compress_param : ctypes.c_long Compression parameter if compression used; reasonable default is chosen. See :py:meth:`Var.compress`. Returns ======= out : :py:class:`Var` the newly-created zVariable Raises ====== ValueError : if neither data nor sufficient typing information is provided. 
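Examples
========
Illustrative sketches (``cdffile`` is a hypothetical open, writable CDF; the variable names and values are made up):

>>> epoch = cdffile.new('Epoch', data=[datetime.datetime(2010, 1, 1)])
>>> flag = cdffile.new('flag', recVary=False, type=pycdf.const.CDF_INT4)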
Notes ===== Any given data may be representable by a range of CDF types; if the type is not specified, pycdf will guess which the CDF types which can represent this data. This breaks down to: #. If input data is a numpy array, match the type of that array #. Proper kind (numerical, string, time) #. Proper range (stores highest and lowest number provided) #. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: #. Type that matches precision of data first, then #. integer type before float type, then #. Smallest type first, then #. signed type first, then #. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if ``data`` specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: #. absolute values between 0 and 3e-39 #. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. """ if type in (const.CDF_EPOCH16, const.CDF_INT8, const.CDF_TIME_TT2000) \ and self.backward: raise ValueError('Cannot use EPOCH16, INT8, or TIME_TT2000 ' 'in backward-compatible CDF') if not lib.supports_int8 and \ type in (const.CDF_INT8, const.CDF_TIME_TT2000): raise ValueError('INT8 and TIME_TT2000 require CDF library 3.4.0') if data is None: if type is None: raise ValueError('Must provide either data or a CDF type.') if dims is None: dims = [] if n_elements is None: n_elements = 1 else: (guess_dims, guess_types, guess_elements) = _Hyperslice.types(data) if dims is None: if recVary: if guess_dims == (): raise ValueError( 'Record-varying data cannot be scalar. ' 'Specify NRV with CDF.new() or put data in array.') dims = guess_dims[1:] else: dims = guess_dims if type is None: type = guess_types[0] if type == const.CDF_EPOCH16.value and self.backward: type = const.CDF_EPOCH if n_elements is None: n_elements = guess_elements if dimVarys is None: dimVarys = [True for i in dims] recVary = const.VARY if recVary else const.NOVARY dimVarys = [const.VARY if dimVary else const.NOVARY for dimVary in dimVarys] if not hasattr(type, 'value'): type = ctypes.c_long(type) if type.value == const.CDF_INT8.value and not lib.supports_int8: raise ValueError( '64-bit integer support require CDF library 3.4.0') if type.value in (const.CDF_EPOCH16.value, const.CDF_INT8.value, const.CDF_TIME_TT2000.value) \ and self.backward: raise ValueError('Data requires EPOCH16, INT8, or TIME_TT2000; ' 'incompatible with backward-compatible CDF') new_var = Var(self, name, type, n_elements, dims, recVary, dimVarys) if compress != None: new_var.compress(compress, compress_param) if data is not None: new_var[...] = data if hasattr(data, 'attrs'): new_var.attrs.clone(data.attrs) return new_var def raw_var(self, name): """ Get a "raw" :class:`Var` object. Normally a :class:`Var` will perform translation of values for certain types (to/from Unicode for CHAR variables on Py3k, and to/from datetime for all time types). A "raw" object does not perform this translation, on read or write. This does *not* affect the data on disk, and in fact it is possible to maintain multiple Python objects with access to the same zVariable. 
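Illustrative sketch (``cdffile`` and the variable name are hypothetical):

>>> cdffile['Epoch'][0]          # read as datetime.datetime
>>> cdffile.raw_var('Epoch')[0]  # same value as the underlying CDF_EPOCH float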
Parameters ========== name : str name or number of the zVariable """ v = self[name] v._raw = True return v def save(self): """ Saves the CDF file but leaves it open. If closing the CDF, :meth:`close` is sufficient; there is no need to call :meth:`save` before :meth:`close`. .. note:: Relies on an undocumented call of the CDF C library, which is also used in the Java interface. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.SAVE_, const.CDF_) def copy(self): """ Make a copy of all data and attributes in this CDF Returns ======= out : :py:class:`CDFCopy` :class:`~spacepy.datamodel.SpaceData`-like object of all data """ return CDFCopy(self) def version(self): """ Get version of library that created this CDF Returns ======= out : tuple version of CDF library, in form (version, release, increment) """ ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) self._call(const.GET_, const.CDF_VERSION_, ctypes.byref(ver), const.GET_, const.CDF_RELEASE_, ctypes.byref(rel), const.GET_, const.CDF_INCREMENT_, ctypes.byref(inc)) return (ver.value, rel.value, inc.value) def _get_attrs(self): """Get attribute list Provide access to the CDF's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = gAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. """) def var_num(self, varname): """Get the variable number of a particular variable name This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : int Variable number of this zvariable. """ num = self._var_nums.get(varname, None) if num is None: #Copied from Var._get, which can hopefully be thinned varNum = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMBER_, varname, ctypes.byref(varNum)) num = varNum.value self._var_nums[varname] = num return num def attr_num(self, attrname): """Get the attribute number and scope by attribute name This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the zVariable. Not this is NOT a string in Python 3! 
Raises ====== CDFError : if variable is not found Returns ======= out : tuple attribute number, scope (True for global) of this attribute """ res = self._attr_info.get(attrname, None) if res is None: #Copied from Var._get, which can hopefully be thinned attrNum = ctypes.c_long(0) self._call(const.GET_, const.ATTR_NUMBER_, attrname, ctypes.byref(attrNum)) scope = ctypes.c_long(0) self._call(const.SELECT_, const.ATTR_, attrNum, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) res = (attrNum.value, scope) self._attr_info[attrname] = res return res def clear_attr_from_cache(self, attrname): """Mark an attribute deleted in the name-to-number cache Will remove an attribute, and all attributes with higher numbers, from the attribute cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the attribute. Not this is NOT a string in Python 3! """ num, scope = self.attr_num(attrname) #All numbers higher than this are renumbered for a, n in list(self._attr_info.items()): if n[0] >= num: del self._attr_info[a] def clear_from_cache(self, varname): """Mark a variable deleted in the name-to-number cache Will remove a variable, and all variables with higher numbers, from the variable cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! """ num = self.var_num(varname) #All numbers higher than this are renumbered for v, n in list(self._var_nums.items()): if n >= num: del self._var_nums[v] def add_attr_to_cache(self, attrname, num, scope): """Add an attribute to the name-to-number cache This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable scope : bool True if global scope; False if variable scope. """ self._attr_info[attrname] = (num, scope) def add_to_cache(self, varname, num): """Add a variable to the name-to-number cache This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable """ self._var_nums[varname] = num #Note there is no function for delete, currently handled in Var.rename #and Attr.rename by just deleting from the dict directly. Maybe this #should be differen (maybe should be possible to follow a variable across #a rename...) class Var(MutableSequence): """ A CDF variable. This object does not directly store the data from the CDF; rather, it provides access to the data in a format that much like a Python list or numpy :class:`~numpy.ndarray`. 
General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. The CDF user's guide, section 2.3, provides background on variables. .. note:: Not intended to be created directly; use methods of :class:`CDF` to gain access to a variable. A record-varying variable's data are viewed as a hypercube of dimensions n_dims+1 (the extra dimension is the record number). They are indexed in row-major fashion, i.e. the last index changes most frequently / is contiguous in memory. If the CDF is column-major, the data are transformed to row-major before return. Non record-varying variables are similar, but do not have the extra dimension of record number. Variables can be subscripted by a multidimensional index to return the data. Indices are in row-major order with the first dimension representing the record number. If the CDF is column major, the data are reordered to row major. Each dimension is specified by standard Python `slice <http://docs.python.org/tutorial/introduction.html#strings>`_ notation, with dimensions separated by commas. The ellipsis fills in any missing dimensions with full slices. The returned data are lists; Python represents multidimensional arrays as nested lists. The innermost set of lists represents contiguous data. .. note:: numpy 'fancy indexing' is *not* supported. Degenerate dimensions are 'collapsed', i.e. no list of only one element will be returned if a single subscript is specified instead of a range. (To avoid this, specify a slice like 1:2, which starts with 1 and ends before 2). Two special cases: 1. requesting a single-dimension slice for a record-varying variable will return all data for that record number (or those record numbers) for that variable. 2. Requests for multi-dimensional variables may skip the record-number dimension and simply specify the slice on the array itself. In that case, the slice of the array will be returned for all records. In the event of ambiguity (e.g., single-dimension slice on a one-dimensional variable), case 1 takes priority. Otherwise, mismatch between the number of dimensions specified in the slice and the number of dimensions in the variable will cause an :exc:`~exceptions.IndexError` to be thrown. This all sounds very complicated but it is essentially attempting to do the 'right thing' for a range of slices. An unusual case is scalar (zero-dimensional) non-record-varying variables. Clearly they cannot be subscripted normally. In this case, use the ``[...]`` syntax meaning 'access all data.': >>> import pycdf >>> testcdf = pycdf.CDF('test.cdf', '') >>> variable = testcdf.new('variable', recVary=False, ... type=pycdf.const.CDF_INT4) >>> variable[...] = 10 >>> variable <Var: CDF_INT4 [] NRV > >>> variable[...] 10 Reading any empty non-record-varying variable will return an empty with the same *number* of dimensions, but all dimensions will be of zero length. The scalar is, again, a special case: due to the inability to have a numpy array which is both zero-dimensional and empty, reading an NRV scalar variable with no data will return an empty one-dimensional array. This is really not recommended. As a list type, variables are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_; iterating over a variable returns a single complete record at a time. This is all clearer with examples. 
Consider a variable ``B_GSM``, with three elements per record (x, y, z components) and fifty records in the CDF. Then: 1. ``B_GSM[0, 1]`` is the y component of the first record. 2. ``B_GSM[10, :]`` is a three-element list, containing x, y, and z components of the 11th record. As a shortcut, if only one dimension is specified, it is assumed to be the record number, so this could also be written ``B_GSM[10]``. 3. ``B_GSM[...]`` reads all data for ``B_GSM`` and returns it as a fifty-element list, each element itself being a three-element list of x, y, z components. Multidimensional example: consider fluxes stored as a function of pitch angle and energy. Such a variable may be called Flux and stored as a two-dimensional array, with the first dimension representing (say) ten energy steps and the second, eighteen pitch angle bins (ten degrees wide, centered from 5 to 175 degrees). Assume 100 records stored in the CDF (i.e. 100 different times). 1. ``Flux[4]`` is a list of ten elements, one per energy step, each element being a list of 18 fluxes, one per pitch bin. All are taken from the fifth record in the CDF. 2. ``Flux[4, :, 0:4]`` is the same record, all energies, but only the first four pitch bins (roughly, field-aligned). 3. ``Flux[..., 0:4]`` is a 100-element list (one per record), each element being a ten-element list (one per energy step), each containing fluxes for the first four pitch bins. This slicing notation is very flexible and allows reading specifically the desired data from the CDF. All data are, on read, converted to appropriate Python data types; EPOCH, EPOCH16, and TIME_TT2000 types are converted to :class:`~datetime.datetime`. Data are returned in numpy arrays. .. note:: Although pycdf supports TIME_TT2000 variables, the Python :class:`~datetime.datetime` object does not support leap seconds. Thus, on read, any seconds past 59 are truncated to 59.999999 (59 seconds, 999 milliseconds, 999 microseconds). Potentially useful list methods and related functions: - `count <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `in <http://docs.python.org/reference/expressions.html#in>`_ - `index <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `len <http://docs.python.org/library/functions.html#len>`_ - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - `sorted <http://docs.python.org/library/functions.html#sorted>`_ The topic of array majority can be very confusing; good background material is available at `IDL Array Storage and Indexing <http://www.idlcoyote.com/misc_tips/colrow_major.html>`_. In brief, *regardless of the majority stored in the CDF*, pycdf will always present the data in the native Python majority, row-major order, also known as C order. This is the default order in `NumPy <http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html #internal-memory-layout-of-an-ndarray>`_. However, packages that render image data may expect it in column-major order. If the axes seem 'swapped' this is likely the reason. The :attr:`~Var.attrs` Python attribute acts as a dictionary referencing zAttributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`zAttrList` for more on the dictionary of attributes. With writing, as with reading, every attempt has been made to match the behavior of Python lists. You can write one record, many records, or even certain elements of all records. 
There is one restriction: only the record dimension (i.e. dimension 0) can be resized by write, as all records in a variable must have the same dimensions. Similarly, only whole records can be deleted. .. note:: Unusual error messages on writing data usually mean that pycdf is unable to interpret the data as a regular array of a single type matching the type and shape of the variable being written. A 5x4 array is supported; an irregular array where one row has five columns and a different row has six columns is not. Error messages of this type include: - ``Data must be well-formed, regular array of number, string, or datetime`` - ``setting an array element with a sequence.`` - ``shape mismatch: objects cannot be broadcast to a single shape`` For these examples, assume Flux has 100 records and dimensions [2, 3]. Rewrite the first record without changing the rest: >>> Flux[0] = [[1, 2, 3], [4, 5, 6]] Writes a new first record and delete all the rest: >>> Flux[...] = [[1, 2, 3], [4, 5, 6]] Write a new record in the last position and add a new record after: >>> Flux[99:] = [[[1, 2, 3], [4, 5, 6]], ... [[11, 12, 13], [14, 15, 16]]] Insert two new records between the current number 5 and 6: >>> Flux[5:6] = [[[1, 2, 3], [4, 5, 6]], [[11, 12, 13], ... [14, 15, 16]]] This operation can be quite slow, as it requires reading and rewriting the entire variable. (CDF does not directly support record insertion.) Change the first element of the first two records but leave other elements alone: >>> Flux[0:2, 0, 0] = [1, 2] Remove the first record: >>> del Flux[0] Removes record 5 (the sixth): >>> del Flux[5] Due to the need to work around a bug in the CDF library, this operation can be quite slow. Delete *all data* from ``Flux``, but leave the variable definition intact: >>> del Flux[...] .. note:: Although this interface only directly supports zVariables, zMode is set on opening the CDF so rVars appear as zVars. See p.24 of the CDF user's guide; pyCDF uses zMode 2. .. autosummary:: ~Var.attrs ~Var.compress ~Var.copy ~Var.dtype ~Var.dv ~Var.insert ~Var.name ~Var.rename ~Var.rv ~Var.shape ~Var.type .. attribute:: Var.attrs zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. .. automethod:: compress .. automethod:: copy .. autoattribute:: dtype .. automethod:: dv .. automethod:: insert .. automethod:: name .. automethod:: rename .. automethod:: rv .. autoattribute:: shape .. automethod:: type """ def __init__(self, cdf_file, var_name, *args): """Create or locate a variable Parameters ========== cdf_file : :py:class:`pycdf.CDF` CDF file containing this variable var_name : string name of this variable Other Parameters ================ args additional arguments passed to :py:meth:`_create`. If none, opens an existing variable. If provided, creates a new one. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning """ self.cdf_file = cdf_file #This is the definitive "identify" of variable self._name = None self._type = None #CDF type (long) self._raw = False #Raw access (skip all conversions) if len(args) == 0: self._get(var_name) else: self._create(var_name, *args) #Weak reference to attribute list (use attrs instead) #This avoids a reference loop self._attrlistref = weakref.ref(zAttrList(self)) def __getitem__(self, key): """Returns a slice from the data array. Details under :py:class:`pycdf.Var`. @return: The data from this variable @rtype: list-of-lists of appropriate type. 
@raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) #Hyperslice mostly catches this sort of thing, but #an empty variable is a special case, since we might want to #WRITE to 0th record (which Hyperslice also supports) but #can't READ from it, and iterating over tries to read from it. if hslice.rv: if hslice.dimsizes[0] == 0 and hslice.degen[0] and \ hslice.starts[0] == 0: raise IndexError('record index out of range') #For NRV, again hslice will assume 0th record exists since we might #want to write. So ANY degenerate dim other than the glued-on 0th #suggests an explicit index that should fail. None degenerate suggests #make an empty array. #Note this is pulling a lot of hyperslice stuff into getitem! elif hslice.dimsizes[0] == 0: if len(hslice.degen) > 1 and max(hslice.degen[1:]): raise IndexError('record index out of range') else: #The zero-length dimension is degenerate so it gets chopped, #and you can't have a zero-length numpy array that still #maintains the size of all other dimensions. So just force #a zero-dim array and the rest will follow hslice.counts[...] = 0 #If this is a scalar, need to make a single non-degenerate #dimension so it can be empty. if len(hslice.counts) == 1: hslice.degen[0] = False result = hslice.create_array() if hslice.counts[0] != 0: hslice.select() lib.call(const.GET_, const.zVAR_HYPERDATA_, result.ctypes.data_as(ctypes.c_void_p)) return hslice.convert_input_array(result) def __delitem__(self, key): """Removes a record (or set of records) from the CDF Only whole records can be deleted, so the del call must either specify only one dimension or it must specify all elements of the non-record dimensions. This is *not* a way to resize a variable! Deleting records from the middle of a variable may be very slow in some circumstances. To work around a bug in CDF library versions 3.4.0 and before, all the data must be read in, the requested deletions done, and then all written back out. @param key: index or slice to delete @type key: int or slice @raise TypeError: if an attempt is made to delete from a non record-varying variable, or to delete below the record level """ if not self.rv(): raise TypeError('Cannot delete records from non-record-varying ' 'variable.') hslice = _Hyperslice(self, key) if hslice.dims > 1 and (hslice.counts[1:] != hslice.dimsizes[1:]).any(): raise TypeError('Can only delete entire records.') if hslice.counts[0] == 0: return start = hslice.starts[0] count = hslice.counts[0] interval = hslice.intervals[0] dimsize = hslice.dimsizes[0] self._call() dangerous_delete = False if lib._del_middle_rec_bug and \ (interval != 1 or (start != 0 and start + count < dimsize)): #delete from middle is dangerous if only have one index entry entries = ctypes.c_long(0) lib.call(const.GET_, const.zVAR_nINDEXENTRIES_, ctypes.byref(entries)) dangerous_delete = (entries.value == 1) if dangerous_delete: data = self[...] 
data = numpy.delete( data, numpy.arange(start, start + count * interval, interval), 0) self[0:dimsize - count] = data first_rec = dimsize - count last_rec = dimsize - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif interval == 1: first_rec = ctypes.c_long(start) last_rec = ctypes.c_long(start + count - 1) lib.call(const.DELETE_, const.zVAR_RECORDS_, first_rec, last_rec) else: self._call() #delete from end to avoid renumbering of records for recno in range(start + (count - 1) * interval, start - 1, -1 * interval): lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(recno), ctypes.c_long(recno)) def __setitem__(self, key, data): """Puts a slice into the data array. Details under :py:class:`pycdf.Var`. @param key: index or slice to store @type key: int or slice @param data: data to store @type data: numpy.array @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. IndexError will @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) n_recs = hslice.counts[0] hslice.expand(data) cdf_type = self.type() if cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) else: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=self._np_type()) if cdf_type == const.CDF_EPOCH16.value: datashape = data.shape[:-1] else: datashape = data.shape #Check data sizes if datashape != tuple(hslice.expected_dims()): raise ValueError('attempt to assign data of dimensions ' + str(datashape) + ' to slice of dimensions ' + str(tuple(hslice.expected_dims()))) #Flip majority and reversed dimensions, see convert_input_array data = hslice.convert_output_array(data) #Handle insertions and similar weirdness if hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Specified slice ends before last record, so insert in middle saved_data = self[hslice.starts[0] + n_recs:] if hslice.counts[0] > 0: hslice.select() lib.call(const.PUT_, const.zVAR_HYPERDATA_, data.ctypes.data_as(ctypes.c_void_p)) if hslice.counts[0] < n_recs: first_rec = hslice.starts[0] + hslice.counts[0] last_rec = hslice.dimsizes[0] - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Put saved data in after inserted data self[hslice.starts[0] + hslice.counts[0]:] = saved_data def extend(self, data): """ Append multiple values to the end of this variable This is an efficiency function which overrides the base implementation in MutableSequence. 
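A hedged sketch (``Flux`` is the hypothetical variable with 100 records and dimensions [2, 3] used elsewhere in this documentation):

>>> Flux.extend([[[1, 2, 3], [4, 5, 6]],
...              [[7, 8, 9], [10, 11, 12]]])
>>> len(Flux)
102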
Parameters ---------- data : the data to append """ self[len(self):] = data def insert(self, index, data): """ Inserts a *single* record before an index Parameters ---------- index : int index before which to insert the new record data : the record to insert """ self[index:index] = [data] def _create(self, var_name, datatype, n_elements = 1, dims = (), recVary = const.VARY, dimVarys = None): """Creates a new zVariable @param var_name: name of this variable @type var_name: string @param datatype: CDF data type @type datatype: ctypes.c_long @param n_elements: number of elements (should be 1 except for CDF_CHAR variables). @type n_elements: long @param dims: size of each dimension for multi-dimensional variable, or empty for a zero-dimensional @type dims: sequence of long @param recVary: record variance for this variable (VARY/NOVARY) @type recVary: long @param dimVarys: array of VARY or NOVARY, variance for each dimension @type dimVarys: sequence of long @return: new variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.new}. """ dim_array = (ctypes.c_long * len(dims))(*dims) enc_name = var_name.encode('ascii') if dimVarys is None: dim_vary_array = (ctypes.c_long * (len(dims) if len(dims) > 0 else 1))(const.VARY) else: dim_vary_array = (ctypes.c_long * len(dims))(*dimVarys) varNum = ctypes.c_long(0) self.cdf_file._call(const.CREATE_, const.zVAR_, enc_name, datatype, ctypes.c_long(n_elements), ctypes.c_long(len(dims)), dim_array, recVary, dim_vary_array, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) def _delete(self): """Removes this zVariable from the CDF @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ self._call(const.DELETE_, const.zVAR_) self.cdf_file.clear_from_cache(self._name) self._name = None def _get(self, var_name): """Gets an existing zVariable @param var_name: name of this variable @type var_name: string @return: variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.__getitem__}. 
""" if isinstance(var_name, str_classes): try: enc_name = var_name.encode('ascii').rstrip() except AttributeError: enc_name = var_name.rstrip() #already in ASCII #'touch' CDF to cause an error if the name isn't there; get number varNum = ctypes.c_long(0) self.cdf_file._call(const.GET_, const.zVAR_NUMBER_, enc_name, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) else: #Looking up by number name = ctypes.create_string_buffer(const.CDF_VAR_NAME_LEN256+1) self.cdf_file._call(const.SELECT_, const.zVAR_, ctypes.c_long(var_name), const.GET_, const.zVAR_NAME_, name) self._name = name.value.rstrip() self.cdf_file.add_to_cache(self._name, var_name) def _num(self): """Returns the zVar number for this variable @return: number of this zVar @rtype: int """ return self.cdf_file.var_num(self._name) def __len__(self): """Get number of records for this variable in this file @return: Number of records @rtype: long @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ count = ctypes.c_long(0) self._call(const.GET_, const.zVAR_MAXREC_, ctypes.byref(count)) return (count.value + 1) def __repr__(self): """Returns representation of the variable Cannot return anything that can be eval'd to create a copy, so just wrap the informal representation in angle brackets. @return: info on this zVar @rtype: str """ return '<Var:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the variable This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.Var`. @return: info on this zVar, CDFTYPE [dimensions] NRV (if not record-varying) @rtype: str """ if self.cdf_file._opened: cdftype = self.type() chartypes = (const.CDF_CHAR.value, const.CDF_UCHAR.value) rv = self.rv() typestr = lib.cdftypenames[cdftype] + \ ('*' + str(self._nelems()) if cdftype in chartypes else '' ) if rv: sizestr = str([len(self)] + self._dim_sizes()) else: sizestr = str(self._dim_sizes()) return typestr + ' ' + sizestr + ('' if rv else ' NRV') else: if isinstance(self._name, str): return 'zVar "{0}" in closed CDF {1}'.format( self._name, self.cdf_file.pathname) else: return 'zVar "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self.cdf_file.pathname.decode('ascii')) def _n_dims(self): """Get number of dimensions for this variable @return: the number of dimensions @rtype: long """ n_dims = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMDIMS_, ctypes.byref(n_dims)) return n_dims.value def _dim_sizes(self): """Get the dimension sizes for this variable @return: sequence of sizes @rtype: sequence of long @note: This will always be in Python order (i.e. row major, last index iterates most quickly), *regardless* of the majority of the CDF. """ sizes = (ctypes.c_long * const.CDF_MAX_DIMS)(0) self._call(const.GET_, const.zVAR_DIMSIZES_, sizes) sizes = sizes[0:self._n_dims()] return sizes def rv(self, new_rv=None): """ Gets or sets whether this variable has record variance If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Other Parameters ================ new_rv : boolean True to change to record variance, False to change to NRV, unspecified to simply check variance. 
Returns ======= out : Boolean True if record varying, False if NRV """ if new_rv != None: self._call(const.PUT_, const.zVAR_RECVARY_, const.VARY if new_rv else const.NOVARY) vary = ctypes.c_long(0) self._call(const.GET_, const.zVAR_RECVARY_, ctypes.byref(vary)) return vary.value != const.NOVARY.value def dv(self, new_dv=None): """ Gets or sets dimension variance of each dimension of variable. If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Parameters ========== new_dv : list of boolean Each element True to change that dimension to dimension variance, False to change to not dimension variance. (Unspecified to simply check variance.) Returns ======= out : list of boolean True if that dimension has variance, else false. """ ndims = self._n_dims() if new_dv != None: if len(new_dv) != ndims: raise ValueError('Must specify variance for ' + str(ndims) + 'dimensions.') varies = (ctypes.c_long * ndims)( *[const.VARY if dv else const.NOVARY for dv in new_dv]) self._call(const.PUT_, const.zVAR_DIMVARYS_, varies) if ndims == 0: return [] varies = (ctypes.c_long * const.CDF_MAX_DIMS)() self._call(const.GET_, const.zVAR_DIMVARYS_, varies) return [dv != const.NOVARY.value for dv in varies[0:ndims]] def _call(self, *args, **kwargs): """Select this CDF and variable and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. @param args: Passed directly to the CDF library interface. Useful constants are defined in the :py:mod:`pycdf.const` module of this package. @type args: various, see :py:mod:`ctypes`. @return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self.cdf_file._call( const.SELECT_, const.zVAR_, ctypes.c_long(self.cdf_file.var_num(self._name)), *args, **kwargs) def _np_type(self): """Returns the numpy type of this variable This is the numpy type that will come directly out of the CDF; see :meth:`dtype` for the representation post-conversion. Raises ====== CDFError : for library-reported error or failure to find numpy type Returns ======= out : dtype numpy dtype that will hold value from this variable """ cdftype = self.type() if cdftype == const.CDF_CHAR.value or cdftype == const.CDF_UCHAR.value: return numpy.dtype('S' + str(self._nelems())) try: return lib.numpytypedict[cdftype] except KeyError: raise CDFError(const.BAD_DATA_TYPE) def type(self, new_type=None): """ Returns or sets the CDF type of this variable Parameters ========== new_type : ctypes.c_long the new type from :mod:`~pycdf.const` Returns ======= out : int CDF type """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) n_elements = ctypes.c_long(self._nelems()) self._call(const.PUT_, const.zVAR_DATASPEC_, new_type, n_elements) self._type = None if self._type is None: cdftype = ctypes.c_long(0) self._call(const.GET_, const.zVAR_DATATYPE_, ctypes.byref(cdftype)) self._type = cdftype.value return self._type def _nelems(self): """Number of elements for each value in this variable This is the length of strings for CHAR and UCHAR, should be 1 otherwise. 
@return: length of strings @rtype: int """ nelems = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMELEMS_, ctypes.byref(nelems)) return nelems.value def name(self): """ Returns the name of this variable Returns ======= out : str variable's name """ if isinstance(self._name, str): return self._name elif isinstance(self._name, bytes): return self._name.decode() def compress(self, comptype=None, param=None): """ Set or check the compression of this variable Compression may not be changeable on variables with data already written; even deleting the data may not permit the change. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) Returns ======= out : tuple the (comptype, param) currently in effect """ return _compress(self, comptype, param) def copy(self): """ Copies all data and attributes from this variable Returns ======= out : :class:`VarCopy` list of all data in record order """ return VarCopy(self) def rename(self, new_name): """ Renames this variable Parameters ========== new_name : str the new name for this variable """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_VAR_NAME_LEN256: raise CDFError(const.BAD_VAR_NAME) self._call(const.PUT_, const.zVAR_NAME_, enc_name) self.cdf_file.add_to_cache( enc_name, self.cdf_file.var_num(self._name)) #Still in cache del self.cdf_file._var_nums[self._name] self._name = enc_name @property def shape(self): """ Provides the numpy array-like shape of this variable. Returns a tuple; first element is number of records (RV variable only) And the rest provide the dimensionality of the variable. .. note:: Assigning to this attribute will not change the shape. """ if self.rv(): return tuple([len(self)] + self._dim_sizes()) else: return tuple(self._dim_sizes()) @property def dtype(self): """ Provide the numpy dtype equivalent to the CDF type of this variable. Data from this variable will be returned in numpy arrays of this type. See Also -------- type """ cdftype = self.type() if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str is not bytes and not self._raw: return numpy.dtype('U' + str(self._nelems())) if cdftype in (const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value) and not self._raw: return numpy.dtype('O') return self._np_type() def _get_attrs(self): """Get attribute list Provide access to the zVar's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = zAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. 
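A short usage sketch (``cdf`` is an open, writable :class:`CDF` containing a hypothetical variable ``Flux``; the attribute names are illustrative only):

>>> Flux = cdf['Flux']
>>> Flux.attrs['UNITS'] = 'counts/sec'
>>> Flux.attrs['CATDESC'] = 'Omnidirectional particle flux'
>>> 'UNITS' in Flux.attrs
True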
""") class _Hyperslice(object): """Represents a CDF 'slice' used for the hyper CDF functions For internal module use only. @ivar dims: number of dimensions to this slice, usually number of dimensions to the variable plus one for the record, which represents the 0th (least rapidly varying) dimension. @type dims: int @ivar dimsizes: size of each dimension (0th is number of records) @type dimsizes: list of int @ivar starts: index of the start value for each dimension ('dimension indices' in CDF speak) @type starts: list of int @ivar counts: number of values to get from each dimension. Final result will be the product of everything in counts. ('dimension counts' in CDF speak) @type counts: numpy.array @ivar intervals: interval between successive indices to use for each dimension. ('dimension invervals' in CDF speak) @type intervals: list of int @ivar degen: is this dimension degenerate, i.e. should be removed in the returned dataset. A 3D array with one dimension degenerate will be returned as a 2D array (i.e. list-of-lists.) @type degen: numpy.array @ivar rev: should this dimension be returned in reverse order? @type rev: numpy.array @ivar column: is this slice in column-major mode (if false, row-major) @type column: boolean @ivar zvar: what CDF variable this object slices on @type zvar: :py:class:`pycdf.Var` @ivar expanded_key: fully-expanded version of the key passed to the constructor (all dimensions filled in) @type expanded_key: tuple @note: All dimension-related variables are stored row-major (Python order) """ def __init__(self, zvar, key): """Create a Hyperslice @param zvar: zVariable that this slices @type zvar: :py:class:`pycdf.Var` @param key: Python multi-dimensional slice as passed to __getitem__ @type key: tuple of slice and/or int @raise IndexError: if slice is out of range, mismatches dimensions, or otherwise unparsable. @raise ValueError: if slice has invalid values """ self.zvar = zvar self.rv = self.zvar.rv() #dim of records, + 1 record dim (NRV always is record 0) self.dims = zvar._n_dims() + 1 self.dimsizes = [len(zvar)] + \ zvar._dim_sizes() self.starts = [0] * self.dims self.counts = numpy.empty((self.dims,), dtype=numpy.int32) self.counts.fill(1) self.intervals = [1] * self.dims self.degen = numpy.zeros(self.dims, dtype=numpy.bool) self.rev = numpy.zeros(self.dims, dtype=numpy.bool) #key is: #1. a single value (integer or slice object) if called 1D #2. a tuple (of integers and/or slice objects) if called nD #3. Each item is either a single value (degenerate dim) # or a slice object. 
if not hasattr(key, '__len__'): #Not a container object, pack in tuple key = (key, ) if not self.rv: key = (0, ) + key #NRV, so always get 0th record (degenerate) key = self.expand_ellipsis(key, self.dims) if self.rv: #special-cases for RV variables if len(key) == 1: #get all data for this record(s) key = self.expand_ellipsis(key + (Ellipsis, ), self.dims) elif len(key) == self.dims - 1: #get same slice from each record key = (slice(None, None, None), ) + key if len(key) == self.dims: self.expanded_key = key for i in range(self.dims): idx = key[i] if hasattr(idx, 'start'): #slice (self.starts[i], self.counts[i], self.intervals[i], self.rev[i]) = \ self.convert_range(idx.start, idx.stop, idx.step, self.dimsizes[i]) else: #Single degenerate value if idx < 0: idx += self.dimsizes[i] if idx != 0 and (idx >= self.dimsizes[i] or idx < 0): raise IndexError('list index out of range') self.starts[i] = idx self.degen[i] = True else: raise IndexError('Slice does not match dimensions for zVar ' + str(zvar._name)) self.column = zvar.cdf_file.col_major() def expected_dims(self, data=None): """Calculate size of non-degenerate dimensions Figures out size, in each dimension, of expected input data @return: size of each dimension for this slice, excluding degenerate @rtype: list of int """ return [self.counts[i] for i in range(self.dims) if not self.degen[i]] def expand(self, data): """Expands the record dimension of this slice to hold a set of data If the length of data (outermost dimension) is larger than the record count (counts[0]) for this slice, expand the slice to hold all the data. This requires that the record dimension of the slice not be degenerate, and also that it not have been completely specified when the hyperslice was created (i.e. record dimension either ellipsis or no specified stop.) Does *not* expand any other dimension, since that's Very Hard in CDF. @param data: the data which are intended to be stored in this slice @type data: list """ rec_slice = self.expanded_key[0] if not self.rv or isinstance(data, str_classes) or self.degen[0] or \ not hasattr(rec_slice, 'stop'): return if len(data) < self.counts[0]: #Truncate to fit data if rec_slice.stop is None and rec_slice.step in (None, 1): self.counts[0] = len(data) elif len(data) > self.counts[0]: #Expand to fit data if rec_slice.step in (None, 1): self.counts[0] = len(data) def create_array(self): """Creates a numpy array to hold the data from this slice Returns ======= out : numpy.array array sized, typed, and dimensioned to hold data from this slice """ counts = self.counts degen = self.degen if self.column: counts = self.reorder(counts) degen = self.reorder(degen) #TODO: Forcing C order for now, revert to using self.column later array = numpy.empty( [counts[i] for i in range(len(counts)) if not degen[i]], self.zvar._np_type(), order='C') return numpy.require(array, requirements=('C', 'A', 'W')) def convert_input_array(self, buffer): """Converts a buffer of raw data from this slice EPOCH(16) variables always need to be converted. 
CHAR need converted to Unicode if py3k Parameters ========== buffer : numpy.array data as read from the CDF file Returns ======= out : numpy.array converted data """ result = self._flip_array(buffer) #Convert to derived types cdftype = self.zvar.type() if not self.zvar._raw: if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str != bytes: dt = numpy.dtype('U{0}'.format(result.dtype.itemsize)) result = numpy.require(numpy.char.array(result).decode(), dtype=dt) elif cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(result) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(result) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(result) return result def convert_output_array(self, buffer): """Convert a buffer of data that will go into this slice Parameters ========== buffer : numpy.array data to go into the CDF file Returns ======= out : numpy.array input with majority flipped and dimensions reversed to be suitable to pass directly to CDF library. """ buffer = self._flip_array(buffer) return numpy.require(buffer, requirements=('C', 'A', 'W')) def _flip_array(self, data): """ Operations for majority, etc. common between convert_input and _output """ cdftype = self.zvar.type() #Flip majority if any non-degenerate dimensions exist if self.column and not min(self.degen): #Record-number dim degen, swap whole thing if self.degen[0]: if cdftype == const.CDF_EPOCH16.value: #Maintain last dimension data = data.transpose( list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose() #Record-number dimension is not degenerate, so keep it first else: if cdftype == const.CDF_EPOCH16.value: data = data.transpose( [0] + list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose( [0] + list(range(len(data.shape) - 1, 0, -1))) #Reverse non-degenerate dimensions in rev #Remember that the degenerate indices are already gone! if self.rev.any(): sliced = [(slice(None, None, -1) if self.rev[i] else slice(None)) for i in range(self.dims) if not self.degen[i]] if cdftype == const.CDF_EPOCH16.value: #don't reverse last dim sliced.extend(slice(None)) data = operator.getitem(data, tuple(sliced)) return data def select(self): """Selects this hyperslice in the CDF Calls the CDF library to select the CDF, variable, records, and array elements corresponding to this slice. 
""" args = (const.SELECT_, const.zVAR_RECNUMBER_, ctypes.c_long(self.starts[0]), const.SELECT_, const.zVAR_RECCOUNT_, ctypes.c_long(self.counts[0]), const.SELECT_, const.zVAR_RECINTERVAL_, ctypes.c_long(self.intervals[0])) if self.dims > 1: dims = self.dims - 1 args += (const.SELECT_, const.zVAR_DIMINDICES_, (ctypes.c_long * dims)(*self.starts[1:]), const.SELECT_, const.zVAR_DIMCOUNTS_, (ctypes.c_long * dims)(*self.counts[1:]), const.SELECT_, const.zVAR_DIMINTERVALS_, (ctypes.c_long * dims)(*self.intervals[1:])) self.zvar._call(*args) @staticmethod def expand_ellipsis(slices, n_dims): """Expands any ellipses into correct number of full-size slices @param slices: tuple of slices, integers, or ellipse objects @type slices: tuple @param n_dims: number of dimensions this slice is over @type n_dims: int @return: L{slices} with ellipses replaced by appropriate number of full-dimension slices @rtype: tuple @raise IndexError: if ellipses specified when already have enough dimensions """ if slices is Ellipsis: return tuple([slice(None, None, None) for i in range(n_dims)]) #Elements might be numpy arrays, so can't use in/index idx = [i for i, v in enumerate(slices) if v is Ellipsis] if not idx: #no ellipsis return slices if len(idx) > 1: #multiples! raise IndexError('Ellipses can only be used once per slice.') idx = idx[0] #how many dims to expand ellipsis to #remember the ellipsis is in len(slices) and must be replaced! extra = n_dims - len(slices) + 1 if extra < 0: raise IndexError('too many indices') result = slices[0:idx] + (slice(None), ) * extra + slices[idx+1:] return result @staticmethod def check_well_formed(data): """Checks if input data is well-formed, regular array""" d = numpy.asanyarray(data) if d.dtype == numpy.object: #this is probably going to be bad try: len(d.flat[0]) except TypeError: #at least it's not a list pass else: raise ValueError( 'Data must be well-formed, regular array of number, ' 'string, or datetime') @staticmethod def dimensions(data): """Finds the dimensions of a nested list-of-lists @param data: data of which dimensions are desired @type data: list (of lists) @return: dimensions of L{data}, in order outside-in @rtype: list of int @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) _Hyperslice.check_well_formed(d) return d.shape @staticmethod def types(data, backward=False): """Find dimensions and valid types of a nested list-of-lists Any given data may be representable by a range of CDF types; infer the CDF types which can represent this data. This breaks down to: 1. Proper kind (numerical, string, time) 2. Proper range (stores highest and lowest number) 3. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: 1. Type that matches precision of data first, then 2. integer type before float type, then 3. Smallest type first, then 4. signed type first, then 5. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if L{data} specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: 1. absolute values between 0 and 3e-39 2. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. 
@param data: data for which dimensions and CDF types are desired @type data: list (of lists) @param backward: limit to pre-CDF3 types @type backward: bool @return: dimensions of L{data}, in order outside-in; CDF types which can represent this data; number of elements required (i.e. length of longest string) @rtype: 3-tuple of lists ([int], [ctypes.c_long], [int]) @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) dims = d.shape elements = 1 types = [] _Hyperslice.check_well_formed(d) if d.dtype.kind in ('S', 'U'): #it's a string types = [const.CDF_CHAR, const.CDF_UCHAR] elements = d.dtype.itemsize if d.dtype.kind == 'U': #UTF-8 uses 4 bytes per elements //= 4 elif d.size and hasattr(numpy.ma.getdata(d).flat[0], 'microsecond'): if max((dt.microsecond % 1000 for dt in d.flat)) > 0: types = [const.CDF_EPOCH16, const.CDF_EPOCH, const.CDF_TIME_TT2000] else: types = [const.CDF_EPOCH, const.CDF_EPOCH16, const.CDF_TIME_TT2000] if backward: del types[types.index(const.CDF_EPOCH16)] del types[-1] elif not lib.supports_int8: del types[-1] elif d is data or isinstance(data, numpy.generic): #numpy array came in, use its type (or byte-swapped) types = [k for k in lib.numpytypedict if (lib.numpytypedict[k] == d.dtype or lib.numpytypedict[k] == d.dtype.newbyteorder()) and not k in lib.timetypes] if (not lib.supports_int8 or backward) \ and const.CDF_INT8.value in types: del types[types.index(const.CDF_INT8.value)] #Maintain priority to match the ordered lists below: #float/double (44, 45) before real (21/22), and #byte (41) before int (1) before char (51). So hack. #Consider making typedict an ordered dict once 2.6 is dead. types.sort(key=lambda x: x % 50, reverse=True) if not types: #not a numpy array, or can't parse its type if d.dtype.kind == 'O': #Object. 
Try to make it numeric #Can't do safe casting from Object, so try and compare #Basically try most restrictive to least restrictive trytypes = (numpy.uint64, numpy.int64, numpy.float64) for t in trytypes: try: newd = d.astype(dtype=t) except: #Failure to cast, try next type continue if (newd == d).all(): #Values preserved, use this type d = newd #Continue with normal guessing, as if a list break else: #fell through without a match raise ValueError( 'Cannot convert generic objects to CDF type.') if d.dtype.kind in ('i', 'u'): #integer minval = numpy.min(d) maxval = numpy.max(d) if minval < 0: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_INT2, const.CDF_INT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 15, 2 ** 31, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] else: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_UINT1, const.CDF_INT2, const.CDF_UINT2, const.CDF_INT4, const.CDF_UINT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 8, 2 ** 15, 2 ** 16, 2 ** 31, 2 ** 32, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] types = [t for (t, c) in zip(types, cutoffs) if c > maxval and (minval >= 0 or minval >= -c)] if (not lib.supports_int8 or backward) \ and const.CDF_INT8 in types: del types[types.index(const.CDF_INT8)] else: #float if dims is (): if d != 0 and (abs(d) > 1.7e38 or abs(d) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] else: absolutes = numpy.abs(d[d != 0]) if len(absolutes) > 0 and \ (numpy.max(absolutes) > 1.7e38 or numpy.min(absolutes) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] types = [t.value if hasattr(t, 'value') else t for t in types] return (dims, types, elements) @staticmethod def reorder(seq): """Reorders seq to switch array majority Used to take an array of subscripts between row and column majority. First element is not touched, being the record number. @param seq: a sequence of *subscripts* @type seq: sequence of integers @return: seq with all but element 0 reversed in order @rtype: sequence of integers """ return numpy.concatenate((seq[0:1], numpy.flipud(seq)[:-1])) @staticmethod def convert_range(start, stop, step, size): """Converts a start/stop/step range to start/count/interval (i.e. changes from Python-style slice to CDF-style) @param start: index to start a slice at, may be none or negative @type start: int @param stop: index at end of slice (one-past, standard Python), may be none or negative @type stop: int @param step: interval for stepping through stlice @type step: int @param size: size of list to slice @type size: int @return: (start, count, interval, rev) where: 1. start is the start index, normalized to be within the size of the list and negatives handled 2. count is the number of records in the slice, guaranteed to stop before the end 3. interval is the skip between records 4. rev indicates whether the sequence should be reversed @rtype: (int, int, int, boolean) """ (start, stop, step) = slice(start, stop, step).indices(size) if step < 0: step *= -1 count = int((start - stop + step - 1) / step) start = start - (count - 1) * step rev = True else: count = int((stop - start + step - 1) / step) rev = False if count < 0: count = 0 start = 0 return (start, count, step, rev) class Attr(MutableSequence): """An attribute, g or z, for a CDF .. 
warning:: This class should not be used directly, but only in its subclasses, :class:`gAttr` and :class:`zAttr`. The methods listed here are safe to use in the subclasses. Represents a CDF attribute, providing access to the Entries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. An introduction to CDF attributes can be found in section 2.4 of the CDF user's guide. Each element of the list is a single Entry of the appropriate type. The index to the elements is the Entry number. Multi-dimensional slicing is *not* supported; an Entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry .. autosummary:: ~Attr.append ~Attr.has_entry ~Attr.insert ~Attr.max_idx ~Attr.new ~Attr.number ~Attr.rename ~Attr.type .. automethod:: append .. automethod:: has_entry .. automethod:: insert .. automethod:: max_idx .. automethod:: new .. automethod:: number .. automethod:: rename .. automethod:: type """ def __init__(self, cdf_file, attr_name, create=False): """Initialize this attribute @param cdf_file: CDF file containing this attribute @type cdf_file: :py:class:`pycdf.CDF` @param attr_name: Name of this attribute @type attr_name: str @param create: True to create attribute, False to look up existing. @type create: bool """ self._cdf_file = cdf_file self._raw = False if isinstance(attr_name, str_classes): try: self._name = attr_name.encode('ascii') except AttributeError: self._name = attr_name attrno = ctypes.c_long() if create: self._cdf_file._call(const.CREATE_, const.ATTR_, self._name, self.SCOPE, ctypes.byref(attrno)) self._cdf_file.add_attr_to_cache( self._name, attrno.value, self.SCOPE == const.GLOBAL_SCOPE) else: #Ensure exists, and populate cache. See scope note below attrno, scope = self._cdf_file.attr_num(self._name) else: name = ctypes.create_string_buffer(const.CDF_ATTR_NAME_LEN256 + 1) scope = ctypes.c_long(0) self._cdf_file._call(const.SELECT_, const.ATTR_, ctypes.c_long(attr_name)) #Because it's possible to create a gAttr Python objecting #referencing an Attribute with variable scope, and vice-versa, #do NOT assume the scope matches #(Higher level code checks for that being a bad thing.) self._cdf_file._call( const.GET_, const.ATTR_NAME_, name, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) self._name = name.value.rstrip() if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) self._cdf_file.add_attr_to_cache(self._name, attr_name, scope) def __getitem__(self, key): """Return a slice of Entries. Because Attributes may be sparse, a multi-element slice will return None for those elements which do not have associated Entries. @param key: index or range of Entry number to return @type key: slice or int @return: a list of entries, appropriate type. @raise IndexError: if L{key} is an int and that Entry number does not exist. 
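For example (a sketch; ``attribute`` is a gAttr whose Entries 0 and 2 hold the values 1 and 3, with no Entry 1):

>>> attribute[0:3]
[1, None, 3]
>>> attribute[1]
Traceback (most recent call last):
    ...
IndexError: list index 1 out of range.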
""" if key is Ellipsis: key = slice(None, None, None) if hasattr(key, 'indices'): idx = range(*key.indices(self.max_idx() + 1)) return [self._get_entry(i) if self.has_entry(i) else None for i in idx] else: if self.has_entry(key): return self._get_entry(key) else: raise IndexError('list index ' + str(key) + ' out of range.') def _check_other_entries(self, types): """Try to get the type of this entry from others in the Attribute For zAttrs, checks if all other Entries are the same type, and at least one doesn't match its zVar, i.e. Entry type dominates (otherwise assumption is the Var type dominates). For gAttrs, checks all other Entries, and gives priority to the one that's earliest in the possible type list and exists in other Entries. This is only one component of Entry type guessing! :param list types: CDF types that are candidates (match the data) :return: The type discerned from other Entries, or None """ if self.ENTRY_ == const.zENTRY_: #If everything else is the same entry type, #and one is not the same as its var, probably #all entries should be of that type cand_et = None #The Entry type that might work one_var_diff = False #One Var has a type different from Entry for num in range(self.max_idx() + 1): if not self.has_entry(num): continue vartype = self._cdf_file[num].type() entrytype = self.type(num) if vartype != entrytype: one_var_diff = True if cand_et is None: if not entrytype in types: return None #One var has Entry with "impossible" type cand_et = entrytype elif cand_et != entrytype: return None #Two vars have Entries with different types if one_var_diff and cand_et is not None: return cand_et else: # Of those types which exist in other entries, # find the one which is earliest # in types, i.e. the preferred type entrytypes = [self.type(num) for num in range(self.max_idx() + 1) if self.has_entry(num)] entrytypes = [et for et in entrytypes if et in types] if entrytypes: return types[ min([types.index(et) for et in entrytypes])] return None def __setitem__(self, key, data): """Set a slice of Entries. @param key: index or range of Entry numbers to set @type key: slice or int @param data: the data to set these entries to. Normally each entry should be a sequence; if a scalar is provided, it is treated as a single-element list. @type data: scalar or list @raise ValueError: if size of {data} does not match size of L{key} @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. 
""" if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): #Single value, promote everything a dimension idx = (key, key + 1, 1) data = [data] else: idx = key.indices(self.max_idx() + 1) if key.step is None or key.step > 0: #Iterating forward, extend slice to match data if len(data) > len(range(*idx)): idx = (idx[0], idx[0] + idx[2] * len(data), idx[2]) #get, and check, types and sizes for all data #checks first so don't have error after changing half the Entries data_idx = -1 typelist = [] for i in range(*idx): data_idx += 1 if data_idx >= len(data): continue datum = data[data_idx] if datum is None: typelist[i] = (None, None, None) continue (dims, types, elements) = _Hyperslice.types( datum, backward=self._cdf_file.backward) if len(types) <= 0: raise ValueError('Cannot find a matching CDF type.') if len(dims) > 1: raise ValueError('Entries must be scalar or 1D.') elif len(dims) == 1 and isinstance(datum[0], str_classes): raise ValueError('Entry strings must be scalar.') entry_type = None if self.has_entry(i): #If the entry already exists, match its type entry_type = self.type(i) if not entry_type in types: entry_type = None if entry_type is None: #Check other entries for this attribute entry_type = self._check_other_entries(types) if entry_type is None and self.ENTRY_ == const.zENTRY_: #Fall back to zVar type vartype = self._cdf_file[i].type() if vartype in types: entry_type = vartype else: entry_type = types[0] elif entry_type is None: entry_type = types[0] if not entry_type in lib.numpytypedict: raise ValueError('Cannot find a matching numpy type.') typelist.append((dims, entry_type, elements)) data_idx = -1 for i in range(*idx): data_idx += 1 if data_idx >= len(data) or data[data_idx] is None: if self.has_entry(i): del self[i] continue datum = data[data_idx] (dims, entry_type, elements) = typelist[data_idx] self._write_entry(i, datum, entry_type, dims, elements) def __delitem__(self, key): """Delete a slice of Entries. @param key: index or range of Entry numbers to delete @type key: slice or int @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. """ if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): idx = (key, key + 1, 1) else: idx = key.indices(self.max_idx() + 1) for i in range(*idx): self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(i), const.DELETE_, self.ENTRY_) def __iter__(self, current=0): """Iterates over all entries in this Attribute Returns data from one entry at a time until reaches the end. @note: Returned in entry-number order. """ while current <= self.max_idx(): if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current += 1 def __reversed__(self, current=None): """Iterates over all entries in this Attribute Returns data from one entry at a time, starting at end and going to beginning. @note: Returned in entry-number order. """ if current is None: current = self.max_idx() while current >= 0: if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current -= 1 def __len__(self): """Number of Entries for this Attr. NOT same as max Entry number. 
@return: Number of Entries @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_NUMENTRIES_, ctypes.byref(count)) return count.value def __repr__(self): """Returns representation of an attribute Cannot return anything that can be eval'd to create a copy of the attribtute, so just wrap the informal representation in angle brackets. @return: all the data in this attribute @rtype: str """ return '<\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute This is an 'informal' representation in that it cannot be evaluated directly to create an L{Attr}. @return: all the data in this attribute @rtype: str """ if self._cdf_file._opened: return '\n'.join([str(item) for item in self]) else: if isinstance(self._name, str): return 'Attribute "{0}" in closed CDF {1}'.format( self._name, self._cdf_file.pathname) else: return 'Attribute "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self._cdf_file.pathname.decode('ascii')) def insert(self, index, data): """Insert an entry at a particular number Inserts entry at particular number while moving all subsequent entries to one entry number later. Does not close gaps. Parameters ========== index : int index where to put the new entry data : data for the new entry """ max_entry = self.max_idx() if index > max_entry: #Easy case self[index] = data return for i in range(max_entry, index - 1, -1): if self.has_entry(i+1): self.__delitem__(i+1) if self.has_entry(i): self.new(self.__getitem__(i), type=self.type(i), number=i+1) self[index] = data def append(self, data): """Add an entry to end of attribute Puts entry after last defined entry (does not fill gaps) Parameters ========== data : data for the new entry """ self[self.max_idx() + 1] = data def _call(self, *args, **kwargs): """Select this CDF and Attr and call the CDF internal interface @param args: Passed directly to the CDF library interface. @type args: various, see :py:mod:`ctypes`. @return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self._cdf_file._call( const.SELECT_, const.ATTR_, ctypes.c_long(self._cdf_file.attr_num(self._name)[0]), *args, **kwargs) def _entry_len(self, number): """Number of elements in an Entry @param number: number of Entry @type number: int @return: number of elements @rtype: int """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') count = ctypes.c_long(0) self._call( const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_NUMELEMS_, ctypes.byref(count)) return count.value def type(self, number, new_type=None): """Find or change the CDF type of a particular Entry number Parameters ========== number : int number of Entry to check or change Other Parameters ================ new_type type to change this Entry to, from :mod:`~pycdf.const`. Omit to only check type. Returns ======= out : int CDF variable type, see :mod:`~pycdf.const` Notes ===== If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) size = ctypes.c_long(self._entry_len(number)) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATASPEC_, new_type, size, ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') cdftype = ctypes.c_long(0) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATATYPE_, ctypes.byref(cdftype), ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') return cdftype.value def has_entry(self, number): """Check if this attribute has a particular Entry number Parameters ========== number : int number of Entry to check or change Returns ======= out : bool True if ``number`` is a valid entry number; False if not """ status = self._call(const.CONFIRM_, self.ENTRY_EXISTENCE_, ctypes.c_long(number), ignore=(const.NO_SUCH_ENTRY, )) return not status == const.NO_SUCH_ENTRY def max_idx(self): """Maximum index of Entries for this Attr Returns ======= out : int maximum Entry number """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_MAXENTRY_, ctypes.byref(count)) return count.value def new(self, data, type=None, number=None): """Create a new Entry in this Attribute .. note:: If ``number`` is provided and an Entry with that number already exists, it will be overwritten. Parameters ========== data data to put in the Entry Other Parameters ================ type : int type of the new Entry, from :mod:`~pycdf.const` (otherwise guessed from ``data``) number : int Entry number to write, default is lowest available number. """ if number is None: number = 0 while self.has_entry(number): number += 1 (dims, types, elements) = _Hyperslice.types( data, backward=self._cdf_file.backward) if type is None: #Guess based on other entries type = self._check_other_entries(types) if type is None and self.ENTRY_ == const.zENTRY_: #Try to match variable type vartype = self._cdf_file[number].type() if vartype in types: type = vartype if type is None: type = types[0] elif hasattr(type, 'value'): type = type.value self._write_entry(number, data, type, dims, elements) def number(self): """Find the attribute number for this attribute Returns ======= out : int attribute number """ no = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.ATTR_NUMBER_, self._name, ctypes.byref(no)) return no.value def global_scope(self): """Determine scope of this attribute. Returns ======= out : bool True if global (i.e. gAttr), False if zAttr """ return self._cdf_file.attr_num(self._name)[1] def rename(self, new_name): """Rename this attribute Renaming a zAttribute renames it for *all* zVariables in this CDF! 
Parameters ========== new_name : str the new name of the attribute """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_ATTR_NAME_LEN256: raise CDFError(const.BAD_ATTR_NAME) self._call(const.PUT_, const.ATTR_NAME_, enc_name) self._cdf_file.add_attr_to_cache( enc_name, *self._cdf_file.attr_num(self._name)) #still in cache del self._cdf_file._attr_info[self._name] self._name = enc_name def _get_entry(self, number): """Read an Entry associated with this L{Attr} @param number: number of Entry to return @type number: int @return: data from entry numbered L{number} @rtype: list or str """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') #Make a big enough buffer length = self._entry_len(number) cdftype = self.type(number) if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): buff = numpy.empty((), 'S{0}'.format(length), order='C') else: if not cdftype in lib.numpytypedict: raise CDFError(const.BAD_DATA_TYPE) buff = numpy.empty((length,), lib.numpytypedict[cdftype], order='C') buff = numpy.require(buff, requirements=('C', 'A', 'W')) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATA_, buff.ctypes.data_as(ctypes.c_void_p)) #decode if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): if str == bytes or self._raw: #Py2k, leave as bytes result = bytes(buff) else: #Py3k, make unicode result = str(numpy.char.array(buff).decode()) else: if not self._raw: if cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(buff) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(buff) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(buff) else: result = buff else: result = buff if length == 1: result = result[0] return result def _write_entry(self, number, data, cdf_type, dims, elements): """Write an Entry to this Attr. @param number: number of Entry to write @type number: int @param data: data to write @param cdf_type: the CDF type to write, from :py:mod:`pycdf.const` @param dims: dimensions of L{data} @type dims: list @param elements: number of elements in L{data}, 1 unless it is a string @type elements: int """ if len(dims) == 0: n_write = 1 else: n_write = dims[0] if cdf_type in (const.CDF_CHAR.value, const.CDF_UCHAR.value): data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.dtype('S' + str(elements))) n_write = elements elif cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data), except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) elif cdf_type in lib.numpytypedict: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=lib.numpytypedict[cdf_type]) else: raise CDFError(const.BAD_DATA_TYPE) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATA_, ctypes.c_long(cdf_type), ctypes.c_long(n_write), data.ctypes.data_as(ctypes.c_void_p)) def _delete(self): """Delete this Attribute Also deletes all Entries associated with it. 
""" self._call(const.DELETE_, const.ATTR_) self._cdf_file.clear_attr_from_cache(self._name) self._name = None class zAttr(Attr): """zAttribute for zVariables within a CDF. .. warning:: Because zAttributes are shared across all variables in a CDF, directly manipulating them may have unexpected consequences. It is safest to operate on zEntries via :class:`zAttrList`. .. note:: When accessing a zAttr, pyCDF exposes only the zEntry corresponding to the associated zVariable. See Also ======== :class:`Attr` """ ENTRY_ = const.zENTRY_ ENTRY_DATA_ = const.zENTRY_DATA_ SCOPE = const.VARIABLE_SCOPE ENTRY_EXISTENCE_ = const.zENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMzENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXzENTRY_ ENTRY_NUMELEMS_ = const.zENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.zENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.zENTRY_DATASPEC_ def insert(self, index, data): """Insert entry at particular index number Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError def append(self, index, data): """Add entry to end of attribute list Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError class gAttr(Attr): """Global Attribute for a CDF Represents a CDF attribute, providing access to the gEntries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. Normally accessed by providing a key to a :class:`gAttrList`: >>> attribute = cdffile.attrs['attribute_name'] >>> first_gentry = attribute[0] Each element of the list is a single gEntry of the appropriate type. The index to the elements is the gEntry number. A gEntry may be either a single string or a 1D array of numerical type. Entries of numerical type (everything but CDF_CHAR and CDF_UCHAR) with a single element are returned as scalars; multiple-element entries are returned as a list. No provision is made for accessing below the entry level; the whole list is returned at once (but Python's slicing syntax can be used to extract individual items from that list.) Multi-dimensional slicing is *not* supported; an entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry gEntries are *not* necessarily contiguous; a gAttribute may have an entry 0 and entry 2 without an entry 1. :meth:`~Attr.len` will return the *number* of gEntries; use :meth:`~Attr.max_idx` to find the highest defined gEntry number and :meth:`~Attr.has_entry` to determine if a particular gEntry number exists. Iterating over all entries is also supported:: >>> entrylist = [entry for entry in attribute] Deleting gEntries will leave a "hole": >>> attribute[0:3] = [1, 2, 3] >>> del attribute[1] >>> attribute.has_entry(1) False >>> attribute.has_entry(2) True >>> print attribute[0:3] [1, None, 3] Multi-element slices over nonexistent gEntries will return ``None`` where no entry exists. Single-element indices for nonexistent gEntries will raise ``IndexError``. Assigning ``None`` to a gEntry will delete it. 
When assigning to a gEntry, the type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing gEntry of the same number in this gAttribute #. other gEntries in this gAttribute #. data-matching constraints described in :meth:`CDF.new`. See Also ======== :class:`Attr` """ ENTRY_ = const.gENTRY_ ENTRY_DATA_ = const.gENTRY_DATA_ SCOPE = const.GLOBAL_SCOPE ENTRY_EXISTENCE_ = const.gENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMgENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXgENTRY_ ENTRY_NUMELEMS_ = const.gENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.gENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.gENTRY_DATASPEC_ class AttrList(MutableMapping): """Object representing a list of attributes. .. warning:: This class should not be used directly, but only via its subclasses, :class:`gAttrList` and :class:`zAttrList`. Methods listed here are safe to use from the subclasses. .. autosummary:: ~AttrList.clone ~AttrList.copy ~AttrList.from_dict ~AttrList.new ~AttrList.rename .. automethod:: clone .. automethod:: copy .. automethod:: from_dict .. automethod:: new .. automethod:: rename """ def __init__(self, cdf_file, special_entry=None): """Initialize the attribute collection @param cdf_file: CDF these attributes are in @type cdf_file: :py:class:`pycdf.CDF` @param special_entry: callable which returns a "special" entry number, used to limit results for zAttrs to those which match the zVar (i.e. the var number) @type special_entry: callable """ self._cdf_file = cdf_file self.special_entry = special_entry def __getitem__(self, name): """Find an Attribute by name @param name: name of the Attribute to return @type name: str @return: attribute named L{name} @rtype: L{Attr} @raise KeyError: if there is no attribute named L{name} @raise CDFError: other errors in CDF library """ try: attrib = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attrib.global_scope() != self.global_scope: raise KeyError(name + ': no ' + self.attr_name + ' by that name.') return attrib def __setitem__(self, name, data): """Create an Attribute or change its entries @param name: name of Attribute to change @type name: str @param data: Entries to populate this Attribute with. Any existing Entries will be deleted! Another C{Attr} may be specified, in which case all its entries are copied. @type data: scalar, list, or L{Attr} """ if isinstance(data, AttrList): if name in self: del self[name] attr = self._get_or_create(name) for entryno in range(data.max_idx()): if data.has_entry(entryno): attr.new(data[entryno], data.type(entryno), entryno) else: attr = self._get_or_create(name) if isinstance(data, str_classes): data = [data] else: try: junk = len(data) except TypeError: data = [data] attr[:] = data del attr[len(data):] def __delitem__(self, name): """Delete an Attribute (and all its entries) @param name: name of Attribute to delete @type name: str """ try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) attr._delete() def __iter__(self, current=0): """Iterates over all Attr in this CDF or variable Returns name of one L{Attr} at a time until reaches the end. @note: Returned in number order. 
""" count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) while current < count.value: candidate = self.AttrType(self._cdf_file, current) if candidate.global_scope() == self.global_scope: if self.special_entry is None or \ candidate.has_entry(self.special_entry()): if str == bytes: value = yield(candidate._name) else: value = yield(candidate._name.decode()) if value != None: current = self[value].number() current += 1 def __repr__(self): """Returns representation of attribute list Cannot return anything that can be eval'd to create a copy of the list, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<' + self.__class__.__name__ + ':\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute list This is an 'informal' representation in that it cannot be evaluated directly to create an L{AttrList}. @return: all the data in this list of attributes @rtype: str """ if self._cdf_file._opened: return '\n'.join([key + ': ' + ( ('\n' + ' ' * (len(key) + 2)).join( [str(value[i]) + ' [' + lib.cdftypenames[value.type(i)] + ']' for i in range(value.max_idx() + 1) if value.has_entry(i)]) if isinstance(value, Attr) else str(value) + ' [' + lib.cdftypenames[self.type(key)] + ']' ) for (key, value) in sorted(self.items())]) else: if isinstance(self._cdf_file.pathname, str): return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname) else: return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname.decode('ascii')) def clone(self, master, name=None, new_name=None): """ Clones another attribute list, or one attribute from it, into this list. Parameters ========== master : AttrList the attribute list to copy from. This can be any dict-like object. Other Parameters ================ name : str (optional) name of attribute to clone (default: clone entire list) new_name : str (optional) name of the new attribute, default ``name`` """ if name is None: self._clone_list(master) else: self._clone_attr(master, name, new_name) def copy(self): """ Create a copy of this attribute list Returns ======= out : dict copy of the entries for all attributes in this list """ return dict((key, value[:] if isinstance(value, Attr) else value) for (key, value) in self.items()) def new(self, name, data=None, type=None): """ Create a new Attr in this AttrList Parameters ========== name : str name of the new Attribute Other Parameters ================ data data to put into the first entry in the new Attribute type CDF type of the first entry from :mod:`~pycdf.const`. Only used if data are specified. Raises ====== KeyError : if the name already exists in this list """ if name in self: raise KeyError(name + ' already exists.') #A zAttr without an Entry in this zVar will be a "get" not "create" attr = self._get_or_create(name) if data is not None: if self.special_entry is None: attr.new(data, type) else: attr.new(data, type, self.special_entry()) def rename(self, old_name, new_name): """ Rename an attribute in this list Renaming a zAttribute renames it for *all* zVariables in this CDF! Parameters ========== old_name : str the current name of the attribute new_name : str the new name of the attribute """ AttrList.__getitem__(self, old_name).rename(new_name) def from_dict(self, in_dict): """ Fill this list of attributes from a dictionary .. deprecated:: 0.1.5 Use :meth:`~pycdf.AttrList.clone` instead; it supports cloning from dictionaries. 
Parameters ========== in_dict : dict Attribute list is populated entirely from this dictionary; all existing attributes are deleted. """ warnings.warn("from_dict is deprecated and will be removed. Use clone.", DeprecationWarning) for k in in_dict: self[k] = in_dict[k] for k in list(self): if not k in in_dict: del self[k] def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{AttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name self[new_name] = master[name] def _clone_list(self, master): """Clones this attribute list from another @param master: the attribute list to copy from @type master: L{AttrList} """ for name in master: self._clone_attr(master, name) for name in list(self): #Can't iterate over a list we're changing if not name in master: del self[name] def _get_or_create(self, name): """Retrieve L{Attr} or create it if it doesn't exist @param name: name of the attribute to look up or create @type name: str @return: attribute with this name @rtype: L{Attr} """ attr = None try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status != const.NO_SUCH_ATTR: raise if attr is None: attr = self.AttrType(self._cdf_file, name, True) elif attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) return attr class gAttrList(AttrList): """ Object representing *all* the gAttributes in a CDF. Normally accessed as an attribute of an open :class:`CDF`: >>> global_attribs = cdffile.attrs Appears as a dictionary: keys are attribute names; each value is an attribute represented by a :class:`gAttr` object. To access the global attribute TEXT: >>> text_attr = cdffile.attrs['TEXT'] See Also ======== :class:`AttrList` """ AttrType = gAttr attr_name = 'gAttribute' global_scope = True def __len__(self): """ Number of gAttributes in this CDF Returns ======= out : int number of gAttributes in the CDF """ count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMgATTRS_, ctypes.byref(count)) return count.value class zAttrList(AttrList): """Object representing *all* the zAttributes in a zVariable. Normally accessed as an attribute of a :class:`Var` in an open CDF: >>> epoch_attribs = cdffile['Epoch'].attrs Appears as a dictionary: keys are attribute names, values are the value of the zEntry associated with the appropriate zVariable. Each vAttribute in a CDF may only have a *single* entry associated with each variable. The entry may be a string, a single numerical value, or a series of numerical values. Entries with multiple values are returned as an entire list; direct access to the individual elements is not possible. Example: finding the first dependency of (ISTP-compliant) variable ``Flux``: >>> print cdffile['Flux'].attrs['DEPEND_0'] zAttributes are shared among zVariables, one zEntry allowed per zVariable. (pyCDF hides this detail.) Deleting the last zEntry for a zAttribute will delete the underlying zAttribute. zEntries are created and destroyed by the usual dict methods on the zAttrlist: >>> epoch_attribs['new_entry'] = [1, 2, 4] #assign a list to new zEntry >>> del epoch_attribs['new_entry'] #delete the zEntry The type of the zEntry is guessed from data provided. 
The type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing zEntry corresponding to this zVar #. other zEntries in this zAttribute #. the type of this zVar #. data-matching constraints described in :py:meth:`CDF.new` See Also ======== :class:`AttrList` """ AttrType = zAttr attr_name = 'zAttribute' global_scope = False def __init__(self, zvar): """Initialize the attribute collection @param zvar: zVariable these attributes are in @param zvar: :py:class:`pycdf.Var` """ super(zAttrList, self).__init__(zvar.cdf_file, zvar._num) self._zvar = zvar def __getitem__(self, name): """Find an zEntry by name @param name: name of the zAttribute to return @type name: str @return: attribute named L{name} @rtype: L{zAttr} @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if attrib.has_entry(zvar_num): attrib._raw = self._zvar._raw return attrib[zvar_num] else: raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) def __delitem__(self, name): """Delete an zEntry by name @param name: name of the zEntry to delete @type name: str @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library @note: If this is the only remaining entry, the Attribute will be deleted. """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(str(name) + ': no such attribute for variable ' + str(self._zvar._name)) del attrib[zvar_num] if len(attrib) == 0: attrib._delete() def __setitem__(self, name, data): """Sets a zEntry by name The type of the zEntry is guessed from L{data}. The type is chosen to match the data; subject to that constraint, it will try to match (in order): 1. existing zEntry corresponding to this zVar 2. other zEntries in this zAttribute 3. the type of this zVar 4. data-matching constraints described in L{_Hyperslice.types} @param name: name of zAttribute; zEntry for this zVariable will be set in zAttribute by this name @type name: str @raise CDFError: errors in CDF library @raise ValueError: if unable to find a valid CDF type matching L{data}, or if L{data} is the wrong dimensions. """ try: attr = super(zAttrList, self).__getitem__(name) except KeyError: attr = zAttr(self._cdf_file, name, True) attr._raw = self._zvar._raw attr[self._zvar._num()] = data def __len__(self): """Number of zAttributes in this variable @return: number of zAttributes in the CDF which have entries for this variable. @rtype: int """ length = 0 count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) current = 0 while current < count.value: candidate = zAttr(self._cdf_file, current) if not candidate.global_scope(): if candidate.has_entry(self._zvar._num()): length += 1 current += 1 return length def type(self, name, new_type=None): """Find or change the CDF type of a zEntry in this zVar @param name: name of the zAttr to check or change @type name: str @param new_type: type to change it to, see :py:mod:`pycdf.const` @type new_type: ctypes.c_long @return: CDF variable type, see :py:mod:`pycdf.const` @rtype: int @note: If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57
        """
        attrib = super(zAttrList, self).__getitem__(name)
        zvar_num = self._zvar._num()
        if not attrib.has_entry(zvar_num):
            raise KeyError(name + ': no such attribute for variable ' +
                           self._zvar.name())
        return attrib.type(zvar_num, new_type)

    def _clone_attr(self, master, name, new_name=None):
        """Clones a single attribute from one in this list or another

        Copies data and types from the master attribute to the new one

        @param master: attribute list to copy attribute from
        @type master: L{zAttrList}
        @param name: name of attribute to copy
        @type name: str
        @param new_name: name of the new attribute, default L{name}
        @type new_name: str
        """
        if new_name is None:
            new_name = name
        if new_name in self:
            del self[new_name]
        self.new(new_name, master[name],
                 master.type(name) if hasattr(master, 'type') else None)
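A minimal usage sketch of the dict-like zAttrList access described above; the file name ``example.cdf`` and variable name ``Flux`` are hypothetical, and the file is opened writable so the zEntry can be set:

>>> cdffile = pycdf.CDF('example.cdf', readonly=False)
>>> cdffile['Flux'].attrs['UNITS'] = 'counts/s'  #create or overwrite this variable's zEntry
>>> cdffile['Flux'].attrs['UNITS']
'counts/s'
>>> del cdffile['Flux'].attrs['UNITS']  #also removes the zAttribute if this was its last zEntry
>>> cdffile.close()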
    def _open(self, readonly=True):
        """Opens the CDF file (called on init)

        Will open an existing CDF file read/write.

        Raises
        ======
        CDFError : if CDF library reports an error
        CDFWarning : if CDF library reports a warning and interpreter
            is set to error on warnings.

        .. note:
            Not intended for direct call; pass parameters to
            :py:class:`pycdf.CDF` constructor.
        """
        lib.call(const.OPEN_, const.CDF_, self.pathname, ctypes.byref(self._handle))
        self._opened = True
        if readonly: #Default is RW
            self.readonly(readonly)
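A short sketch of the public path that ends up calling ``_open`` (the path ``existing_file.cdf`` is hypothetical): the :py:class:`pycdf.CDF` constructor opens an existing file read-only by default, and ``readonly()`` with no argument reports the current mode:

>>> cdf = pycdf.CDF('existing_file.cdf')
>>> cdf.readonly()
True
>>> cdf.close()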
1819
1837
#!/usr/bin/env python # -*- coding: utf-8 -*- """ das developers note: This a is modification of the original SpacePy pycdf package. All refereneces to the greater spacepy package have been removed to create a small standalone module. --cwp 2018-10-18 The libcdf.so location code has been changed to find the version installed in anaconda. --cwp 2020-04-06 This package provides a Python interface to the Common Data Format (CDF) library used for many NASA missions, available at http://cdf.gsfc.nasa.gov/. It is targeted at Python 2.6+ and should work without change on either Python 2 or Python 3. The interface is intended to be 'pythonic' rather than reproducing the C interface. To open or close a CDF and access its variables, see the :class:`CDF` class. Accessing data within the variables is via the :class:`Var` class. The :data:`lib` object provides access to some routines that affect the functionality of the library in general. The :mod:`~pycdf.const` module contains constants useful for accessing the underlying library. Authors: Jon Niehof Institution: University of New Hampshire Contact: [email protected] Copyright 2010-2015 Los Alamos National Security, LLC. """ __contact__ = 'Jon Niehof, [email protected]' try: from collections.abc import MutableMapping, MutableSequence except ImportError: from collections import MutableMapping, MutableSequence import ctypes import ctypes.util import datetime import operator import os import os.path import shutil import sys import tempfile import warnings import weakref import numpy import numpy.ma #Import const AFTER library loaded, so failed load doesn't leave half-imported #from . import const try: str_classes = (str, bytes, unicode) except NameError: str_classes = (str, bytes) class Library(object): """ Abstraction of the base CDF C library and its state. Not normally intended for end-user use. An instance of this class is created at package load time as the :data:`~pycdf.lib` variable, providing access to the underlying C library if necessary. The CDF library itself is described in section 2.1 of the CDF user's guide, as well as the CDF C reference manual. Calling the C library directly requires knowledge of :mod:`ctypes`. Instantiating this object loads the C library, see :doc:`/pycdf` docs for details. .. autosummary:: ~Library.call ~Library.check_status ~Library.datetime_to_epoch ~Library.datetime_to_epoch16 ~Library.datetime_to_tt2000 ~Library.epoch_to_datetime ~Library.epoch_to_epoch16 ~Library.epoch_to_num ~Library.epoch_to_tt2000 ~Library.epoch16_to_datetime ~Library.epoch16_to_epoch ~Library.epoch16_to_tt2000 ~Library.set_backward supports_int8 ~Library.tt2000_to_datetime ~Library.tt2000_to_epoch ~Library.tt2000_to_epoch16 v_datetime_to_epoch v_datetime_to_epoch16 v_datetime_to_tt2000 v_epoch_to_datetime v_epoch_to_tt2000 v_epoch16_to_datetime v_epoch16_to_tt2000 v_tt2000_to_datetime v_tt2000_to_epoch v_tt2000_to_epoch16 libpath version .. automethod:: call .. automethod:: check_status .. automethod:: datetime_to_epoch .. automethod:: datetime_to_epoch16 .. automethod:: datetime_to_tt2000 .. automethod:: epoch_to_datetime .. automethod:: epoch_to_epoch16 .. automethod:: epoch_to_num .. automethod:: epoch_to_tt2000 .. automethod:: epoch16_to_datetime .. automethod:: epoch16_to_epoch .. automethod:: epoch16_to_tt2000 .. automethod:: set_backward .. attribute:: supports_int8 True if this library supports INT8 and TIME_TT2000 types; else False. .. automethod:: tt2000_to_datetime .. automethod:: tt2000_to_epoch .. 
automethod:: tt2000_to_epoch16 .. method:: v_datetime_to_epoch(datetime) A vectorized version of :meth:`datetime_to_epoch` which takes a numpy array of datetimes as input and returns an array of epochs. .. method:: v_datetime_to_epoch16(datetime) A vectorized version of :meth:`datetime_to_epoch16` which takes a numpy array of datetimes as input and returns an array of epoch16. .. method:: v_datetime_to_tt2000(datetime) A vectorized version of :meth:`datetime_to_tt2000` which takes a numpy array of datetimes as input and returns an array of TT2000. .. method:: v_epoch_to_datetime(epoch) A vectorized version of :meth:`epoch_to_datetime` which takes a numpy array of epochs as input and returns an array of datetimes. .. method:: v_epoch_to_tt2000(epoch) A vectorized version of :meth:`epoch_to_tt2000` which takes a numpy array of epochs as input and returns an array of tt2000s. .. method:: v_epoch16_to_datetime(epoch0, epoch1) A vectorized version of :meth:`epoch16_to_datetime` which takes a numpy array of epoch16 as input and returns an array of datetimes. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_epoch16_to_tt2000(epoch16) A vectorized version of :meth:`epoch16_to_tt2000` which takes a numpy array of epoch16 as input and returns an array of tt2000s. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_tt2000_to_datetime(tt2000) A vectorized version of :meth:`tt2000_to_datetime` which takes a numpy array of tt2000 as input and returns an array of datetimes. .. method:: v_tt2000_to_epoch(tt2000) A vectorized version of :meth:`tt2000_to_epoch` which takes a numpy array of tt2000 as input and returns an array of epochs. .. method:: v_tt2000_to_epoch16(tt2000) A vectorized version of :meth:`tt2000_to_epoch16` which takes a numpy array of tt2000 as input and returns an array of epoch16. .. attribute:: libpath The path where pycdf found the CDF C library, potentially useful in debugging. If this contains just the name of a file (with no path information), then the system linker found the library for pycdf. On Linux, ``ldconfig -p`` may be useful for displaying the system's library resolution. .. attribute:: version Version of the CDF library, (version, release, increment, subincrement) """ def __init__(self, libpath=None, library=None): """Load the CDF C library. Searches for the library in the order: 1. Appropriately-named file in CDF_LIB 2. Appropriately-named file in CDF_BASE 3. Standard library search path @raise CDFError: BAD_DATA_TYPE if can't map types properly """ if not 'CDF_TMP' in os.environ: os.environ['CDF_TMP'] = tempfile.gettempdir() if not library: if not libpath: self.libpath, self._library = self._find_lib() if self._library is None: raise Exception(( 'Cannot load CDF C library; checked {0}. 
' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(self.libpath))) else: self._library = ctypes.CDLL(libpath) self.libpath = libpath else: self._library = library self.libpath = libpath self._library.CDFlib.restype = ctypes.c_long #commonly used, so set it up here self._library.EPOCHbreakdown.restype = ctypes.c_long self._library.computeEPOCH.restype = ctypes.c_double self._library.computeEPOCH.argtypes = [ctypes.c_long] * 7 self._library.computeEPOCH16.restype = ctypes.c_double self._library.computeEPOCH16.argtypes = [ctypes.c_long] * 10 + \ [ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDFsetFileBackward'): self._library.CDFsetFileBackward.restype = None self._library.CDFsetFileBackward.argtypes = [ctypes.c_long] #Map old name to the 3.7.1+ name if not hasattr(self._library, 'computeTT2000') \ and hasattr(self._library, 'CDF_TT2000_from_UTC_parts'): self._library.computeTT2000 \ = self._library.CDF_TT2000_from_UTC_parts if hasattr(self._library, 'computeTT2000'): self._library.computeTT2000.restype = ctypes.c_longlong self._library.computeTT2000.argtypes = \ [ctypes.c_double] *9 #Map old name to the 3.7.1+ name if not hasattr(self._library, 'breakdownTT2000') \ and hasattr(self._library, 'CDF_TT2000_to_UTC_parts'): self._library.breakdownTT2000 \ = self._library.CDF_TT2000_to_UTC_parts if hasattr(self._library, 'breakdownTT2000'): self._library.breakdownTT2000.restype = None self._library.breakdownTT2000.argtypes = \ [ctypes.c_longlong] + [ctypes.POINTER(ctypes.c_double)] * 9 if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH'): self._library.CDF_TT2000_to_UTC_EPOCH.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH.argtypes = [ctypes.c_longlong] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH'): self._library.CDF_TT2000_from_UTC_EPOCH.restype = ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH.argtypes = [ctypes.c_double] if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH16'): self._library.CDF_TT2000_to_UTC_EPOCH16.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH16.argtypes = \ [ctypes.c_longlong, ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH16'): self._library.CDF_TT2000_from_UTC_EPOCH16.restype = \ ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH16.argtypes = \ [ctypes.POINTER(ctypes.c_double * 2)] #Get CDF version information ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) sub = ctypes.c_char(b' ') self.call(const.GET_, const.LIB_VERSION_, ctypes.byref(ver), const.GET_, const.LIB_RELEASE_, ctypes.byref(rel), const.GET_, const.LIB_INCREMENT_, ctypes.byref(inc), const.GET_, const.LIB_subINCREMENT_, ctypes.byref(sub)) ver = ver.value rel = rel.value inc = inc.value sub = sub.value self.version = (ver, rel, inc, sub) self._del_middle_rec_bug = ver < 3 or (ver == 3 and (rel < 4 or (rel == 4 and inc < 1))) self.supports_int8 = (ver > 3 or (ver == 3 and rel >=4)) self.cdftypenames = {const.CDF_BYTE.value: 'CDF_BYTE', const.CDF_CHAR.value: 'CDF_CHAR', const.CDF_INT1.value: 'CDF_INT1', const.CDF_UCHAR.value: 'CDF_UCHAR', const.CDF_UINT1.value: 'CDF_UINT1', const.CDF_INT2.value: 'CDF_INT2', const.CDF_UINT2.value: 'CDF_UINT2', const.CDF_INT4.value: 'CDF_INT4', const.CDF_UINT4.value: 'CDF_UINT4', const.CDF_INT8.value: 'CDF_INT8', const.CDF_FLOAT.value: 'CDF_FLOAT', const.CDF_REAL4.value: 'CDF_REAL4', const.CDF_DOUBLE.value: 'CDF_DOUBLE', const.CDF_REAL8.value: 'CDF_REAL8', const.CDF_EPOCH.value: 'CDF_EPOCH', 
const.CDF_EPOCH16.value: 'CDF_EPOCH16', const.CDF_TIME_TT2000.value: 'CDF_TIME_TT2000', } self.numpytypedict = {const.CDF_BYTE.value: numpy.int8, const.CDF_CHAR.value: numpy.int8, const.CDF_INT1.value: numpy.int8, const.CDF_UCHAR.value: numpy.uint8, const.CDF_UINT1.value: numpy.uint8, const.CDF_INT2.value: numpy.int16, const.CDF_UINT2.value: numpy.uint16, const.CDF_INT4.value: numpy.int32, const.CDF_UINT4.value: numpy.uint32, const.CDF_INT8.value: numpy.int64, const.CDF_FLOAT.value: numpy.float32, const.CDF_REAL4.value: numpy.float32, const.CDF_DOUBLE.value: numpy.float64, const.CDF_REAL8.value: numpy.float64, const.CDF_EPOCH.value: numpy.float64, const.CDF_EPOCH16.value: numpy.dtype((numpy.float64, 2)), const.CDF_TIME_TT2000.value: numpy.int64, } self.timetypes = [const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value] if not self.supports_int8: del self.cdftypenames[const.CDF_INT8.value] del self.numpytypedict[const.CDF_INT8.value] del self.cdftypenames[const.CDF_TIME_TT2000.value] del self.numpytypedict[const.CDF_TIME_TT2000.value] elif sys.platform.startswith('linux') \ and os.uname()[4].startswith('arm') \ and hasattr(self._library, 'computeTT2000') \ and self._library.computeTT2000( 2010, 1, 1, 0, 0, 0, 0, 0, 0) != 315576066184000000: #TT2000 call failed, so probably need to type-pun #double arguments to variadic functions. #Calling convention for non-variadic functions with floats #is unique, but convention for ints is same as variadic. #So type-pun arguments to integers to force that calling #convention. if ctypes.sizeof(ctypes.c_longlong) != \ ctypes.sizeof(ctypes.c_double): warnings.warn('ARM with unknown type sizes; ' 'TT2000 functions will not work.') else: self._library.computeTT2000.argtypes = \ [ctypes.c_longlong] * 9 c_ll_p = ctypes.POINTER(ctypes.c_longlong) if self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( 2010)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) != 315576066184000000: warnings.warn('ARM with unknown calling convention; ' 'TT2000 functions will not work.') self.datetime_to_tt2000 = self._datetime_to_tt2000_typepunned v_epoch16_to_datetime = numpy.frompyfunc( self.epoch16_to_datetime, 2, 1) self.v_epoch16_to_datetime = \ lambda x: v_epoch16_to_datetime(x[..., 0], x[..., 1]) self.v_epoch_to_datetime = numpy.frompyfunc( self.epoch_to_datetime, 1, 1) self.v_tt2000_to_datetime = numpy.frompyfunc( self.tt2000_to_datetime, 1, 1) self.v_datetime_to_epoch = numpy.vectorize( self.datetime_to_epoch, otypes=[numpy.float64]) v_datetime_to_epoch16 = numpy.frompyfunc( self.datetime_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. 
We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_datetime_to_epoch16(x): retval = numpy.require(v_datetime_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_datetime_to_epoch16 = _v_datetime_to_epoch16 self.v_datetime_to_tt2000 = numpy.vectorize( self.datetime_to_tt2000, otypes=[numpy.int64]) self.v_epoch_to_tt2000 = numpy.vectorize( self.epoch_to_tt2000, otypes=[numpy.int64]) self.v_tt2000_to_epoch = numpy.vectorize( self.tt2000_to_epoch, otypes=[numpy.float64]) v_epoch16_to_tt2000 = numpy.frompyfunc( self.epoch16_to_tt2000, 2, 1) self.v_epoch16_to_tt2000 = \ lambda x: v_epoch16_to_tt2000(x[..., 0], x[..., 1]) v_tt2000_to_epoch16 = numpy.frompyfunc( self.tt2000_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_tt2000_to_epoch16(x): retval = numpy.require(v_tt2000_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_tt2000_to_epoch16 = _v_tt2000_to_epoch16 if not self.supports_int8: self.datetime_to_tt2000 = self._bad_tt2000 self.tt2000_to_datetime = self._bad_tt2000 self.v_datetime_to_tt2000 = self._bad_tt2000 self.v_tt2000_to_datetime = self._bad_tt2000 self.epoch_to_tt2000 = self._bad_tt2000 self.v_epoch_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch = self._bad_tt2000 self.v_tt2000_to_epoch = self._bad_tt2000 self.epoch_16_to_tt2000 = self._bad_tt2000 self.v_epoch16_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch16 = self._bad_tt2000 self.v_tt2000_to_epoch16 = self._bad_tt2000 #Default to V2 CDF self.set_backward(True) @staticmethod def _find_lib(): """ Search for the CDF library Searches in likely locations for CDF libraries and attempts to load them. Stops at first successful load and, if fails, reports all the files that were tried as libraries. Returns ======= out : tuple This is either (path to library, loaded library) or, in the event of failure, (None, list of libraries tried) """ failed = [] for libpath in Library._lib_paths(): try: lib = ctypes.CDLL(libpath) except: failed.append(libpath) else: return libpath, lib return (failed, None) @staticmethod def _lib_paths(): """Find candidate paths for the CDF library Does not check that the library is actually in any particular directory, just returns a list of possible locations, in priority order. Returns ======= out : generator of str paths that look like the CDF library """ #What the library might be named names = { 'win32': ['cdf.dll'], 'darwin': ['libcdf.dylib', 'cdf.dylib', 'libcdf.so'], 'linux2': ['libcdf.so'], 'linux': ['libcdf.so'], } names = names.get(sys.platform, ['libcdf.so']) #All existing CDF-library-like paths within a directory search_dir = lambda x: \ [os.path.join(x, fname) for fname in names if os.path.exists(os.path.join(x, fname))] # Only use anaconda locations... # Defined during builds ... if 'PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['PREFIX'], 'lib')): yield p # defined when conda is activated ... 
if 'CONDA_PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'lib')): yield p # Special subdirectory for anaconda unix packages on windows if 'LIBRARY_BIN' in os.environ: for p in search_dir(os.environ['LIBRARY_BIN']): yield p ctypespath = ctypes.util.find_library( 'cdf.dll' if sys.platform == 'win32' else 'cdf') if ctypespath: yield ctypespath def check_status(self, status, ignore=()): """ Raise exception or warning based on return status of CDF call Parameters ========== status : int status returned by the C library Other Parameters ================ ignore : sequence of ctypes.c_long CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. (Default none). Raises ====== CDFError : if status < CDF_WARN, indicating an error Warns ===== CDFWarning : if CDF_WARN <= status < CDF_OK, indicating a warning. Returns ======= out : int status (unchanged) """ if status == const.CDF_OK or status in ignore: return status if status < const.CDF_WARN: raise CDFError(status) else: warning = CDFWarning(status) warning.warn() return status def call(self, *args, **kwargs): """ Call the CDF internal interface Passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with :meth:`check_status`. Terminal NULL is automatically added to args. Parameters ========== args : various, see :mod:`ctypes` Passed directly to the CDF library interface. Useful constants are defined in the :mod:`~pycdf.const` module. Other Parameters ================ ignore : sequence of CDF statuses sequence of CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. Returns ======= out : int CDF status from the library Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ if 'ignore' in kwargs: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) ), kwargs['ignore']) else: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) )) def set_backward(self, backward=True): """ Set backward compatibility mode for new CDFs Unless backward compatible mode is set, CDF files created by the version 3 library can not be read by V2. Parameters ========== backward : boolean Set backward compatible mode if True; clear it if False. Raises ====== ValueError : if backward=False and underlying CDF library is V2 """ if self.version[0] < 3: if not backward: raise ValueError( 'Cannot disable backward-compatible mode for CDF version 2.') else: return self._library.CDFsetFileBackward(const.BACKWARDFILEon if backward else const.BACKWARDFILEoff) def epoch_to_datetime(self, epoch): """ Converts a CDF epoch value to a datetime Parameters ========== epoch : float epoch value from CDF Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
See Also ======== v_epoch_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) self._library.EPOCHbreakdown(ctypes.c_double(epoch), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999000) else: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, msec.value * 1000) def datetime_to_epoch(self, dt): """ Converts a Python datetime to a CDF Epoch value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : float epoch corresponding to dt See Also ======== v_datetime_to_epoch """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) micro = dt.microsecond % 1000 if micro >= 500 and dt.year < 9999: dt += datetime.timedelta(0, 0, 1000) return self._library.computeEPOCH(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000)) def epoch16_to_datetime(self, epoch0, epoch1): """ Converts a CDF epoch16 value to a datetime .. note:: The call signature has changed since SpacePy 0.1.2. Formerly this method took a single argument with two values; now it requires two arguments (one for each value). To convert existing code, replace ``epoch16_to_datetime(epoch)`` with ``epoch16_to_datetime(*epoch)``. Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. See Also ======== v_epoch16_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) usec = ctypes.c_long(0) nsec = ctypes.c_long(0) psec = ctypes.c_long(0) self._library.EPOCH16breakdown((ctypes.c_double * 2)(epoch0, epoch1), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec), ctypes.byref(psec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) micro = int(float(msec.value) * 1000 + float(usec.value) + float(nsec.value) / 1000 + float(psec.value) / 1e6 + 0.5) if micro < 1000000: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_epoch16(self, dt): """ Converts a Python datetime to a CDF Epoch16 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : list of float epoch16 corresponding to dt See Also ======== v_datetime_to_epoch16 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) #Default to "illegal epoch" epoch16 = (ctypes.c_double * 2)(-1., -1.) 
if self._library.computeEPOCH16(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0, 0, epoch16): return (-1., -1.) #Failure, so illegal epoch return (epoch16[0], epoch16[1]) def epoch_to_epoch16(self, epoch): """ Converts a CDF EPOCH to a CDF EPOCH16 value Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : (double, double) EPOCH16 corresponding to epoch """ e = numpy.require(epoch, numpy.float64) s = numpy.trunc(e / 1000.0) #ugly numpy stuff, probably a better way.... res = numpy.hstack((s, (e - s * 1000.0) * 1e9)) if len(res) <= 2: return res newshape = list(res.shape[0:-2]) newshape.append(res.shape[-1] // 2) newshape.append(2) return numpy.rollaxis(res.reshape(newshape), -1, -2) def epoch_to_num(self, epoch): """ Convert CDF EPOCH to matplotlib number. Same output as :func:`~matplotlib.dates.date2num` and useful for plotting large data sets without converting the times through datetime. Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : double Floating point number representing days since 0001-01-01. """ #date2num day 1 is 1/1/1 00UT #epoch 1/1/1 00UT is 31622400000.0 (millisecond) return (epoch - 31622400000.0) / (24 * 60 * 60 * 1000.0) + 1.0 def epoch16_to_epoch(self, epoch16): """ Converts a CDF EPOCH16 to a CDF EPOCH value Parameters ========== epoch16 : (double, double) EPOCH16 to convert. Lists and numpy arrays are acceptable. LAST dimension should be 2: the two pairs of EPOCH16 Returns ======= out : double EPOCH corresponding to epoch16 """ e = numpy.require(epoch16, numpy.float64) return e[..., 0] * 1000.0 + numpy.round(e[..., 1] / 1e9) def tt2000_to_datetime(self, tt2000): """ Converts a CDF TT2000 value to a datetime .. note:: Although TT2000 values support leapseconds, Python's datetime object does not. Any times after 23:59:59.999999 will be truncated to 23:59:59.999999. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
See Also ======== v_tt2000_to_datetime """ yyyy = ctypes.c_double(0) mm = ctypes.c_double(0) dd = ctypes.c_double(0) hh = ctypes.c_double(0) min = ctypes.c_double(0) sec = ctypes.c_double(0) msec = ctypes.c_double(0) usec = ctypes.c_double(0) nsec = ctypes.c_double(0) self._library.breakdownTT2000( ctypes.c_longlong(tt2000), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) sec = int(sec.value) if sec >= 60: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), 59, 999999) micro = int(msec.value * 1000 + usec.value + nsec.value / 1000 + 0.5) if micro < 1000000: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_tt2000(self, dt): """ Converts a Python datetime to a CDF TT2000 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0) def _datetime_to_tt2000_typepunned(self, dt): """ Converts a Python datetime to a CDF TT2000 value Typepunned version that passes doubles as longlongs, to get around ARM calling convention oddness. Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ c_ll_p = ctypes.POINTER(ctypes.c_longlong) if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( dt.year)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.month)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.day)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.hour)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.minute)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.second)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond // 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond % 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) def epoch_to_tt2000(self, epoch): """ Converts a CDF EPOCH to a CDF TT2000 value Parameters ========== epoch : double EPOCH to convert Returns ======= out : int tt2000 corresponding to epoch See Also ======== v_epoch_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH(epoch) def tt2000_to_epoch(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH .. note:: Although TT2000 values support leapseconds, CDF EPOCH values do not. 
Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double EPOCH corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch """ return self._library.CDF_TT2000_to_UTC_EPOCH(tt2000) def epoch16_to_tt2000(self, epoch0, epoch1): """ Converts a CDF epoch16 value to TT2000 .. note:: Because TT2000 does not support picoseconds, the picoseconds value in epoch is ignored (i.e., truncated.) Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : long TT2000 corresponding to epoch. See Also ======== v_epoch16_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH16( (ctypes.c_double * 2)(epoch0, epoch1)) def tt2000_to_epoch16(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH16 .. note:: Although TT2000 values support leapseconds, CDF EPOCH16 values do not. Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double, double EPOCH16 corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch16 """ #Default to "illegal epoch" if isn't populated epoch16 = (ctypes.c_double * 2)(-1., -1.) if self._library.CDF_TT2000_to_UTC_EPOCH16(tt2000, epoch16): return (-1., -1.) #Failure; illegal epoch return (epoch16[0], epoch16[1]) def _bad_tt2000(*args, **kwargs): """Convenience function for complaining that TT2000 not supported""" raise NotImplementedError( 'TT2000 functions require CDF library 3.4.0 or later') def download_library(): """Download and install the CDF library""" if sys.platform != 'win32': raise NotImplementedError( 'CDF library install only supported on Windows') try: import html.parser as HTMLParser except ImportError: import HTMLParser #https://stackoverflow.com/questions/1713038/super-fails-with-error-typeerror-argument-1-must-be-type-not-classobj class LinkParser(HTMLParser.HTMLParser, object): def __init__(self, *args, **kwargs): self.links_found = [] super(LinkParser, self).__init__(*args, **kwargs) def handle_starttag(self, tag, attrs): if tag != 'a' or attrs[0][0] != 'href': return self.links_found.append(attrs[0][1]) import re import subprocess try: import urllib.request as u except ImportError: import urllib as u # Removed reference to spacepy #import spacepy #if spacepy.config.get('user_agent', None): # class AppURLopener(u.FancyURLopener): # version = spacepy.config['user_agent'] # u._urlopener = AppURLopener() baseurl = 'https://spdf.sci.gsfc.nasa.gov/pub/software/cdf/dist/' url = u.urlopen(baseurl) listing = url.read() url.close() p = LinkParser() p.feed(listing) cdfdist = [l for l in p.links_found if re.match('^cdf3\d_\d(?:_\d)?/$', l)] if not cdfdist: raise RuntimeError( "Couldn't find CDF distribution directory to download") cdfdist.sort(key=lambda x: x.rstrip('/').split('_')) cdfverbase = cdfdist[-1].rstrip('/') instfname = cdfverbase + ('_0' if cdfverbase.count('_') == 1 else '') + \ '-setup-{0}.exe'.format(len('%x' % sys.maxsize)*4) insturl = baseurl + cdfverbase + '/windows/' + instfname tmpdir = tempfile.mkdtemp() try: fname, status = u.urlretrieve(insturl, os.path.join(tmpdir, instfname)) subprocess.check_call([fname, '/install', '/q1'], shell=False) finally: shutil.rmtree(tmpdir) _libpath, _library = Library._find_lib() 
if _library is None: raise Exception(('Cannot load CDF C library; checked {0}. ' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(_libpath))) from . import const lib = Library(_libpath, _library) """Module global library object. Initalized at module load time so all classes have ready access to the CDF library and a common state. E.g: >>> import pycdf >>> pycdf.lib.version (3, 3, 0, ' ') """ class CDFException(Exception): """ Base class for errors or warnings in the CDF library. Not normally used directly, but in subclasses :class:`CDFError` and :class:`CDFWarning`. Error messages provided by this class are looked up from the underlying C library. """ def __init__(self, status): """ Create a CDF Exception Uses CDF C library to look up an appropriate error message. Parameters ========== status : ctypes.c_long CDF status """ self.status = status self.string = 'CDF error ' + repr(status) + ', unable to get details.' message = ctypes.create_string_buffer(const.CDF_STATUSTEXT_LEN + 1) try: retval = lib._library.CDFlib(const.SELECT_, const.CDF_STATUS_, ctypes.c_long(status), const.GET_, const.STATUS_TEXT_, message, const.NULL_) if retval == const.CDF_OK: if isinstance(message.value, str): self.string = message.value elif isinstance(message.value, bytes): self.string = message.value.decode() except: pass def __str__(self): """ Error string associated with the library error. Returns ======= out : str Error message from the CDF library. """ return self.string class CDFError(CDFException): """Raised for an error in the CDF library.""" pass class CDFWarning(CDFException, UserWarning): """Used for a warning in the CDF library.""" def warn(self, level=4): """ Issues a warning based on the information stored in my exception Intended for use in check_status or similar wrapper function. Other Parameters ================ level : int optional (default 3), how far up the stack the warning should be reported. Passed directly to :class:`warnings.warn`. """ warnings.warn(self, self.__class__, level) class EpochError(Exception): """Used for errors in epoch routines""" pass def _compress(obj, comptype=None, param=None): """Set or check the compression of a :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param obj: object on which to set or check compression @type obj: :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param comptype: type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :py:mod:`pycdf.const`. If not specified, will not change compression. @type comptype: ctypes.c_long @param param: Compression parameter, see CDF CRM 4.10 and :py:mod:`pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
@type param: ctypes.c_long @return: (comptype, param) currently in effect @rtype: tuple """ if isinstance(obj, CDF): COMPRESSION_ = const.CDF_COMPRESSION_ elif isinstance(obj, Var): COMPRESSION_ = const.zVAR_COMPRESSION_ else: raise ValueError('Must specify a CDF or Var type.') validparams = {const.NO_COMPRESSION.value: [ctypes.c_long(0)], const.RLE_COMPRESSION.value: [const.RLE_OF_ZEROs], const.HUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.AHUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.GZIP_COMPRESSION.value: [ctypes.c_long(5), ctypes.c_long(1), ctypes.c_long(2), ctypes.c_long(3), ctypes.c_long(4), ctypes.c_long(6), ctypes.c_long(7), ctypes.c_long(8), ctypes.c_long(9), ], } comptypes = [const.NO_COMPRESSION, const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION, const.GZIP_COMPRESSION] comptypevalues = [i.value for i in comptypes] if comptype != None: if not hasattr(comptype, 'value'): comptype = ctypes.c_long(comptype) if param is None: if not comptype.value in validparams: raise CDFError(const.BAD_COMPRESSION) param = validparams[comptype.value][0] paramlist = (ctypes.c_long * 1)(param) obj._call(const.PUT_, COMPRESSION_, comptype, paramlist) params = (ctypes.c_long * const.CDF_MAX_PARMS)(*([0] * const.CDF_MAX_PARMS)) comptype = ctypes.c_long(0) percent = ctypes.c_long(0) obj._call(const.GET_, COMPRESSION_, ctypes.byref(comptype), ctypes.byref(params), ctypes.byref(percent)) param = params[0] if not comptype.value in comptypevalues: raise CDFError(const.BAD_COMPRESSION) validparamvalues = [i.value for i in validparams[comptype.value]] if not param in validparamvalues: raise CDFError(const.BAD_COMPRESSION_PARM) comptype = comptypes[comptypevalues.index(comptype.value)] if comptype in (const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION): param = validparams[comptype.value][validparamvalues.index(param)] return (comptype, param) class CDF(MutableMapping): """ Python object representing a CDF file. Open or create a CDF file by creating an object of this class. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. A readonly CDF with many variables may be slow to close. See :meth:`readonly`. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :meth:`close` or :meth:`save` when done. .. note:: Existing CDF files are opened read-only by default, see :meth:`readonly` to change. CDF supports the `with <http://docs.python.org/tutorial/inputoutput.html#methods-of-file-objects>`_ keyword, like other file objects, so: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... #do brilliant things with the CDF will open the CDF, execute the indented statements, and close the CDF when finished or when an error occurs. The `python docs <http://docs.python.org/reference/compound_stmts.html#with>`_ include more detail on this 'context manager' ability. 
CDF objects behave like a python `dictionary <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_, where the keys are names of variables in the CDF, and the values, :class:`Var` objects. As a dictionary, they are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_ and it is easy to loop over all of the variables in a file. Some examples: #. List the names of all variables in the open CDF ``cdffile``: >>> cdffile.keys() >>> for k in cdffile: #Alternate ... print(k) #. Get a :class:`Var` object for the variable named ``Epoch``: >>> epoch = cdffile['Epoch'] #. Determine if a CDF contains a variable named ``B_GSE``: >>> if 'B_GSE' in cdffile: ... print('B_GSE is in the file') ... else: ... print('B_GSE is not in the file') #. Find how many variables are in the file: >>> print(len(cdffile)) #. Delete the variable ``Epoch`` from the open CDF file ``cdffile``: >>> del cdffile['Epoch'] #. Display a summary of variables and types in open CDF file ``cdffile``: >>> print(cdffile) #. Open the CDF named ``cdf_filename.cdf``, read *all* the data from all variables into dictionary ``data``, and close it when done or if an error occurs: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... data = cdffile.copy() This last example can be very inefficient as it reads the entire CDF. Normally it's better to treat the CDF as a dictionary and access only the data needed, which will be pulled transparently from disc. See :class:`Var` for more subtle examples. Potentially useful dictionary methods and related functions: - `in <http://docs.python.org/reference/expressions.html#in>`_ - `keys <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_ - :py:func:`len` - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - :py:func:`sorted` - :py:func:`~spacepy.toolbox.dictree` The CDF user's guide section 2.2 has more background information on CDF files. The :attr:`~CDF.attrs` Python attribute acts as a dictionary referencing CDF attributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`gAttrList` for more on the dictionary of global attributes. Creating a new CDF from a master (skeleton) CDF has similar syntax to opening one: >>> cdffile = pycdf.CDF('cdf_filename.cdf', 'master_cdf_filename.cdf') This creates and opens ``cdf_filename.cdf`` as a copy of ``master_cdf_filename.cdf``. Using a skeleton CDF is recommended over making a CDF entirely from scratch, but this is possible by specifying a blank master: >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') When CDFs are created in this way, they are opened read-write, see :py:meth:`readonly` to change. By default, new CDFs (without a master) are created in version 2 (backward-compatible) format. To create a version 3 CDF, use :meth:`Library.set_backward`: >>> pycdf.lib.set_backward(False) >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') Add variables by direct assignment, which will automatically set type and dimension based on the data provided: >>> cdffile['new_variable_name'] = [1, 2, 3, 4] or, if more control is needed over the type and dimensions, use :py:meth:`new`. Although it is supported to assign Var objects to Python variables for convenience, there are some minor pitfalls that can arise when changing a CDF that will not affect most users. 
This is only a concern when assigning a zVar object to a Python variable, changing the CDF through some other variable, and then trying to use the zVar object via the originally assigned variable. Deleting a variable: >>> var = cdffile['Var1'] >>> del cdffile['Var1'] >>> var[0] #fail, no such variable Renaming a variable: >>> var = cdffile['Var1'] >>> cdffile['Var1'].rename('Var2') >>> var[0] #fail, no such variable Renaming via the same variable works: >>> var = cdffile['Var1'] >>> var.rename('Var2') >>> var[0] #succeeds, aware of new name Deleting a variable and then creating another variable with the same name may lead to some surprises: >>> var = cdffile['Var1'] >>> var[...] = [1, 2, 3, 4] >>> del cdffile['Var1'] >>> cdffile.new('Var1', data=[5, 6, 7, 8] >>> var[...] [5, 6, 7, 8] .. autosummary:: ~CDF.attr_num ~CDF.attrs ~CDF.add_attr_to_cache ~CDF.add_to_cache ~CDF.backward ~CDF.checksum ~CDF.clear_attr_from_cache ~CDF.clear_from_cache ~CDF.clone ~CDF.close ~CDF.col_major ~CDF.compress ~CDF.copy ~CDF.from_data ~CDF.new ~CDF.raw_var ~CDF.readonly ~CDF.save ~CDF.var_num ~CDF.version .. attribute:: CDF.attrs Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. .. attribute:: CDF.backward True if this CDF was created in backward-compatible mode (for opening with CDF library before 3.x) .. automethod:: add_to_cache .. automethod:: add_attr_to_cache .. automethod:: attr_num .. automethod:: checksum .. automethod:: clear_from_cache .. automethod:: clear_attr_from_cache .. automethod:: clone .. automethod:: close .. automethod:: col_major .. automethod:: compress .. automethod:: copy .. automethod:: from_data .. automethod:: new .. automethod:: raw_var .. automethod:: readonly .. automethod:: save .. automethod:: var_num .. automethod:: version """ def __init__(self, pathname, masterpath=None, create=None, readonly=None): """Open or create a CDF file. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. Raises ====== CDFError if CDF library reports an error CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save` when done. 
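A new, empty CDF can also be created without a master by passing ``create=True``; it is then opened read/write. This is a minimal sketch and the file name is illustrative:

>>> newfile = pycdf.CDF('new_filename.cdf', create=True)
>>> newfile['data'] = [1, 2, 3]
>>> newfile.close()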
""" if masterpath is not None: #Looks like we want to create if create is False: raise ValueError('Cannot specify a master CDF without creating a CDF') if readonly is True: raise ValueError('Cannot create a CDF in readonly mode') if create and readonly: raise ValueError('Cannot create a CDF in readonly mode') try: self.pathname = pathname.encode() except AttributeError: raise ValueError( 'pathname must be string-like: {0}'.format(pathname)) self._handle = ctypes.c_void_p(None) self._opened = False if masterpath is None and not create: self._open(True if readonly is None else readonly) elif masterpath: self._from_master(masterpath.encode()) else: self._create() lib.call(const.SELECT_, const.CDF_zMODE_, ctypes.c_long(2)) self._attrlistref = weakref.ref(gAttrList(self)) self.backward = self.version()[0] < 3 self._var_nums = {} """Cache of name-to-number mappings for variables in this CDF""" self._attr_info = {} """Cache of name-to-(number, global) mappings for attributes in this CDF""" def __del__(self): """Destructor; called when CDF object is destroyed. Close CDF file if there is still a valid handle. .. note:: To avoid data loss, explicitly call :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save`. """ if self._opened: self.close() def __delitem__(self, name): """Delete a zVariable in this CDF, by name or number Parameters ========== name : string or int Name or number of the CDF variable .. note: Variable numbers may change if variables are added or removed. Examples ======== Delete the variable ``Epoch`` from the open CDF file ``cdffile``. >>> del cdffile['Epoch'] """ self[name]._delete() def __enter__(self): """Context manager entrance function.""" return self def __exit__(self, type, value, traceback): """Context manager exit function. Close CDF file. """ self.close() def __getitem__(self, name): """Gets a zVariable in this CDF, by name or number The CDF acts like a dict @param name: Name or number of the CDF variable @type name: string or int @return: CDF variable named or numbered L{name} @rtype: :py:class:`pycdf.Var` @raise KeyError: for pretty much any problem in lookup @note: variable numbers may change if variables are added or removed. """ try: return Var(self, name) except CDFException as e: raise KeyError('{0}: {1}'.format(name, e)) def __setitem__(self, name, data): """Writes data to a zVariable in this CDF If the zVariable does not exist, will create one matching L{data}. If it does exist, will attempt to write L{data} to it without changing the type or dimensions. @param name: name or number of the variable to write @type name: str or int @param data: data to write, or a :py:class:`pycdf.Var` to copy """ if isinstance(data, Var): self.clone(data, name) elif name in self: self[name][...] 
= data if hasattr(data, 'attrs'): self[name].attrs.clone(data.attrs) else: self.new(name, data) def __iter__(self, current = 0): """Iterates over zVars in CDF Iterators for dicts return keys @note: Returned in variable-number order """ while current < self.__len__(): name = self[current].name() value = (yield name) if value is None: current += 1 else: current = self[value]._num() current += 1 def __len__(self): """Implements 'length' of CDF (number of zVars) @return: number of zVars in the CDF @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, const.CDF_NUMzVARS_, ctypes.byref(count)) return count.value def __contains__(self, key): """Determines whether a particular variable name is in the CDF @note: Essentially an efficiency function; L{__iter__} is called if this isn't defined @param key: key/variable name to check @type key: string @return: True if L{key} is the name of a variable in CDF, else False @rtype: Boolean """ try: foo = self[key] return True except KeyError as e: expected = str(key) + \ ": NO_SUCH_VAR: Named variable not found in this CDF." if expected in e.args: return False raise def __repr__(self): """Returns representation of CDF Cannot return anything that can be eval'd to create a copy of the CDF, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<CDF:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the CDF This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.CDF`, just the names, types, and sizes of all variables. (Attributes are not listed.) @return: description of the variables in the CDF @rtype: str """ if self._opened: return '\n'.join([key + ': ' + str(value) for (key, value) in sorted(self.items())]) #can get away with this sort because second value in tuple isn't #compared unless first are different, and variable name is unique. else: if isinstance(self.pathname, str): return 'Closed CDF {0}'.format(self.pathname) else: return 'Closed CDF {0}'.format(self.pathname.decode('ascii')) def _open(self, readonly=True): """Opens the CDF file (called on init) Will open an existing CDF file read/write. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.OPEN_, const.CDF_, self.pathname, ctypes.byref(self._handle)) self._opened = True if readonly: #Default is RW self.readonly(readonly) def _create(self): """Creates (and opens) a new CDF file Created at ``pathname``. Assumes zero-dimension r variables Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.CREATE_, const.CDF_, self.pathname, ctypes.c_long(0), (ctypes.c_long * 1)(0), ctypes.byref(self._handle)) self._opened = True def _from_master(self, master_path): """Creates a new CDF from a master CDF file ``master_path`` is copied to ``pathname`` and opened. Parameters ========== master_path : string location of the master CDF file Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. 
note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ if os.path.exists(self.pathname): raise CDFError(const.CDF_EXISTS) shutil.copy2(master_path, self.pathname) self._open(False) def _call(self, *args, **kwargs): """Select this CDF as current and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. Parameters ========== args : various, see :py:mod:`ctypes`. Passed directly to the CDF library interface. Useful constants are defined in the :doc:`const <pycdf_const>` module of this package. Returns ======= out : ctypes.c_long CDF status from the library .. note: Terminal NULL_ is automatically added to ``args``. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. """ return lib.call(const.SELECT_, const.CDF_, self._handle, *args, **kwargs) def clone(self, zVar, name=None, data=True): """ Clone a zVariable (from another CDF or this) into this CDF Parameters ========== zVar : :py:class:`Var` variable to clone Other Parameters ================ name : str Name of the new variable (default: name of the original) data : boolean (optional) Copy data, or only type, dimensions, variance, attributes? (default: True, copy data as well) Returns ======= out : :py:class:`Var` The newly-created zVar in this CDF """ if name is None: name = zVar.name() if name in self: del self[name] self.new(name, type=zVar.type(), recVary=zVar.rv(), dimVarys=zVar.dv(), dims=zVar._dim_sizes(), n_elements=zVar._nelems()) self[name].compress(*zVar.compress()) self[name].attrs.clone(zVar.attrs) if data: r = zVar._raw zVar._raw = True self.raw_var(name)[...] = zVar[...] zVar._raw = r return zVar def col_major(self, new_col=None): """ Finds the majority of this CDF file Other Parameters ================ new_col : boolean Specify True to change to column-major, False to change to row major, or do not specify to check the majority rather than changing it. (default is check only) Returns ======= out : boolean True if column-major, false if row-major """ if new_col != None: new_maj = const.COLUMN_MAJOR if new_col else const.ROW_MAJOR self._call(const.PUT_, const.CDF_MAJORITY_, new_maj) maj = ctypes.c_long(0) self._call(const.GET_, const.CDF_MAJORITY_, ctypes.byref(maj)) if not maj.value in (const.ROW_MAJOR.value, const.COLUMN_MAJOR.value): raise CDFError(const.BAD_MAJORITY) return maj.value == const.COLUMN_MAJOR.value def readonly(self, ro=None): """ Sets or check the readonly status of this CDF If the CDF has been changed since opening, setting readonly mode will have no effect. .. note:: Closing a CDF that has been opened readonly, or setting readonly False, may take a substantial amount of time if there are many variables in the CDF, as a (potentially large) cache needs to be cleared. Consider specifying ``readonly=False`` when opening the file if this is an issue. However, this may make some reading operations slower. Other Parameters ================ ro : Boolean True to set the CDF readonly, False to set it read/write, or leave out to check only. 
Returns ======= out : Boolean True if CDF is read-only, else False Raises ====== CDFError : if bad mode is set """ if ro == True: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYon) elif ro == False: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYoff) mode = ctypes.c_long(0) self._call(const.CONFIRM_, const.CDF_READONLY_MODE_, ctypes.byref(mode)) if mode.value == const.READONLYon.value: return True elif mode.value == const.READONLYoff.value: return False else: raise CDFError(const.BAD_READONLY_MODE.value) def checksum(self, new_val=None): """ Set or check the checksum status of this CDF. If checksums are enabled, the checksum will be verified every time the file is opened. Other Parameters ================ new_val : boolean True to enable checksum, False to disable, or leave out to simply check. Returns ======= out : boolean True if the checksum is enabled or False if disabled """ if new_val != None: self._call(const.PUT_, const.CDF_CHECKSUM_, const.MD5_CHECKSUM if new_val else const.NO_CHECKSUM) chk = ctypes.c_long(0) self._call(const.GET_, const.CDF_CHECKSUM_, ctypes.byref(chk)) if not chk.value in (const.MD5_CHECKSUM.value, const.NO_CHECKSUM.value): raise CDFError(const.BAD_CHECKSUM) return chk.value == const.MD5_CHECKSUM.value def close(self): """ Closes the CDF file Although called on object destruction (:meth:`~CDF.__del__`), to ensure all data are saved, the user should explicitly call :meth:`~CDF.close` or :meth:`~CDF.save`. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.CLOSE_, const.CDF_) self._opened = False def compress(self, comptype=None, param=None): """ Set or check the compression of this CDF Sets compression on entire *file*, not per-variable. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) Returns ======= out : tuple (comptype, param) currently in effect See Also ======== :meth:`Var.compress` Examples ======== Set file ``cdffile`` to gzip compression, compression level 9: >>> cdffile.compress(pycdf.const.GZIP_COMPRESSION, 9) """ return _compress(self, comptype, param) def new(self, name, data=None, type=None, recVary=True, dimVarys=None, dims=None, n_elements=None, compress=None, compress_param=None): """ Create a new zVariable in this CDF .. note:: Either ``data`` or ``type`` must be specified. If type is not specified, it is guessed from ``data``. Parameters ========== name : str name of the new variable Other Parameters ================ data data to store in the new variable. If this has a an ``attrs`` attribute (e.g., :class:`~spacepy.datamodel.dmarray`), it will be used to populate attributes of the new variable. type : ctypes.c_long CDF type of the variable, from :mod:`~pycdf.const`. See section 2.5 of the CDF user's guide for more information on CDF data types. recVary : boolean record variance of the variable (default True) dimVarys : list of boolean dimension variance of each dimension, default True for all dimensions. 
dims : list of int size of each dimension of this variable, default zero-dimensional. Note this is the dimensionality as defined by CDF, i.e., for record-varying variables it excludes the leading record dimension. See :py:class:`Var`. n_elements : int number of elements, should be 1 except for CDF_CHAR, for which it's the length of the string. compress : ctypes.c_long Compression to apply to this variable, default None. See :py:meth:`Var.compress`. compress_param : ctypes.c_long Compression parameter if compression used; reasonable default is chosen. See :py:meth:`Var.compress`. Returns ======= out : :py:class:`Var` the newly-created zVariable Raises ====== ValueError : if neither data nor sufficient typing information is provided. Notes ===== Any given data may be representable by a range of CDF types; if the type is not specified, pycdf will guess which the CDF types which can represent this data. This breaks down to: #. If input data is a numpy array, match the type of that array #. Proper kind (numerical, string, time) #. Proper range (stores highest and lowest number provided) #. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: #. Type that matches precision of data first, then #. integer type before float type, then #. Smallest type first, then #. signed type first, then #. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if ``data`` specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: #. absolute values between 0 and 3e-39 #. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. """ if type in (const.CDF_EPOCH16, const.CDF_INT8, const.CDF_TIME_TT2000) \ and self.backward: raise ValueError('Cannot use EPOCH16, INT8, or TIME_TT2000 ' 'in backward-compatible CDF') if not lib.supports_int8 and \ type in (const.CDF_INT8, const.CDF_TIME_TT2000): raise ValueError('INT8 and TIME_TT2000 require CDF library 3.4.0') if data is None: if type is None: raise ValueError('Must provide either data or a CDF type.') if dims is None: dims = [] if n_elements is None: n_elements = 1 else: (guess_dims, guess_types, guess_elements) = _Hyperslice.types(data) if dims is None: if recVary: if guess_dims == (): raise ValueError( 'Record-varying data cannot be scalar. 
' 'Specify NRV with CDF.new() or put data in array.') dims = guess_dims[1:] else: dims = guess_dims if type is None: type = guess_types[0] if type == const.CDF_EPOCH16.value and self.backward: type = const.CDF_EPOCH if n_elements is None: n_elements = guess_elements if dimVarys is None: dimVarys = [True for i in dims] recVary = const.VARY if recVary else const.NOVARY dimVarys = [const.VARY if dimVary else const.NOVARY for dimVary in dimVarys] if not hasattr(type, 'value'): type = ctypes.c_long(type) if type.value == const.CDF_INT8.value and not lib.supports_int8: raise ValueError( '64-bit integer support require CDF library 3.4.0') if type.value in (const.CDF_EPOCH16.value, const.CDF_INT8.value, const.CDF_TIME_TT2000.value) \ and self.backward: raise ValueError('Data requires EPOCH16, INT8, or TIME_TT2000; ' 'incompatible with backward-compatible CDF') new_var = Var(self, name, type, n_elements, dims, recVary, dimVarys) if compress != None: new_var.compress(compress, compress_param) if data is not None: new_var[...] = data if hasattr(data, 'attrs'): new_var.attrs.clone(data.attrs) return new_var def raw_var(self, name): """ Get a "raw" :class:`Var` object. Normally a :class:`Var` will perform translation of values for certain types (to/from Unicode for CHAR variables on Py3k, and to/from datetime for all time types). A "raw" object does not perform this translation, on read or write. This does *not* affect the data on disk, and in fact it is possible to maintain multiple Python objects with access to the same zVariable. Parameters ========== name : str name or number of the zVariable """ v = self[name] v._raw = True return v def save(self): """ Saves the CDF file but leaves it open. If closing the CDF, :meth:`close` is sufficient; there is no need to call :meth:`save` before :meth:`close`. .. note:: Relies on an undocumented call of the CDF C library, which is also used in the Java interface. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.SAVE_, const.CDF_) def copy(self): """ Make a copy of all data and attributes in this CDF Returns ======= out : :py:class:`CDFCopy` :class:`~spacepy.datamodel.SpaceData`-like object of all data """ return CDFCopy(self) def version(self): """ Get version of library that created this CDF Returns ======= out : tuple version of CDF library, in form (version, release, increment) """ ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) self._call(const.GET_, const.CDF_VERSION_, ctypes.byref(ver), const.GET_, const.CDF_RELEASE_, ctypes.byref(rel), const.GET_, const.CDF_INCREMENT_, ctypes.byref(inc)) return (ver.value, rel.value, inc.value) def _get_attrs(self): """Get attribute list Provide access to the CDF's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = gAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. 
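For example (the attribute name and value are illustrative), a global attribute can be set and its first entry read back through this dictionary:

>>> cdffile.attrs['Project'] = 'Example project name'
>>> project = cdffile.attrs['Project'][0]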
""") def var_num(self, varname): """Get the variable number of a particular variable name This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : int Variable number of this zvariable. """ num = self._var_nums.get(varname, None) if num is None: #Copied from Var._get, which can hopefully be thinned varNum = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMBER_, varname, ctypes.byref(varNum)) num = varNum.value self._var_nums[varname] = num return num def attr_num(self, attrname): """Get the attribute number and scope by attribute name This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : tuple attribute number, scope (True for global) of this attribute """ res = self._attr_info.get(attrname, None) if res is None: #Copied from Var._get, which can hopefully be thinned attrNum = ctypes.c_long(0) self._call(const.GET_, const.ATTR_NUMBER_, attrname, ctypes.byref(attrNum)) scope = ctypes.c_long(0) self._call(const.SELECT_, const.ATTR_, attrNum, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) res = (attrNum.value, scope) self._attr_info[attrname] = res return res def clear_attr_from_cache(self, attrname): """Mark an attribute deleted in the name-to-number cache Will remove an attribute, and all attributes with higher numbers, from the attribute cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the attribute. Not this is NOT a string in Python 3! """ num, scope = self.attr_num(attrname) #All numbers higher than this are renumbered for a, n in list(self._attr_info.items()): if n[0] >= num: del self._attr_info[a] def clear_from_cache(self, varname): """Mark a variable deleted in the name-to-number cache Will remove a variable, and all variables with higher numbers, from the variable cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! """ num = self.var_num(varname) #All numbers higher than this are renumbered for v, n in list(self._var_nums.items()): if n >= num: del self._var_nums[v] def add_attr_to_cache(self, attrname, num, scope): """Add an attribute to the name-to-number cache This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable scope : bool True if global scope; False if variable scope. 
""" self._attr_info[attrname] = (num, scope) def add_to_cache(self, varname, num): """Add a variable to the name-to-number cache This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable """ self._var_nums[varname] = num #Note there is no function for delete, currently handled in Var.rename #and Attr.rename by just deleting from the dict directly. Maybe this #should be differen (maybe should be possible to follow a variable across #a rename...) class Var(MutableSequence): """ A CDF variable. This object does not directly store the data from the CDF; rather, it provides access to the data in a format that much like a Python list or numpy :class:`~numpy.ndarray`. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. The CDF user's guide, section 2.3, provides background on variables. .. note:: Not intended to be created directly; use methods of :class:`CDF` to gain access to a variable. A record-varying variable's data are viewed as a hypercube of dimensions n_dims+1 (the extra dimension is the record number). They are indexed in row-major fashion, i.e. the last index changes most frequently / is contiguous in memory. If the CDF is column-major, the data are transformed to row-major before return. Non record-varying variables are similar, but do not have the extra dimension of record number. Variables can be subscripted by a multidimensional index to return the data. Indices are in row-major order with the first dimension representing the record number. If the CDF is column major, the data are reordered to row major. Each dimension is specified by standard Python `slice <http://docs.python.org/tutorial/introduction.html#strings>`_ notation, with dimensions separated by commas. The ellipsis fills in any missing dimensions with full slices. The returned data are lists; Python represents multidimensional arrays as nested lists. The innermost set of lists represents contiguous data. .. note:: numpy 'fancy indexing' is *not* supported. Degenerate dimensions are 'collapsed', i.e. no list of only one element will be returned if a single subscript is specified instead of a range. (To avoid this, specify a slice like 1:2, which starts with 1 and ends before 2). Two special cases: 1. requesting a single-dimension slice for a record-varying variable will return all data for that record number (or those record numbers) for that variable. 2. Requests for multi-dimensional variables may skip the record-number dimension and simply specify the slice on the array itself. In that case, the slice of the array will be returned for all records. In the event of ambiguity (e.g., single-dimension slice on a one-dimensional variable), case 1 takes priority. Otherwise, mismatch between the number of dimensions specified in the slice and the number of dimensions in the variable will cause an :exc:`~exceptions.IndexError` to be thrown. This all sounds very complicated but it is essentially attempting to do the 'right thing' for a range of slices. An unusual case is scalar (zero-dimensional) non-record-varying variables. Clearly they cannot be subscripted normally. 
In this case, use the ``[...]`` syntax meaning 'access all data.': >>> import pycdf >>> testcdf = pycdf.CDF('test.cdf', '') >>> variable = testcdf.new('variable', recVary=False, ... type=pycdf.const.CDF_INT4) >>> variable[...] = 10 >>> variable <Var: CDF_INT4 [] NRV > >>> variable[...] 10 Reading any empty non-record-varying variable will return an empty with the same *number* of dimensions, but all dimensions will be of zero length. The scalar is, again, a special case: due to the inability to have a numpy array which is both zero-dimensional and empty, reading an NRV scalar variable with no data will return an empty one-dimensional array. This is really not recommended. As a list type, variables are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_; iterating over a variable returns a single complete record at a time. This is all clearer with examples. Consider a variable ``B_GSM``, with three elements per record (x, y, z components) and fifty records in the CDF. Then: 1. ``B_GSM[0, 1]`` is the y component of the first record. 2. ``B_GSM[10, :]`` is a three-element list, containing x, y, and z components of the 11th record. As a shortcut, if only one dimension is specified, it is assumed to be the record number, so this could also be written ``B_GSM[10]``. 3. ``B_GSM[...]`` reads all data for ``B_GSM`` and returns it as a fifty-element list, each element itself being a three-element list of x, y, z components. Multidimensional example: consider fluxes stored as a function of pitch angle and energy. Such a variable may be called Flux and stored as a two-dimensional array, with the first dimension representing (say) ten energy steps and the second, eighteen pitch angle bins (ten degrees wide, centered from 5 to 175 degrees). Assume 100 records stored in the CDF (i.e. 100 different times). 1. ``Flux[4]`` is a list of ten elements, one per energy step, each element being a list of 18 fluxes, one per pitch bin. All are taken from the fifth record in the CDF. 2. ``Flux[4, :, 0:4]`` is the same record, all energies, but only the first four pitch bins (roughly, field-aligned). 3. ``Flux[..., 0:4]`` is a 100-element list (one per record), each element being a ten-element list (one per energy step), each containing fluxes for the first four pitch bins. This slicing notation is very flexible and allows reading specifically the desired data from the CDF. All data are, on read, converted to appropriate Python data types; EPOCH, EPOCH16, and TIME_TT2000 types are converted to :class:`~datetime.datetime`. Data are returned in numpy arrays. .. note:: Although pycdf supports TIME_TT2000 variables, the Python :class:`~datetime.datetime` object does not support leap seconds. Thus, on read, any seconds past 59 are truncated to 59.999999 (59 seconds, 999 milliseconds, 999 microseconds). Potentially useful list methods and related functions: - `count <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `in <http://docs.python.org/reference/expressions.html#in>`_ - `index <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `len <http://docs.python.org/library/functions.html#len>`_ - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - `sorted <http://docs.python.org/library/functions.html#sorted>`_ The topic of array majority can be very confusing; good background material is available at `IDL Array Storage and Indexing <http://www.idlcoyote.com/misc_tips/colrow_major.html>`_. 
In brief, *regardless of the majority stored in the CDF*, pycdf will always present the data in the native Python majority, row-major order, also known as C order. This is the default order in `NumPy <http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html #internal-memory-layout-of-an-ndarray>`_. However, packages that render image data may expect it in column-major order. If the axes seem 'swapped' this is likely the reason. The :attr:`~Var.attrs` Python attribute acts as a dictionary referencing zAttributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`zAttrList` for more on the dictionary of attributes. With writing, as with reading, every attempt has been made to match the behavior of Python lists. You can write one record, many records, or even certain elements of all records. There is one restriction: only the record dimension (i.e. dimension 0) can be resized by write, as all records in a variable must have the same dimensions. Similarly, only whole records can be deleted. .. note:: Unusual error messages on writing data usually mean that pycdf is unable to interpret the data as a regular array of a single type matching the type and shape of the variable being written. A 5x4 array is supported; an irregular array where one row has five columns and a different row has six columns is not. Error messages of this type include: - ``Data must be well-formed, regular array of number, string, or datetime`` - ``setting an array element with a sequence.`` - ``shape mismatch: objects cannot be broadcast to a single shape`` For these examples, assume Flux has 100 records and dimensions [2, 3]. Rewrite the first record without changing the rest: >>> Flux[0] = [[1, 2, 3], [4, 5, 6]] Writes a new first record and delete all the rest: >>> Flux[...] = [[1, 2, 3], [4, 5, 6]] Write a new record in the last position and add a new record after: >>> Flux[99:] = [[[1, 2, 3], [4, 5, 6]], ... [[11, 12, 13], [14, 15, 16]]] Insert two new records between the current number 5 and 6: >>> Flux[5:6] = [[[1, 2, 3], [4, 5, 6]], [[11, 12, 13], ... [14, 15, 16]]] This operation can be quite slow, as it requires reading and rewriting the entire variable. (CDF does not directly support record insertion.) Change the first element of the first two records but leave other elements alone: >>> Flux[0:2, 0, 0] = [1, 2] Remove the first record: >>> del Flux[0] Removes record 5 (the sixth): >>> del Flux[5] Due to the need to work around a bug in the CDF library, this operation can be quite slow. Delete *all data* from ``Flux``, but leave the variable definition intact: >>> del Flux[...] .. note:: Although this interface only directly supports zVariables, zMode is set on opening the CDF so rVars appear as zVars. See p.24 of the CDF user's guide; pyCDF uses zMode 2. .. autosummary:: ~Var.attrs ~Var.compress ~Var.copy ~Var.dtype ~Var.dv ~Var.insert ~Var.name ~Var.rename ~Var.rv ~Var.shape ~Var.type .. attribute:: Var.attrs zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. .. automethod:: compress .. automethod:: copy .. autoattribute:: dtype .. automethod:: dv .. automethod:: insert .. automethod:: name .. automethod:: rename .. automethod:: rv .. autoattribute:: shape .. 
automethod:: type """ def __init__(self, cdf_file, var_name, *args): """Create or locate a variable Parameters ========== cdf_file : :py:class:`pycdf.CDF` CDF file containing this variable var_name : string name of this variable Other Parameters ================ args additional arguments passed to :py:meth:`_create`. If none, opens an existing variable. If provided, creates a new one. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning """ self.cdf_file = cdf_file #This is the definitive "identify" of variable self._name = None self._type = None #CDF type (long) self._raw = False #Raw access (skip all conversions) if len(args) == 0: self._get(var_name) else: self._create(var_name, *args) #Weak reference to attribute list (use attrs instead) #This avoids a reference loop self._attrlistref = weakref.ref(zAttrList(self)) def __getitem__(self, key): """Returns a slice from the data array. Details under :py:class:`pycdf.Var`. @return: The data from this variable @rtype: list-of-lists of appropriate type. @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) #Hyperslice mostly catches this sort of thing, but #an empty variable is a special case, since we might want to #WRITE to 0th record (which Hyperslice also supports) but #can't READ from it, and iterating over tries to read from it. if hslice.rv: if hslice.dimsizes[0] == 0 and hslice.degen[0] and \ hslice.starts[0] == 0: raise IndexError('record index out of range') #For NRV, again hslice will assume 0th record exists since we might #want to write. So ANY degenerate dim other than the glued-on 0th #suggests an explicit index that should fail. None degenerate suggests #make an empty array. #Note this is pulling a lot of hyperslice stuff into getitem! elif hslice.dimsizes[0] == 0: if len(hslice.degen) > 1 and max(hslice.degen[1:]): raise IndexError('record index out of range') else: #The zero-length dimension is degenerate so it gets chopped, #and you can't have a zero-length numpy array that still #maintains the size of all other dimensions. So just force #a zero-dim array and the rest will follow hslice.counts[...] = 0 #If this is a scalar, need to make a single non-degenerate #dimension so it can be empty. if len(hslice.counts) == 1: hslice.degen[0] = False result = hslice.create_array() if hslice.counts[0] != 0: hslice.select() lib.call(const.GET_, const.zVAR_HYPERDATA_, result.ctypes.data_as(ctypes.c_void_p)) return hslice.convert_input_array(result) def __delitem__(self, key): """Removes a record (or set of records) from the CDF Only whole records can be deleted, so the del call must either specify only one dimension or it must specify all elements of the non-record dimensions. This is *not* a way to resize a variable! Deleting records from the middle of a variable may be very slow in some circumstances. To work around a bug in CDF library versions 3.4.0 and before, all the data must be read in, the requested deletions done, and then all written back out. 
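For example, removing records 5 through 9 (inclusive) of ``Flux`` with a single slice (the slice must still cover only whole records):

>>> del Flux[5:10]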
@param key: index or slice to delete @type key: int or slice @raise TypeError: if an attempt is made to delete from a non record-varying variable, or to delete below the record level """ if not self.rv(): raise TypeError('Cannot delete records from non-record-varying ' 'variable.') hslice = _Hyperslice(self, key) if hslice.dims > 1 and (hslice.counts[1:] != hslice.dimsizes[1:]).any(): raise TypeError('Can only delete entire records.') if hslice.counts[0] == 0: return start = hslice.starts[0] count = hslice.counts[0] interval = hslice.intervals[0] dimsize = hslice.dimsizes[0] self._call() dangerous_delete = False if lib._del_middle_rec_bug and \ (interval != 1 or (start != 0 and start + count < dimsize)): #delete from middle is dangerous if only have one index entry entries = ctypes.c_long(0) lib.call(const.GET_, const.zVAR_nINDEXENTRIES_, ctypes.byref(entries)) dangerous_delete = (entries.value == 1) if dangerous_delete: data = self[...] data = numpy.delete( data, numpy.arange(start, start + count * interval, interval), 0) self[0:dimsize - count] = data first_rec = dimsize - count last_rec = dimsize - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif interval == 1: first_rec = ctypes.c_long(start) last_rec = ctypes.c_long(start + count - 1) lib.call(const.DELETE_, const.zVAR_RECORDS_, first_rec, last_rec) else: self._call() #delete from end to avoid renumbering of records for recno in range(start + (count - 1) * interval, start - 1, -1 * interval): lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(recno), ctypes.c_long(recno)) def __setitem__(self, key, data): """Puts a slice into the data array. Details under :py:class:`pycdf.Var`. @param key: index or slice to store @type key: int or slice @param data: data to store @type data: numpy.array @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. 
IndexError will @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) n_recs = hslice.counts[0] hslice.expand(data) cdf_type = self.type() if cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) else: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=self._np_type()) if cdf_type == const.CDF_EPOCH16.value: datashape = data.shape[:-1] else: datashape = data.shape #Check data sizes if datashape != tuple(hslice.expected_dims()): raise ValueError('attempt to assign data of dimensions ' + str(datashape) + ' to slice of dimensions ' + str(tuple(hslice.expected_dims()))) #Flip majority and reversed dimensions, see convert_input_array data = hslice.convert_output_array(data) #Handle insertions and similar weirdness if hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Specified slice ends before last record, so insert in middle saved_data = self[hslice.starts[0] + n_recs:] if hslice.counts[0] > 0: hslice.select() lib.call(const.PUT_, const.zVAR_HYPERDATA_, data.ctypes.data_as(ctypes.c_void_p)) if hslice.counts[0] < n_recs: first_rec = hslice.starts[0] + hslice.counts[0] last_rec = hslice.dimsizes[0] - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Put saved data in after inserted data self[hslice.starts[0] + hslice.counts[0]:] = saved_data def extend(self, data): """ Append multiple values to the end of this variable This is an efficiency function which overrides the base implementation in MutableSequence. Parameters ---------- data : the data to append """ self[len(self):] = data def insert(self, index, data): """ Inserts a *single* record before an index Parameters ---------- index : int index before which to insert the new record data : the record to insert """ self[index:index] = [data] def _create(self, var_name, datatype, n_elements = 1, dims = (), recVary = const.VARY, dimVarys = None): """Creates a new zVariable @param var_name: name of this variable @type var_name: string @param datatype: CDF data type @type datatype: ctypes.c_long @param n_elements: number of elements (should be 1 except for CDF_CHAR variables). @type n_elements: long @param dims: size of each dimension for multi-dimensional variable, or empty for a zero-dimensional @type dims: sequence of long @param recVary: record variance for this variable (VARY/NOVARY) @type recVary: long @param dimVarys: array of VARY or NOVARY, variance for each dimension @type dimVarys: sequence of long @return: new variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.new}. 
""" dim_array = (ctypes.c_long * len(dims))(*dims) enc_name = var_name.encode('ascii') if dimVarys is None: dim_vary_array = (ctypes.c_long * (len(dims) if len(dims) > 0 else 1))(const.VARY) else: dim_vary_array = (ctypes.c_long * len(dims))(*dimVarys) varNum = ctypes.c_long(0) self.cdf_file._call(const.CREATE_, const.zVAR_, enc_name, datatype, ctypes.c_long(n_elements), ctypes.c_long(len(dims)), dim_array, recVary, dim_vary_array, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) def _delete(self): """Removes this zVariable from the CDF @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ self._call(const.DELETE_, const.zVAR_) self.cdf_file.clear_from_cache(self._name) self._name = None def _get(self, var_name): """Gets an existing zVariable @param var_name: name of this variable @type var_name: string @return: variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.__getitem__}. """ if isinstance(var_name, str_classes): try: enc_name = var_name.encode('ascii').rstrip() except AttributeError: enc_name = var_name.rstrip() #already in ASCII #'touch' CDF to cause an error if the name isn't there; get number varNum = ctypes.c_long(0) self.cdf_file._call(const.GET_, const.zVAR_NUMBER_, enc_name, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) else: #Looking up by number name = ctypes.create_string_buffer(const.CDF_VAR_NAME_LEN256+1) self.cdf_file._call(const.SELECT_, const.zVAR_, ctypes.c_long(var_name), const.GET_, const.zVAR_NAME_, name) self._name = name.value.rstrip() self.cdf_file.add_to_cache(self._name, var_name) def _num(self): """Returns the zVar number for this variable @return: number of this zVar @rtype: int """ return self.cdf_file.var_num(self._name) def __len__(self): """Get number of records for this variable in this file @return: Number of records @rtype: long @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ count = ctypes.c_long(0) self._call(const.GET_, const.zVAR_MAXREC_, ctypes.byref(count)) return (count.value + 1) def __repr__(self): """Returns representation of the variable Cannot return anything that can be eval'd to create a copy, so just wrap the informal representation in angle brackets. @return: info on this zVar @rtype: str """ return '<Var:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the variable This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.Var`. 
@return: info on this zVar, CDFTYPE [dimensions] NRV (if not record-varying) @rtype: str """ if self.cdf_file._opened: cdftype = self.type() chartypes = (const.CDF_CHAR.value, const.CDF_UCHAR.value) rv = self.rv() typestr = lib.cdftypenames[cdftype] + \ ('*' + str(self._nelems()) if cdftype in chartypes else '' ) if rv: sizestr = str([len(self)] + self._dim_sizes()) else: sizestr = str(self._dim_sizes()) return typestr + ' ' + sizestr + ('' if rv else ' NRV') else: if isinstance(self._name, str): return 'zVar "{0}" in closed CDF {1}'.format( self._name, self.cdf_file.pathname) else: return 'zVar "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self.cdf_file.pathname.decode('ascii')) def _n_dims(self): """Get number of dimensions for this variable @return: the number of dimensions @rtype: long """ n_dims = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMDIMS_, ctypes.byref(n_dims)) return n_dims.value def _dim_sizes(self): """Get the dimension sizes for this variable @return: sequence of sizes @rtype: sequence of long @note: This will always be in Python order (i.e. row major, last index iterates most quickly), *regardless* of the majority of the CDF. """ sizes = (ctypes.c_long * const.CDF_MAX_DIMS)(0) self._call(const.GET_, const.zVAR_DIMSIZES_, sizes) sizes = sizes[0:self._n_dims()] return sizes def rv(self, new_rv=None): """ Gets or sets whether this variable has record variance If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Other Parameters ================ new_rv : boolean True to change to record variance, False to change to NRV, unspecified to simply check variance. Returns ======= out : Boolean True if record varying, False if NRV """ if new_rv != None: self._call(const.PUT_, const.zVAR_RECVARY_, const.VARY if new_rv else const.NOVARY) vary = ctypes.c_long(0) self._call(const.GET_, const.zVAR_RECVARY_, ctypes.byref(vary)) return vary.value != const.NOVARY.value def dv(self, new_dv=None): """ Gets or sets dimension variance of each dimension of variable. If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Parameters ========== new_dv : list of boolean Each element True to change that dimension to dimension variance, False to change to not dimension variance. (Unspecified to simply check variance.) Returns ======= out : list of boolean True if that dimension has variance, else false. """ ndims = self._n_dims() if new_dv != None: if len(new_dv) != ndims: raise ValueError('Must specify variance for ' + str(ndims) + 'dimensions.') varies = (ctypes.c_long * ndims)( *[const.VARY if dv else const.NOVARY for dv in new_dv]) self._call(const.PUT_, const.zVAR_DIMVARYS_, varies) if ndims == 0: return [] varies = (ctypes.c_long * const.CDF_MAX_DIMS)() self._call(const.GET_, const.zVAR_DIMVARYS_, varies) return [dv != const.NOVARY.value for dv in varies[0:ndims]] def _call(self, *args, **kwargs): """Select this CDF and variable and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. @param args: Passed directly to the CDF library interface. Useful constants are defined in the :py:mod:`pycdf.const` module of this package. @type args: various, see :py:mod:`ctypes`. 
@return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self.cdf_file._call( const.SELECT_, const.zVAR_, ctypes.c_long(self.cdf_file.var_num(self._name)), *args, **kwargs) def _np_type(self): """Returns the numpy type of this variable This is the numpy type that will come directly out of the CDF; see :meth:`dtype` for the representation post-conversion. Raises ====== CDFError : for library-reported error or failure to find numpy type Returns ======= out : dtype numpy dtype that will hold value from this variable """ cdftype = self.type() if cdftype == const.CDF_CHAR.value or cdftype == const.CDF_UCHAR.value: return numpy.dtype('S' + str(self._nelems())) try: return lib.numpytypedict[cdftype] except KeyError: raise CDFError(const.BAD_DATA_TYPE) def type(self, new_type=None): """ Returns or sets the CDF type of this variable Parameters ========== new_type : ctypes.c_long the new type from :mod:`~pycdf.const` Returns ======= out : int CDF type """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) n_elements = ctypes.c_long(self._nelems()) self._call(const.PUT_, const.zVAR_DATASPEC_, new_type, n_elements) self._type = None if self._type is None: cdftype = ctypes.c_long(0) self._call(const.GET_, const.zVAR_DATATYPE_, ctypes.byref(cdftype)) self._type = cdftype.value return self._type def _nelems(self): """Number of elements for each value in this variable This is the length of strings for CHAR and UCHAR, should be 1 otherwise. @return: length of strings @rtype: int """ nelems = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMELEMS_, ctypes.byref(nelems)) return nelems.value def name(self): """ Returns the name of this variable Returns ======= out : str variable's name """ if isinstance(self._name, str): return self._name elif isinstance(self._name, bytes): return self._name.decode() def compress(self, comptype=None, param=None): """ Set or check the compression of this variable Compression may not be changeable on variables with data already written; even deleting the data may not permit the change. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
Returns ======= out : tuple the (comptype, param) currently in effect """ return _compress(self, comptype, param) def copy(self): """ Copies all data and attributes from this variable Returns ======= out : :class:`VarCopy` list of all data in record order """ return VarCopy(self) def rename(self, new_name): """ Renames this variable Parameters ========== new_name : str the new name for this variable """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_VAR_NAME_LEN256: raise CDFError(const.BAD_VAR_NAME) self._call(const.PUT_, const.zVAR_NAME_, enc_name) self.cdf_file.add_to_cache( enc_name, self.cdf_file.var_num(self._name)) #Still in cache del self.cdf_file._var_nums[self._name] self._name = enc_name @property def shape(self): """ Provides the numpy array-like shape of this variable. Returns a tuple; first element is number of records (RV variable only) And the rest provide the dimensionality of the variable. .. note:: Assigning to this attribute will not change the shape. """ if self.rv(): return tuple([len(self)] + self._dim_sizes()) else: return tuple(self._dim_sizes()) @property def dtype(self): """ Provide the numpy dtype equivalent to the CDF type of this variable. Data from this variable will be returned in numpy arrays of this type. See Also -------- type """ cdftype = self.type() if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str is not bytes and not self._raw: return numpy.dtype('U' + str(self._nelems())) if cdftype in (const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value) and not self._raw: return numpy.dtype('O') return self._np_type() def _get_attrs(self): """Get attribute list Provide access to the zVar's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = zAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. """) class _Hyperslice(object): """Represents a CDF 'slice' used for the hyper CDF functions For internal module use only. @ivar dims: number of dimensions to this slice, usually number of dimensions to the variable plus one for the record, which represents the 0th (least rapidly varying) dimension. @type dims: int @ivar dimsizes: size of each dimension (0th is number of records) @type dimsizes: list of int @ivar starts: index of the start value for each dimension ('dimension indices' in CDF speak) @type starts: list of int @ivar counts: number of values to get from each dimension. Final result will be the product of everything in counts. ('dimension counts' in CDF speak) @type counts: numpy.array @ivar intervals: interval between successive indices to use for each dimension. ('dimension invervals' in CDF speak) @type intervals: list of int @ivar degen: is this dimension degenerate, i.e. should be removed in the returned dataset. A 3D array with one dimension degenerate will be returned as a 2D array (i.e. list-of-lists.) @type degen: numpy.array @ivar rev: should this dimension be returned in reverse order? 
@type rev: numpy.array @ivar column: is this slice in column-major mode (if false, row-major) @type column: boolean @ivar zvar: what CDF variable this object slices on @type zvar: :py:class:`pycdf.Var` @ivar expanded_key: fully-expanded version of the key passed to the constructor (all dimensions filled in) @type expanded_key: tuple @note: All dimension-related variables are stored row-major (Python order) """ def __init__(self, zvar, key): """Create a Hyperslice @param zvar: zVariable that this slices @type zvar: :py:class:`pycdf.Var` @param key: Python multi-dimensional slice as passed to __getitem__ @type key: tuple of slice and/or int @raise IndexError: if slice is out of range, mismatches dimensions, or otherwise unparsable. @raise ValueError: if slice has invalid values """ self.zvar = zvar self.rv = self.zvar.rv() #dim of records, + 1 record dim (NRV always is record 0) self.dims = zvar._n_dims() + 1 self.dimsizes = [len(zvar)] + \ zvar._dim_sizes() self.starts = [0] * self.dims self.counts = numpy.empty((self.dims,), dtype=numpy.int32) self.counts.fill(1) self.intervals = [1] * self.dims self.degen = numpy.zeros(self.dims, dtype=numpy.bool) self.rev = numpy.zeros(self.dims, dtype=numpy.bool) #key is: #1. a single value (integer or slice object) if called 1D #2. a tuple (of integers and/or slice objects) if called nD #3. Each item is either a single value (degenerate dim) # or a slice object. if not hasattr(key, '__len__'): #Not a container object, pack in tuple key = (key, ) if not self.rv: key = (0, ) + key #NRV, so always get 0th record (degenerate) key = self.expand_ellipsis(key, self.dims) if self.rv: #special-cases for RV variables if len(key) == 1: #get all data for this record(s) key = self.expand_ellipsis(key + (Ellipsis, ), self.dims) elif len(key) == self.dims - 1: #get same slice from each record key = (slice(None, None, None), ) + key if len(key) == self.dims: self.expanded_key = key for i in range(self.dims): idx = key[i] if hasattr(idx, 'start'): #slice (self.starts[i], self.counts[i], self.intervals[i], self.rev[i]) = \ self.convert_range(idx.start, idx.stop, idx.step, self.dimsizes[i]) else: #Single degenerate value if idx < 0: idx += self.dimsizes[i] if idx != 0 and (idx >= self.dimsizes[i] or idx < 0): raise IndexError('list index out of range') self.starts[i] = idx self.degen[i] = True else: raise IndexError('Slice does not match dimensions for zVar ' + str(zvar._name)) self.column = zvar.cdf_file.col_major() def expected_dims(self, data=None): """Calculate size of non-degenerate dimensions Figures out size, in each dimension, of expected input data @return: size of each dimension for this slice, excluding degenerate @rtype: list of int """ return [self.counts[i] for i in range(self.dims) if not self.degen[i]] def expand(self, data): """Expands the record dimension of this slice to hold a set of data If the length of data (outermost dimension) is larger than the record count (counts[0]) for this slice, expand the slice to hold all the data. This requires that the record dimension of the slice not be degenerate, and also that it not have been completely specified when the hyperslice was created (i.e. record dimension either ellipsis or no specified stop.) Does *not* expand any other dimension, since that's Very Hard in CDF. 
@param data: the data which are intended to be stored in this slice @type data: list """ rec_slice = self.expanded_key[0] if not self.rv or isinstance(data, str_classes) or self.degen[0] or \ not hasattr(rec_slice, 'stop'): return if len(data) < self.counts[0]: #Truncate to fit data if rec_slice.stop is None and rec_slice.step in (None, 1): self.counts[0] = len(data) elif len(data) > self.counts[0]: #Expand to fit data if rec_slice.step in (None, 1): self.counts[0] = len(data) def create_array(self): """Creates a numpy array to hold the data from this slice Returns ======= out : numpy.array array sized, typed, and dimensioned to hold data from this slice """ counts = self.counts degen = self.degen if self.column: counts = self.reorder(counts) degen = self.reorder(degen) #TODO: Forcing C order for now, revert to using self.column later array = numpy.empty( [counts[i] for i in range(len(counts)) if not degen[i]], self.zvar._np_type(), order='C') return numpy.require(array, requirements=('C', 'A', 'W')) def convert_input_array(self, buffer): """Converts a buffer of raw data from this slice EPOCH(16) variables always need to be converted. CHAR need converted to Unicode if py3k Parameters ========== buffer : numpy.array data as read from the CDF file Returns ======= out : numpy.array converted data """ result = self._flip_array(buffer) #Convert to derived types cdftype = self.zvar.type() if not self.zvar._raw: if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str != bytes: dt = numpy.dtype('U{0}'.format(result.dtype.itemsize)) result = numpy.require(numpy.char.array(result).decode(), dtype=dt) elif cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(result) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(result) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(result) return result def convert_output_array(self, buffer): """Convert a buffer of data that will go into this slice Parameters ========== buffer : numpy.array data to go into the CDF file Returns ======= out : numpy.array input with majority flipped and dimensions reversed to be suitable to pass directly to CDF library. """ buffer = self._flip_array(buffer) return numpy.require(buffer, requirements=('C', 'A', 'W')) def _flip_array(self, data): """ Operations for majority, etc. common between convert_input and _output """ cdftype = self.zvar.type() #Flip majority if any non-degenerate dimensions exist if self.column and not min(self.degen): #Record-number dim degen, swap whole thing if self.degen[0]: if cdftype == const.CDF_EPOCH16.value: #Maintain last dimension data = data.transpose( list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose() #Record-number dimension is not degenerate, so keep it first else: if cdftype == const.CDF_EPOCH16.value: data = data.transpose( [0] + list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose( [0] + list(range(len(data.shape) - 1, 0, -1))) #Reverse non-degenerate dimensions in rev #Remember that the degenerate indices are already gone! 
if self.rev.any(): sliced = [(slice(None, None, -1) if self.rev[i] else slice(None)) for i in range(self.dims) if not self.degen[i]] if cdftype == const.CDF_EPOCH16.value: #don't reverse last dim sliced.extend(slice(None)) data = operator.getitem(data, tuple(sliced)) return data def select(self): """Selects this hyperslice in the CDF Calls the CDF library to select the CDF, variable, records, and array elements corresponding to this slice. """ args = (const.SELECT_, const.zVAR_RECNUMBER_, ctypes.c_long(self.starts[0]), const.SELECT_, const.zVAR_RECCOUNT_, ctypes.c_long(self.counts[0]), const.SELECT_, const.zVAR_RECINTERVAL_, ctypes.c_long(self.intervals[0])) if self.dims > 1: dims = self.dims - 1 args += (const.SELECT_, const.zVAR_DIMINDICES_, (ctypes.c_long * dims)(*self.starts[1:]), const.SELECT_, const.zVAR_DIMCOUNTS_, (ctypes.c_long * dims)(*self.counts[1:]), const.SELECT_, const.zVAR_DIMINTERVALS_, (ctypes.c_long * dims)(*self.intervals[1:])) self.zvar._call(*args) @staticmethod def expand_ellipsis(slices, n_dims): """Expands any ellipses into correct number of full-size slices @param slices: tuple of slices, integers, or ellipse objects @type slices: tuple @param n_dims: number of dimensions this slice is over @type n_dims: int @return: L{slices} with ellipses replaced by appropriate number of full-dimension slices @rtype: tuple @raise IndexError: if ellipses specified when already have enough dimensions """ if slices is Ellipsis: return tuple([slice(None, None, None) for i in range(n_dims)]) #Elements might be numpy arrays, so can't use in/index idx = [i for i, v in enumerate(slices) if v is Ellipsis] if not idx: #no ellipsis return slices if len(idx) > 1: #multiples! raise IndexError('Ellipses can only be used once per slice.') idx = idx[0] #how many dims to expand ellipsis to #remember the ellipsis is in len(slices) and must be replaced! extra = n_dims - len(slices) + 1 if extra < 0: raise IndexError('too many indices') result = slices[0:idx] + (slice(None), ) * extra + slices[idx+1:] return result @staticmethod def check_well_formed(data): """Checks if input data is well-formed, regular array""" d = numpy.asanyarray(data) if d.dtype == numpy.object: #this is probably going to be bad try: len(d.flat[0]) except TypeError: #at least it's not a list pass else: raise ValueError( 'Data must be well-formed, regular array of number, ' 'string, or datetime') @staticmethod def dimensions(data): """Finds the dimensions of a nested list-of-lists @param data: data of which dimensions are desired @type data: list (of lists) @return: dimensions of L{data}, in order outside-in @rtype: list of int @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) _Hyperslice.check_well_formed(d) return d.shape @staticmethod def types(data, backward=False): """Find dimensions and valid types of a nested list-of-lists Any given data may be representable by a range of CDF types; infer the CDF types which can represent this data. This breaks down to: 1. Proper kind (numerical, string, time) 2. Proper range (stores highest and lowest number) 3. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: 1. Type that matches precision of data first, then 2. integer type before float type, then 3. Smallest type first, then 4. signed type first, then 5. specifically-named (CDF_BYTE) vs. 
generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if L{data} specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: 1. absolute values between 0 and 3e-39 2. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. @param data: data for which dimensions and CDF types are desired @type data: list (of lists) @param backward: limit to pre-CDF3 types @type backward: bool @return: dimensions of L{data}, in order outside-in; CDF types which can represent this data; number of elements required (i.e. length of longest string) @rtype: 3-tuple of lists ([int], [ctypes.c_long], [int]) @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) dims = d.shape elements = 1 types = [] _Hyperslice.check_well_formed(d) if d.dtype.kind in ('S', 'U'): #it's a string types = [const.CDF_CHAR, const.CDF_UCHAR] elements = d.dtype.itemsize if d.dtype.kind == 'U': #UTF-8 uses 4 bytes per elements //= 4 elif d.size and hasattr(numpy.ma.getdata(d).flat[0], 'microsecond'): if max((dt.microsecond % 1000 for dt in d.flat)) > 0: types = [const.CDF_EPOCH16, const.CDF_EPOCH, const.CDF_TIME_TT2000] else: types = [const.CDF_EPOCH, const.CDF_EPOCH16, const.CDF_TIME_TT2000] if backward: del types[types.index(const.CDF_EPOCH16)] del types[-1] elif not lib.supports_int8: del types[-1] elif d is data or isinstance(data, numpy.generic): #numpy array came in, use its type (or byte-swapped) types = [k for k in lib.numpytypedict if (lib.numpytypedict[k] == d.dtype or lib.numpytypedict[k] == d.dtype.newbyteorder()) and not k in lib.timetypes] if (not lib.supports_int8 or backward) \ and const.CDF_INT8.value in types: del types[types.index(const.CDF_INT8.value)] #Maintain priority to match the ordered lists below: #float/double (44, 45) before real (21/22), and #byte (41) before int (1) before char (51). So hack. #Consider making typedict an ordered dict once 2.6 is dead. types.sort(key=lambda x: x % 50, reverse=True) if not types: #not a numpy array, or can't parse its type if d.dtype.kind == 'O': #Object. 
Try to make it numeric #Can't do safe casting from Object, so try and compare #Basically try most restrictive to least restrictive trytypes = (numpy.uint64, numpy.int64, numpy.float64) for t in trytypes: try: newd = d.astype(dtype=t) except: #Failure to cast, try next type continue if (newd == d).all(): #Values preserved, use this type d = newd #Continue with normal guessing, as if a list break else: #fell through without a match raise ValueError( 'Cannot convert generic objects to CDF type.') if d.dtype.kind in ('i', 'u'): #integer minval = numpy.min(d) maxval = numpy.max(d) if minval < 0: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_INT2, const.CDF_INT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 15, 2 ** 31, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] else: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_UINT1, const.CDF_INT2, const.CDF_UINT2, const.CDF_INT4, const.CDF_UINT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 8, 2 ** 15, 2 ** 16, 2 ** 31, 2 ** 32, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] types = [t for (t, c) in zip(types, cutoffs) if c > maxval and (minval >= 0 or minval >= -c)] if (not lib.supports_int8 or backward) \ and const.CDF_INT8 in types: del types[types.index(const.CDF_INT8)] else: #float if dims is (): if d != 0 and (abs(d) > 1.7e38 or abs(d) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] else: absolutes = numpy.abs(d[d != 0]) if len(absolutes) > 0 and \ (numpy.max(absolutes) > 1.7e38 or numpy.min(absolutes) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] types = [t.value if hasattr(t, 'value') else t for t in types] return (dims, types, elements) @staticmethod def reorder(seq): """Reorders seq to switch array majority Used to take an array of subscripts between row and column majority. First element is not touched, being the record number. @param seq: a sequence of *subscripts* @type seq: sequence of integers @return: seq with all but element 0 reversed in order @rtype: sequence of integers """ return numpy.concatenate((seq[0:1], numpy.flipud(seq)[:-1])) @staticmethod def convert_range(start, stop, step, size): """Converts a start/stop/step range to start/count/interval (i.e. changes from Python-style slice to CDF-style) @param start: index to start a slice at, may be none or negative @type start: int @param stop: index at end of slice (one-past, standard Python), may be none or negative @type stop: int @param step: interval for stepping through stlice @type step: int @param size: size of list to slice @type size: int @return: (start, count, interval, rev) where: 1. start is the start index, normalized to be within the size of the list and negatives handled 2. count is the number of records in the slice, guaranteed to stop before the end 3. interval is the skip between records 4. rev indicates whether the sequence should be reversed @rtype: (int, int, int, boolean) """ (start, stop, step) = slice(start, stop, step).indices(size) if step < 0: step *= -1 count = int((start - stop + step - 1) / step) start = start - (count - 1) * step rev = True else: count = int((stop - start + step - 1) / step) rev = False if count < 0: count = 0 start = 0 return (start, count, step, rev) class Attr(MutableSequence): """An attribute, g or z, for a CDF .. 
warning:: This class should not be used directly, but only in its subclasses, :class:`gAttr` and :class:`zAttr`. The methods listed here are safe to use in the subclasses. Represents a CDF attribute, providing access to the Entries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. An introduction to CDF attributes can be found in section 2.4 of the CDF user's guide. Each element of the list is a single Entry of the appropriate type. The index to the elements is the Entry number. Multi-dimensional slicing is *not* supported; an Entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry .. autosummary:: ~Attr.append ~Attr.has_entry ~Attr.insert ~Attr.max_idx ~Attr.new ~Attr.number ~Attr.rename ~Attr.type .. automethod:: append .. automethod:: has_entry .. automethod:: insert .. automethod:: max_idx .. automethod:: new .. automethod:: number .. automethod:: rename .. automethod:: type """ def __init__(self, cdf_file, attr_name, create=False): """Initialize this attribute @param cdf_file: CDF file containing this attribute @type cdf_file: :py:class:`pycdf.CDF` @param attr_name: Name of this attribute @type attr_name: str @param create: True to create attribute, False to look up existing. @type create: bool """ self._cdf_file = cdf_file self._raw = False if isinstance(attr_name, str_classes): try: self._name = attr_name.encode('ascii') except AttributeError: self._name = attr_name attrno = ctypes.c_long() if create: self._cdf_file._call(const.CREATE_, const.ATTR_, self._name, self.SCOPE, ctypes.byref(attrno)) self._cdf_file.add_attr_to_cache( self._name, attrno.value, self.SCOPE == const.GLOBAL_SCOPE) else: #Ensure exists, and populate cache. See scope note below attrno, scope = self._cdf_file.attr_num(self._name) else: name = ctypes.create_string_buffer(const.CDF_ATTR_NAME_LEN256 + 1) scope = ctypes.c_long(0) self._cdf_file._call(const.SELECT_, const.ATTR_, ctypes.c_long(attr_name)) #Because it's possible to create a gAttr Python objecting #referencing an Attribute with variable scope, and vice-versa, #do NOT assume the scope matches #(Higher level code checks for that being a bad thing.) self._cdf_file._call( const.GET_, const.ATTR_NAME_, name, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) self._name = name.value.rstrip() if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) self._cdf_file.add_attr_to_cache(self._name, attr_name, scope) def __getitem__(self, key): """Return a slice of Entries. Because Attributes may be sparse, a multi-element slice will return None for those elements which do not have associated Entries. @param key: index or range of Entry number to return @type key: slice or int @return: a list of entries, appropriate type. @raise IndexError: if L{key} is an int and that Entry number does not exist. 
""" if key is Ellipsis: key = slice(None, None, None) if hasattr(key, 'indices'): idx = range(*key.indices(self.max_idx() + 1)) return [self._get_entry(i) if self.has_entry(i) else None for i in idx] else: if self.has_entry(key): return self._get_entry(key) else: raise IndexError('list index ' + str(key) + ' out of range.') def _check_other_entries(self, types): """Try to get the type of this entry from others in the Attribute For zAttrs, checks if all other Entries are the same type, and at least one doesn't match its zVar, i.e. Entry type dominates (otherwise assumption is the Var type dominates). For gAttrs, checks all other Entries, and gives priority to the one that's earliest in the possible type list and exists in other Entries. This is only one component of Entry type guessing! :param list types: CDF types that are candidates (match the data) :return: The type discerned from other Entries, or None """ if self.ENTRY_ == const.zENTRY_: #If everything else is the same entry type, #and one is not the same as its var, probably #all entries should be of that type cand_et = None #The Entry type that might work one_var_diff = False #One Var has a type different from Entry for num in range(self.max_idx() + 1): if not self.has_entry(num): continue vartype = self._cdf_file[num].type() entrytype = self.type(num) if vartype != entrytype: one_var_diff = True if cand_et is None: if not entrytype in types: return None #One var has Entry with "impossible" type cand_et = entrytype elif cand_et != entrytype: return None #Two vars have Entries with different types if one_var_diff and cand_et is not None: return cand_et else: # Of those types which exist in other entries, # find the one which is earliest # in types, i.e. the preferred type entrytypes = [self.type(num) for num in range(self.max_idx() + 1) if self.has_entry(num)] entrytypes = [et for et in entrytypes if et in types] if entrytypes: return types[ min([types.index(et) for et in entrytypes])] return None def __setitem__(self, key, data): """Set a slice of Entries. @param key: index or range of Entry numbers to set @type key: slice or int @param data: the data to set these entries to. Normally each entry should be a sequence; if a scalar is provided, it is treated as a single-element list. @type data: scalar or list @raise ValueError: if size of {data} does not match size of L{key} @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. 
""" if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): #Single value, promote everything a dimension idx = (key, key + 1, 1) data = [data] else: idx = key.indices(self.max_idx() + 1) if key.step is None or key.step > 0: #Iterating forward, extend slice to match data if len(data) > len(range(*idx)): idx = (idx[0], idx[0] + idx[2] * len(data), idx[2]) #get, and check, types and sizes for all data #checks first so don't have error after changing half the Entries data_idx = -1 typelist = [] for i in range(*idx): data_idx += 1 if data_idx >= len(data): continue datum = data[data_idx] if datum is None: typelist[i] = (None, None, None) continue (dims, types, elements) = _Hyperslice.types( datum, backward=self._cdf_file.backward) if len(types) <= 0: raise ValueError('Cannot find a matching CDF type.') if len(dims) > 1: raise ValueError('Entries must be scalar or 1D.') elif len(dims) == 1 and isinstance(datum[0], str_classes): raise ValueError('Entry strings must be scalar.') entry_type = None if self.has_entry(i): #If the entry already exists, match its type entry_type = self.type(i) if not entry_type in types: entry_type = None if entry_type is None: #Check other entries for this attribute entry_type = self._check_other_entries(types) if entry_type is None and self.ENTRY_ == const.zENTRY_: #Fall back to zVar type vartype = self._cdf_file[i].type() if vartype in types: entry_type = vartype else: entry_type = types[0] elif entry_type is None: entry_type = types[0] if not entry_type in lib.numpytypedict: raise ValueError('Cannot find a matching numpy type.') typelist.append((dims, entry_type, elements)) data_idx = -1 for i in range(*idx): data_idx += 1 if data_idx >= len(data) or data[data_idx] is None: if self.has_entry(i): del self[i] continue datum = data[data_idx] (dims, entry_type, elements) = typelist[data_idx] self._write_entry(i, datum, entry_type, dims, elements) def __delitem__(self, key): """Delete a slice of Entries. @param key: index or range of Entry numbers to delete @type key: slice or int @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. """ if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): idx = (key, key + 1, 1) else: idx = key.indices(self.max_idx() + 1) for i in range(*idx): self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(i), const.DELETE_, self.ENTRY_) def __iter__(self, current=0): """Iterates over all entries in this Attribute Returns data from one entry at a time until reaches the end. @note: Returned in entry-number order. """ while current <= self.max_idx(): if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current += 1 def __reversed__(self, current=None): """Iterates over all entries in this Attribute Returns data from one entry at a time, starting at end and going to beginning. @note: Returned in entry-number order. """ if current is None: current = self.max_idx() while current >= 0: if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current -= 1 def __len__(self): """Number of Entries for this Attr. NOT same as max Entry number. 
@return: Number of Entries @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_NUMENTRIES_, ctypes.byref(count)) return count.value def __repr__(self): """Returns representation of an attribute Cannot return anything that can be eval'd to create a copy of the attribtute, so just wrap the informal representation in angle brackets. @return: all the data in this attribute @rtype: str """ return '<\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute This is an 'informal' representation in that it cannot be evaluated directly to create an L{Attr}. @return: all the data in this attribute @rtype: str """ if self._cdf_file._opened: return '\n'.join([str(item) for item in self]) else: if isinstance(self._name, str): return 'Attribute "{0}" in closed CDF {1}'.format( self._name, self._cdf_file.pathname) else: return 'Attribute "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self._cdf_file.pathname.decode('ascii')) def insert(self, index, data): """Insert an entry at a particular number Inserts entry at particular number while moving all subsequent entries to one entry number later. Does not close gaps. Parameters ========== index : int index where to put the new entry data : data for the new entry """ max_entry = self.max_idx() if index > max_entry: #Easy case self[index] = data return for i in range(max_entry, index - 1, -1): if self.has_entry(i+1): self.__delitem__(i+1) if self.has_entry(i): self.new(self.__getitem__(i), type=self.type(i), number=i+1) self[index] = data def append(self, data): """Add an entry to end of attribute Puts entry after last defined entry (does not fill gaps) Parameters ========== data : data for the new entry """ self[self.max_idx() + 1] = data def _call(self, *args, **kwargs): """Select this CDF and Attr and call the CDF internal interface @param args: Passed directly to the CDF library interface. @type args: various, see :py:mod:`ctypes`. @return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self._cdf_file._call( const.SELECT_, const.ATTR_, ctypes.c_long(self._cdf_file.attr_num(self._name)[0]), *args, **kwargs) def _entry_len(self, number): """Number of elements in an Entry @param number: number of Entry @type number: int @return: number of elements @rtype: int """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') count = ctypes.c_long(0) self._call( const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_NUMELEMS_, ctypes.byref(count)) return count.value def type(self, number, new_type=None): """Find or change the CDF type of a particular Entry number Parameters ========== number : int number of Entry to check or change Other Parameters ================ new_type type to change this Entry to, from :mod:`~pycdf.const`. Omit to only check type. Returns ======= out : int CDF variable type, see :mod:`~pycdf.const` Notes ===== If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) size = ctypes.c_long(self._entry_len(number)) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATASPEC_, new_type, size, ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') cdftype = ctypes.c_long(0) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATATYPE_, ctypes.byref(cdftype), ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') return cdftype.value def has_entry(self, number): """Check if this attribute has a particular Entry number Parameters ========== number : int number of Entry to check or change Returns ======= out : bool True if ``number`` is a valid entry number; False if not """ status = self._call(const.CONFIRM_, self.ENTRY_EXISTENCE_, ctypes.c_long(number), ignore=(const.NO_SUCH_ENTRY, )) return not status == const.NO_SUCH_ENTRY def max_idx(self): """Maximum index of Entries for this Attr Returns ======= out : int maximum Entry number """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_MAXENTRY_, ctypes.byref(count)) return count.value def new(self, data, type=None, number=None): """Create a new Entry in this Attribute .. note:: If ``number`` is provided and an Entry with that number already exists, it will be overwritten. Parameters ========== data data to put in the Entry Other Parameters ================ type : int type of the new Entry, from :mod:`~pycdf.const` (otherwise guessed from ``data``) number : int Entry number to write, default is lowest available number. """ if number is None: number = 0 while self.has_entry(number): number += 1 (dims, types, elements) = _Hyperslice.types( data, backward=self._cdf_file.backward) if type is None: #Guess based on other entries type = self._check_other_entries(types) if type is None and self.ENTRY_ == const.zENTRY_: #Try to match variable type vartype = self._cdf_file[number].type() if vartype in types: type = vartype if type is None: type = types[0] elif hasattr(type, 'value'): type = type.value self._write_entry(number, data, type, dims, elements) def number(self): """Find the attribute number for this attribute Returns ======= out : int attribute number """ no = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.ATTR_NUMBER_, self._name, ctypes.byref(no)) return no.value def global_scope(self): """Determine scope of this attribute. Returns ======= out : bool True if global (i.e. gAttr), False if zAttr """ return self._cdf_file.attr_num(self._name)[1] def rename(self, new_name): """Rename this attribute Renaming a zAttribute renames it for *all* zVariables in this CDF! 
Parameters ========== new_name : str the new name of the attribute """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_ATTR_NAME_LEN256: raise CDFError(const.BAD_ATTR_NAME) self._call(const.PUT_, const.ATTR_NAME_, enc_name) self._cdf_file.add_attr_to_cache( enc_name, *self._cdf_file.attr_num(self._name)) #still in cache del self._cdf_file._attr_info[self._name] self._name = enc_name def _get_entry(self, number): """Read an Entry associated with this L{Attr} @param number: number of Entry to return @type number: int @return: data from entry numbered L{number} @rtype: list or str """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') #Make a big enough buffer length = self._entry_len(number) cdftype = self.type(number) if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): buff = numpy.empty((), 'S{0}'.format(length), order='C') else: if not cdftype in lib.numpytypedict: raise CDFError(const.BAD_DATA_TYPE) buff = numpy.empty((length,), lib.numpytypedict[cdftype], order='C') buff = numpy.require(buff, requirements=('C', 'A', 'W')) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATA_, buff.ctypes.data_as(ctypes.c_void_p)) #decode if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): if str == bytes or self._raw: #Py2k, leave as bytes result = bytes(buff) else: #Py3k, make unicode result = str(numpy.char.array(buff).decode()) else: if not self._raw: if cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(buff) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(buff) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(buff) else: result = buff else: result = buff if length == 1: result = result[0] return result def _write_entry(self, number, data, cdf_type, dims, elements): """Write an Entry to this Attr. @param number: number of Entry to write @type number: int @param data: data to write @param cdf_type: the CDF type to write, from :py:mod:`pycdf.const` @param dims: dimensions of L{data} @type dims: list @param elements: number of elements in L{data}, 1 unless it is a string @type elements: int """ if len(dims) == 0: n_write = 1 else: n_write = dims[0] if cdf_type in (const.CDF_CHAR.value, const.CDF_UCHAR.value): data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.dtype('S' + str(elements))) n_write = elements elif cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data), except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) elif cdf_type in lib.numpytypedict: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=lib.numpytypedict[cdf_type]) else: raise CDFError(const.BAD_DATA_TYPE) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATA_, ctypes.c_long(cdf_type), ctypes.c_long(n_write), data.ctypes.data_as(ctypes.c_void_p)) def _delete(self): """Delete this Attribute Also deletes all Entries associated with it. 
""" self._call(const.DELETE_, const.ATTR_) self._cdf_file.clear_attr_from_cache(self._name) self._name = None class zAttr(Attr): """zAttribute for zVariables within a CDF. .. warning:: Because zAttributes are shared across all variables in a CDF, directly manipulating them may have unexpected consequences. It is safest to operate on zEntries via :class:`zAttrList`. .. note:: When accessing a zAttr, pyCDF exposes only the zEntry corresponding to the associated zVariable. See Also ======== :class:`Attr` """ ENTRY_ = const.zENTRY_ ENTRY_DATA_ = const.zENTRY_DATA_ SCOPE = const.VARIABLE_SCOPE ENTRY_EXISTENCE_ = const.zENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMzENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXzENTRY_ ENTRY_NUMELEMS_ = const.zENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.zENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.zENTRY_DATASPEC_ def insert(self, index, data): """Insert entry at particular index number Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError def append(self, index, data): """Add entry to end of attribute list Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError class gAttr(Attr): """Global Attribute for a CDF Represents a CDF attribute, providing access to the gEntries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. Normally accessed by providing a key to a :class:`gAttrList`: >>> attribute = cdffile.attrs['attribute_name'] >>> first_gentry = attribute[0] Each element of the list is a single gEntry of the appropriate type. The index to the elements is the gEntry number. A gEntry may be either a single string or a 1D array of numerical type. Entries of numerical type (everything but CDF_CHAR and CDF_UCHAR) with a single element are returned as scalars; multiple-element entries are returned as a list. No provision is made for accessing below the entry level; the whole list is returned at once (but Python's slicing syntax can be used to extract individual items from that list.) Multi-dimensional slicing is *not* supported; an entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry gEntries are *not* necessarily contiguous; a gAttribute may have an entry 0 and entry 2 without an entry 1. :meth:`~Attr.len` will return the *number* of gEntries; use :meth:`~Attr.max_idx` to find the highest defined gEntry number and :meth:`~Attr.has_entry` to determine if a particular gEntry number exists. Iterating over all entries is also supported:: >>> entrylist = [entry for entry in attribute] Deleting gEntries will leave a "hole": >>> attribute[0:3] = [1, 2, 3] >>> del attribute[1] >>> attribute.has_entry(1) False >>> attribute.has_entry(2) True >>> print attribute[0:3] [1, None, 3] Multi-element slices over nonexistent gEntries will return ``None`` where no entry exists. Single-element indices for nonexistent gEntries will raise ``IndexError``. Assigning ``None`` to a gEntry will delete it. 
When assigning to a gEntry, the type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing gEntry of the same number in this gAttribute #. other gEntries in this gAttribute #. data-matching constraints described in :meth:`CDF.new`. See Also ======== :class:`Attr` """ ENTRY_ = const.gENTRY_ ENTRY_DATA_ = const.gENTRY_DATA_ SCOPE = const.GLOBAL_SCOPE ENTRY_EXISTENCE_ = const.gENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMgENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXgENTRY_ ENTRY_NUMELEMS_ = const.gENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.gENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.gENTRY_DATASPEC_ class AttrList(MutableMapping): """Object representing a list of attributes. .. warning:: This class should not be used directly, but only via its subclasses, :class:`gAttrList` and :class:`zAttrList`. Methods listed here are safe to use from the subclasses. .. autosummary:: ~AttrList.clone ~AttrList.copy ~AttrList.from_dict ~AttrList.new ~AttrList.rename .. automethod:: clone .. automethod:: copy .. automethod:: from_dict .. automethod:: new .. automethod:: rename """ def __init__(self, cdf_file, special_entry=None): """Initialize the attribute collection @param cdf_file: CDF these attributes are in @type cdf_file: :py:class:`pycdf.CDF` @param special_entry: callable which returns a "special" entry number, used to limit results for zAttrs to those which match the zVar (i.e. the var number) @type special_entry: callable """ self._cdf_file = cdf_file self.special_entry = special_entry def __getitem__(self, name): """Find an Attribute by name @param name: name of the Attribute to return @type name: str @return: attribute named L{name} @rtype: L{Attr} @raise KeyError: if there is no attribute named L{name} @raise CDFError: other errors in CDF library """ try: attrib = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attrib.global_scope() != self.global_scope: raise KeyError(name + ': no ' + self.attr_name + ' by that name.') return attrib def __setitem__(self, name, data): """Create an Attribute or change its entries @param name: name of Attribute to change @type name: str @param data: Entries to populate this Attribute with. Any existing Entries will be deleted! Another C{Attr} may be specified, in which case all its entries are copied. @type data: scalar, list, or L{Attr} """ if isinstance(data, AttrList): if name in self: del self[name] attr = self._get_or_create(name) for entryno in range(data.max_idx()): if data.has_entry(entryno): attr.new(data[entryno], data.type(entryno), entryno) else: attr = self._get_or_create(name) if isinstance(data, str_classes): data = [data] else: try: junk = len(data) except TypeError: data = [data] attr[:] = data del attr[len(data):] def __delitem__(self, name): """Delete an Attribute (and all its entries) @param name: name of Attribute to delete @type name: str """ try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) attr._delete() def __iter__(self, current=0): """Iterates over all Attr in this CDF or variable Returns name of one L{Attr} at a time until reaches the end. @note: Returned in number order. 
""" count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) while current < count.value: candidate = self.AttrType(self._cdf_file, current) if candidate.global_scope() == self.global_scope: if self.special_entry is None or \ candidate.has_entry(self.special_entry()): if str == bytes: value = yield(candidate._name) else: value = yield(candidate._name.decode()) if value != None: current = self[value].number() current += 1 def __repr__(self): """Returns representation of attribute list Cannot return anything that can be eval'd to create a copy of the list, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<' + self.__class__.__name__ + ':\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute list This is an 'informal' representation in that it cannot be evaluated directly to create an L{AttrList}. @return: all the data in this list of attributes @rtype: str """ if self._cdf_file._opened: return '\n'.join([key + ': ' + ( ('\n' + ' ' * (len(key) + 2)).join( [str(value[i]) + ' [' + lib.cdftypenames[value.type(i)] + ']' for i in range(value.max_idx() + 1) if value.has_entry(i)]) if isinstance(value, Attr) else str(value) + ' [' + lib.cdftypenames[self.type(key)] + ']' ) for (key, value) in sorted(self.items())]) else: if isinstance(self._cdf_file.pathname, str): return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname) else: return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname.decode('ascii')) def clone(self, master, name=None, new_name=None): """ Clones another attribute list, or one attribute from it, into this list. Parameters ========== master : AttrList the attribute list to copy from. This can be any dict-like object. Other Parameters ================ name : str (optional) name of attribute to clone (default: clone entire list) new_name : str (optional) name of the new attribute, default ``name`` """ if name is None: self._clone_list(master) else: self._clone_attr(master, name, new_name) def copy(self): """ Create a copy of this attribute list Returns ======= out : dict copy of the entries for all attributes in this list """ return dict((key, value[:] if isinstance(value, Attr) else value) for (key, value) in self.items()) def new(self, name, data=None, type=None): """ Create a new Attr in this AttrList Parameters ========== name : str name of the new Attribute Other Parameters ================ data data to put into the first entry in the new Attribute type CDF type of the first entry from :mod:`~pycdf.const`. Only used if data are specified. Raises ====== KeyError : if the name already exists in this list """ if name in self: raise KeyError(name + ' already exists.') #A zAttr without an Entry in this zVar will be a "get" not "create" attr = self._get_or_create(name) if data is not None: if self.special_entry is None: attr.new(data, type) else: attr.new(data, type, self.special_entry()) def rename(self, old_name, new_name): """ Rename an attribute in this list Renaming a zAttribute renames it for *all* zVariables in this CDF! Parameters ========== old_name : str the current name of the attribute new_name : str the new name of the attribute """ AttrList.__getitem__(self, old_name).rename(new_name) def from_dict(self, in_dict): """ Fill this list of attributes from a dictionary .. deprecated:: 0.1.5 Use :meth:`~pycdf.AttrList.clone` instead; it supports cloning from dictionaries. 
Parameters ========== in_dict : dict Attribute list is populated entirely from this dictionary; all existing attributes are deleted. """ warnings.warn("from_dict is deprecated and will be removed. Use clone.", DeprecationWarning) for k in in_dict: self[k] = in_dict[k] for k in list(self): if not k in in_dict: del self[k] def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{AttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name self[new_name] = master[name] def _clone_list(self, master): """Clones this attribute list from another @param master: the attribute list to copy from @type master: L{AttrList} """ for name in master: self._clone_attr(master, name) for name in list(self): #Can't iterate over a list we're changing if not name in master: del self[name] def _get_or_create(self, name): """Retrieve L{Attr} or create it if it doesn't exist @param name: name of the attribute to look up or create @type name: str @return: attribute with this name @rtype: L{Attr} """ attr = None try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status != const.NO_SUCH_ATTR: raise if attr is None: attr = self.AttrType(self._cdf_file, name, True) elif attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) return attr class gAttrList(AttrList): """ Object representing *all* the gAttributes in a CDF. Normally accessed as an attribute of an open :class:`CDF`: >>> global_attribs = cdffile.attrs Appears as a dictionary: keys are attribute names; each value is an attribute represented by a :class:`gAttr` object. To access the global attribute TEXT: >>> text_attr = cdffile.attrs['TEXT'] See Also ======== :class:`AttrList` """ AttrType = gAttr attr_name = 'gAttribute' global_scope = True def __len__(self): """ Number of gAttributes in this CDF Returns ======= out : int number of gAttributes in the CDF """ count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMgATTRS_, ctypes.byref(count)) return count.value class zAttrList(AttrList): """Object representing *all* the zAttributes in a zVariable. Normally accessed as an attribute of a :class:`Var` in an open CDF: >>> epoch_attribs = cdffile['Epoch'].attrs Appears as a dictionary: keys are attribute names, values are the value of the zEntry associated with the appropriate zVariable. Each vAttribute in a CDF may only have a *single* entry associated with each variable. The entry may be a string, a single numerical value, or a series of numerical values. Entries with multiple values are returned as an entire list; direct access to the individual elements is not possible. Example: finding the first dependency of (ISTP-compliant) variable ``Flux``: >>> print cdffile['Flux'].attrs['DEPEND_0'] zAttributes are shared among zVariables, one zEntry allowed per zVariable. (pyCDF hides this detail.) Deleting the last zEntry for a zAttribute will delete the underlying zAttribute. zEntries are created and destroyed by the usual dict methods on the zAttrlist: >>> epoch_attribs['new_entry'] = [1, 2, 4] #assign a list to new zEntry >>> del epoch_attribs['new_entry'] #delete the zEntry The type of the zEntry is guessed from data provided. 
The type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing zEntry corresponding to this zVar #. other zEntries in this zAttribute #. the type of this zVar #. data-matching constraints described in :py:meth:`CDF.new` See Also ======== :class:`AttrList` """ AttrType = zAttr attr_name = 'zAttribute' global_scope = False def __init__(self, zvar): """Initialize the attribute collection @param zvar: zVariable these attributes are in @param zvar: :py:class:`pycdf.Var` """ super(zAttrList, self).__init__(zvar.cdf_file, zvar._num) self._zvar = zvar def __getitem__(self, name): """Find an zEntry by name @param name: name of the zAttribute to return @type name: str @return: attribute named L{name} @rtype: L{zAttr} @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if attrib.has_entry(zvar_num): attrib._raw = self._zvar._raw return attrib[zvar_num] else: raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) def __delitem__(self, name): """Delete an zEntry by name @param name: name of the zEntry to delete @type name: str @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library @note: If this is the only remaining entry, the Attribute will be deleted. """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(str(name) + ': no such attribute for variable ' + str(self._zvar._name)) del attrib[zvar_num] if len(attrib) == 0: attrib._delete() def __setitem__(self, name, data): """Sets a zEntry by name The type of the zEntry is guessed from L{data}. The type is chosen to match the data; subject to that constraint, it will try to match (in order): 1. existing zEntry corresponding to this zVar 2. other zEntries in this zAttribute 3. the type of this zVar 4. data-matching constraints described in L{_Hyperslice.types} @param name: name of zAttribute; zEntry for this zVariable will be set in zAttribute by this name @type name: str @raise CDFError: errors in CDF library @raise ValueError: if unable to find a valid CDF type matching L{data}, or if L{data} is the wrong dimensions. """ try: attr = super(zAttrList, self).__getitem__(name) except KeyError: attr = zAttr(self._cdf_file, name, True) attr._raw = self._zvar._raw attr[self._zvar._num()] = data def __len__(self): """Number of zAttributes in this variable @return: number of zAttributes in the CDF which have entries for this variable. @rtype: int """ length = 0 count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) current = 0 while current < count.value: candidate = zAttr(self._cdf_file, current) if not candidate.global_scope(): if candidate.has_entry(self._zvar._num()): length += 1 current += 1 return length def type(self, name, new_type=None): """Find or change the CDF type of a zEntry in this zVar @param name: name of the zAttr to check or change @type name: str @param new_type: type to change it to, see :py:mod:`pycdf.const` @type new_type: ctypes.c_long @return: CDF variable type, see :py:mod:`pycdf.const` @rtype: int @note: If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) return attrib.type(zvar_num, new_type) def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{zAttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name if new_name in self: del self[new_name] self.new(new_name, master[name], master.type(name) if hasattr(master, 'type') else None)
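The dict-like attribute interfaces above are easier to follow with a short usage sketch. The following is illustrative only and is not part of the pycdf source; the import name `pycdf`, the file name `example.cdf`, and the variable name `Flux` are assumptions.

import pycdf  # assumed import name for this standalone port of pycdf

# Illustrative sketch of the zAttrList (per-variable attribute) interface.
with pycdf.CDF('example.cdf') as cdffile:      # 'example.cdf' assumed to exist
    flux = cdffile['Flux']                     # 'Flux' is an assumed zVariable
    # .attrs behaves like a dict: keys are zAttribute names, values are the
    # zEntries belonging to this zVariable.
    for name, value in flux.attrs.items():
        print(name, value)
    cdffile.readonly(False)                    # writing requires read/write mode
    flux.attrs['UNITS'] = 'counts/s'           # create or overwrite a zEntry
    del flux.attrs['UNITS']                    # removing the last zEntry also
                                               # deletes the underlying zAttribute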
readonly
Sets or checks the readonly status of this CDF.

If the CDF has been changed since opening, setting readonly mode
will have no effect.

.. note::
    Closing a CDF that has been opened readonly, or setting readonly
    False, may take a substantial amount of time if there are many
    variables in the CDF, as a (potentially large) cache needs to be
    cleared. Consider specifying ``readonly=False`` when opening the
    file if this is an issue. However, this may make some reading
    operations slower.

Other Parameters
================
ro : Boolean
    True to set the CDF readonly, False to set it read/write,
    or leave out to check only.

Returns
=======
out : Boolean
    True if CDF is read-only, else False

Raises
======
CDFError : if bad mode is set
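A minimal sketch of how this readonly interface is typically used, assuming the standalone module is importable as `pycdf` and that `example.cdf` already exists:

import pycdf  # assumed import name for this standalone module

cdf = pycdf.CDF('example.cdf')    # an existing CDF normally opens read-only
print(cdf.readonly())             # no argument: check-only, returns True/False
cdf.readonly(False)               # switch to read/write (may clear a large cache)
print(cdf.readonly())             # now False
cdf.close()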
#!/usr/bin/env python # -*- coding: utf-8 -*- """ das developers note: This a is modification of the original SpacePy pycdf package. All refereneces to the greater spacepy package have been removed to create a small standalone module. --cwp 2018-10-18 The libcdf.so location code has been changed to find the version installed in anaconda. --cwp 2020-04-06 This package provides a Python interface to the Common Data Format (CDF) library used for many NASA missions, available at http://cdf.gsfc.nasa.gov/. It is targeted at Python 2.6+ and should work without change on either Python 2 or Python 3. The interface is intended to be 'pythonic' rather than reproducing the C interface. To open or close a CDF and access its variables, see the :class:`CDF` class. Accessing data within the variables is via the :class:`Var` class. The :data:`lib` object provides access to some routines that affect the functionality of the library in general. The :mod:`~pycdf.const` module contains constants useful for accessing the underlying library. Authors: Jon Niehof Institution: University of New Hampshire Contact: [email protected] Copyright 2010-2015 Los Alamos National Security, LLC. """ __contact__ = 'Jon Niehof, [email protected]' try: from collections.abc import MutableMapping, MutableSequence except ImportError: from collections import MutableMapping, MutableSequence import ctypes import ctypes.util import datetime import operator import os import os.path import shutil import sys import tempfile import warnings import weakref import numpy import numpy.ma #Import const AFTER library loaded, so failed load doesn't leave half-imported #from . import const try: str_classes = (str, bytes, unicode) except NameError: str_classes = (str, bytes) class Library(object): """ Abstraction of the base CDF C library and its state. Not normally intended for end-user use. An instance of this class is created at package load time as the :data:`~pycdf.lib` variable, providing access to the underlying C library if necessary. The CDF library itself is described in section 2.1 of the CDF user's guide, as well as the CDF C reference manual. Calling the C library directly requires knowledge of :mod:`ctypes`. Instantiating this object loads the C library, see :doc:`/pycdf` docs for details. .. autosummary:: ~Library.call ~Library.check_status ~Library.datetime_to_epoch ~Library.datetime_to_epoch16 ~Library.datetime_to_tt2000 ~Library.epoch_to_datetime ~Library.epoch_to_epoch16 ~Library.epoch_to_num ~Library.epoch_to_tt2000 ~Library.epoch16_to_datetime ~Library.epoch16_to_epoch ~Library.epoch16_to_tt2000 ~Library.set_backward supports_int8 ~Library.tt2000_to_datetime ~Library.tt2000_to_epoch ~Library.tt2000_to_epoch16 v_datetime_to_epoch v_datetime_to_epoch16 v_datetime_to_tt2000 v_epoch_to_datetime v_epoch_to_tt2000 v_epoch16_to_datetime v_epoch16_to_tt2000 v_tt2000_to_datetime v_tt2000_to_epoch v_tt2000_to_epoch16 libpath version .. automethod:: call .. automethod:: check_status .. automethod:: datetime_to_epoch .. automethod:: datetime_to_epoch16 .. automethod:: datetime_to_tt2000 .. automethod:: epoch_to_datetime .. automethod:: epoch_to_epoch16 .. automethod:: epoch_to_num .. automethod:: epoch_to_tt2000 .. automethod:: epoch16_to_datetime .. automethod:: epoch16_to_epoch .. automethod:: epoch16_to_tt2000 .. automethod:: set_backward .. attribute:: supports_int8 True if this library supports INT8 and TIME_TT2000 types; else False. .. automethod:: tt2000_to_datetime .. automethod:: tt2000_to_epoch .. 
automethod:: tt2000_to_epoch16 .. method:: v_datetime_to_epoch(datetime) A vectorized version of :meth:`datetime_to_epoch` which takes a numpy array of datetimes as input and returns an array of epochs. .. method:: v_datetime_to_epoch16(datetime) A vectorized version of :meth:`datetime_to_epoch16` which takes a numpy array of datetimes as input and returns an array of epoch16. .. method:: v_datetime_to_tt2000(datetime) A vectorized version of :meth:`datetime_to_tt2000` which takes a numpy array of datetimes as input and returns an array of TT2000. .. method:: v_epoch_to_datetime(epoch) A vectorized version of :meth:`epoch_to_datetime` which takes a numpy array of epochs as input and returns an array of datetimes. .. method:: v_epoch_to_tt2000(epoch) A vectorized version of :meth:`epoch_to_tt2000` which takes a numpy array of epochs as input and returns an array of tt2000s. .. method:: v_epoch16_to_datetime(epoch0, epoch1) A vectorized version of :meth:`epoch16_to_datetime` which takes a numpy array of epoch16 as input and returns an array of datetimes. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_epoch16_to_tt2000(epoch16) A vectorized version of :meth:`epoch16_to_tt2000` which takes a numpy array of epoch16 as input and returns an array of tt2000s. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_tt2000_to_datetime(tt2000) A vectorized version of :meth:`tt2000_to_datetime` which takes a numpy array of tt2000 as input and returns an array of datetimes. .. method:: v_tt2000_to_epoch(tt2000) A vectorized version of :meth:`tt2000_to_epoch` which takes a numpy array of tt2000 as input and returns an array of epochs. .. method:: v_tt2000_to_epoch16(tt2000) A vectorized version of :meth:`tt2000_to_epoch16` which takes a numpy array of tt2000 as input and returns an array of epoch16. .. attribute:: libpath The path where pycdf found the CDF C library, potentially useful in debugging. If this contains just the name of a file (with no path information), then the system linker found the library for pycdf. On Linux, ``ldconfig -p`` may be useful for displaying the system's library resolution. .. attribute:: version Version of the CDF library, (version, release, increment, subincrement) """ def __init__(self, libpath=None, library=None): """Load the CDF C library. Searches for the library in the order: 1. Appropriately-named file in CDF_LIB 2. Appropriately-named file in CDF_BASE 3. Standard library search path @raise CDFError: BAD_DATA_TYPE if can't map types properly """ if not 'CDF_TMP' in os.environ: os.environ['CDF_TMP'] = tempfile.gettempdir() if not library: if not libpath: self.libpath, self._library = self._find_lib() if self._library is None: raise Exception(( 'Cannot load CDF C library; checked {0}. 
' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(self.libpath))) else: self._library = ctypes.CDLL(libpath) self.libpath = libpath else: self._library = library self.libpath = libpath self._library.CDFlib.restype = ctypes.c_long #commonly used, so set it up here self._library.EPOCHbreakdown.restype = ctypes.c_long self._library.computeEPOCH.restype = ctypes.c_double self._library.computeEPOCH.argtypes = [ctypes.c_long] * 7 self._library.computeEPOCH16.restype = ctypes.c_double self._library.computeEPOCH16.argtypes = [ctypes.c_long] * 10 + \ [ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDFsetFileBackward'): self._library.CDFsetFileBackward.restype = None self._library.CDFsetFileBackward.argtypes = [ctypes.c_long] #Map old name to the 3.7.1+ name if not hasattr(self._library, 'computeTT2000') \ and hasattr(self._library, 'CDF_TT2000_from_UTC_parts'): self._library.computeTT2000 \ = self._library.CDF_TT2000_from_UTC_parts if hasattr(self._library, 'computeTT2000'): self._library.computeTT2000.restype = ctypes.c_longlong self._library.computeTT2000.argtypes = \ [ctypes.c_double] *9 #Map old name to the 3.7.1+ name if not hasattr(self._library, 'breakdownTT2000') \ and hasattr(self._library, 'CDF_TT2000_to_UTC_parts'): self._library.breakdownTT2000 \ = self._library.CDF_TT2000_to_UTC_parts if hasattr(self._library, 'breakdownTT2000'): self._library.breakdownTT2000.restype = None self._library.breakdownTT2000.argtypes = \ [ctypes.c_longlong] + [ctypes.POINTER(ctypes.c_double)] * 9 if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH'): self._library.CDF_TT2000_to_UTC_EPOCH.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH.argtypes = [ctypes.c_longlong] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH'): self._library.CDF_TT2000_from_UTC_EPOCH.restype = ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH.argtypes = [ctypes.c_double] if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH16'): self._library.CDF_TT2000_to_UTC_EPOCH16.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH16.argtypes = \ [ctypes.c_longlong, ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH16'): self._library.CDF_TT2000_from_UTC_EPOCH16.restype = \ ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH16.argtypes = \ [ctypes.POINTER(ctypes.c_double * 2)] #Get CDF version information ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) sub = ctypes.c_char(b' ') self.call(const.GET_, const.LIB_VERSION_, ctypes.byref(ver), const.GET_, const.LIB_RELEASE_, ctypes.byref(rel), const.GET_, const.LIB_INCREMENT_, ctypes.byref(inc), const.GET_, const.LIB_subINCREMENT_, ctypes.byref(sub)) ver = ver.value rel = rel.value inc = inc.value sub = sub.value self.version = (ver, rel, inc, sub) self._del_middle_rec_bug = ver < 3 or (ver == 3 and (rel < 4 or (rel == 4 and inc < 1))) self.supports_int8 = (ver > 3 or (ver == 3 and rel >=4)) self.cdftypenames = {const.CDF_BYTE.value: 'CDF_BYTE', const.CDF_CHAR.value: 'CDF_CHAR', const.CDF_INT1.value: 'CDF_INT1', const.CDF_UCHAR.value: 'CDF_UCHAR', const.CDF_UINT1.value: 'CDF_UINT1', const.CDF_INT2.value: 'CDF_INT2', const.CDF_UINT2.value: 'CDF_UINT2', const.CDF_INT4.value: 'CDF_INT4', const.CDF_UINT4.value: 'CDF_UINT4', const.CDF_INT8.value: 'CDF_INT8', const.CDF_FLOAT.value: 'CDF_FLOAT', const.CDF_REAL4.value: 'CDF_REAL4', const.CDF_DOUBLE.value: 'CDF_DOUBLE', const.CDF_REAL8.value: 'CDF_REAL8', const.CDF_EPOCH.value: 'CDF_EPOCH', 
const.CDF_EPOCH16.value: 'CDF_EPOCH16', const.CDF_TIME_TT2000.value: 'CDF_TIME_TT2000', } self.numpytypedict = {const.CDF_BYTE.value: numpy.int8, const.CDF_CHAR.value: numpy.int8, const.CDF_INT1.value: numpy.int8, const.CDF_UCHAR.value: numpy.uint8, const.CDF_UINT1.value: numpy.uint8, const.CDF_INT2.value: numpy.int16, const.CDF_UINT2.value: numpy.uint16, const.CDF_INT4.value: numpy.int32, const.CDF_UINT4.value: numpy.uint32, const.CDF_INT8.value: numpy.int64, const.CDF_FLOAT.value: numpy.float32, const.CDF_REAL4.value: numpy.float32, const.CDF_DOUBLE.value: numpy.float64, const.CDF_REAL8.value: numpy.float64, const.CDF_EPOCH.value: numpy.float64, const.CDF_EPOCH16.value: numpy.dtype((numpy.float64, 2)), const.CDF_TIME_TT2000.value: numpy.int64, } self.timetypes = [const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value] if not self.supports_int8: del self.cdftypenames[const.CDF_INT8.value] del self.numpytypedict[const.CDF_INT8.value] del self.cdftypenames[const.CDF_TIME_TT2000.value] del self.numpytypedict[const.CDF_TIME_TT2000.value] elif sys.platform.startswith('linux') \ and os.uname()[4].startswith('arm') \ and hasattr(self._library, 'computeTT2000') \ and self._library.computeTT2000( 2010, 1, 1, 0, 0, 0, 0, 0, 0) != 315576066184000000: #TT2000 call failed, so probably need to type-pun #double arguments to variadic functions. #Calling convention for non-variadic functions with floats #is unique, but convention for ints is same as variadic. #So type-pun arguments to integers to force that calling #convention. if ctypes.sizeof(ctypes.c_longlong) != \ ctypes.sizeof(ctypes.c_double): warnings.warn('ARM with unknown type sizes; ' 'TT2000 functions will not work.') else: self._library.computeTT2000.argtypes = \ [ctypes.c_longlong] * 9 c_ll_p = ctypes.POINTER(ctypes.c_longlong) if self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( 2010)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) != 315576066184000000: warnings.warn('ARM with unknown calling convention; ' 'TT2000 functions will not work.') self.datetime_to_tt2000 = self._datetime_to_tt2000_typepunned v_epoch16_to_datetime = numpy.frompyfunc( self.epoch16_to_datetime, 2, 1) self.v_epoch16_to_datetime = \ lambda x: v_epoch16_to_datetime(x[..., 0], x[..., 1]) self.v_epoch_to_datetime = numpy.frompyfunc( self.epoch_to_datetime, 1, 1) self.v_tt2000_to_datetime = numpy.frompyfunc( self.tt2000_to_datetime, 1, 1) self.v_datetime_to_epoch = numpy.vectorize( self.datetime_to_epoch, otypes=[numpy.float64]) v_datetime_to_epoch16 = numpy.frompyfunc( self.datetime_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. 
We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_datetime_to_epoch16(x): retval = numpy.require(v_datetime_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_datetime_to_epoch16 = _v_datetime_to_epoch16 self.v_datetime_to_tt2000 = numpy.vectorize( self.datetime_to_tt2000, otypes=[numpy.int64]) self.v_epoch_to_tt2000 = numpy.vectorize( self.epoch_to_tt2000, otypes=[numpy.int64]) self.v_tt2000_to_epoch = numpy.vectorize( self.tt2000_to_epoch, otypes=[numpy.float64]) v_epoch16_to_tt2000 = numpy.frompyfunc( self.epoch16_to_tt2000, 2, 1) self.v_epoch16_to_tt2000 = \ lambda x: v_epoch16_to_tt2000(x[..., 0], x[..., 1]) v_tt2000_to_epoch16 = numpy.frompyfunc( self.tt2000_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_tt2000_to_epoch16(x): retval = numpy.require(v_tt2000_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_tt2000_to_epoch16 = _v_tt2000_to_epoch16 if not self.supports_int8: self.datetime_to_tt2000 = self._bad_tt2000 self.tt2000_to_datetime = self._bad_tt2000 self.v_datetime_to_tt2000 = self._bad_tt2000 self.v_tt2000_to_datetime = self._bad_tt2000 self.epoch_to_tt2000 = self._bad_tt2000 self.v_epoch_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch = self._bad_tt2000 self.v_tt2000_to_epoch = self._bad_tt2000 self.epoch_16_to_tt2000 = self._bad_tt2000 self.v_epoch16_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch16 = self._bad_tt2000 self.v_tt2000_to_epoch16 = self._bad_tt2000 #Default to V2 CDF self.set_backward(True) @staticmethod def _find_lib(): """ Search for the CDF library Searches in likely locations for CDF libraries and attempts to load them. Stops at first successful load and, if fails, reports all the files that were tried as libraries. Returns ======= out : tuple This is either (path to library, loaded library) or, in the event of failure, (None, list of libraries tried) """ failed = [] for libpath in Library._lib_paths(): try: lib = ctypes.CDLL(libpath) except: failed.append(libpath) else: return libpath, lib return (failed, None) @staticmethod def _lib_paths(): """Find candidate paths for the CDF library Does not check that the library is actually in any particular directory, just returns a list of possible locations, in priority order. Returns ======= out : generator of str paths that look like the CDF library """ #What the library might be named names = { 'win32': ['cdf.dll'], 'darwin': ['libcdf.dylib', 'cdf.dylib', 'libcdf.so'], 'linux2': ['libcdf.so'], 'linux': ['libcdf.so'], } names = names.get(sys.platform, ['libcdf.so']) #All existing CDF-library-like paths within a directory search_dir = lambda x: \ [os.path.join(x, fname) for fname in names if os.path.exists(os.path.join(x, fname))] # Only use anaconda locations... # Defined during builds ... if 'PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['PREFIX'], 'lib')): yield p # defined when conda is activated ... 
if 'CONDA_PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'lib')): yield p # Special subdirectory for anaconda unix packages on windows if 'LIBRARY_BIN' in os.environ: for p in search_dir(os.environ['LIBRARY_BIN']): yield p ctypespath = ctypes.util.find_library( 'cdf.dll' if sys.platform == 'win32' else 'cdf') if ctypespath: yield ctypespath def check_status(self, status, ignore=()): """ Raise exception or warning based on return status of CDF call Parameters ========== status : int status returned by the C library Other Parameters ================ ignore : sequence of ctypes.c_long CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. (Default none). Raises ====== CDFError : if status < CDF_WARN, indicating an error Warns ===== CDFWarning : if CDF_WARN <= status < CDF_OK, indicating a warning. Returns ======= out : int status (unchanged) """ if status == const.CDF_OK or status in ignore: return status if status < const.CDF_WARN: raise CDFError(status) else: warning = CDFWarning(status) warning.warn() return status def call(self, *args, **kwargs): """ Call the CDF internal interface Passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with :meth:`check_status`. Terminal NULL is automatically added to args. Parameters ========== args : various, see :mod:`ctypes` Passed directly to the CDF library interface. Useful constants are defined in the :mod:`~pycdf.const` module. Other Parameters ================ ignore : sequence of CDF statuses sequence of CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. Returns ======= out : int CDF status from the library Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ if 'ignore' in kwargs: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) ), kwargs['ignore']) else: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) )) def set_backward(self, backward=True): """ Set backward compatibility mode for new CDFs Unless backward compatible mode is set, CDF files created by the version 3 library can not be read by V2. Parameters ========== backward : boolean Set backward compatible mode if True; clear it if False. Raises ====== ValueError : if backward=False and underlying CDF library is V2 """ if self.version[0] < 3: if not backward: raise ValueError( 'Cannot disable backward-compatible mode for CDF version 2.') else: return self._library.CDFsetFileBackward(const.BACKWARDFILEon if backward else const.BACKWARDFILEoff) def epoch_to_datetime(self, epoch): """ Converts a CDF epoch value to a datetime Parameters ========== epoch : float epoch value from CDF Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
See Also ======== v_epoch_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) self._library.EPOCHbreakdown(ctypes.c_double(epoch), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999000) else: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, msec.value * 1000) def datetime_to_epoch(self, dt): """ Converts a Python datetime to a CDF Epoch value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : float epoch corresponding to dt See Also ======== v_datetime_to_epoch """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) micro = dt.microsecond % 1000 if micro >= 500 and dt.year < 9999: dt += datetime.timedelta(0, 0, 1000) return self._library.computeEPOCH(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000)) def epoch16_to_datetime(self, epoch0, epoch1): """ Converts a CDF epoch16 value to a datetime .. note:: The call signature has changed since SpacePy 0.1.2. Formerly this method took a single argument with two values; now it requires two arguments (one for each value). To convert existing code, replace ``epoch16_to_datetime(epoch)`` with ``epoch16_to_datetime(*epoch)``. Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. See Also ======== v_epoch16_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) usec = ctypes.c_long(0) nsec = ctypes.c_long(0) psec = ctypes.c_long(0) self._library.EPOCH16breakdown((ctypes.c_double * 2)(epoch0, epoch1), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec), ctypes.byref(psec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) micro = int(float(msec.value) * 1000 + float(usec.value) + float(nsec.value) / 1000 + float(psec.value) / 1e6 + 0.5) if micro < 1000000: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_epoch16(self, dt): """ Converts a Python datetime to a CDF Epoch16 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : list of float epoch16 corresponding to dt See Also ======== v_datetime_to_epoch16 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) #Default to "illegal epoch" epoch16 = (ctypes.c_double * 2)(-1., -1.) 
if self._library.computeEPOCH16(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0, 0, epoch16): return (-1., -1.) #Failure, so illegal epoch return (epoch16[0], epoch16[1]) def epoch_to_epoch16(self, epoch): """ Converts a CDF EPOCH to a CDF EPOCH16 value Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : (double, double) EPOCH16 corresponding to epoch """ e = numpy.require(epoch, numpy.float64) s = numpy.trunc(e / 1000.0) #ugly numpy stuff, probably a better way.... res = numpy.hstack((s, (e - s * 1000.0) * 1e9)) if len(res) <= 2: return res newshape = list(res.shape[0:-2]) newshape.append(res.shape[-1] // 2) newshape.append(2) return numpy.rollaxis(res.reshape(newshape), -1, -2) def epoch_to_num(self, epoch): """ Convert CDF EPOCH to matplotlib number. Same output as :func:`~matplotlib.dates.date2num` and useful for plotting large data sets without converting the times through datetime. Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : double Floating point number representing days since 0001-01-01. """ #date2num day 1 is 1/1/1 00UT #epoch 1/1/1 00UT is 31622400000.0 (millisecond) return (epoch - 31622400000.0) / (24 * 60 * 60 * 1000.0) + 1.0 def epoch16_to_epoch(self, epoch16): """ Converts a CDF EPOCH16 to a CDF EPOCH value Parameters ========== epoch16 : (double, double) EPOCH16 to convert. Lists and numpy arrays are acceptable. LAST dimension should be 2: the two pairs of EPOCH16 Returns ======= out : double EPOCH corresponding to epoch16 """ e = numpy.require(epoch16, numpy.float64) return e[..., 0] * 1000.0 + numpy.round(e[..., 1] / 1e9) def tt2000_to_datetime(self, tt2000): """ Converts a CDF TT2000 value to a datetime .. note:: Although TT2000 values support leapseconds, Python's datetime object does not. Any times after 23:59:59.999999 will be truncated to 23:59:59.999999. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
See Also ======== v_tt2000_to_datetime """ yyyy = ctypes.c_double(0) mm = ctypes.c_double(0) dd = ctypes.c_double(0) hh = ctypes.c_double(0) min = ctypes.c_double(0) sec = ctypes.c_double(0) msec = ctypes.c_double(0) usec = ctypes.c_double(0) nsec = ctypes.c_double(0) self._library.breakdownTT2000( ctypes.c_longlong(tt2000), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) sec = int(sec.value) if sec >= 60: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), 59, 999999) micro = int(msec.value * 1000 + usec.value + nsec.value / 1000 + 0.5) if micro < 1000000: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_tt2000(self, dt): """ Converts a Python datetime to a CDF TT2000 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0) def _datetime_to_tt2000_typepunned(self, dt): """ Converts a Python datetime to a CDF TT2000 value Typepunned version that passes doubles as longlongs, to get around ARM calling convention oddness. Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ c_ll_p = ctypes.POINTER(ctypes.c_longlong) if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( dt.year)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.month)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.day)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.hour)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.minute)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.second)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond // 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond % 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) def epoch_to_tt2000(self, epoch): """ Converts a CDF EPOCH to a CDF TT2000 value Parameters ========== epoch : double EPOCH to convert Returns ======= out : int tt2000 corresponding to epoch See Also ======== v_epoch_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH(epoch) def tt2000_to_epoch(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH .. note:: Although TT2000 values support leapseconds, CDF EPOCH values do not. 
Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double EPOCH corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch """ return self._library.CDF_TT2000_to_UTC_EPOCH(tt2000) def epoch16_to_tt2000(self, epoch0, epoch1): """ Converts a CDF epoch16 value to TT2000 .. note:: Because TT2000 does not support picoseconds, the picoseconds value in epoch is ignored (i.e., truncated.) Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : long TT2000 corresponding to epoch. See Also ======== v_epoch16_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH16( (ctypes.c_double * 2)(epoch0, epoch1)) def tt2000_to_epoch16(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH16 .. note:: Although TT2000 values support leapseconds, CDF EPOCH16 values do not. Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double, double EPOCH16 corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch16 """ #Default to "illegal epoch" if isn't populated epoch16 = (ctypes.c_double * 2)(-1., -1.) if self._library.CDF_TT2000_to_UTC_EPOCH16(tt2000, epoch16): return (-1., -1.) #Failure; illegal epoch return (epoch16[0], epoch16[1]) def _bad_tt2000(*args, **kwargs): """Convenience function for complaining that TT2000 not supported""" raise NotImplementedError( 'TT2000 functions require CDF library 3.4.0 or later') def download_library(): """Download and install the CDF library""" if sys.platform != 'win32': raise NotImplementedError( 'CDF library install only supported on Windows') try: import html.parser as HTMLParser except ImportError: import HTMLParser #https://stackoverflow.com/questions/1713038/super-fails-with-error-typeerror-argument-1-must-be-type-not-classobj class LinkParser(HTMLParser.HTMLParser, object): def __init__(self, *args, **kwargs): self.links_found = [] super(LinkParser, self).__init__(*args, **kwargs) def handle_starttag(self, tag, attrs): if tag != 'a' or attrs[0][0] != 'href': return self.links_found.append(attrs[0][1]) import re import subprocess try: import urllib.request as u except ImportError: import urllib as u # Removed reference to spacepy #import spacepy #if spacepy.config.get('user_agent', None): # class AppURLopener(u.FancyURLopener): # version = spacepy.config['user_agent'] # u._urlopener = AppURLopener() baseurl = 'https://spdf.sci.gsfc.nasa.gov/pub/software/cdf/dist/' url = u.urlopen(baseurl) listing = url.read() url.close() p = LinkParser() p.feed(listing) cdfdist = [l for l in p.links_found if re.match('^cdf3\d_\d(?:_\d)?/$', l)] if not cdfdist: raise RuntimeError( "Couldn't find CDF distribution directory to download") cdfdist.sort(key=lambda x: x.rstrip('/').split('_')) cdfverbase = cdfdist[-1].rstrip('/') instfname = cdfverbase + ('_0' if cdfverbase.count('_') == 1 else '') + \ '-setup-{0}.exe'.format(len('%x' % sys.maxsize)*4) insturl = baseurl + cdfverbase + '/windows/' + instfname tmpdir = tempfile.mkdtemp() try: fname, status = u.urlretrieve(insturl, os.path.join(tmpdir, instfname)) subprocess.check_call([fname, '/install', '/q1'], shell=False) finally: shutil.rmtree(tmpdir) _libpath, _library = Library._find_lib() 
if _library is None: raise Exception(('Cannot load CDF C library; checked {0}. ' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(_libpath))) from . import const lib = Library(_libpath, _library) """Module global library object. Initalized at module load time so all classes have ready access to the CDF library and a common state. E.g: >>> import pycdf >>> pycdf.lib.version (3, 3, 0, ' ') """ class CDFException(Exception): """ Base class for errors or warnings in the CDF library. Not normally used directly, but in subclasses :class:`CDFError` and :class:`CDFWarning`. Error messages provided by this class are looked up from the underlying C library. """ def __init__(self, status): """ Create a CDF Exception Uses CDF C library to look up an appropriate error message. Parameters ========== status : ctypes.c_long CDF status """ self.status = status self.string = 'CDF error ' + repr(status) + ', unable to get details.' message = ctypes.create_string_buffer(const.CDF_STATUSTEXT_LEN + 1) try: retval = lib._library.CDFlib(const.SELECT_, const.CDF_STATUS_, ctypes.c_long(status), const.GET_, const.STATUS_TEXT_, message, const.NULL_) if retval == const.CDF_OK: if isinstance(message.value, str): self.string = message.value elif isinstance(message.value, bytes): self.string = message.value.decode() except: pass def __str__(self): """ Error string associated with the library error. Returns ======= out : str Error message from the CDF library. """ return self.string class CDFError(CDFException): """Raised for an error in the CDF library.""" pass class CDFWarning(CDFException, UserWarning): """Used for a warning in the CDF library.""" def warn(self, level=4): """ Issues a warning based on the information stored in my exception Intended for use in check_status or similar wrapper function. Other Parameters ================ level : int optional (default 3), how far up the stack the warning should be reported. Passed directly to :class:`warnings.warn`. """ warnings.warn(self, self.__class__, level) class EpochError(Exception): """Used for errors in epoch routines""" pass def _compress(obj, comptype=None, param=None): """Set or check the compression of a :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param obj: object on which to set or check compression @type obj: :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param comptype: type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :py:mod:`pycdf.const`. If not specified, will not change compression. @type comptype: ctypes.c_long @param param: Compression parameter, see CDF CRM 4.10 and :py:mod:`pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
@type param: ctypes.c_long @return: (comptype, param) currently in effect @rtype: tuple """ if isinstance(obj, CDF): COMPRESSION_ = const.CDF_COMPRESSION_ elif isinstance(obj, Var): COMPRESSION_ = const.zVAR_COMPRESSION_ else: raise ValueError('Must specify a CDF or Var type.') validparams = {const.NO_COMPRESSION.value: [ctypes.c_long(0)], const.RLE_COMPRESSION.value: [const.RLE_OF_ZEROs], const.HUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.AHUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.GZIP_COMPRESSION.value: [ctypes.c_long(5), ctypes.c_long(1), ctypes.c_long(2), ctypes.c_long(3), ctypes.c_long(4), ctypes.c_long(6), ctypes.c_long(7), ctypes.c_long(8), ctypes.c_long(9), ], } comptypes = [const.NO_COMPRESSION, const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION, const.GZIP_COMPRESSION] comptypevalues = [i.value for i in comptypes] if comptype != None: if not hasattr(comptype, 'value'): comptype = ctypes.c_long(comptype) if param is None: if not comptype.value in validparams: raise CDFError(const.BAD_COMPRESSION) param = validparams[comptype.value][0] paramlist = (ctypes.c_long * 1)(param) obj._call(const.PUT_, COMPRESSION_, comptype, paramlist) params = (ctypes.c_long * const.CDF_MAX_PARMS)(*([0] * const.CDF_MAX_PARMS)) comptype = ctypes.c_long(0) percent = ctypes.c_long(0) obj._call(const.GET_, COMPRESSION_, ctypes.byref(comptype), ctypes.byref(params), ctypes.byref(percent)) param = params[0] if not comptype.value in comptypevalues: raise CDFError(const.BAD_COMPRESSION) validparamvalues = [i.value for i in validparams[comptype.value]] if not param in validparamvalues: raise CDFError(const.BAD_COMPRESSION_PARM) comptype = comptypes[comptypevalues.index(comptype.value)] if comptype in (const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION): param = validparams[comptype.value][validparamvalues.index(param)] return (comptype, param) class CDF(MutableMapping): """ Python object representing a CDF file. Open or create a CDF file by creating an object of this class. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. A readonly CDF with many variables may be slow to close. See :meth:`readonly`. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :meth:`close` or :meth:`save` when done. .. note:: Existing CDF files are opened read-only by default, see :meth:`readonly` to change. CDF supports the `with <http://docs.python.org/tutorial/inputoutput.html#methods-of-file-objects>`_ keyword, like other file objects, so: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... #do brilliant things with the CDF will open the CDF, execute the indented statements, and close the CDF when finished or when an error occurs. The `python docs <http://docs.python.org/reference/compound_stmts.html#with>`_ include more detail on this 'context manager' ability. 
CDF objects behave like a python `dictionary <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_, where the keys are names of variables in the CDF, and the values, :class:`Var` objects. As a dictionary, they are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_ and it is easy to loop over all of the variables in a file. Some examples: #. List the names of all variables in the open CDF ``cdffile``: >>> cdffile.keys() >>> for k in cdffile: #Alternate ... print(k) #. Get a :class:`Var` object for the variable named ``Epoch``: >>> epoch = cdffile['Epoch'] #. Determine if a CDF contains a variable named ``B_GSE``: >>> if 'B_GSE' in cdffile: ... print('B_GSE is in the file') ... else: ... print('B_GSE is not in the file') #. Find how many variables are in the file: >>> print(len(cdffile)) #. Delete the variable ``Epoch`` from the open CDF file ``cdffile``: >>> del cdffile['Epoch'] #. Display a summary of variables and types in open CDF file ``cdffile``: >>> print(cdffile) #. Open the CDF named ``cdf_filename.cdf``, read *all* the data from all variables into dictionary ``data``, and close it when done or if an error occurs: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... data = cdffile.copy() This last example can be very inefficient as it reads the entire CDF. Normally it's better to treat the CDF as a dictionary and access only the data needed, which will be pulled transparently from disc. See :class:`Var` for more subtle examples. Potentially useful dictionary methods and related functions: - `in <http://docs.python.org/reference/expressions.html#in>`_ - `keys <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_ - :py:func:`len` - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - :py:func:`sorted` - :py:func:`~spacepy.toolbox.dictree` The CDF user's guide section 2.2 has more background information on CDF files. The :attr:`~CDF.attrs` Python attribute acts as a dictionary referencing CDF attributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`gAttrList` for more on the dictionary of global attributes. Creating a new CDF from a master (skeleton) CDF has similar syntax to opening one: >>> cdffile = pycdf.CDF('cdf_filename.cdf', 'master_cdf_filename.cdf') This creates and opens ``cdf_filename.cdf`` as a copy of ``master_cdf_filename.cdf``. Using a skeleton CDF is recommended over making a CDF entirely from scratch, but this is possible by specifying a blank master: >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') When CDFs are created in this way, they are opened read-write, see :py:meth:`readonly` to change. By default, new CDFs (without a master) are created in version 2 (backward-compatible) format. To create a version 3 CDF, use :meth:`Library.set_backward`: >>> pycdf.lib.set_backward(False) >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') Add variables by direct assignment, which will automatically set type and dimension based on the data provided: >>> cdffile['new_variable_name'] = [1, 2, 3, 4] or, if more control is needed over the type and dimensions, use :py:meth:`new`. Although it is supported to assign Var objects to Python variables for convenience, there are some minor pitfalls that can arise when changing a CDF that will not affect most users. 
This is only a concern when assigning a zVar object to a Python variable, changing the CDF through some other variable, and then trying to use the zVar object via the originally assigned variable. Deleting a variable: >>> var = cdffile['Var1'] >>> del cdffile['Var1'] >>> var[0] #fail, no such variable Renaming a variable: >>> var = cdffile['Var1'] >>> cdffile['Var1'].rename('Var2') >>> var[0] #fail, no such variable Renaming via the same variable works: >>> var = cdffile['Var1'] >>> var.rename('Var2') >>> var[0] #succeeds, aware of new name Deleting a variable and then creating another variable with the same name may lead to some surprises: >>> var = cdffile['Var1'] >>> var[...] = [1, 2, 3, 4] >>> del cdffile['Var1'] >>> cdffile.new('Var1', data=[5, 6, 7, 8] >>> var[...] [5, 6, 7, 8] .. autosummary:: ~CDF.attr_num ~CDF.attrs ~CDF.add_attr_to_cache ~CDF.add_to_cache ~CDF.backward ~CDF.checksum ~CDF.clear_attr_from_cache ~CDF.clear_from_cache ~CDF.clone ~CDF.close ~CDF.col_major ~CDF.compress ~CDF.copy ~CDF.from_data ~CDF.new ~CDF.raw_var ~CDF.readonly ~CDF.save ~CDF.var_num ~CDF.version .. attribute:: CDF.attrs Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. .. attribute:: CDF.backward True if this CDF was created in backward-compatible mode (for opening with CDF library before 3.x) .. automethod:: add_to_cache .. automethod:: add_attr_to_cache .. automethod:: attr_num .. automethod:: checksum .. automethod:: clear_from_cache .. automethod:: clear_attr_from_cache .. automethod:: clone .. automethod:: close .. automethod:: col_major .. automethod:: compress .. automethod:: copy .. automethod:: from_data .. automethod:: new .. automethod:: raw_var .. automethod:: readonly .. automethod:: save .. automethod:: var_num .. automethod:: version """ def __init__(self, pathname, masterpath=None, create=None, readonly=None): """Open or create a CDF file. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. Raises ====== CDFError if CDF library reports an error CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save` when done. 
""" if masterpath is not None: #Looks like we want to create if create is False: raise ValueError('Cannot specify a master CDF without creating a CDF') if readonly is True: raise ValueError('Cannot create a CDF in readonly mode') if create and readonly: raise ValueError('Cannot create a CDF in readonly mode') try: self.pathname = pathname.encode() except AttributeError: raise ValueError( 'pathname must be string-like: {0}'.format(pathname)) self._handle = ctypes.c_void_p(None) self._opened = False if masterpath is None and not create: self._open(True if readonly is None else readonly) elif masterpath: self._from_master(masterpath.encode()) else: self._create() lib.call(const.SELECT_, const.CDF_zMODE_, ctypes.c_long(2)) self._attrlistref = weakref.ref(gAttrList(self)) self.backward = self.version()[0] < 3 self._var_nums = {} """Cache of name-to-number mappings for variables in this CDF""" self._attr_info = {} """Cache of name-to-(number, global) mappings for attributes in this CDF""" def __del__(self): """Destructor; called when CDF object is destroyed. Close CDF file if there is still a valid handle. .. note:: To avoid data loss, explicitly call :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save`. """ if self._opened: self.close() def __delitem__(self, name): """Delete a zVariable in this CDF, by name or number Parameters ========== name : string or int Name or number of the CDF variable .. note: Variable numbers may change if variables are added or removed. Examples ======== Delete the variable ``Epoch`` from the open CDF file ``cdffile``. >>> del cdffile['Epoch'] """ self[name]._delete() def __enter__(self): """Context manager entrance function.""" return self def __exit__(self, type, value, traceback): """Context manager exit function. Close CDF file. """ self.close() def __getitem__(self, name): """Gets a zVariable in this CDF, by name or number The CDF acts like a dict @param name: Name or number of the CDF variable @type name: string or int @return: CDF variable named or numbered L{name} @rtype: :py:class:`pycdf.Var` @raise KeyError: for pretty much any problem in lookup @note: variable numbers may change if variables are added or removed. """ try: return Var(self, name) except CDFException as e: raise KeyError('{0}: {1}'.format(name, e)) def __setitem__(self, name, data): """Writes data to a zVariable in this CDF If the zVariable does not exist, will create one matching L{data}. If it does exist, will attempt to write L{data} to it without changing the type or dimensions. @param name: name or number of the variable to write @type name: str or int @param data: data to write, or a :py:class:`pycdf.Var` to copy """ if isinstance(data, Var): self.clone(data, name) elif name in self: self[name][...] 
= data if hasattr(data, 'attrs'): self[name].attrs.clone(data.attrs) else: self.new(name, data) def __iter__(self, current = 0): """Iterates over zVars in CDF Iterators for dicts return keys @note: Returned in variable-number order """ while current < self.__len__(): name = self[current].name() value = (yield name) if value is None: current += 1 else: current = self[value]._num() current += 1 def __len__(self): """Implements 'length' of CDF (number of zVars) @return: number of zVars in the CDF @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, const.CDF_NUMzVARS_, ctypes.byref(count)) return count.value def __contains__(self, key): """Determines whether a particular variable name is in the CDF @note: Essentially an efficiency function; L{__iter__} is called if this isn't defined @param key: key/variable name to check @type key: string @return: True if L{key} is the name of a variable in CDF, else False @rtype: Boolean """ try: foo = self[key] return True except KeyError as e: expected = str(key) + \ ": NO_SUCH_VAR: Named variable not found in this CDF." if expected in e.args: return False raise def __repr__(self): """Returns representation of CDF Cannot return anything that can be eval'd to create a copy of the CDF, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<CDF:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the CDF This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.CDF`, just the names, types, and sizes of all variables. (Attributes are not listed.) @return: description of the variables in the CDF @rtype: str """ if self._opened: return '\n'.join([key + ': ' + str(value) for (key, value) in sorted(self.items())]) #can get away with this sort because second value in tuple isn't #compared unless first are different, and variable name is unique. else: if isinstance(self.pathname, str): return 'Closed CDF {0}'.format(self.pathname) else: return 'Closed CDF {0}'.format(self.pathname.decode('ascii')) def _open(self, readonly=True): """Opens the CDF file (called on init) Will open an existing CDF file read/write. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.OPEN_, const.CDF_, self.pathname, ctypes.byref(self._handle)) self._opened = True if readonly: #Default is RW self.readonly(readonly) def _create(self): """Creates (and opens) a new CDF file Created at ``pathname``. Assumes zero-dimension r variables Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.CREATE_, const.CDF_, self.pathname, ctypes.c_long(0), (ctypes.c_long * 1)(0), ctypes.byref(self._handle)) self._opened = True def _from_master(self, master_path): """Creates a new CDF from a master CDF file ``master_path`` is copied to ``pathname`` and opened. Parameters ========== master_path : string location of the master CDF file Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. 
note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ if os.path.exists(self.pathname): raise CDFError(const.CDF_EXISTS) shutil.copy2(master_path, self.pathname) self._open(False) def _call(self, *args, **kwargs): """Select this CDF as current and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. Parameters ========== args : various, see :py:mod:`ctypes`. Passed directly to the CDF library interface. Useful constants are defined in the :doc:`const <pycdf_const>` module of this package. Returns ======= out : ctypes.c_long CDF status from the library .. note: Terminal NULL_ is automatically added to ``args``. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. """ return lib.call(const.SELECT_, const.CDF_, self._handle, *args, **kwargs) def clone(self, zVar, name=None, data=True): """ Clone a zVariable (from another CDF or this) into this CDF Parameters ========== zVar : :py:class:`Var` variable to clone Other Parameters ================ name : str Name of the new variable (default: name of the original) data : boolean (optional) Copy data, or only type, dimensions, variance, attributes? (default: True, copy data as well) Returns ======= out : :py:class:`Var` The newly-created zVar in this CDF """ if name is None: name = zVar.name() if name in self: del self[name] self.new(name, type=zVar.type(), recVary=zVar.rv(), dimVarys=zVar.dv(), dims=zVar._dim_sizes(), n_elements=zVar._nelems()) self[name].compress(*zVar.compress()) self[name].attrs.clone(zVar.attrs) if data: r = zVar._raw zVar._raw = True self.raw_var(name)[...] = zVar[...] zVar._raw = r return zVar def col_major(self, new_col=None): """ Finds the majority of this CDF file Other Parameters ================ new_col : boolean Specify True to change to column-major, False to change to row major, or do not specify to check the majority rather than changing it. (default is check only) Returns ======= out : boolean True if column-major, false if row-major """ if new_col != None: new_maj = const.COLUMN_MAJOR if new_col else const.ROW_MAJOR self._call(const.PUT_, const.CDF_MAJORITY_, new_maj) maj = ctypes.c_long(0) self._call(const.GET_, const.CDF_MAJORITY_, ctypes.byref(maj)) if not maj.value in (const.ROW_MAJOR.value, const.COLUMN_MAJOR.value): raise CDFError(const.BAD_MAJORITY) return maj.value == const.COLUMN_MAJOR.value # MASKED: readonly function (lines 1979-2023) def checksum(self, new_val=None): """ Set or check the checksum status of this CDF. If checksums are enabled, the checksum will be verified every time the file is opened. Other Parameters ================ new_val : boolean True to enable checksum, False to disable, or leave out to simply check. 
Returns ======= out : boolean True if the checksum is enabled or False if disabled """ if new_val != None: self._call(const.PUT_, const.CDF_CHECKSUM_, const.MD5_CHECKSUM if new_val else const.NO_CHECKSUM) chk = ctypes.c_long(0) self._call(const.GET_, const.CDF_CHECKSUM_, ctypes.byref(chk)) if not chk.value in (const.MD5_CHECKSUM.value, const.NO_CHECKSUM.value): raise CDFError(const.BAD_CHECKSUM) return chk.value == const.MD5_CHECKSUM.value def close(self): """ Closes the CDF file Although called on object destruction (:meth:`~CDF.__del__`), to ensure all data are saved, the user should explicitly call :meth:`~CDF.close` or :meth:`~CDF.save`. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.CLOSE_, const.CDF_) self._opened = False def compress(self, comptype=None, param=None): """ Set or check the compression of this CDF Sets compression on entire *file*, not per-variable. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) Returns ======= out : tuple (comptype, param) currently in effect See Also ======== :meth:`Var.compress` Examples ======== Set file ``cdffile`` to gzip compression, compression level 9: >>> cdffile.compress(pycdf.const.GZIP_COMPRESSION, 9) """ return _compress(self, comptype, param) def new(self, name, data=None, type=None, recVary=True, dimVarys=None, dims=None, n_elements=None, compress=None, compress_param=None): """ Create a new zVariable in this CDF .. note:: Either ``data`` or ``type`` must be specified. If type is not specified, it is guessed from ``data``. Parameters ========== name : str name of the new variable Other Parameters ================ data data to store in the new variable. If this has a an ``attrs`` attribute (e.g., :class:`~spacepy.datamodel.dmarray`), it will be used to populate attributes of the new variable. type : ctypes.c_long CDF type of the variable, from :mod:`~pycdf.const`. See section 2.5 of the CDF user's guide for more information on CDF data types. recVary : boolean record variance of the variable (default True) dimVarys : list of boolean dimension variance of each dimension, default True for all dimensions. dims : list of int size of each dimension of this variable, default zero-dimensional. Note this is the dimensionality as defined by CDF, i.e., for record-varying variables it excludes the leading record dimension. See :py:class:`Var`. n_elements : int number of elements, should be 1 except for CDF_CHAR, for which it's the length of the string. compress : ctypes.c_long Compression to apply to this variable, default None. See :py:meth:`Var.compress`. compress_param : ctypes.c_long Compression parameter if compression used; reasonable default is chosen. See :py:meth:`Var.compress`. Returns ======= out : :py:class:`Var` the newly-created zVariable Raises ====== ValueError : if neither data nor sufficient typing information is provided. 
Notes ===== Any given data may be representable by a range of CDF types; if the type is not specified, pycdf will guess which the CDF types which can represent this data. This breaks down to: #. If input data is a numpy array, match the type of that array #. Proper kind (numerical, string, time) #. Proper range (stores highest and lowest number provided) #. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: #. Type that matches precision of data first, then #. integer type before float type, then #. Smallest type first, then #. signed type first, then #. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if ``data`` specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: #. absolute values between 0 and 3e-39 #. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. """ if type in (const.CDF_EPOCH16, const.CDF_INT8, const.CDF_TIME_TT2000) \ and self.backward: raise ValueError('Cannot use EPOCH16, INT8, or TIME_TT2000 ' 'in backward-compatible CDF') if not lib.supports_int8 and \ type in (const.CDF_INT8, const.CDF_TIME_TT2000): raise ValueError('INT8 and TIME_TT2000 require CDF library 3.4.0') if data is None: if type is None: raise ValueError('Must provide either data or a CDF type.') if dims is None: dims = [] if n_elements is None: n_elements = 1 else: (guess_dims, guess_types, guess_elements) = _Hyperslice.types(data) if dims is None: if recVary: if guess_dims == (): raise ValueError( 'Record-varying data cannot be scalar. ' 'Specify NRV with CDF.new() or put data in array.') dims = guess_dims[1:] else: dims = guess_dims if type is None: type = guess_types[0] if type == const.CDF_EPOCH16.value and self.backward: type = const.CDF_EPOCH if n_elements is None: n_elements = guess_elements if dimVarys is None: dimVarys = [True for i in dims] recVary = const.VARY if recVary else const.NOVARY dimVarys = [const.VARY if dimVary else const.NOVARY for dimVary in dimVarys] if not hasattr(type, 'value'): type = ctypes.c_long(type) if type.value == const.CDF_INT8.value and not lib.supports_int8: raise ValueError( '64-bit integer support require CDF library 3.4.0') if type.value in (const.CDF_EPOCH16.value, const.CDF_INT8.value, const.CDF_TIME_TT2000.value) \ and self.backward: raise ValueError('Data requires EPOCH16, INT8, or TIME_TT2000; ' 'incompatible with backward-compatible CDF') new_var = Var(self, name, type, n_elements, dims, recVary, dimVarys) if compress != None: new_var.compress(compress, compress_param) if data is not None: new_var[...] = data if hasattr(data, 'attrs'): new_var.attrs.clone(data.attrs) return new_var def raw_var(self, name): """ Get a "raw" :class:`Var` object. Normally a :class:`Var` will perform translation of values for certain types (to/from Unicode for CHAR variables on Py3k, and to/from datetime for all time types). A "raw" object does not perform this translation, on read or write. This does *not* affect the data on disk, and in fact it is possible to maintain multiple Python objects with access to the same zVariable. 
Parameters ========== name : str name or number of the zVariable """ v = self[name] v._raw = True return v def save(self): """ Saves the CDF file but leaves it open. If closing the CDF, :meth:`close` is sufficient; there is no need to call :meth:`save` before :meth:`close`. .. note:: Relies on an undocumented call of the CDF C library, which is also used in the Java interface. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.SAVE_, const.CDF_) def copy(self): """ Make a copy of all data and attributes in this CDF Returns ======= out : :py:class:`CDFCopy` :class:`~spacepy.datamodel.SpaceData`-like object of all data """ return CDFCopy(self) def version(self): """ Get version of library that created this CDF Returns ======= out : tuple version of CDF library, in form (version, release, increment) """ ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) self._call(const.GET_, const.CDF_VERSION_, ctypes.byref(ver), const.GET_, const.CDF_RELEASE_, ctypes.byref(rel), const.GET_, const.CDF_INCREMENT_, ctypes.byref(inc)) return (ver.value, rel.value, inc.value) def _get_attrs(self): """Get attribute list Provide access to the CDF's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = gAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. """) def var_num(self, varname): """Get the variable number of a particular variable name This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : int Variable number of this zvariable. """ num = self._var_nums.get(varname, None) if num is None: #Copied from Var._get, which can hopefully be thinned varNum = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMBER_, varname, ctypes.byref(varNum)) num = varNum.value self._var_nums[varname] = num return num def attr_num(self, attrname): """Get the attribute number and scope by attribute name This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the zVariable. Not this is NOT a string in Python 3! 
Raises ====== CDFError : if variable is not found Returns ======= out : tuple attribute number, scope (True for global) of this attribute """ res = self._attr_info.get(attrname, None) if res is None: #Copied from Var._get, which can hopefully be thinned attrNum = ctypes.c_long(0) self._call(const.GET_, const.ATTR_NUMBER_, attrname, ctypes.byref(attrNum)) scope = ctypes.c_long(0) self._call(const.SELECT_, const.ATTR_, attrNum, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) res = (attrNum.value, scope) self._attr_info[attrname] = res return res def clear_attr_from_cache(self, attrname): """Mark an attribute deleted in the name-to-number cache Will remove an attribute, and all attributes with higher numbers, from the attribute cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the attribute. Not this is NOT a string in Python 3! """ num, scope = self.attr_num(attrname) #All numbers higher than this are renumbered for a, n in list(self._attr_info.items()): if n[0] >= num: del self._attr_info[a] def clear_from_cache(self, varname): """Mark a variable deleted in the name-to-number cache Will remove a variable, and all variables with higher numbers, from the variable cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! """ num = self.var_num(varname) #All numbers higher than this are renumbered for v, n in list(self._var_nums.items()): if n >= num: del self._var_nums[v] def add_attr_to_cache(self, attrname, num, scope): """Add an attribute to the name-to-number cache This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable scope : bool True if global scope; False if variable scope. """ self._attr_info[attrname] = (num, scope) def add_to_cache(self, varname, num): """Add a variable to the name-to-number cache This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable """ self._var_nums[varname] = num #Note there is no function for delete, currently handled in Var.rename #and Attr.rename by just deleting from the dict directly. Maybe this #should be differen (maybe should be possible to follow a variable across #a rename...) class Var(MutableSequence): """ A CDF variable. This object does not directly store the data from the CDF; rather, it provides access to the data in a format that much like a Python list or numpy :class:`~numpy.ndarray`. 
General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. The CDF user's guide, section 2.3, provides background on variables. .. note:: Not intended to be created directly; use methods of :class:`CDF` to gain access to a variable. A record-varying variable's data are viewed as a hypercube of dimensions n_dims+1 (the extra dimension is the record number). They are indexed in row-major fashion, i.e. the last index changes most frequently / is contiguous in memory. If the CDF is column-major, the data are transformed to row-major before return. Non record-varying variables are similar, but do not have the extra dimension of record number. Variables can be subscripted by a multidimensional index to return the data. Indices are in row-major order with the first dimension representing the record number. If the CDF is column major, the data are reordered to row major. Each dimension is specified by standard Python `slice <http://docs.python.org/tutorial/introduction.html#strings>`_ notation, with dimensions separated by commas. The ellipsis fills in any missing dimensions with full slices. The returned data are lists; Python represents multidimensional arrays as nested lists. The innermost set of lists represents contiguous data. .. note:: numpy 'fancy indexing' is *not* supported. Degenerate dimensions are 'collapsed', i.e. no list of only one element will be returned if a single subscript is specified instead of a range. (To avoid this, specify a slice like 1:2, which starts with 1 and ends before 2). Two special cases: 1. requesting a single-dimension slice for a record-varying variable will return all data for that record number (or those record numbers) for that variable. 2. Requests for multi-dimensional variables may skip the record-number dimension and simply specify the slice on the array itself. In that case, the slice of the array will be returned for all records. In the event of ambiguity (e.g., single-dimension slice on a one-dimensional variable), case 1 takes priority. Otherwise, mismatch between the number of dimensions specified in the slice and the number of dimensions in the variable will cause an :exc:`~exceptions.IndexError` to be thrown. This all sounds very complicated but it is essentially attempting to do the 'right thing' for a range of slices. An unusual case is scalar (zero-dimensional) non-record-varying variables. Clearly they cannot be subscripted normally. In this case, use the ``[...]`` syntax meaning 'access all data.': >>> import pycdf >>> testcdf = pycdf.CDF('test.cdf', '') >>> variable = testcdf.new('variable', recVary=False, ... type=pycdf.const.CDF_INT4) >>> variable[...] = 10 >>> variable <Var: CDF_INT4 [] NRV > >>> variable[...] 10 Reading any empty non-record-varying variable will return an empty with the same *number* of dimensions, but all dimensions will be of zero length. The scalar is, again, a special case: due to the inability to have a numpy array which is both zero-dimensional and empty, reading an NRV scalar variable with no data will return an empty one-dimensional array. This is really not recommended. As a list type, variables are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_; iterating over a variable returns a single complete record at a time. This is all clearer with examples. 
Consider a variable ``B_GSM``, with three elements per record (x, y, z components) and fifty records in the CDF. Then: 1. ``B_GSM[0, 1]`` is the y component of the first record. 2. ``B_GSM[10, :]`` is a three-element list, containing x, y, and z components of the 11th record. As a shortcut, if only one dimension is specified, it is assumed to be the record number, so this could also be written ``B_GSM[10]``. 3. ``B_GSM[...]`` reads all data for ``B_GSM`` and returns it as a fifty-element list, each element itself being a three-element list of x, y, z components. Multidimensional example: consider fluxes stored as a function of pitch angle and energy. Such a variable may be called Flux and stored as a two-dimensional array, with the first dimension representing (say) ten energy steps and the second, eighteen pitch angle bins (ten degrees wide, centered from 5 to 175 degrees). Assume 100 records stored in the CDF (i.e. 100 different times). 1. ``Flux[4]`` is a list of ten elements, one per energy step, each element being a list of 18 fluxes, one per pitch bin. All are taken from the fifth record in the CDF. 2. ``Flux[4, :, 0:4]`` is the same record, all energies, but only the first four pitch bins (roughly, field-aligned). 3. ``Flux[..., 0:4]`` is a 100-element list (one per record), each element being a ten-element list (one per energy step), each containing fluxes for the first four pitch bins. This slicing notation is very flexible and allows reading specifically the desired data from the CDF. All data are, on read, converted to appropriate Python data types; EPOCH, EPOCH16, and TIME_TT2000 types are converted to :class:`~datetime.datetime`. Data are returned in numpy arrays. .. note:: Although pycdf supports TIME_TT2000 variables, the Python :class:`~datetime.datetime` object does not support leap seconds. Thus, on read, any seconds past 59 are truncated to 59.999999 (59 seconds, 999 milliseconds, 999 microseconds). Potentially useful list methods and related functions: - `count <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `in <http://docs.python.org/reference/expressions.html#in>`_ - `index <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `len <http://docs.python.org/library/functions.html#len>`_ - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - `sorted <http://docs.python.org/library/functions.html#sorted>`_ The topic of array majority can be very confusing; good background material is available at `IDL Array Storage and Indexing <http://www.idlcoyote.com/misc_tips/colrow_major.html>`_. In brief, *regardless of the majority stored in the CDF*, pycdf will always present the data in the native Python majority, row-major order, also known as C order. This is the default order in `NumPy <http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html #internal-memory-layout-of-an-ndarray>`_. However, packages that render image data may expect it in column-major order. If the axes seem 'swapped' this is likely the reason. The :attr:`~Var.attrs` Python attribute acts as a dictionary referencing zAttributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`zAttrList` for more on the dictionary of attributes. With writing, as with reading, every attempt has been made to match the behavior of Python lists. You can write one record, many records, or even certain elements of all records. 
There is one restriction: only the record dimension (i.e. dimension 0) can be resized by write, as all records in a variable must have the same dimensions. Similarly, only whole records can be deleted. .. note:: Unusual error messages on writing data usually mean that pycdf is unable to interpret the data as a regular array of a single type matching the type and shape of the variable being written. A 5x4 array is supported; an irregular array where one row has five columns and a different row has six columns is not. Error messages of this type include: - ``Data must be well-formed, regular array of number, string, or datetime`` - ``setting an array element with a sequence.`` - ``shape mismatch: objects cannot be broadcast to a single shape`` For these examples, assume Flux has 100 records and dimensions [2, 3]. Rewrite the first record without changing the rest: >>> Flux[0] = [[1, 2, 3], [4, 5, 6]] Writes a new first record and delete all the rest: >>> Flux[...] = [[1, 2, 3], [4, 5, 6]] Write a new record in the last position and add a new record after: >>> Flux[99:] = [[[1, 2, 3], [4, 5, 6]], ... [[11, 12, 13], [14, 15, 16]]] Insert two new records between the current number 5 and 6: >>> Flux[5:6] = [[[1, 2, 3], [4, 5, 6]], [[11, 12, 13], ... [14, 15, 16]]] This operation can be quite slow, as it requires reading and rewriting the entire variable. (CDF does not directly support record insertion.) Change the first element of the first two records but leave other elements alone: >>> Flux[0:2, 0, 0] = [1, 2] Remove the first record: >>> del Flux[0] Removes record 5 (the sixth): >>> del Flux[5] Due to the need to work around a bug in the CDF library, this operation can be quite slow. Delete *all data* from ``Flux``, but leave the variable definition intact: >>> del Flux[...] .. note:: Although this interface only directly supports zVariables, zMode is set on opening the CDF so rVars appear as zVars. See p.24 of the CDF user's guide; pyCDF uses zMode 2. .. autosummary:: ~Var.attrs ~Var.compress ~Var.copy ~Var.dtype ~Var.dv ~Var.insert ~Var.name ~Var.rename ~Var.rv ~Var.shape ~Var.type .. attribute:: Var.attrs zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. .. automethod:: compress .. automethod:: copy .. autoattribute:: dtype .. automethod:: dv .. automethod:: insert .. automethod:: name .. automethod:: rename .. automethod:: rv .. autoattribute:: shape .. automethod:: type """ def __init__(self, cdf_file, var_name, *args): """Create or locate a variable Parameters ========== cdf_file : :py:class:`pycdf.CDF` CDF file containing this variable var_name : string name of this variable Other Parameters ================ args additional arguments passed to :py:meth:`_create`. If none, opens an existing variable. If provided, creates a new one. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning """ self.cdf_file = cdf_file #This is the definitive "identify" of variable self._name = None self._type = None #CDF type (long) self._raw = False #Raw access (skip all conversions) if len(args) == 0: self._get(var_name) else: self._create(var_name, *args) #Weak reference to attribute list (use attrs instead) #This avoids a reference loop self._attrlistref = weakref.ref(zAttrList(self)) def __getitem__(self, key): """Returns a slice from the data array. Details under :py:class:`pycdf.Var`. @return: The data from this variable @rtype: list-of-lists of appropriate type. 
@raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) #Hyperslice mostly catches this sort of thing, but #an empty variable is a special case, since we might want to #WRITE to 0th record (which Hyperslice also supports) but #can't READ from it, and iterating over tries to read from it. if hslice.rv: if hslice.dimsizes[0] == 0 and hslice.degen[0] and \ hslice.starts[0] == 0: raise IndexError('record index out of range') #For NRV, again hslice will assume 0th record exists since we might #want to write. So ANY degenerate dim other than the glued-on 0th #suggests an explicit index that should fail. None degenerate suggests #make an empty array. #Note this is pulling a lot of hyperslice stuff into getitem! elif hslice.dimsizes[0] == 0: if len(hslice.degen) > 1 and max(hslice.degen[1:]): raise IndexError('record index out of range') else: #The zero-length dimension is degenerate so it gets chopped, #and you can't have a zero-length numpy array that still #maintains the size of all other dimensions. So just force #a zero-dim array and the rest will follow hslice.counts[...] = 0 #If this is a scalar, need to make a single non-degenerate #dimension so it can be empty. if len(hslice.counts) == 1: hslice.degen[0] = False result = hslice.create_array() if hslice.counts[0] != 0: hslice.select() lib.call(const.GET_, const.zVAR_HYPERDATA_, result.ctypes.data_as(ctypes.c_void_p)) return hslice.convert_input_array(result) def __delitem__(self, key): """Removes a record (or set of records) from the CDF Only whole records can be deleted, so the del call must either specify only one dimension or it must specify all elements of the non-record dimensions. This is *not* a way to resize a variable! Deleting records from the middle of a variable may be very slow in some circumstances. To work around a bug in CDF library versions 3.4.0 and before, all the data must be read in, the requested deletions done, and then all written back out. @param key: index or slice to delete @type key: int or slice @raise TypeError: if an attempt is made to delete from a non record-varying variable, or to delete below the record level """ if not self.rv(): raise TypeError('Cannot delete records from non-record-varying ' 'variable.') hslice = _Hyperslice(self, key) if hslice.dims > 1 and (hslice.counts[1:] != hslice.dimsizes[1:]).any(): raise TypeError('Can only delete entire records.') if hslice.counts[0] == 0: return start = hslice.starts[0] count = hslice.counts[0] interval = hslice.intervals[0] dimsize = hslice.dimsizes[0] self._call() dangerous_delete = False if lib._del_middle_rec_bug and \ (interval != 1 or (start != 0 and start + count < dimsize)): #delete from middle is dangerous if only have one index entry entries = ctypes.c_long(0) lib.call(const.GET_, const.zVAR_nINDEXENTRIES_, ctypes.byref(entries)) dangerous_delete = (entries.value == 1) if dangerous_delete: data = self[...] 
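            # Workaround for the single-index-entry delete bug: with the
            # entire variable read into memory, drop the requested records
            # from the in-memory copy, write the shortened data back, then
            # truncate the now-surplus trailing records from the CDF.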
data = numpy.delete( data, numpy.arange(start, start + count * interval, interval), 0) self[0:dimsize - count] = data first_rec = dimsize - count last_rec = dimsize - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif interval == 1: first_rec = ctypes.c_long(start) last_rec = ctypes.c_long(start + count - 1) lib.call(const.DELETE_, const.zVAR_RECORDS_, first_rec, last_rec) else: self._call() #delete from end to avoid renumbering of records for recno in range(start + (count - 1) * interval, start - 1, -1 * interval): lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(recno), ctypes.c_long(recno)) def __setitem__(self, key, data): """Puts a slice into the data array. Details under :py:class:`pycdf.Var`. @param key: index or slice to store @type key: int or slice @param data: data to store @type data: numpy.array @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. IndexError will @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) n_recs = hslice.counts[0] hslice.expand(data) cdf_type = self.type() if cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) else: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=self._np_type()) if cdf_type == const.CDF_EPOCH16.value: datashape = data.shape[:-1] else: datashape = data.shape #Check data sizes if datashape != tuple(hslice.expected_dims()): raise ValueError('attempt to assign data of dimensions ' + str(datashape) + ' to slice of dimensions ' + str(tuple(hslice.expected_dims()))) #Flip majority and reversed dimensions, see convert_input_array data = hslice.convert_output_array(data) #Handle insertions and similar weirdness if hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Specified slice ends before last record, so insert in middle saved_data = self[hslice.starts[0] + n_recs:] if hslice.counts[0] > 0: hslice.select() lib.call(const.PUT_, const.zVAR_HYPERDATA_, data.ctypes.data_as(ctypes.c_void_p)) if hslice.counts[0] < n_recs: first_rec = hslice.starts[0] + hslice.counts[0] last_rec = hslice.dimsizes[0] - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Put saved data in after inserted data self[hslice.starts[0] + hslice.counts[0]:] = saved_data def extend(self, data): """ Append multiple values to the end of this variable This is an efficiency function which overrides the base implementation in MutableSequence. 
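        Equivalent to ``variable[len(variable):] = data``, done as a single
        hyper-write. A hypothetical sketch, reusing the 2x3 ``Flux`` variable
        from the class docstring:

        >>> Flux.extend([[[1, 2, 3], [4, 5, 6]]])   # append one record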
Parameters ---------- data : the data to append """ self[len(self):] = data def insert(self, index, data): """ Inserts a *single* record before an index Parameters ---------- index : int index before which to insert the new record data : the record to insert """ self[index:index] = [data] def _create(self, var_name, datatype, n_elements = 1, dims = (), recVary = const.VARY, dimVarys = None): """Creates a new zVariable @param var_name: name of this variable @type var_name: string @param datatype: CDF data type @type datatype: ctypes.c_long @param n_elements: number of elements (should be 1 except for CDF_CHAR variables). @type n_elements: long @param dims: size of each dimension for multi-dimensional variable, or empty for a zero-dimensional @type dims: sequence of long @param recVary: record variance for this variable (VARY/NOVARY) @type recVary: long @param dimVarys: array of VARY or NOVARY, variance for each dimension @type dimVarys: sequence of long @return: new variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.new}. """ dim_array = (ctypes.c_long * len(dims))(*dims) enc_name = var_name.encode('ascii') if dimVarys is None: dim_vary_array = (ctypes.c_long * (len(dims) if len(dims) > 0 else 1))(const.VARY) else: dim_vary_array = (ctypes.c_long * len(dims))(*dimVarys) varNum = ctypes.c_long(0) self.cdf_file._call(const.CREATE_, const.zVAR_, enc_name, datatype, ctypes.c_long(n_elements), ctypes.c_long(len(dims)), dim_array, recVary, dim_vary_array, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) def _delete(self): """Removes this zVariable from the CDF @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ self._call(const.DELETE_, const.zVAR_) self.cdf_file.clear_from_cache(self._name) self._name = None def _get(self, var_name): """Gets an existing zVariable @param var_name: name of this variable @type var_name: string @return: variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.__getitem__}. 
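        @note: Variables may also be looked up by number; e.g. (hypothetical)
        Var(cdf_file, 0) opens the zVariable numbered 0.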
""" if isinstance(var_name, str_classes): try: enc_name = var_name.encode('ascii').rstrip() except AttributeError: enc_name = var_name.rstrip() #already in ASCII #'touch' CDF to cause an error if the name isn't there; get number varNum = ctypes.c_long(0) self.cdf_file._call(const.GET_, const.zVAR_NUMBER_, enc_name, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) else: #Looking up by number name = ctypes.create_string_buffer(const.CDF_VAR_NAME_LEN256+1) self.cdf_file._call(const.SELECT_, const.zVAR_, ctypes.c_long(var_name), const.GET_, const.zVAR_NAME_, name) self._name = name.value.rstrip() self.cdf_file.add_to_cache(self._name, var_name) def _num(self): """Returns the zVar number for this variable @return: number of this zVar @rtype: int """ return self.cdf_file.var_num(self._name) def __len__(self): """Get number of records for this variable in this file @return: Number of records @rtype: long @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ count = ctypes.c_long(0) self._call(const.GET_, const.zVAR_MAXREC_, ctypes.byref(count)) return (count.value + 1) def __repr__(self): """Returns representation of the variable Cannot return anything that can be eval'd to create a copy, so just wrap the informal representation in angle brackets. @return: info on this zVar @rtype: str """ return '<Var:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the variable This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.Var`. @return: info on this zVar, CDFTYPE [dimensions] NRV (if not record-varying) @rtype: str """ if self.cdf_file._opened: cdftype = self.type() chartypes = (const.CDF_CHAR.value, const.CDF_UCHAR.value) rv = self.rv() typestr = lib.cdftypenames[cdftype] + \ ('*' + str(self._nelems()) if cdftype in chartypes else '' ) if rv: sizestr = str([len(self)] + self._dim_sizes()) else: sizestr = str(self._dim_sizes()) return typestr + ' ' + sizestr + ('' if rv else ' NRV') else: if isinstance(self._name, str): return 'zVar "{0}" in closed CDF {1}'.format( self._name, self.cdf_file.pathname) else: return 'zVar "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self.cdf_file.pathname.decode('ascii')) def _n_dims(self): """Get number of dimensions for this variable @return: the number of dimensions @rtype: long """ n_dims = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMDIMS_, ctypes.byref(n_dims)) return n_dims.value def _dim_sizes(self): """Get the dimension sizes for this variable @return: sequence of sizes @rtype: sequence of long @note: This will always be in Python order (i.e. row major, last index iterates most quickly), *regardless* of the majority of the CDF. """ sizes = (ctypes.c_long * const.CDF_MAX_DIMS)(0) self._call(const.GET_, const.zVAR_DIMSIZES_, sizes) sizes = sizes[0:self._n_dims()] return sizes def rv(self, new_rv=None): """ Gets or sets whether this variable has record variance If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Other Parameters ================ new_rv : boolean True to change to record variance, False to change to NRV, unspecified to simply check variance. 
Returns ======= out : Boolean True if record varying, False if NRV """ if new_rv != None: self._call(const.PUT_, const.zVAR_RECVARY_, const.VARY if new_rv else const.NOVARY) vary = ctypes.c_long(0) self._call(const.GET_, const.zVAR_RECVARY_, ctypes.byref(vary)) return vary.value != const.NOVARY.value def dv(self, new_dv=None): """ Gets or sets dimension variance of each dimension of variable. If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Parameters ========== new_dv : list of boolean Each element True to change that dimension to dimension variance, False to change to not dimension variance. (Unspecified to simply check variance.) Returns ======= out : list of boolean True if that dimension has variance, else false. """ ndims = self._n_dims() if new_dv != None: if len(new_dv) != ndims: raise ValueError('Must specify variance for ' + str(ndims) + 'dimensions.') varies = (ctypes.c_long * ndims)( *[const.VARY if dv else const.NOVARY for dv in new_dv]) self._call(const.PUT_, const.zVAR_DIMVARYS_, varies) if ndims == 0: return [] varies = (ctypes.c_long * const.CDF_MAX_DIMS)() self._call(const.GET_, const.zVAR_DIMVARYS_, varies) return [dv != const.NOVARY.value for dv in varies[0:ndims]] def _call(self, *args, **kwargs): """Select this CDF and variable and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. @param args: Passed directly to the CDF library interface. Useful constants are defined in the :py:mod:`pycdf.const` module of this package. @type args: various, see :py:mod:`ctypes`. @return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self.cdf_file._call( const.SELECT_, const.zVAR_, ctypes.c_long(self.cdf_file.var_num(self._name)), *args, **kwargs) def _np_type(self): """Returns the numpy type of this variable This is the numpy type that will come directly out of the CDF; see :meth:`dtype` for the representation post-conversion. Raises ====== CDFError : for library-reported error or failure to find numpy type Returns ======= out : dtype numpy dtype that will hold value from this variable """ cdftype = self.type() if cdftype == const.CDF_CHAR.value or cdftype == const.CDF_UCHAR.value: return numpy.dtype('S' + str(self._nelems())) try: return lib.numpytypedict[cdftype] except KeyError: raise CDFError(const.BAD_DATA_TYPE) def type(self, new_type=None): """ Returns or sets the CDF type of this variable Parameters ========== new_type : ctypes.c_long the new type from :mod:`~pycdf.const` Returns ======= out : int CDF type """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) n_elements = ctypes.c_long(self._nelems()) self._call(const.PUT_, const.zVAR_DATASPEC_, new_type, n_elements) self._type = None if self._type is None: cdftype = ctypes.c_long(0) self._call(const.GET_, const.zVAR_DATATYPE_, ctypes.byref(cdftype)) self._type = cdftype.value return self._type def _nelems(self): """Number of elements for each value in this variable This is the length of strings for CHAR and UCHAR, should be 1 otherwise. 
@return: length of strings @rtype: int """ nelems = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMELEMS_, ctypes.byref(nelems)) return nelems.value def name(self): """ Returns the name of this variable Returns ======= out : str variable's name """ if isinstance(self._name, str): return self._name elif isinstance(self._name, bytes): return self._name.decode() def compress(self, comptype=None, param=None): """ Set or check the compression of this variable Compression may not be changeable on variables with data already written; even deleting the data may not permit the change. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) Returns ======= out : tuple the (comptype, param) currently in effect """ return _compress(self, comptype, param) def copy(self): """ Copies all data and attributes from this variable Returns ======= out : :class:`VarCopy` list of all data in record order """ return VarCopy(self) def rename(self, new_name): """ Renames this variable Parameters ========== new_name : str the new name for this variable """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_VAR_NAME_LEN256: raise CDFError(const.BAD_VAR_NAME) self._call(const.PUT_, const.zVAR_NAME_, enc_name) self.cdf_file.add_to_cache( enc_name, self.cdf_file.var_num(self._name)) #Still in cache del self.cdf_file._var_nums[self._name] self._name = enc_name @property def shape(self): """ Provides the numpy array-like shape of this variable. Returns a tuple; first element is number of records (RV variable only) And the rest provide the dimensionality of the variable. .. note:: Assigning to this attribute will not change the shape. """ if self.rv(): return tuple([len(self)] + self._dim_sizes()) else: return tuple(self._dim_sizes()) @property def dtype(self): """ Provide the numpy dtype equivalent to the CDF type of this variable. Data from this variable will be returned in numpy arrays of this type. See Also -------- type """ cdftype = self.type() if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str is not bytes and not self._raw: return numpy.dtype('U' + str(self._nelems())) if cdftype in (const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value) and not self._raw: return numpy.dtype('O') return self._np_type() def _get_attrs(self): """Get attribute list Provide access to the zVar's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = zAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. 
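    A hypothetical sketch (``variable`` is an existing :class:`Var`):

    >>> variable.attrs['CATDESC'] = 'Magnetic field, GSM coordinates'
    >>> variable.attrs['FILLVAL'] = -1e31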
""") class _Hyperslice(object): """Represents a CDF 'slice' used for the hyper CDF functions For internal module use only. @ivar dims: number of dimensions to this slice, usually number of dimensions to the variable plus one for the record, which represents the 0th (least rapidly varying) dimension. @type dims: int @ivar dimsizes: size of each dimension (0th is number of records) @type dimsizes: list of int @ivar starts: index of the start value for each dimension ('dimension indices' in CDF speak) @type starts: list of int @ivar counts: number of values to get from each dimension. Final result will be the product of everything in counts. ('dimension counts' in CDF speak) @type counts: numpy.array @ivar intervals: interval between successive indices to use for each dimension. ('dimension invervals' in CDF speak) @type intervals: list of int @ivar degen: is this dimension degenerate, i.e. should be removed in the returned dataset. A 3D array with one dimension degenerate will be returned as a 2D array (i.e. list-of-lists.) @type degen: numpy.array @ivar rev: should this dimension be returned in reverse order? @type rev: numpy.array @ivar column: is this slice in column-major mode (if false, row-major) @type column: boolean @ivar zvar: what CDF variable this object slices on @type zvar: :py:class:`pycdf.Var` @ivar expanded_key: fully-expanded version of the key passed to the constructor (all dimensions filled in) @type expanded_key: tuple @note: All dimension-related variables are stored row-major (Python order) """ def __init__(self, zvar, key): """Create a Hyperslice @param zvar: zVariable that this slices @type zvar: :py:class:`pycdf.Var` @param key: Python multi-dimensional slice as passed to __getitem__ @type key: tuple of slice and/or int @raise IndexError: if slice is out of range, mismatches dimensions, or otherwise unparsable. @raise ValueError: if slice has invalid values """ self.zvar = zvar self.rv = self.zvar.rv() #dim of records, + 1 record dim (NRV always is record 0) self.dims = zvar._n_dims() + 1 self.dimsizes = [len(zvar)] + \ zvar._dim_sizes() self.starts = [0] * self.dims self.counts = numpy.empty((self.dims,), dtype=numpy.int32) self.counts.fill(1) self.intervals = [1] * self.dims self.degen = numpy.zeros(self.dims, dtype=numpy.bool) self.rev = numpy.zeros(self.dims, dtype=numpy.bool) #key is: #1. a single value (integer or slice object) if called 1D #2. a tuple (of integers and/or slice objects) if called nD #3. Each item is either a single value (degenerate dim) # or a slice object. 
if not hasattr(key, '__len__'): #Not a container object, pack in tuple key = (key, ) if not self.rv: key = (0, ) + key #NRV, so always get 0th record (degenerate) key = self.expand_ellipsis(key, self.dims) if self.rv: #special-cases for RV variables if len(key) == 1: #get all data for this record(s) key = self.expand_ellipsis(key + (Ellipsis, ), self.dims) elif len(key) == self.dims - 1: #get same slice from each record key = (slice(None, None, None), ) + key if len(key) == self.dims: self.expanded_key = key for i in range(self.dims): idx = key[i] if hasattr(idx, 'start'): #slice (self.starts[i], self.counts[i], self.intervals[i], self.rev[i]) = \ self.convert_range(idx.start, idx.stop, idx.step, self.dimsizes[i]) else: #Single degenerate value if idx < 0: idx += self.dimsizes[i] if idx != 0 and (idx >= self.dimsizes[i] or idx < 0): raise IndexError('list index out of range') self.starts[i] = idx self.degen[i] = True else: raise IndexError('Slice does not match dimensions for zVar ' + str(zvar._name)) self.column = zvar.cdf_file.col_major() def expected_dims(self, data=None): """Calculate size of non-degenerate dimensions Figures out size, in each dimension, of expected input data @return: size of each dimension for this slice, excluding degenerate @rtype: list of int """ return [self.counts[i] for i in range(self.dims) if not self.degen[i]] def expand(self, data): """Expands the record dimension of this slice to hold a set of data If the length of data (outermost dimension) is larger than the record count (counts[0]) for this slice, expand the slice to hold all the data. This requires that the record dimension of the slice not be degenerate, and also that it not have been completely specified when the hyperslice was created (i.e. record dimension either ellipsis or no specified stop.) Does *not* expand any other dimension, since that's Very Hard in CDF. @param data: the data which are intended to be stored in this slice @type data: list """ rec_slice = self.expanded_key[0] if not self.rv or isinstance(data, str_classes) or self.degen[0] or \ not hasattr(rec_slice, 'stop'): return if len(data) < self.counts[0]: #Truncate to fit data if rec_slice.stop is None and rec_slice.step in (None, 1): self.counts[0] = len(data) elif len(data) > self.counts[0]: #Expand to fit data if rec_slice.step in (None, 1): self.counts[0] = len(data) def create_array(self): """Creates a numpy array to hold the data from this slice Returns ======= out : numpy.array array sized, typed, and dimensioned to hold data from this slice """ counts = self.counts degen = self.degen if self.column: counts = self.reorder(counts) degen = self.reorder(degen) #TODO: Forcing C order for now, revert to using self.column later array = numpy.empty( [counts[i] for i in range(len(counts)) if not degen[i]], self.zvar._np_type(), order='C') return numpy.require(array, requirements=('C', 'A', 'W')) def convert_input_array(self, buffer): """Converts a buffer of raw data from this slice EPOCH(16) variables always need to be converted. 
CHAR need converted to Unicode if py3k Parameters ========== buffer : numpy.array data as read from the CDF file Returns ======= out : numpy.array converted data """ result = self._flip_array(buffer) #Convert to derived types cdftype = self.zvar.type() if not self.zvar._raw: if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str != bytes: dt = numpy.dtype('U{0}'.format(result.dtype.itemsize)) result = numpy.require(numpy.char.array(result).decode(), dtype=dt) elif cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(result) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(result) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(result) return result def convert_output_array(self, buffer): """Convert a buffer of data that will go into this slice Parameters ========== buffer : numpy.array data to go into the CDF file Returns ======= out : numpy.array input with majority flipped and dimensions reversed to be suitable to pass directly to CDF library. """ buffer = self._flip_array(buffer) return numpy.require(buffer, requirements=('C', 'A', 'W')) def _flip_array(self, data): """ Operations for majority, etc. common between convert_input and _output """ cdftype = self.zvar.type() #Flip majority if any non-degenerate dimensions exist if self.column and not min(self.degen): #Record-number dim degen, swap whole thing if self.degen[0]: if cdftype == const.CDF_EPOCH16.value: #Maintain last dimension data = data.transpose( list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose() #Record-number dimension is not degenerate, so keep it first else: if cdftype == const.CDF_EPOCH16.value: data = data.transpose( [0] + list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose( [0] + list(range(len(data.shape) - 1, 0, -1))) #Reverse non-degenerate dimensions in rev #Remember that the degenerate indices are already gone! if self.rev.any(): sliced = [(slice(None, None, -1) if self.rev[i] else slice(None)) for i in range(self.dims) if not self.degen[i]] if cdftype == const.CDF_EPOCH16.value: #don't reverse last dim sliced.extend(slice(None)) data = operator.getitem(data, tuple(sliced)) return data def select(self): """Selects this hyperslice in the CDF Calls the CDF library to select the CDF, variable, records, and array elements corresponding to this slice. 
""" args = (const.SELECT_, const.zVAR_RECNUMBER_, ctypes.c_long(self.starts[0]), const.SELECT_, const.zVAR_RECCOUNT_, ctypes.c_long(self.counts[0]), const.SELECT_, const.zVAR_RECINTERVAL_, ctypes.c_long(self.intervals[0])) if self.dims > 1: dims = self.dims - 1 args += (const.SELECT_, const.zVAR_DIMINDICES_, (ctypes.c_long * dims)(*self.starts[1:]), const.SELECT_, const.zVAR_DIMCOUNTS_, (ctypes.c_long * dims)(*self.counts[1:]), const.SELECT_, const.zVAR_DIMINTERVALS_, (ctypes.c_long * dims)(*self.intervals[1:])) self.zvar._call(*args) @staticmethod def expand_ellipsis(slices, n_dims): """Expands any ellipses into correct number of full-size slices @param slices: tuple of slices, integers, or ellipse objects @type slices: tuple @param n_dims: number of dimensions this slice is over @type n_dims: int @return: L{slices} with ellipses replaced by appropriate number of full-dimension slices @rtype: tuple @raise IndexError: if ellipses specified when already have enough dimensions """ if slices is Ellipsis: return tuple([slice(None, None, None) for i in range(n_dims)]) #Elements might be numpy arrays, so can't use in/index idx = [i for i, v in enumerate(slices) if v is Ellipsis] if not idx: #no ellipsis return slices if len(idx) > 1: #multiples! raise IndexError('Ellipses can only be used once per slice.') idx = idx[0] #how many dims to expand ellipsis to #remember the ellipsis is in len(slices) and must be replaced! extra = n_dims - len(slices) + 1 if extra < 0: raise IndexError('too many indices') result = slices[0:idx] + (slice(None), ) * extra + slices[idx+1:] return result @staticmethod def check_well_formed(data): """Checks if input data is well-formed, regular array""" d = numpy.asanyarray(data) if d.dtype == numpy.object: #this is probably going to be bad try: len(d.flat[0]) except TypeError: #at least it's not a list pass else: raise ValueError( 'Data must be well-formed, regular array of number, ' 'string, or datetime') @staticmethod def dimensions(data): """Finds the dimensions of a nested list-of-lists @param data: data of which dimensions are desired @type data: list (of lists) @return: dimensions of L{data}, in order outside-in @rtype: list of int @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) _Hyperslice.check_well_formed(d) return d.shape @staticmethod def types(data, backward=False): """Find dimensions and valid types of a nested list-of-lists Any given data may be representable by a range of CDF types; infer the CDF types which can represent this data. This breaks down to: 1. Proper kind (numerical, string, time) 2. Proper range (stores highest and lowest number) 3. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: 1. Type that matches precision of data first, then 2. integer type before float type, then 3. Smallest type first, then 4. signed type first, then 5. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if L{data} specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: 1. absolute values between 0 and 3e-39 2. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. 
@param data: data for which dimensions and CDF types are desired @type data: list (of lists) @param backward: limit to pre-CDF3 types @type backward: bool @return: dimensions of L{data}, in order outside-in; CDF types which can represent this data; number of elements required (i.e. length of longest string) @rtype: 3-tuple of lists ([int], [ctypes.c_long], [int]) @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) dims = d.shape elements = 1 types = [] _Hyperslice.check_well_formed(d) if d.dtype.kind in ('S', 'U'): #it's a string types = [const.CDF_CHAR, const.CDF_UCHAR] elements = d.dtype.itemsize if d.dtype.kind == 'U': #UTF-8 uses 4 bytes per elements //= 4 elif d.size and hasattr(numpy.ma.getdata(d).flat[0], 'microsecond'): if max((dt.microsecond % 1000 for dt in d.flat)) > 0: types = [const.CDF_EPOCH16, const.CDF_EPOCH, const.CDF_TIME_TT2000] else: types = [const.CDF_EPOCH, const.CDF_EPOCH16, const.CDF_TIME_TT2000] if backward: del types[types.index(const.CDF_EPOCH16)] del types[-1] elif not lib.supports_int8: del types[-1] elif d is data or isinstance(data, numpy.generic): #numpy array came in, use its type (or byte-swapped) types = [k for k in lib.numpytypedict if (lib.numpytypedict[k] == d.dtype or lib.numpytypedict[k] == d.dtype.newbyteorder()) and not k in lib.timetypes] if (not lib.supports_int8 or backward) \ and const.CDF_INT8.value in types: del types[types.index(const.CDF_INT8.value)] #Maintain priority to match the ordered lists below: #float/double (44, 45) before real (21/22), and #byte (41) before int (1) before char (51). So hack. #Consider making typedict an ordered dict once 2.6 is dead. types.sort(key=lambda x: x % 50, reverse=True) if not types: #not a numpy array, or can't parse its type if d.dtype.kind == 'O': #Object. 
Try to make it numeric #Can't do safe casting from Object, so try and compare #Basically try most restrictive to least restrictive trytypes = (numpy.uint64, numpy.int64, numpy.float64) for t in trytypes: try: newd = d.astype(dtype=t) except: #Failure to cast, try next type continue if (newd == d).all(): #Values preserved, use this type d = newd #Continue with normal guessing, as if a list break else: #fell through without a match raise ValueError( 'Cannot convert generic objects to CDF type.') if d.dtype.kind in ('i', 'u'): #integer minval = numpy.min(d) maxval = numpy.max(d) if minval < 0: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_INT2, const.CDF_INT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 15, 2 ** 31, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] else: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_UINT1, const.CDF_INT2, const.CDF_UINT2, const.CDF_INT4, const.CDF_UINT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 8, 2 ** 15, 2 ** 16, 2 ** 31, 2 ** 32, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] types = [t for (t, c) in zip(types, cutoffs) if c > maxval and (minval >= 0 or minval >= -c)] if (not lib.supports_int8 or backward) \ and const.CDF_INT8 in types: del types[types.index(const.CDF_INT8)] else: #float if dims is (): if d != 0 and (abs(d) > 1.7e38 or abs(d) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] else: absolutes = numpy.abs(d[d != 0]) if len(absolutes) > 0 and \ (numpy.max(absolutes) > 1.7e38 or numpy.min(absolutes) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] types = [t.value if hasattr(t, 'value') else t for t in types] return (dims, types, elements) @staticmethod def reorder(seq): """Reorders seq to switch array majority Used to take an array of subscripts between row and column majority. First element is not touched, being the record number. @param seq: a sequence of *subscripts* @type seq: sequence of integers @return: seq with all but element 0 reversed in order @rtype: sequence of integers """ return numpy.concatenate((seq[0:1], numpy.flipud(seq)[:-1])) @staticmethod def convert_range(start, stop, step, size): """Converts a start/stop/step range to start/count/interval (i.e. changes from Python-style slice to CDF-style) @param start: index to start a slice at, may be none or negative @type start: int @param stop: index at end of slice (one-past, standard Python), may be none or negative @type stop: int @param step: interval for stepping through stlice @type step: int @param size: size of list to slice @type size: int @return: (start, count, interval, rev) where: 1. start is the start index, normalized to be within the size of the list and negatives handled 2. count is the number of records in the slice, guaranteed to stop before the end 3. interval is the skip between records 4. rev indicates whether the sequence should be reversed @rtype: (int, int, int, boolean) """ (start, stop, step) = slice(start, stop, step).indices(size) if step < 0: step *= -1 count = int((start - stop + step - 1) / step) start = start - (count - 1) * step rev = True else: count = int((stop - start + step - 1) / step) rev = False if count < 0: count = 0 start = 0 return (start, count, step, rev) class Attr(MutableSequence): """An attribute, g or z, for a CDF .. 
warning:: This class should not be used directly, but only in its subclasses, :class:`gAttr` and :class:`zAttr`. The methods listed here are safe to use in the subclasses. Represents a CDF attribute, providing access to the Entries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. An introduction to CDF attributes can be found in section 2.4 of the CDF user's guide. Each element of the list is a single Entry of the appropriate type. The index to the elements is the Entry number. Multi-dimensional slicing is *not* supported; an Entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry .. autosummary:: ~Attr.append ~Attr.has_entry ~Attr.insert ~Attr.max_idx ~Attr.new ~Attr.number ~Attr.rename ~Attr.type .. automethod:: append .. automethod:: has_entry .. automethod:: insert .. automethod:: max_idx .. automethod:: new .. automethod:: number .. automethod:: rename .. automethod:: type """ def __init__(self, cdf_file, attr_name, create=False): """Initialize this attribute @param cdf_file: CDF file containing this attribute @type cdf_file: :py:class:`pycdf.CDF` @param attr_name: Name of this attribute @type attr_name: str @param create: True to create attribute, False to look up existing. @type create: bool """ self._cdf_file = cdf_file self._raw = False if isinstance(attr_name, str_classes): try: self._name = attr_name.encode('ascii') except AttributeError: self._name = attr_name attrno = ctypes.c_long() if create: self._cdf_file._call(const.CREATE_, const.ATTR_, self._name, self.SCOPE, ctypes.byref(attrno)) self._cdf_file.add_attr_to_cache( self._name, attrno.value, self.SCOPE == const.GLOBAL_SCOPE) else: #Ensure exists, and populate cache. See scope note below attrno, scope = self._cdf_file.attr_num(self._name) else: name = ctypes.create_string_buffer(const.CDF_ATTR_NAME_LEN256 + 1) scope = ctypes.c_long(0) self._cdf_file._call(const.SELECT_, const.ATTR_, ctypes.c_long(attr_name)) #Because it's possible to create a gAttr Python objecting #referencing an Attribute with variable scope, and vice-versa, #do NOT assume the scope matches #(Higher level code checks for that being a bad thing.) self._cdf_file._call( const.GET_, const.ATTR_NAME_, name, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) self._name = name.value.rstrip() if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) self._cdf_file.add_attr_to_cache(self._name, attr_name, scope) def __getitem__(self, key): """Return a slice of Entries. Because Attributes may be sparse, a multi-element slice will return None for those elements which do not have associated Entries. @param key: index or range of Entry number to return @type key: slice or int @return: a list of entries, appropriate type. @raise IndexError: if L{key} is an int and that Entry number does not exist. 
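        @note: For example (hypothetical names), attribute[1] returns the
        single Entry numbered 1, while attribute[0:3] returns a three-element
        list with None in any position that has no Entry.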
""" if key is Ellipsis: key = slice(None, None, None) if hasattr(key, 'indices'): idx = range(*key.indices(self.max_idx() + 1)) return [self._get_entry(i) if self.has_entry(i) else None for i in idx] else: if self.has_entry(key): return self._get_entry(key) else: raise IndexError('list index ' + str(key) + ' out of range.') def _check_other_entries(self, types): """Try to get the type of this entry from others in the Attribute For zAttrs, checks if all other Entries are the same type, and at least one doesn't match its zVar, i.e. Entry type dominates (otherwise assumption is the Var type dominates). For gAttrs, checks all other Entries, and gives priority to the one that's earliest in the possible type list and exists in other Entries. This is only one component of Entry type guessing! :param list types: CDF types that are candidates (match the data) :return: The type discerned from other Entries, or None """ if self.ENTRY_ == const.zENTRY_: #If everything else is the same entry type, #and one is not the same as its var, probably #all entries should be of that type cand_et = None #The Entry type that might work one_var_diff = False #One Var has a type different from Entry for num in range(self.max_idx() + 1): if not self.has_entry(num): continue vartype = self._cdf_file[num].type() entrytype = self.type(num) if vartype != entrytype: one_var_diff = True if cand_et is None: if not entrytype in types: return None #One var has Entry with "impossible" type cand_et = entrytype elif cand_et != entrytype: return None #Two vars have Entries with different types if one_var_diff and cand_et is not None: return cand_et else: # Of those types which exist in other entries, # find the one which is earliest # in types, i.e. the preferred type entrytypes = [self.type(num) for num in range(self.max_idx() + 1) if self.has_entry(num)] entrytypes = [et for et in entrytypes if et in types] if entrytypes: return types[ min([types.index(et) for et in entrytypes])] return None def __setitem__(self, key, data): """Set a slice of Entries. @param key: index or range of Entry numbers to set @type key: slice or int @param data: the data to set these entries to. Normally each entry should be a sequence; if a scalar is provided, it is treated as a single-element list. @type data: scalar or list @raise ValueError: if size of {data} does not match size of L{key} @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. 
""" if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): #Single value, promote everything a dimension idx = (key, key + 1, 1) data = [data] else: idx = key.indices(self.max_idx() + 1) if key.step is None or key.step > 0: #Iterating forward, extend slice to match data if len(data) > len(range(*idx)): idx = (idx[0], idx[0] + idx[2] * len(data), idx[2]) #get, and check, types and sizes for all data #checks first so don't have error after changing half the Entries data_idx = -1 typelist = [] for i in range(*idx): data_idx += 1 if data_idx >= len(data): continue datum = data[data_idx] if datum is None: typelist[i] = (None, None, None) continue (dims, types, elements) = _Hyperslice.types( datum, backward=self._cdf_file.backward) if len(types) <= 0: raise ValueError('Cannot find a matching CDF type.') if len(dims) > 1: raise ValueError('Entries must be scalar or 1D.') elif len(dims) == 1 and isinstance(datum[0], str_classes): raise ValueError('Entry strings must be scalar.') entry_type = None if self.has_entry(i): #If the entry already exists, match its type entry_type = self.type(i) if not entry_type in types: entry_type = None if entry_type is None: #Check other entries for this attribute entry_type = self._check_other_entries(types) if entry_type is None and self.ENTRY_ == const.zENTRY_: #Fall back to zVar type vartype = self._cdf_file[i].type() if vartype in types: entry_type = vartype else: entry_type = types[0] elif entry_type is None: entry_type = types[0] if not entry_type in lib.numpytypedict: raise ValueError('Cannot find a matching numpy type.') typelist.append((dims, entry_type, elements)) data_idx = -1 for i in range(*idx): data_idx += 1 if data_idx >= len(data) or data[data_idx] is None: if self.has_entry(i): del self[i] continue datum = data[data_idx] (dims, entry_type, elements) = typelist[data_idx] self._write_entry(i, datum, entry_type, dims, elements) def __delitem__(self, key): """Delete a slice of Entries. @param key: index or range of Entry numbers to delete @type key: slice or int @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. """ if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): idx = (key, key + 1, 1) else: idx = key.indices(self.max_idx() + 1) for i in range(*idx): self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(i), const.DELETE_, self.ENTRY_) def __iter__(self, current=0): """Iterates over all entries in this Attribute Returns data from one entry at a time until reaches the end. @note: Returned in entry-number order. """ while current <= self.max_idx(): if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current += 1 def __reversed__(self, current=None): """Iterates over all entries in this Attribute Returns data from one entry at a time, starting at end and going to beginning. @note: Returned in entry-number order. """ if current is None: current = self.max_idx() while current >= 0: if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current -= 1 def __len__(self): """Number of Entries for this Attr. NOT same as max Entry number. 
@return: Number of Entries @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_NUMENTRIES_, ctypes.byref(count)) return count.value def __repr__(self): """Returns representation of an attribute Cannot return anything that can be eval'd to create a copy of the attribtute, so just wrap the informal representation in angle brackets. @return: all the data in this attribute @rtype: str """ return '<\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute This is an 'informal' representation in that it cannot be evaluated directly to create an L{Attr}. @return: all the data in this attribute @rtype: str """ if self._cdf_file._opened: return '\n'.join([str(item) for item in self]) else: if isinstance(self._name, str): return 'Attribute "{0}" in closed CDF {1}'.format( self._name, self._cdf_file.pathname) else: return 'Attribute "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self._cdf_file.pathname.decode('ascii')) def insert(self, index, data): """Insert an entry at a particular number Inserts entry at particular number while moving all subsequent entries to one entry number later. Does not close gaps. Parameters ========== index : int index where to put the new entry data : data for the new entry """ max_entry = self.max_idx() if index > max_entry: #Easy case self[index] = data return for i in range(max_entry, index - 1, -1): if self.has_entry(i+1): self.__delitem__(i+1) if self.has_entry(i): self.new(self.__getitem__(i), type=self.type(i), number=i+1) self[index] = data def append(self, data): """Add an entry to end of attribute Puts entry after last defined entry (does not fill gaps) Parameters ========== data : data for the new entry """ self[self.max_idx() + 1] = data def _call(self, *args, **kwargs): """Select this CDF and Attr and call the CDF internal interface @param args: Passed directly to the CDF library interface. @type args: various, see :py:mod:`ctypes`. @return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self._cdf_file._call( const.SELECT_, const.ATTR_, ctypes.c_long(self._cdf_file.attr_num(self._name)[0]), *args, **kwargs) def _entry_len(self, number): """Number of elements in an Entry @param number: number of Entry @type number: int @return: number of elements @rtype: int """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') count = ctypes.c_long(0) self._call( const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_NUMELEMS_, ctypes.byref(count)) return count.value def type(self, number, new_type=None): """Find or change the CDF type of a particular Entry number Parameters ========== number : int number of Entry to check or change Other Parameters ================ new_type type to change this Entry to, from :mod:`~pycdf.const`. Omit to only check type. Returns ======= out : int CDF variable type, see :mod:`~pycdf.const` Notes ===== If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) size = ctypes.c_long(self._entry_len(number)) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATASPEC_, new_type, size, ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') cdftype = ctypes.c_long(0) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATATYPE_, ctypes.byref(cdftype), ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') return cdftype.value def has_entry(self, number): """Check if this attribute has a particular Entry number Parameters ========== number : int number of Entry to check or change Returns ======= out : bool True if ``number`` is a valid entry number; False if not """ status = self._call(const.CONFIRM_, self.ENTRY_EXISTENCE_, ctypes.c_long(number), ignore=(const.NO_SUCH_ENTRY, )) return not status == const.NO_SUCH_ENTRY def max_idx(self): """Maximum index of Entries for this Attr Returns ======= out : int maximum Entry number """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_MAXENTRY_, ctypes.byref(count)) return count.value def new(self, data, type=None, number=None): """Create a new Entry in this Attribute .. note:: If ``number`` is provided and an Entry with that number already exists, it will be overwritten. Parameters ========== data data to put in the Entry Other Parameters ================ type : int type of the new Entry, from :mod:`~pycdf.const` (otherwise guessed from ``data``) number : int Entry number to write, default is lowest available number. """ if number is None: number = 0 while self.has_entry(number): number += 1 (dims, types, elements) = _Hyperslice.types( data, backward=self._cdf_file.backward) if type is None: #Guess based on other entries type = self._check_other_entries(types) if type is None and self.ENTRY_ == const.zENTRY_: #Try to match variable type vartype = self._cdf_file[number].type() if vartype in types: type = vartype if type is None: type = types[0] elif hasattr(type, 'value'): type = type.value self._write_entry(number, data, type, dims, elements) def number(self): """Find the attribute number for this attribute Returns ======= out : int attribute number """ no = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.ATTR_NUMBER_, self._name, ctypes.byref(no)) return no.value def global_scope(self): """Determine scope of this attribute. Returns ======= out : bool True if global (i.e. gAttr), False if zAttr """ return self._cdf_file.attr_num(self._name)[1] def rename(self, new_name): """Rename this attribute Renaming a zAttribute renames it for *all* zVariables in this CDF! 
Parameters ========== new_name : str the new name of the attribute """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_ATTR_NAME_LEN256: raise CDFError(const.BAD_ATTR_NAME) self._call(const.PUT_, const.ATTR_NAME_, enc_name) self._cdf_file.add_attr_to_cache( enc_name, *self._cdf_file.attr_num(self._name)) #still in cache del self._cdf_file._attr_info[self._name] self._name = enc_name def _get_entry(self, number): """Read an Entry associated with this L{Attr} @param number: number of Entry to return @type number: int @return: data from entry numbered L{number} @rtype: list or str """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') #Make a big enough buffer length = self._entry_len(number) cdftype = self.type(number) if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): buff = numpy.empty((), 'S{0}'.format(length), order='C') else: if not cdftype in lib.numpytypedict: raise CDFError(const.BAD_DATA_TYPE) buff = numpy.empty((length,), lib.numpytypedict[cdftype], order='C') buff = numpy.require(buff, requirements=('C', 'A', 'W')) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATA_, buff.ctypes.data_as(ctypes.c_void_p)) #decode if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): if str == bytes or self._raw: #Py2k, leave as bytes result = bytes(buff) else: #Py3k, make unicode result = str(numpy.char.array(buff).decode()) else: if not self._raw: if cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(buff) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(buff) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(buff) else: result = buff else: result = buff if length == 1: result = result[0] return result def _write_entry(self, number, data, cdf_type, dims, elements): """Write an Entry to this Attr. @param number: number of Entry to write @type number: int @param data: data to write @param cdf_type: the CDF type to write, from :py:mod:`pycdf.const` @param dims: dimensions of L{data} @type dims: list @param elements: number of elements in L{data}, 1 unless it is a string @type elements: int """ if len(dims) == 0: n_write = 1 else: n_write = dims[0] if cdf_type in (const.CDF_CHAR.value, const.CDF_UCHAR.value): data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.dtype('S' + str(elements))) n_write = elements elif cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data), except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) elif cdf_type in lib.numpytypedict: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=lib.numpytypedict[cdf_type]) else: raise CDFError(const.BAD_DATA_TYPE) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATA_, ctypes.c_long(cdf_type), ctypes.c_long(n_write), data.ctypes.data_as(ctypes.c_void_p)) def _delete(self): """Delete this Attribute Also deletes all Entries associated with it. 
""" self._call(const.DELETE_, const.ATTR_) self._cdf_file.clear_attr_from_cache(self._name) self._name = None class zAttr(Attr): """zAttribute for zVariables within a CDF. .. warning:: Because zAttributes are shared across all variables in a CDF, directly manipulating them may have unexpected consequences. It is safest to operate on zEntries via :class:`zAttrList`. .. note:: When accessing a zAttr, pyCDF exposes only the zEntry corresponding to the associated zVariable. See Also ======== :class:`Attr` """ ENTRY_ = const.zENTRY_ ENTRY_DATA_ = const.zENTRY_DATA_ SCOPE = const.VARIABLE_SCOPE ENTRY_EXISTENCE_ = const.zENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMzENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXzENTRY_ ENTRY_NUMELEMS_ = const.zENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.zENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.zENTRY_DATASPEC_ def insert(self, index, data): """Insert entry at particular index number Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError def append(self, index, data): """Add entry to end of attribute list Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError class gAttr(Attr): """Global Attribute for a CDF Represents a CDF attribute, providing access to the gEntries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. Normally accessed by providing a key to a :class:`gAttrList`: >>> attribute = cdffile.attrs['attribute_name'] >>> first_gentry = attribute[0] Each element of the list is a single gEntry of the appropriate type. The index to the elements is the gEntry number. A gEntry may be either a single string or a 1D array of numerical type. Entries of numerical type (everything but CDF_CHAR and CDF_UCHAR) with a single element are returned as scalars; multiple-element entries are returned as a list. No provision is made for accessing below the entry level; the whole list is returned at once (but Python's slicing syntax can be used to extract individual items from that list.) Multi-dimensional slicing is *not* supported; an entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry gEntries are *not* necessarily contiguous; a gAttribute may have an entry 0 and entry 2 without an entry 1. :meth:`~Attr.len` will return the *number* of gEntries; use :meth:`~Attr.max_idx` to find the highest defined gEntry number and :meth:`~Attr.has_entry` to determine if a particular gEntry number exists. Iterating over all entries is also supported:: >>> entrylist = [entry for entry in attribute] Deleting gEntries will leave a "hole": >>> attribute[0:3] = [1, 2, 3] >>> del attribute[1] >>> attribute.has_entry(1) False >>> attribute.has_entry(2) True >>> print attribute[0:3] [1, None, 3] Multi-element slices over nonexistent gEntries will return ``None`` where no entry exists. Single-element indices for nonexistent gEntries will raise ``IndexError``. Assigning ``None`` to a gEntry will delete it. 
When assigning to a gEntry, the type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing gEntry of the same number in this gAttribute #. other gEntries in this gAttribute #. data-matching constraints described in :meth:`CDF.new`. See Also ======== :class:`Attr` """ ENTRY_ = const.gENTRY_ ENTRY_DATA_ = const.gENTRY_DATA_ SCOPE = const.GLOBAL_SCOPE ENTRY_EXISTENCE_ = const.gENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMgENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXgENTRY_ ENTRY_NUMELEMS_ = const.gENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.gENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.gENTRY_DATASPEC_ class AttrList(MutableMapping): """Object representing a list of attributes. .. warning:: This class should not be used directly, but only via its subclasses, :class:`gAttrList` and :class:`zAttrList`. Methods listed here are safe to use from the subclasses. .. autosummary:: ~AttrList.clone ~AttrList.copy ~AttrList.from_dict ~AttrList.new ~AttrList.rename .. automethod:: clone .. automethod:: copy .. automethod:: from_dict .. automethod:: new .. automethod:: rename """ def __init__(self, cdf_file, special_entry=None): """Initialize the attribute collection @param cdf_file: CDF these attributes are in @type cdf_file: :py:class:`pycdf.CDF` @param special_entry: callable which returns a "special" entry number, used to limit results for zAttrs to those which match the zVar (i.e. the var number) @type special_entry: callable """ self._cdf_file = cdf_file self.special_entry = special_entry def __getitem__(self, name): """Find an Attribute by name @param name: name of the Attribute to return @type name: str @return: attribute named L{name} @rtype: L{Attr} @raise KeyError: if there is no attribute named L{name} @raise CDFError: other errors in CDF library """ try: attrib = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attrib.global_scope() != self.global_scope: raise KeyError(name + ': no ' + self.attr_name + ' by that name.') return attrib def __setitem__(self, name, data): """Create an Attribute or change its entries @param name: name of Attribute to change @type name: str @param data: Entries to populate this Attribute with. Any existing Entries will be deleted! Another C{Attr} may be specified, in which case all its entries are copied. @type data: scalar, list, or L{Attr} """ if isinstance(data, AttrList): if name in self: del self[name] attr = self._get_or_create(name) for entryno in range(data.max_idx()): if data.has_entry(entryno): attr.new(data[entryno], data.type(entryno), entryno) else: attr = self._get_or_create(name) if isinstance(data, str_classes): data = [data] else: try: junk = len(data) except TypeError: data = [data] attr[:] = data del attr[len(data):] def __delitem__(self, name): """Delete an Attribute (and all its entries) @param name: name of Attribute to delete @type name: str """ try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) attr._delete() def __iter__(self, current=0): """Iterates over all Attr in this CDF or variable Returns name of one L{Attr} at a time until reaches the end. @note: Returned in number order. 
""" count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) while current < count.value: candidate = self.AttrType(self._cdf_file, current) if candidate.global_scope() == self.global_scope: if self.special_entry is None or \ candidate.has_entry(self.special_entry()): if str == bytes: value = yield(candidate._name) else: value = yield(candidate._name.decode()) if value != None: current = self[value].number() current += 1 def __repr__(self): """Returns representation of attribute list Cannot return anything that can be eval'd to create a copy of the list, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<' + self.__class__.__name__ + ':\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute list This is an 'informal' representation in that it cannot be evaluated directly to create an L{AttrList}. @return: all the data in this list of attributes @rtype: str """ if self._cdf_file._opened: return '\n'.join([key + ': ' + ( ('\n' + ' ' * (len(key) + 2)).join( [str(value[i]) + ' [' + lib.cdftypenames[value.type(i)] + ']' for i in range(value.max_idx() + 1) if value.has_entry(i)]) if isinstance(value, Attr) else str(value) + ' [' + lib.cdftypenames[self.type(key)] + ']' ) for (key, value) in sorted(self.items())]) else: if isinstance(self._cdf_file.pathname, str): return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname) else: return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname.decode('ascii')) def clone(self, master, name=None, new_name=None): """ Clones another attribute list, or one attribute from it, into this list. Parameters ========== master : AttrList the attribute list to copy from. This can be any dict-like object. Other Parameters ================ name : str (optional) name of attribute to clone (default: clone entire list) new_name : str (optional) name of the new attribute, default ``name`` """ if name is None: self._clone_list(master) else: self._clone_attr(master, name, new_name) def copy(self): """ Create a copy of this attribute list Returns ======= out : dict copy of the entries for all attributes in this list """ return dict((key, value[:] if isinstance(value, Attr) else value) for (key, value) in self.items()) def new(self, name, data=None, type=None): """ Create a new Attr in this AttrList Parameters ========== name : str name of the new Attribute Other Parameters ================ data data to put into the first entry in the new Attribute type CDF type of the first entry from :mod:`~pycdf.const`. Only used if data are specified. Raises ====== KeyError : if the name already exists in this list """ if name in self: raise KeyError(name + ' already exists.') #A zAttr without an Entry in this zVar will be a "get" not "create" attr = self._get_or_create(name) if data is not None: if self.special_entry is None: attr.new(data, type) else: attr.new(data, type, self.special_entry()) def rename(self, old_name, new_name): """ Rename an attribute in this list Renaming a zAttribute renames it for *all* zVariables in this CDF! Parameters ========== old_name : str the current name of the attribute new_name : str the new name of the attribute """ AttrList.__getitem__(self, old_name).rename(new_name) def from_dict(self, in_dict): """ Fill this list of attributes from a dictionary .. deprecated:: 0.1.5 Use :meth:`~pycdf.AttrList.clone` instead; it supports cloning from dictionaries. 
Parameters ========== in_dict : dict Attribute list is populated entirely from this dictionary; all existing attributes are deleted. """ warnings.warn("from_dict is deprecated and will be removed. Use clone.", DeprecationWarning) for k in in_dict: self[k] = in_dict[k] for k in list(self): if not k in in_dict: del self[k] def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{AttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name self[new_name] = master[name] def _clone_list(self, master): """Clones this attribute list from another @param master: the attribute list to copy from @type master: L{AttrList} """ for name in master: self._clone_attr(master, name) for name in list(self): #Can't iterate over a list we're changing if not name in master: del self[name] def _get_or_create(self, name): """Retrieve L{Attr} or create it if it doesn't exist @param name: name of the attribute to look up or create @type name: str @return: attribute with this name @rtype: L{Attr} """ attr = None try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status != const.NO_SUCH_ATTR: raise if attr is None: attr = self.AttrType(self._cdf_file, name, True) elif attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) return attr class gAttrList(AttrList): """ Object representing *all* the gAttributes in a CDF. Normally accessed as an attribute of an open :class:`CDF`: >>> global_attribs = cdffile.attrs Appears as a dictionary: keys are attribute names; each value is an attribute represented by a :class:`gAttr` object. To access the global attribute TEXT: >>> text_attr = cdffile.attrs['TEXT'] See Also ======== :class:`AttrList` """ AttrType = gAttr attr_name = 'gAttribute' global_scope = True def __len__(self): """ Number of gAttributes in this CDF Returns ======= out : int number of gAttributes in the CDF """ count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMgATTRS_, ctypes.byref(count)) return count.value class zAttrList(AttrList): """Object representing *all* the zAttributes in a zVariable. Normally accessed as an attribute of a :class:`Var` in an open CDF: >>> epoch_attribs = cdffile['Epoch'].attrs Appears as a dictionary: keys are attribute names, values are the value of the zEntry associated with the appropriate zVariable. Each vAttribute in a CDF may only have a *single* entry associated with each variable. The entry may be a string, a single numerical value, or a series of numerical values. Entries with multiple values are returned as an entire list; direct access to the individual elements is not possible. Example: finding the first dependency of (ISTP-compliant) variable ``Flux``: >>> print cdffile['Flux'].attrs['DEPEND_0'] zAttributes are shared among zVariables, one zEntry allowed per zVariable. (pyCDF hides this detail.) Deleting the last zEntry for a zAttribute will delete the underlying zAttribute. zEntries are created and destroyed by the usual dict methods on the zAttrlist: >>> epoch_attribs['new_entry'] = [1, 2, 4] #assign a list to new zEntry >>> del epoch_attribs['new_entry'] #delete the zEntry The type of the zEntry is guessed from data provided. 
The type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing zEntry corresponding to this zVar #. other zEntries in this zAttribute #. the type of this zVar #. data-matching constraints described in :py:meth:`CDF.new` See Also ======== :class:`AttrList` """ AttrType = zAttr attr_name = 'zAttribute' global_scope = False def __init__(self, zvar): """Initialize the attribute collection @param zvar: zVariable these attributes are in @param zvar: :py:class:`pycdf.Var` """ super(zAttrList, self).__init__(zvar.cdf_file, zvar._num) self._zvar = zvar def __getitem__(self, name): """Find an zEntry by name @param name: name of the zAttribute to return @type name: str @return: attribute named L{name} @rtype: L{zAttr} @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if attrib.has_entry(zvar_num): attrib._raw = self._zvar._raw return attrib[zvar_num] else: raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) def __delitem__(self, name): """Delete an zEntry by name @param name: name of the zEntry to delete @type name: str @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library @note: If this is the only remaining entry, the Attribute will be deleted. """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(str(name) + ': no such attribute for variable ' + str(self._zvar._name)) del attrib[zvar_num] if len(attrib) == 0: attrib._delete() def __setitem__(self, name, data): """Sets a zEntry by name The type of the zEntry is guessed from L{data}. The type is chosen to match the data; subject to that constraint, it will try to match (in order): 1. existing zEntry corresponding to this zVar 2. other zEntries in this zAttribute 3. the type of this zVar 4. data-matching constraints described in L{_Hyperslice.types} @param name: name of zAttribute; zEntry for this zVariable will be set in zAttribute by this name @type name: str @raise CDFError: errors in CDF library @raise ValueError: if unable to find a valid CDF type matching L{data}, or if L{data} is the wrong dimensions. """ try: attr = super(zAttrList, self).__getitem__(name) except KeyError: attr = zAttr(self._cdf_file, name, True) attr._raw = self._zvar._raw attr[self._zvar._num()] = data def __len__(self): """Number of zAttributes in this variable @return: number of zAttributes in the CDF which have entries for this variable. @rtype: int """ length = 0 count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) current = 0 while current < count.value: candidate = zAttr(self._cdf_file, current) if not candidate.global_scope(): if candidate.has_entry(self._zvar._num()): length += 1 current += 1 return length def type(self, name, new_type=None): """Find or change the CDF type of a zEntry in this zVar @param name: name of the zAttr to check or change @type name: str @param new_type: type to change it to, see :py:mod:`pycdf.const` @type new_type: ctypes.c_long @return: CDF variable type, see :py:mod:`pycdf.const` @rtype: int @note: If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) return attrib.type(zvar_num, new_type) def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{zAttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name if new_name in self: del self[new_name] self.new(new_name, master[name], master.type(name) if hasattr(master, 'type') else None)
def readonly(self, ro=None):
        """
        Set or check the readonly status of this CDF

        If the CDF has been changed since opening, setting readonly mode
        will have no effect.

        .. note::
            Closing a CDF that has been opened readonly, or setting readonly
            False, may take a substantial amount of time if there are many
            variables in the CDF, as a (potentially large) cache needs to be
            cleared. Consider specifying ``readonly=False`` when opening the
            file if this is an issue. However, this may make some reading
            operations slower.

        Other Parameters
        ================
        ro : Boolean
            True to set the CDF readonly, False to set it read/write,
            or leave out to check only.

        Returns
        =======
        out : Boolean
            True if CDF is read-only, else False

        Raises
        ======
        CDFError : if bad mode is set
        """
        if ro == True:
            self._call(const.SELECT_, const.CDF_READONLY_MODE_,
                       const.READONLYon)
        elif ro == False:
            self._call(const.SELECT_, const.CDF_READONLY_MODE_,
                       const.READONLYoff)
        mode = ctypes.c_long(0)
        self._call(const.CONFIRM_, const.CDF_READONLY_MODE_,
                   ctypes.byref(mode))
        if mode.value == const.READONLYon.value:
            return True
        elif mode.value == const.READONLYoff.value:
            return False
        else:
            raise CDFError(const.BAD_READONLY_MODE.value)
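# A minimal usage sketch of readonly() above, assuming this module is
# importable as `pycdf` and that a file 'existing.cdf' exists (hypothetical
# name).
import pycdf

cdf = pycdf.CDF('existing.cdf')
print(cdf.readonly())              # query the current mode; True if read-only
cdf.readonly(False)                # switch to read/write; can be slow, since a
                                   # potentially large cache has to be cleared
cdf.readonly(True)                 # switch back to read-only
cdf.close()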
1979
2023
#!/usr/bin/env python # -*- coding: utf-8 -*- """ das developers note: This a is modification of the original SpacePy pycdf package. All refereneces to the greater spacepy package have been removed to create a small standalone module. --cwp 2018-10-18 The libcdf.so location code has been changed to find the version installed in anaconda. --cwp 2020-04-06 This package provides a Python interface to the Common Data Format (CDF) library used for many NASA missions, available at http://cdf.gsfc.nasa.gov/. It is targeted at Python 2.6+ and should work without change on either Python 2 or Python 3. The interface is intended to be 'pythonic' rather than reproducing the C interface. To open or close a CDF and access its variables, see the :class:`CDF` class. Accessing data within the variables is via the :class:`Var` class. The :data:`lib` object provides access to some routines that affect the functionality of the library in general. The :mod:`~pycdf.const` module contains constants useful for accessing the underlying library. Authors: Jon Niehof Institution: University of New Hampshire Contact: [email protected] Copyright 2010-2015 Los Alamos National Security, LLC. """ __contact__ = 'Jon Niehof, [email protected]' try: from collections.abc import MutableMapping, MutableSequence except ImportError: from collections import MutableMapping, MutableSequence import ctypes import ctypes.util import datetime import operator import os import os.path import shutil import sys import tempfile import warnings import weakref import numpy import numpy.ma #Import const AFTER library loaded, so failed load doesn't leave half-imported #from . import const try: str_classes = (str, bytes, unicode) except NameError: str_classes = (str, bytes) class Library(object): """ Abstraction of the base CDF C library and its state. Not normally intended for end-user use. An instance of this class is created at package load time as the :data:`~pycdf.lib` variable, providing access to the underlying C library if necessary. The CDF library itself is described in section 2.1 of the CDF user's guide, as well as the CDF C reference manual. Calling the C library directly requires knowledge of :mod:`ctypes`. Instantiating this object loads the C library, see :doc:`/pycdf` docs for details. .. autosummary:: ~Library.call ~Library.check_status ~Library.datetime_to_epoch ~Library.datetime_to_epoch16 ~Library.datetime_to_tt2000 ~Library.epoch_to_datetime ~Library.epoch_to_epoch16 ~Library.epoch_to_num ~Library.epoch_to_tt2000 ~Library.epoch16_to_datetime ~Library.epoch16_to_epoch ~Library.epoch16_to_tt2000 ~Library.set_backward supports_int8 ~Library.tt2000_to_datetime ~Library.tt2000_to_epoch ~Library.tt2000_to_epoch16 v_datetime_to_epoch v_datetime_to_epoch16 v_datetime_to_tt2000 v_epoch_to_datetime v_epoch_to_tt2000 v_epoch16_to_datetime v_epoch16_to_tt2000 v_tt2000_to_datetime v_tt2000_to_epoch v_tt2000_to_epoch16 libpath version .. automethod:: call .. automethod:: check_status .. automethod:: datetime_to_epoch .. automethod:: datetime_to_epoch16 .. automethod:: datetime_to_tt2000 .. automethod:: epoch_to_datetime .. automethod:: epoch_to_epoch16 .. automethod:: epoch_to_num .. automethod:: epoch_to_tt2000 .. automethod:: epoch16_to_datetime .. automethod:: epoch16_to_epoch .. automethod:: epoch16_to_tt2000 .. automethod:: set_backward .. attribute:: supports_int8 True if this library supports INT8 and TIME_TT2000 types; else False. .. automethod:: tt2000_to_datetime .. automethod:: tt2000_to_epoch .. 
automethod:: tt2000_to_epoch16 .. method:: v_datetime_to_epoch(datetime) A vectorized version of :meth:`datetime_to_epoch` which takes a numpy array of datetimes as input and returns an array of epochs. .. method:: v_datetime_to_epoch16(datetime) A vectorized version of :meth:`datetime_to_epoch16` which takes a numpy array of datetimes as input and returns an array of epoch16. .. method:: v_datetime_to_tt2000(datetime) A vectorized version of :meth:`datetime_to_tt2000` which takes a numpy array of datetimes as input and returns an array of TT2000. .. method:: v_epoch_to_datetime(epoch) A vectorized version of :meth:`epoch_to_datetime` which takes a numpy array of epochs as input and returns an array of datetimes. .. method:: v_epoch_to_tt2000(epoch) A vectorized version of :meth:`epoch_to_tt2000` which takes a numpy array of epochs as input and returns an array of tt2000s. .. method:: v_epoch16_to_datetime(epoch0, epoch1) A vectorized version of :meth:`epoch16_to_datetime` which takes a numpy array of epoch16 as input and returns an array of datetimes. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_epoch16_to_tt2000(epoch16) A vectorized version of :meth:`epoch16_to_tt2000` which takes a numpy array of epoch16 as input and returns an array of tt2000s. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_tt2000_to_datetime(tt2000) A vectorized version of :meth:`tt2000_to_datetime` which takes a numpy array of tt2000 as input and returns an array of datetimes. .. method:: v_tt2000_to_epoch(tt2000) A vectorized version of :meth:`tt2000_to_epoch` which takes a numpy array of tt2000 as input and returns an array of epochs. .. method:: v_tt2000_to_epoch16(tt2000) A vectorized version of :meth:`tt2000_to_epoch16` which takes a numpy array of tt2000 as input and returns an array of epoch16. .. attribute:: libpath The path where pycdf found the CDF C library, potentially useful in debugging. If this contains just the name of a file (with no path information), then the system linker found the library for pycdf. On Linux, ``ldconfig -p`` may be useful for displaying the system's library resolution. .. attribute:: version Version of the CDF library, (version, release, increment, subincrement) """ def __init__(self, libpath=None, library=None): """Load the CDF C library. Searches for the library in the order: 1. Appropriately-named file in CDF_LIB 2. Appropriately-named file in CDF_BASE 3. Standard library search path @raise CDFError: BAD_DATA_TYPE if can't map types properly """ if not 'CDF_TMP' in os.environ: os.environ['CDF_TMP'] = tempfile.gettempdir() if not library: if not libpath: self.libpath, self._library = self._find_lib() if self._library is None: raise Exception(( 'Cannot load CDF C library; checked {0}. 
' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(self.libpath))) else: self._library = ctypes.CDLL(libpath) self.libpath = libpath else: self._library = library self.libpath = libpath self._library.CDFlib.restype = ctypes.c_long #commonly used, so set it up here self._library.EPOCHbreakdown.restype = ctypes.c_long self._library.computeEPOCH.restype = ctypes.c_double self._library.computeEPOCH.argtypes = [ctypes.c_long] * 7 self._library.computeEPOCH16.restype = ctypes.c_double self._library.computeEPOCH16.argtypes = [ctypes.c_long] * 10 + \ [ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDFsetFileBackward'): self._library.CDFsetFileBackward.restype = None self._library.CDFsetFileBackward.argtypes = [ctypes.c_long] #Map old name to the 3.7.1+ name if not hasattr(self._library, 'computeTT2000') \ and hasattr(self._library, 'CDF_TT2000_from_UTC_parts'): self._library.computeTT2000 \ = self._library.CDF_TT2000_from_UTC_parts if hasattr(self._library, 'computeTT2000'): self._library.computeTT2000.restype = ctypes.c_longlong self._library.computeTT2000.argtypes = \ [ctypes.c_double] *9 #Map old name to the 3.7.1+ name if not hasattr(self._library, 'breakdownTT2000') \ and hasattr(self._library, 'CDF_TT2000_to_UTC_parts'): self._library.breakdownTT2000 \ = self._library.CDF_TT2000_to_UTC_parts if hasattr(self._library, 'breakdownTT2000'): self._library.breakdownTT2000.restype = None self._library.breakdownTT2000.argtypes = \ [ctypes.c_longlong] + [ctypes.POINTER(ctypes.c_double)] * 9 if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH'): self._library.CDF_TT2000_to_UTC_EPOCH.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH.argtypes = [ctypes.c_longlong] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH'): self._library.CDF_TT2000_from_UTC_EPOCH.restype = ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH.argtypes = [ctypes.c_double] if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH16'): self._library.CDF_TT2000_to_UTC_EPOCH16.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH16.argtypes = \ [ctypes.c_longlong, ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH16'): self._library.CDF_TT2000_from_UTC_EPOCH16.restype = \ ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH16.argtypes = \ [ctypes.POINTER(ctypes.c_double * 2)] #Get CDF version information ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) sub = ctypes.c_char(b' ') self.call(const.GET_, const.LIB_VERSION_, ctypes.byref(ver), const.GET_, const.LIB_RELEASE_, ctypes.byref(rel), const.GET_, const.LIB_INCREMENT_, ctypes.byref(inc), const.GET_, const.LIB_subINCREMENT_, ctypes.byref(sub)) ver = ver.value rel = rel.value inc = inc.value sub = sub.value self.version = (ver, rel, inc, sub) self._del_middle_rec_bug = ver < 3 or (ver == 3 and (rel < 4 or (rel == 4 and inc < 1))) self.supports_int8 = (ver > 3 or (ver == 3 and rel >=4)) self.cdftypenames = {const.CDF_BYTE.value: 'CDF_BYTE', const.CDF_CHAR.value: 'CDF_CHAR', const.CDF_INT1.value: 'CDF_INT1', const.CDF_UCHAR.value: 'CDF_UCHAR', const.CDF_UINT1.value: 'CDF_UINT1', const.CDF_INT2.value: 'CDF_INT2', const.CDF_UINT2.value: 'CDF_UINT2', const.CDF_INT4.value: 'CDF_INT4', const.CDF_UINT4.value: 'CDF_UINT4', const.CDF_INT8.value: 'CDF_INT8', const.CDF_FLOAT.value: 'CDF_FLOAT', const.CDF_REAL4.value: 'CDF_REAL4', const.CDF_DOUBLE.value: 'CDF_DOUBLE', const.CDF_REAL8.value: 'CDF_REAL8', const.CDF_EPOCH.value: 'CDF_EPOCH', 
const.CDF_EPOCH16.value: 'CDF_EPOCH16', const.CDF_TIME_TT2000.value: 'CDF_TIME_TT2000', } self.numpytypedict = {const.CDF_BYTE.value: numpy.int8, const.CDF_CHAR.value: numpy.int8, const.CDF_INT1.value: numpy.int8, const.CDF_UCHAR.value: numpy.uint8, const.CDF_UINT1.value: numpy.uint8, const.CDF_INT2.value: numpy.int16, const.CDF_UINT2.value: numpy.uint16, const.CDF_INT4.value: numpy.int32, const.CDF_UINT4.value: numpy.uint32, const.CDF_INT8.value: numpy.int64, const.CDF_FLOAT.value: numpy.float32, const.CDF_REAL4.value: numpy.float32, const.CDF_DOUBLE.value: numpy.float64, const.CDF_REAL8.value: numpy.float64, const.CDF_EPOCH.value: numpy.float64, const.CDF_EPOCH16.value: numpy.dtype((numpy.float64, 2)), const.CDF_TIME_TT2000.value: numpy.int64, } self.timetypes = [const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value] if not self.supports_int8: del self.cdftypenames[const.CDF_INT8.value] del self.numpytypedict[const.CDF_INT8.value] del self.cdftypenames[const.CDF_TIME_TT2000.value] del self.numpytypedict[const.CDF_TIME_TT2000.value] elif sys.platform.startswith('linux') \ and os.uname()[4].startswith('arm') \ and hasattr(self._library, 'computeTT2000') \ and self._library.computeTT2000( 2010, 1, 1, 0, 0, 0, 0, 0, 0) != 315576066184000000: #TT2000 call failed, so probably need to type-pun #double arguments to variadic functions. #Calling convention for non-variadic functions with floats #is unique, but convention for ints is same as variadic. #So type-pun arguments to integers to force that calling #convention. if ctypes.sizeof(ctypes.c_longlong) != \ ctypes.sizeof(ctypes.c_double): warnings.warn('ARM with unknown type sizes; ' 'TT2000 functions will not work.') else: self._library.computeTT2000.argtypes = \ [ctypes.c_longlong] * 9 c_ll_p = ctypes.POINTER(ctypes.c_longlong) if self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( 2010)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) != 315576066184000000: warnings.warn('ARM with unknown calling convention; ' 'TT2000 functions will not work.') self.datetime_to_tt2000 = self._datetime_to_tt2000_typepunned v_epoch16_to_datetime = numpy.frompyfunc( self.epoch16_to_datetime, 2, 1) self.v_epoch16_to_datetime = \ lambda x: v_epoch16_to_datetime(x[..., 0], x[..., 1]) self.v_epoch_to_datetime = numpy.frompyfunc( self.epoch_to_datetime, 1, 1) self.v_tt2000_to_datetime = numpy.frompyfunc( self.tt2000_to_datetime, 1, 1) self.v_datetime_to_epoch = numpy.vectorize( self.datetime_to_epoch, otypes=[numpy.float64]) v_datetime_to_epoch16 = numpy.frompyfunc( self.datetime_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. 
We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_datetime_to_epoch16(x): retval = numpy.require(v_datetime_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_datetime_to_epoch16 = _v_datetime_to_epoch16 self.v_datetime_to_tt2000 = numpy.vectorize( self.datetime_to_tt2000, otypes=[numpy.int64]) self.v_epoch_to_tt2000 = numpy.vectorize( self.epoch_to_tt2000, otypes=[numpy.int64]) self.v_tt2000_to_epoch = numpy.vectorize( self.tt2000_to_epoch, otypes=[numpy.float64]) v_epoch16_to_tt2000 = numpy.frompyfunc( self.epoch16_to_tt2000, 2, 1) self.v_epoch16_to_tt2000 = \ lambda x: v_epoch16_to_tt2000(x[..., 0], x[..., 1]) v_tt2000_to_epoch16 = numpy.frompyfunc( self.tt2000_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_tt2000_to_epoch16(x): retval = numpy.require(v_tt2000_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_tt2000_to_epoch16 = _v_tt2000_to_epoch16 if not self.supports_int8: self.datetime_to_tt2000 = self._bad_tt2000 self.tt2000_to_datetime = self._bad_tt2000 self.v_datetime_to_tt2000 = self._bad_tt2000 self.v_tt2000_to_datetime = self._bad_tt2000 self.epoch_to_tt2000 = self._bad_tt2000 self.v_epoch_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch = self._bad_tt2000 self.v_tt2000_to_epoch = self._bad_tt2000 self.epoch_16_to_tt2000 = self._bad_tt2000 self.v_epoch16_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch16 = self._bad_tt2000 self.v_tt2000_to_epoch16 = self._bad_tt2000 #Default to V2 CDF self.set_backward(True) @staticmethod def _find_lib(): """ Search for the CDF library Searches in likely locations for CDF libraries and attempts to load them. Stops at first successful load and, if fails, reports all the files that were tried as libraries. Returns ======= out : tuple This is either (path to library, loaded library) or, in the event of failure, (None, list of libraries tried) """ failed = [] for libpath in Library._lib_paths(): try: lib = ctypes.CDLL(libpath) except: failed.append(libpath) else: return libpath, lib return (failed, None) @staticmethod def _lib_paths(): """Find candidate paths for the CDF library Does not check that the library is actually in any particular directory, just returns a list of possible locations, in priority order. Returns ======= out : generator of str paths that look like the CDF library """ #What the library might be named names = { 'win32': ['cdf.dll'], 'darwin': ['libcdf.dylib', 'cdf.dylib', 'libcdf.so'], 'linux2': ['libcdf.so'], 'linux': ['libcdf.so'], } names = names.get(sys.platform, ['libcdf.so']) #All existing CDF-library-like paths within a directory search_dir = lambda x: \ [os.path.join(x, fname) for fname in names if os.path.exists(os.path.join(x, fname))] # Only use anaconda locations... # Defined during builds ... if 'PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['PREFIX'], 'lib')): yield p # defined when conda is activated ... 
if 'CONDA_PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'lib')): yield p # Special subdirectory for anaconda unix packages on windows if 'LIBRARY_BIN' in os.environ: for p in search_dir(os.environ['LIBRARY_BIN']): yield p ctypespath = ctypes.util.find_library( 'cdf.dll' if sys.platform == 'win32' else 'cdf') if ctypespath: yield ctypespath def check_status(self, status, ignore=()): """ Raise exception or warning based on return status of CDF call Parameters ========== status : int status returned by the C library Other Parameters ================ ignore : sequence of ctypes.c_long CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. (Default none). Raises ====== CDFError : if status < CDF_WARN, indicating an error Warns ===== CDFWarning : if CDF_WARN <= status < CDF_OK, indicating a warning. Returns ======= out : int status (unchanged) """ if status == const.CDF_OK or status in ignore: return status if status < const.CDF_WARN: raise CDFError(status) else: warning = CDFWarning(status) warning.warn() return status def call(self, *args, **kwargs): """ Call the CDF internal interface Passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with :meth:`check_status`. Terminal NULL is automatically added to args. Parameters ========== args : various, see :mod:`ctypes` Passed directly to the CDF library interface. Useful constants are defined in the :mod:`~pycdf.const` module. Other Parameters ================ ignore : sequence of CDF statuses sequence of CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. Returns ======= out : int CDF status from the library Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ if 'ignore' in kwargs: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) ), kwargs['ignore']) else: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) )) def set_backward(self, backward=True): """ Set backward compatibility mode for new CDFs Unless backward compatible mode is set, CDF files created by the version 3 library can not be read by V2. Parameters ========== backward : boolean Set backward compatible mode if True; clear it if False. Raises ====== ValueError : if backward=False and underlying CDF library is V2 """ if self.version[0] < 3: if not backward: raise ValueError( 'Cannot disable backward-compatible mode for CDF version 2.') else: return self._library.CDFsetFileBackward(const.BACKWARDFILEon if backward else const.BACKWARDFILEoff) def epoch_to_datetime(self, epoch): """ Converts a CDF epoch value to a datetime Parameters ========== epoch : float epoch value from CDF Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
See Also ======== v_epoch_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) self._library.EPOCHbreakdown(ctypes.c_double(epoch), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999000) else: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, msec.value * 1000) def datetime_to_epoch(self, dt): """ Converts a Python datetime to a CDF Epoch value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : float epoch corresponding to dt See Also ======== v_datetime_to_epoch """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) micro = dt.microsecond % 1000 if micro >= 500 and dt.year < 9999: dt += datetime.timedelta(0, 0, 1000) return self._library.computeEPOCH(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000)) def epoch16_to_datetime(self, epoch0, epoch1): """ Converts a CDF epoch16 value to a datetime .. note:: The call signature has changed since SpacePy 0.1.2. Formerly this method took a single argument with two values; now it requires two arguments (one for each value). To convert existing code, replace ``epoch16_to_datetime(epoch)`` with ``epoch16_to_datetime(*epoch)``. Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. See Also ======== v_epoch16_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) usec = ctypes.c_long(0) nsec = ctypes.c_long(0) psec = ctypes.c_long(0) self._library.EPOCH16breakdown((ctypes.c_double * 2)(epoch0, epoch1), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec), ctypes.byref(psec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) micro = int(float(msec.value) * 1000 + float(usec.value) + float(nsec.value) / 1000 + float(psec.value) / 1e6 + 0.5) if micro < 1000000: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_epoch16(self, dt): """ Converts a Python datetime to a CDF Epoch16 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : list of float epoch16 corresponding to dt See Also ======== v_datetime_to_epoch16 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) #Default to "illegal epoch" epoch16 = (ctypes.c_double * 2)(-1., -1.) 
if self._library.computeEPOCH16(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0, 0, epoch16): return (-1., -1.) #Failure, so illegal epoch return (epoch16[0], epoch16[1]) def epoch_to_epoch16(self, epoch): """ Converts a CDF EPOCH to a CDF EPOCH16 value Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : (double, double) EPOCH16 corresponding to epoch """ e = numpy.require(epoch, numpy.float64) s = numpy.trunc(e / 1000.0) #ugly numpy stuff, probably a better way.... res = numpy.hstack((s, (e - s * 1000.0) * 1e9)) if len(res) <= 2: return res newshape = list(res.shape[0:-2]) newshape.append(res.shape[-1] // 2) newshape.append(2) return numpy.rollaxis(res.reshape(newshape), -1, -2) def epoch_to_num(self, epoch): """ Convert CDF EPOCH to matplotlib number. Same output as :func:`~matplotlib.dates.date2num` and useful for plotting large data sets without converting the times through datetime. Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : double Floating point number representing days since 0001-01-01. """ #date2num day 1 is 1/1/1 00UT #epoch 1/1/1 00UT is 31622400000.0 (millisecond) return (epoch - 31622400000.0) / (24 * 60 * 60 * 1000.0) + 1.0 def epoch16_to_epoch(self, epoch16): """ Converts a CDF EPOCH16 to a CDF EPOCH value Parameters ========== epoch16 : (double, double) EPOCH16 to convert. Lists and numpy arrays are acceptable. LAST dimension should be 2: the two pairs of EPOCH16 Returns ======= out : double EPOCH corresponding to epoch16 """ e = numpy.require(epoch16, numpy.float64) return e[..., 0] * 1000.0 + numpy.round(e[..., 1] / 1e9) def tt2000_to_datetime(self, tt2000): """ Converts a CDF TT2000 value to a datetime .. note:: Although TT2000 values support leapseconds, Python's datetime object does not. Any times after 23:59:59.999999 will be truncated to 23:59:59.999999. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
See Also ======== v_tt2000_to_datetime """ yyyy = ctypes.c_double(0) mm = ctypes.c_double(0) dd = ctypes.c_double(0) hh = ctypes.c_double(0) min = ctypes.c_double(0) sec = ctypes.c_double(0) msec = ctypes.c_double(0) usec = ctypes.c_double(0) nsec = ctypes.c_double(0) self._library.breakdownTT2000( ctypes.c_longlong(tt2000), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) sec = int(sec.value) if sec >= 60: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), 59, 999999) micro = int(msec.value * 1000 + usec.value + nsec.value / 1000 + 0.5) if micro < 1000000: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_tt2000(self, dt): """ Converts a Python datetime to a CDF TT2000 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0) def _datetime_to_tt2000_typepunned(self, dt): """ Converts a Python datetime to a CDF TT2000 value Typepunned version that passes doubles as longlongs, to get around ARM calling convention oddness. Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ c_ll_p = ctypes.POINTER(ctypes.c_longlong) if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( dt.year)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.month)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.day)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.hour)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.minute)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.second)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond // 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond % 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) def epoch_to_tt2000(self, epoch): """ Converts a CDF EPOCH to a CDF TT2000 value Parameters ========== epoch : double EPOCH to convert Returns ======= out : int tt2000 corresponding to epoch See Also ======== v_epoch_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH(epoch) def tt2000_to_epoch(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH .. note:: Although TT2000 values support leapseconds, CDF EPOCH values do not. 
Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double EPOCH corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch """ return self._library.CDF_TT2000_to_UTC_EPOCH(tt2000) def epoch16_to_tt2000(self, epoch0, epoch1): """ Converts a CDF epoch16 value to TT2000 .. note:: Because TT2000 does not support picoseconds, the picoseconds value in epoch is ignored (i.e., truncated.) Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : long TT2000 corresponding to epoch. See Also ======== v_epoch16_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH16( (ctypes.c_double * 2)(epoch0, epoch1)) def tt2000_to_epoch16(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH16 .. note:: Although TT2000 values support leapseconds, CDF EPOCH16 values do not. Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double, double EPOCH16 corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch16 """ #Default to "illegal epoch" if isn't populated epoch16 = (ctypes.c_double * 2)(-1., -1.) if self._library.CDF_TT2000_to_UTC_EPOCH16(tt2000, epoch16): return (-1., -1.) #Failure; illegal epoch return (epoch16[0], epoch16[1]) def _bad_tt2000(*args, **kwargs): """Convenience function for complaining that TT2000 not supported""" raise NotImplementedError( 'TT2000 functions require CDF library 3.4.0 or later') def download_library(): """Download and install the CDF library""" if sys.platform != 'win32': raise NotImplementedError( 'CDF library install only supported on Windows') try: import html.parser as HTMLParser except ImportError: import HTMLParser #https://stackoverflow.com/questions/1713038/super-fails-with-error-typeerror-argument-1-must-be-type-not-classobj class LinkParser(HTMLParser.HTMLParser, object): def __init__(self, *args, **kwargs): self.links_found = [] super(LinkParser, self).__init__(*args, **kwargs) def handle_starttag(self, tag, attrs): if tag != 'a' or attrs[0][0] != 'href': return self.links_found.append(attrs[0][1]) import re import subprocess try: import urllib.request as u except ImportError: import urllib as u # Removed reference to spacepy #import spacepy #if spacepy.config.get('user_agent', None): # class AppURLopener(u.FancyURLopener): # version = spacepy.config['user_agent'] # u._urlopener = AppURLopener() baseurl = 'https://spdf.sci.gsfc.nasa.gov/pub/software/cdf/dist/' url = u.urlopen(baseurl) listing = url.read() url.close() p = LinkParser() p.feed(listing) cdfdist = [l for l in p.links_found if re.match('^cdf3\d_\d(?:_\d)?/$', l)] if not cdfdist: raise RuntimeError( "Couldn't find CDF distribution directory to download") cdfdist.sort(key=lambda x: x.rstrip('/').split('_')) cdfverbase = cdfdist[-1].rstrip('/') instfname = cdfverbase + ('_0' if cdfverbase.count('_') == 1 else '') + \ '-setup-{0}.exe'.format(len('%x' % sys.maxsize)*4) insturl = baseurl + cdfverbase + '/windows/' + instfname tmpdir = tempfile.mkdtemp() try: fname, status = u.urlretrieve(insturl, os.path.join(tmpdir, instfname)) subprocess.check_call([fname, '/install', '/q1'], shell=False) finally: shutil.rmtree(tmpdir) _libpath, _library = Library._find_lib() 
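# _find_lib returns the sequence of paths that were checked and the loaded
# ctypes library handle (or None if no loadable CDF library was found); the
# check below makes a missing library fatal at import time, with a hint to
# set CDF_LIB in the environment before importing.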
if _library is None: raise Exception(('Cannot load CDF C library; checked {0}. ' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(_libpath))) from . import const lib = Library(_libpath, _library) """Module global library object. Initalized at module load time so all classes have ready access to the CDF library and a common state. E.g: >>> import pycdf >>> pycdf.lib.version (3, 3, 0, ' ') """ class CDFException(Exception): """ Base class for errors or warnings in the CDF library. Not normally used directly, but in subclasses :class:`CDFError` and :class:`CDFWarning`. Error messages provided by this class are looked up from the underlying C library. """ def __init__(self, status): """ Create a CDF Exception Uses CDF C library to look up an appropriate error message. Parameters ========== status : ctypes.c_long CDF status """ self.status = status self.string = 'CDF error ' + repr(status) + ', unable to get details.' message = ctypes.create_string_buffer(const.CDF_STATUSTEXT_LEN + 1) try: retval = lib._library.CDFlib(const.SELECT_, const.CDF_STATUS_, ctypes.c_long(status), const.GET_, const.STATUS_TEXT_, message, const.NULL_) if retval == const.CDF_OK: if isinstance(message.value, str): self.string = message.value elif isinstance(message.value, bytes): self.string = message.value.decode() except: pass def __str__(self): """ Error string associated with the library error. Returns ======= out : str Error message from the CDF library. """ return self.string class CDFError(CDFException): """Raised for an error in the CDF library.""" pass class CDFWarning(CDFException, UserWarning): """Used for a warning in the CDF library.""" def warn(self, level=4): """ Issues a warning based on the information stored in my exception Intended for use in check_status or similar wrapper function. Other Parameters ================ level : int optional (default 3), how far up the stack the warning should be reported. Passed directly to :class:`warnings.warn`. """ warnings.warn(self, self.__class__, level) class EpochError(Exception): """Used for errors in epoch routines""" pass def _compress(obj, comptype=None, param=None): """Set or check the compression of a :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param obj: object on which to set or check compression @type obj: :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param comptype: type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :py:mod:`pycdf.const`. If not specified, will not change compression. @type comptype: ctypes.c_long @param param: Compression parameter, see CDF CRM 4.10 and :py:mod:`pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
@type param: ctypes.c_long @return: (comptype, param) currently in effect @rtype: tuple """ if isinstance(obj, CDF): COMPRESSION_ = const.CDF_COMPRESSION_ elif isinstance(obj, Var): COMPRESSION_ = const.zVAR_COMPRESSION_ else: raise ValueError('Must specify a CDF or Var type.') validparams = {const.NO_COMPRESSION.value: [ctypes.c_long(0)], const.RLE_COMPRESSION.value: [const.RLE_OF_ZEROs], const.HUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.AHUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.GZIP_COMPRESSION.value: [ctypes.c_long(5), ctypes.c_long(1), ctypes.c_long(2), ctypes.c_long(3), ctypes.c_long(4), ctypes.c_long(6), ctypes.c_long(7), ctypes.c_long(8), ctypes.c_long(9), ], } comptypes = [const.NO_COMPRESSION, const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION, const.GZIP_COMPRESSION] comptypevalues = [i.value for i in comptypes] if comptype != None: if not hasattr(comptype, 'value'): comptype = ctypes.c_long(comptype) if param is None: if not comptype.value in validparams: raise CDFError(const.BAD_COMPRESSION) param = validparams[comptype.value][0] paramlist = (ctypes.c_long * 1)(param) obj._call(const.PUT_, COMPRESSION_, comptype, paramlist) params = (ctypes.c_long * const.CDF_MAX_PARMS)(*([0] * const.CDF_MAX_PARMS)) comptype = ctypes.c_long(0) percent = ctypes.c_long(0) obj._call(const.GET_, COMPRESSION_, ctypes.byref(comptype), ctypes.byref(params), ctypes.byref(percent)) param = params[0] if not comptype.value in comptypevalues: raise CDFError(const.BAD_COMPRESSION) validparamvalues = [i.value for i in validparams[comptype.value]] if not param in validparamvalues: raise CDFError(const.BAD_COMPRESSION_PARM) comptype = comptypes[comptypevalues.index(comptype.value)] if comptype in (const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION): param = validparams[comptype.value][validparamvalues.index(param)] return (comptype, param) class CDF(MutableMapping): """ Python object representing a CDF file. Open or create a CDF file by creating an object of this class. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. A readonly CDF with many variables may be slow to close. See :meth:`readonly`. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :meth:`close` or :meth:`save` when done. .. note:: Existing CDF files are opened read-only by default, see :meth:`readonly` to change. CDF supports the `with <http://docs.python.org/tutorial/inputoutput.html#methods-of-file-objects>`_ keyword, like other file objects, so: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... #do brilliant things with the CDF will open the CDF, execute the indented statements, and close the CDF when finished or when an error occurs. The `python docs <http://docs.python.org/reference/compound_stmts.html#with>`_ include more detail on this 'context manager' ability. 
CDF objects behave like a python `dictionary <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_, where the keys are names of variables in the CDF, and the values, :class:`Var` objects. As a dictionary, they are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_ and it is easy to loop over all of the variables in a file. Some examples: #. List the names of all variables in the open CDF ``cdffile``: >>> cdffile.keys() >>> for k in cdffile: #Alternate ... print(k) #. Get a :class:`Var` object for the variable named ``Epoch``: >>> epoch = cdffile['Epoch'] #. Determine if a CDF contains a variable named ``B_GSE``: >>> if 'B_GSE' in cdffile: ... print('B_GSE is in the file') ... else: ... print('B_GSE is not in the file') #. Find how many variables are in the file: >>> print(len(cdffile)) #. Delete the variable ``Epoch`` from the open CDF file ``cdffile``: >>> del cdffile['Epoch'] #. Display a summary of variables and types in open CDF file ``cdffile``: >>> print(cdffile) #. Open the CDF named ``cdf_filename.cdf``, read *all* the data from all variables into dictionary ``data``, and close it when done or if an error occurs: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... data = cdffile.copy() This last example can be very inefficient as it reads the entire CDF. Normally it's better to treat the CDF as a dictionary and access only the data needed, which will be pulled transparently from disc. See :class:`Var` for more subtle examples. Potentially useful dictionary methods and related functions: - `in <http://docs.python.org/reference/expressions.html#in>`_ - `keys <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_ - :py:func:`len` - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - :py:func:`sorted` - :py:func:`~spacepy.toolbox.dictree` The CDF user's guide section 2.2 has more background information on CDF files. The :attr:`~CDF.attrs` Python attribute acts as a dictionary referencing CDF attributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`gAttrList` for more on the dictionary of global attributes. Creating a new CDF from a master (skeleton) CDF has similar syntax to opening one: >>> cdffile = pycdf.CDF('cdf_filename.cdf', 'master_cdf_filename.cdf') This creates and opens ``cdf_filename.cdf`` as a copy of ``master_cdf_filename.cdf``. Using a skeleton CDF is recommended over making a CDF entirely from scratch, but this is possible by specifying a blank master: >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') When CDFs are created in this way, they are opened read-write, see :py:meth:`readonly` to change. By default, new CDFs (without a master) are created in version 2 (backward-compatible) format. To create a version 3 CDF, use :meth:`Library.set_backward`: >>> pycdf.lib.set_backward(False) >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') Add variables by direct assignment, which will automatically set type and dimension based on the data provided: >>> cdffile['new_variable_name'] = [1, 2, 3, 4] or, if more control is needed over the type and dimensions, use :py:meth:`new`. Although it is supported to assign Var objects to Python variables for convenience, there are some minor pitfalls that can arise when changing a CDF that will not affect most users. 
This is only a concern when assigning a zVar object to a Python variable, changing the CDF through some other variable, and then trying to use the zVar object via the originally assigned variable. Deleting a variable: >>> var = cdffile['Var1'] >>> del cdffile['Var1'] >>> var[0] #fail, no such variable Renaming a variable: >>> var = cdffile['Var1'] >>> cdffile['Var1'].rename('Var2') >>> var[0] #fail, no such variable Renaming via the same variable works: >>> var = cdffile['Var1'] >>> var.rename('Var2') >>> var[0] #succeeds, aware of new name Deleting a variable and then creating another variable with the same name may lead to some surprises: >>> var = cdffile['Var1'] >>> var[...] = [1, 2, 3, 4] >>> del cdffile['Var1'] >>> cdffile.new('Var1', data=[5, 6, 7, 8] >>> var[...] [5, 6, 7, 8] .. autosummary:: ~CDF.attr_num ~CDF.attrs ~CDF.add_attr_to_cache ~CDF.add_to_cache ~CDF.backward ~CDF.checksum ~CDF.clear_attr_from_cache ~CDF.clear_from_cache ~CDF.clone ~CDF.close ~CDF.col_major ~CDF.compress ~CDF.copy ~CDF.from_data ~CDF.new ~CDF.raw_var ~CDF.readonly ~CDF.save ~CDF.var_num ~CDF.version .. attribute:: CDF.attrs Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. .. attribute:: CDF.backward True if this CDF was created in backward-compatible mode (for opening with CDF library before 3.x) .. automethod:: add_to_cache .. automethod:: add_attr_to_cache .. automethod:: attr_num .. automethod:: checksum .. automethod:: clear_from_cache .. automethod:: clear_attr_from_cache .. automethod:: clone .. automethod:: close .. automethod:: col_major .. automethod:: compress .. automethod:: copy .. automethod:: from_data .. automethod:: new .. automethod:: raw_var .. automethod:: readonly .. automethod:: save .. automethod:: var_num .. automethod:: version """ def __init__(self, pathname, masterpath=None, create=None, readonly=None): """Open or create a CDF file. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. Raises ====== CDFError if CDF library reports an error CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save` when done. 
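        To open an existing CDF read-write instead of the read-only default
        (a usage sketch; the filename is illustrative):

        >>> cdffile = pycdf.CDF('cdf_filename.cdf', readonly=False)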
""" if masterpath is not None: #Looks like we want to create if create is False: raise ValueError('Cannot specify a master CDF without creating a CDF') if readonly is True: raise ValueError('Cannot create a CDF in readonly mode') if create and readonly: raise ValueError('Cannot create a CDF in readonly mode') try: self.pathname = pathname.encode() except AttributeError: raise ValueError( 'pathname must be string-like: {0}'.format(pathname)) self._handle = ctypes.c_void_p(None) self._opened = False if masterpath is None and not create: self._open(True if readonly is None else readonly) elif masterpath: self._from_master(masterpath.encode()) else: self._create() lib.call(const.SELECT_, const.CDF_zMODE_, ctypes.c_long(2)) self._attrlistref = weakref.ref(gAttrList(self)) self.backward = self.version()[0] < 3 self._var_nums = {} """Cache of name-to-number mappings for variables in this CDF""" self._attr_info = {} """Cache of name-to-(number, global) mappings for attributes in this CDF""" def __del__(self): """Destructor; called when CDF object is destroyed. Close CDF file if there is still a valid handle. .. note:: To avoid data loss, explicitly call :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save`. """ if self._opened: self.close() def __delitem__(self, name): """Delete a zVariable in this CDF, by name or number Parameters ========== name : string or int Name or number of the CDF variable .. note: Variable numbers may change if variables are added or removed. Examples ======== Delete the variable ``Epoch`` from the open CDF file ``cdffile``. >>> del cdffile['Epoch'] """ self[name]._delete() def __enter__(self): """Context manager entrance function.""" return self def __exit__(self, type, value, traceback): """Context manager exit function. Close CDF file. """ self.close() def __getitem__(self, name): """Gets a zVariable in this CDF, by name or number The CDF acts like a dict @param name: Name or number of the CDF variable @type name: string or int @return: CDF variable named or numbered L{name} @rtype: :py:class:`pycdf.Var` @raise KeyError: for pretty much any problem in lookup @note: variable numbers may change if variables are added or removed. """ try: return Var(self, name) except CDFException as e: raise KeyError('{0}: {1}'.format(name, e)) def __setitem__(self, name, data): """Writes data to a zVariable in this CDF If the zVariable does not exist, will create one matching L{data}. If it does exist, will attempt to write L{data} to it without changing the type or dimensions. @param name: name or number of the variable to write @type name: str or int @param data: data to write, or a :py:class:`pycdf.Var` to copy """ if isinstance(data, Var): self.clone(data, name) elif name in self: self[name][...] 
= data if hasattr(data, 'attrs'): self[name].attrs.clone(data.attrs) else: self.new(name, data) def __iter__(self, current = 0): """Iterates over zVars in CDF Iterators for dicts return keys @note: Returned in variable-number order """ while current < self.__len__(): name = self[current].name() value = (yield name) if value is None: current += 1 else: current = self[value]._num() current += 1 def __len__(self): """Implements 'length' of CDF (number of zVars) @return: number of zVars in the CDF @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, const.CDF_NUMzVARS_, ctypes.byref(count)) return count.value def __contains__(self, key): """Determines whether a particular variable name is in the CDF @note: Essentially an efficiency function; L{__iter__} is called if this isn't defined @param key: key/variable name to check @type key: string @return: True if L{key} is the name of a variable in CDF, else False @rtype: Boolean """ try: foo = self[key] return True except KeyError as e: expected = str(key) + \ ": NO_SUCH_VAR: Named variable not found in this CDF." if expected in e.args: return False raise def __repr__(self): """Returns representation of CDF Cannot return anything that can be eval'd to create a copy of the CDF, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<CDF:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the CDF This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.CDF`, just the names, types, and sizes of all variables. (Attributes are not listed.) @return: description of the variables in the CDF @rtype: str """ if self._opened: return '\n'.join([key + ': ' + str(value) for (key, value) in sorted(self.items())]) #can get away with this sort because second value in tuple isn't #compared unless first are different, and variable name is unique. else: if isinstance(self.pathname, str): return 'Closed CDF {0}'.format(self.pathname) else: return 'Closed CDF {0}'.format(self.pathname.decode('ascii')) def _open(self, readonly=True): """Opens the CDF file (called on init) Will open an existing CDF file read/write. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.OPEN_, const.CDF_, self.pathname, ctypes.byref(self._handle)) self._opened = True if readonly: #Default is RW self.readonly(readonly) def _create(self): """Creates (and opens) a new CDF file Created at ``pathname``. Assumes zero-dimension r variables Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.CREATE_, const.CDF_, self.pathname, ctypes.c_long(0), (ctypes.c_long * 1)(0), ctypes.byref(self._handle)) self._opened = True def _from_master(self, master_path): """Creates a new CDF from a master CDF file ``master_path`` is copied to ``pathname`` and opened. Parameters ========== master_path : string location of the master CDF file Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. 
note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ if os.path.exists(self.pathname): raise CDFError(const.CDF_EXISTS) shutil.copy2(master_path, self.pathname) self._open(False) def _call(self, *args, **kwargs): """Select this CDF as current and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. Parameters ========== args : various, see :py:mod:`ctypes`. Passed directly to the CDF library interface. Useful constants are defined in the :doc:`const <pycdf_const>` module of this package. Returns ======= out : ctypes.c_long CDF status from the library .. note: Terminal NULL_ is automatically added to ``args``. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. """ return lib.call(const.SELECT_, const.CDF_, self._handle, *args, **kwargs) def clone(self, zVar, name=None, data=True): """ Clone a zVariable (from another CDF or this) into this CDF Parameters ========== zVar : :py:class:`Var` variable to clone Other Parameters ================ name : str Name of the new variable (default: name of the original) data : boolean (optional) Copy data, or only type, dimensions, variance, attributes? (default: True, copy data as well) Returns ======= out : :py:class:`Var` The newly-created zVar in this CDF """ if name is None: name = zVar.name() if name in self: del self[name] self.new(name, type=zVar.type(), recVary=zVar.rv(), dimVarys=zVar.dv(), dims=zVar._dim_sizes(), n_elements=zVar._nelems()) self[name].compress(*zVar.compress()) self[name].attrs.clone(zVar.attrs) if data: r = zVar._raw zVar._raw = True self.raw_var(name)[...] = zVar[...] zVar._raw = r return zVar def col_major(self, new_col=None): """ Finds the majority of this CDF file Other Parameters ================ new_col : boolean Specify True to change to column-major, False to change to row major, or do not specify to check the majority rather than changing it. (default is check only) Returns ======= out : boolean True if column-major, false if row-major """ if new_col != None: new_maj = const.COLUMN_MAJOR if new_col else const.ROW_MAJOR self._call(const.PUT_, const.CDF_MAJORITY_, new_maj) maj = ctypes.c_long(0) self._call(const.GET_, const.CDF_MAJORITY_, ctypes.byref(maj)) if not maj.value in (const.ROW_MAJOR.value, const.COLUMN_MAJOR.value): raise CDFError(const.BAD_MAJORITY) return maj.value == const.COLUMN_MAJOR.value def readonly(self, ro=None): """ Sets or check the readonly status of this CDF If the CDF has been changed since opening, setting readonly mode will have no effect. .. note:: Closing a CDF that has been opened readonly, or setting readonly False, may take a substantial amount of time if there are many variables in the CDF, as a (potentially large) cache needs to be cleared. Consider specifying ``readonly=False`` when opening the file if this is an issue. However, this may make some reading operations slower. Other Parameters ================ ro : Boolean True to set the CDF readonly, False to set it read/write, or leave out to check only. 
Returns ======= out : Boolean True if CDF is read-only, else False Raises ====== CDFError : if bad mode is set """ if ro == True: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYon) elif ro == False: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYoff) mode = ctypes.c_long(0) self._call(const.CONFIRM_, const.CDF_READONLY_MODE_, ctypes.byref(mode)) if mode.value == const.READONLYon.value: return True elif mode.value == const.READONLYoff.value: return False else: raise CDFError(const.BAD_READONLY_MODE.value) def checksum(self, new_val=None): """ Set or check the checksum status of this CDF. If checksums are enabled, the checksum will be verified every time the file is opened. Other Parameters ================ new_val : boolean True to enable checksum, False to disable, or leave out to simply check. Returns ======= out : boolean True if the checksum is enabled or False if disabled """ if new_val != None: self._call(const.PUT_, const.CDF_CHECKSUM_, const.MD5_CHECKSUM if new_val else const.NO_CHECKSUM) chk = ctypes.c_long(0) self._call(const.GET_, const.CDF_CHECKSUM_, ctypes.byref(chk)) if not chk.value in (const.MD5_CHECKSUM.value, const.NO_CHECKSUM.value): raise CDFError(const.BAD_CHECKSUM) return chk.value == const.MD5_CHECKSUM.value def close(self): """ Closes the CDF file Although called on object destruction (:meth:`~CDF.__del__`), to ensure all data are saved, the user should explicitly call :meth:`~CDF.close` or :meth:`~CDF.save`. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.CLOSE_, const.CDF_) self._opened = False def compress(self, comptype=None, param=None): """ Set or check the compression of this CDF Sets compression on entire *file*, not per-variable. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) Returns ======= out : tuple (comptype, param) currently in effect See Also ======== :meth:`Var.compress` Examples ======== Set file ``cdffile`` to gzip compression, compression level 9: >>> cdffile.compress(pycdf.const.GZIP_COMPRESSION, 9) """ return _compress(self, comptype, param) def new(self, name, data=None, type=None, recVary=True, dimVarys=None, dims=None, n_elements=None, compress=None, compress_param=None): """ Create a new zVariable in this CDF .. note:: Either ``data`` or ``type`` must be specified. If type is not specified, it is guessed from ``data``. Parameters ========== name : str name of the new variable Other Parameters ================ data data to store in the new variable. If this has a an ``attrs`` attribute (e.g., :class:`~spacepy.datamodel.dmarray`), it will be used to populate attributes of the new variable. type : ctypes.c_long CDF type of the variable, from :mod:`~pycdf.const`. See section 2.5 of the CDF user's guide for more information on CDF data types. recVary : boolean record variance of the variable (default True) dimVarys : list of boolean dimension variance of each dimension, default True for all dimensions. 
dims : list of int size of each dimension of this variable, default zero-dimensional. Note this is the dimensionality as defined by CDF, i.e., for record-varying variables it excludes the leading record dimension. See :py:class:`Var`. n_elements : int number of elements, should be 1 except for CDF_CHAR, for which it's the length of the string. compress : ctypes.c_long Compression to apply to this variable, default None. See :py:meth:`Var.compress`. compress_param : ctypes.c_long Compression parameter if compression used; reasonable default is chosen. See :py:meth:`Var.compress`. Returns ======= out : :py:class:`Var` the newly-created zVariable Raises ====== ValueError : if neither data nor sufficient typing information is provided. Notes ===== Any given data may be representable by a range of CDF types; if the type is not specified, pycdf will guess which the CDF types which can represent this data. This breaks down to: #. If input data is a numpy array, match the type of that array #. Proper kind (numerical, string, time) #. Proper range (stores highest and lowest number provided) #. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: #. Type that matches precision of data first, then #. integer type before float type, then #. Smallest type first, then #. signed type first, then #. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if ``data`` specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: #. absolute values between 0 and 3e-39 #. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. """ if type in (const.CDF_EPOCH16, const.CDF_INT8, const.CDF_TIME_TT2000) \ and self.backward: raise ValueError('Cannot use EPOCH16, INT8, or TIME_TT2000 ' 'in backward-compatible CDF') if not lib.supports_int8 and \ type in (const.CDF_INT8, const.CDF_TIME_TT2000): raise ValueError('INT8 and TIME_TT2000 require CDF library 3.4.0') if data is None: if type is None: raise ValueError('Must provide either data or a CDF type.') if dims is None: dims = [] if n_elements is None: n_elements = 1 else: (guess_dims, guess_types, guess_elements) = _Hyperslice.types(data) if dims is None: if recVary: if guess_dims == (): raise ValueError( 'Record-varying data cannot be scalar. 
' 'Specify NRV with CDF.new() or put data in array.') dims = guess_dims[1:] else: dims = guess_dims if type is None: type = guess_types[0] if type == const.CDF_EPOCH16.value and self.backward: type = const.CDF_EPOCH if n_elements is None: n_elements = guess_elements if dimVarys is None: dimVarys = [True for i in dims] recVary = const.VARY if recVary else const.NOVARY dimVarys = [const.VARY if dimVary else const.NOVARY for dimVary in dimVarys] if not hasattr(type, 'value'): type = ctypes.c_long(type) if type.value == const.CDF_INT8.value and not lib.supports_int8: raise ValueError( '64-bit integer support require CDF library 3.4.0') if type.value in (const.CDF_EPOCH16.value, const.CDF_INT8.value, const.CDF_TIME_TT2000.value) \ and self.backward: raise ValueError('Data requires EPOCH16, INT8, or TIME_TT2000; ' 'incompatible with backward-compatible CDF') new_var = Var(self, name, type, n_elements, dims, recVary, dimVarys) if compress != None: new_var.compress(compress, compress_param) if data is not None: new_var[...] = data if hasattr(data, 'attrs'): new_var.attrs.clone(data.attrs) return new_var def raw_var(self, name): """ Get a "raw" :class:`Var` object. Normally a :class:`Var` will perform translation of values for certain types (to/from Unicode for CHAR variables on Py3k, and to/from datetime for all time types). A "raw" object does not perform this translation, on read or write. This does *not* affect the data on disk, and in fact it is possible to maintain multiple Python objects with access to the same zVariable. Parameters ========== name : str name or number of the zVariable """ v = self[name] v._raw = True return v def save(self): """ Saves the CDF file but leaves it open. If closing the CDF, :meth:`close` is sufficient; there is no need to call :meth:`save` before :meth:`close`. .. note:: Relies on an undocumented call of the CDF C library, which is also used in the Java interface. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.SAVE_, const.CDF_) def copy(self): """ Make a copy of all data and attributes in this CDF Returns ======= out : :py:class:`CDFCopy` :class:`~spacepy.datamodel.SpaceData`-like object of all data """ return CDFCopy(self) def version(self): """ Get version of library that created this CDF Returns ======= out : tuple version of CDF library, in form (version, release, increment) """ ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) self._call(const.GET_, const.CDF_VERSION_, ctypes.byref(ver), const.GET_, const.CDF_RELEASE_, ctypes.byref(rel), const.GET_, const.CDF_INCREMENT_, ctypes.byref(inc)) return (ver.value, rel.value, inc.value) def _get_attrs(self): """Get attribute list Provide access to the CDF's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = gAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. 
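        A brief usage sketch (``'TITLE'`` is a hypothetical attribute name;
        the CDF must be opened read/write for assignment):

        >>> cdffile.attrs['TITLE'] = 'Sample dataset'
        >>> 'TITLE' in cdffile.attrs
        True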
""") def var_num(self, varname): """Get the variable number of a particular variable name This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : int Variable number of this zvariable. """ num = self._var_nums.get(varname, None) if num is None: #Copied from Var._get, which can hopefully be thinned varNum = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMBER_, varname, ctypes.byref(varNum)) num = varNum.value self._var_nums[varname] = num return num def attr_num(self, attrname): """Get the attribute number and scope by attribute name This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : tuple attribute number, scope (True for global) of this attribute """ res = self._attr_info.get(attrname, None) if res is None: #Copied from Var._get, which can hopefully be thinned attrNum = ctypes.c_long(0) self._call(const.GET_, const.ATTR_NUMBER_, attrname, ctypes.byref(attrNum)) scope = ctypes.c_long(0) self._call(const.SELECT_, const.ATTR_, attrNum, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) res = (attrNum.value, scope) self._attr_info[attrname] = res return res def clear_attr_from_cache(self, attrname): """Mark an attribute deleted in the name-to-number cache Will remove an attribute, and all attributes with higher numbers, from the attribute cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the attribute. Not this is NOT a string in Python 3! """ num, scope = self.attr_num(attrname) #All numbers higher than this are renumbered for a, n in list(self._attr_info.items()): if n[0] >= num: del self._attr_info[a] def clear_from_cache(self, varname): """Mark a variable deleted in the name-to-number cache Will remove a variable, and all variables with higher numbers, from the variable cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! """ num = self.var_num(varname) #All numbers higher than this are renumbered for v, n in list(self._var_nums.items()): if n >= num: del self._var_nums[v] def add_attr_to_cache(self, attrname, num, scope): """Add an attribute to the name-to-number cache This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable scope : bool True if global scope; False if variable scope. 
""" self._attr_info[attrname] = (num, scope) def add_to_cache(self, varname, num): """Add a variable to the name-to-number cache This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable """ self._var_nums[varname] = num #Note there is no function for delete, currently handled in Var.rename #and Attr.rename by just deleting from the dict directly. Maybe this #should be differen (maybe should be possible to follow a variable across #a rename...) class Var(MutableSequence): """ A CDF variable. This object does not directly store the data from the CDF; rather, it provides access to the data in a format that much like a Python list or numpy :class:`~numpy.ndarray`. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. The CDF user's guide, section 2.3, provides background on variables. .. note:: Not intended to be created directly; use methods of :class:`CDF` to gain access to a variable. A record-varying variable's data are viewed as a hypercube of dimensions n_dims+1 (the extra dimension is the record number). They are indexed in row-major fashion, i.e. the last index changes most frequently / is contiguous in memory. If the CDF is column-major, the data are transformed to row-major before return. Non record-varying variables are similar, but do not have the extra dimension of record number. Variables can be subscripted by a multidimensional index to return the data. Indices are in row-major order with the first dimension representing the record number. If the CDF is column major, the data are reordered to row major. Each dimension is specified by standard Python `slice <http://docs.python.org/tutorial/introduction.html#strings>`_ notation, with dimensions separated by commas. The ellipsis fills in any missing dimensions with full slices. The returned data are lists; Python represents multidimensional arrays as nested lists. The innermost set of lists represents contiguous data. .. note:: numpy 'fancy indexing' is *not* supported. Degenerate dimensions are 'collapsed', i.e. no list of only one element will be returned if a single subscript is specified instead of a range. (To avoid this, specify a slice like 1:2, which starts with 1 and ends before 2). Two special cases: 1. requesting a single-dimension slice for a record-varying variable will return all data for that record number (or those record numbers) for that variable. 2. Requests for multi-dimensional variables may skip the record-number dimension and simply specify the slice on the array itself. In that case, the slice of the array will be returned for all records. In the event of ambiguity (e.g., single-dimension slice on a one-dimensional variable), case 1 takes priority. Otherwise, mismatch between the number of dimensions specified in the slice and the number of dimensions in the variable will cause an :exc:`~exceptions.IndexError` to be thrown. This all sounds very complicated but it is essentially attempting to do the 'right thing' for a range of slices. An unusual case is scalar (zero-dimensional) non-record-varying variables. Clearly they cannot be subscripted normally. 
In this case, use the ``[...]`` syntax meaning 'access all data.': >>> import pycdf >>> testcdf = pycdf.CDF('test.cdf', '') >>> variable = testcdf.new('variable', recVary=False, ... type=pycdf.const.CDF_INT4) >>> variable[...] = 10 >>> variable <Var: CDF_INT4 [] NRV > >>> variable[...] 10 Reading any empty non-record-varying variable will return an empty with the same *number* of dimensions, but all dimensions will be of zero length. The scalar is, again, a special case: due to the inability to have a numpy array which is both zero-dimensional and empty, reading an NRV scalar variable with no data will return an empty one-dimensional array. This is really not recommended. As a list type, variables are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_; iterating over a variable returns a single complete record at a time. This is all clearer with examples. Consider a variable ``B_GSM``, with three elements per record (x, y, z components) and fifty records in the CDF. Then: 1. ``B_GSM[0, 1]`` is the y component of the first record. 2. ``B_GSM[10, :]`` is a three-element list, containing x, y, and z components of the 11th record. As a shortcut, if only one dimension is specified, it is assumed to be the record number, so this could also be written ``B_GSM[10]``. 3. ``B_GSM[...]`` reads all data for ``B_GSM`` and returns it as a fifty-element list, each element itself being a three-element list of x, y, z components. Multidimensional example: consider fluxes stored as a function of pitch angle and energy. Such a variable may be called Flux and stored as a two-dimensional array, with the first dimension representing (say) ten energy steps and the second, eighteen pitch angle bins (ten degrees wide, centered from 5 to 175 degrees). Assume 100 records stored in the CDF (i.e. 100 different times). 1. ``Flux[4]`` is a list of ten elements, one per energy step, each element being a list of 18 fluxes, one per pitch bin. All are taken from the fifth record in the CDF. 2. ``Flux[4, :, 0:4]`` is the same record, all energies, but only the first four pitch bins (roughly, field-aligned). 3. ``Flux[..., 0:4]`` is a 100-element list (one per record), each element being a ten-element list (one per energy step), each containing fluxes for the first four pitch bins. This slicing notation is very flexible and allows reading specifically the desired data from the CDF. All data are, on read, converted to appropriate Python data types; EPOCH, EPOCH16, and TIME_TT2000 types are converted to :class:`~datetime.datetime`. Data are returned in numpy arrays. .. note:: Although pycdf supports TIME_TT2000 variables, the Python :class:`~datetime.datetime` object does not support leap seconds. Thus, on read, any seconds past 59 are truncated to 59.999999 (59 seconds, 999 milliseconds, 999 microseconds). Potentially useful list methods and related functions: - `count <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `in <http://docs.python.org/reference/expressions.html#in>`_ - `index <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `len <http://docs.python.org/library/functions.html#len>`_ - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - `sorted <http://docs.python.org/library/functions.html#sorted>`_ The topic of array majority can be very confusing; good background material is available at `IDL Array Storage and Indexing <http://www.idlcoyote.com/misc_tips/colrow_major.html>`_. 
In brief, *regardless of the majority stored in the CDF*, pycdf will always present the data in the native Python majority, row-major order, also known as C order. This is the default order in `NumPy <http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html #internal-memory-layout-of-an-ndarray>`_. However, packages that render image data may expect it in column-major order. If the axes seem 'swapped' this is likely the reason. The :attr:`~Var.attrs` Python attribute acts as a dictionary referencing zAttributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`zAttrList` for more on the dictionary of attributes. With writing, as with reading, every attempt has been made to match the behavior of Python lists. You can write one record, many records, or even certain elements of all records. There is one restriction: only the record dimension (i.e. dimension 0) can be resized by write, as all records in a variable must have the same dimensions. Similarly, only whole records can be deleted. .. note:: Unusual error messages on writing data usually mean that pycdf is unable to interpret the data as a regular array of a single type matching the type and shape of the variable being written. A 5x4 array is supported; an irregular array where one row has five columns and a different row has six columns is not. Error messages of this type include: - ``Data must be well-formed, regular array of number, string, or datetime`` - ``setting an array element with a sequence.`` - ``shape mismatch: objects cannot be broadcast to a single shape`` For these examples, assume Flux has 100 records and dimensions [2, 3]. Rewrite the first record without changing the rest: >>> Flux[0] = [[1, 2, 3], [4, 5, 6]] Writes a new first record and delete all the rest: >>> Flux[...] = [[1, 2, 3], [4, 5, 6]] Write a new record in the last position and add a new record after: >>> Flux[99:] = [[[1, 2, 3], [4, 5, 6]], ... [[11, 12, 13], [14, 15, 16]]] Insert two new records between the current number 5 and 6: >>> Flux[5:6] = [[[1, 2, 3], [4, 5, 6]], [[11, 12, 13], ... [14, 15, 16]]] This operation can be quite slow, as it requires reading and rewriting the entire variable. (CDF does not directly support record insertion.) Change the first element of the first two records but leave other elements alone: >>> Flux[0:2, 0, 0] = [1, 2] Remove the first record: >>> del Flux[0] Removes record 5 (the sixth): >>> del Flux[5] Due to the need to work around a bug in the CDF library, this operation can be quite slow. Delete *all data* from ``Flux``, but leave the variable definition intact: >>> del Flux[...] .. note:: Although this interface only directly supports zVariables, zMode is set on opening the CDF so rVars appear as zVars. See p.24 of the CDF user's guide; pyCDF uses zMode 2. .. autosummary:: ~Var.attrs ~Var.compress ~Var.copy ~Var.dtype ~Var.dv ~Var.insert ~Var.name ~Var.rename ~Var.rv ~Var.shape ~Var.type .. attribute:: Var.attrs zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. .. automethod:: compress .. automethod:: copy .. autoattribute:: dtype .. automethod:: dv .. automethod:: insert .. automethod:: name .. automethod:: rename .. automethod:: rv .. autoattribute:: shape .. 
automethod:: type """ def __init__(self, cdf_file, var_name, *args): """Create or locate a variable Parameters ========== cdf_file : :py:class:`pycdf.CDF` CDF file containing this variable var_name : string name of this variable Other Parameters ================ args additional arguments passed to :py:meth:`_create`. If none, opens an existing variable. If provided, creates a new one. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning """ self.cdf_file = cdf_file #This is the definitive "identify" of variable self._name = None self._type = None #CDF type (long) self._raw = False #Raw access (skip all conversions) if len(args) == 0: self._get(var_name) else: self._create(var_name, *args) #Weak reference to attribute list (use attrs instead) #This avoids a reference loop self._attrlistref = weakref.ref(zAttrList(self)) def __getitem__(self, key): """Returns a slice from the data array. Details under :py:class:`pycdf.Var`. @return: The data from this variable @rtype: list-of-lists of appropriate type. @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) #Hyperslice mostly catches this sort of thing, but #an empty variable is a special case, since we might want to #WRITE to 0th record (which Hyperslice also supports) but #can't READ from it, and iterating over tries to read from it. if hslice.rv: if hslice.dimsizes[0] == 0 and hslice.degen[0] and \ hslice.starts[0] == 0: raise IndexError('record index out of range') #For NRV, again hslice will assume 0th record exists since we might #want to write. So ANY degenerate dim other than the glued-on 0th #suggests an explicit index that should fail. None degenerate suggests #make an empty array. #Note this is pulling a lot of hyperslice stuff into getitem! elif hslice.dimsizes[0] == 0: if len(hslice.degen) > 1 and max(hslice.degen[1:]): raise IndexError('record index out of range') else: #The zero-length dimension is degenerate so it gets chopped, #and you can't have a zero-length numpy array that still #maintains the size of all other dimensions. So just force #a zero-dim array and the rest will follow hslice.counts[...] = 0 #If this is a scalar, need to make a single non-degenerate #dimension so it can be empty. if len(hslice.counts) == 1: hslice.degen[0] = False result = hslice.create_array() if hslice.counts[0] != 0: hslice.select() lib.call(const.GET_, const.zVAR_HYPERDATA_, result.ctypes.data_as(ctypes.c_void_p)) return hslice.convert_input_array(result) def __delitem__(self, key): """Removes a record (or set of records) from the CDF Only whole records can be deleted, so the del call must either specify only one dimension or it must specify all elements of the non-record dimensions. This is *not* a way to resize a variable! Deleting records from the middle of a variable may be very slow in some circumstances. To work around a bug in CDF library versions 3.4.0 and before, all the data must be read in, the requested deletions done, and then all written back out. 
@param key: index or slice to delete @type key: int or slice @raise TypeError: if an attempt is made to delete from a non record-varying variable, or to delete below the record level """ if not self.rv(): raise TypeError('Cannot delete records from non-record-varying ' 'variable.') hslice = _Hyperslice(self, key) if hslice.dims > 1 and (hslice.counts[1:] != hslice.dimsizes[1:]).any(): raise TypeError('Can only delete entire records.') if hslice.counts[0] == 0: return start = hslice.starts[0] count = hslice.counts[0] interval = hslice.intervals[0] dimsize = hslice.dimsizes[0] self._call() dangerous_delete = False if lib._del_middle_rec_bug and \ (interval != 1 or (start != 0 and start + count < dimsize)): #delete from middle is dangerous if only have one index entry entries = ctypes.c_long(0) lib.call(const.GET_, const.zVAR_nINDEXENTRIES_, ctypes.byref(entries)) dangerous_delete = (entries.value == 1) if dangerous_delete: data = self[...] data = numpy.delete( data, numpy.arange(start, start + count * interval, interval), 0) self[0:dimsize - count] = data first_rec = dimsize - count last_rec = dimsize - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif interval == 1: first_rec = ctypes.c_long(start) last_rec = ctypes.c_long(start + count - 1) lib.call(const.DELETE_, const.zVAR_RECORDS_, first_rec, last_rec) else: self._call() #delete from end to avoid renumbering of records for recno in range(start + (count - 1) * interval, start - 1, -1 * interval): lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(recno), ctypes.c_long(recno)) def __setitem__(self, key, data): """Puts a slice into the data array. Details under :py:class:`pycdf.Var`. @param key: index or slice to store @type key: int or slice @param data: data to store @type data: numpy.array @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. 
IndexError will @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) n_recs = hslice.counts[0] hslice.expand(data) cdf_type = self.type() if cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) else: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=self._np_type()) if cdf_type == const.CDF_EPOCH16.value: datashape = data.shape[:-1] else: datashape = data.shape #Check data sizes if datashape != tuple(hslice.expected_dims()): raise ValueError('attempt to assign data of dimensions ' + str(datashape) + ' to slice of dimensions ' + str(tuple(hslice.expected_dims()))) #Flip majority and reversed dimensions, see convert_input_array data = hslice.convert_output_array(data) #Handle insertions and similar weirdness if hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Specified slice ends before last record, so insert in middle saved_data = self[hslice.starts[0] + n_recs:] if hslice.counts[0] > 0: hslice.select() lib.call(const.PUT_, const.zVAR_HYPERDATA_, data.ctypes.data_as(ctypes.c_void_p)) if hslice.counts[0] < n_recs: first_rec = hslice.starts[0] + hslice.counts[0] last_rec = hslice.dimsizes[0] - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Put saved data in after inserted data self[hslice.starts[0] + hslice.counts[0]:] = saved_data def extend(self, data): """ Append multiple values to the end of this variable This is an efficiency function which overrides the base implementation in MutableSequence. Parameters ---------- data : the data to append """ self[len(self):] = data def insert(self, index, data): """ Inserts a *single* record before an index Parameters ---------- index : int index before which to insert the new record data : the record to insert """ self[index:index] = [data] def _create(self, var_name, datatype, n_elements = 1, dims = (), recVary = const.VARY, dimVarys = None): """Creates a new zVariable @param var_name: name of this variable @type var_name: string @param datatype: CDF data type @type datatype: ctypes.c_long @param n_elements: number of elements (should be 1 except for CDF_CHAR variables). @type n_elements: long @param dims: size of each dimension for multi-dimensional variable, or empty for a zero-dimensional @type dims: sequence of long @param recVary: record variance for this variable (VARY/NOVARY) @type recVary: long @param dimVarys: array of VARY or NOVARY, variance for each dimension @type dimVarys: sequence of long @return: new variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.new}. 
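        Example of the preferred interface (illustrative; the variable name and
        data are hypothetical, and L{CDF.new} infers type and dimensions from
        the data):

        >>> var = cdf.new('Flux', data=[[1.0, 2.0], [3.0, 4.0]])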
""" dim_array = (ctypes.c_long * len(dims))(*dims) enc_name = var_name.encode('ascii') if dimVarys is None: dim_vary_array = (ctypes.c_long * (len(dims) if len(dims) > 0 else 1))(const.VARY) else: dim_vary_array = (ctypes.c_long * len(dims))(*dimVarys) varNum = ctypes.c_long(0) self.cdf_file._call(const.CREATE_, const.zVAR_, enc_name, datatype, ctypes.c_long(n_elements), ctypes.c_long(len(dims)), dim_array, recVary, dim_vary_array, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) def _delete(self): """Removes this zVariable from the CDF @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ self._call(const.DELETE_, const.zVAR_) self.cdf_file.clear_from_cache(self._name) self._name = None def _get(self, var_name): """Gets an existing zVariable @param var_name: name of this variable @type var_name: string @return: variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.__getitem__}. """ if isinstance(var_name, str_classes): try: enc_name = var_name.encode('ascii').rstrip() except AttributeError: enc_name = var_name.rstrip() #already in ASCII #'touch' CDF to cause an error if the name isn't there; get number varNum = ctypes.c_long(0) self.cdf_file._call(const.GET_, const.zVAR_NUMBER_, enc_name, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) else: #Looking up by number name = ctypes.create_string_buffer(const.CDF_VAR_NAME_LEN256+1) self.cdf_file._call(const.SELECT_, const.zVAR_, ctypes.c_long(var_name), const.GET_, const.zVAR_NAME_, name) self._name = name.value.rstrip() self.cdf_file.add_to_cache(self._name, var_name) def _num(self): """Returns the zVar number for this variable @return: number of this zVar @rtype: int """ return self.cdf_file.var_num(self._name) def __len__(self): """Get number of records for this variable in this file @return: Number of records @rtype: long @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ count = ctypes.c_long(0) self._call(const.GET_, const.zVAR_MAXREC_, ctypes.byref(count)) return (count.value + 1) def __repr__(self): """Returns representation of the variable Cannot return anything that can be eval'd to create a copy, so just wrap the informal representation in angle brackets. @return: info on this zVar @rtype: str """ return '<Var:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the variable This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.Var`. 
@return: info on this zVar, CDFTYPE [dimensions] NRV (if not record-varying) @rtype: str """ if self.cdf_file._opened: cdftype = self.type() chartypes = (const.CDF_CHAR.value, const.CDF_UCHAR.value) rv = self.rv() typestr = lib.cdftypenames[cdftype] + \ ('*' + str(self._nelems()) if cdftype in chartypes else '' ) if rv: sizestr = str([len(self)] + self._dim_sizes()) else: sizestr = str(self._dim_sizes()) return typestr + ' ' + sizestr + ('' if rv else ' NRV') else: if isinstance(self._name, str): return 'zVar "{0}" in closed CDF {1}'.format( self._name, self.cdf_file.pathname) else: return 'zVar "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self.cdf_file.pathname.decode('ascii')) def _n_dims(self): """Get number of dimensions for this variable @return: the number of dimensions @rtype: long """ n_dims = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMDIMS_, ctypes.byref(n_dims)) return n_dims.value def _dim_sizes(self): """Get the dimension sizes for this variable @return: sequence of sizes @rtype: sequence of long @note: This will always be in Python order (i.e. row major, last index iterates most quickly), *regardless* of the majority of the CDF. """ sizes = (ctypes.c_long * const.CDF_MAX_DIMS)(0) self._call(const.GET_, const.zVAR_DIMSIZES_, sizes) sizes = sizes[0:self._n_dims()] return sizes def rv(self, new_rv=None): """ Gets or sets whether this variable has record variance If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Other Parameters ================ new_rv : boolean True to change to record variance, False to change to NRV, unspecified to simply check variance. Returns ======= out : Boolean True if record varying, False if NRV """ if new_rv != None: self._call(const.PUT_, const.zVAR_RECVARY_, const.VARY if new_rv else const.NOVARY) vary = ctypes.c_long(0) self._call(const.GET_, const.zVAR_RECVARY_, ctypes.byref(vary)) return vary.value != const.NOVARY.value def dv(self, new_dv=None): """ Gets or sets dimension variance of each dimension of variable. If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Parameters ========== new_dv : list of boolean Each element True to change that dimension to dimension variance, False to change to not dimension variance. (Unspecified to simply check variance.) Returns ======= out : list of boolean True if that dimension has variance, else false. """ ndims = self._n_dims() if new_dv != None: if len(new_dv) != ndims: raise ValueError('Must specify variance for ' + str(ndims) + 'dimensions.') varies = (ctypes.c_long * ndims)( *[const.VARY if dv else const.NOVARY for dv in new_dv]) self._call(const.PUT_, const.zVAR_DIMVARYS_, varies) if ndims == 0: return [] varies = (ctypes.c_long * const.CDF_MAX_DIMS)() self._call(const.GET_, const.zVAR_DIMVARYS_, varies) return [dv != const.NOVARY.value for dv in varies[0:ndims]] def _call(self, *args, **kwargs): """Select this CDF and variable and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. @param args: Passed directly to the CDF library interface. Useful constants are defined in the :py:mod:`pycdf.const` module of this package. @type args: various, see :py:mod:`ctypes`. 
@return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self.cdf_file._call( const.SELECT_, const.zVAR_, ctypes.c_long(self.cdf_file.var_num(self._name)), *args, **kwargs) def _np_type(self): """Returns the numpy type of this variable This is the numpy type that will come directly out of the CDF; see :meth:`dtype` for the representation post-conversion. Raises ====== CDFError : for library-reported error or failure to find numpy type Returns ======= out : dtype numpy dtype that will hold value from this variable """ cdftype = self.type() if cdftype == const.CDF_CHAR.value or cdftype == const.CDF_UCHAR.value: return numpy.dtype('S' + str(self._nelems())) try: return lib.numpytypedict[cdftype] except KeyError: raise CDFError(const.BAD_DATA_TYPE) def type(self, new_type=None): """ Returns or sets the CDF type of this variable Parameters ========== new_type : ctypes.c_long the new type from :mod:`~pycdf.const` Returns ======= out : int CDF type """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) n_elements = ctypes.c_long(self._nelems()) self._call(const.PUT_, const.zVAR_DATASPEC_, new_type, n_elements) self._type = None if self._type is None: cdftype = ctypes.c_long(0) self._call(const.GET_, const.zVAR_DATATYPE_, ctypes.byref(cdftype)) self._type = cdftype.value return self._type def _nelems(self): """Number of elements for each value in this variable This is the length of strings for CHAR and UCHAR, should be 1 otherwise. @return: length of strings @rtype: int """ nelems = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMELEMS_, ctypes.byref(nelems)) return nelems.value def name(self): """ Returns the name of this variable Returns ======= out : str variable's name """ if isinstance(self._name, str): return self._name elif isinstance(self._name, bytes): return self._name.decode() def compress(self, comptype=None, param=None): """ Set or check the compression of this variable Compression may not be changeable on variables with data already written; even deleting the data may not permit the change. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
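        Example (illustrative; ``const`` is :mod:`pycdf.const` and ``'Flux'``
        a hypothetical variable):

        >>> cdf['Flux'].compress(const.GZIP_COMPRESSION, 7)  #set gzip, level 7
        >>> cdf['Flux'].compress()  #check current settings without changing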
Returns ======= out : tuple the (comptype, param) currently in effect """ return _compress(self, comptype, param) def copy(self): """ Copies all data and attributes from this variable Returns ======= out : :class:`VarCopy` list of all data in record order """ return VarCopy(self) def rename(self, new_name): """ Renames this variable Parameters ========== new_name : str the new name for this variable """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_VAR_NAME_LEN256: raise CDFError(const.BAD_VAR_NAME) self._call(const.PUT_, const.zVAR_NAME_, enc_name) self.cdf_file.add_to_cache( enc_name, self.cdf_file.var_num(self._name)) #Still in cache del self.cdf_file._var_nums[self._name] self._name = enc_name @property def shape(self): """ Provides the numpy array-like shape of this variable. Returns a tuple; first element is number of records (RV variable only) And the rest provide the dimensionality of the variable. .. note:: Assigning to this attribute will not change the shape. """ if self.rv(): return tuple([len(self)] + self._dim_sizes()) else: return tuple(self._dim_sizes()) @property def dtype(self): """ Provide the numpy dtype equivalent to the CDF type of this variable. Data from this variable will be returned in numpy arrays of this type. See Also -------- type """ cdftype = self.type() if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str is not bytes and not self._raw: return numpy.dtype('U' + str(self._nelems())) if cdftype in (const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value) and not self._raw: return numpy.dtype('O') return self._np_type() def _get_attrs(self): """Get attribute list Provide access to the zVar's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = zAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. """) class _Hyperslice(object): """Represents a CDF 'slice' used for the hyper CDF functions For internal module use only. @ivar dims: number of dimensions to this slice, usually number of dimensions to the variable plus one for the record, which represents the 0th (least rapidly varying) dimension. @type dims: int @ivar dimsizes: size of each dimension (0th is number of records) @type dimsizes: list of int @ivar starts: index of the start value for each dimension ('dimension indices' in CDF speak) @type starts: list of int @ivar counts: number of values to get from each dimension. Final result will be the product of everything in counts. ('dimension counts' in CDF speak) @type counts: numpy.array @ivar intervals: interval between successive indices to use for each dimension. ('dimension invervals' in CDF speak) @type intervals: list of int @ivar degen: is this dimension degenerate, i.e. should be removed in the returned dataset. A 3D array with one dimension degenerate will be returned as a 2D array (i.e. list-of-lists.) @type degen: numpy.array @ivar rev: should this dimension be returned in reverse order? 
@type rev: numpy.array @ivar column: is this slice in column-major mode (if false, row-major) @type column: boolean @ivar zvar: what CDF variable this object slices on @type zvar: :py:class:`pycdf.Var` @ivar expanded_key: fully-expanded version of the key passed to the constructor (all dimensions filled in) @type expanded_key: tuple @note: All dimension-related variables are stored row-major (Python order) """ def __init__(self, zvar, key): """Create a Hyperslice @param zvar: zVariable that this slices @type zvar: :py:class:`pycdf.Var` @param key: Python multi-dimensional slice as passed to __getitem__ @type key: tuple of slice and/or int @raise IndexError: if slice is out of range, mismatches dimensions, or otherwise unparsable. @raise ValueError: if slice has invalid values """ self.zvar = zvar self.rv = self.zvar.rv() #dim of records, + 1 record dim (NRV always is record 0) self.dims = zvar._n_dims() + 1 self.dimsizes = [len(zvar)] + \ zvar._dim_sizes() self.starts = [0] * self.dims self.counts = numpy.empty((self.dims,), dtype=numpy.int32) self.counts.fill(1) self.intervals = [1] * self.dims self.degen = numpy.zeros(self.dims, dtype=numpy.bool) self.rev = numpy.zeros(self.dims, dtype=numpy.bool) #key is: #1. a single value (integer or slice object) if called 1D #2. a tuple (of integers and/or slice objects) if called nD #3. Each item is either a single value (degenerate dim) # or a slice object. if not hasattr(key, '__len__'): #Not a container object, pack in tuple key = (key, ) if not self.rv: key = (0, ) + key #NRV, so always get 0th record (degenerate) key = self.expand_ellipsis(key, self.dims) if self.rv: #special-cases for RV variables if len(key) == 1: #get all data for this record(s) key = self.expand_ellipsis(key + (Ellipsis, ), self.dims) elif len(key) == self.dims - 1: #get same slice from each record key = (slice(None, None, None), ) + key if len(key) == self.dims: self.expanded_key = key for i in range(self.dims): idx = key[i] if hasattr(idx, 'start'): #slice (self.starts[i], self.counts[i], self.intervals[i], self.rev[i]) = \ self.convert_range(idx.start, idx.stop, idx.step, self.dimsizes[i]) else: #Single degenerate value if idx < 0: idx += self.dimsizes[i] if idx != 0 and (idx >= self.dimsizes[i] or idx < 0): raise IndexError('list index out of range') self.starts[i] = idx self.degen[i] = True else: raise IndexError('Slice does not match dimensions for zVar ' + str(zvar._name)) self.column = zvar.cdf_file.col_major() def expected_dims(self, data=None): """Calculate size of non-degenerate dimensions Figures out size, in each dimension, of expected input data @return: size of each dimension for this slice, excluding degenerate @rtype: list of int """ return [self.counts[i] for i in range(self.dims) if not self.degen[i]] def expand(self, data): """Expands the record dimension of this slice to hold a set of data If the length of data (outermost dimension) is larger than the record count (counts[0]) for this slice, expand the slice to hold all the data. This requires that the record dimension of the slice not be degenerate, and also that it not have been completely specified when the hyperslice was created (i.e. record dimension either ellipsis or no specified stop.) Does *not* expand any other dimension, since that's Very Hard in CDF. 
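        Example of the effect at the :py:class:`pycdf.Var` level (names
        hypothetical): ``var[0:] = data`` with ten records in ``data`` but only
        five in ``var`` stretches the record count of the slice to ten before
        the write.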
@param data: the data which are intended to be stored in this slice @type data: list """ rec_slice = self.expanded_key[0] if not self.rv or isinstance(data, str_classes) or self.degen[0] or \ not hasattr(rec_slice, 'stop'): return if len(data) < self.counts[0]: #Truncate to fit data if rec_slice.stop is None and rec_slice.step in (None, 1): self.counts[0] = len(data) elif len(data) > self.counts[0]: #Expand to fit data if rec_slice.step in (None, 1): self.counts[0] = len(data) def create_array(self): """Creates a numpy array to hold the data from this slice Returns ======= out : numpy.array array sized, typed, and dimensioned to hold data from this slice """ counts = self.counts degen = self.degen if self.column: counts = self.reorder(counts) degen = self.reorder(degen) #TODO: Forcing C order for now, revert to using self.column later array = numpy.empty( [counts[i] for i in range(len(counts)) if not degen[i]], self.zvar._np_type(), order='C') return numpy.require(array, requirements=('C', 'A', 'W')) def convert_input_array(self, buffer): """Converts a buffer of raw data from this slice EPOCH(16) variables always need to be converted. CHAR need converted to Unicode if py3k Parameters ========== buffer : numpy.array data as read from the CDF file Returns ======= out : numpy.array converted data """ result = self._flip_array(buffer) #Convert to derived types cdftype = self.zvar.type() if not self.zvar._raw: if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str != bytes: dt = numpy.dtype('U{0}'.format(result.dtype.itemsize)) result = numpy.require(numpy.char.array(result).decode(), dtype=dt) elif cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(result) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(result) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(result) return result def convert_output_array(self, buffer): """Convert a buffer of data that will go into this slice Parameters ========== buffer : numpy.array data to go into the CDF file Returns ======= out : numpy.array input with majority flipped and dimensions reversed to be suitable to pass directly to CDF library. """ buffer = self._flip_array(buffer) return numpy.require(buffer, requirements=('C', 'A', 'W')) def _flip_array(self, data): """ Operations for majority, etc. common between convert_input and _output """ cdftype = self.zvar.type() #Flip majority if any non-degenerate dimensions exist if self.column and not min(self.degen): #Record-number dim degen, swap whole thing if self.degen[0]: if cdftype == const.CDF_EPOCH16.value: #Maintain last dimension data = data.transpose( list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose() #Record-number dimension is not degenerate, so keep it first else: if cdftype == const.CDF_EPOCH16.value: data = data.transpose( [0] + list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose( [0] + list(range(len(data.shape) - 1, 0, -1))) #Reverse non-degenerate dimensions in rev #Remember that the degenerate indices are already gone! 
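        #(rev[i] is set by convert_range when the slice used a negative step,
        # e.g. var[..., ::-1]; the hyper read/write always uses a positive
        # interval, so the reversal of those axes is applied here instead.)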
if self.rev.any(): sliced = [(slice(None, None, -1) if self.rev[i] else slice(None)) for i in range(self.dims) if not self.degen[i]] if cdftype == const.CDF_EPOCH16.value: #don't reverse last dim sliced.extend(slice(None)) data = operator.getitem(data, tuple(sliced)) return data def select(self): """Selects this hyperslice in the CDF Calls the CDF library to select the CDF, variable, records, and array elements corresponding to this slice. """ args = (const.SELECT_, const.zVAR_RECNUMBER_, ctypes.c_long(self.starts[0]), const.SELECT_, const.zVAR_RECCOUNT_, ctypes.c_long(self.counts[0]), const.SELECT_, const.zVAR_RECINTERVAL_, ctypes.c_long(self.intervals[0])) if self.dims > 1: dims = self.dims - 1 args += (const.SELECT_, const.zVAR_DIMINDICES_, (ctypes.c_long * dims)(*self.starts[1:]), const.SELECT_, const.zVAR_DIMCOUNTS_, (ctypes.c_long * dims)(*self.counts[1:]), const.SELECT_, const.zVAR_DIMINTERVALS_, (ctypes.c_long * dims)(*self.intervals[1:])) self.zvar._call(*args) @staticmethod def expand_ellipsis(slices, n_dims): """Expands any ellipses into correct number of full-size slices @param slices: tuple of slices, integers, or ellipse objects @type slices: tuple @param n_dims: number of dimensions this slice is over @type n_dims: int @return: L{slices} with ellipses replaced by appropriate number of full-dimension slices @rtype: tuple @raise IndexError: if ellipses specified when already have enough dimensions """ if slices is Ellipsis: return tuple([slice(None, None, None) for i in range(n_dims)]) #Elements might be numpy arrays, so can't use in/index idx = [i for i, v in enumerate(slices) if v is Ellipsis] if not idx: #no ellipsis return slices if len(idx) > 1: #multiples! raise IndexError('Ellipses can only be used once per slice.') idx = idx[0] #how many dims to expand ellipsis to #remember the ellipsis is in len(slices) and must be replaced! extra = n_dims - len(slices) + 1 if extra < 0: raise IndexError('too many indices') result = slices[0:idx] + (slice(None), ) * extra + slices[idx+1:] return result @staticmethod def check_well_formed(data): """Checks if input data is well-formed, regular array""" d = numpy.asanyarray(data) if d.dtype == numpy.object: #this is probably going to be bad try: len(d.flat[0]) except TypeError: #at least it's not a list pass else: raise ValueError( 'Data must be well-formed, regular array of number, ' 'string, or datetime') @staticmethod def dimensions(data): """Finds the dimensions of a nested list-of-lists @param data: data of which dimensions are desired @type data: list (of lists) @return: dimensions of L{data}, in order outside-in @rtype: list of int @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) _Hyperslice.check_well_formed(d) return d.shape @staticmethod def types(data, backward=False): """Find dimensions and valid types of a nested list-of-lists Any given data may be representable by a range of CDF types; infer the CDF types which can represent this data. This breaks down to: 1. Proper kind (numerical, string, time) 2. Proper range (stores highest and lowest number) 3. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: 1. Type that matches precision of data first, then 2. integer type before float type, then 3. Smallest type first, then 4. signed type first, then 5. specifically-named (CDF_BYTE) vs. 
generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if L{data} specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: 1. absolute values between 0 and 3e-39 2. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. @param data: data for which dimensions and CDF types are desired @type data: list (of lists) @param backward: limit to pre-CDF3 types @type backward: bool @return: dimensions of L{data}, in order outside-in; CDF types which can represent this data; number of elements required (i.e. length of longest string) @rtype: 3-tuple of lists ([int], [ctypes.c_long], [int]) @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) dims = d.shape elements = 1 types = [] _Hyperslice.check_well_formed(d) if d.dtype.kind in ('S', 'U'): #it's a string types = [const.CDF_CHAR, const.CDF_UCHAR] elements = d.dtype.itemsize if d.dtype.kind == 'U': #UTF-8 uses 4 bytes per elements //= 4 elif d.size and hasattr(numpy.ma.getdata(d).flat[0], 'microsecond'): if max((dt.microsecond % 1000 for dt in d.flat)) > 0: types = [const.CDF_EPOCH16, const.CDF_EPOCH, const.CDF_TIME_TT2000] else: types = [const.CDF_EPOCH, const.CDF_EPOCH16, const.CDF_TIME_TT2000] if backward: del types[types.index(const.CDF_EPOCH16)] del types[-1] elif not lib.supports_int8: del types[-1] elif d is data or isinstance(data, numpy.generic): #numpy array came in, use its type (or byte-swapped) types = [k for k in lib.numpytypedict if (lib.numpytypedict[k] == d.dtype or lib.numpytypedict[k] == d.dtype.newbyteorder()) and not k in lib.timetypes] if (not lib.supports_int8 or backward) \ and const.CDF_INT8.value in types: del types[types.index(const.CDF_INT8.value)] #Maintain priority to match the ordered lists below: #float/double (44, 45) before real (21/22), and #byte (41) before int (1) before char (51). So hack. #Consider making typedict an ordered dict once 2.6 is dead. types.sort(key=lambda x: x % 50, reverse=True) if not types: #not a numpy array, or can't parse its type if d.dtype.kind == 'O': #Object. 
Try to make it numeric #Can't do safe casting from Object, so try and compare #Basically try most restrictive to least restrictive trytypes = (numpy.uint64, numpy.int64, numpy.float64) for t in trytypes: try: newd = d.astype(dtype=t) except: #Failure to cast, try next type continue if (newd == d).all(): #Values preserved, use this type d = newd #Continue with normal guessing, as if a list break else: #fell through without a match raise ValueError( 'Cannot convert generic objects to CDF type.') if d.dtype.kind in ('i', 'u'): #integer minval = numpy.min(d) maxval = numpy.max(d) if minval < 0: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_INT2, const.CDF_INT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 15, 2 ** 31, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] else: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_UINT1, const.CDF_INT2, const.CDF_UINT2, const.CDF_INT4, const.CDF_UINT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 8, 2 ** 15, 2 ** 16, 2 ** 31, 2 ** 32, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] types = [t for (t, c) in zip(types, cutoffs) if c > maxval and (minval >= 0 or minval >= -c)] if (not lib.supports_int8 or backward) \ and const.CDF_INT8 in types: del types[types.index(const.CDF_INT8)] else: #float if dims is (): if d != 0 and (abs(d) > 1.7e38 or abs(d) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] else: absolutes = numpy.abs(d[d != 0]) if len(absolutes) > 0 and \ (numpy.max(absolutes) > 1.7e38 or numpy.min(absolutes) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] types = [t.value if hasattr(t, 'value') else t for t in types] return (dims, types, elements) @staticmethod def reorder(seq): """Reorders seq to switch array majority Used to take an array of subscripts between row and column majority. First element is not touched, being the record number. @param seq: a sequence of *subscripts* @type seq: sequence of integers @return: seq with all but element 0 reversed in order @rtype: sequence of integers """ return numpy.concatenate((seq[0:1], numpy.flipud(seq)[:-1])) @staticmethod def convert_range(start, stop, step, size): """Converts a start/stop/step range to start/count/interval (i.e. changes from Python-style slice to CDF-style) @param start: index to start a slice at, may be none or negative @type start: int @param stop: index at end of slice (one-past, standard Python), may be none or negative @type stop: int @param step: interval for stepping through stlice @type step: int @param size: size of list to slice @type size: int @return: (start, count, interval, rev) where: 1. start is the start index, normalized to be within the size of the list and negatives handled 2. count is the number of records in the slice, guaranteed to stop before the end 3. interval is the skip between records 4. rev indicates whether the sequence should be reversed @rtype: (int, int, int, boolean) """ (start, stop, step) = slice(start, stop, step).indices(size) if step < 0: step *= -1 count = int((start - stop + step - 1) / step) start = start - (count - 1) * step rev = True else: count = int((stop - start + step - 1) / step) rev = False if count < 0: count = 0 start = 0 return (start, count, step, rev) class Attr(MutableSequence): """An attribute, g or z, for a CDF .. 
warning:: This class should not be used directly, but only in its subclasses, :class:`gAttr` and :class:`zAttr`. The methods listed here are safe to use in the subclasses. Represents a CDF attribute, providing access to the Entries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. An introduction to CDF attributes can be found in section 2.4 of the CDF user's guide. Each element of the list is a single Entry of the appropriate type. The index to the elements is the Entry number. Multi-dimensional slicing is *not* supported; an Entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry .. autosummary:: ~Attr.append ~Attr.has_entry ~Attr.insert ~Attr.max_idx ~Attr.new ~Attr.number ~Attr.rename ~Attr.type .. automethod:: append .. automethod:: has_entry .. automethod:: insert .. automethod:: max_idx .. automethod:: new .. automethod:: number .. automethod:: rename .. automethod:: type """ def __init__(self, cdf_file, attr_name, create=False): """Initialize this attribute @param cdf_file: CDF file containing this attribute @type cdf_file: :py:class:`pycdf.CDF` @param attr_name: Name of this attribute @type attr_name: str @param create: True to create attribute, False to look up existing. @type create: bool """ self._cdf_file = cdf_file self._raw = False if isinstance(attr_name, str_classes): try: self._name = attr_name.encode('ascii') except AttributeError: self._name = attr_name attrno = ctypes.c_long() if create: self._cdf_file._call(const.CREATE_, const.ATTR_, self._name, self.SCOPE, ctypes.byref(attrno)) self._cdf_file.add_attr_to_cache( self._name, attrno.value, self.SCOPE == const.GLOBAL_SCOPE) else: #Ensure exists, and populate cache. See scope note below attrno, scope = self._cdf_file.attr_num(self._name) else: name = ctypes.create_string_buffer(const.CDF_ATTR_NAME_LEN256 + 1) scope = ctypes.c_long(0) self._cdf_file._call(const.SELECT_, const.ATTR_, ctypes.c_long(attr_name)) #Because it's possible to create a gAttr Python objecting #referencing an Attribute with variable scope, and vice-versa, #do NOT assume the scope matches #(Higher level code checks for that being a bad thing.) self._cdf_file._call( const.GET_, const.ATTR_NAME_, name, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) self._name = name.value.rstrip() if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) self._cdf_file.add_attr_to_cache(self._name, attr_name, scope) def __getitem__(self, key): """Return a slice of Entries. Because Attributes may be sparse, a multi-element slice will return None for those elements which do not have associated Entries. @param key: index or range of Entry number to return @type key: slice or int @return: a list of entries, appropriate type. @raise IndexError: if L{key} is an int and that Entry number does not exist. 
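        Example (hypothetical attribute with Entries 0 and 2 but no Entry 1;
        slices return None for missing Entries):

        >>> attribute[0:3]
        [1, None, 3]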
""" if key is Ellipsis: key = slice(None, None, None) if hasattr(key, 'indices'): idx = range(*key.indices(self.max_idx() + 1)) return [self._get_entry(i) if self.has_entry(i) else None for i in idx] else: if self.has_entry(key): return self._get_entry(key) else: raise IndexError('list index ' + str(key) + ' out of range.') def _check_other_entries(self, types): """Try to get the type of this entry from others in the Attribute For zAttrs, checks if all other Entries are the same type, and at least one doesn't match its zVar, i.e. Entry type dominates (otherwise assumption is the Var type dominates). For gAttrs, checks all other Entries, and gives priority to the one that's earliest in the possible type list and exists in other Entries. This is only one component of Entry type guessing! :param list types: CDF types that are candidates (match the data) :return: The type discerned from other Entries, or None """ if self.ENTRY_ == const.zENTRY_: #If everything else is the same entry type, #and one is not the same as its var, probably #all entries should be of that type cand_et = None #The Entry type that might work one_var_diff = False #One Var has a type different from Entry for num in range(self.max_idx() + 1): if not self.has_entry(num): continue vartype = self._cdf_file[num].type() entrytype = self.type(num) if vartype != entrytype: one_var_diff = True if cand_et is None: if not entrytype in types: return None #One var has Entry with "impossible" type cand_et = entrytype elif cand_et != entrytype: return None #Two vars have Entries with different types if one_var_diff and cand_et is not None: return cand_et else: # Of those types which exist in other entries, # find the one which is earliest # in types, i.e. the preferred type entrytypes = [self.type(num) for num in range(self.max_idx() + 1) if self.has_entry(num)] entrytypes = [et for et in entrytypes if et in types] if entrytypes: return types[ min([types.index(et) for et in entrytypes])] return None def __setitem__(self, key, data): """Set a slice of Entries. @param key: index or range of Entry numbers to set @type key: slice or int @param data: the data to set these entries to. Normally each entry should be a sequence; if a scalar is provided, it is treated as a single-element list. @type data: scalar or list @raise ValueError: if size of {data} does not match size of L{key} @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. 
""" if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): #Single value, promote everything a dimension idx = (key, key + 1, 1) data = [data] else: idx = key.indices(self.max_idx() + 1) if key.step is None or key.step > 0: #Iterating forward, extend slice to match data if len(data) > len(range(*idx)): idx = (idx[0], idx[0] + idx[2] * len(data), idx[2]) #get, and check, types and sizes for all data #checks first so don't have error after changing half the Entries data_idx = -1 typelist = [] for i in range(*idx): data_idx += 1 if data_idx >= len(data): continue datum = data[data_idx] if datum is None: typelist[i] = (None, None, None) continue (dims, types, elements) = _Hyperslice.types( datum, backward=self._cdf_file.backward) if len(types) <= 0: raise ValueError('Cannot find a matching CDF type.') if len(dims) > 1: raise ValueError('Entries must be scalar or 1D.') elif len(dims) == 1 and isinstance(datum[0], str_classes): raise ValueError('Entry strings must be scalar.') entry_type = None if self.has_entry(i): #If the entry already exists, match its type entry_type = self.type(i) if not entry_type in types: entry_type = None if entry_type is None: #Check other entries for this attribute entry_type = self._check_other_entries(types) if entry_type is None and self.ENTRY_ == const.zENTRY_: #Fall back to zVar type vartype = self._cdf_file[i].type() if vartype in types: entry_type = vartype else: entry_type = types[0] elif entry_type is None: entry_type = types[0] if not entry_type in lib.numpytypedict: raise ValueError('Cannot find a matching numpy type.') typelist.append((dims, entry_type, elements)) data_idx = -1 for i in range(*idx): data_idx += 1 if data_idx >= len(data) or data[data_idx] is None: if self.has_entry(i): del self[i] continue datum = data[data_idx] (dims, entry_type, elements) = typelist[data_idx] self._write_entry(i, datum, entry_type, dims, elements) def __delitem__(self, key): """Delete a slice of Entries. @param key: index or range of Entry numbers to delete @type key: slice or int @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. """ if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): idx = (key, key + 1, 1) else: idx = key.indices(self.max_idx() + 1) for i in range(*idx): self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(i), const.DELETE_, self.ENTRY_) def __iter__(self, current=0): """Iterates over all entries in this Attribute Returns data from one entry at a time until reaches the end. @note: Returned in entry-number order. """ while current <= self.max_idx(): if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current += 1 def __reversed__(self, current=None): """Iterates over all entries in this Attribute Returns data from one entry at a time, starting at end and going to beginning. @note: Returned in entry-number order. """ if current is None: current = self.max_idx() while current >= 0: if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current -= 1 def __len__(self): """Number of Entries for this Attr. NOT same as max Entry number. 
@return: Number of Entries @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_NUMENTRIES_, ctypes.byref(count)) return count.value def __repr__(self): """Returns representation of an attribute Cannot return anything that can be eval'd to create a copy of the attribtute, so just wrap the informal representation in angle brackets. @return: all the data in this attribute @rtype: str """ return '<\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute This is an 'informal' representation in that it cannot be evaluated directly to create an L{Attr}. @return: all the data in this attribute @rtype: str """ if self._cdf_file._opened: return '\n'.join([str(item) for item in self]) else: if isinstance(self._name, str): return 'Attribute "{0}" in closed CDF {1}'.format( self._name, self._cdf_file.pathname) else: return 'Attribute "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self._cdf_file.pathname.decode('ascii')) def insert(self, index, data): """Insert an entry at a particular number Inserts entry at particular number while moving all subsequent entries to one entry number later. Does not close gaps. Parameters ========== index : int index where to put the new entry data : data for the new entry """ max_entry = self.max_idx() if index > max_entry: #Easy case self[index] = data return for i in range(max_entry, index - 1, -1): if self.has_entry(i+1): self.__delitem__(i+1) if self.has_entry(i): self.new(self.__getitem__(i), type=self.type(i), number=i+1) self[index] = data def append(self, data): """Add an entry to end of attribute Puts entry after last defined entry (does not fill gaps) Parameters ========== data : data for the new entry """ self[self.max_idx() + 1] = data def _call(self, *args, **kwargs): """Select this CDF and Attr and call the CDF internal interface @param args: Passed directly to the CDF library interface. @type args: various, see :py:mod:`ctypes`. @return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self._cdf_file._call( const.SELECT_, const.ATTR_, ctypes.c_long(self._cdf_file.attr_num(self._name)[0]), *args, **kwargs) def _entry_len(self, number): """Number of elements in an Entry @param number: number of Entry @type number: int @return: number of elements @rtype: int """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') count = ctypes.c_long(0) self._call( const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_NUMELEMS_, ctypes.byref(count)) return count.value def type(self, number, new_type=None): """Find or change the CDF type of a particular Entry number Parameters ========== number : int number of Entry to check or change Other Parameters ================ new_type type to change this Entry to, from :mod:`~pycdf.const`. Omit to only check type. Returns ======= out : int CDF variable type, see :mod:`~pycdf.const` Notes ===== If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) size = ctypes.c_long(self._entry_len(number)) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATASPEC_, new_type, size, ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') cdftype = ctypes.c_long(0) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATATYPE_, ctypes.byref(cdftype), ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') return cdftype.value def has_entry(self, number): """Check if this attribute has a particular Entry number Parameters ========== number : int number of Entry to check or change Returns ======= out : bool True if ``number`` is a valid entry number; False if not """ status = self._call(const.CONFIRM_, self.ENTRY_EXISTENCE_, ctypes.c_long(number), ignore=(const.NO_SUCH_ENTRY, )) return not status == const.NO_SUCH_ENTRY def max_idx(self): """Maximum index of Entries for this Attr Returns ======= out : int maximum Entry number """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_MAXENTRY_, ctypes.byref(count)) return count.value def new(self, data, type=None, number=None): """Create a new Entry in this Attribute .. note:: If ``number`` is provided and an Entry with that number already exists, it will be overwritten. Parameters ========== data data to put in the Entry Other Parameters ================ type : int type of the new Entry, from :mod:`~pycdf.const` (otherwise guessed from ``data``) number : int Entry number to write, default is lowest available number. """ if number is None: number = 0 while self.has_entry(number): number += 1 (dims, types, elements) = _Hyperslice.types( data, backward=self._cdf_file.backward) if type is None: #Guess based on other entries type = self._check_other_entries(types) if type is None and self.ENTRY_ == const.zENTRY_: #Try to match variable type vartype = self._cdf_file[number].type() if vartype in types: type = vartype if type is None: type = types[0] elif hasattr(type, 'value'): type = type.value self._write_entry(number, data, type, dims, elements) def number(self): """Find the attribute number for this attribute Returns ======= out : int attribute number """ no = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.ATTR_NUMBER_, self._name, ctypes.byref(no)) return no.value def global_scope(self): """Determine scope of this attribute. Returns ======= out : bool True if global (i.e. gAttr), False if zAttr """ return self._cdf_file.attr_num(self._name)[1] def rename(self, new_name): """Rename this attribute Renaming a zAttribute renames it for *all* zVariables in this CDF! 
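        Example (illustrative attribute name):

        >>> cdf.attrs['Project'].rename('ProjectName')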
Parameters ========== new_name : str the new name of the attribute """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_ATTR_NAME_LEN256: raise CDFError(const.BAD_ATTR_NAME) self._call(const.PUT_, const.ATTR_NAME_, enc_name) self._cdf_file.add_attr_to_cache( enc_name, *self._cdf_file.attr_num(self._name)) #still in cache del self._cdf_file._attr_info[self._name] self._name = enc_name def _get_entry(self, number): """Read an Entry associated with this L{Attr} @param number: number of Entry to return @type number: int @return: data from entry numbered L{number} @rtype: list or str """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') #Make a big enough buffer length = self._entry_len(number) cdftype = self.type(number) if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): buff = numpy.empty((), 'S{0}'.format(length), order='C') else: if not cdftype in lib.numpytypedict: raise CDFError(const.BAD_DATA_TYPE) buff = numpy.empty((length,), lib.numpytypedict[cdftype], order='C') buff = numpy.require(buff, requirements=('C', 'A', 'W')) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATA_, buff.ctypes.data_as(ctypes.c_void_p)) #decode if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): if str == bytes or self._raw: #Py2k, leave as bytes result = bytes(buff) else: #Py3k, make unicode result = str(numpy.char.array(buff).decode()) else: if not self._raw: if cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(buff) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(buff) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(buff) else: result = buff else: result = buff if length == 1: result = result[0] return result def _write_entry(self, number, data, cdf_type, dims, elements): """Write an Entry to this Attr. @param number: number of Entry to write @type number: int @param data: data to write @param cdf_type: the CDF type to write, from :py:mod:`pycdf.const` @param dims: dimensions of L{data} @type dims: list @param elements: number of elements in L{data}, 1 unless it is a string @type elements: int """ if len(dims) == 0: n_write = 1 else: n_write = dims[0] if cdf_type in (const.CDF_CHAR.value, const.CDF_UCHAR.value): data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.dtype('S' + str(elements))) n_write = elements elif cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data), except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) elif cdf_type in lib.numpytypedict: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=lib.numpytypedict[cdf_type]) else: raise CDFError(const.BAD_DATA_TYPE) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATA_, ctypes.c_long(cdf_type), ctypes.c_long(n_write), data.ctypes.data_as(ctypes.c_void_p)) def _delete(self): """Delete this Attribute Also deletes all Entries associated with it. 
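        Example of the user-level equivalent (hypothetical attribute name;
        deleting from an L{AttrList} calls this internally):

        >>> del cdf.attrs['TEXT']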
""" self._call(const.DELETE_, const.ATTR_) self._cdf_file.clear_attr_from_cache(self._name) self._name = None class zAttr(Attr): """zAttribute for zVariables within a CDF. .. warning:: Because zAttributes are shared across all variables in a CDF, directly manipulating them may have unexpected consequences. It is safest to operate on zEntries via :class:`zAttrList`. .. note:: When accessing a zAttr, pyCDF exposes only the zEntry corresponding to the associated zVariable. See Also ======== :class:`Attr` """ ENTRY_ = const.zENTRY_ ENTRY_DATA_ = const.zENTRY_DATA_ SCOPE = const.VARIABLE_SCOPE ENTRY_EXISTENCE_ = const.zENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMzENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXzENTRY_ ENTRY_NUMELEMS_ = const.zENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.zENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.zENTRY_DATASPEC_ def insert(self, index, data): """Insert entry at particular index number Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError def append(self, index, data): """Add entry to end of attribute list Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError class gAttr(Attr): """Global Attribute for a CDF Represents a CDF attribute, providing access to the gEntries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. Normally accessed by providing a key to a :class:`gAttrList`: >>> attribute = cdffile.attrs['attribute_name'] >>> first_gentry = attribute[0] Each element of the list is a single gEntry of the appropriate type. The index to the elements is the gEntry number. A gEntry may be either a single string or a 1D array of numerical type. Entries of numerical type (everything but CDF_CHAR and CDF_UCHAR) with a single element are returned as scalars; multiple-element entries are returned as a list. No provision is made for accessing below the entry level; the whole list is returned at once (but Python's slicing syntax can be used to extract individual items from that list.) Multi-dimensional slicing is *not* supported; an entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry gEntries are *not* necessarily contiguous; a gAttribute may have an entry 0 and entry 2 without an entry 1. :meth:`~Attr.len` will return the *number* of gEntries; use :meth:`~Attr.max_idx` to find the highest defined gEntry number and :meth:`~Attr.has_entry` to determine if a particular gEntry number exists. Iterating over all entries is also supported:: >>> entrylist = [entry for entry in attribute] Deleting gEntries will leave a "hole": >>> attribute[0:3] = [1, 2, 3] >>> del attribute[1] >>> attribute.has_entry(1) False >>> attribute.has_entry(2) True >>> print attribute[0:3] [1, None, 3] Multi-element slices over nonexistent gEntries will return ``None`` where no entry exists. Single-element indices for nonexistent gEntries will raise ``IndexError``. Assigning ``None`` to a gEntry will delete it. 
When assigning to a gEntry, the type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing gEntry of the same number in this gAttribute #. other gEntries in this gAttribute #. data-matching constraints described in :meth:`CDF.new`. See Also ======== :class:`Attr` """ ENTRY_ = const.gENTRY_ ENTRY_DATA_ = const.gENTRY_DATA_ SCOPE = const.GLOBAL_SCOPE ENTRY_EXISTENCE_ = const.gENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMgENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXgENTRY_ ENTRY_NUMELEMS_ = const.gENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.gENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.gENTRY_DATASPEC_ class AttrList(MutableMapping): """Object representing a list of attributes. .. warning:: This class should not be used directly, but only via its subclasses, :class:`gAttrList` and :class:`zAttrList`. Methods listed here are safe to use from the subclasses. .. autosummary:: ~AttrList.clone ~AttrList.copy ~AttrList.from_dict ~AttrList.new ~AttrList.rename .. automethod:: clone .. automethod:: copy .. automethod:: from_dict .. automethod:: new .. automethod:: rename """ def __init__(self, cdf_file, special_entry=None): """Initialize the attribute collection @param cdf_file: CDF these attributes are in @type cdf_file: :py:class:`pycdf.CDF` @param special_entry: callable which returns a "special" entry number, used to limit results for zAttrs to those which match the zVar (i.e. the var number) @type special_entry: callable """ self._cdf_file = cdf_file self.special_entry = special_entry def __getitem__(self, name): """Find an Attribute by name @param name: name of the Attribute to return @type name: str @return: attribute named L{name} @rtype: L{Attr} @raise KeyError: if there is no attribute named L{name} @raise CDFError: other errors in CDF library """ try: attrib = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attrib.global_scope() != self.global_scope: raise KeyError(name + ': no ' + self.attr_name + ' by that name.') return attrib def __setitem__(self, name, data): """Create an Attribute or change its entries @param name: name of Attribute to change @type name: str @param data: Entries to populate this Attribute with. Any existing Entries will be deleted! Another C{Attr} may be specified, in which case all its entries are copied. @type data: scalar, list, or L{Attr} """ if isinstance(data, AttrList): if name in self: del self[name] attr = self._get_or_create(name) for entryno in range(data.max_idx()): if data.has_entry(entryno): attr.new(data[entryno], data.type(entryno), entryno) else: attr = self._get_or_create(name) if isinstance(data, str_classes): data = [data] else: try: junk = len(data) except TypeError: data = [data] attr[:] = data del attr[len(data):] def __delitem__(self, name): """Delete an Attribute (and all its entries) @param name: name of Attribute to delete @type name: str """ try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) attr._delete() def __iter__(self, current=0): """Iterates over all Attr in this CDF or variable Returns name of one L{Attr} at a time until reaches the end. @note: Returned in number order. 
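        Example (hypothetical open CDF ``cdf``):

        >>> for attrname in cdf.attrs:
        ...     print(attrname)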
""" count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) while current < count.value: candidate = self.AttrType(self._cdf_file, current) if candidate.global_scope() == self.global_scope: if self.special_entry is None or \ candidate.has_entry(self.special_entry()): if str == bytes: value = yield(candidate._name) else: value = yield(candidate._name.decode()) if value != None: current = self[value].number() current += 1 def __repr__(self): """Returns representation of attribute list Cannot return anything that can be eval'd to create a copy of the list, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<' + self.__class__.__name__ + ':\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute list This is an 'informal' representation in that it cannot be evaluated directly to create an L{AttrList}. @return: all the data in this list of attributes @rtype: str """ if self._cdf_file._opened: return '\n'.join([key + ': ' + ( ('\n' + ' ' * (len(key) + 2)).join( [str(value[i]) + ' [' + lib.cdftypenames[value.type(i)] + ']' for i in range(value.max_idx() + 1) if value.has_entry(i)]) if isinstance(value, Attr) else str(value) + ' [' + lib.cdftypenames[self.type(key)] + ']' ) for (key, value) in sorted(self.items())]) else: if isinstance(self._cdf_file.pathname, str): return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname) else: return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname.decode('ascii')) def clone(self, master, name=None, new_name=None): """ Clones another attribute list, or one attribute from it, into this list. Parameters ========== master : AttrList the attribute list to copy from. This can be any dict-like object. Other Parameters ================ name : str (optional) name of attribute to clone (default: clone entire list) new_name : str (optional) name of the new attribute, default ``name`` """ if name is None: self._clone_list(master) else: self._clone_attr(master, name, new_name) def copy(self): """ Create a copy of this attribute list Returns ======= out : dict copy of the entries for all attributes in this list """ return dict((key, value[:] if isinstance(value, Attr) else value) for (key, value) in self.items()) def new(self, name, data=None, type=None): """ Create a new Attr in this AttrList Parameters ========== name : str name of the new Attribute Other Parameters ================ data data to put into the first entry in the new Attribute type CDF type of the first entry from :mod:`~pycdf.const`. Only used if data are specified. Raises ====== KeyError : if the name already exists in this list """ if name in self: raise KeyError(name + ' already exists.') #A zAttr without an Entry in this zVar will be a "get" not "create" attr = self._get_or_create(name) if data is not None: if self.special_entry is None: attr.new(data, type) else: attr.new(data, type, self.special_entry()) def rename(self, old_name, new_name): """ Rename an attribute in this list Renaming a zAttribute renames it for *all* zVariables in this CDF! Parameters ========== old_name : str the current name of the attribute new_name : str the new name of the attribute """ AttrList.__getitem__(self, old_name).rename(new_name) def from_dict(self, in_dict): """ Fill this list of attributes from a dictionary .. deprecated:: 0.1.5 Use :meth:`~pycdf.AttrList.clone` instead; it supports cloning from dictionaries. 
Parameters ========== in_dict : dict Attribute list is populated entirely from this dictionary; all existing attributes are deleted. """ warnings.warn("from_dict is deprecated and will be removed. Use clone.", DeprecationWarning) for k in in_dict: self[k] = in_dict[k] for k in list(self): if not k in in_dict: del self[k] def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{AttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name self[new_name] = master[name] def _clone_list(self, master): """Clones this attribute list from another @param master: the attribute list to copy from @type master: L{AttrList} """ for name in master: self._clone_attr(master, name) for name in list(self): #Can't iterate over a list we're changing if not name in master: del self[name] def _get_or_create(self, name): """Retrieve L{Attr} or create it if it doesn't exist @param name: name of the attribute to look up or create @type name: str @return: attribute with this name @rtype: L{Attr} """ attr = None try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status != const.NO_SUCH_ATTR: raise if attr is None: attr = self.AttrType(self._cdf_file, name, True) elif attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) return attr class gAttrList(AttrList): """ Object representing *all* the gAttributes in a CDF. Normally accessed as an attribute of an open :class:`CDF`: >>> global_attribs = cdffile.attrs Appears as a dictionary: keys are attribute names; each value is an attribute represented by a :class:`gAttr` object. To access the global attribute TEXT: >>> text_attr = cdffile.attrs['TEXT'] See Also ======== :class:`AttrList` """ AttrType = gAttr attr_name = 'gAttribute' global_scope = True def __len__(self): """ Number of gAttributes in this CDF Returns ======= out : int number of gAttributes in the CDF """ count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMgATTRS_, ctypes.byref(count)) return count.value class zAttrList(AttrList): """Object representing *all* the zAttributes in a zVariable. Normally accessed as an attribute of a :class:`Var` in an open CDF: >>> epoch_attribs = cdffile['Epoch'].attrs Appears as a dictionary: keys are attribute names, values are the value of the zEntry associated with the appropriate zVariable. Each vAttribute in a CDF may only have a *single* entry associated with each variable. The entry may be a string, a single numerical value, or a series of numerical values. Entries with multiple values are returned as an entire list; direct access to the individual elements is not possible. Example: finding the first dependency of (ISTP-compliant) variable ``Flux``: >>> print cdffile['Flux'].attrs['DEPEND_0'] zAttributes are shared among zVariables, one zEntry allowed per zVariable. (pyCDF hides this detail.) Deleting the last zEntry for a zAttribute will delete the underlying zAttribute. zEntries are created and destroyed by the usual dict methods on the zAttrlist: >>> epoch_attribs['new_entry'] = [1, 2, 4] #assign a list to new zEntry >>> del epoch_attribs['new_entry'] #delete the zEntry The type of the zEntry is guessed from data provided. 
The type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing zEntry corresponding to this zVar #. other zEntries in this zAttribute #. the type of this zVar #. data-matching constraints described in :py:meth:`CDF.new` See Also ======== :class:`AttrList` """ AttrType = zAttr attr_name = 'zAttribute' global_scope = False def __init__(self, zvar): """Initialize the attribute collection @param zvar: zVariable these attributes are in @param zvar: :py:class:`pycdf.Var` """ super(zAttrList, self).__init__(zvar.cdf_file, zvar._num) self._zvar = zvar def __getitem__(self, name): """Find an zEntry by name @param name: name of the zAttribute to return @type name: str @return: attribute named L{name} @rtype: L{zAttr} @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if attrib.has_entry(zvar_num): attrib._raw = self._zvar._raw return attrib[zvar_num] else: raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) def __delitem__(self, name): """Delete an zEntry by name @param name: name of the zEntry to delete @type name: str @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library @note: If this is the only remaining entry, the Attribute will be deleted. """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(str(name) + ': no such attribute for variable ' + str(self._zvar._name)) del attrib[zvar_num] if len(attrib) == 0: attrib._delete() def __setitem__(self, name, data): """Sets a zEntry by name The type of the zEntry is guessed from L{data}. The type is chosen to match the data; subject to that constraint, it will try to match (in order): 1. existing zEntry corresponding to this zVar 2. other zEntries in this zAttribute 3. the type of this zVar 4. data-matching constraints described in L{_Hyperslice.types} @param name: name of zAttribute; zEntry for this zVariable will be set in zAttribute by this name @type name: str @raise CDFError: errors in CDF library @raise ValueError: if unable to find a valid CDF type matching L{data}, or if L{data} is the wrong dimensions. """ try: attr = super(zAttrList, self).__getitem__(name) except KeyError: attr = zAttr(self._cdf_file, name, True) attr._raw = self._zvar._raw attr[self._zvar._num()] = data def __len__(self): """Number of zAttributes in this variable @return: number of zAttributes in the CDF which have entries for this variable. @rtype: int """ length = 0 count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) current = 0 while current < count.value: candidate = zAttr(self._cdf_file, current) if not candidate.global_scope(): if candidate.has_entry(self._zvar._num()): length += 1 current += 1 return length def type(self, name, new_type=None): """Find or change the CDF type of a zEntry in this zVar @param name: name of the zAttr to check or change @type name: str @param new_type: type to change it to, see :py:mod:`pycdf.const` @type new_type: ctypes.c_long @return: CDF variable type, see :py:mod:`pycdf.const` @rtype: int @note: If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) return attrib.type(zvar_num, new_type) def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{zAttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name if new_name in self: del self[new_name] self.new(new_name, master[name], master.type(name) if hasattr(master, 'type') else None)
checksum
Set or check the checksum status of this CDF. If checksums
are enabled, the checksum will be verified every time the file
is opened.

Other Parameters
================
new_val : boolean
    True to enable checksum, False to disable, or leave out to
    simply check.

Returns
=======
out : boolean
    True if the checksum is enabled or False if disabled
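Examples
========
An illustrative session; ``cdffile`` is assumed to be an open,
writable :class:`CDF`, and the output shown for the second call
assumes the method reports the status after any change, as
:meth:`col_major` and :meth:`readonly` do:

>>> cdffile.checksum()      # check the current status
False
>>> cdffile.checksum(True)  # enable checksum verification
True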
#!/usr/bin/env python # -*- coding: utf-8 -*- """ das developers note: This a is modification of the original SpacePy pycdf package. All refereneces to the greater spacepy package have been removed to create a small standalone module. --cwp 2018-10-18 The libcdf.so location code has been changed to find the version installed in anaconda. --cwp 2020-04-06 This package provides a Python interface to the Common Data Format (CDF) library used for many NASA missions, available at http://cdf.gsfc.nasa.gov/. It is targeted at Python 2.6+ and should work without change on either Python 2 or Python 3. The interface is intended to be 'pythonic' rather than reproducing the C interface. To open or close a CDF and access its variables, see the :class:`CDF` class. Accessing data within the variables is via the :class:`Var` class. The :data:`lib` object provides access to some routines that affect the functionality of the library in general. The :mod:`~pycdf.const` module contains constants useful for accessing the underlying library. Authors: Jon Niehof Institution: University of New Hampshire Contact: [email protected] Copyright 2010-2015 Los Alamos National Security, LLC. """ __contact__ = 'Jon Niehof, [email protected]' try: from collections.abc import MutableMapping, MutableSequence except ImportError: from collections import MutableMapping, MutableSequence import ctypes import ctypes.util import datetime import operator import os import os.path import shutil import sys import tempfile import warnings import weakref import numpy import numpy.ma #Import const AFTER library loaded, so failed load doesn't leave half-imported #from . import const try: str_classes = (str, bytes, unicode) except NameError: str_classes = (str, bytes) class Library(object): """ Abstraction of the base CDF C library and its state. Not normally intended for end-user use. An instance of this class is created at package load time as the :data:`~pycdf.lib` variable, providing access to the underlying C library if necessary. The CDF library itself is described in section 2.1 of the CDF user's guide, as well as the CDF C reference manual. Calling the C library directly requires knowledge of :mod:`ctypes`. Instantiating this object loads the C library, see :doc:`/pycdf` docs for details. .. autosummary:: ~Library.call ~Library.check_status ~Library.datetime_to_epoch ~Library.datetime_to_epoch16 ~Library.datetime_to_tt2000 ~Library.epoch_to_datetime ~Library.epoch_to_epoch16 ~Library.epoch_to_num ~Library.epoch_to_tt2000 ~Library.epoch16_to_datetime ~Library.epoch16_to_epoch ~Library.epoch16_to_tt2000 ~Library.set_backward supports_int8 ~Library.tt2000_to_datetime ~Library.tt2000_to_epoch ~Library.tt2000_to_epoch16 v_datetime_to_epoch v_datetime_to_epoch16 v_datetime_to_tt2000 v_epoch_to_datetime v_epoch_to_tt2000 v_epoch16_to_datetime v_epoch16_to_tt2000 v_tt2000_to_datetime v_tt2000_to_epoch v_tt2000_to_epoch16 libpath version .. automethod:: call .. automethod:: check_status .. automethod:: datetime_to_epoch .. automethod:: datetime_to_epoch16 .. automethod:: datetime_to_tt2000 .. automethod:: epoch_to_datetime .. automethod:: epoch_to_epoch16 .. automethod:: epoch_to_num .. automethod:: epoch_to_tt2000 .. automethod:: epoch16_to_datetime .. automethod:: epoch16_to_epoch .. automethod:: epoch16_to_tt2000 .. automethod:: set_backward .. attribute:: supports_int8 True if this library supports INT8 and TIME_TT2000 types; else False. .. automethod:: tt2000_to_datetime .. automethod:: tt2000_to_epoch .. 
automethod:: tt2000_to_epoch16 .. method:: v_datetime_to_epoch(datetime) A vectorized version of :meth:`datetime_to_epoch` which takes a numpy array of datetimes as input and returns an array of epochs. .. method:: v_datetime_to_epoch16(datetime) A vectorized version of :meth:`datetime_to_epoch16` which takes a numpy array of datetimes as input and returns an array of epoch16. .. method:: v_datetime_to_tt2000(datetime) A vectorized version of :meth:`datetime_to_tt2000` which takes a numpy array of datetimes as input and returns an array of TT2000. .. method:: v_epoch_to_datetime(epoch) A vectorized version of :meth:`epoch_to_datetime` which takes a numpy array of epochs as input and returns an array of datetimes. .. method:: v_epoch_to_tt2000(epoch) A vectorized version of :meth:`epoch_to_tt2000` which takes a numpy array of epochs as input and returns an array of tt2000s. .. method:: v_epoch16_to_datetime(epoch0, epoch1) A vectorized version of :meth:`epoch16_to_datetime` which takes a numpy array of epoch16 as input and returns an array of datetimes. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_epoch16_to_tt2000(epoch16) A vectorized version of :meth:`epoch16_to_tt2000` which takes a numpy array of epoch16 as input and returns an array of tt2000s. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_tt2000_to_datetime(tt2000) A vectorized version of :meth:`tt2000_to_datetime` which takes a numpy array of tt2000 as input and returns an array of datetimes. .. method:: v_tt2000_to_epoch(tt2000) A vectorized version of :meth:`tt2000_to_epoch` which takes a numpy array of tt2000 as input and returns an array of epochs. .. method:: v_tt2000_to_epoch16(tt2000) A vectorized version of :meth:`tt2000_to_epoch16` which takes a numpy array of tt2000 as input and returns an array of epoch16. .. attribute:: libpath The path where pycdf found the CDF C library, potentially useful in debugging. If this contains just the name of a file (with no path information), then the system linker found the library for pycdf. On Linux, ``ldconfig -p`` may be useful for displaying the system's library resolution. .. attribute:: version Version of the CDF library, (version, release, increment, subincrement) """ def __init__(self, libpath=None, library=None): """Load the CDF C library. Searches for the library in the order: 1. Appropriately-named file in CDF_LIB 2. Appropriately-named file in CDF_BASE 3. Standard library search path @raise CDFError: BAD_DATA_TYPE if can't map types properly """ if not 'CDF_TMP' in os.environ: os.environ['CDF_TMP'] = tempfile.gettempdir() if not library: if not libpath: self.libpath, self._library = self._find_lib() if self._library is None: raise Exception(( 'Cannot load CDF C library; checked {0}. 
' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(self.libpath))) else: self._library = ctypes.CDLL(libpath) self.libpath = libpath else: self._library = library self.libpath = libpath self._library.CDFlib.restype = ctypes.c_long #commonly used, so set it up here self._library.EPOCHbreakdown.restype = ctypes.c_long self._library.computeEPOCH.restype = ctypes.c_double self._library.computeEPOCH.argtypes = [ctypes.c_long] * 7 self._library.computeEPOCH16.restype = ctypes.c_double self._library.computeEPOCH16.argtypes = [ctypes.c_long] * 10 + \ [ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDFsetFileBackward'): self._library.CDFsetFileBackward.restype = None self._library.CDFsetFileBackward.argtypes = [ctypes.c_long] #Map old name to the 3.7.1+ name if not hasattr(self._library, 'computeTT2000') \ and hasattr(self._library, 'CDF_TT2000_from_UTC_parts'): self._library.computeTT2000 \ = self._library.CDF_TT2000_from_UTC_parts if hasattr(self._library, 'computeTT2000'): self._library.computeTT2000.restype = ctypes.c_longlong self._library.computeTT2000.argtypes = \ [ctypes.c_double] *9 #Map old name to the 3.7.1+ name if not hasattr(self._library, 'breakdownTT2000') \ and hasattr(self._library, 'CDF_TT2000_to_UTC_parts'): self._library.breakdownTT2000 \ = self._library.CDF_TT2000_to_UTC_parts if hasattr(self._library, 'breakdownTT2000'): self._library.breakdownTT2000.restype = None self._library.breakdownTT2000.argtypes = \ [ctypes.c_longlong] + [ctypes.POINTER(ctypes.c_double)] * 9 if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH'): self._library.CDF_TT2000_to_UTC_EPOCH.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH.argtypes = [ctypes.c_longlong] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH'): self._library.CDF_TT2000_from_UTC_EPOCH.restype = ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH.argtypes = [ctypes.c_double] if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH16'): self._library.CDF_TT2000_to_UTC_EPOCH16.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH16.argtypes = \ [ctypes.c_longlong, ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH16'): self._library.CDF_TT2000_from_UTC_EPOCH16.restype = \ ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH16.argtypes = \ [ctypes.POINTER(ctypes.c_double * 2)] #Get CDF version information ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) sub = ctypes.c_char(b' ') self.call(const.GET_, const.LIB_VERSION_, ctypes.byref(ver), const.GET_, const.LIB_RELEASE_, ctypes.byref(rel), const.GET_, const.LIB_INCREMENT_, ctypes.byref(inc), const.GET_, const.LIB_subINCREMENT_, ctypes.byref(sub)) ver = ver.value rel = rel.value inc = inc.value sub = sub.value self.version = (ver, rel, inc, sub) self._del_middle_rec_bug = ver < 3 or (ver == 3 and (rel < 4 or (rel == 4 and inc < 1))) self.supports_int8 = (ver > 3 or (ver == 3 and rel >=4)) self.cdftypenames = {const.CDF_BYTE.value: 'CDF_BYTE', const.CDF_CHAR.value: 'CDF_CHAR', const.CDF_INT1.value: 'CDF_INT1', const.CDF_UCHAR.value: 'CDF_UCHAR', const.CDF_UINT1.value: 'CDF_UINT1', const.CDF_INT2.value: 'CDF_INT2', const.CDF_UINT2.value: 'CDF_UINT2', const.CDF_INT4.value: 'CDF_INT4', const.CDF_UINT4.value: 'CDF_UINT4', const.CDF_INT8.value: 'CDF_INT8', const.CDF_FLOAT.value: 'CDF_FLOAT', const.CDF_REAL4.value: 'CDF_REAL4', const.CDF_DOUBLE.value: 'CDF_DOUBLE', const.CDF_REAL8.value: 'CDF_REAL8', const.CDF_EPOCH.value: 'CDF_EPOCH', 
const.CDF_EPOCH16.value: 'CDF_EPOCH16', const.CDF_TIME_TT2000.value: 'CDF_TIME_TT2000', } self.numpytypedict = {const.CDF_BYTE.value: numpy.int8, const.CDF_CHAR.value: numpy.int8, const.CDF_INT1.value: numpy.int8, const.CDF_UCHAR.value: numpy.uint8, const.CDF_UINT1.value: numpy.uint8, const.CDF_INT2.value: numpy.int16, const.CDF_UINT2.value: numpy.uint16, const.CDF_INT4.value: numpy.int32, const.CDF_UINT4.value: numpy.uint32, const.CDF_INT8.value: numpy.int64, const.CDF_FLOAT.value: numpy.float32, const.CDF_REAL4.value: numpy.float32, const.CDF_DOUBLE.value: numpy.float64, const.CDF_REAL8.value: numpy.float64, const.CDF_EPOCH.value: numpy.float64, const.CDF_EPOCH16.value: numpy.dtype((numpy.float64, 2)), const.CDF_TIME_TT2000.value: numpy.int64, } self.timetypes = [const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value] if not self.supports_int8: del self.cdftypenames[const.CDF_INT8.value] del self.numpytypedict[const.CDF_INT8.value] del self.cdftypenames[const.CDF_TIME_TT2000.value] del self.numpytypedict[const.CDF_TIME_TT2000.value] elif sys.platform.startswith('linux') \ and os.uname()[4].startswith('arm') \ and hasattr(self._library, 'computeTT2000') \ and self._library.computeTT2000( 2010, 1, 1, 0, 0, 0, 0, 0, 0) != 315576066184000000: #TT2000 call failed, so probably need to type-pun #double arguments to variadic functions. #Calling convention for non-variadic functions with floats #is unique, but convention for ints is same as variadic. #So type-pun arguments to integers to force that calling #convention. if ctypes.sizeof(ctypes.c_longlong) != \ ctypes.sizeof(ctypes.c_double): warnings.warn('ARM with unknown type sizes; ' 'TT2000 functions will not work.') else: self._library.computeTT2000.argtypes = \ [ctypes.c_longlong] * 9 c_ll_p = ctypes.POINTER(ctypes.c_longlong) if self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( 2010)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) != 315576066184000000: warnings.warn('ARM with unknown calling convention; ' 'TT2000 functions will not work.') self.datetime_to_tt2000 = self._datetime_to_tt2000_typepunned v_epoch16_to_datetime = numpy.frompyfunc( self.epoch16_to_datetime, 2, 1) self.v_epoch16_to_datetime = \ lambda x: v_epoch16_to_datetime(x[..., 0], x[..., 1]) self.v_epoch_to_datetime = numpy.frompyfunc( self.epoch_to_datetime, 1, 1) self.v_tt2000_to_datetime = numpy.frompyfunc( self.tt2000_to_datetime, 1, 1) self.v_datetime_to_epoch = numpy.vectorize( self.datetime_to_epoch, otypes=[numpy.float64]) v_datetime_to_epoch16 = numpy.frompyfunc( self.datetime_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. 
We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_datetime_to_epoch16(x): retval = numpy.require(v_datetime_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_datetime_to_epoch16 = _v_datetime_to_epoch16 self.v_datetime_to_tt2000 = numpy.vectorize( self.datetime_to_tt2000, otypes=[numpy.int64]) self.v_epoch_to_tt2000 = numpy.vectorize( self.epoch_to_tt2000, otypes=[numpy.int64]) self.v_tt2000_to_epoch = numpy.vectorize( self.tt2000_to_epoch, otypes=[numpy.float64]) v_epoch16_to_tt2000 = numpy.frompyfunc( self.epoch16_to_tt2000, 2, 1) self.v_epoch16_to_tt2000 = \ lambda x: v_epoch16_to_tt2000(x[..., 0], x[..., 1]) v_tt2000_to_epoch16 = numpy.frompyfunc( self.tt2000_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_tt2000_to_epoch16(x): retval = numpy.require(v_tt2000_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_tt2000_to_epoch16 = _v_tt2000_to_epoch16 if not self.supports_int8: self.datetime_to_tt2000 = self._bad_tt2000 self.tt2000_to_datetime = self._bad_tt2000 self.v_datetime_to_tt2000 = self._bad_tt2000 self.v_tt2000_to_datetime = self._bad_tt2000 self.epoch_to_tt2000 = self._bad_tt2000 self.v_epoch_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch = self._bad_tt2000 self.v_tt2000_to_epoch = self._bad_tt2000 self.epoch_16_to_tt2000 = self._bad_tt2000 self.v_epoch16_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch16 = self._bad_tt2000 self.v_tt2000_to_epoch16 = self._bad_tt2000 #Default to V2 CDF self.set_backward(True) @staticmethod def _find_lib(): """ Search for the CDF library Searches in likely locations for CDF libraries and attempts to load them. Stops at first successful load and, if fails, reports all the files that were tried as libraries. Returns ======= out : tuple This is either (path to library, loaded library) or, in the event of failure, (None, list of libraries tried) """ failed = [] for libpath in Library._lib_paths(): try: lib = ctypes.CDLL(libpath) except: failed.append(libpath) else: return libpath, lib return (failed, None) @staticmethod def _lib_paths(): """Find candidate paths for the CDF library Does not check that the library is actually in any particular directory, just returns a list of possible locations, in priority order. Returns ======= out : generator of str paths that look like the CDF library """ #What the library might be named names = { 'win32': ['cdf.dll'], 'darwin': ['libcdf.dylib', 'cdf.dylib', 'libcdf.so'], 'linux2': ['libcdf.so'], 'linux': ['libcdf.so'], } names = names.get(sys.platform, ['libcdf.so']) #All existing CDF-library-like paths within a directory search_dir = lambda x: \ [os.path.join(x, fname) for fname in names if os.path.exists(os.path.join(x, fname))] # Only use anaconda locations... # Defined during builds ... if 'PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['PREFIX'], 'lib')): yield p # defined when conda is activated ... 
if 'CONDA_PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'lib')): yield p # Special subdirectory for anaconda unix packages on windows if 'LIBRARY_BIN' in os.environ: for p in search_dir(os.environ['LIBRARY_BIN']): yield p ctypespath = ctypes.util.find_library( 'cdf.dll' if sys.platform == 'win32' else 'cdf') if ctypespath: yield ctypespath def check_status(self, status, ignore=()): """ Raise exception or warning based on return status of CDF call Parameters ========== status : int status returned by the C library Other Parameters ================ ignore : sequence of ctypes.c_long CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. (Default none). Raises ====== CDFError : if status < CDF_WARN, indicating an error Warns ===== CDFWarning : if CDF_WARN <= status < CDF_OK, indicating a warning. Returns ======= out : int status (unchanged) """ if status == const.CDF_OK or status in ignore: return status if status < const.CDF_WARN: raise CDFError(status) else: warning = CDFWarning(status) warning.warn() return status def call(self, *args, **kwargs): """ Call the CDF internal interface Passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with :meth:`check_status`. Terminal NULL is automatically added to args. Parameters ========== args : various, see :mod:`ctypes` Passed directly to the CDF library interface. Useful constants are defined in the :mod:`~pycdf.const` module. Other Parameters ================ ignore : sequence of CDF statuses sequence of CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. Returns ======= out : int CDF status from the library Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ if 'ignore' in kwargs: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) ), kwargs['ignore']) else: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) )) def set_backward(self, backward=True): """ Set backward compatibility mode for new CDFs Unless backward compatible mode is set, CDF files created by the version 3 library can not be read by V2. Parameters ========== backward : boolean Set backward compatible mode if True; clear it if False. Raises ====== ValueError : if backward=False and underlying CDF library is V2 """ if self.version[0] < 3: if not backward: raise ValueError( 'Cannot disable backward-compatible mode for CDF version 2.') else: return self._library.CDFsetFileBackward(const.BACKWARDFILEon if backward else const.BACKWARDFILEoff) def epoch_to_datetime(self, epoch): """ Converts a CDF epoch value to a datetime Parameters ========== epoch : float epoch value from CDF Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
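        For illustration, a value produced by :meth:`datetime_to_epoch`
        converts back to the original datetime (there is no
        sub-millisecond information to lose):

            >>> pycdf.lib.epoch_to_datetime(
            ...     pycdf.lib.datetime_to_epoch(datetime.datetime(2010, 1, 1)))
            datetime.datetime(2010, 1, 1, 0, 0)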
See Also ======== v_epoch_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) self._library.EPOCHbreakdown(ctypes.c_double(epoch), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999000) else: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, msec.value * 1000) def datetime_to_epoch(self, dt): """ Converts a Python datetime to a CDF Epoch value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : float epoch corresponding to dt See Also ======== v_datetime_to_epoch """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) micro = dt.microsecond % 1000 if micro >= 500 and dt.year < 9999: dt += datetime.timedelta(0, 0, 1000) return self._library.computeEPOCH(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000)) def epoch16_to_datetime(self, epoch0, epoch1): """ Converts a CDF epoch16 value to a datetime .. note:: The call signature has changed since SpacePy 0.1.2. Formerly this method took a single argument with two values; now it requires two arguments (one for each value). To convert existing code, replace ``epoch16_to_datetime(epoch)`` with ``epoch16_to_datetime(*epoch)``. Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. See Also ======== v_epoch16_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) usec = ctypes.c_long(0) nsec = ctypes.c_long(0) psec = ctypes.c_long(0) self._library.EPOCH16breakdown((ctypes.c_double * 2)(epoch0, epoch1), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec), ctypes.byref(psec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) micro = int(float(msec.value) * 1000 + float(usec.value) + float(nsec.value) / 1000 + float(psec.value) / 1e6 + 0.5) if micro < 1000000: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_epoch16(self, dt): """ Converts a Python datetime to a CDF Epoch16 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : list of float epoch16 corresponding to dt See Also ======== v_datetime_to_epoch16 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) #Default to "illegal epoch" epoch16 = (ctypes.c_double * 2)(-1., -1.) 
if self._library.computeEPOCH16(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0, 0, epoch16): return (-1., -1.) #Failure, so illegal epoch return (epoch16[0], epoch16[1]) def epoch_to_epoch16(self, epoch): """ Converts a CDF EPOCH to a CDF EPOCH16 value Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : (double, double) EPOCH16 corresponding to epoch """ e = numpy.require(epoch, numpy.float64) s = numpy.trunc(e / 1000.0) #ugly numpy stuff, probably a better way.... res = numpy.hstack((s, (e - s * 1000.0) * 1e9)) if len(res) <= 2: return res newshape = list(res.shape[0:-2]) newshape.append(res.shape[-1] // 2) newshape.append(2) return numpy.rollaxis(res.reshape(newshape), -1, -2) def epoch_to_num(self, epoch): """ Convert CDF EPOCH to matplotlib number. Same output as :func:`~matplotlib.dates.date2num` and useful for plotting large data sets without converting the times through datetime. Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : double Floating point number representing days since 0001-01-01. """ #date2num day 1 is 1/1/1 00UT #epoch 1/1/1 00UT is 31622400000.0 (millisecond) return (epoch - 31622400000.0) / (24 * 60 * 60 * 1000.0) + 1.0 def epoch16_to_epoch(self, epoch16): """ Converts a CDF EPOCH16 to a CDF EPOCH value Parameters ========== epoch16 : (double, double) EPOCH16 to convert. Lists and numpy arrays are acceptable. LAST dimension should be 2: the two pairs of EPOCH16 Returns ======= out : double EPOCH corresponding to epoch16 """ e = numpy.require(epoch16, numpy.float64) return e[..., 0] * 1000.0 + numpy.round(e[..., 1] / 1e9) def tt2000_to_datetime(self, tt2000): """ Converts a CDF TT2000 value to a datetime .. note:: Although TT2000 values support leapseconds, Python's datetime object does not. Any times after 23:59:59.999999 will be truncated to 23:59:59.999999. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
See Also ======== v_tt2000_to_datetime """ yyyy = ctypes.c_double(0) mm = ctypes.c_double(0) dd = ctypes.c_double(0) hh = ctypes.c_double(0) min = ctypes.c_double(0) sec = ctypes.c_double(0) msec = ctypes.c_double(0) usec = ctypes.c_double(0) nsec = ctypes.c_double(0) self._library.breakdownTT2000( ctypes.c_longlong(tt2000), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) sec = int(sec.value) if sec >= 60: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), 59, 999999) micro = int(msec.value * 1000 + usec.value + nsec.value / 1000 + 0.5) if micro < 1000000: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_tt2000(self, dt): """ Converts a Python datetime to a CDF TT2000 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0) def _datetime_to_tt2000_typepunned(self, dt): """ Converts a Python datetime to a CDF TT2000 value Typepunned version that passes doubles as longlongs, to get around ARM calling convention oddness. Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ c_ll_p = ctypes.POINTER(ctypes.c_longlong) if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( dt.year)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.month)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.day)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.hour)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.minute)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.second)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond // 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond % 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) def epoch_to_tt2000(self, epoch): """ Converts a CDF EPOCH to a CDF TT2000 value Parameters ========== epoch : double EPOCH to convert Returns ======= out : int tt2000 corresponding to epoch See Also ======== v_epoch_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH(epoch) def tt2000_to_epoch(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH .. note:: Although TT2000 values support leapseconds, CDF EPOCH values do not. 
Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double EPOCH corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch """ return self._library.CDF_TT2000_to_UTC_EPOCH(tt2000) def epoch16_to_tt2000(self, epoch0, epoch1): """ Converts a CDF epoch16 value to TT2000 .. note:: Because TT2000 does not support picoseconds, the picoseconds value in epoch is ignored (i.e., truncated.) Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : long TT2000 corresponding to epoch. See Also ======== v_epoch16_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH16( (ctypes.c_double * 2)(epoch0, epoch1)) def tt2000_to_epoch16(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH16 .. note:: Although TT2000 values support leapseconds, CDF EPOCH16 values do not. Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double, double EPOCH16 corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch16 """ #Default to "illegal epoch" if isn't populated epoch16 = (ctypes.c_double * 2)(-1., -1.) if self._library.CDF_TT2000_to_UTC_EPOCH16(tt2000, epoch16): return (-1., -1.) #Failure; illegal epoch return (epoch16[0], epoch16[1]) def _bad_tt2000(*args, **kwargs): """Convenience function for complaining that TT2000 not supported""" raise NotImplementedError( 'TT2000 functions require CDF library 3.4.0 or later') def download_library(): """Download and install the CDF library""" if sys.platform != 'win32': raise NotImplementedError( 'CDF library install only supported on Windows') try: import html.parser as HTMLParser except ImportError: import HTMLParser #https://stackoverflow.com/questions/1713038/super-fails-with-error-typeerror-argument-1-must-be-type-not-classobj class LinkParser(HTMLParser.HTMLParser, object): def __init__(self, *args, **kwargs): self.links_found = [] super(LinkParser, self).__init__(*args, **kwargs) def handle_starttag(self, tag, attrs): if tag != 'a' or attrs[0][0] != 'href': return self.links_found.append(attrs[0][1]) import re import subprocess try: import urllib.request as u except ImportError: import urllib as u # Removed reference to spacepy #import spacepy #if spacepy.config.get('user_agent', None): # class AppURLopener(u.FancyURLopener): # version = spacepy.config['user_agent'] # u._urlopener = AppURLopener() baseurl = 'https://spdf.sci.gsfc.nasa.gov/pub/software/cdf/dist/' url = u.urlopen(baseurl) listing = url.read() url.close() p = LinkParser() p.feed(listing) cdfdist = [l for l in p.links_found if re.match('^cdf3\d_\d(?:_\d)?/$', l)] if not cdfdist: raise RuntimeError( "Couldn't find CDF distribution directory to download") cdfdist.sort(key=lambda x: x.rstrip('/').split('_')) cdfverbase = cdfdist[-1].rstrip('/') instfname = cdfverbase + ('_0' if cdfverbase.count('_') == 1 else '') + \ '-setup-{0}.exe'.format(len('%x' % sys.maxsize)*4) insturl = baseurl + cdfverbase + '/windows/' + instfname tmpdir = tempfile.mkdtemp() try: fname, status = u.urlretrieve(insturl, os.path.join(tmpdir, instfname)) subprocess.check_call([fname, '/install', '/q1'], shell=False) finally: shutil.rmtree(tmpdir) _libpath, _library = Library._find_lib() 
if _library is None: raise Exception(('Cannot load CDF C library; checked {0}. ' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(_libpath))) from . import const lib = Library(_libpath, _library) """Module global library object. Initalized at module load time so all classes have ready access to the CDF library and a common state. E.g: >>> import pycdf >>> pycdf.lib.version (3, 3, 0, ' ') """ class CDFException(Exception): """ Base class for errors or warnings in the CDF library. Not normally used directly, but in subclasses :class:`CDFError` and :class:`CDFWarning`. Error messages provided by this class are looked up from the underlying C library. """ def __init__(self, status): """ Create a CDF Exception Uses CDF C library to look up an appropriate error message. Parameters ========== status : ctypes.c_long CDF status """ self.status = status self.string = 'CDF error ' + repr(status) + ', unable to get details.' message = ctypes.create_string_buffer(const.CDF_STATUSTEXT_LEN + 1) try: retval = lib._library.CDFlib(const.SELECT_, const.CDF_STATUS_, ctypes.c_long(status), const.GET_, const.STATUS_TEXT_, message, const.NULL_) if retval == const.CDF_OK: if isinstance(message.value, str): self.string = message.value elif isinstance(message.value, bytes): self.string = message.value.decode() except: pass def __str__(self): """ Error string associated with the library error. Returns ======= out : str Error message from the CDF library. """ return self.string class CDFError(CDFException): """Raised for an error in the CDF library.""" pass class CDFWarning(CDFException, UserWarning): """Used for a warning in the CDF library.""" def warn(self, level=4): """ Issues a warning based on the information stored in my exception Intended for use in check_status or similar wrapper function. Other Parameters ================ level : int optional (default 3), how far up the stack the warning should be reported. Passed directly to :class:`warnings.warn`. """ warnings.warn(self, self.__class__, level) class EpochError(Exception): """Used for errors in epoch routines""" pass def _compress(obj, comptype=None, param=None): """Set or check the compression of a :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param obj: object on which to set or check compression @type obj: :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param comptype: type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :py:mod:`pycdf.const`. If not specified, will not change compression. @type comptype: ctypes.c_long @param param: Compression parameter, see CDF CRM 4.10 and :py:mod:`pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
@type param: ctypes.c_long @return: (comptype, param) currently in effect @rtype: tuple """ if isinstance(obj, CDF): COMPRESSION_ = const.CDF_COMPRESSION_ elif isinstance(obj, Var): COMPRESSION_ = const.zVAR_COMPRESSION_ else: raise ValueError('Must specify a CDF or Var type.') validparams = {const.NO_COMPRESSION.value: [ctypes.c_long(0)], const.RLE_COMPRESSION.value: [const.RLE_OF_ZEROs], const.HUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.AHUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.GZIP_COMPRESSION.value: [ctypes.c_long(5), ctypes.c_long(1), ctypes.c_long(2), ctypes.c_long(3), ctypes.c_long(4), ctypes.c_long(6), ctypes.c_long(7), ctypes.c_long(8), ctypes.c_long(9), ], } comptypes = [const.NO_COMPRESSION, const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION, const.GZIP_COMPRESSION] comptypevalues = [i.value for i in comptypes] if comptype != None: if not hasattr(comptype, 'value'): comptype = ctypes.c_long(comptype) if param is None: if not comptype.value in validparams: raise CDFError(const.BAD_COMPRESSION) param = validparams[comptype.value][0] paramlist = (ctypes.c_long * 1)(param) obj._call(const.PUT_, COMPRESSION_, comptype, paramlist) params = (ctypes.c_long * const.CDF_MAX_PARMS)(*([0] * const.CDF_MAX_PARMS)) comptype = ctypes.c_long(0) percent = ctypes.c_long(0) obj._call(const.GET_, COMPRESSION_, ctypes.byref(comptype), ctypes.byref(params), ctypes.byref(percent)) param = params[0] if not comptype.value in comptypevalues: raise CDFError(const.BAD_COMPRESSION) validparamvalues = [i.value for i in validparams[comptype.value]] if not param in validparamvalues: raise CDFError(const.BAD_COMPRESSION_PARM) comptype = comptypes[comptypevalues.index(comptype.value)] if comptype in (const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION): param = validparams[comptype.value][validparamvalues.index(param)] return (comptype, param) class CDF(MutableMapping): """ Python object representing a CDF file. Open or create a CDF file by creating an object of this class. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. A readonly CDF with many variables may be slow to close. See :meth:`readonly`. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :meth:`close` or :meth:`save` when done. .. note:: Existing CDF files are opened read-only by default, see :meth:`readonly` to change. CDF supports the `with <http://docs.python.org/tutorial/inputoutput.html#methods-of-file-objects>`_ keyword, like other file objects, so: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... #do brilliant things with the CDF will open the CDF, execute the indented statements, and close the CDF when finished or when an error occurs. The `python docs <http://docs.python.org/reference/compound_stmts.html#with>`_ include more detail on this 'context manager' ability. 
CDF objects behave like a python `dictionary <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_, where the keys are names of variables in the CDF, and the values, :class:`Var` objects. As a dictionary, they are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_ and it is easy to loop over all of the variables in a file. Some examples: #. List the names of all variables in the open CDF ``cdffile``: >>> cdffile.keys() >>> for k in cdffile: #Alternate ... print(k) #. Get a :class:`Var` object for the variable named ``Epoch``: >>> epoch = cdffile['Epoch'] #. Determine if a CDF contains a variable named ``B_GSE``: >>> if 'B_GSE' in cdffile: ... print('B_GSE is in the file') ... else: ... print('B_GSE is not in the file') #. Find how many variables are in the file: >>> print(len(cdffile)) #. Delete the variable ``Epoch`` from the open CDF file ``cdffile``: >>> del cdffile['Epoch'] #. Display a summary of variables and types in open CDF file ``cdffile``: >>> print(cdffile) #. Open the CDF named ``cdf_filename.cdf``, read *all* the data from all variables into dictionary ``data``, and close it when done or if an error occurs: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... data = cdffile.copy() This last example can be very inefficient as it reads the entire CDF. Normally it's better to treat the CDF as a dictionary and access only the data needed, which will be pulled transparently from disc. See :class:`Var` for more subtle examples. Potentially useful dictionary methods and related functions: - `in <http://docs.python.org/reference/expressions.html#in>`_ - `keys <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_ - :py:func:`len` - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - :py:func:`sorted` - :py:func:`~spacepy.toolbox.dictree` The CDF user's guide section 2.2 has more background information on CDF files. The :attr:`~CDF.attrs` Python attribute acts as a dictionary referencing CDF attributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`gAttrList` for more on the dictionary of global attributes. Creating a new CDF from a master (skeleton) CDF has similar syntax to opening one: >>> cdffile = pycdf.CDF('cdf_filename.cdf', 'master_cdf_filename.cdf') This creates and opens ``cdf_filename.cdf`` as a copy of ``master_cdf_filename.cdf``. Using a skeleton CDF is recommended over making a CDF entirely from scratch, but this is possible by specifying a blank master: >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') When CDFs are created in this way, they are opened read-write, see :py:meth:`readonly` to change. By default, new CDFs (without a master) are created in version 2 (backward-compatible) format. To create a version 3 CDF, use :meth:`Library.set_backward`: >>> pycdf.lib.set_backward(False) >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') Add variables by direct assignment, which will automatically set type and dimension based on the data provided: >>> cdffile['new_variable_name'] = [1, 2, 3, 4] or, if more control is needed over the type and dimensions, use :py:meth:`new`. Although it is supported to assign Var objects to Python variables for convenience, there are some minor pitfalls that can arise when changing a CDF that will not affect most users. 
This is only a concern when assigning a zVar object to a Python variable, changing the CDF through some other variable, and then trying to use the zVar object via the originally assigned variable. Deleting a variable: >>> var = cdffile['Var1'] >>> del cdffile['Var1'] >>> var[0] #fail, no such variable Renaming a variable: >>> var = cdffile['Var1'] >>> cdffile['Var1'].rename('Var2') >>> var[0] #fail, no such variable Renaming via the same variable works: >>> var = cdffile['Var1'] >>> var.rename('Var2') >>> var[0] #succeeds, aware of new name Deleting a variable and then creating another variable with the same name may lead to some surprises: >>> var = cdffile['Var1'] >>> var[...] = [1, 2, 3, 4] >>> del cdffile['Var1'] >>> cdffile.new('Var1', data=[5, 6, 7, 8] >>> var[...] [5, 6, 7, 8] .. autosummary:: ~CDF.attr_num ~CDF.attrs ~CDF.add_attr_to_cache ~CDF.add_to_cache ~CDF.backward ~CDF.checksum ~CDF.clear_attr_from_cache ~CDF.clear_from_cache ~CDF.clone ~CDF.close ~CDF.col_major ~CDF.compress ~CDF.copy ~CDF.from_data ~CDF.new ~CDF.raw_var ~CDF.readonly ~CDF.save ~CDF.var_num ~CDF.version .. attribute:: CDF.attrs Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. .. attribute:: CDF.backward True if this CDF was created in backward-compatible mode (for opening with CDF library before 3.x) .. automethod:: add_to_cache .. automethod:: add_attr_to_cache .. automethod:: attr_num .. automethod:: checksum .. automethod:: clear_from_cache .. automethod:: clear_attr_from_cache .. automethod:: clone .. automethod:: close .. automethod:: col_major .. automethod:: compress .. automethod:: copy .. automethod:: from_data .. automethod:: new .. automethod:: raw_var .. automethod:: readonly .. automethod:: save .. automethod:: var_num .. automethod:: version """ def __init__(self, pathname, masterpath=None, create=None, readonly=None): """Open or create a CDF file. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. Raises ====== CDFError if CDF library reports an error CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save` when done. 
""" if masterpath is not None: #Looks like we want to create if create is False: raise ValueError('Cannot specify a master CDF without creating a CDF') if readonly is True: raise ValueError('Cannot create a CDF in readonly mode') if create and readonly: raise ValueError('Cannot create a CDF in readonly mode') try: self.pathname = pathname.encode() except AttributeError: raise ValueError( 'pathname must be string-like: {0}'.format(pathname)) self._handle = ctypes.c_void_p(None) self._opened = False if masterpath is None and not create: self._open(True if readonly is None else readonly) elif masterpath: self._from_master(masterpath.encode()) else: self._create() lib.call(const.SELECT_, const.CDF_zMODE_, ctypes.c_long(2)) self._attrlistref = weakref.ref(gAttrList(self)) self.backward = self.version()[0] < 3 self._var_nums = {} """Cache of name-to-number mappings for variables in this CDF""" self._attr_info = {} """Cache of name-to-(number, global) mappings for attributes in this CDF""" def __del__(self): """Destructor; called when CDF object is destroyed. Close CDF file if there is still a valid handle. .. note:: To avoid data loss, explicitly call :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save`. """ if self._opened: self.close() def __delitem__(self, name): """Delete a zVariable in this CDF, by name or number Parameters ========== name : string or int Name or number of the CDF variable .. note: Variable numbers may change if variables are added or removed. Examples ======== Delete the variable ``Epoch`` from the open CDF file ``cdffile``. >>> del cdffile['Epoch'] """ self[name]._delete() def __enter__(self): """Context manager entrance function.""" return self def __exit__(self, type, value, traceback): """Context manager exit function. Close CDF file. """ self.close() def __getitem__(self, name): """Gets a zVariable in this CDF, by name or number The CDF acts like a dict @param name: Name or number of the CDF variable @type name: string or int @return: CDF variable named or numbered L{name} @rtype: :py:class:`pycdf.Var` @raise KeyError: for pretty much any problem in lookup @note: variable numbers may change if variables are added or removed. """ try: return Var(self, name) except CDFException as e: raise KeyError('{0}: {1}'.format(name, e)) def __setitem__(self, name, data): """Writes data to a zVariable in this CDF If the zVariable does not exist, will create one matching L{data}. If it does exist, will attempt to write L{data} to it without changing the type or dimensions. @param name: name or number of the variable to write @type name: str or int @param data: data to write, or a :py:class:`pycdf.Var` to copy """ if isinstance(data, Var): self.clone(data, name) elif name in self: self[name][...] 
= data if hasattr(data, 'attrs'): self[name].attrs.clone(data.attrs) else: self.new(name, data) def __iter__(self, current = 0): """Iterates over zVars in CDF Iterators for dicts return keys @note: Returned in variable-number order """ while current < self.__len__(): name = self[current].name() value = (yield name) if value is None: current += 1 else: current = self[value]._num() current += 1 def __len__(self): """Implements 'length' of CDF (number of zVars) @return: number of zVars in the CDF @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, const.CDF_NUMzVARS_, ctypes.byref(count)) return count.value def __contains__(self, key): """Determines whether a particular variable name is in the CDF @note: Essentially an efficiency function; L{__iter__} is called if this isn't defined @param key: key/variable name to check @type key: string @return: True if L{key} is the name of a variable in CDF, else False @rtype: Boolean """ try: foo = self[key] return True except KeyError as e: expected = str(key) + \ ": NO_SUCH_VAR: Named variable not found in this CDF." if expected in e.args: return False raise def __repr__(self): """Returns representation of CDF Cannot return anything that can be eval'd to create a copy of the CDF, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<CDF:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the CDF This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.CDF`, just the names, types, and sizes of all variables. (Attributes are not listed.) @return: description of the variables in the CDF @rtype: str """ if self._opened: return '\n'.join([key + ': ' + str(value) for (key, value) in sorted(self.items())]) #can get away with this sort because second value in tuple isn't #compared unless first are different, and variable name is unique. else: if isinstance(self.pathname, str): return 'Closed CDF {0}'.format(self.pathname) else: return 'Closed CDF {0}'.format(self.pathname.decode('ascii')) def _open(self, readonly=True): """Opens the CDF file (called on init) Will open an existing CDF file read/write. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.OPEN_, const.CDF_, self.pathname, ctypes.byref(self._handle)) self._opened = True if readonly: #Default is RW self.readonly(readonly) def _create(self): """Creates (and opens) a new CDF file Created at ``pathname``. Assumes zero-dimension r variables Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.CREATE_, const.CDF_, self.pathname, ctypes.c_long(0), (ctypes.c_long * 1)(0), ctypes.byref(self._handle)) self._opened = True def _from_master(self, master_path): """Creates a new CDF from a master CDF file ``master_path`` is copied to ``pathname`` and opened. Parameters ========== master_path : string location of the master CDF file Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. 
note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ if os.path.exists(self.pathname): raise CDFError(const.CDF_EXISTS) shutil.copy2(master_path, self.pathname) self._open(False) def _call(self, *args, **kwargs): """Select this CDF as current and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. Parameters ========== args : various, see :py:mod:`ctypes`. Passed directly to the CDF library interface. Useful constants are defined in the :doc:`const <pycdf_const>` module of this package. Returns ======= out : ctypes.c_long CDF status from the library .. note: Terminal NULL_ is automatically added to ``args``. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. """ return lib.call(const.SELECT_, const.CDF_, self._handle, *args, **kwargs) def clone(self, zVar, name=None, data=True): """ Clone a zVariable (from another CDF or this) into this CDF Parameters ========== zVar : :py:class:`Var` variable to clone Other Parameters ================ name : str Name of the new variable (default: name of the original) data : boolean (optional) Copy data, or only type, dimensions, variance, attributes? (default: True, copy data as well) Returns ======= out : :py:class:`Var` The newly-created zVar in this CDF """ if name is None: name = zVar.name() if name in self: del self[name] self.new(name, type=zVar.type(), recVary=zVar.rv(), dimVarys=zVar.dv(), dims=zVar._dim_sizes(), n_elements=zVar._nelems()) self[name].compress(*zVar.compress()) self[name].attrs.clone(zVar.attrs) if data: r = zVar._raw zVar._raw = True self.raw_var(name)[...] = zVar[...] zVar._raw = r return zVar def col_major(self, new_col=None): """ Finds the majority of this CDF file Other Parameters ================ new_col : boolean Specify True to change to column-major, False to change to row major, or do not specify to check the majority rather than changing it. (default is check only) Returns ======= out : boolean True if column-major, false if row-major """ if new_col != None: new_maj = const.COLUMN_MAJOR if new_col else const.ROW_MAJOR self._call(const.PUT_, const.CDF_MAJORITY_, new_maj) maj = ctypes.c_long(0) self._call(const.GET_, const.CDF_MAJORITY_, ctypes.byref(maj)) if not maj.value in (const.ROW_MAJOR.value, const.COLUMN_MAJOR.value): raise CDFError(const.BAD_MAJORITY) return maj.value == const.COLUMN_MAJOR.value def readonly(self, ro=None): """ Sets or check the readonly status of this CDF If the CDF has been changed since opening, setting readonly mode will have no effect. .. note:: Closing a CDF that has been opened readonly, or setting readonly False, may take a substantial amount of time if there are many variables in the CDF, as a (potentially large) cache needs to be cleared. Consider specifying ``readonly=False`` when opening the file if this is an issue. However, this may make some reading operations slower. Other Parameters ================ ro : Boolean True to set the CDF readonly, False to set it read/write, or leave out to check only. 
Returns ======= out : Boolean True if CDF is read-only, else False Raises ====== CDFError : if bad mode is set """ if ro == True: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYon) elif ro == False: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYoff) mode = ctypes.c_long(0) self._call(const.CONFIRM_, const.CDF_READONLY_MODE_, ctypes.byref(mode)) if mode.value == const.READONLYon.value: return True elif mode.value == const.READONLYoff.value: return False else: raise CDFError(const.BAD_READONLY_MODE.value) # MASKED: checksum function (lines 2025-2050) def close(self): """ Closes the CDF file Although called on object destruction (:meth:`~CDF.__del__`), to ensure all data are saved, the user should explicitly call :meth:`~CDF.close` or :meth:`~CDF.save`. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.CLOSE_, const.CDF_) self._opened = False def compress(self, comptype=None, param=None): """ Set or check the compression of this CDF Sets compression on entire *file*, not per-variable. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) Returns ======= out : tuple (comptype, param) currently in effect See Also ======== :meth:`Var.compress` Examples ======== Set file ``cdffile`` to gzip compression, compression level 9: >>> cdffile.compress(pycdf.const.GZIP_COMPRESSION, 9) """ return _compress(self, comptype, param) def new(self, name, data=None, type=None, recVary=True, dimVarys=None, dims=None, n_elements=None, compress=None, compress_param=None): """ Create a new zVariable in this CDF .. note:: Either ``data`` or ``type`` must be specified. If type is not specified, it is guessed from ``data``. Parameters ========== name : str name of the new variable Other Parameters ================ data data to store in the new variable. If this has a an ``attrs`` attribute (e.g., :class:`~spacepy.datamodel.dmarray`), it will be used to populate attributes of the new variable. type : ctypes.c_long CDF type of the variable, from :mod:`~pycdf.const`. See section 2.5 of the CDF user's guide for more information on CDF data types. recVary : boolean record variance of the variable (default True) dimVarys : list of boolean dimension variance of each dimension, default True for all dimensions. dims : list of int size of each dimension of this variable, default zero-dimensional. Note this is the dimensionality as defined by CDF, i.e., for record-varying variables it excludes the leading record dimension. See :py:class:`Var`. n_elements : int number of elements, should be 1 except for CDF_CHAR, for which it's the length of the string. compress : ctypes.c_long Compression to apply to this variable, default None. See :py:meth:`Var.compress`. compress_param : ctypes.c_long Compression parameter if compression used; reasonable default is chosen. See :py:meth:`Var.compress`. Returns ======= out : :py:class:`Var` the newly-created zVariable Raises ====== ValueError : if neither data nor sufficient typing information is provided. 
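Examples
========
A sketch of typical use (variable names and data are illustrative): create a record-varying variable from data, then a non-record-varying scalar of an explicit type:

>>> flux = cdffile.new('Flux', data=[[1., 2., 3.], [4., 5., 6.]])
>>> count = cdffile.new('Count', recVary=False, type=pycdf.const.CDF_INT4)
>>> count[...] = 10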
Notes ===== Any given data may be representable by a range of CDF types; if the type is not specified, pycdf will guess which the CDF types which can represent this data. This breaks down to: #. If input data is a numpy array, match the type of that array #. Proper kind (numerical, string, time) #. Proper range (stores highest and lowest number provided) #. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: #. Type that matches precision of data first, then #. integer type before float type, then #. Smallest type first, then #. signed type first, then #. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if ``data`` specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: #. absolute values between 0 and 3e-39 #. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. """ if type in (const.CDF_EPOCH16, const.CDF_INT8, const.CDF_TIME_TT2000) \ and self.backward: raise ValueError('Cannot use EPOCH16, INT8, or TIME_TT2000 ' 'in backward-compatible CDF') if not lib.supports_int8 and \ type in (const.CDF_INT8, const.CDF_TIME_TT2000): raise ValueError('INT8 and TIME_TT2000 require CDF library 3.4.0') if data is None: if type is None: raise ValueError('Must provide either data or a CDF type.') if dims is None: dims = [] if n_elements is None: n_elements = 1 else: (guess_dims, guess_types, guess_elements) = _Hyperslice.types(data) if dims is None: if recVary: if guess_dims == (): raise ValueError( 'Record-varying data cannot be scalar. ' 'Specify NRV with CDF.new() or put data in array.') dims = guess_dims[1:] else: dims = guess_dims if type is None: type = guess_types[0] if type == const.CDF_EPOCH16.value and self.backward: type = const.CDF_EPOCH if n_elements is None: n_elements = guess_elements if dimVarys is None: dimVarys = [True for i in dims] recVary = const.VARY if recVary else const.NOVARY dimVarys = [const.VARY if dimVary else const.NOVARY for dimVary in dimVarys] if not hasattr(type, 'value'): type = ctypes.c_long(type) if type.value == const.CDF_INT8.value and not lib.supports_int8: raise ValueError( '64-bit integer support require CDF library 3.4.0') if type.value in (const.CDF_EPOCH16.value, const.CDF_INT8.value, const.CDF_TIME_TT2000.value) \ and self.backward: raise ValueError('Data requires EPOCH16, INT8, or TIME_TT2000; ' 'incompatible with backward-compatible CDF') new_var = Var(self, name, type, n_elements, dims, recVary, dimVarys) if compress != None: new_var.compress(compress, compress_param) if data is not None: new_var[...] = data if hasattr(data, 'attrs'): new_var.attrs.clone(data.attrs) return new_var def raw_var(self, name): """ Get a "raw" :class:`Var` object. Normally a :class:`Var` will perform translation of values for certain types (to/from Unicode for CHAR variables on Py3k, and to/from datetime for all time types). A "raw" object does not perform this translation, on read or write. This does *not* affect the data on disk, and in fact it is possible to maintain multiple Python objects with access to the same zVariable. 
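For example (a sketch; assumes ``cdffile`` is an open CDF containing an EPOCH variable ``Epoch``):

>>> raw = cdffile.raw_var('Epoch')
>>> raw[0] #raw floating-point epoch value rather than a datetime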
Parameters ========== name : str name or number of the zVariable """ v = self[name] v._raw = True return v def save(self): """ Saves the CDF file but leaves it open. If closing the CDF, :meth:`close` is sufficient; there is no need to call :meth:`save` before :meth:`close`. .. note:: Relies on an undocumented call of the CDF C library, which is also used in the Java interface. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.SAVE_, const.CDF_) def copy(self): """ Make a copy of all data and attributes in this CDF Returns ======= out : :py:class:`CDFCopy` :class:`~spacepy.datamodel.SpaceData`-like object of all data """ return CDFCopy(self) def version(self): """ Get version of library that created this CDF Returns ======= out : tuple version of CDF library, in form (version, release, increment) """ ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) self._call(const.GET_, const.CDF_VERSION_, ctypes.byref(ver), const.GET_, const.CDF_RELEASE_, ctypes.byref(rel), const.GET_, const.CDF_INCREMENT_, ctypes.byref(inc)) return (ver.value, rel.value, inc.value) def _get_attrs(self): """Get attribute list Provide access to the CDF's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = gAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. """) def var_num(self, varname): """Get the variable number of a particular variable name This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : int Variable number of this zvariable. """ num = self._var_nums.get(varname, None) if num is None: #Copied from Var._get, which can hopefully be thinned varNum = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMBER_, varname, ctypes.byref(varNum)) num = varNum.value self._var_nums[varname] = num return num def attr_num(self, attrname): """Get the attribute number and scope by attribute name This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the zVariable. Not this is NOT a string in Python 3! 
Raises ====== CDFError : if attribute is not found Returns ======= out : tuple attribute number, scope (True for global) of this attribute """ res = self._attr_info.get(attrname, None) if res is None: #Copied from Var._get, which can hopefully be thinned attrNum = ctypes.c_long(0) self._call(const.GET_, const.ATTR_NUMBER_, attrname, ctypes.byref(attrNum)) scope = ctypes.c_long(0) self._call(const.SELECT_, const.ATTR_, attrNum, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) res = (attrNum.value, scope) self._attr_info[attrname] = res return res def clear_attr_from_cache(self, attrname): """Mark an attribute deleted in the name-to-number cache Will remove an attribute, and all attributes with higher numbers, from the attribute cache. Does NOT delete the attribute! This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the attribute. Note this is NOT a string in Python 3! """ num, scope = self.attr_num(attrname) #All numbers higher than this are renumbered for a, n in list(self._attr_info.items()): if n[0] >= num: del self._attr_info[a] def clear_from_cache(self, varname): """Mark a variable deleted in the name-to-number cache Will remove a variable, and all variables with higher numbers, from the variable cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Note this is NOT a string in Python 3! """ num = self.var_num(varname) #All numbers higher than this are renumbered for v, n in list(self._var_nums.items()): if n >= num: del self._var_nums[v] def add_attr_to_cache(self, attrname, num, scope): """Add an attribute to the name-to-number cache This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the attribute. Note this is NOT a string in Python 3! num : int number of the attribute scope : bool True if global scope; False if variable scope. """ self._attr_info[attrname] = (num, scope) def add_to_cache(self, varname, num): """Add a variable to the name-to-number cache This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Note this is NOT a string in Python 3! num : int number of the variable """ self._var_nums[varname] = num #Note there is no function for delete, currently handled in Var.rename #and Attr.rename by just deleting from the dict directly. Maybe this #should be different (maybe it should be possible to follow a variable across #a rename...) class Var(MutableSequence): """ A CDF variable. This object does not directly store the data from the CDF; rather, it provides access to the data in a format much like a Python list or numpy :class:`~numpy.ndarray`.
General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. The CDF user's guide, section 2.3, provides background on variables. .. note:: Not intended to be created directly; use methods of :class:`CDF` to gain access to a variable. A record-varying variable's data are viewed as a hypercube of dimensions n_dims+1 (the extra dimension is the record number). They are indexed in row-major fashion, i.e. the last index changes most frequently / is contiguous in memory. If the CDF is column-major, the data are transformed to row-major before return. Non record-varying variables are similar, but do not have the extra dimension of record number. Variables can be subscripted by a multidimensional index to return the data. Indices are in row-major order with the first dimension representing the record number. If the CDF is column major, the data are reordered to row major. Each dimension is specified by standard Python `slice <http://docs.python.org/tutorial/introduction.html#strings>`_ notation, with dimensions separated by commas. The ellipsis fills in any missing dimensions with full slices. The returned data are lists; Python represents multidimensional arrays as nested lists. The innermost set of lists represents contiguous data. .. note:: numpy 'fancy indexing' is *not* supported. Degenerate dimensions are 'collapsed', i.e. no list of only one element will be returned if a single subscript is specified instead of a range. (To avoid this, specify a slice like 1:2, which starts with 1 and ends before 2). Two special cases: 1. requesting a single-dimension slice for a record-varying variable will return all data for that record number (or those record numbers) for that variable. 2. Requests for multi-dimensional variables may skip the record-number dimension and simply specify the slice on the array itself. In that case, the slice of the array will be returned for all records. In the event of ambiguity (e.g., single-dimension slice on a one-dimensional variable), case 1 takes priority. Otherwise, mismatch between the number of dimensions specified in the slice and the number of dimensions in the variable will cause an :exc:`~exceptions.IndexError` to be thrown. This all sounds very complicated but it is essentially attempting to do the 'right thing' for a range of slices. An unusual case is scalar (zero-dimensional) non-record-varying variables. Clearly they cannot be subscripted normally. In this case, use the ``[...]`` syntax meaning 'access all data.': >>> import pycdf >>> testcdf = pycdf.CDF('test.cdf', '') >>> variable = testcdf.new('variable', recVary=False, ... type=pycdf.const.CDF_INT4) >>> variable[...] = 10 >>> variable <Var: CDF_INT4 [] NRV > >>> variable[...] 10 Reading any empty non-record-varying variable will return an empty with the same *number* of dimensions, but all dimensions will be of zero length. The scalar is, again, a special case: due to the inability to have a numpy array which is both zero-dimensional and empty, reading an NRV scalar variable with no data will return an empty one-dimensional array. This is really not recommended. As a list type, variables are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_; iterating over a variable returns a single complete record at a time. This is all clearer with examples. 
Consider a variable ``B_GSM``, with three elements per record (x, y, z components) and fifty records in the CDF. Then: 1. ``B_GSM[0, 1]`` is the y component of the first record. 2. ``B_GSM[10, :]`` is a three-element list, containing x, y, and z components of the 11th record. As a shortcut, if only one dimension is specified, it is assumed to be the record number, so this could also be written ``B_GSM[10]``. 3. ``B_GSM[...]`` reads all data for ``B_GSM`` and returns it as a fifty-element list, each element itself being a three-element list of x, y, z components. Multidimensional example: consider fluxes stored as a function of pitch angle and energy. Such a variable may be called Flux and stored as a two-dimensional array, with the first dimension representing (say) ten energy steps and the second, eighteen pitch angle bins (ten degrees wide, centered from 5 to 175 degrees). Assume 100 records stored in the CDF (i.e. 100 different times). 1. ``Flux[4]`` is a list of ten elements, one per energy step, each element being a list of 18 fluxes, one per pitch bin. All are taken from the fifth record in the CDF. 2. ``Flux[4, :, 0:4]`` is the same record, all energies, but only the first four pitch bins (roughly, field-aligned). 3. ``Flux[..., 0:4]`` is a 100-element list (one per record), each element being a ten-element list (one per energy step), each containing fluxes for the first four pitch bins. This slicing notation is very flexible and allows reading specifically the desired data from the CDF. All data are, on read, converted to appropriate Python data types; EPOCH, EPOCH16, and TIME_TT2000 types are converted to :class:`~datetime.datetime`. Data are returned in numpy arrays. .. note:: Although pycdf supports TIME_TT2000 variables, the Python :class:`~datetime.datetime` object does not support leap seconds. Thus, on read, any seconds past 59 are truncated to 59.999999 (59 seconds, 999 milliseconds, 999 microseconds). Potentially useful list methods and related functions: - `count <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `in <http://docs.python.org/reference/expressions.html#in>`_ - `index <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `len <http://docs.python.org/library/functions.html#len>`_ - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - `sorted <http://docs.python.org/library/functions.html#sorted>`_ The topic of array majority can be very confusing; good background material is available at `IDL Array Storage and Indexing <http://www.idlcoyote.com/misc_tips/colrow_major.html>`_. In brief, *regardless of the majority stored in the CDF*, pycdf will always present the data in the native Python majority, row-major order, also known as C order. This is the default order in `NumPy <http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html #internal-memory-layout-of-an-ndarray>`_. However, packages that render image data may expect it in column-major order. If the axes seem 'swapped' this is likely the reason. The :attr:`~Var.attrs` Python attribute acts as a dictionary referencing zAttributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`zAttrList` for more on the dictionary of attributes. With writing, as with reading, every attempt has been made to match the behavior of Python lists. You can write one record, many records, or even certain elements of all records. 
There is one restriction: only the record dimension (i.e. dimension 0) can be resized by write, as all records in a variable must have the same dimensions. Similarly, only whole records can be deleted. .. note:: Unusual error messages on writing data usually mean that pycdf is unable to interpret the data as a regular array of a single type matching the type and shape of the variable being written. A 5x4 array is supported; an irregular array where one row has five columns and a different row has six columns is not. Error messages of this type include: - ``Data must be well-formed, regular array of number, string, or datetime`` - ``setting an array element with a sequence.`` - ``shape mismatch: objects cannot be broadcast to a single shape`` For these examples, assume Flux has 100 records and dimensions [2, 3]. Rewrite the first record without changing the rest: >>> Flux[0] = [[1, 2, 3], [4, 5, 6]] Writes a new first record and delete all the rest: >>> Flux[...] = [[1, 2, 3], [4, 5, 6]] Write a new record in the last position and add a new record after: >>> Flux[99:] = [[[1, 2, 3], [4, 5, 6]], ... [[11, 12, 13], [14, 15, 16]]] Insert two new records between the current number 5 and 6: >>> Flux[5:6] = [[[1, 2, 3], [4, 5, 6]], [[11, 12, 13], ... [14, 15, 16]]] This operation can be quite slow, as it requires reading and rewriting the entire variable. (CDF does not directly support record insertion.) Change the first element of the first two records but leave other elements alone: >>> Flux[0:2, 0, 0] = [1, 2] Remove the first record: >>> del Flux[0] Removes record 5 (the sixth): >>> del Flux[5] Due to the need to work around a bug in the CDF library, this operation can be quite slow. Delete *all data* from ``Flux``, but leave the variable definition intact: >>> del Flux[...] .. note:: Although this interface only directly supports zVariables, zMode is set on opening the CDF so rVars appear as zVars. See p.24 of the CDF user's guide; pyCDF uses zMode 2. .. autosummary:: ~Var.attrs ~Var.compress ~Var.copy ~Var.dtype ~Var.dv ~Var.insert ~Var.name ~Var.rename ~Var.rv ~Var.shape ~Var.type .. attribute:: Var.attrs zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. .. automethod:: compress .. automethod:: copy .. autoattribute:: dtype .. automethod:: dv .. automethod:: insert .. automethod:: name .. automethod:: rename .. automethod:: rv .. autoattribute:: shape .. automethod:: type """ def __init__(self, cdf_file, var_name, *args): """Create or locate a variable Parameters ========== cdf_file : :py:class:`pycdf.CDF` CDF file containing this variable var_name : string name of this variable Other Parameters ================ args additional arguments passed to :py:meth:`_create`. If none, opens an existing variable. If provided, creates a new one. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning """ self.cdf_file = cdf_file #This is the definitive "identify" of variable self._name = None self._type = None #CDF type (long) self._raw = False #Raw access (skip all conversions) if len(args) == 0: self._get(var_name) else: self._create(var_name, *args) #Weak reference to attribute list (use attrs instead) #This avoids a reference loop self._attrlistref = weakref.ref(zAttrList(self)) def __getitem__(self, key): """Returns a slice from the data array. Details under :py:class:`pycdf.Var`. @return: The data from this variable @rtype: list-of-lists of appropriate type. 
@raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) #Hyperslice mostly catches this sort of thing, but #an empty variable is a special case, since we might want to #WRITE to 0th record (which Hyperslice also supports) but #can't READ from it, and iterating over tries to read from it. if hslice.rv: if hslice.dimsizes[0] == 0 and hslice.degen[0] and \ hslice.starts[0] == 0: raise IndexError('record index out of range') #For NRV, again hslice will assume 0th record exists since we might #want to write. So ANY degenerate dim other than the glued-on 0th #suggests an explicit index that should fail. None degenerate suggests #make an empty array. #Note this is pulling a lot of hyperslice stuff into getitem! elif hslice.dimsizes[0] == 0: if len(hslice.degen) > 1 and max(hslice.degen[1:]): raise IndexError('record index out of range') else: #The zero-length dimension is degenerate so it gets chopped, #and you can't have a zero-length numpy array that still #maintains the size of all other dimensions. So just force #a zero-dim array and the rest will follow hslice.counts[...] = 0 #If this is a scalar, need to make a single non-degenerate #dimension so it can be empty. if len(hslice.counts) == 1: hslice.degen[0] = False result = hslice.create_array() if hslice.counts[0] != 0: hslice.select() lib.call(const.GET_, const.zVAR_HYPERDATA_, result.ctypes.data_as(ctypes.c_void_p)) return hslice.convert_input_array(result) def __delitem__(self, key): """Removes a record (or set of records) from the CDF Only whole records can be deleted, so the del call must either specify only one dimension or it must specify all elements of the non-record dimensions. This is *not* a way to resize a variable! Deleting records from the middle of a variable may be very slow in some circumstances. To work around a bug in CDF library versions 3.4.0 and before, all the data must be read in, the requested deletions done, and then all written back out. @param key: index or slice to delete @type key: int or slice @raise TypeError: if an attempt is made to delete from a non record-varying variable, or to delete below the record level """ if not self.rv(): raise TypeError('Cannot delete records from non-record-varying ' 'variable.') hslice = _Hyperslice(self, key) if hslice.dims > 1 and (hslice.counts[1:] != hslice.dimsizes[1:]).any(): raise TypeError('Can only delete entire records.') if hslice.counts[0] == 0: return start = hslice.starts[0] count = hslice.counts[0] interval = hslice.intervals[0] dimsize = hslice.dimsizes[0] self._call() dangerous_delete = False if lib._del_middle_rec_bug and \ (interval != 1 or (start != 0 and start + count < dimsize)): #delete from middle is dangerous if only have one index entry entries = ctypes.c_long(0) lib.call(const.GET_, const.zVAR_nINDEXENTRIES_, ctypes.byref(entries)) dangerous_delete = (entries.value == 1) if dangerous_delete: data = self[...] 
data = numpy.delete( data, numpy.arange(start, start + count * interval, interval), 0) self[0:dimsize - count] = data first_rec = dimsize - count last_rec = dimsize - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif interval == 1: first_rec = ctypes.c_long(start) last_rec = ctypes.c_long(start + count - 1) lib.call(const.DELETE_, const.zVAR_RECORDS_, first_rec, last_rec) else: self._call() #delete from end to avoid renumbering of records for recno in range(start + (count - 1) * interval, start - 1, -1 * interval): lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(recno), ctypes.c_long(recno)) def __setitem__(self, key, data): """Puts a slice into the data array. Details under :py:class:`pycdf.Var`. @param key: index or slice to store @type key: int or slice @param data: data to store @type data: numpy.array @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. IndexError will @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) n_recs = hslice.counts[0] hslice.expand(data) cdf_type = self.type() if cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) else: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=self._np_type()) if cdf_type == const.CDF_EPOCH16.value: datashape = data.shape[:-1] else: datashape = data.shape #Check data sizes if datashape != tuple(hslice.expected_dims()): raise ValueError('attempt to assign data of dimensions ' + str(datashape) + ' to slice of dimensions ' + str(tuple(hslice.expected_dims()))) #Flip majority and reversed dimensions, see convert_input_array data = hslice.convert_output_array(data) #Handle insertions and similar weirdness if hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Specified slice ends before last record, so insert in middle saved_data = self[hslice.starts[0] + n_recs:] if hslice.counts[0] > 0: hslice.select() lib.call(const.PUT_, const.zVAR_HYPERDATA_, data.ctypes.data_as(ctypes.c_void_p)) if hslice.counts[0] < n_recs: first_rec = hslice.starts[0] + hslice.counts[0] last_rec = hslice.dimsizes[0] - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Put saved data in after inserted data self[hslice.starts[0] + hslice.counts[0]:] = saved_data def extend(self, data): """ Append multiple values to the end of this variable This is an efficiency function which overrides the base implementation in MutableSequence. 
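For example (a sketch reusing the ``Flux`` variable, with dimensions [2, 3], from the class documentation), two complete records can be appended at once:

>>> Flux.extend([[[1, 2, 3], [4, 5, 6]],
...              [[11, 12, 13], [14, 15, 16]]])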
Parameters ---------- data : the data to append """ self[len(self):] = data def insert(self, index, data): """ Inserts a *single* record before an index Parameters ---------- index : int index before which to insert the new record data : the record to insert """ self[index:index] = [data] def _create(self, var_name, datatype, n_elements = 1, dims = (), recVary = const.VARY, dimVarys = None): """Creates a new zVariable @param var_name: name of this variable @type var_name: string @param datatype: CDF data type @type datatype: ctypes.c_long @param n_elements: number of elements (should be 1 except for CDF_CHAR variables). @type n_elements: long @param dims: size of each dimension for multi-dimensional variable, or empty for a zero-dimensional @type dims: sequence of long @param recVary: record variance for this variable (VARY/NOVARY) @type recVary: long @param dimVarys: array of VARY or NOVARY, variance for each dimension @type dimVarys: sequence of long @return: new variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.new}. """ dim_array = (ctypes.c_long * len(dims))(*dims) enc_name = var_name.encode('ascii') if dimVarys is None: dim_vary_array = (ctypes.c_long * (len(dims) if len(dims) > 0 else 1))(const.VARY) else: dim_vary_array = (ctypes.c_long * len(dims))(*dimVarys) varNum = ctypes.c_long(0) self.cdf_file._call(const.CREATE_, const.zVAR_, enc_name, datatype, ctypes.c_long(n_elements), ctypes.c_long(len(dims)), dim_array, recVary, dim_vary_array, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) def _delete(self): """Removes this zVariable from the CDF @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ self._call(const.DELETE_, const.zVAR_) self.cdf_file.clear_from_cache(self._name) self._name = None def _get(self, var_name): """Gets an existing zVariable @param var_name: name of this variable @type var_name: string @return: variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.__getitem__}. 
""" if isinstance(var_name, str_classes): try: enc_name = var_name.encode('ascii').rstrip() except AttributeError: enc_name = var_name.rstrip() #already in ASCII #'touch' CDF to cause an error if the name isn't there; get number varNum = ctypes.c_long(0) self.cdf_file._call(const.GET_, const.zVAR_NUMBER_, enc_name, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) else: #Looking up by number name = ctypes.create_string_buffer(const.CDF_VAR_NAME_LEN256+1) self.cdf_file._call(const.SELECT_, const.zVAR_, ctypes.c_long(var_name), const.GET_, const.zVAR_NAME_, name) self._name = name.value.rstrip() self.cdf_file.add_to_cache(self._name, var_name) def _num(self): """Returns the zVar number for this variable @return: number of this zVar @rtype: int """ return self.cdf_file.var_num(self._name) def __len__(self): """Get number of records for this variable in this file @return: Number of records @rtype: long @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ count = ctypes.c_long(0) self._call(const.GET_, const.zVAR_MAXREC_, ctypes.byref(count)) return (count.value + 1) def __repr__(self): """Returns representation of the variable Cannot return anything that can be eval'd to create a copy, so just wrap the informal representation in angle brackets. @return: info on this zVar @rtype: str """ return '<Var:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the variable This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.Var`. @return: info on this zVar, CDFTYPE [dimensions] NRV (if not record-varying) @rtype: str """ if self.cdf_file._opened: cdftype = self.type() chartypes = (const.CDF_CHAR.value, const.CDF_UCHAR.value) rv = self.rv() typestr = lib.cdftypenames[cdftype] + \ ('*' + str(self._nelems()) if cdftype in chartypes else '' ) if rv: sizestr = str([len(self)] + self._dim_sizes()) else: sizestr = str(self._dim_sizes()) return typestr + ' ' + sizestr + ('' if rv else ' NRV') else: if isinstance(self._name, str): return 'zVar "{0}" in closed CDF {1}'.format( self._name, self.cdf_file.pathname) else: return 'zVar "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self.cdf_file.pathname.decode('ascii')) def _n_dims(self): """Get number of dimensions for this variable @return: the number of dimensions @rtype: long """ n_dims = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMDIMS_, ctypes.byref(n_dims)) return n_dims.value def _dim_sizes(self): """Get the dimension sizes for this variable @return: sequence of sizes @rtype: sequence of long @note: This will always be in Python order (i.e. row major, last index iterates most quickly), *regardless* of the majority of the CDF. """ sizes = (ctypes.c_long * const.CDF_MAX_DIMS)(0) self._call(const.GET_, const.zVAR_DIMSIZES_, sizes) sizes = sizes[0:self._n_dims()] return sizes def rv(self, new_rv=None): """ Gets or sets whether this variable has record variance If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Other Parameters ================ new_rv : boolean True to change to record variance, False to change to NRV, unspecified to simply check variance. 
Returns ======= out : Boolean True if record varying, False if NRV """ if new_rv != None: self._call(const.PUT_, const.zVAR_RECVARY_, const.VARY if new_rv else const.NOVARY) vary = ctypes.c_long(0) self._call(const.GET_, const.zVAR_RECVARY_, ctypes.byref(vary)) return vary.value != const.NOVARY.value def dv(self, new_dv=None): """ Gets or sets dimension variance of each dimension of variable. If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Parameters ========== new_dv : list of boolean Each element True to change that dimension to dimension variance, False to change to not dimension variance. (Unspecified to simply check variance.) Returns ======= out : list of boolean True if that dimension has variance, else false. """ ndims = self._n_dims() if new_dv != None: if len(new_dv) != ndims: raise ValueError('Must specify variance for ' + str(ndims) + 'dimensions.') varies = (ctypes.c_long * ndims)( *[const.VARY if dv else const.NOVARY for dv in new_dv]) self._call(const.PUT_, const.zVAR_DIMVARYS_, varies) if ndims == 0: return [] varies = (ctypes.c_long * const.CDF_MAX_DIMS)() self._call(const.GET_, const.zVAR_DIMVARYS_, varies) return [dv != const.NOVARY.value for dv in varies[0:ndims]] def _call(self, *args, **kwargs): """Select this CDF and variable and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. @param args: Passed directly to the CDF library interface. Useful constants are defined in the :py:mod:`pycdf.const` module of this package. @type args: various, see :py:mod:`ctypes`. @return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self.cdf_file._call( const.SELECT_, const.zVAR_, ctypes.c_long(self.cdf_file.var_num(self._name)), *args, **kwargs) def _np_type(self): """Returns the numpy type of this variable This is the numpy type that will come directly out of the CDF; see :meth:`dtype` for the representation post-conversion. Raises ====== CDFError : for library-reported error or failure to find numpy type Returns ======= out : dtype numpy dtype that will hold value from this variable """ cdftype = self.type() if cdftype == const.CDF_CHAR.value or cdftype == const.CDF_UCHAR.value: return numpy.dtype('S' + str(self._nelems())) try: return lib.numpytypedict[cdftype] except KeyError: raise CDFError(const.BAD_DATA_TYPE) def type(self, new_type=None): """ Returns or sets the CDF type of this variable Parameters ========== new_type : ctypes.c_long the new type from :mod:`~pycdf.const` Returns ======= out : int CDF type """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) n_elements = ctypes.c_long(self._nelems()) self._call(const.PUT_, const.zVAR_DATASPEC_, new_type, n_elements) self._type = None if self._type is None: cdftype = ctypes.c_long(0) self._call(const.GET_, const.zVAR_DATATYPE_, ctypes.byref(cdftype)) self._type = cdftype.value return self._type def _nelems(self): """Number of elements for each value in this variable This is the length of strings for CHAR and UCHAR, should be 1 otherwise. 
@return: length of strings @rtype: int """ nelems = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMELEMS_, ctypes.byref(nelems)) return nelems.value def name(self): """ Returns the name of this variable Returns ======= out : str variable's name """ if isinstance(self._name, str): return self._name elif isinstance(self._name, bytes): return self._name.decode() def compress(self, comptype=None, param=None): """ Set or check the compression of this variable Compression may not be changeable on variables with data already written; even deleting the data may not permit the change. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) Returns ======= out : tuple the (comptype, param) currently in effect """ return _compress(self, comptype, param) def copy(self): """ Copies all data and attributes from this variable Returns ======= out : :class:`VarCopy` list of all data in record order """ return VarCopy(self) def rename(self, new_name): """ Renames this variable Parameters ========== new_name : str the new name for this variable """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_VAR_NAME_LEN256: raise CDFError(const.BAD_VAR_NAME) self._call(const.PUT_, const.zVAR_NAME_, enc_name) self.cdf_file.add_to_cache( enc_name, self.cdf_file.var_num(self._name)) #Still in cache del self.cdf_file._var_nums[self._name] self._name = enc_name @property def shape(self): """ Provides the numpy array-like shape of this variable. Returns a tuple; first element is number of records (RV variable only) And the rest provide the dimensionality of the variable. .. note:: Assigning to this attribute will not change the shape. """ if self.rv(): return tuple([len(self)] + self._dim_sizes()) else: return tuple(self._dim_sizes()) @property def dtype(self): """ Provide the numpy dtype equivalent to the CDF type of this variable. Data from this variable will be returned in numpy arrays of this type. See Also -------- type """ cdftype = self.type() if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str is not bytes and not self._raw: return numpy.dtype('U' + str(self._nelems())) if cdftype in (const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value) and not self._raw: return numpy.dtype('O') return self._np_type() def _get_attrs(self): """Get attribute list Provide access to the zVar's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = zAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. 
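For example (a sketch; assumes an open, writeable CDF ``cdffile`` with a variable ``Flux``):

>>> cdffile['Flux'].attrs['UNITS'] = 'counts'
>>> 'UNITS' in cdffile['Flux'].attrs
True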
""") class _Hyperslice(object): """Represents a CDF 'slice' used for the hyper CDF functions For internal module use only. @ivar dims: number of dimensions to this slice, usually number of dimensions to the variable plus one for the record, which represents the 0th (least rapidly varying) dimension. @type dims: int @ivar dimsizes: size of each dimension (0th is number of records) @type dimsizes: list of int @ivar starts: index of the start value for each dimension ('dimension indices' in CDF speak) @type starts: list of int @ivar counts: number of values to get from each dimension. Final result will be the product of everything in counts. ('dimension counts' in CDF speak) @type counts: numpy.array @ivar intervals: interval between successive indices to use for each dimension. ('dimension invervals' in CDF speak) @type intervals: list of int @ivar degen: is this dimension degenerate, i.e. should be removed in the returned dataset. A 3D array with one dimension degenerate will be returned as a 2D array (i.e. list-of-lists.) @type degen: numpy.array @ivar rev: should this dimension be returned in reverse order? @type rev: numpy.array @ivar column: is this slice in column-major mode (if false, row-major) @type column: boolean @ivar zvar: what CDF variable this object slices on @type zvar: :py:class:`pycdf.Var` @ivar expanded_key: fully-expanded version of the key passed to the constructor (all dimensions filled in) @type expanded_key: tuple @note: All dimension-related variables are stored row-major (Python order) """ def __init__(self, zvar, key): """Create a Hyperslice @param zvar: zVariable that this slices @type zvar: :py:class:`pycdf.Var` @param key: Python multi-dimensional slice as passed to __getitem__ @type key: tuple of slice and/or int @raise IndexError: if slice is out of range, mismatches dimensions, or otherwise unparsable. @raise ValueError: if slice has invalid values """ self.zvar = zvar self.rv = self.zvar.rv() #dim of records, + 1 record dim (NRV always is record 0) self.dims = zvar._n_dims() + 1 self.dimsizes = [len(zvar)] + \ zvar._dim_sizes() self.starts = [0] * self.dims self.counts = numpy.empty((self.dims,), dtype=numpy.int32) self.counts.fill(1) self.intervals = [1] * self.dims self.degen = numpy.zeros(self.dims, dtype=numpy.bool) self.rev = numpy.zeros(self.dims, dtype=numpy.bool) #key is: #1. a single value (integer or slice object) if called 1D #2. a tuple (of integers and/or slice objects) if called nD #3. Each item is either a single value (degenerate dim) # or a slice object. 
if not hasattr(key, '__len__'): #Not a container object, pack in tuple key = (key, ) if not self.rv: key = (0, ) + key #NRV, so always get 0th record (degenerate) key = self.expand_ellipsis(key, self.dims) if self.rv: #special-cases for RV variables if len(key) == 1: #get all data for this record(s) key = self.expand_ellipsis(key + (Ellipsis, ), self.dims) elif len(key) == self.dims - 1: #get same slice from each record key = (slice(None, None, None), ) + key if len(key) == self.dims: self.expanded_key = key for i in range(self.dims): idx = key[i] if hasattr(idx, 'start'): #slice (self.starts[i], self.counts[i], self.intervals[i], self.rev[i]) = \ self.convert_range(idx.start, idx.stop, idx.step, self.dimsizes[i]) else: #Single degenerate value if idx < 0: idx += self.dimsizes[i] if idx != 0 and (idx >= self.dimsizes[i] or idx < 0): raise IndexError('list index out of range') self.starts[i] = idx self.degen[i] = True else: raise IndexError('Slice does not match dimensions for zVar ' + str(zvar._name)) self.column = zvar.cdf_file.col_major() def expected_dims(self, data=None): """Calculate size of non-degenerate dimensions Figures out size, in each dimension, of expected input data @return: size of each dimension for this slice, excluding degenerate @rtype: list of int """ return [self.counts[i] for i in range(self.dims) if not self.degen[i]] def expand(self, data): """Expands the record dimension of this slice to hold a set of data If the length of data (outermost dimension) is larger than the record count (counts[0]) for this slice, expand the slice to hold all the data. This requires that the record dimension of the slice not be degenerate, and also that it not have been completely specified when the hyperslice was created (i.e. record dimension either ellipsis or no specified stop.) Does *not* expand any other dimension, since that's Very Hard in CDF. @param data: the data which are intended to be stored in this slice @type data: list """ rec_slice = self.expanded_key[0] if not self.rv or isinstance(data, str_classes) or self.degen[0] or \ not hasattr(rec_slice, 'stop'): return if len(data) < self.counts[0]: #Truncate to fit data if rec_slice.stop is None and rec_slice.step in (None, 1): self.counts[0] = len(data) elif len(data) > self.counts[0]: #Expand to fit data if rec_slice.step in (None, 1): self.counts[0] = len(data) def create_array(self): """Creates a numpy array to hold the data from this slice Returns ======= out : numpy.array array sized, typed, and dimensioned to hold data from this slice """ counts = self.counts degen = self.degen if self.column: counts = self.reorder(counts) degen = self.reorder(degen) #TODO: Forcing C order for now, revert to using self.column later array = numpy.empty( [counts[i] for i in range(len(counts)) if not degen[i]], self.zvar._np_type(), order='C') return numpy.require(array, requirements=('C', 'A', 'W')) def convert_input_array(self, buffer): """Converts a buffer of raw data from this slice EPOCH(16) variables always need to be converted. 
CHAR need converted to Unicode if py3k Parameters ========== buffer : numpy.array data as read from the CDF file Returns ======= out : numpy.array converted data """ result = self._flip_array(buffer) #Convert to derived types cdftype = self.zvar.type() if not self.zvar._raw: if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str != bytes: dt = numpy.dtype('U{0}'.format(result.dtype.itemsize)) result = numpy.require(numpy.char.array(result).decode(), dtype=dt) elif cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(result) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(result) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(result) return result def convert_output_array(self, buffer): """Convert a buffer of data that will go into this slice Parameters ========== buffer : numpy.array data to go into the CDF file Returns ======= out : numpy.array input with majority flipped and dimensions reversed to be suitable to pass directly to CDF library. """ buffer = self._flip_array(buffer) return numpy.require(buffer, requirements=('C', 'A', 'W')) def _flip_array(self, data): """ Operations for majority, etc. common between convert_input and _output """ cdftype = self.zvar.type() #Flip majority if any non-degenerate dimensions exist if self.column and not min(self.degen): #Record-number dim degen, swap whole thing if self.degen[0]: if cdftype == const.CDF_EPOCH16.value: #Maintain last dimension data = data.transpose( list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose() #Record-number dimension is not degenerate, so keep it first else: if cdftype == const.CDF_EPOCH16.value: data = data.transpose( [0] + list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose( [0] + list(range(len(data.shape) - 1, 0, -1))) #Reverse non-degenerate dimensions in rev #Remember that the degenerate indices are already gone! if self.rev.any(): sliced = [(slice(None, None, -1) if self.rev[i] else slice(None)) for i in range(self.dims) if not self.degen[i]] if cdftype == const.CDF_EPOCH16.value: #don't reverse last dim sliced.extend(slice(None)) data = operator.getitem(data, tuple(sliced)) return data def select(self): """Selects this hyperslice in the CDF Calls the CDF library to select the CDF, variable, records, and array elements corresponding to this slice. 
""" args = (const.SELECT_, const.zVAR_RECNUMBER_, ctypes.c_long(self.starts[0]), const.SELECT_, const.zVAR_RECCOUNT_, ctypes.c_long(self.counts[0]), const.SELECT_, const.zVAR_RECINTERVAL_, ctypes.c_long(self.intervals[0])) if self.dims > 1: dims = self.dims - 1 args += (const.SELECT_, const.zVAR_DIMINDICES_, (ctypes.c_long * dims)(*self.starts[1:]), const.SELECT_, const.zVAR_DIMCOUNTS_, (ctypes.c_long * dims)(*self.counts[1:]), const.SELECT_, const.zVAR_DIMINTERVALS_, (ctypes.c_long * dims)(*self.intervals[1:])) self.zvar._call(*args) @staticmethod def expand_ellipsis(slices, n_dims): """Expands any ellipses into correct number of full-size slices @param slices: tuple of slices, integers, or ellipse objects @type slices: tuple @param n_dims: number of dimensions this slice is over @type n_dims: int @return: L{slices} with ellipses replaced by appropriate number of full-dimension slices @rtype: tuple @raise IndexError: if ellipses specified when already have enough dimensions """ if slices is Ellipsis: return tuple([slice(None, None, None) for i in range(n_dims)]) #Elements might be numpy arrays, so can't use in/index idx = [i for i, v in enumerate(slices) if v is Ellipsis] if not idx: #no ellipsis return slices if len(idx) > 1: #multiples! raise IndexError('Ellipses can only be used once per slice.') idx = idx[0] #how many dims to expand ellipsis to #remember the ellipsis is in len(slices) and must be replaced! extra = n_dims - len(slices) + 1 if extra < 0: raise IndexError('too many indices') result = slices[0:idx] + (slice(None), ) * extra + slices[idx+1:] return result @staticmethod def check_well_formed(data): """Checks if input data is well-formed, regular array""" d = numpy.asanyarray(data) if d.dtype == numpy.object: #this is probably going to be bad try: len(d.flat[0]) except TypeError: #at least it's not a list pass else: raise ValueError( 'Data must be well-formed, regular array of number, ' 'string, or datetime') @staticmethod def dimensions(data): """Finds the dimensions of a nested list-of-lists @param data: data of which dimensions are desired @type data: list (of lists) @return: dimensions of L{data}, in order outside-in @rtype: list of int @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) _Hyperslice.check_well_formed(d) return d.shape @staticmethod def types(data, backward=False): """Find dimensions and valid types of a nested list-of-lists Any given data may be representable by a range of CDF types; infer the CDF types which can represent this data. This breaks down to: 1. Proper kind (numerical, string, time) 2. Proper range (stores highest and lowest number) 3. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: 1. Type that matches precision of data first, then 2. integer type before float type, then 3. Smallest type first, then 4. signed type first, then 5. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if L{data} specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: 1. absolute values between 0 and 3e-39 2. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. 
@param data: data for which dimensions and CDF types are desired @type data: list (of lists) @param backward: limit to pre-CDF3 types @type backward: bool @return: dimensions of L{data}, in order outside-in; CDF types which can represent this data; number of elements required (i.e. length of longest string) @rtype: 3-tuple of lists ([int], [ctypes.c_long], [int]) @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) dims = d.shape elements = 1 types = [] _Hyperslice.check_well_formed(d) if d.dtype.kind in ('S', 'U'): #it's a string types = [const.CDF_CHAR, const.CDF_UCHAR] elements = d.dtype.itemsize if d.dtype.kind == 'U': #UTF-8 uses 4 bytes per elements //= 4 elif d.size and hasattr(numpy.ma.getdata(d).flat[0], 'microsecond'): if max((dt.microsecond % 1000 for dt in d.flat)) > 0: types = [const.CDF_EPOCH16, const.CDF_EPOCH, const.CDF_TIME_TT2000] else: types = [const.CDF_EPOCH, const.CDF_EPOCH16, const.CDF_TIME_TT2000] if backward: del types[types.index(const.CDF_EPOCH16)] del types[-1] elif not lib.supports_int8: del types[-1] elif d is data or isinstance(data, numpy.generic): #numpy array came in, use its type (or byte-swapped) types = [k for k in lib.numpytypedict if (lib.numpytypedict[k] == d.dtype or lib.numpytypedict[k] == d.dtype.newbyteorder()) and not k in lib.timetypes] if (not lib.supports_int8 or backward) \ and const.CDF_INT8.value in types: del types[types.index(const.CDF_INT8.value)] #Maintain priority to match the ordered lists below: #float/double (44, 45) before real (21/22), and #byte (41) before int (1) before char (51). So hack. #Consider making typedict an ordered dict once 2.6 is dead. types.sort(key=lambda x: x % 50, reverse=True) if not types: #not a numpy array, or can't parse its type if d.dtype.kind == 'O': #Object. 
Try to make it numeric #Can't do safe casting from Object, so try and compare #Basically try most restrictive to least restrictive trytypes = (numpy.uint64, numpy.int64, numpy.float64) for t in trytypes: try: newd = d.astype(dtype=t) except: #Failure to cast, try next type continue if (newd == d).all(): #Values preserved, use this type d = newd #Continue with normal guessing, as if a list break else: #fell through without a match raise ValueError( 'Cannot convert generic objects to CDF type.') if d.dtype.kind in ('i', 'u'): #integer minval = numpy.min(d) maxval = numpy.max(d) if minval < 0: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_INT2, const.CDF_INT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 15, 2 ** 31, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] else: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_UINT1, const.CDF_INT2, const.CDF_UINT2, const.CDF_INT4, const.CDF_UINT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 8, 2 ** 15, 2 ** 16, 2 ** 31, 2 ** 32, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] types = [t for (t, c) in zip(types, cutoffs) if c > maxval and (minval >= 0 or minval >= -c)] if (not lib.supports_int8 or backward) \ and const.CDF_INT8 in types: del types[types.index(const.CDF_INT8)] else: #float if dims is (): if d != 0 and (abs(d) > 1.7e38 or abs(d) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] else: absolutes = numpy.abs(d[d != 0]) if len(absolutes) > 0 and \ (numpy.max(absolutes) > 1.7e38 or numpy.min(absolutes) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] types = [t.value if hasattr(t, 'value') else t for t in types] return (dims, types, elements) @staticmethod def reorder(seq): """Reorders seq to switch array majority Used to take an array of subscripts between row and column majority. First element is not touched, being the record number. @param seq: a sequence of *subscripts* @type seq: sequence of integers @return: seq with all but element 0 reversed in order @rtype: sequence of integers """ return numpy.concatenate((seq[0:1], numpy.flipud(seq)[:-1])) @staticmethod def convert_range(start, stop, step, size): """Converts a start/stop/step range to start/count/interval (i.e. changes from Python-style slice to CDF-style) @param start: index to start a slice at, may be none or negative @type start: int @param stop: index at end of slice (one-past, standard Python), may be none or negative @type stop: int @param step: interval for stepping through stlice @type step: int @param size: size of list to slice @type size: int @return: (start, count, interval, rev) where: 1. start is the start index, normalized to be within the size of the list and negatives handled 2. count is the number of records in the slice, guaranteed to stop before the end 3. interval is the skip between records 4. rev indicates whether the sequence should be reversed @rtype: (int, int, int, boolean) """ (start, stop, step) = slice(start, stop, step).indices(size) if step < 0: step *= -1 count = int((start - stop + step - 1) / step) start = start - (count - 1) * step rev = True else: count = int((stop - start + step - 1) / step) rev = False if count < 0: count = 0 start = 0 return (start, count, step, rev) class Attr(MutableSequence): """An attribute, g or z, for a CDF .. 
warning:: This class should not be used directly, but only in its subclasses, :class:`gAttr` and :class:`zAttr`. The methods listed here are safe to use in the subclasses. Represents a CDF attribute, providing access to the Entries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. An introduction to CDF attributes can be found in section 2.4 of the CDF user's guide. Each element of the list is a single Entry of the appropriate type. The index to the elements is the Entry number. Multi-dimensional slicing is *not* supported; an Entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry .. autosummary:: ~Attr.append ~Attr.has_entry ~Attr.insert ~Attr.max_idx ~Attr.new ~Attr.number ~Attr.rename ~Attr.type .. automethod:: append .. automethod:: has_entry .. automethod:: insert .. automethod:: max_idx .. automethod:: new .. automethod:: number .. automethod:: rename .. automethod:: type """ def __init__(self, cdf_file, attr_name, create=False): """Initialize this attribute @param cdf_file: CDF file containing this attribute @type cdf_file: :py:class:`pycdf.CDF` @param attr_name: Name of this attribute @type attr_name: str @param create: True to create attribute, False to look up existing. @type create: bool """ self._cdf_file = cdf_file self._raw = False if isinstance(attr_name, str_classes): try: self._name = attr_name.encode('ascii') except AttributeError: self._name = attr_name attrno = ctypes.c_long() if create: self._cdf_file._call(const.CREATE_, const.ATTR_, self._name, self.SCOPE, ctypes.byref(attrno)) self._cdf_file.add_attr_to_cache( self._name, attrno.value, self.SCOPE == const.GLOBAL_SCOPE) else: #Ensure exists, and populate cache. See scope note below attrno, scope = self._cdf_file.attr_num(self._name) else: name = ctypes.create_string_buffer(const.CDF_ATTR_NAME_LEN256 + 1) scope = ctypes.c_long(0) self._cdf_file._call(const.SELECT_, const.ATTR_, ctypes.c_long(attr_name)) #Because it's possible to create a gAttr Python objecting #referencing an Attribute with variable scope, and vice-versa, #do NOT assume the scope matches #(Higher level code checks for that being a bad thing.) self._cdf_file._call( const.GET_, const.ATTR_NAME_, name, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) self._name = name.value.rstrip() if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) self._cdf_file.add_attr_to_cache(self._name, attr_name, scope) def __getitem__(self, key): """Return a slice of Entries. Because Attributes may be sparse, a multi-element slice will return None for those elements which do not have associated Entries. @param key: index or range of Entry number to return @type key: slice or int @return: a list of entries, appropriate type. @raise IndexError: if L{key} is an int and that Entry number does not exist. 
""" if key is Ellipsis: key = slice(None, None, None) if hasattr(key, 'indices'): idx = range(*key.indices(self.max_idx() + 1)) return [self._get_entry(i) if self.has_entry(i) else None for i in idx] else: if self.has_entry(key): return self._get_entry(key) else: raise IndexError('list index ' + str(key) + ' out of range.') def _check_other_entries(self, types): """Try to get the type of this entry from others in the Attribute For zAttrs, checks if all other Entries are the same type, and at least one doesn't match its zVar, i.e. Entry type dominates (otherwise assumption is the Var type dominates). For gAttrs, checks all other Entries, and gives priority to the one that's earliest in the possible type list and exists in other Entries. This is only one component of Entry type guessing! :param list types: CDF types that are candidates (match the data) :return: The type discerned from other Entries, or None """ if self.ENTRY_ == const.zENTRY_: #If everything else is the same entry type, #and one is not the same as its var, probably #all entries should be of that type cand_et = None #The Entry type that might work one_var_diff = False #One Var has a type different from Entry for num in range(self.max_idx() + 1): if not self.has_entry(num): continue vartype = self._cdf_file[num].type() entrytype = self.type(num) if vartype != entrytype: one_var_diff = True if cand_et is None: if not entrytype in types: return None #One var has Entry with "impossible" type cand_et = entrytype elif cand_et != entrytype: return None #Two vars have Entries with different types if one_var_diff and cand_et is not None: return cand_et else: # Of those types which exist in other entries, # find the one which is earliest # in types, i.e. the preferred type entrytypes = [self.type(num) for num in range(self.max_idx() + 1) if self.has_entry(num)] entrytypes = [et for et in entrytypes if et in types] if entrytypes: return types[ min([types.index(et) for et in entrytypes])] return None def __setitem__(self, key, data): """Set a slice of Entries. @param key: index or range of Entry numbers to set @type key: slice or int @param data: the data to set these entries to. Normally each entry should be a sequence; if a scalar is provided, it is treated as a single-element list. @type data: scalar or list @raise ValueError: if size of {data} does not match size of L{key} @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. 
""" if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): #Single value, promote everything a dimension idx = (key, key + 1, 1) data = [data] else: idx = key.indices(self.max_idx() + 1) if key.step is None or key.step > 0: #Iterating forward, extend slice to match data if len(data) > len(range(*idx)): idx = (idx[0], idx[0] + idx[2] * len(data), idx[2]) #get, and check, types and sizes for all data #checks first so don't have error after changing half the Entries data_idx = -1 typelist = [] for i in range(*idx): data_idx += 1 if data_idx >= len(data): continue datum = data[data_idx] if datum is None: typelist[i] = (None, None, None) continue (dims, types, elements) = _Hyperslice.types( datum, backward=self._cdf_file.backward) if len(types) <= 0: raise ValueError('Cannot find a matching CDF type.') if len(dims) > 1: raise ValueError('Entries must be scalar or 1D.') elif len(dims) == 1 and isinstance(datum[0], str_classes): raise ValueError('Entry strings must be scalar.') entry_type = None if self.has_entry(i): #If the entry already exists, match its type entry_type = self.type(i) if not entry_type in types: entry_type = None if entry_type is None: #Check other entries for this attribute entry_type = self._check_other_entries(types) if entry_type is None and self.ENTRY_ == const.zENTRY_: #Fall back to zVar type vartype = self._cdf_file[i].type() if vartype in types: entry_type = vartype else: entry_type = types[0] elif entry_type is None: entry_type = types[0] if not entry_type in lib.numpytypedict: raise ValueError('Cannot find a matching numpy type.') typelist.append((dims, entry_type, elements)) data_idx = -1 for i in range(*idx): data_idx += 1 if data_idx >= len(data) or data[data_idx] is None: if self.has_entry(i): del self[i] continue datum = data[data_idx] (dims, entry_type, elements) = typelist[data_idx] self._write_entry(i, datum, entry_type, dims, elements) def __delitem__(self, key): """Delete a slice of Entries. @param key: index or range of Entry numbers to delete @type key: slice or int @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. """ if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): idx = (key, key + 1, 1) else: idx = key.indices(self.max_idx() + 1) for i in range(*idx): self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(i), const.DELETE_, self.ENTRY_) def __iter__(self, current=0): """Iterates over all entries in this Attribute Returns data from one entry at a time until reaches the end. @note: Returned in entry-number order. """ while current <= self.max_idx(): if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current += 1 def __reversed__(self, current=None): """Iterates over all entries in this Attribute Returns data from one entry at a time, starting at end and going to beginning. @note: Returned in entry-number order. """ if current is None: current = self.max_idx() while current >= 0: if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current -= 1 def __len__(self): """Number of Entries for this Attr. NOT same as max Entry number. 
@return: Number of Entries @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_NUMENTRIES_, ctypes.byref(count)) return count.value def __repr__(self): """Returns representation of an attribute Cannot return anything that can be eval'd to create a copy of the attribtute, so just wrap the informal representation in angle brackets. @return: all the data in this attribute @rtype: str """ return '<\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute This is an 'informal' representation in that it cannot be evaluated directly to create an L{Attr}. @return: all the data in this attribute @rtype: str """ if self._cdf_file._opened: return '\n'.join([str(item) for item in self]) else: if isinstance(self._name, str): return 'Attribute "{0}" in closed CDF {1}'.format( self._name, self._cdf_file.pathname) else: return 'Attribute "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self._cdf_file.pathname.decode('ascii')) def insert(self, index, data): """Insert an entry at a particular number Inserts entry at particular number while moving all subsequent entries to one entry number later. Does not close gaps. Parameters ========== index : int index where to put the new entry data : data for the new entry """ max_entry = self.max_idx() if index > max_entry: #Easy case self[index] = data return for i in range(max_entry, index - 1, -1): if self.has_entry(i+1): self.__delitem__(i+1) if self.has_entry(i): self.new(self.__getitem__(i), type=self.type(i), number=i+1) self[index] = data def append(self, data): """Add an entry to end of attribute Puts entry after last defined entry (does not fill gaps) Parameters ========== data : data for the new entry """ self[self.max_idx() + 1] = data def _call(self, *args, **kwargs): """Select this CDF and Attr and call the CDF internal interface @param args: Passed directly to the CDF library interface. @type args: various, see :py:mod:`ctypes`. @return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self._cdf_file._call( const.SELECT_, const.ATTR_, ctypes.c_long(self._cdf_file.attr_num(self._name)[0]), *args, **kwargs) def _entry_len(self, number): """Number of elements in an Entry @param number: number of Entry @type number: int @return: number of elements @rtype: int """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') count = ctypes.c_long(0) self._call( const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_NUMELEMS_, ctypes.byref(count)) return count.value def type(self, number, new_type=None): """Find or change the CDF type of a particular Entry number Parameters ========== number : int number of Entry to check or change Other Parameters ================ new_type type to change this Entry to, from :mod:`~pycdf.const`. Omit to only check type. Returns ======= out : int CDF variable type, see :mod:`~pycdf.const` Notes ===== If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) size = ctypes.c_long(self._entry_len(number)) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATASPEC_, new_type, size, ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') cdftype = ctypes.c_long(0) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATATYPE_, ctypes.byref(cdftype), ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') return cdftype.value def has_entry(self, number): """Check if this attribute has a particular Entry number Parameters ========== number : int number of Entry to check or change Returns ======= out : bool True if ``number`` is a valid entry number; False if not """ status = self._call(const.CONFIRM_, self.ENTRY_EXISTENCE_, ctypes.c_long(number), ignore=(const.NO_SUCH_ENTRY, )) return not status == const.NO_SUCH_ENTRY def max_idx(self): """Maximum index of Entries for this Attr Returns ======= out : int maximum Entry number """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_MAXENTRY_, ctypes.byref(count)) return count.value def new(self, data, type=None, number=None): """Create a new Entry in this Attribute .. note:: If ``number`` is provided and an Entry with that number already exists, it will be overwritten. Parameters ========== data data to put in the Entry Other Parameters ================ type : int type of the new Entry, from :mod:`~pycdf.const` (otherwise guessed from ``data``) number : int Entry number to write, default is lowest available number. """ if number is None: number = 0 while self.has_entry(number): number += 1 (dims, types, elements) = _Hyperslice.types( data, backward=self._cdf_file.backward) if type is None: #Guess based on other entries type = self._check_other_entries(types) if type is None and self.ENTRY_ == const.zENTRY_: #Try to match variable type vartype = self._cdf_file[number].type() if vartype in types: type = vartype if type is None: type = types[0] elif hasattr(type, 'value'): type = type.value self._write_entry(number, data, type, dims, elements) def number(self): """Find the attribute number for this attribute Returns ======= out : int attribute number """ no = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.ATTR_NUMBER_, self._name, ctypes.byref(no)) return no.value def global_scope(self): """Determine scope of this attribute. Returns ======= out : bool True if global (i.e. gAttr), False if zAttr """ return self._cdf_file.attr_num(self._name)[1] def rename(self, new_name): """Rename this attribute Renaming a zAttribute renames it for *all* zVariables in this CDF! 
Parameters ========== new_name : str the new name of the attribute """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_ATTR_NAME_LEN256: raise CDFError(const.BAD_ATTR_NAME) self._call(const.PUT_, const.ATTR_NAME_, enc_name) self._cdf_file.add_attr_to_cache( enc_name, *self._cdf_file.attr_num(self._name)) #still in cache del self._cdf_file._attr_info[self._name] self._name = enc_name def _get_entry(self, number): """Read an Entry associated with this L{Attr} @param number: number of Entry to return @type number: int @return: data from entry numbered L{number} @rtype: list or str """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') #Make a big enough buffer length = self._entry_len(number) cdftype = self.type(number) if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): buff = numpy.empty((), 'S{0}'.format(length), order='C') else: if not cdftype in lib.numpytypedict: raise CDFError(const.BAD_DATA_TYPE) buff = numpy.empty((length,), lib.numpytypedict[cdftype], order='C') buff = numpy.require(buff, requirements=('C', 'A', 'W')) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATA_, buff.ctypes.data_as(ctypes.c_void_p)) #decode if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): if str == bytes or self._raw: #Py2k, leave as bytes result = bytes(buff) else: #Py3k, make unicode result = str(numpy.char.array(buff).decode()) else: if not self._raw: if cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(buff) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(buff) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(buff) else: result = buff else: result = buff if length == 1: result = result[0] return result def _write_entry(self, number, data, cdf_type, dims, elements): """Write an Entry to this Attr. @param number: number of Entry to write @type number: int @param data: data to write @param cdf_type: the CDF type to write, from :py:mod:`pycdf.const` @param dims: dimensions of L{data} @type dims: list @param elements: number of elements in L{data}, 1 unless it is a string @type elements: int """ if len(dims) == 0: n_write = 1 else: n_write = dims[0] if cdf_type in (const.CDF_CHAR.value, const.CDF_UCHAR.value): data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.dtype('S' + str(elements))) n_write = elements elif cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data), except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) elif cdf_type in lib.numpytypedict: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=lib.numpytypedict[cdf_type]) else: raise CDFError(const.BAD_DATA_TYPE) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATA_, ctypes.c_long(cdf_type), ctypes.c_long(n_write), data.ctypes.data_as(ctypes.c_void_p)) def _delete(self): """Delete this Attribute Also deletes all Entries associated with it. 
""" self._call(const.DELETE_, const.ATTR_) self._cdf_file.clear_attr_from_cache(self._name) self._name = None class zAttr(Attr): """zAttribute for zVariables within a CDF. .. warning:: Because zAttributes are shared across all variables in a CDF, directly manipulating them may have unexpected consequences. It is safest to operate on zEntries via :class:`zAttrList`. .. note:: When accessing a zAttr, pyCDF exposes only the zEntry corresponding to the associated zVariable. See Also ======== :class:`Attr` """ ENTRY_ = const.zENTRY_ ENTRY_DATA_ = const.zENTRY_DATA_ SCOPE = const.VARIABLE_SCOPE ENTRY_EXISTENCE_ = const.zENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMzENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXzENTRY_ ENTRY_NUMELEMS_ = const.zENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.zENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.zENTRY_DATASPEC_ def insert(self, index, data): """Insert entry at particular index number Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError def append(self, index, data): """Add entry to end of attribute list Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError class gAttr(Attr): """Global Attribute for a CDF Represents a CDF attribute, providing access to the gEntries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. Normally accessed by providing a key to a :class:`gAttrList`: >>> attribute = cdffile.attrs['attribute_name'] >>> first_gentry = attribute[0] Each element of the list is a single gEntry of the appropriate type. The index to the elements is the gEntry number. A gEntry may be either a single string or a 1D array of numerical type. Entries of numerical type (everything but CDF_CHAR and CDF_UCHAR) with a single element are returned as scalars; multiple-element entries are returned as a list. No provision is made for accessing below the entry level; the whole list is returned at once (but Python's slicing syntax can be used to extract individual items from that list.) Multi-dimensional slicing is *not* supported; an entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry gEntries are *not* necessarily contiguous; a gAttribute may have an entry 0 and entry 2 without an entry 1. :meth:`~Attr.len` will return the *number* of gEntries; use :meth:`~Attr.max_idx` to find the highest defined gEntry number and :meth:`~Attr.has_entry` to determine if a particular gEntry number exists. Iterating over all entries is also supported:: >>> entrylist = [entry for entry in attribute] Deleting gEntries will leave a "hole": >>> attribute[0:3] = [1, 2, 3] >>> del attribute[1] >>> attribute.has_entry(1) False >>> attribute.has_entry(2) True >>> print attribute[0:3] [1, None, 3] Multi-element slices over nonexistent gEntries will return ``None`` where no entry exists. Single-element indices for nonexistent gEntries will raise ``IndexError``. Assigning ``None`` to a gEntry will delete it. 
When assigning to a gEntry, the type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing gEntry of the same number in this gAttribute #. other gEntries in this gAttribute #. data-matching constraints described in :meth:`CDF.new`. See Also ======== :class:`Attr` """ ENTRY_ = const.gENTRY_ ENTRY_DATA_ = const.gENTRY_DATA_ SCOPE = const.GLOBAL_SCOPE ENTRY_EXISTENCE_ = const.gENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMgENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXgENTRY_ ENTRY_NUMELEMS_ = const.gENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.gENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.gENTRY_DATASPEC_ class AttrList(MutableMapping): """Object representing a list of attributes. .. warning:: This class should not be used directly, but only via its subclasses, :class:`gAttrList` and :class:`zAttrList`. Methods listed here are safe to use from the subclasses. .. autosummary:: ~AttrList.clone ~AttrList.copy ~AttrList.from_dict ~AttrList.new ~AttrList.rename .. automethod:: clone .. automethod:: copy .. automethod:: from_dict .. automethod:: new .. automethod:: rename """ def __init__(self, cdf_file, special_entry=None): """Initialize the attribute collection @param cdf_file: CDF these attributes are in @type cdf_file: :py:class:`pycdf.CDF` @param special_entry: callable which returns a "special" entry number, used to limit results for zAttrs to those which match the zVar (i.e. the var number) @type special_entry: callable """ self._cdf_file = cdf_file self.special_entry = special_entry def __getitem__(self, name): """Find an Attribute by name @param name: name of the Attribute to return @type name: str @return: attribute named L{name} @rtype: L{Attr} @raise KeyError: if there is no attribute named L{name} @raise CDFError: other errors in CDF library """ try: attrib = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attrib.global_scope() != self.global_scope: raise KeyError(name + ': no ' + self.attr_name + ' by that name.') return attrib def __setitem__(self, name, data): """Create an Attribute or change its entries @param name: name of Attribute to change @type name: str @param data: Entries to populate this Attribute with. Any existing Entries will be deleted! Another C{Attr} may be specified, in which case all its entries are copied. @type data: scalar, list, or L{Attr} """ if isinstance(data, AttrList): if name in self: del self[name] attr = self._get_or_create(name) for entryno in range(data.max_idx()): if data.has_entry(entryno): attr.new(data[entryno], data.type(entryno), entryno) else: attr = self._get_or_create(name) if isinstance(data, str_classes): data = [data] else: try: junk = len(data) except TypeError: data = [data] attr[:] = data del attr[len(data):] def __delitem__(self, name): """Delete an Attribute (and all its entries) @param name: name of Attribute to delete @type name: str """ try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) attr._delete() def __iter__(self, current=0): """Iterates over all Attr in this CDF or variable Returns name of one L{Attr} at a time until reaches the end. @note: Returned in number order. 
""" count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) while current < count.value: candidate = self.AttrType(self._cdf_file, current) if candidate.global_scope() == self.global_scope: if self.special_entry is None or \ candidate.has_entry(self.special_entry()): if str == bytes: value = yield(candidate._name) else: value = yield(candidate._name.decode()) if value != None: current = self[value].number() current += 1 def __repr__(self): """Returns representation of attribute list Cannot return anything that can be eval'd to create a copy of the list, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<' + self.__class__.__name__ + ':\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute list This is an 'informal' representation in that it cannot be evaluated directly to create an L{AttrList}. @return: all the data in this list of attributes @rtype: str """ if self._cdf_file._opened: return '\n'.join([key + ': ' + ( ('\n' + ' ' * (len(key) + 2)).join( [str(value[i]) + ' [' + lib.cdftypenames[value.type(i)] + ']' for i in range(value.max_idx() + 1) if value.has_entry(i)]) if isinstance(value, Attr) else str(value) + ' [' + lib.cdftypenames[self.type(key)] + ']' ) for (key, value) in sorted(self.items())]) else: if isinstance(self._cdf_file.pathname, str): return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname) else: return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname.decode('ascii')) def clone(self, master, name=None, new_name=None): """ Clones another attribute list, or one attribute from it, into this list. Parameters ========== master : AttrList the attribute list to copy from. This can be any dict-like object. Other Parameters ================ name : str (optional) name of attribute to clone (default: clone entire list) new_name : str (optional) name of the new attribute, default ``name`` """ if name is None: self._clone_list(master) else: self._clone_attr(master, name, new_name) def copy(self): """ Create a copy of this attribute list Returns ======= out : dict copy of the entries for all attributes in this list """ return dict((key, value[:] if isinstance(value, Attr) else value) for (key, value) in self.items()) def new(self, name, data=None, type=None): """ Create a new Attr in this AttrList Parameters ========== name : str name of the new Attribute Other Parameters ================ data data to put into the first entry in the new Attribute type CDF type of the first entry from :mod:`~pycdf.const`. Only used if data are specified. Raises ====== KeyError : if the name already exists in this list """ if name in self: raise KeyError(name + ' already exists.') #A zAttr without an Entry in this zVar will be a "get" not "create" attr = self._get_or_create(name) if data is not None: if self.special_entry is None: attr.new(data, type) else: attr.new(data, type, self.special_entry()) def rename(self, old_name, new_name): """ Rename an attribute in this list Renaming a zAttribute renames it for *all* zVariables in this CDF! Parameters ========== old_name : str the current name of the attribute new_name : str the new name of the attribute """ AttrList.__getitem__(self, old_name).rename(new_name) def from_dict(self, in_dict): """ Fill this list of attributes from a dictionary .. deprecated:: 0.1.5 Use :meth:`~pycdf.AttrList.clone` instead; it supports cloning from dictionaries. 
Parameters ========== in_dict : dict Attribute list is populated entirely from this dictionary; all existing attributes are deleted. """ warnings.warn("from_dict is deprecated and will be removed. Use clone.", DeprecationWarning) for k in in_dict: self[k] = in_dict[k] for k in list(self): if not k in in_dict: del self[k] def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{AttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name self[new_name] = master[name] def _clone_list(self, master): """Clones this attribute list from another @param master: the attribute list to copy from @type master: L{AttrList} """ for name in master: self._clone_attr(master, name) for name in list(self): #Can't iterate over a list we're changing if not name in master: del self[name] def _get_or_create(self, name): """Retrieve L{Attr} or create it if it doesn't exist @param name: name of the attribute to look up or create @type name: str @return: attribute with this name @rtype: L{Attr} """ attr = None try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status != const.NO_SUCH_ATTR: raise if attr is None: attr = self.AttrType(self._cdf_file, name, True) elif attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) return attr class gAttrList(AttrList): """ Object representing *all* the gAttributes in a CDF. Normally accessed as an attribute of an open :class:`CDF`: >>> global_attribs = cdffile.attrs Appears as a dictionary: keys are attribute names; each value is an attribute represented by a :class:`gAttr` object. To access the global attribute TEXT: >>> text_attr = cdffile.attrs['TEXT'] See Also ======== :class:`AttrList` """ AttrType = gAttr attr_name = 'gAttribute' global_scope = True def __len__(self): """ Number of gAttributes in this CDF Returns ======= out : int number of gAttributes in the CDF """ count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMgATTRS_, ctypes.byref(count)) return count.value class zAttrList(AttrList): """Object representing *all* the zAttributes in a zVariable. Normally accessed as an attribute of a :class:`Var` in an open CDF: >>> epoch_attribs = cdffile['Epoch'].attrs Appears as a dictionary: keys are attribute names, values are the value of the zEntry associated with the appropriate zVariable. Each vAttribute in a CDF may only have a *single* entry associated with each variable. The entry may be a string, a single numerical value, or a series of numerical values. Entries with multiple values are returned as an entire list; direct access to the individual elements is not possible. Example: finding the first dependency of (ISTP-compliant) variable ``Flux``: >>> print cdffile['Flux'].attrs['DEPEND_0'] zAttributes are shared among zVariables, one zEntry allowed per zVariable. (pyCDF hides this detail.) Deleting the last zEntry for a zAttribute will delete the underlying zAttribute. zEntries are created and destroyed by the usual dict methods on the zAttrlist: >>> epoch_attribs['new_entry'] = [1, 2, 4] #assign a list to new zEntry >>> del epoch_attribs['new_entry'] #delete the zEntry The type of the zEntry is guessed from data provided. 
The type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing zEntry corresponding to this zVar #. other zEntries in this zAttribute #. the type of this zVar #. data-matching constraints described in :py:meth:`CDF.new` See Also ======== :class:`AttrList` """ AttrType = zAttr attr_name = 'zAttribute' global_scope = False def __init__(self, zvar): """Initialize the attribute collection @param zvar: zVariable these attributes are in @param zvar: :py:class:`pycdf.Var` """ super(zAttrList, self).__init__(zvar.cdf_file, zvar._num) self._zvar = zvar def __getitem__(self, name): """Find an zEntry by name @param name: name of the zAttribute to return @type name: str @return: attribute named L{name} @rtype: L{zAttr} @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if attrib.has_entry(zvar_num): attrib._raw = self._zvar._raw return attrib[zvar_num] else: raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) def __delitem__(self, name): """Delete an zEntry by name @param name: name of the zEntry to delete @type name: str @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library @note: If this is the only remaining entry, the Attribute will be deleted. """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(str(name) + ': no such attribute for variable ' + str(self._zvar._name)) del attrib[zvar_num] if len(attrib) == 0: attrib._delete() def __setitem__(self, name, data): """Sets a zEntry by name The type of the zEntry is guessed from L{data}. The type is chosen to match the data; subject to that constraint, it will try to match (in order): 1. existing zEntry corresponding to this zVar 2. other zEntries in this zAttribute 3. the type of this zVar 4. data-matching constraints described in L{_Hyperslice.types} @param name: name of zAttribute; zEntry for this zVariable will be set in zAttribute by this name @type name: str @raise CDFError: errors in CDF library @raise ValueError: if unable to find a valid CDF type matching L{data}, or if L{data} is the wrong dimensions. """ try: attr = super(zAttrList, self).__getitem__(name) except KeyError: attr = zAttr(self._cdf_file, name, True) attr._raw = self._zvar._raw attr[self._zvar._num()] = data def __len__(self): """Number of zAttributes in this variable @return: number of zAttributes in the CDF which have entries for this variable. @rtype: int """ length = 0 count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) current = 0 while current < count.value: candidate = zAttr(self._cdf_file, current) if not candidate.global_scope(): if candidate.has_entry(self._zvar._num()): length += 1 current += 1 return length def type(self, name, new_type=None): """Find or change the CDF type of a zEntry in this zVar @param name: name of the zAttr to check or change @type name: str @param new_type: type to change it to, see :py:mod:`pycdf.const` @type new_type: ctypes.c_long @return: CDF variable type, see :py:mod:`pycdf.const` @rtype: int @note: If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) return attrib.type(zvar_num, new_type) def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{zAttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name if new_name in self: del self[new_name] self.new(new_name, master[name], master.type(name) if hasattr(master, 'type') else None)
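The dict-like zEntry access pattern documented above for zAttrList can be shown with a minimal sketch. This example is not part of the original source: it assumes the module is importable as ``pycdf`` and that a writable file ``example.cdf`` containing a zVariable ``Flux`` already exists; the file, variable, and attribute names are all hypothetical.

import pycdf                              # assumed import name for this module
with pycdf.CDF('example.cdf') as cdf:     # hypothetical existing CDF file
    cdf.readonly(False)                   # existing CDFs open read-only by default
    attrs = cdf['Flux'].attrs             # zAttrList for the hypothetical 'Flux' variable
    attrs['DEPEND_0'] = 'Epoch'           # create or overwrite the zEntry for this variable
    print(attrs['DEPEND_0'])              # read it back: 'Epoch'
    del attrs['DEPEND_0']                 # removing the last zEntry also removes the zAttribute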
def checksum(self, new_val=None): """ Set or check the checksum status of this CDF. If checksums are enabled, the checksum will be verified every time the file is opened. Other Parameters ================ new_val : boolean True to enable checksum, False to disable, or leave out to simply check. Returns ======= out : boolean True if the checksum is enabled or False if disabled """ if new_val != None: self._call(const.PUT_, const.CDF_CHECKSUM_, const.MD5_CHECKSUM if new_val else const.NO_CHECKSUM) chk = ctypes.c_long(0) self._call(const.GET_, const.CDF_CHECKSUM_, ctypes.byref(chk)) if not chk.value in (const.MD5_CHECKSUM.value, const.NO_CHECKSUM.value): raise CDFError(const.BAD_CHECKSUM) return chk.value == const.MD5_CHECKSUM.value
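A minimal usage sketch for the checksum method above, again not part of the original source and under the same assumptions (module importable as ``pycdf``, hypothetical writable ``example.cdf``):

import pycdf                              # assumed import name for this module
with pycdf.CDF('example.cdf') as cdf:     # hypothetical existing CDF file
    cdf.readonly(False)                   # checksum changes require write access
    cdf.checksum(True)                    # enable MD5 checksum, verified on each later open
    assert cdf.checksum() is True         # call with no argument to query the current setting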
2025
2050
#!/usr/bin/env python # -*- coding: utf-8 -*- """ das developers note: This a is modification of the original SpacePy pycdf package. All refereneces to the greater spacepy package have been removed to create a small standalone module. --cwp 2018-10-18 The libcdf.so location code has been changed to find the version installed in anaconda. --cwp 2020-04-06 This package provides a Python interface to the Common Data Format (CDF) library used for many NASA missions, available at http://cdf.gsfc.nasa.gov/. It is targeted at Python 2.6+ and should work without change on either Python 2 or Python 3. The interface is intended to be 'pythonic' rather than reproducing the C interface. To open or close a CDF and access its variables, see the :class:`CDF` class. Accessing data within the variables is via the :class:`Var` class. The :data:`lib` object provides access to some routines that affect the functionality of the library in general. The :mod:`~pycdf.const` module contains constants useful for accessing the underlying library. Authors: Jon Niehof Institution: University of New Hampshire Contact: [email protected] Copyright 2010-2015 Los Alamos National Security, LLC. """ __contact__ = 'Jon Niehof, [email protected]' try: from collections.abc import MutableMapping, MutableSequence except ImportError: from collections import MutableMapping, MutableSequence import ctypes import ctypes.util import datetime import operator import os import os.path import shutil import sys import tempfile import warnings import weakref import numpy import numpy.ma #Import const AFTER library loaded, so failed load doesn't leave half-imported #from . import const try: str_classes = (str, bytes, unicode) except NameError: str_classes = (str, bytes) class Library(object): """ Abstraction of the base CDF C library and its state. Not normally intended for end-user use. An instance of this class is created at package load time as the :data:`~pycdf.lib` variable, providing access to the underlying C library if necessary. The CDF library itself is described in section 2.1 of the CDF user's guide, as well as the CDF C reference manual. Calling the C library directly requires knowledge of :mod:`ctypes`. Instantiating this object loads the C library, see :doc:`/pycdf` docs for details. .. autosummary:: ~Library.call ~Library.check_status ~Library.datetime_to_epoch ~Library.datetime_to_epoch16 ~Library.datetime_to_tt2000 ~Library.epoch_to_datetime ~Library.epoch_to_epoch16 ~Library.epoch_to_num ~Library.epoch_to_tt2000 ~Library.epoch16_to_datetime ~Library.epoch16_to_epoch ~Library.epoch16_to_tt2000 ~Library.set_backward supports_int8 ~Library.tt2000_to_datetime ~Library.tt2000_to_epoch ~Library.tt2000_to_epoch16 v_datetime_to_epoch v_datetime_to_epoch16 v_datetime_to_tt2000 v_epoch_to_datetime v_epoch_to_tt2000 v_epoch16_to_datetime v_epoch16_to_tt2000 v_tt2000_to_datetime v_tt2000_to_epoch v_tt2000_to_epoch16 libpath version .. automethod:: call .. automethod:: check_status .. automethod:: datetime_to_epoch .. automethod:: datetime_to_epoch16 .. automethod:: datetime_to_tt2000 .. automethod:: epoch_to_datetime .. automethod:: epoch_to_epoch16 .. automethod:: epoch_to_num .. automethod:: epoch_to_tt2000 .. automethod:: epoch16_to_datetime .. automethod:: epoch16_to_epoch .. automethod:: epoch16_to_tt2000 .. automethod:: set_backward .. attribute:: supports_int8 True if this library supports INT8 and TIME_TT2000 types; else False. .. automethod:: tt2000_to_datetime .. automethod:: tt2000_to_epoch .. 
automethod:: tt2000_to_epoch16 .. method:: v_datetime_to_epoch(datetime) A vectorized version of :meth:`datetime_to_epoch` which takes a numpy array of datetimes as input and returns an array of epochs. .. method:: v_datetime_to_epoch16(datetime) A vectorized version of :meth:`datetime_to_epoch16` which takes a numpy array of datetimes as input and returns an array of epoch16. .. method:: v_datetime_to_tt2000(datetime) A vectorized version of :meth:`datetime_to_tt2000` which takes a numpy array of datetimes as input and returns an array of TT2000. .. method:: v_epoch_to_datetime(epoch) A vectorized version of :meth:`epoch_to_datetime` which takes a numpy array of epochs as input and returns an array of datetimes. .. method:: v_epoch_to_tt2000(epoch) A vectorized version of :meth:`epoch_to_tt2000` which takes a numpy array of epochs as input and returns an array of tt2000s. .. method:: v_epoch16_to_datetime(epoch0, epoch1) A vectorized version of :meth:`epoch16_to_datetime` which takes a numpy array of epoch16 as input and returns an array of datetimes. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_epoch16_to_tt2000(epoch16) A vectorized version of :meth:`epoch16_to_tt2000` which takes a numpy array of epoch16 as input and returns an array of tt2000s. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_tt2000_to_datetime(tt2000) A vectorized version of :meth:`tt2000_to_datetime` which takes a numpy array of tt2000 as input and returns an array of datetimes. .. method:: v_tt2000_to_epoch(tt2000) A vectorized version of :meth:`tt2000_to_epoch` which takes a numpy array of tt2000 as input and returns an array of epochs. .. method:: v_tt2000_to_epoch16(tt2000) A vectorized version of :meth:`tt2000_to_epoch16` which takes a numpy array of tt2000 as input and returns an array of epoch16. .. attribute:: libpath The path where pycdf found the CDF C library, potentially useful in debugging. If this contains just the name of a file (with no path information), then the system linker found the library for pycdf. On Linux, ``ldconfig -p`` may be useful for displaying the system's library resolution. .. attribute:: version Version of the CDF library, (version, release, increment, subincrement) """ def __init__(self, libpath=None, library=None): """Load the CDF C library. Searches for the library in the order: 1. Appropriately-named file in CDF_LIB 2. Appropriately-named file in CDF_BASE 3. Standard library search path @raise CDFError: BAD_DATA_TYPE if can't map types properly """ if not 'CDF_TMP' in os.environ: os.environ['CDF_TMP'] = tempfile.gettempdir() if not library: if not libpath: self.libpath, self._library = self._find_lib() if self._library is None: raise Exception(( 'Cannot load CDF C library; checked {0}. 
' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(self.libpath))) else: self._library = ctypes.CDLL(libpath) self.libpath = libpath else: self._library = library self.libpath = libpath self._library.CDFlib.restype = ctypes.c_long #commonly used, so set it up here self._library.EPOCHbreakdown.restype = ctypes.c_long self._library.computeEPOCH.restype = ctypes.c_double self._library.computeEPOCH.argtypes = [ctypes.c_long] * 7 self._library.computeEPOCH16.restype = ctypes.c_double self._library.computeEPOCH16.argtypes = [ctypes.c_long] * 10 + \ [ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDFsetFileBackward'): self._library.CDFsetFileBackward.restype = None self._library.CDFsetFileBackward.argtypes = [ctypes.c_long] #Map old name to the 3.7.1+ name if not hasattr(self._library, 'computeTT2000') \ and hasattr(self._library, 'CDF_TT2000_from_UTC_parts'): self._library.computeTT2000 \ = self._library.CDF_TT2000_from_UTC_parts if hasattr(self._library, 'computeTT2000'): self._library.computeTT2000.restype = ctypes.c_longlong self._library.computeTT2000.argtypes = \ [ctypes.c_double] *9 #Map old name to the 3.7.1+ name if not hasattr(self._library, 'breakdownTT2000') \ and hasattr(self._library, 'CDF_TT2000_to_UTC_parts'): self._library.breakdownTT2000 \ = self._library.CDF_TT2000_to_UTC_parts if hasattr(self._library, 'breakdownTT2000'): self._library.breakdownTT2000.restype = None self._library.breakdownTT2000.argtypes = \ [ctypes.c_longlong] + [ctypes.POINTER(ctypes.c_double)] * 9 if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH'): self._library.CDF_TT2000_to_UTC_EPOCH.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH.argtypes = [ctypes.c_longlong] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH'): self._library.CDF_TT2000_from_UTC_EPOCH.restype = ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH.argtypes = [ctypes.c_double] if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH16'): self._library.CDF_TT2000_to_UTC_EPOCH16.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH16.argtypes = \ [ctypes.c_longlong, ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH16'): self._library.CDF_TT2000_from_UTC_EPOCH16.restype = \ ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH16.argtypes = \ [ctypes.POINTER(ctypes.c_double * 2)] #Get CDF version information ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) sub = ctypes.c_char(b' ') self.call(const.GET_, const.LIB_VERSION_, ctypes.byref(ver), const.GET_, const.LIB_RELEASE_, ctypes.byref(rel), const.GET_, const.LIB_INCREMENT_, ctypes.byref(inc), const.GET_, const.LIB_subINCREMENT_, ctypes.byref(sub)) ver = ver.value rel = rel.value inc = inc.value sub = sub.value self.version = (ver, rel, inc, sub) self._del_middle_rec_bug = ver < 3 or (ver == 3 and (rel < 4 or (rel == 4 and inc < 1))) self.supports_int8 = (ver > 3 or (ver == 3 and rel >=4)) self.cdftypenames = {const.CDF_BYTE.value: 'CDF_BYTE', const.CDF_CHAR.value: 'CDF_CHAR', const.CDF_INT1.value: 'CDF_INT1', const.CDF_UCHAR.value: 'CDF_UCHAR', const.CDF_UINT1.value: 'CDF_UINT1', const.CDF_INT2.value: 'CDF_INT2', const.CDF_UINT2.value: 'CDF_UINT2', const.CDF_INT4.value: 'CDF_INT4', const.CDF_UINT4.value: 'CDF_UINT4', const.CDF_INT8.value: 'CDF_INT8', const.CDF_FLOAT.value: 'CDF_FLOAT', const.CDF_REAL4.value: 'CDF_REAL4', const.CDF_DOUBLE.value: 'CDF_DOUBLE', const.CDF_REAL8.value: 'CDF_REAL8', const.CDF_EPOCH.value: 'CDF_EPOCH', 
const.CDF_EPOCH16.value: 'CDF_EPOCH16', const.CDF_TIME_TT2000.value: 'CDF_TIME_TT2000', } self.numpytypedict = {const.CDF_BYTE.value: numpy.int8, const.CDF_CHAR.value: numpy.int8, const.CDF_INT1.value: numpy.int8, const.CDF_UCHAR.value: numpy.uint8, const.CDF_UINT1.value: numpy.uint8, const.CDF_INT2.value: numpy.int16, const.CDF_UINT2.value: numpy.uint16, const.CDF_INT4.value: numpy.int32, const.CDF_UINT4.value: numpy.uint32, const.CDF_INT8.value: numpy.int64, const.CDF_FLOAT.value: numpy.float32, const.CDF_REAL4.value: numpy.float32, const.CDF_DOUBLE.value: numpy.float64, const.CDF_REAL8.value: numpy.float64, const.CDF_EPOCH.value: numpy.float64, const.CDF_EPOCH16.value: numpy.dtype((numpy.float64, 2)), const.CDF_TIME_TT2000.value: numpy.int64, } self.timetypes = [const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value] if not self.supports_int8: del self.cdftypenames[const.CDF_INT8.value] del self.numpytypedict[const.CDF_INT8.value] del self.cdftypenames[const.CDF_TIME_TT2000.value] del self.numpytypedict[const.CDF_TIME_TT2000.value] elif sys.platform.startswith('linux') \ and os.uname()[4].startswith('arm') \ and hasattr(self._library, 'computeTT2000') \ and self._library.computeTT2000( 2010, 1, 1, 0, 0, 0, 0, 0, 0) != 315576066184000000: #TT2000 call failed, so probably need to type-pun #double arguments to variadic functions. #Calling convention for non-variadic functions with floats #is unique, but convention for ints is same as variadic. #So type-pun arguments to integers to force that calling #convention. if ctypes.sizeof(ctypes.c_longlong) != \ ctypes.sizeof(ctypes.c_double): warnings.warn('ARM with unknown type sizes; ' 'TT2000 functions will not work.') else: self._library.computeTT2000.argtypes = \ [ctypes.c_longlong] * 9 c_ll_p = ctypes.POINTER(ctypes.c_longlong) if self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( 2010)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) != 315576066184000000: warnings.warn('ARM with unknown calling convention; ' 'TT2000 functions will not work.') self.datetime_to_tt2000 = self._datetime_to_tt2000_typepunned v_epoch16_to_datetime = numpy.frompyfunc( self.epoch16_to_datetime, 2, 1) self.v_epoch16_to_datetime = \ lambda x: v_epoch16_to_datetime(x[..., 0], x[..., 1]) self.v_epoch_to_datetime = numpy.frompyfunc( self.epoch_to_datetime, 1, 1) self.v_tt2000_to_datetime = numpy.frompyfunc( self.tt2000_to_datetime, 1, 1) self.v_datetime_to_epoch = numpy.vectorize( self.datetime_to_epoch, otypes=[numpy.float64]) v_datetime_to_epoch16 = numpy.frompyfunc( self.datetime_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. 
We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_datetime_to_epoch16(x): retval = numpy.require(v_datetime_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_datetime_to_epoch16 = _v_datetime_to_epoch16 self.v_datetime_to_tt2000 = numpy.vectorize( self.datetime_to_tt2000, otypes=[numpy.int64]) self.v_epoch_to_tt2000 = numpy.vectorize( self.epoch_to_tt2000, otypes=[numpy.int64]) self.v_tt2000_to_epoch = numpy.vectorize( self.tt2000_to_epoch, otypes=[numpy.float64]) v_epoch16_to_tt2000 = numpy.frompyfunc( self.epoch16_to_tt2000, 2, 1) self.v_epoch16_to_tt2000 = \ lambda x: v_epoch16_to_tt2000(x[..., 0], x[..., 1]) v_tt2000_to_epoch16 = numpy.frompyfunc( self.tt2000_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_tt2000_to_epoch16(x): retval = numpy.require(v_tt2000_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_tt2000_to_epoch16 = _v_tt2000_to_epoch16 if not self.supports_int8: self.datetime_to_tt2000 = self._bad_tt2000 self.tt2000_to_datetime = self._bad_tt2000 self.v_datetime_to_tt2000 = self._bad_tt2000 self.v_tt2000_to_datetime = self._bad_tt2000 self.epoch_to_tt2000 = self._bad_tt2000 self.v_epoch_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch = self._bad_tt2000 self.v_tt2000_to_epoch = self._bad_tt2000 self.epoch_16_to_tt2000 = self._bad_tt2000 self.v_epoch16_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch16 = self._bad_tt2000 self.v_tt2000_to_epoch16 = self._bad_tt2000 #Default to V2 CDF self.set_backward(True) @staticmethod def _find_lib(): """ Search for the CDF library Searches in likely locations for CDF libraries and attempts to load them. Stops at first successful load and, if fails, reports all the files that were tried as libraries. Returns ======= out : tuple This is either (path to library, loaded library) or, in the event of failure, (None, list of libraries tried) """ failed = [] for libpath in Library._lib_paths(): try: lib = ctypes.CDLL(libpath) except: failed.append(libpath) else: return libpath, lib return (failed, None) @staticmethod def _lib_paths(): """Find candidate paths for the CDF library Does not check that the library is actually in any particular directory, just returns a list of possible locations, in priority order. Returns ======= out : generator of str paths that look like the CDF library """ #What the library might be named names = { 'win32': ['cdf.dll'], 'darwin': ['libcdf.dylib', 'cdf.dylib', 'libcdf.so'], 'linux2': ['libcdf.so'], 'linux': ['libcdf.so'], } names = names.get(sys.platform, ['libcdf.so']) #All existing CDF-library-like paths within a directory search_dir = lambda x: \ [os.path.join(x, fname) for fname in names if os.path.exists(os.path.join(x, fname))] # Only use anaconda locations... # Defined during builds ... if 'PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['PREFIX'], 'lib')): yield p # defined when conda is activated ... 
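        #(CONDA_PREFIX is set when a conda environment is activated; as with
        # PREFIX above, the CDF shared library is expected under Library/bin
        # on Windows and under lib/ elsewhere.)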
if 'CONDA_PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'lib')): yield p # Special subdirectory for anaconda unix packages on windows if 'LIBRARY_BIN' in os.environ: for p in search_dir(os.environ['LIBRARY_BIN']): yield p ctypespath = ctypes.util.find_library( 'cdf.dll' if sys.platform == 'win32' else 'cdf') if ctypespath: yield ctypespath def check_status(self, status, ignore=()): """ Raise exception or warning based on return status of CDF call Parameters ========== status : int status returned by the C library Other Parameters ================ ignore : sequence of ctypes.c_long CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. (Default none). Raises ====== CDFError : if status < CDF_WARN, indicating an error Warns ===== CDFWarning : if CDF_WARN <= status < CDF_OK, indicating a warning. Returns ======= out : int status (unchanged) """ if status == const.CDF_OK or status in ignore: return status if status < const.CDF_WARN: raise CDFError(status) else: warning = CDFWarning(status) warning.warn() return status def call(self, *args, **kwargs): """ Call the CDF internal interface Passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with :meth:`check_status`. Terminal NULL is automatically added to args. Parameters ========== args : various, see :mod:`ctypes` Passed directly to the CDF library interface. Useful constants are defined in the :mod:`~pycdf.const` module. Other Parameters ================ ignore : sequence of CDF statuses sequence of CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. Returns ======= out : int CDF status from the library Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ if 'ignore' in kwargs: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) ), kwargs['ignore']) else: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) )) def set_backward(self, backward=True): """ Set backward compatibility mode for new CDFs Unless backward compatible mode is set, CDF files created by the version 3 library can not be read by V2. Parameters ========== backward : boolean Set backward compatible mode if True; clear it if False. Raises ====== ValueError : if backward=False and underlying CDF library is V2 """ if self.version[0] < 3: if not backward: raise ValueError( 'Cannot disable backward-compatible mode for CDF version 2.') else: return self._library.CDFsetFileBackward(const.BACKWARDFILEon if backward else const.BACKWARDFILEoff) def epoch_to_datetime(self, epoch): """ Converts a CDF epoch value to a datetime Parameters ========== epoch : float epoch value from CDF Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
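        For example, an illustrative round trip through
        :meth:`datetime_to_epoch` (assumes ``pycdf`` and ``datetime`` are
        imported; the date is arbitrary):

        >>> ep = pycdf.lib.datetime_to_epoch(datetime.datetime(2010, 1, 1))
        >>> pycdf.lib.epoch_to_datetime(ep)
        datetime.datetime(2010, 1, 1, 0, 0)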
See Also ======== v_epoch_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) self._library.EPOCHbreakdown(ctypes.c_double(epoch), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999000) else: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, msec.value * 1000) def datetime_to_epoch(self, dt): """ Converts a Python datetime to a CDF Epoch value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : float epoch corresponding to dt See Also ======== v_datetime_to_epoch """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) micro = dt.microsecond % 1000 if micro >= 500 and dt.year < 9999: dt += datetime.timedelta(0, 0, 1000) return self._library.computeEPOCH(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000)) def epoch16_to_datetime(self, epoch0, epoch1): """ Converts a CDF epoch16 value to a datetime .. note:: The call signature has changed since SpacePy 0.1.2. Formerly this method took a single argument with two values; now it requires two arguments (one for each value). To convert existing code, replace ``epoch16_to_datetime(epoch)`` with ``epoch16_to_datetime(*epoch)``. Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. See Also ======== v_epoch16_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) usec = ctypes.c_long(0) nsec = ctypes.c_long(0) psec = ctypes.c_long(0) self._library.EPOCH16breakdown((ctypes.c_double * 2)(epoch0, epoch1), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec), ctypes.byref(psec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) micro = int(float(msec.value) * 1000 + float(usec.value) + float(nsec.value) / 1000 + float(psec.value) / 1e6 + 0.5) if micro < 1000000: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_epoch16(self, dt): """ Converts a Python datetime to a CDF Epoch16 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : list of float epoch16 corresponding to dt See Also ======== v_datetime_to_epoch16 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) #Default to "illegal epoch" epoch16 = (ctypes.c_double * 2)(-1., -1.) 
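        #CDF_EPOCH16 is a pair of doubles: whole seconds since 0000-01-01,
        #plus picoseconds within that second. computeEPOCH16 fills the pair
        #in place; a nonzero return below is treated as failure.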
if self._library.computeEPOCH16(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0, 0, epoch16): return (-1., -1.) #Failure, so illegal epoch return (epoch16[0], epoch16[1]) def epoch_to_epoch16(self, epoch): """ Converts a CDF EPOCH to a CDF EPOCH16 value Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : (double, double) EPOCH16 corresponding to epoch """ e = numpy.require(epoch, numpy.float64) s = numpy.trunc(e / 1000.0) #ugly numpy stuff, probably a better way.... res = numpy.hstack((s, (e - s * 1000.0) * 1e9)) if len(res) <= 2: return res newshape = list(res.shape[0:-2]) newshape.append(res.shape[-1] // 2) newshape.append(2) return numpy.rollaxis(res.reshape(newshape), -1, -2) def epoch_to_num(self, epoch): """ Convert CDF EPOCH to matplotlib number. Same output as :func:`~matplotlib.dates.date2num` and useful for plotting large data sets without converting the times through datetime. Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : double Floating point number representing days since 0001-01-01. """ #date2num day 1 is 1/1/1 00UT #epoch 1/1/1 00UT is 31622400000.0 (millisecond) return (epoch - 31622400000.0) / (24 * 60 * 60 * 1000.0) + 1.0 def epoch16_to_epoch(self, epoch16): """ Converts a CDF EPOCH16 to a CDF EPOCH value Parameters ========== epoch16 : (double, double) EPOCH16 to convert. Lists and numpy arrays are acceptable. LAST dimension should be 2: the two pairs of EPOCH16 Returns ======= out : double EPOCH corresponding to epoch16 """ e = numpy.require(epoch16, numpy.float64) return e[..., 0] * 1000.0 + numpy.round(e[..., 1] / 1e9) def tt2000_to_datetime(self, tt2000): """ Converts a CDF TT2000 value to a datetime .. note:: Although TT2000 values support leapseconds, Python's datetime object does not. Any times after 23:59:59.999999 will be truncated to 23:59:59.999999. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
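        For example, using the reference value that appears in this module's
        library setup check (it corresponds to 2010-01-01 00:00:00 UTC):

        >>> pycdf.lib.tt2000_to_datetime(315576066184000000)
        datetime.datetime(2010, 1, 1, 0, 0)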
See Also ======== v_tt2000_to_datetime """ yyyy = ctypes.c_double(0) mm = ctypes.c_double(0) dd = ctypes.c_double(0) hh = ctypes.c_double(0) min = ctypes.c_double(0) sec = ctypes.c_double(0) msec = ctypes.c_double(0) usec = ctypes.c_double(0) nsec = ctypes.c_double(0) self._library.breakdownTT2000( ctypes.c_longlong(tt2000), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) sec = int(sec.value) if sec >= 60: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), 59, 999999) micro = int(msec.value * 1000 + usec.value + nsec.value / 1000 + 0.5) if micro < 1000000: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_tt2000(self, dt): """ Converts a Python datetime to a CDF TT2000 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0) def _datetime_to_tt2000_typepunned(self, dt): """ Converts a Python datetime to a CDF TT2000 value Typepunned version that passes doubles as longlongs, to get around ARM calling convention oddness. Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ c_ll_p = ctypes.POINTER(ctypes.c_longlong) if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( dt.year)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.month)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.day)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.hour)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.minute)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.second)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond // 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond % 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) def epoch_to_tt2000(self, epoch): """ Converts a CDF EPOCH to a CDF TT2000 value Parameters ========== epoch : double EPOCH to convert Returns ======= out : int tt2000 corresponding to epoch See Also ======== v_epoch_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH(epoch) def tt2000_to_epoch(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH .. note:: Although TT2000 values support leapseconds, CDF EPOCH values do not. 
Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double EPOCH corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch """ return self._library.CDF_TT2000_to_UTC_EPOCH(tt2000) def epoch16_to_tt2000(self, epoch0, epoch1): """ Converts a CDF epoch16 value to TT2000 .. note:: Because TT2000 does not support picoseconds, the picoseconds value in epoch is ignored (i.e., truncated.) Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : long TT2000 corresponding to epoch. See Also ======== v_epoch16_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH16( (ctypes.c_double * 2)(epoch0, epoch1)) def tt2000_to_epoch16(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH16 .. note:: Although TT2000 values support leapseconds, CDF EPOCH16 values do not. Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double, double EPOCH16 corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch16 """ #Default to "illegal epoch" if isn't populated epoch16 = (ctypes.c_double * 2)(-1., -1.) if self._library.CDF_TT2000_to_UTC_EPOCH16(tt2000, epoch16): return (-1., -1.) #Failure; illegal epoch return (epoch16[0], epoch16[1]) def _bad_tt2000(*args, **kwargs): """Convenience function for complaining that TT2000 not supported""" raise NotImplementedError( 'TT2000 functions require CDF library 3.4.0 or later') def download_library(): """Download and install the CDF library""" if sys.platform != 'win32': raise NotImplementedError( 'CDF library install only supported on Windows') try: import html.parser as HTMLParser except ImportError: import HTMLParser #https://stackoverflow.com/questions/1713038/super-fails-with-error-typeerror-argument-1-must-be-type-not-classobj class LinkParser(HTMLParser.HTMLParser, object): def __init__(self, *args, **kwargs): self.links_found = [] super(LinkParser, self).__init__(*args, **kwargs) def handle_starttag(self, tag, attrs): if tag != 'a' or attrs[0][0] != 'href': return self.links_found.append(attrs[0][1]) import re import subprocess try: import urllib.request as u except ImportError: import urllib as u # Removed reference to spacepy #import spacepy #if spacepy.config.get('user_agent', None): # class AppURLopener(u.FancyURLopener): # version = spacepy.config['user_agent'] # u._urlopener = AppURLopener() baseurl = 'https://spdf.sci.gsfc.nasa.gov/pub/software/cdf/dist/' url = u.urlopen(baseurl) listing = url.read() url.close() p = LinkParser() p.feed(listing) cdfdist = [l for l in p.links_found if re.match('^cdf3\d_\d(?:_\d)?/$', l)] if not cdfdist: raise RuntimeError( "Couldn't find CDF distribution directory to download") cdfdist.sort(key=lambda x: x.rstrip('/').split('_')) cdfverbase = cdfdist[-1].rstrip('/') instfname = cdfverbase + ('_0' if cdfverbase.count('_') == 1 else '') + \ '-setup-{0}.exe'.format(len('%x' % sys.maxsize)*4) insturl = baseurl + cdfverbase + '/windows/' + instfname tmpdir = tempfile.mkdtemp() try: fname, status = u.urlretrieve(insturl, os.path.join(tmpdir, instfname)) subprocess.check_call([fname, '/install', '/q1'], shell=False) finally: shutil.rmtree(tmpdir) _libpath, _library = Library._find_lib() 
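#_find_lib returns (path, library) on success; on failure it returns
#(list of paths tried, None), so _libpath below doubles as the list of
#attempted locations used in the error message.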
if _library is None: raise Exception(('Cannot load CDF C library; checked {0}. ' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(_libpath))) from . import const lib = Library(_libpath, _library) """Module global library object. Initalized at module load time so all classes have ready access to the CDF library and a common state. E.g: >>> import pycdf >>> pycdf.lib.version (3, 3, 0, ' ') """ class CDFException(Exception): """ Base class for errors or warnings in the CDF library. Not normally used directly, but in subclasses :class:`CDFError` and :class:`CDFWarning`. Error messages provided by this class are looked up from the underlying C library. """ def __init__(self, status): """ Create a CDF Exception Uses CDF C library to look up an appropriate error message. Parameters ========== status : ctypes.c_long CDF status """ self.status = status self.string = 'CDF error ' + repr(status) + ', unable to get details.' message = ctypes.create_string_buffer(const.CDF_STATUSTEXT_LEN + 1) try: retval = lib._library.CDFlib(const.SELECT_, const.CDF_STATUS_, ctypes.c_long(status), const.GET_, const.STATUS_TEXT_, message, const.NULL_) if retval == const.CDF_OK: if isinstance(message.value, str): self.string = message.value elif isinstance(message.value, bytes): self.string = message.value.decode() except: pass def __str__(self): """ Error string associated with the library error. Returns ======= out : str Error message from the CDF library. """ return self.string class CDFError(CDFException): """Raised for an error in the CDF library.""" pass class CDFWarning(CDFException, UserWarning): """Used for a warning in the CDF library.""" def warn(self, level=4): """ Issues a warning based on the information stored in my exception Intended for use in check_status or similar wrapper function. Other Parameters ================ level : int optional (default 3), how far up the stack the warning should be reported. Passed directly to :class:`warnings.warn`. """ warnings.warn(self, self.__class__, level) class EpochError(Exception): """Used for errors in epoch routines""" pass def _compress(obj, comptype=None, param=None): """Set or check the compression of a :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param obj: object on which to set or check compression @type obj: :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param comptype: type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :py:mod:`pycdf.const`. If not specified, will not change compression. @type comptype: ctypes.c_long @param param: Compression parameter, see CDF CRM 4.10 and :py:mod:`pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
@type param: ctypes.c_long @return: (comptype, param) currently in effect @rtype: tuple """ if isinstance(obj, CDF): COMPRESSION_ = const.CDF_COMPRESSION_ elif isinstance(obj, Var): COMPRESSION_ = const.zVAR_COMPRESSION_ else: raise ValueError('Must specify a CDF or Var type.') validparams = {const.NO_COMPRESSION.value: [ctypes.c_long(0)], const.RLE_COMPRESSION.value: [const.RLE_OF_ZEROs], const.HUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.AHUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.GZIP_COMPRESSION.value: [ctypes.c_long(5), ctypes.c_long(1), ctypes.c_long(2), ctypes.c_long(3), ctypes.c_long(4), ctypes.c_long(6), ctypes.c_long(7), ctypes.c_long(8), ctypes.c_long(9), ], } comptypes = [const.NO_COMPRESSION, const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION, const.GZIP_COMPRESSION] comptypevalues = [i.value for i in comptypes] if comptype != None: if not hasattr(comptype, 'value'): comptype = ctypes.c_long(comptype) if param is None: if not comptype.value in validparams: raise CDFError(const.BAD_COMPRESSION) param = validparams[comptype.value][0] paramlist = (ctypes.c_long * 1)(param) obj._call(const.PUT_, COMPRESSION_, comptype, paramlist) params = (ctypes.c_long * const.CDF_MAX_PARMS)(*([0] * const.CDF_MAX_PARMS)) comptype = ctypes.c_long(0) percent = ctypes.c_long(0) obj._call(const.GET_, COMPRESSION_, ctypes.byref(comptype), ctypes.byref(params), ctypes.byref(percent)) param = params[0] if not comptype.value in comptypevalues: raise CDFError(const.BAD_COMPRESSION) validparamvalues = [i.value for i in validparams[comptype.value]] if not param in validparamvalues: raise CDFError(const.BAD_COMPRESSION_PARM) comptype = comptypes[comptypevalues.index(comptype.value)] if comptype in (const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION): param = validparams[comptype.value][validparamvalues.index(param)] return (comptype, param) class CDF(MutableMapping): """ Python object representing a CDF file. Open or create a CDF file by creating an object of this class. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. A readonly CDF with many variables may be slow to close. See :meth:`readonly`. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :meth:`close` or :meth:`save` when done. .. note:: Existing CDF files are opened read-only by default, see :meth:`readonly` to change. CDF supports the `with <http://docs.python.org/tutorial/inputoutput.html#methods-of-file-objects>`_ keyword, like other file objects, so: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... #do brilliant things with the CDF will open the CDF, execute the indented statements, and close the CDF when finished or when an error occurs. The `python docs <http://docs.python.org/reference/compound_stmts.html#with>`_ include more detail on this 'context manager' ability. 
CDF objects behave like a python `dictionary <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_, where the keys are names of variables in the CDF, and the values, :class:`Var` objects. As a dictionary, they are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_ and it is easy to loop over all of the variables in a file. Some examples: #. List the names of all variables in the open CDF ``cdffile``: >>> cdffile.keys() >>> for k in cdffile: #Alternate ... print(k) #. Get a :class:`Var` object for the variable named ``Epoch``: >>> epoch = cdffile['Epoch'] #. Determine if a CDF contains a variable named ``B_GSE``: >>> if 'B_GSE' in cdffile: ... print('B_GSE is in the file') ... else: ... print('B_GSE is not in the file') #. Find how many variables are in the file: >>> print(len(cdffile)) #. Delete the variable ``Epoch`` from the open CDF file ``cdffile``: >>> del cdffile['Epoch'] #. Display a summary of variables and types in open CDF file ``cdffile``: >>> print(cdffile) #. Open the CDF named ``cdf_filename.cdf``, read *all* the data from all variables into dictionary ``data``, and close it when done or if an error occurs: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... data = cdffile.copy() This last example can be very inefficient as it reads the entire CDF. Normally it's better to treat the CDF as a dictionary and access only the data needed, which will be pulled transparently from disc. See :class:`Var` for more subtle examples. Potentially useful dictionary methods and related functions: - `in <http://docs.python.org/reference/expressions.html#in>`_ - `keys <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_ - :py:func:`len` - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - :py:func:`sorted` - :py:func:`~spacepy.toolbox.dictree` The CDF user's guide section 2.2 has more background information on CDF files. The :attr:`~CDF.attrs` Python attribute acts as a dictionary referencing CDF attributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`gAttrList` for more on the dictionary of global attributes. Creating a new CDF from a master (skeleton) CDF has similar syntax to opening one: >>> cdffile = pycdf.CDF('cdf_filename.cdf', 'master_cdf_filename.cdf') This creates and opens ``cdf_filename.cdf`` as a copy of ``master_cdf_filename.cdf``. Using a skeleton CDF is recommended over making a CDF entirely from scratch, but this is possible by specifying a blank master: >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') When CDFs are created in this way, they are opened read-write, see :py:meth:`readonly` to change. By default, new CDFs (without a master) are created in version 2 (backward-compatible) format. To create a version 3 CDF, use :meth:`Library.set_backward`: >>> pycdf.lib.set_backward(False) >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') Add variables by direct assignment, which will automatically set type and dimension based on the data provided: >>> cdffile['new_variable_name'] = [1, 2, 3, 4] or, if more control is needed over the type and dimensions, use :py:meth:`new`. Although it is supported to assign Var objects to Python variables for convenience, there are some minor pitfalls that can arise when changing a CDF that will not affect most users. 
This is only a concern when assigning a zVar object to a Python variable, changing the CDF through some other variable, and then trying to use the zVar object via the originally assigned variable. Deleting a variable: >>> var = cdffile['Var1'] >>> del cdffile['Var1'] >>> var[0] #fail, no such variable Renaming a variable: >>> var = cdffile['Var1'] >>> cdffile['Var1'].rename('Var2') >>> var[0] #fail, no such variable Renaming via the same variable works: >>> var = cdffile['Var1'] >>> var.rename('Var2') >>> var[0] #succeeds, aware of new name Deleting a variable and then creating another variable with the same name may lead to some surprises: >>> var = cdffile['Var1'] >>> var[...] = [1, 2, 3, 4] >>> del cdffile['Var1'] >>> cdffile.new('Var1', data=[5, 6, 7, 8] >>> var[...] [5, 6, 7, 8] .. autosummary:: ~CDF.attr_num ~CDF.attrs ~CDF.add_attr_to_cache ~CDF.add_to_cache ~CDF.backward ~CDF.checksum ~CDF.clear_attr_from_cache ~CDF.clear_from_cache ~CDF.clone ~CDF.close ~CDF.col_major ~CDF.compress ~CDF.copy ~CDF.from_data ~CDF.new ~CDF.raw_var ~CDF.readonly ~CDF.save ~CDF.var_num ~CDF.version .. attribute:: CDF.attrs Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. .. attribute:: CDF.backward True if this CDF was created in backward-compatible mode (for opening with CDF library before 3.x) .. automethod:: add_to_cache .. automethod:: add_attr_to_cache .. automethod:: attr_num .. automethod:: checksum .. automethod:: clear_from_cache .. automethod:: clear_attr_from_cache .. automethod:: clone .. automethod:: close .. automethod:: col_major .. automethod:: compress .. automethod:: copy .. automethod:: from_data .. automethod:: new .. automethod:: raw_var .. automethod:: readonly .. automethod:: save .. automethod:: var_num .. automethod:: version """ def __init__(self, pathname, masterpath=None, create=None, readonly=None): """Open or create a CDF file. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. Raises ====== CDFError if CDF library reports an error CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save` when done. 
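        As an illustrative variation (same hypothetical filename as above),
        an existing CDF can be opened for modification by passing
        ``readonly=False``:

        >>> cdffile = pycdf.CDF('cdf_filename.cdf', readonly=False)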
""" if masterpath is not None: #Looks like we want to create if create is False: raise ValueError('Cannot specify a master CDF without creating a CDF') if readonly is True: raise ValueError('Cannot create a CDF in readonly mode') if create and readonly: raise ValueError('Cannot create a CDF in readonly mode') try: self.pathname = pathname.encode() except AttributeError: raise ValueError( 'pathname must be string-like: {0}'.format(pathname)) self._handle = ctypes.c_void_p(None) self._opened = False if masterpath is None and not create: self._open(True if readonly is None else readonly) elif masterpath: self._from_master(masterpath.encode()) else: self._create() lib.call(const.SELECT_, const.CDF_zMODE_, ctypes.c_long(2)) self._attrlistref = weakref.ref(gAttrList(self)) self.backward = self.version()[0] < 3 self._var_nums = {} """Cache of name-to-number mappings for variables in this CDF""" self._attr_info = {} """Cache of name-to-(number, global) mappings for attributes in this CDF""" def __del__(self): """Destructor; called when CDF object is destroyed. Close CDF file if there is still a valid handle. .. note:: To avoid data loss, explicitly call :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save`. """ if self._opened: self.close() def __delitem__(self, name): """Delete a zVariable in this CDF, by name or number Parameters ========== name : string or int Name or number of the CDF variable .. note: Variable numbers may change if variables are added or removed. Examples ======== Delete the variable ``Epoch`` from the open CDF file ``cdffile``. >>> del cdffile['Epoch'] """ self[name]._delete() def __enter__(self): """Context manager entrance function.""" return self def __exit__(self, type, value, traceback): """Context manager exit function. Close CDF file. """ self.close() def __getitem__(self, name): """Gets a zVariable in this CDF, by name or number The CDF acts like a dict @param name: Name or number of the CDF variable @type name: string or int @return: CDF variable named or numbered L{name} @rtype: :py:class:`pycdf.Var` @raise KeyError: for pretty much any problem in lookup @note: variable numbers may change if variables are added or removed. """ try: return Var(self, name) except CDFException as e: raise KeyError('{0}: {1}'.format(name, e)) def __setitem__(self, name, data): """Writes data to a zVariable in this CDF If the zVariable does not exist, will create one matching L{data}. If it does exist, will attempt to write L{data} to it without changing the type or dimensions. @param name: name or number of the variable to write @type name: str or int @param data: data to write, or a :py:class:`pycdf.Var` to copy """ if isinstance(data, Var): self.clone(data, name) elif name in self: self[name][...] 
= data if hasattr(data, 'attrs'): self[name].attrs.clone(data.attrs) else: self.new(name, data) def __iter__(self, current = 0): """Iterates over zVars in CDF Iterators for dicts return keys @note: Returned in variable-number order """ while current < self.__len__(): name = self[current].name() value = (yield name) if value is None: current += 1 else: current = self[value]._num() current += 1 def __len__(self): """Implements 'length' of CDF (number of zVars) @return: number of zVars in the CDF @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, const.CDF_NUMzVARS_, ctypes.byref(count)) return count.value def __contains__(self, key): """Determines whether a particular variable name is in the CDF @note: Essentially an efficiency function; L{__iter__} is called if this isn't defined @param key: key/variable name to check @type key: string @return: True if L{key} is the name of a variable in CDF, else False @rtype: Boolean """ try: foo = self[key] return True except KeyError as e: expected = str(key) + \ ": NO_SUCH_VAR: Named variable not found in this CDF." if expected in e.args: return False raise def __repr__(self): """Returns representation of CDF Cannot return anything that can be eval'd to create a copy of the CDF, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<CDF:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the CDF This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.CDF`, just the names, types, and sizes of all variables. (Attributes are not listed.) @return: description of the variables in the CDF @rtype: str """ if self._opened: return '\n'.join([key + ': ' + str(value) for (key, value) in sorted(self.items())]) #can get away with this sort because second value in tuple isn't #compared unless first are different, and variable name is unique. else: if isinstance(self.pathname, str): return 'Closed CDF {0}'.format(self.pathname) else: return 'Closed CDF {0}'.format(self.pathname.decode('ascii')) def _open(self, readonly=True): """Opens the CDF file (called on init) Will open an existing CDF file read/write. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.OPEN_, const.CDF_, self.pathname, ctypes.byref(self._handle)) self._opened = True if readonly: #Default is RW self.readonly(readonly) def _create(self): """Creates (and opens) a new CDF file Created at ``pathname``. Assumes zero-dimension r variables Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.CREATE_, const.CDF_, self.pathname, ctypes.c_long(0), (ctypes.c_long * 1)(0), ctypes.byref(self._handle)) self._opened = True def _from_master(self, master_path): """Creates a new CDF from a master CDF file ``master_path`` is copied to ``pathname`` and opened. Parameters ========== master_path : string location of the master CDF file Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. 
note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ if os.path.exists(self.pathname): raise CDFError(const.CDF_EXISTS) shutil.copy2(master_path, self.pathname) self._open(False) def _call(self, *args, **kwargs): """Select this CDF as current and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. Parameters ========== args : various, see :py:mod:`ctypes`. Passed directly to the CDF library interface. Useful constants are defined in the :doc:`const <pycdf_const>` module of this package. Returns ======= out : ctypes.c_long CDF status from the library .. note: Terminal NULL_ is automatically added to ``args``. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. """ return lib.call(const.SELECT_, const.CDF_, self._handle, *args, **kwargs) def clone(self, zVar, name=None, data=True): """ Clone a zVariable (from another CDF or this) into this CDF Parameters ========== zVar : :py:class:`Var` variable to clone Other Parameters ================ name : str Name of the new variable (default: name of the original) data : boolean (optional) Copy data, or only type, dimensions, variance, attributes? (default: True, copy data as well) Returns ======= out : :py:class:`Var` The newly-created zVar in this CDF """ if name is None: name = zVar.name() if name in self: del self[name] self.new(name, type=zVar.type(), recVary=zVar.rv(), dimVarys=zVar.dv(), dims=zVar._dim_sizes(), n_elements=zVar._nelems()) self[name].compress(*zVar.compress()) self[name].attrs.clone(zVar.attrs) if data: r = zVar._raw zVar._raw = True self.raw_var(name)[...] = zVar[...] zVar._raw = r return zVar def col_major(self, new_col=None): """ Finds the majority of this CDF file Other Parameters ================ new_col : boolean Specify True to change to column-major, False to change to row major, or do not specify to check the majority rather than changing it. (default is check only) Returns ======= out : boolean True if column-major, false if row-major """ if new_col != None: new_maj = const.COLUMN_MAJOR if new_col else const.ROW_MAJOR self._call(const.PUT_, const.CDF_MAJORITY_, new_maj) maj = ctypes.c_long(0) self._call(const.GET_, const.CDF_MAJORITY_, ctypes.byref(maj)) if not maj.value in (const.ROW_MAJOR.value, const.COLUMN_MAJOR.value): raise CDFError(const.BAD_MAJORITY) return maj.value == const.COLUMN_MAJOR.value def readonly(self, ro=None): """ Sets or check the readonly status of this CDF If the CDF has been changed since opening, setting readonly mode will have no effect. .. note:: Closing a CDF that has been opened readonly, or setting readonly False, may take a substantial amount of time if there are many variables in the CDF, as a (potentially large) cache needs to be cleared. Consider specifying ``readonly=False`` when opening the file if this is an issue. However, this may make some reading operations slower. Other Parameters ================ ro : Boolean True to set the CDF readonly, False to set it read/write, or leave out to check only. 
Returns ======= out : Boolean True if CDF is read-only, else False Raises ====== CDFError : if bad mode is set """ if ro == True: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYon) elif ro == False: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYoff) mode = ctypes.c_long(0) self._call(const.CONFIRM_, const.CDF_READONLY_MODE_, ctypes.byref(mode)) if mode.value == const.READONLYon.value: return True elif mode.value == const.READONLYoff.value: return False else: raise CDFError(const.BAD_READONLY_MODE.value) def checksum(self, new_val=None): """ Set or check the checksum status of this CDF. If checksums are enabled, the checksum will be verified every time the file is opened. Other Parameters ================ new_val : boolean True to enable checksum, False to disable, or leave out to simply check. Returns ======= out : boolean True if the checksum is enabled or False if disabled """ if new_val != None: self._call(const.PUT_, const.CDF_CHECKSUM_, const.MD5_CHECKSUM if new_val else const.NO_CHECKSUM) chk = ctypes.c_long(0) self._call(const.GET_, const.CDF_CHECKSUM_, ctypes.byref(chk)) if not chk.value in (const.MD5_CHECKSUM.value, const.NO_CHECKSUM.value): raise CDFError(const.BAD_CHECKSUM) return chk.value == const.MD5_CHECKSUM.value def close(self): """ Closes the CDF file Although called on object destruction (:meth:`~CDF.__del__`), to ensure all data are saved, the user should explicitly call :meth:`~CDF.close` or :meth:`~CDF.save`. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.CLOSE_, const.CDF_) self._opened = False def compress(self, comptype=None, param=None): """ Set or check the compression of this CDF Sets compression on entire *file*, not per-variable. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) Returns ======= out : tuple (comptype, param) currently in effect See Also ======== :meth:`Var.compress` Examples ======== Set file ``cdffile`` to gzip compression, compression level 9: >>> cdffile.compress(pycdf.const.GZIP_COMPRESSION, 9) """ return _compress(self, comptype, param) def new(self, name, data=None, type=None, recVary=True, dimVarys=None, dims=None, n_elements=None, compress=None, compress_param=None): """ Create a new zVariable in this CDF .. note:: Either ``data`` or ``type`` must be specified. If type is not specified, it is guessed from ``data``. Parameters ========== name : str name of the new variable Other Parameters ================ data data to store in the new variable. If this has a an ``attrs`` attribute (e.g., :class:`~spacepy.datamodel.dmarray`), it will be used to populate attributes of the new variable. type : ctypes.c_long CDF type of the variable, from :mod:`~pycdf.const`. See section 2.5 of the CDF user's guide for more information on CDF data types. recVary : boolean record variance of the variable (default True) dimVarys : list of boolean dimension variance of each dimension, default True for all dimensions. 
dims : list of int size of each dimension of this variable, default zero-dimensional. Note this is the dimensionality as defined by CDF, i.e., for record-varying variables it excludes the leading record dimension. See :py:class:`Var`. n_elements : int number of elements, should be 1 except for CDF_CHAR, for which it's the length of the string. compress : ctypes.c_long Compression to apply to this variable, default None. See :py:meth:`Var.compress`. compress_param : ctypes.c_long Compression parameter if compression used; reasonable default is chosen. See :py:meth:`Var.compress`. Returns ======= out : :py:class:`Var` the newly-created zVariable Raises ====== ValueError : if neither data nor sufficient typing information is provided. Notes ===== Any given data may be representable by a range of CDF types; if the type is not specified, pycdf will guess which the CDF types which can represent this data. This breaks down to: #. If input data is a numpy array, match the type of that array #. Proper kind (numerical, string, time) #. Proper range (stores highest and lowest number provided) #. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: #. Type that matches precision of data first, then #. integer type before float type, then #. Smallest type first, then #. signed type first, then #. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if ``data`` specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: #. absolute values between 0 and 3e-39 #. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. """ if type in (const.CDF_EPOCH16, const.CDF_INT8, const.CDF_TIME_TT2000) \ and self.backward: raise ValueError('Cannot use EPOCH16, INT8, or TIME_TT2000 ' 'in backward-compatible CDF') if not lib.supports_int8 and \ type in (const.CDF_INT8, const.CDF_TIME_TT2000): raise ValueError('INT8 and TIME_TT2000 require CDF library 3.4.0') if data is None: if type is None: raise ValueError('Must provide either data or a CDF type.') if dims is None: dims = [] if n_elements is None: n_elements = 1 else: (guess_dims, guess_types, guess_elements) = _Hyperslice.types(data) if dims is None: if recVary: if guess_dims == (): raise ValueError( 'Record-varying data cannot be scalar. 
' 'Specify NRV with CDF.new() or put data in array.') dims = guess_dims[1:] else: dims = guess_dims if type is None: type = guess_types[0] if type == const.CDF_EPOCH16.value and self.backward: type = const.CDF_EPOCH if n_elements is None: n_elements = guess_elements if dimVarys is None: dimVarys = [True for i in dims] recVary = const.VARY if recVary else const.NOVARY dimVarys = [const.VARY if dimVary else const.NOVARY for dimVary in dimVarys] if not hasattr(type, 'value'): type = ctypes.c_long(type) if type.value == const.CDF_INT8.value and not lib.supports_int8: raise ValueError( '64-bit integer support require CDF library 3.4.0') if type.value in (const.CDF_EPOCH16.value, const.CDF_INT8.value, const.CDF_TIME_TT2000.value) \ and self.backward: raise ValueError('Data requires EPOCH16, INT8, or TIME_TT2000; ' 'incompatible with backward-compatible CDF') new_var = Var(self, name, type, n_elements, dims, recVary, dimVarys) if compress != None: new_var.compress(compress, compress_param) if data is not None: new_var[...] = data if hasattr(data, 'attrs'): new_var.attrs.clone(data.attrs) return new_var def raw_var(self, name): """ Get a "raw" :class:`Var` object. Normally a :class:`Var` will perform translation of values for certain types (to/from Unicode for CHAR variables on Py3k, and to/from datetime for all time types). A "raw" object does not perform this translation, on read or write. This does *not* affect the data on disk, and in fact it is possible to maintain multiple Python objects with access to the same zVariable. Parameters ========== name : str name or number of the zVariable """ v = self[name] v._raw = True return v def save(self): """ Saves the CDF file but leaves it open. If closing the CDF, :meth:`close` is sufficient; there is no need to call :meth:`save` before :meth:`close`. .. note:: Relies on an undocumented call of the CDF C library, which is also used in the Java interface. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.SAVE_, const.CDF_) def copy(self): """ Make a copy of all data and attributes in this CDF Returns ======= out : :py:class:`CDFCopy` :class:`~spacepy.datamodel.SpaceData`-like object of all data """ return CDFCopy(self) def version(self): """ Get version of library that created this CDF Returns ======= out : tuple version of CDF library, in form (version, release, increment) """ ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) self._call(const.GET_, const.CDF_VERSION_, ctypes.byref(ver), const.GET_, const.CDF_RELEASE_, ctypes.byref(rel), const.GET_, const.CDF_INCREMENT_, ctypes.byref(inc)) return (ver.value, rel.value, inc.value) def _get_attrs(self): """Get attribute list Provide access to the CDF's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = gAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. 
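        For example, setting and checking a global attribute on an open,
        writable CDF (the attribute name here is purely illustrative):

        >>> cdffile.attrs['TITLE'] = 'Sample data'
        >>> 'TITLE' in cdffile.attrs
        True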
""") def var_num(self, varname): """Get the variable number of a particular variable name This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : int Variable number of this zvariable. """ num = self._var_nums.get(varname, None) if num is None: #Copied from Var._get, which can hopefully be thinned varNum = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMBER_, varname, ctypes.byref(varNum)) num = varNum.value self._var_nums[varname] = num return num def attr_num(self, attrname): """Get the attribute number and scope by attribute name This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : tuple attribute number, scope (True for global) of this attribute """ res = self._attr_info.get(attrname, None) if res is None: #Copied from Var._get, which can hopefully be thinned attrNum = ctypes.c_long(0) self._call(const.GET_, const.ATTR_NUMBER_, attrname, ctypes.byref(attrNum)) scope = ctypes.c_long(0) self._call(const.SELECT_, const.ATTR_, attrNum, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) res = (attrNum.value, scope) self._attr_info[attrname] = res return res def clear_attr_from_cache(self, attrname): """Mark an attribute deleted in the name-to-number cache Will remove an attribute, and all attributes with higher numbers, from the attribute cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the attribute. Not this is NOT a string in Python 3! """ num, scope = self.attr_num(attrname) #All numbers higher than this are renumbered for a, n in list(self._attr_info.items()): if n[0] >= num: del self._attr_info[a] def clear_from_cache(self, varname): """Mark a variable deleted in the name-to-number cache Will remove a variable, and all variables with higher numbers, from the variable cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! """ num = self.var_num(varname) #All numbers higher than this are renumbered for v, n in list(self._var_nums.items()): if n >= num: del self._var_nums[v] def add_attr_to_cache(self, attrname, num, scope): """Add an attribute to the name-to-number cache This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable scope : bool True if global scope; False if variable scope. 
""" self._attr_info[attrname] = (num, scope) def add_to_cache(self, varname, num): """Add a variable to the name-to-number cache This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable """ self._var_nums[varname] = num #Note there is no function for delete, currently handled in Var.rename #and Attr.rename by just deleting from the dict directly. Maybe this #should be differen (maybe should be possible to follow a variable across #a rename...) class Var(MutableSequence): """ A CDF variable. This object does not directly store the data from the CDF; rather, it provides access to the data in a format that much like a Python list or numpy :class:`~numpy.ndarray`. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. The CDF user's guide, section 2.3, provides background on variables. .. note:: Not intended to be created directly; use methods of :class:`CDF` to gain access to a variable. A record-varying variable's data are viewed as a hypercube of dimensions n_dims+1 (the extra dimension is the record number). They are indexed in row-major fashion, i.e. the last index changes most frequently / is contiguous in memory. If the CDF is column-major, the data are transformed to row-major before return. Non record-varying variables are similar, but do not have the extra dimension of record number. Variables can be subscripted by a multidimensional index to return the data. Indices are in row-major order with the first dimension representing the record number. If the CDF is column major, the data are reordered to row major. Each dimension is specified by standard Python `slice <http://docs.python.org/tutorial/introduction.html#strings>`_ notation, with dimensions separated by commas. The ellipsis fills in any missing dimensions with full slices. The returned data are lists; Python represents multidimensional arrays as nested lists. The innermost set of lists represents contiguous data. .. note:: numpy 'fancy indexing' is *not* supported. Degenerate dimensions are 'collapsed', i.e. no list of only one element will be returned if a single subscript is specified instead of a range. (To avoid this, specify a slice like 1:2, which starts with 1 and ends before 2). Two special cases: 1. requesting a single-dimension slice for a record-varying variable will return all data for that record number (or those record numbers) for that variable. 2. Requests for multi-dimensional variables may skip the record-number dimension and simply specify the slice on the array itself. In that case, the slice of the array will be returned for all records. In the event of ambiguity (e.g., single-dimension slice on a one-dimensional variable), case 1 takes priority. Otherwise, mismatch between the number of dimensions specified in the slice and the number of dimensions in the variable will cause an :exc:`~exceptions.IndexError` to be thrown. This all sounds very complicated but it is essentially attempting to do the 'right thing' for a range of slices. An unusual case is scalar (zero-dimensional) non-record-varying variables. Clearly they cannot be subscripted normally. 
In this case, use the ``[...]`` syntax meaning 'access all data.': >>> import pycdf >>> testcdf = pycdf.CDF('test.cdf', '') >>> variable = testcdf.new('variable', recVary=False, ... type=pycdf.const.CDF_INT4) >>> variable[...] = 10 >>> variable <Var: CDF_INT4 [] NRV > >>> variable[...] 10 Reading any empty non-record-varying variable will return an empty with the same *number* of dimensions, but all dimensions will be of zero length. The scalar is, again, a special case: due to the inability to have a numpy array which is both zero-dimensional and empty, reading an NRV scalar variable with no data will return an empty one-dimensional array. This is really not recommended. As a list type, variables are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_; iterating over a variable returns a single complete record at a time. This is all clearer with examples. Consider a variable ``B_GSM``, with three elements per record (x, y, z components) and fifty records in the CDF. Then: 1. ``B_GSM[0, 1]`` is the y component of the first record. 2. ``B_GSM[10, :]`` is a three-element list, containing x, y, and z components of the 11th record. As a shortcut, if only one dimension is specified, it is assumed to be the record number, so this could also be written ``B_GSM[10]``. 3. ``B_GSM[...]`` reads all data for ``B_GSM`` and returns it as a fifty-element list, each element itself being a three-element list of x, y, z components. Multidimensional example: consider fluxes stored as a function of pitch angle and energy. Such a variable may be called Flux and stored as a two-dimensional array, with the first dimension representing (say) ten energy steps and the second, eighteen pitch angle bins (ten degrees wide, centered from 5 to 175 degrees). Assume 100 records stored in the CDF (i.e. 100 different times). 1. ``Flux[4]`` is a list of ten elements, one per energy step, each element being a list of 18 fluxes, one per pitch bin. All are taken from the fifth record in the CDF. 2. ``Flux[4, :, 0:4]`` is the same record, all energies, but only the first four pitch bins (roughly, field-aligned). 3. ``Flux[..., 0:4]`` is a 100-element list (one per record), each element being a ten-element list (one per energy step), each containing fluxes for the first four pitch bins. This slicing notation is very flexible and allows reading specifically the desired data from the CDF. All data are, on read, converted to appropriate Python data types; EPOCH, EPOCH16, and TIME_TT2000 types are converted to :class:`~datetime.datetime`. Data are returned in numpy arrays. .. note:: Although pycdf supports TIME_TT2000 variables, the Python :class:`~datetime.datetime` object does not support leap seconds. Thus, on read, any seconds past 59 are truncated to 59.999999 (59 seconds, 999 milliseconds, 999 microseconds). Potentially useful list methods and related functions: - `count <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `in <http://docs.python.org/reference/expressions.html#in>`_ - `index <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `len <http://docs.python.org/library/functions.html#len>`_ - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - `sorted <http://docs.python.org/library/functions.html#sorted>`_ The topic of array majority can be very confusing; good background material is available at `IDL Array Storage and Indexing <http://www.idlcoyote.com/misc_tips/colrow_major.html>`_. 
In brief, *regardless of the majority stored in the CDF*, pycdf will always present the data in the native Python majority, row-major order, also known as C order. This is the default order in `NumPy <http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html #internal-memory-layout-of-an-ndarray>`_. However, packages that render image data may expect it in column-major order. If the axes seem 'swapped' this is likely the reason. The :attr:`~Var.attrs` Python attribute acts as a dictionary referencing zAttributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`zAttrList` for more on the dictionary of attributes. With writing, as with reading, every attempt has been made to match the behavior of Python lists. You can write one record, many records, or even certain elements of all records. There is one restriction: only the record dimension (i.e. dimension 0) can be resized by write, as all records in a variable must have the same dimensions. Similarly, only whole records can be deleted. .. note:: Unusual error messages on writing data usually mean that pycdf is unable to interpret the data as a regular array of a single type matching the type and shape of the variable being written. A 5x4 array is supported; an irregular array where one row has five columns and a different row has six columns is not. Error messages of this type include: - ``Data must be well-formed, regular array of number, string, or datetime`` - ``setting an array element with a sequence.`` - ``shape mismatch: objects cannot be broadcast to a single shape`` For these examples, assume Flux has 100 records and dimensions [2, 3]. Rewrite the first record without changing the rest: >>> Flux[0] = [[1, 2, 3], [4, 5, 6]] Writes a new first record and delete all the rest: >>> Flux[...] = [[1, 2, 3], [4, 5, 6]] Write a new record in the last position and add a new record after: >>> Flux[99:] = [[[1, 2, 3], [4, 5, 6]], ... [[11, 12, 13], [14, 15, 16]]] Insert two new records between the current number 5 and 6: >>> Flux[5:6] = [[[1, 2, 3], [4, 5, 6]], [[11, 12, 13], ... [14, 15, 16]]] This operation can be quite slow, as it requires reading and rewriting the entire variable. (CDF does not directly support record insertion.) Change the first element of the first two records but leave other elements alone: >>> Flux[0:2, 0, 0] = [1, 2] Remove the first record: >>> del Flux[0] Removes record 5 (the sixth): >>> del Flux[5] Due to the need to work around a bug in the CDF library, this operation can be quite slow. Delete *all data* from ``Flux``, but leave the variable definition intact: >>> del Flux[...] .. note:: Although this interface only directly supports zVariables, zMode is set on opening the CDF so rVars appear as zVars. See p.24 of the CDF user's guide; pyCDF uses zMode 2. .. autosummary:: ~Var.attrs ~Var.compress ~Var.copy ~Var.dtype ~Var.dv ~Var.insert ~Var.name ~Var.rename ~Var.rv ~Var.shape ~Var.type .. attribute:: Var.attrs zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. .. automethod:: compress .. automethod:: copy .. autoattribute:: dtype .. automethod:: dv .. automethod:: insert .. automethod:: name .. automethod:: rename .. automethod:: rv .. autoattribute:: shape .. 
automethod:: type """ def __init__(self, cdf_file, var_name, *args): """Create or locate a variable Parameters ========== cdf_file : :py:class:`pycdf.CDF` CDF file containing this variable var_name : string name of this variable Other Parameters ================ args additional arguments passed to :py:meth:`_create`. If none, opens an existing variable. If provided, creates a new one. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning """ self.cdf_file = cdf_file #This is the definitive "identify" of variable self._name = None self._type = None #CDF type (long) self._raw = False #Raw access (skip all conversions) if len(args) == 0: self._get(var_name) else: self._create(var_name, *args) #Weak reference to attribute list (use attrs instead) #This avoids a reference loop self._attrlistref = weakref.ref(zAttrList(self)) def __getitem__(self, key): """Returns a slice from the data array. Details under :py:class:`pycdf.Var`. @return: The data from this variable @rtype: list-of-lists of appropriate type. @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) #Hyperslice mostly catches this sort of thing, but #an empty variable is a special case, since we might want to #WRITE to 0th record (which Hyperslice also supports) but #can't READ from it, and iterating over tries to read from it. if hslice.rv: if hslice.dimsizes[0] == 0 and hslice.degen[0] and \ hslice.starts[0] == 0: raise IndexError('record index out of range') #For NRV, again hslice will assume 0th record exists since we might #want to write. So ANY degenerate dim other than the glued-on 0th #suggests an explicit index that should fail. None degenerate suggests #make an empty array. #Note this is pulling a lot of hyperslice stuff into getitem! elif hslice.dimsizes[0] == 0: if len(hslice.degen) > 1 and max(hslice.degen[1:]): raise IndexError('record index out of range') else: #The zero-length dimension is degenerate so it gets chopped, #and you can't have a zero-length numpy array that still #maintains the size of all other dimensions. So just force #a zero-dim array and the rest will follow hslice.counts[...] = 0 #If this is a scalar, need to make a single non-degenerate #dimension so it can be empty. if len(hslice.counts) == 1: hslice.degen[0] = False result = hslice.create_array() if hslice.counts[0] != 0: hslice.select() lib.call(const.GET_, const.zVAR_HYPERDATA_, result.ctypes.data_as(ctypes.c_void_p)) return hslice.convert_input_array(result) def __delitem__(self, key): """Removes a record (or set of records) from the CDF Only whole records can be deleted, so the del call must either specify only one dimension or it must specify all elements of the non-record dimensions. This is *not* a way to resize a variable! Deleting records from the middle of a variable may be very slow in some circumstances. To work around a bug in CDF library versions 3.4.0 and before, all the data must be read in, the requested deletions done, and then all written back out. 
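As an illustration (the variable name and record count are hypothetical,
following the ``Flux`` example in the class documentation), whole records
can also be removed with a slice; assuming ``Flux`` has at least ten
records, this deletes records 5 through 9:

>>> del Flux[5:10]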
@param key: index or slice to delete @type key: int or slice @raise TypeError: if an attempt is made to delete from a non record-varying variable, or to delete below the record level """ if not self.rv(): raise TypeError('Cannot delete records from non-record-varying ' 'variable.') hslice = _Hyperslice(self, key) if hslice.dims > 1 and (hslice.counts[1:] != hslice.dimsizes[1:]).any(): raise TypeError('Can only delete entire records.') if hslice.counts[0] == 0: return start = hslice.starts[0] count = hslice.counts[0] interval = hslice.intervals[0] dimsize = hslice.dimsizes[0] self._call() dangerous_delete = False if lib._del_middle_rec_bug and \ (interval != 1 or (start != 0 and start + count < dimsize)): #delete from middle is dangerous if only have one index entry entries = ctypes.c_long(0) lib.call(const.GET_, const.zVAR_nINDEXENTRIES_, ctypes.byref(entries)) dangerous_delete = (entries.value == 1) if dangerous_delete: data = self[...] data = numpy.delete( data, numpy.arange(start, start + count * interval, interval), 0) self[0:dimsize - count] = data first_rec = dimsize - count last_rec = dimsize - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif interval == 1: first_rec = ctypes.c_long(start) last_rec = ctypes.c_long(start + count - 1) lib.call(const.DELETE_, const.zVAR_RECORDS_, first_rec, last_rec) else: self._call() #delete from end to avoid renumbering of records for recno in range(start + (count - 1) * interval, start - 1, -1 * interval): lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(recno), ctypes.c_long(recno)) def __setitem__(self, key, data): """Puts a slice into the data array. Details under :py:class:`pycdf.Var`. @param key: index or slice to store @type key: int or slice @param data: data to store @type data: numpy.array @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. 
IndexError will @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) n_recs = hslice.counts[0] hslice.expand(data) cdf_type = self.type() if cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) else: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=self._np_type()) if cdf_type == const.CDF_EPOCH16.value: datashape = data.shape[:-1] else: datashape = data.shape #Check data sizes if datashape != tuple(hslice.expected_dims()): raise ValueError('attempt to assign data of dimensions ' + str(datashape) + ' to slice of dimensions ' + str(tuple(hslice.expected_dims()))) #Flip majority and reversed dimensions, see convert_input_array data = hslice.convert_output_array(data) #Handle insertions and similar weirdness if hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Specified slice ends before last record, so insert in middle saved_data = self[hslice.starts[0] + n_recs:] if hslice.counts[0] > 0: hslice.select() lib.call(const.PUT_, const.zVAR_HYPERDATA_, data.ctypes.data_as(ctypes.c_void_p)) if hslice.counts[0] < n_recs: first_rec = hslice.starts[0] + hslice.counts[0] last_rec = hslice.dimsizes[0] - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Put saved data in after inserted data self[hslice.starts[0] + hslice.counts[0]:] = saved_data def extend(self, data): """ Append multiple values to the end of this variable This is an efficiency function which overrides the base implementation in MutableSequence. Parameters ---------- data : the data to append """ self[len(self):] = data def insert(self, index, data): """ Inserts a *single* record before an index Parameters ---------- index : int index before which to insert the new record data : the record to insert """ self[index:index] = [data] def _create(self, var_name, datatype, n_elements = 1, dims = (), recVary = const.VARY, dimVarys = None): """Creates a new zVariable @param var_name: name of this variable @type var_name: string @param datatype: CDF data type @type datatype: ctypes.c_long @param n_elements: number of elements (should be 1 except for CDF_CHAR variables). @type n_elements: long @param dims: size of each dimension for multi-dimensional variable, or empty for a zero-dimensional @type dims: sequence of long @param recVary: record variance for this variable (VARY/NOVARY) @type recVary: long @param dimVarys: array of VARY or NOVARY, variance for each dimension @type dimVarys: sequence of long @return: new variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.new}. 
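As a hedged sketch of the intended route, the same parameters are normally
supplied through L{CDF.new}; the variable name and sizes here are purely
illustrative, and this assumes the ``type`` and ``dims`` keywords of
L{CDF.new}:

>>> flux = cdf.new('Flux', type=pycdf.const.CDF_FLOAT, dims=[2, 3])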
""" dim_array = (ctypes.c_long * len(dims))(*dims) enc_name = var_name.encode('ascii') if dimVarys is None: dim_vary_array = (ctypes.c_long * (len(dims) if len(dims) > 0 else 1))(const.VARY) else: dim_vary_array = (ctypes.c_long * len(dims))(*dimVarys) varNum = ctypes.c_long(0) self.cdf_file._call(const.CREATE_, const.zVAR_, enc_name, datatype, ctypes.c_long(n_elements), ctypes.c_long(len(dims)), dim_array, recVary, dim_vary_array, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) def _delete(self): """Removes this zVariable from the CDF @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ self._call(const.DELETE_, const.zVAR_) self.cdf_file.clear_from_cache(self._name) self._name = None def _get(self, var_name): """Gets an existing zVariable @param var_name: name of this variable @type var_name: string @return: variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.__getitem__}. """ if isinstance(var_name, str_classes): try: enc_name = var_name.encode('ascii').rstrip() except AttributeError: enc_name = var_name.rstrip() #already in ASCII #'touch' CDF to cause an error if the name isn't there; get number varNum = ctypes.c_long(0) self.cdf_file._call(const.GET_, const.zVAR_NUMBER_, enc_name, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) else: #Looking up by number name = ctypes.create_string_buffer(const.CDF_VAR_NAME_LEN256+1) self.cdf_file._call(const.SELECT_, const.zVAR_, ctypes.c_long(var_name), const.GET_, const.zVAR_NAME_, name) self._name = name.value.rstrip() self.cdf_file.add_to_cache(self._name, var_name) def _num(self): """Returns the zVar number for this variable @return: number of this zVar @rtype: int """ return self.cdf_file.var_num(self._name) def __len__(self): """Get number of records for this variable in this file @return: Number of records @rtype: long @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ count = ctypes.c_long(0) self._call(const.GET_, const.zVAR_MAXREC_, ctypes.byref(count)) return (count.value + 1) def __repr__(self): """Returns representation of the variable Cannot return anything that can be eval'd to create a copy, so just wrap the informal representation in angle brackets. @return: info on this zVar @rtype: str """ return '<Var:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the variable This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.Var`. 
@return: info on this zVar, CDFTYPE [dimensions] NRV (if not record-varying) @rtype: str """ if self.cdf_file._opened: cdftype = self.type() chartypes = (const.CDF_CHAR.value, const.CDF_UCHAR.value) rv = self.rv() typestr = lib.cdftypenames[cdftype] + \ ('*' + str(self._nelems()) if cdftype in chartypes else '' ) if rv: sizestr = str([len(self)] + self._dim_sizes()) else: sizestr = str(self._dim_sizes()) return typestr + ' ' + sizestr + ('' if rv else ' NRV') else: if isinstance(self._name, str): return 'zVar "{0}" in closed CDF {1}'.format( self._name, self.cdf_file.pathname) else: return 'zVar "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self.cdf_file.pathname.decode('ascii')) def _n_dims(self): """Get number of dimensions for this variable @return: the number of dimensions @rtype: long """ n_dims = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMDIMS_, ctypes.byref(n_dims)) return n_dims.value def _dim_sizes(self): """Get the dimension sizes for this variable @return: sequence of sizes @rtype: sequence of long @note: This will always be in Python order (i.e. row major, last index iterates most quickly), *regardless* of the majority of the CDF. """ sizes = (ctypes.c_long * const.CDF_MAX_DIMS)(0) self._call(const.GET_, const.zVAR_DIMSIZES_, sizes) sizes = sizes[0:self._n_dims()] return sizes def rv(self, new_rv=None): """ Gets or sets whether this variable has record variance If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Other Parameters ================ new_rv : boolean True to change to record variance, False to change to NRV, unspecified to simply check variance. Returns ======= out : Boolean True if record varying, False if NRV """ if new_rv != None: self._call(const.PUT_, const.zVAR_RECVARY_, const.VARY if new_rv else const.NOVARY) vary = ctypes.c_long(0) self._call(const.GET_, const.zVAR_RECVARY_, ctypes.byref(vary)) return vary.value != const.NOVARY.value def dv(self, new_dv=None): """ Gets or sets dimension variance of each dimension of variable. If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Parameters ========== new_dv : list of boolean Each element True to change that dimension to dimension variance, False to change to not dimension variance. (Unspecified to simply check variance.) Returns ======= out : list of boolean True if that dimension has variance, else false. """ ndims = self._n_dims() if new_dv != None: if len(new_dv) != ndims: raise ValueError('Must specify variance for ' + str(ndims) + 'dimensions.') varies = (ctypes.c_long * ndims)( *[const.VARY if dv else const.NOVARY for dv in new_dv]) self._call(const.PUT_, const.zVAR_DIMVARYS_, varies) if ndims == 0: return [] varies = (ctypes.c_long * const.CDF_MAX_DIMS)() self._call(const.GET_, const.zVAR_DIMVARYS_, varies) return [dv != const.NOVARY.value for dv in varies[0:ndims]] def _call(self, *args, **kwargs): """Select this CDF and variable and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. @param args: Passed directly to the CDF library interface. Useful constants are defined in the :py:mod:`pycdf.const` module of this package. @type args: various, see :py:mod:`ctypes`. 
@return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self.cdf_file._call( const.SELECT_, const.zVAR_, ctypes.c_long(self.cdf_file.var_num(self._name)), *args, **kwargs) def _np_type(self): """Returns the numpy type of this variable This is the numpy type that will come directly out of the CDF; see :meth:`dtype` for the representation post-conversion. Raises ====== CDFError : for library-reported error or failure to find numpy type Returns ======= out : dtype numpy dtype that will hold value from this variable """ cdftype = self.type() if cdftype == const.CDF_CHAR.value or cdftype == const.CDF_UCHAR.value: return numpy.dtype('S' + str(self._nelems())) try: return lib.numpytypedict[cdftype] except KeyError: raise CDFError(const.BAD_DATA_TYPE) def type(self, new_type=None): """ Returns or sets the CDF type of this variable Parameters ========== new_type : ctypes.c_long the new type from :mod:`~pycdf.const` Returns ======= out : int CDF type """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) n_elements = ctypes.c_long(self._nelems()) self._call(const.PUT_, const.zVAR_DATASPEC_, new_type, n_elements) self._type = None if self._type is None: cdftype = ctypes.c_long(0) self._call(const.GET_, const.zVAR_DATATYPE_, ctypes.byref(cdftype)) self._type = cdftype.value return self._type def _nelems(self): """Number of elements for each value in this variable This is the length of strings for CHAR and UCHAR, should be 1 otherwise. @return: length of strings @rtype: int """ nelems = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMELEMS_, ctypes.byref(nelems)) return nelems.value def name(self): """ Returns the name of this variable Returns ======= out : str variable's name """ if isinstance(self._name, str): return self._name elif isinstance(self._name, bytes): return self._name.decode() def compress(self, comptype=None, param=None): """ Set or check the compression of this variable Compression may not be changeable on variables with data already written; even deleting the data may not permit the change. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
Returns ======= out : tuple the (comptype, param) currently in effect """ return _compress(self, comptype, param) def copy(self): """ Copies all data and attributes from this variable Returns ======= out : :class:`VarCopy` list of all data in record order """ return VarCopy(self) def rename(self, new_name): """ Renames this variable Parameters ========== new_name : str the new name for this variable """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_VAR_NAME_LEN256: raise CDFError(const.BAD_VAR_NAME) self._call(const.PUT_, const.zVAR_NAME_, enc_name) self.cdf_file.add_to_cache( enc_name, self.cdf_file.var_num(self._name)) #Still in cache del self.cdf_file._var_nums[self._name] self._name = enc_name @property def shape(self): """ Provides the numpy array-like shape of this variable. Returns a tuple; first element is number of records (RV variable only) And the rest provide the dimensionality of the variable. .. note:: Assigning to this attribute will not change the shape. """ if self.rv(): return tuple([len(self)] + self._dim_sizes()) else: return tuple(self._dim_sizes()) @property def dtype(self): """ Provide the numpy dtype equivalent to the CDF type of this variable. Data from this variable will be returned in numpy arrays of this type. See Also -------- type """ cdftype = self.type() if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str is not bytes and not self._raw: return numpy.dtype('U' + str(self._nelems())) if cdftype in (const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value) and not self._raw: return numpy.dtype('O') return self._np_type() def _get_attrs(self): """Get attribute list Provide access to the zVar's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = zAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. """) class _Hyperslice(object): """Represents a CDF 'slice' used for the hyper CDF functions For internal module use only. @ivar dims: number of dimensions to this slice, usually number of dimensions to the variable plus one for the record, which represents the 0th (least rapidly varying) dimension. @type dims: int @ivar dimsizes: size of each dimension (0th is number of records) @type dimsizes: list of int @ivar starts: index of the start value for each dimension ('dimension indices' in CDF speak) @type starts: list of int @ivar counts: number of values to get from each dimension. Final result will be the product of everything in counts. ('dimension counts' in CDF speak) @type counts: numpy.array @ivar intervals: interval between successive indices to use for each dimension. ('dimension invervals' in CDF speak) @type intervals: list of int @ivar degen: is this dimension degenerate, i.e. should be removed in the returned dataset. A 3D array with one dimension degenerate will be returned as a 2D array (i.e. list-of-lists.) @type degen: numpy.array @ivar rev: should this dimension be returned in reverse order? 
@type rev: numpy.array @ivar column: is this slice in column-major mode (if false, row-major) @type column: boolean @ivar zvar: what CDF variable this object slices on @type zvar: :py:class:`pycdf.Var` @ivar expanded_key: fully-expanded version of the key passed to the constructor (all dimensions filled in) @type expanded_key: tuple @note: All dimension-related variables are stored row-major (Python order) """ def __init__(self, zvar, key): """Create a Hyperslice @param zvar: zVariable that this slices @type zvar: :py:class:`pycdf.Var` @param key: Python multi-dimensional slice as passed to __getitem__ @type key: tuple of slice and/or int @raise IndexError: if slice is out of range, mismatches dimensions, or otherwise unparsable. @raise ValueError: if slice has invalid values """ self.zvar = zvar self.rv = self.zvar.rv() #dim of records, + 1 record dim (NRV always is record 0) self.dims = zvar._n_dims() + 1 self.dimsizes = [len(zvar)] + \ zvar._dim_sizes() self.starts = [0] * self.dims self.counts = numpy.empty((self.dims,), dtype=numpy.int32) self.counts.fill(1) self.intervals = [1] * self.dims self.degen = numpy.zeros(self.dims, dtype=numpy.bool) self.rev = numpy.zeros(self.dims, dtype=numpy.bool) #key is: #1. a single value (integer or slice object) if called 1D #2. a tuple (of integers and/or slice objects) if called nD #3. Each item is either a single value (degenerate dim) # or a slice object. if not hasattr(key, '__len__'): #Not a container object, pack in tuple key = (key, ) if not self.rv: key = (0, ) + key #NRV, so always get 0th record (degenerate) key = self.expand_ellipsis(key, self.dims) if self.rv: #special-cases for RV variables if len(key) == 1: #get all data for this record(s) key = self.expand_ellipsis(key + (Ellipsis, ), self.dims) elif len(key) == self.dims - 1: #get same slice from each record key = (slice(None, None, None), ) + key if len(key) == self.dims: self.expanded_key = key for i in range(self.dims): idx = key[i] if hasattr(idx, 'start'): #slice (self.starts[i], self.counts[i], self.intervals[i], self.rev[i]) = \ self.convert_range(idx.start, idx.stop, idx.step, self.dimsizes[i]) else: #Single degenerate value if idx < 0: idx += self.dimsizes[i] if idx != 0 and (idx >= self.dimsizes[i] or idx < 0): raise IndexError('list index out of range') self.starts[i] = idx self.degen[i] = True else: raise IndexError('Slice does not match dimensions for zVar ' + str(zvar._name)) self.column = zvar.cdf_file.col_major() def expected_dims(self, data=None): """Calculate size of non-degenerate dimensions Figures out size, in each dimension, of expected input data @return: size of each dimension for this slice, excluding degenerate @rtype: list of int """ return [self.counts[i] for i in range(self.dims) if not self.degen[i]] def expand(self, data): """Expands the record dimension of this slice to hold a set of data If the length of data (outermost dimension) is larger than the record count (counts[0]) for this slice, expand the slice to hold all the data. This requires that the record dimension of the slice not be degenerate, and also that it not have been completely specified when the hyperslice was created (i.e. record dimension either ellipsis or no specified stop.) Does *not* expand any other dimension, since that's Very Hard in CDF. 
@param data: the data which are intended to be stored in this slice @type data: list """ rec_slice = self.expanded_key[0] if not self.rv or isinstance(data, str_classes) or self.degen[0] or \ not hasattr(rec_slice, 'stop'): return if len(data) < self.counts[0]: #Truncate to fit data if rec_slice.stop is None and rec_slice.step in (None, 1): self.counts[0] = len(data) elif len(data) > self.counts[0]: #Expand to fit data if rec_slice.step in (None, 1): self.counts[0] = len(data) def create_array(self): """Creates a numpy array to hold the data from this slice Returns ======= out : numpy.array array sized, typed, and dimensioned to hold data from this slice """ counts = self.counts degen = self.degen if self.column: counts = self.reorder(counts) degen = self.reorder(degen) #TODO: Forcing C order for now, revert to using self.column later array = numpy.empty( [counts[i] for i in range(len(counts)) if not degen[i]], self.zvar._np_type(), order='C') return numpy.require(array, requirements=('C', 'A', 'W')) def convert_input_array(self, buffer): """Converts a buffer of raw data from this slice EPOCH(16) variables always need to be converted. CHAR need converted to Unicode if py3k Parameters ========== buffer : numpy.array data as read from the CDF file Returns ======= out : numpy.array converted data """ result = self._flip_array(buffer) #Convert to derived types cdftype = self.zvar.type() if not self.zvar._raw: if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str != bytes: dt = numpy.dtype('U{0}'.format(result.dtype.itemsize)) result = numpy.require(numpy.char.array(result).decode(), dtype=dt) elif cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(result) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(result) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(result) return result def convert_output_array(self, buffer): """Convert a buffer of data that will go into this slice Parameters ========== buffer : numpy.array data to go into the CDF file Returns ======= out : numpy.array input with majority flipped and dimensions reversed to be suitable to pass directly to CDF library. """ buffer = self._flip_array(buffer) return numpy.require(buffer, requirements=('C', 'A', 'W')) def _flip_array(self, data): """ Operations for majority, etc. common between convert_input and _output """ cdftype = self.zvar.type() #Flip majority if any non-degenerate dimensions exist if self.column and not min(self.degen): #Record-number dim degen, swap whole thing if self.degen[0]: if cdftype == const.CDF_EPOCH16.value: #Maintain last dimension data = data.transpose( list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose() #Record-number dimension is not degenerate, so keep it first else: if cdftype == const.CDF_EPOCH16.value: data = data.transpose( [0] + list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose( [0] + list(range(len(data.shape) - 1, 0, -1))) #Reverse non-degenerate dimensions in rev #Remember that the degenerate indices are already gone! 
if self.rev.any(): sliced = [(slice(None, None, -1) if self.rev[i] else slice(None)) for i in range(self.dims) if not self.degen[i]] if cdftype == const.CDF_EPOCH16.value: #don't reverse last dim sliced.extend(slice(None)) data = operator.getitem(data, tuple(sliced)) return data def select(self): """Selects this hyperslice in the CDF Calls the CDF library to select the CDF, variable, records, and array elements corresponding to this slice. """ args = (const.SELECT_, const.zVAR_RECNUMBER_, ctypes.c_long(self.starts[0]), const.SELECT_, const.zVAR_RECCOUNT_, ctypes.c_long(self.counts[0]), const.SELECT_, const.zVAR_RECINTERVAL_, ctypes.c_long(self.intervals[0])) if self.dims > 1: dims = self.dims - 1 args += (const.SELECT_, const.zVAR_DIMINDICES_, (ctypes.c_long * dims)(*self.starts[1:]), const.SELECT_, const.zVAR_DIMCOUNTS_, (ctypes.c_long * dims)(*self.counts[1:]), const.SELECT_, const.zVAR_DIMINTERVALS_, (ctypes.c_long * dims)(*self.intervals[1:])) self.zvar._call(*args) @staticmethod def expand_ellipsis(slices, n_dims): """Expands any ellipses into correct number of full-size slices @param slices: tuple of slices, integers, or ellipse objects @type slices: tuple @param n_dims: number of dimensions this slice is over @type n_dims: int @return: L{slices} with ellipses replaced by appropriate number of full-dimension slices @rtype: tuple @raise IndexError: if ellipses specified when already have enough dimensions """ if slices is Ellipsis: return tuple([slice(None, None, None) for i in range(n_dims)]) #Elements might be numpy arrays, so can't use in/index idx = [i for i, v in enumerate(slices) if v is Ellipsis] if not idx: #no ellipsis return slices if len(idx) > 1: #multiples! raise IndexError('Ellipses can only be used once per slice.') idx = idx[0] #how many dims to expand ellipsis to #remember the ellipsis is in len(slices) and must be replaced! extra = n_dims - len(slices) + 1 if extra < 0: raise IndexError('too many indices') result = slices[0:idx] + (slice(None), ) * extra + slices[idx+1:] return result @staticmethod def check_well_formed(data): """Checks if input data is well-formed, regular array""" d = numpy.asanyarray(data) if d.dtype == numpy.object: #this is probably going to be bad try: len(d.flat[0]) except TypeError: #at least it's not a list pass else: raise ValueError( 'Data must be well-formed, regular array of number, ' 'string, or datetime') @staticmethod def dimensions(data): """Finds the dimensions of a nested list-of-lists @param data: data of which dimensions are desired @type data: list (of lists) @return: dimensions of L{data}, in order outside-in @rtype: list of int @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) _Hyperslice.check_well_formed(d) return d.shape @staticmethod def types(data, backward=False): """Find dimensions and valid types of a nested list-of-lists Any given data may be representable by a range of CDF types; infer the CDF types which can represent this data. This breaks down to: 1. Proper kind (numerical, string, time) 2. Proper range (stores highest and lowest number) 3. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: 1. Type that matches precision of data first, then 2. integer type before float type, then 3. Smallest type first, then 4. signed type first, then 5. specifically-named (CDF_BYTE) vs. 
generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if L{data} specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: 1. absolute values between 0 and 3e-39 2. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. @param data: data for which dimensions and CDF types are desired @type data: list (of lists) @param backward: limit to pre-CDF3 types @type backward: bool @return: dimensions of L{data}, in order outside-in; CDF types which can represent this data; number of elements required (i.e. length of longest string) @rtype: 3-tuple of lists ([int], [ctypes.c_long], [int]) @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) dims = d.shape elements = 1 types = [] _Hyperslice.check_well_formed(d) if d.dtype.kind in ('S', 'U'): #it's a string types = [const.CDF_CHAR, const.CDF_UCHAR] elements = d.dtype.itemsize if d.dtype.kind == 'U': #UTF-8 uses 4 bytes per elements //= 4 elif d.size and hasattr(numpy.ma.getdata(d).flat[0], 'microsecond'): if max((dt.microsecond % 1000 for dt in d.flat)) > 0: types = [const.CDF_EPOCH16, const.CDF_EPOCH, const.CDF_TIME_TT2000] else: types = [const.CDF_EPOCH, const.CDF_EPOCH16, const.CDF_TIME_TT2000] if backward: del types[types.index(const.CDF_EPOCH16)] del types[-1] elif not lib.supports_int8: del types[-1] elif d is data or isinstance(data, numpy.generic): #numpy array came in, use its type (or byte-swapped) types = [k for k in lib.numpytypedict if (lib.numpytypedict[k] == d.dtype or lib.numpytypedict[k] == d.dtype.newbyteorder()) and not k in lib.timetypes] if (not lib.supports_int8 or backward) \ and const.CDF_INT8.value in types: del types[types.index(const.CDF_INT8.value)] #Maintain priority to match the ordered lists below: #float/double (44, 45) before real (21/22), and #byte (41) before int (1) before char (51). So hack. #Consider making typedict an ordered dict once 2.6 is dead. types.sort(key=lambda x: x % 50, reverse=True) if not types: #not a numpy array, or can't parse its type if d.dtype.kind == 'O': #Object. 
Try to make it numeric #Can't do safe casting from Object, so try and compare #Basically try most restrictive to least restrictive trytypes = (numpy.uint64, numpy.int64, numpy.float64) for t in trytypes: try: newd = d.astype(dtype=t) except: #Failure to cast, try next type continue if (newd == d).all(): #Values preserved, use this type d = newd #Continue with normal guessing, as if a list break else: #fell through without a match raise ValueError( 'Cannot convert generic objects to CDF type.') if d.dtype.kind in ('i', 'u'): #integer minval = numpy.min(d) maxval = numpy.max(d) if minval < 0: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_INT2, const.CDF_INT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 15, 2 ** 31, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] else: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_UINT1, const.CDF_INT2, const.CDF_UINT2, const.CDF_INT4, const.CDF_UINT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 8, 2 ** 15, 2 ** 16, 2 ** 31, 2 ** 32, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] types = [t for (t, c) in zip(types, cutoffs) if c > maxval and (minval >= 0 or minval >= -c)] if (not lib.supports_int8 or backward) \ and const.CDF_INT8 in types: del types[types.index(const.CDF_INT8)] else: #float if dims is (): if d != 0 and (abs(d) > 1.7e38 or abs(d) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] else: absolutes = numpy.abs(d[d != 0]) if len(absolutes) > 0 and \ (numpy.max(absolutes) > 1.7e38 or numpy.min(absolutes) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] types = [t.value if hasattr(t, 'value') else t for t in types] return (dims, types, elements) @staticmethod def reorder(seq): """Reorders seq to switch array majority Used to take an array of subscripts between row and column majority. First element is not touched, being the record number. @param seq: a sequence of *subscripts* @type seq: sequence of integers @return: seq with all but element 0 reversed in order @rtype: sequence of integers """ return numpy.concatenate((seq[0:1], numpy.flipud(seq)[:-1])) @staticmethod def convert_range(start, stop, step, size): """Converts a start/stop/step range to start/count/interval (i.e. changes from Python-style slice to CDF-style) @param start: index to start a slice at, may be none or negative @type start: int @param stop: index at end of slice (one-past, standard Python), may be none or negative @type stop: int @param step: interval for stepping through stlice @type step: int @param size: size of list to slice @type size: int @return: (start, count, interval, rev) where: 1. start is the start index, normalized to be within the size of the list and negatives handled 2. count is the number of records in the slice, guaranteed to stop before the end 3. interval is the skip between records 4. rev indicates whether the sequence should be reversed @rtype: (int, int, int, boolean) """ (start, stop, step) = slice(start, stop, step).indices(size) if step < 0: step *= -1 count = int((start - stop + step - 1) / step) start = start - (count - 1) * step rev = True else: count = int((stop - start + step - 1) / step) rev = False if count < 0: count = 0 start = 0 return (start, count, step, rev) class Attr(MutableSequence): """An attribute, g or z, for a CDF .. 
warning:: This class should not be used directly, but only in its subclasses, :class:`gAttr` and :class:`zAttr`. The methods listed here are safe to use in the subclasses. Represents a CDF attribute, providing access to the Entries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. An introduction to CDF attributes can be found in section 2.4 of the CDF user's guide. Each element of the list is a single Entry of the appropriate type. The index to the elements is the Entry number. Multi-dimensional slicing is *not* supported; an Entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry .. autosummary:: ~Attr.append ~Attr.has_entry ~Attr.insert ~Attr.max_idx ~Attr.new ~Attr.number ~Attr.rename ~Attr.type .. automethod:: append .. automethod:: has_entry .. automethod:: insert .. automethod:: max_idx .. automethod:: new .. automethod:: number .. automethod:: rename .. automethod:: type """ def __init__(self, cdf_file, attr_name, create=False): """Initialize this attribute @param cdf_file: CDF file containing this attribute @type cdf_file: :py:class:`pycdf.CDF` @param attr_name: Name of this attribute @type attr_name: str @param create: True to create attribute, False to look up existing. @type create: bool """ self._cdf_file = cdf_file self._raw = False if isinstance(attr_name, str_classes): try: self._name = attr_name.encode('ascii') except AttributeError: self._name = attr_name attrno = ctypes.c_long() if create: self._cdf_file._call(const.CREATE_, const.ATTR_, self._name, self.SCOPE, ctypes.byref(attrno)) self._cdf_file.add_attr_to_cache( self._name, attrno.value, self.SCOPE == const.GLOBAL_SCOPE) else: #Ensure exists, and populate cache. See scope note below attrno, scope = self._cdf_file.attr_num(self._name) else: name = ctypes.create_string_buffer(const.CDF_ATTR_NAME_LEN256 + 1) scope = ctypes.c_long(0) self._cdf_file._call(const.SELECT_, const.ATTR_, ctypes.c_long(attr_name)) #Because it's possible to create a gAttr Python objecting #referencing an Attribute with variable scope, and vice-versa, #do NOT assume the scope matches #(Higher level code checks for that being a bad thing.) self._cdf_file._call( const.GET_, const.ATTR_NAME_, name, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) self._name = name.value.rstrip() if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) self._cdf_file.add_attr_to_cache(self._name, attr_name, scope) def __getitem__(self, key): """Return a slice of Entries. Because Attributes may be sparse, a multi-element slice will return None for those elements which do not have associated Entries. @param key: index or range of Entry number to return @type key: slice or int @return: a list of entries, appropriate type. @raise IndexError: if L{key} is an int and that Entry number does not exist. 
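Illustrative sketch only (Entry numbers and values are made up); an Entry
that was never written comes back as None when the attribute is sliced:

>>> attribute[0:3] #Entries 0 and 2 exist, Entry 1 does not
['the zeroth entry', None, 'the second entry']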
""" if key is Ellipsis: key = slice(None, None, None) if hasattr(key, 'indices'): idx = range(*key.indices(self.max_idx() + 1)) return [self._get_entry(i) if self.has_entry(i) else None for i in idx] else: if self.has_entry(key): return self._get_entry(key) else: raise IndexError('list index ' + str(key) + ' out of range.') def _check_other_entries(self, types): """Try to get the type of this entry from others in the Attribute For zAttrs, checks if all other Entries are the same type, and at least one doesn't match its zVar, i.e. Entry type dominates (otherwise assumption is the Var type dominates). For gAttrs, checks all other Entries, and gives priority to the one that's earliest in the possible type list and exists in other Entries. This is only one component of Entry type guessing! :param list types: CDF types that are candidates (match the data) :return: The type discerned from other Entries, or None """ if self.ENTRY_ == const.zENTRY_: #If everything else is the same entry type, #and one is not the same as its var, probably #all entries should be of that type cand_et = None #The Entry type that might work one_var_diff = False #One Var has a type different from Entry for num in range(self.max_idx() + 1): if not self.has_entry(num): continue vartype = self._cdf_file[num].type() entrytype = self.type(num) if vartype != entrytype: one_var_diff = True if cand_et is None: if not entrytype in types: return None #One var has Entry with "impossible" type cand_et = entrytype elif cand_et != entrytype: return None #Two vars have Entries with different types if one_var_diff and cand_et is not None: return cand_et else: # Of those types which exist in other entries, # find the one which is earliest # in types, i.e. the preferred type entrytypes = [self.type(num) for num in range(self.max_idx() + 1) if self.has_entry(num)] entrytypes = [et for et in entrytypes if et in types] if entrytypes: return types[ min([types.index(et) for et in entrytypes])] return None def __setitem__(self, key, data): """Set a slice of Entries. @param key: index or range of Entry numbers to set @type key: slice or int @param data: the data to set these entries to. Normally each entry should be a sequence; if a scalar is provided, it is treated as a single-element list. @type data: scalar or list @raise ValueError: if size of {data} does not match size of L{key} @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. 
""" if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): #Single value, promote everything a dimension idx = (key, key + 1, 1) data = [data] else: idx = key.indices(self.max_idx() + 1) if key.step is None or key.step > 0: #Iterating forward, extend slice to match data if len(data) > len(range(*idx)): idx = (idx[0], idx[0] + idx[2] * len(data), idx[2]) #get, and check, types and sizes for all data #checks first so don't have error after changing half the Entries data_idx = -1 typelist = [] for i in range(*idx): data_idx += 1 if data_idx >= len(data): continue datum = data[data_idx] if datum is None: typelist[i] = (None, None, None) continue (dims, types, elements) = _Hyperslice.types( datum, backward=self._cdf_file.backward) if len(types) <= 0: raise ValueError('Cannot find a matching CDF type.') if len(dims) > 1: raise ValueError('Entries must be scalar or 1D.') elif len(dims) == 1 and isinstance(datum[0], str_classes): raise ValueError('Entry strings must be scalar.') entry_type = None if self.has_entry(i): #If the entry already exists, match its type entry_type = self.type(i) if not entry_type in types: entry_type = None if entry_type is None: #Check other entries for this attribute entry_type = self._check_other_entries(types) if entry_type is None and self.ENTRY_ == const.zENTRY_: #Fall back to zVar type vartype = self._cdf_file[i].type() if vartype in types: entry_type = vartype else: entry_type = types[0] elif entry_type is None: entry_type = types[0] if not entry_type in lib.numpytypedict: raise ValueError('Cannot find a matching numpy type.') typelist.append((dims, entry_type, elements)) data_idx = -1 for i in range(*idx): data_idx += 1 if data_idx >= len(data) or data[data_idx] is None: if self.has_entry(i): del self[i] continue datum = data[data_idx] (dims, entry_type, elements) = typelist[data_idx] self._write_entry(i, datum, entry_type, dims, elements) def __delitem__(self, key): """Delete a slice of Entries. @param key: index or range of Entry numbers to delete @type key: slice or int @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. """ if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): idx = (key, key + 1, 1) else: idx = key.indices(self.max_idx() + 1) for i in range(*idx): self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(i), const.DELETE_, self.ENTRY_) def __iter__(self, current=0): """Iterates over all entries in this Attribute Returns data from one entry at a time until reaches the end. @note: Returned in entry-number order. """ while current <= self.max_idx(): if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current += 1 def __reversed__(self, current=None): """Iterates over all entries in this Attribute Returns data from one entry at a time, starting at end and going to beginning. @note: Returned in entry-number order. """ if current is None: current = self.max_idx() while current >= 0: if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current -= 1 def __len__(self): """Number of Entries for this Attr. NOT same as max Entry number. 
@return: Number of Entries @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_NUMENTRIES_, ctypes.byref(count)) return count.value def __repr__(self): """Returns representation of an attribute Cannot return anything that can be eval'd to create a copy of the attribtute, so just wrap the informal representation in angle brackets. @return: all the data in this attribute @rtype: str """ return '<\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute This is an 'informal' representation in that it cannot be evaluated directly to create an L{Attr}. @return: all the data in this attribute @rtype: str """ if self._cdf_file._opened: return '\n'.join([str(item) for item in self]) else: if isinstance(self._name, str): return 'Attribute "{0}" in closed CDF {1}'.format( self._name, self._cdf_file.pathname) else: return 'Attribute "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self._cdf_file.pathname.decode('ascii')) def insert(self, index, data): """Insert an entry at a particular number Inserts entry at particular number while moving all subsequent entries to one entry number later. Does not close gaps. Parameters ========== index : int index where to put the new entry data : data for the new entry """ max_entry = self.max_idx() if index > max_entry: #Easy case self[index] = data return for i in range(max_entry, index - 1, -1): if self.has_entry(i+1): self.__delitem__(i+1) if self.has_entry(i): self.new(self.__getitem__(i), type=self.type(i), number=i+1) self[index] = data def append(self, data): """Add an entry to end of attribute Puts entry after last defined entry (does not fill gaps) Parameters ========== data : data for the new entry """ self[self.max_idx() + 1] = data def _call(self, *args, **kwargs): """Select this CDF and Attr and call the CDF internal interface @param args: Passed directly to the CDF library interface. @type args: various, see :py:mod:`ctypes`. @return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self._cdf_file._call( const.SELECT_, const.ATTR_, ctypes.c_long(self._cdf_file.attr_num(self._name)[0]), *args, **kwargs) def _entry_len(self, number): """Number of elements in an Entry @param number: number of Entry @type number: int @return: number of elements @rtype: int """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') count = ctypes.c_long(0) self._call( const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_NUMELEMS_, ctypes.byref(count)) return count.value def type(self, number, new_type=None): """Find or change the CDF type of a particular Entry number Parameters ========== number : int number of Entry to check or change Other Parameters ================ new_type type to change this Entry to, from :mod:`~pycdf.const`. Omit to only check type. Returns ======= out : int CDF variable type, see :mod:`~pycdf.const` Notes ===== If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) size = ctypes.c_long(self._entry_len(number)) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATASPEC_, new_type, size, ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') cdftype = ctypes.c_long(0) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATATYPE_, ctypes.byref(cdftype), ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') return cdftype.value def has_entry(self, number): """Check if this attribute has a particular Entry number Parameters ========== number : int number of Entry to check or change Returns ======= out : bool True if ``number`` is a valid entry number; False if not """ status = self._call(const.CONFIRM_, self.ENTRY_EXISTENCE_, ctypes.c_long(number), ignore=(const.NO_SUCH_ENTRY, )) return not status == const.NO_SUCH_ENTRY def max_idx(self): """Maximum index of Entries for this Attr Returns ======= out : int maximum Entry number """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_MAXENTRY_, ctypes.byref(count)) return count.value def new(self, data, type=None, number=None): """Create a new Entry in this Attribute .. note:: If ``number`` is provided and an Entry with that number already exists, it will be overwritten. Parameters ========== data data to put in the Entry Other Parameters ================ type : int type of the new Entry, from :mod:`~pycdf.const` (otherwise guessed from ``data``) number : int Entry number to write, default is lowest available number. """ if number is None: number = 0 while self.has_entry(number): number += 1 (dims, types, elements) = _Hyperslice.types( data, backward=self._cdf_file.backward) if type is None: #Guess based on other entries type = self._check_other_entries(types) if type is None and self.ENTRY_ == const.zENTRY_: #Try to match variable type vartype = self._cdf_file[number].type() if vartype in types: type = vartype if type is None: type = types[0] elif hasattr(type, 'value'): type = type.value self._write_entry(number, data, type, dims, elements) def number(self): """Find the attribute number for this attribute Returns ======= out : int attribute number """ no = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.ATTR_NUMBER_, self._name, ctypes.byref(no)) return no.value def global_scope(self): """Determine scope of this attribute. Returns ======= out : bool True if global (i.e. gAttr), False if zAttr """ return self._cdf_file.attr_num(self._name)[1] def rename(self, new_name): """Rename this attribute Renaming a zAttribute renames it for *all* zVariables in this CDF! 
Parameters ========== new_name : str the new name of the attribute """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_ATTR_NAME_LEN256: raise CDFError(const.BAD_ATTR_NAME) self._call(const.PUT_, const.ATTR_NAME_, enc_name) self._cdf_file.add_attr_to_cache( enc_name, *self._cdf_file.attr_num(self._name)) #still in cache del self._cdf_file._attr_info[self._name] self._name = enc_name def _get_entry(self, number): """Read an Entry associated with this L{Attr} @param number: number of Entry to return @type number: int @return: data from entry numbered L{number} @rtype: list or str """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') #Make a big enough buffer length = self._entry_len(number) cdftype = self.type(number) if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): buff = numpy.empty((), 'S{0}'.format(length), order='C') else: if not cdftype in lib.numpytypedict: raise CDFError(const.BAD_DATA_TYPE) buff = numpy.empty((length,), lib.numpytypedict[cdftype], order='C') buff = numpy.require(buff, requirements=('C', 'A', 'W')) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATA_, buff.ctypes.data_as(ctypes.c_void_p)) #decode if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): if str == bytes or self._raw: #Py2k, leave as bytes result = bytes(buff) else: #Py3k, make unicode result = str(numpy.char.array(buff).decode()) else: if not self._raw: if cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(buff) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(buff) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(buff) else: result = buff else: result = buff if length == 1: result = result[0] return result def _write_entry(self, number, data, cdf_type, dims, elements): """Write an Entry to this Attr. @param number: number of Entry to write @type number: int @param data: data to write @param cdf_type: the CDF type to write, from :py:mod:`pycdf.const` @param dims: dimensions of L{data} @type dims: list @param elements: number of elements in L{data}, 1 unless it is a string @type elements: int """ if len(dims) == 0: n_write = 1 else: n_write = dims[0] if cdf_type in (const.CDF_CHAR.value, const.CDF_UCHAR.value): data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.dtype('S' + str(elements))) n_write = elements elif cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data), except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) elif cdf_type in lib.numpytypedict: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=lib.numpytypedict[cdf_type]) else: raise CDFError(const.BAD_DATA_TYPE) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATA_, ctypes.c_long(cdf_type), ctypes.c_long(n_write), data.ctypes.data_as(ctypes.c_void_p)) def _delete(self): """Delete this Attribute Also deletes all Entries associated with it. 
""" self._call(const.DELETE_, const.ATTR_) self._cdf_file.clear_attr_from_cache(self._name) self._name = None class zAttr(Attr): """zAttribute for zVariables within a CDF. .. warning:: Because zAttributes are shared across all variables in a CDF, directly manipulating them may have unexpected consequences. It is safest to operate on zEntries via :class:`zAttrList`. .. note:: When accessing a zAttr, pyCDF exposes only the zEntry corresponding to the associated zVariable. See Also ======== :class:`Attr` """ ENTRY_ = const.zENTRY_ ENTRY_DATA_ = const.zENTRY_DATA_ SCOPE = const.VARIABLE_SCOPE ENTRY_EXISTENCE_ = const.zENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMzENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXzENTRY_ ENTRY_NUMELEMS_ = const.zENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.zENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.zENTRY_DATASPEC_ def insert(self, index, data): """Insert entry at particular index number Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError def append(self, index, data): """Add entry to end of attribute list Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError class gAttr(Attr): """Global Attribute for a CDF Represents a CDF attribute, providing access to the gEntries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. Normally accessed by providing a key to a :class:`gAttrList`: >>> attribute = cdffile.attrs['attribute_name'] >>> first_gentry = attribute[0] Each element of the list is a single gEntry of the appropriate type. The index to the elements is the gEntry number. A gEntry may be either a single string or a 1D array of numerical type. Entries of numerical type (everything but CDF_CHAR and CDF_UCHAR) with a single element are returned as scalars; multiple-element entries are returned as a list. No provision is made for accessing below the entry level; the whole list is returned at once (but Python's slicing syntax can be used to extract individual items from that list.) Multi-dimensional slicing is *not* supported; an entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry gEntries are *not* necessarily contiguous; a gAttribute may have an entry 0 and entry 2 without an entry 1. :meth:`~Attr.len` will return the *number* of gEntries; use :meth:`~Attr.max_idx` to find the highest defined gEntry number and :meth:`~Attr.has_entry` to determine if a particular gEntry number exists. Iterating over all entries is also supported:: >>> entrylist = [entry for entry in attribute] Deleting gEntries will leave a "hole": >>> attribute[0:3] = [1, 2, 3] >>> del attribute[1] >>> attribute.has_entry(1) False >>> attribute.has_entry(2) True >>> print attribute[0:3] [1, None, 3] Multi-element slices over nonexistent gEntries will return ``None`` where no entry exists. Single-element indices for nonexistent gEntries will raise ``IndexError``. Assigning ``None`` to a gEntry will delete it. 
When assigning to a gEntry, the type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing gEntry of the same number in this gAttribute #. other gEntries in this gAttribute #. data-matching constraints described in :meth:`CDF.new`. See Also ======== :class:`Attr` """ ENTRY_ = const.gENTRY_ ENTRY_DATA_ = const.gENTRY_DATA_ SCOPE = const.GLOBAL_SCOPE ENTRY_EXISTENCE_ = const.gENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMgENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXgENTRY_ ENTRY_NUMELEMS_ = const.gENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.gENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.gENTRY_DATASPEC_ class AttrList(MutableMapping): """Object representing a list of attributes. .. warning:: This class should not be used directly, but only via its subclasses, :class:`gAttrList` and :class:`zAttrList`. Methods listed here are safe to use from the subclasses. .. autosummary:: ~AttrList.clone ~AttrList.copy ~AttrList.from_dict ~AttrList.new ~AttrList.rename .. automethod:: clone .. automethod:: copy .. automethod:: from_dict .. automethod:: new .. automethod:: rename """ def __init__(self, cdf_file, special_entry=None): """Initialize the attribute collection @param cdf_file: CDF these attributes are in @type cdf_file: :py:class:`pycdf.CDF` @param special_entry: callable which returns a "special" entry number, used to limit results for zAttrs to those which match the zVar (i.e. the var number) @type special_entry: callable """ self._cdf_file = cdf_file self.special_entry = special_entry def __getitem__(self, name): """Find an Attribute by name @param name: name of the Attribute to return @type name: str @return: attribute named L{name} @rtype: L{Attr} @raise KeyError: if there is no attribute named L{name} @raise CDFError: other errors in CDF library """ try: attrib = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attrib.global_scope() != self.global_scope: raise KeyError(name + ': no ' + self.attr_name + ' by that name.') return attrib def __setitem__(self, name, data): """Create an Attribute or change its entries @param name: name of Attribute to change @type name: str @param data: Entries to populate this Attribute with. Any existing Entries will be deleted! Another C{Attr} may be specified, in which case all its entries are copied. @type data: scalar, list, or L{Attr} """ if isinstance(data, AttrList): if name in self: del self[name] attr = self._get_or_create(name) for entryno in range(data.max_idx()): if data.has_entry(entryno): attr.new(data[entryno], data.type(entryno), entryno) else: attr = self._get_or_create(name) if isinstance(data, str_classes): data = [data] else: try: junk = len(data) except TypeError: data = [data] attr[:] = data del attr[len(data):] def __delitem__(self, name): """Delete an Attribute (and all its entries) @param name: name of Attribute to delete @type name: str """ try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) attr._delete() def __iter__(self, current=0): """Iterates over all Attr in this CDF or variable Returns name of one L{Attr} at a time until reaches the end. @note: Returned in number order. 
""" count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) while current < count.value: candidate = self.AttrType(self._cdf_file, current) if candidate.global_scope() == self.global_scope: if self.special_entry is None or \ candidate.has_entry(self.special_entry()): if str == bytes: value = yield(candidate._name) else: value = yield(candidate._name.decode()) if value != None: current = self[value].number() current += 1 def __repr__(self): """Returns representation of attribute list Cannot return anything that can be eval'd to create a copy of the list, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<' + self.__class__.__name__ + ':\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute list This is an 'informal' representation in that it cannot be evaluated directly to create an L{AttrList}. @return: all the data in this list of attributes @rtype: str """ if self._cdf_file._opened: return '\n'.join([key + ': ' + ( ('\n' + ' ' * (len(key) + 2)).join( [str(value[i]) + ' [' + lib.cdftypenames[value.type(i)] + ']' for i in range(value.max_idx() + 1) if value.has_entry(i)]) if isinstance(value, Attr) else str(value) + ' [' + lib.cdftypenames[self.type(key)] + ']' ) for (key, value) in sorted(self.items())]) else: if isinstance(self._cdf_file.pathname, str): return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname) else: return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname.decode('ascii')) def clone(self, master, name=None, new_name=None): """ Clones another attribute list, or one attribute from it, into this list. Parameters ========== master : AttrList the attribute list to copy from. This can be any dict-like object. Other Parameters ================ name : str (optional) name of attribute to clone (default: clone entire list) new_name : str (optional) name of the new attribute, default ``name`` """ if name is None: self._clone_list(master) else: self._clone_attr(master, name, new_name) def copy(self): """ Create a copy of this attribute list Returns ======= out : dict copy of the entries for all attributes in this list """ return dict((key, value[:] if isinstance(value, Attr) else value) for (key, value) in self.items()) def new(self, name, data=None, type=None): """ Create a new Attr in this AttrList Parameters ========== name : str name of the new Attribute Other Parameters ================ data data to put into the first entry in the new Attribute type CDF type of the first entry from :mod:`~pycdf.const`. Only used if data are specified. Raises ====== KeyError : if the name already exists in this list """ if name in self: raise KeyError(name + ' already exists.') #A zAttr without an Entry in this zVar will be a "get" not "create" attr = self._get_or_create(name) if data is not None: if self.special_entry is None: attr.new(data, type) else: attr.new(data, type, self.special_entry()) def rename(self, old_name, new_name): """ Rename an attribute in this list Renaming a zAttribute renames it for *all* zVariables in this CDF! Parameters ========== old_name : str the current name of the attribute new_name : str the new name of the attribute """ AttrList.__getitem__(self, old_name).rename(new_name) def from_dict(self, in_dict): """ Fill this list of attributes from a dictionary .. deprecated:: 0.1.5 Use :meth:`~pycdf.AttrList.clone` instead; it supports cloning from dictionaries. 
Parameters ========== in_dict : dict Attribute list is populated entirely from this dictionary; all existing attributes are deleted. """ warnings.warn("from_dict is deprecated and will be removed. Use clone.", DeprecationWarning) for k in in_dict: self[k] = in_dict[k] for k in list(self): if not k in in_dict: del self[k] def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{AttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name self[new_name] = master[name] def _clone_list(self, master): """Clones this attribute list from another @param master: the attribute list to copy from @type master: L{AttrList} """ for name in master: self._clone_attr(master, name) for name in list(self): #Can't iterate over a list we're changing if not name in master: del self[name] def _get_or_create(self, name): """Retrieve L{Attr} or create it if it doesn't exist @param name: name of the attribute to look up or create @type name: str @return: attribute with this name @rtype: L{Attr} """ attr = None try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status != const.NO_SUCH_ATTR: raise if attr is None: attr = self.AttrType(self._cdf_file, name, True) elif attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) return attr class gAttrList(AttrList): """ Object representing *all* the gAttributes in a CDF. Normally accessed as an attribute of an open :class:`CDF`: >>> global_attribs = cdffile.attrs Appears as a dictionary: keys are attribute names; each value is an attribute represented by a :class:`gAttr` object. To access the global attribute TEXT: >>> text_attr = cdffile.attrs['TEXT'] See Also ======== :class:`AttrList` """ AttrType = gAttr attr_name = 'gAttribute' global_scope = True def __len__(self): """ Number of gAttributes in this CDF Returns ======= out : int number of gAttributes in the CDF """ count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMgATTRS_, ctypes.byref(count)) return count.value class zAttrList(AttrList): """Object representing *all* the zAttributes in a zVariable. Normally accessed as an attribute of a :class:`Var` in an open CDF: >>> epoch_attribs = cdffile['Epoch'].attrs Appears as a dictionary: keys are attribute names, values are the value of the zEntry associated with the appropriate zVariable. Each vAttribute in a CDF may only have a *single* entry associated with each variable. The entry may be a string, a single numerical value, or a series of numerical values. Entries with multiple values are returned as an entire list; direct access to the individual elements is not possible. Example: finding the first dependency of (ISTP-compliant) variable ``Flux``: >>> print cdffile['Flux'].attrs['DEPEND_0'] zAttributes are shared among zVariables, one zEntry allowed per zVariable. (pyCDF hides this detail.) Deleting the last zEntry for a zAttribute will delete the underlying zAttribute. zEntries are created and destroyed by the usual dict methods on the zAttrlist: >>> epoch_attribs['new_entry'] = [1, 2, 4] #assign a list to new zEntry >>> del epoch_attribs['new_entry'] #delete the zEntry The type of the zEntry is guessed from data provided. 
The type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing zEntry corresponding to this zVar #. other zEntries in this zAttribute #. the type of this zVar #. data-matching constraints described in :py:meth:`CDF.new` See Also ======== :class:`AttrList` """ AttrType = zAttr attr_name = 'zAttribute' global_scope = False def __init__(self, zvar): """Initialize the attribute collection @param zvar: zVariable these attributes are in @param zvar: :py:class:`pycdf.Var` """ super(zAttrList, self).__init__(zvar.cdf_file, zvar._num) self._zvar = zvar def __getitem__(self, name): """Find an zEntry by name @param name: name of the zAttribute to return @type name: str @return: attribute named L{name} @rtype: L{zAttr} @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if attrib.has_entry(zvar_num): attrib._raw = self._zvar._raw return attrib[zvar_num] else: raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) def __delitem__(self, name): """Delete an zEntry by name @param name: name of the zEntry to delete @type name: str @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library @note: If this is the only remaining entry, the Attribute will be deleted. """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(str(name) + ': no such attribute for variable ' + str(self._zvar._name)) del attrib[zvar_num] if len(attrib) == 0: attrib._delete() def __setitem__(self, name, data): """Sets a zEntry by name The type of the zEntry is guessed from L{data}. The type is chosen to match the data; subject to that constraint, it will try to match (in order): 1. existing zEntry corresponding to this zVar 2. other zEntries in this zAttribute 3. the type of this zVar 4. data-matching constraints described in L{_Hyperslice.types} @param name: name of zAttribute; zEntry for this zVariable will be set in zAttribute by this name @type name: str @raise CDFError: errors in CDF library @raise ValueError: if unable to find a valid CDF type matching L{data}, or if L{data} is the wrong dimensions. """ try: attr = super(zAttrList, self).__getitem__(name) except KeyError: attr = zAttr(self._cdf_file, name, True) attr._raw = self._zvar._raw attr[self._zvar._num()] = data def __len__(self): """Number of zAttributes in this variable @return: number of zAttributes in the CDF which have entries for this variable. @rtype: int """ length = 0 count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) current = 0 while current < count.value: candidate = zAttr(self._cdf_file, current) if not candidate.global_scope(): if candidate.has_entry(self._zvar._num()): length += 1 current += 1 return length def type(self, name, new_type=None): """Find or change the CDF type of a zEntry in this zVar @param name: name of the zAttr to check or change @type name: str @param new_type: type to change it to, see :py:mod:`pycdf.const` @type new_type: ctypes.c_long @return: CDF variable type, see :py:mod:`pycdf.const` @rtype: int @note: If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) return attrib.type(zvar_num, new_type) def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{zAttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name if new_name in self: del self[new_name] self.new(new_name, master[name], master.type(name) if hasattr(master, 'type') else None)
__init__
Create or locate a variable Parameters ========== cdf_file : :py:class:`pycdf.CDF` CDF file containing this variable var_name : string name of this variable Other Parameters ================ args additional arguments passed to :py:meth:`_create`. If none, opens an existing variable. If provided, creates a new one. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning
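In practice a Var is typically obtained through the owning :class:`CDF` object rather than by calling this constructor directly. A minimal sketch along the lines of the examples elsewhere in this module (file and variable names are placeholders; the blank-master form opens the new CDF read-write):

>>> import pycdf
>>> with pycdf.CDF('cdf_filename.cdf', '') as cdffile:   #new, empty CDF
...     cdffile['new_variable_name'] = [1, 2, 3, 4]      #creates the zVariable
...     var = cdffile['new_variable_name']               #locates the existing zVariable
...     print(var[...])                                  #read all of its data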
#!/usr/bin/env python # -*- coding: utf-8 -*- """ das developers note: This a is modification of the original SpacePy pycdf package. All refereneces to the greater spacepy package have been removed to create a small standalone module. --cwp 2018-10-18 The libcdf.so location code has been changed to find the version installed in anaconda. --cwp 2020-04-06 This package provides a Python interface to the Common Data Format (CDF) library used for many NASA missions, available at http://cdf.gsfc.nasa.gov/. It is targeted at Python 2.6+ and should work without change on either Python 2 or Python 3. The interface is intended to be 'pythonic' rather than reproducing the C interface. To open or close a CDF and access its variables, see the :class:`CDF` class. Accessing data within the variables is via the :class:`Var` class. The :data:`lib` object provides access to some routines that affect the functionality of the library in general. The :mod:`~pycdf.const` module contains constants useful for accessing the underlying library. Authors: Jon Niehof Institution: University of New Hampshire Contact: [email protected] Copyright 2010-2015 Los Alamos National Security, LLC. """ __contact__ = 'Jon Niehof, [email protected]' try: from collections.abc import MutableMapping, MutableSequence except ImportError: from collections import MutableMapping, MutableSequence import ctypes import ctypes.util import datetime import operator import os import os.path import shutil import sys import tempfile import warnings import weakref import numpy import numpy.ma #Import const AFTER library loaded, so failed load doesn't leave half-imported #from . import const try: str_classes = (str, bytes, unicode) except NameError: str_classes = (str, bytes) class Library(object): """ Abstraction of the base CDF C library and its state. Not normally intended for end-user use. An instance of this class is created at package load time as the :data:`~pycdf.lib` variable, providing access to the underlying C library if necessary. The CDF library itself is described in section 2.1 of the CDF user's guide, as well as the CDF C reference manual. Calling the C library directly requires knowledge of :mod:`ctypes`. Instantiating this object loads the C library, see :doc:`/pycdf` docs for details. .. autosummary:: ~Library.call ~Library.check_status ~Library.datetime_to_epoch ~Library.datetime_to_epoch16 ~Library.datetime_to_tt2000 ~Library.epoch_to_datetime ~Library.epoch_to_epoch16 ~Library.epoch_to_num ~Library.epoch_to_tt2000 ~Library.epoch16_to_datetime ~Library.epoch16_to_epoch ~Library.epoch16_to_tt2000 ~Library.set_backward supports_int8 ~Library.tt2000_to_datetime ~Library.tt2000_to_epoch ~Library.tt2000_to_epoch16 v_datetime_to_epoch v_datetime_to_epoch16 v_datetime_to_tt2000 v_epoch_to_datetime v_epoch_to_tt2000 v_epoch16_to_datetime v_epoch16_to_tt2000 v_tt2000_to_datetime v_tt2000_to_epoch v_tt2000_to_epoch16 libpath version .. automethod:: call .. automethod:: check_status .. automethod:: datetime_to_epoch .. automethod:: datetime_to_epoch16 .. automethod:: datetime_to_tt2000 .. automethod:: epoch_to_datetime .. automethod:: epoch_to_epoch16 .. automethod:: epoch_to_num .. automethod:: epoch_to_tt2000 .. automethod:: epoch16_to_datetime .. automethod:: epoch16_to_epoch .. automethod:: epoch16_to_tt2000 .. automethod:: set_backward .. attribute:: supports_int8 True if this library supports INT8 and TIME_TT2000 types; else False. .. automethod:: tt2000_to_datetime .. automethod:: tt2000_to_epoch .. 
automethod:: tt2000_to_epoch16 .. method:: v_datetime_to_epoch(datetime) A vectorized version of :meth:`datetime_to_epoch` which takes a numpy array of datetimes as input and returns an array of epochs. .. method:: v_datetime_to_epoch16(datetime) A vectorized version of :meth:`datetime_to_epoch16` which takes a numpy array of datetimes as input and returns an array of epoch16. .. method:: v_datetime_to_tt2000(datetime) A vectorized version of :meth:`datetime_to_tt2000` which takes a numpy array of datetimes as input and returns an array of TT2000. .. method:: v_epoch_to_datetime(epoch) A vectorized version of :meth:`epoch_to_datetime` which takes a numpy array of epochs as input and returns an array of datetimes. .. method:: v_epoch_to_tt2000(epoch) A vectorized version of :meth:`epoch_to_tt2000` which takes a numpy array of epochs as input and returns an array of tt2000s. .. method:: v_epoch16_to_datetime(epoch0, epoch1) A vectorized version of :meth:`epoch16_to_datetime` which takes a numpy array of epoch16 as input and returns an array of datetimes. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_epoch16_to_tt2000(epoch16) A vectorized version of :meth:`epoch16_to_tt2000` which takes a numpy array of epoch16 as input and returns an array of tt2000s. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_tt2000_to_datetime(tt2000) A vectorized version of :meth:`tt2000_to_datetime` which takes a numpy array of tt2000 as input and returns an array of datetimes. .. method:: v_tt2000_to_epoch(tt2000) A vectorized version of :meth:`tt2000_to_epoch` which takes a numpy array of tt2000 as input and returns an array of epochs. .. method:: v_tt2000_to_epoch16(tt2000) A vectorized version of :meth:`tt2000_to_epoch16` which takes a numpy array of tt2000 as input and returns an array of epoch16. .. attribute:: libpath The path where pycdf found the CDF C library, potentially useful in debugging. If this contains just the name of a file (with no path information), then the system linker found the library for pycdf. On Linux, ``ldconfig -p`` may be useful for displaying the system's library resolution. .. attribute:: version Version of the CDF library, (version, release, increment, subincrement) """ def __init__(self, libpath=None, library=None): """Load the CDF C library. Searches for the library in the order: 1. Appropriately-named file in CDF_LIB 2. Appropriately-named file in CDF_BASE 3. Standard library search path @raise CDFError: BAD_DATA_TYPE if can't map types properly """ if not 'CDF_TMP' in os.environ: os.environ['CDF_TMP'] = tempfile.gettempdir() if not library: if not libpath: self.libpath, self._library = self._find_lib() if self._library is None: raise Exception(( 'Cannot load CDF C library; checked {0}. 
' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(self.libpath))) else: self._library = ctypes.CDLL(libpath) self.libpath = libpath else: self._library = library self.libpath = libpath self._library.CDFlib.restype = ctypes.c_long #commonly used, so set it up here self._library.EPOCHbreakdown.restype = ctypes.c_long self._library.computeEPOCH.restype = ctypes.c_double self._library.computeEPOCH.argtypes = [ctypes.c_long] * 7 self._library.computeEPOCH16.restype = ctypes.c_double self._library.computeEPOCH16.argtypes = [ctypes.c_long] * 10 + \ [ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDFsetFileBackward'): self._library.CDFsetFileBackward.restype = None self._library.CDFsetFileBackward.argtypes = [ctypes.c_long] #Map old name to the 3.7.1+ name if not hasattr(self._library, 'computeTT2000') \ and hasattr(self._library, 'CDF_TT2000_from_UTC_parts'): self._library.computeTT2000 \ = self._library.CDF_TT2000_from_UTC_parts if hasattr(self._library, 'computeTT2000'): self._library.computeTT2000.restype = ctypes.c_longlong self._library.computeTT2000.argtypes = \ [ctypes.c_double] *9 #Map old name to the 3.7.1+ name if not hasattr(self._library, 'breakdownTT2000') \ and hasattr(self._library, 'CDF_TT2000_to_UTC_parts'): self._library.breakdownTT2000 \ = self._library.CDF_TT2000_to_UTC_parts if hasattr(self._library, 'breakdownTT2000'): self._library.breakdownTT2000.restype = None self._library.breakdownTT2000.argtypes = \ [ctypes.c_longlong] + [ctypes.POINTER(ctypes.c_double)] * 9 if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH'): self._library.CDF_TT2000_to_UTC_EPOCH.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH.argtypes = [ctypes.c_longlong] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH'): self._library.CDF_TT2000_from_UTC_EPOCH.restype = ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH.argtypes = [ctypes.c_double] if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH16'): self._library.CDF_TT2000_to_UTC_EPOCH16.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH16.argtypes = \ [ctypes.c_longlong, ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH16'): self._library.CDF_TT2000_from_UTC_EPOCH16.restype = \ ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH16.argtypes = \ [ctypes.POINTER(ctypes.c_double * 2)] #Get CDF version information ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) sub = ctypes.c_char(b' ') self.call(const.GET_, const.LIB_VERSION_, ctypes.byref(ver), const.GET_, const.LIB_RELEASE_, ctypes.byref(rel), const.GET_, const.LIB_INCREMENT_, ctypes.byref(inc), const.GET_, const.LIB_subINCREMENT_, ctypes.byref(sub)) ver = ver.value rel = rel.value inc = inc.value sub = sub.value self.version = (ver, rel, inc, sub) self._del_middle_rec_bug = ver < 3 or (ver == 3 and (rel < 4 or (rel == 4 and inc < 1))) self.supports_int8 = (ver > 3 or (ver == 3 and rel >=4)) self.cdftypenames = {const.CDF_BYTE.value: 'CDF_BYTE', const.CDF_CHAR.value: 'CDF_CHAR', const.CDF_INT1.value: 'CDF_INT1', const.CDF_UCHAR.value: 'CDF_UCHAR', const.CDF_UINT1.value: 'CDF_UINT1', const.CDF_INT2.value: 'CDF_INT2', const.CDF_UINT2.value: 'CDF_UINT2', const.CDF_INT4.value: 'CDF_INT4', const.CDF_UINT4.value: 'CDF_UINT4', const.CDF_INT8.value: 'CDF_INT8', const.CDF_FLOAT.value: 'CDF_FLOAT', const.CDF_REAL4.value: 'CDF_REAL4', const.CDF_DOUBLE.value: 'CDF_DOUBLE', const.CDF_REAL8.value: 'CDF_REAL8', const.CDF_EPOCH.value: 'CDF_EPOCH', 
const.CDF_EPOCH16.value: 'CDF_EPOCH16', const.CDF_TIME_TT2000.value: 'CDF_TIME_TT2000', } self.numpytypedict = {const.CDF_BYTE.value: numpy.int8, const.CDF_CHAR.value: numpy.int8, const.CDF_INT1.value: numpy.int8, const.CDF_UCHAR.value: numpy.uint8, const.CDF_UINT1.value: numpy.uint8, const.CDF_INT2.value: numpy.int16, const.CDF_UINT2.value: numpy.uint16, const.CDF_INT4.value: numpy.int32, const.CDF_UINT4.value: numpy.uint32, const.CDF_INT8.value: numpy.int64, const.CDF_FLOAT.value: numpy.float32, const.CDF_REAL4.value: numpy.float32, const.CDF_DOUBLE.value: numpy.float64, const.CDF_REAL8.value: numpy.float64, const.CDF_EPOCH.value: numpy.float64, const.CDF_EPOCH16.value: numpy.dtype((numpy.float64, 2)), const.CDF_TIME_TT2000.value: numpy.int64, } self.timetypes = [const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value] if not self.supports_int8: del self.cdftypenames[const.CDF_INT8.value] del self.numpytypedict[const.CDF_INT8.value] del self.cdftypenames[const.CDF_TIME_TT2000.value] del self.numpytypedict[const.CDF_TIME_TT2000.value] elif sys.platform.startswith('linux') \ and os.uname()[4].startswith('arm') \ and hasattr(self._library, 'computeTT2000') \ and self._library.computeTT2000( 2010, 1, 1, 0, 0, 0, 0, 0, 0) != 315576066184000000: #TT2000 call failed, so probably need to type-pun #double arguments to variadic functions. #Calling convention for non-variadic functions with floats #is unique, but convention for ints is same as variadic. #So type-pun arguments to integers to force that calling #convention. if ctypes.sizeof(ctypes.c_longlong) != \ ctypes.sizeof(ctypes.c_double): warnings.warn('ARM with unknown type sizes; ' 'TT2000 functions will not work.') else: self._library.computeTT2000.argtypes = \ [ctypes.c_longlong] * 9 c_ll_p = ctypes.POINTER(ctypes.c_longlong) if self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( 2010)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) != 315576066184000000: warnings.warn('ARM with unknown calling convention; ' 'TT2000 functions will not work.') self.datetime_to_tt2000 = self._datetime_to_tt2000_typepunned v_epoch16_to_datetime = numpy.frompyfunc( self.epoch16_to_datetime, 2, 1) self.v_epoch16_to_datetime = \ lambda x: v_epoch16_to_datetime(x[..., 0], x[..., 1]) self.v_epoch_to_datetime = numpy.frompyfunc( self.epoch_to_datetime, 1, 1) self.v_tt2000_to_datetime = numpy.frompyfunc( self.tt2000_to_datetime, 1, 1) self.v_datetime_to_epoch = numpy.vectorize( self.datetime_to_epoch, otypes=[numpy.float64]) v_datetime_to_epoch16 = numpy.frompyfunc( self.datetime_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. 
We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_datetime_to_epoch16(x): retval = numpy.require(v_datetime_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_datetime_to_epoch16 = _v_datetime_to_epoch16 self.v_datetime_to_tt2000 = numpy.vectorize( self.datetime_to_tt2000, otypes=[numpy.int64]) self.v_epoch_to_tt2000 = numpy.vectorize( self.epoch_to_tt2000, otypes=[numpy.int64]) self.v_tt2000_to_epoch = numpy.vectorize( self.tt2000_to_epoch, otypes=[numpy.float64]) v_epoch16_to_tt2000 = numpy.frompyfunc( self.epoch16_to_tt2000, 2, 1) self.v_epoch16_to_tt2000 = \ lambda x: v_epoch16_to_tt2000(x[..., 0], x[..., 1]) v_tt2000_to_epoch16 = numpy.frompyfunc( self.tt2000_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_tt2000_to_epoch16(x): retval = numpy.require(v_tt2000_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_tt2000_to_epoch16 = _v_tt2000_to_epoch16 if not self.supports_int8: self.datetime_to_tt2000 = self._bad_tt2000 self.tt2000_to_datetime = self._bad_tt2000 self.v_datetime_to_tt2000 = self._bad_tt2000 self.v_tt2000_to_datetime = self._bad_tt2000 self.epoch_to_tt2000 = self._bad_tt2000 self.v_epoch_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch = self._bad_tt2000 self.v_tt2000_to_epoch = self._bad_tt2000 self.epoch_16_to_tt2000 = self._bad_tt2000 self.v_epoch16_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch16 = self._bad_tt2000 self.v_tt2000_to_epoch16 = self._bad_tt2000 #Default to V2 CDF self.set_backward(True) @staticmethod def _find_lib(): """ Search for the CDF library Searches in likely locations for CDF libraries and attempts to load them. Stops at first successful load and, if fails, reports all the files that were tried as libraries. Returns ======= out : tuple This is either (path to library, loaded library) or, in the event of failure, (None, list of libraries tried) """ failed = [] for libpath in Library._lib_paths(): try: lib = ctypes.CDLL(libpath) except: failed.append(libpath) else: return libpath, lib return (failed, None) @staticmethod def _lib_paths(): """Find candidate paths for the CDF library Does not check that the library is actually in any particular directory, just returns a list of possible locations, in priority order. Returns ======= out : generator of str paths that look like the CDF library """ #What the library might be named names = { 'win32': ['cdf.dll'], 'darwin': ['libcdf.dylib', 'cdf.dylib', 'libcdf.so'], 'linux2': ['libcdf.so'], 'linux': ['libcdf.so'], } names = names.get(sys.platform, ['libcdf.so']) #All existing CDF-library-like paths within a directory search_dir = lambda x: \ [os.path.join(x, fname) for fname in names if os.path.exists(os.path.join(x, fname))] # Only use anaconda locations... # Defined during builds ... if 'PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['PREFIX'], 'lib')): yield p # defined when conda is activated ... 
if 'CONDA_PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'lib')): yield p # Special subdirectory for anaconda unix packages on windows if 'LIBRARY_BIN' in os.environ: for p in search_dir(os.environ['LIBRARY_BIN']): yield p ctypespath = ctypes.util.find_library( 'cdf.dll' if sys.platform == 'win32' else 'cdf') if ctypespath: yield ctypespath def check_status(self, status, ignore=()): """ Raise exception or warning based on return status of CDF call Parameters ========== status : int status returned by the C library Other Parameters ================ ignore : sequence of ctypes.c_long CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. (Default none). Raises ====== CDFError : if status < CDF_WARN, indicating an error Warns ===== CDFWarning : if CDF_WARN <= status < CDF_OK, indicating a warning. Returns ======= out : int status (unchanged) """ if status == const.CDF_OK or status in ignore: return status if status < const.CDF_WARN: raise CDFError(status) else: warning = CDFWarning(status) warning.warn() return status def call(self, *args, **kwargs): """ Call the CDF internal interface Passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with :meth:`check_status`. Terminal NULL is automatically added to args. Parameters ========== args : various, see :mod:`ctypes` Passed directly to the CDF library interface. Useful constants are defined in the :mod:`~pycdf.const` module. Other Parameters ================ ignore : sequence of CDF statuses sequence of CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. Returns ======= out : int CDF status from the library Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ if 'ignore' in kwargs: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) ), kwargs['ignore']) else: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) )) def set_backward(self, backward=True): """ Set backward compatibility mode for new CDFs Unless backward compatible mode is set, CDF files created by the version 3 library can not be read by V2. Parameters ========== backward : boolean Set backward compatible mode if True; clear it if False. Raises ====== ValueError : if backward=False and underlying CDF library is V2 """ if self.version[0] < 3: if not backward: raise ValueError( 'Cannot disable backward-compatible mode for CDF version 2.') else: return self._library.CDFsetFileBackward(const.BACKWARDFILEon if backward else const.BACKWARDFILEoff) def epoch_to_datetime(self, epoch): """ Converts a CDF epoch value to a datetime Parameters ========== epoch : float epoch value from CDF Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
See Also ======== v_epoch_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) self._library.EPOCHbreakdown(ctypes.c_double(epoch), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999000) else: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, msec.value * 1000) def datetime_to_epoch(self, dt): """ Converts a Python datetime to a CDF Epoch value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : float epoch corresponding to dt See Also ======== v_datetime_to_epoch """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) micro = dt.microsecond % 1000 if micro >= 500 and dt.year < 9999: dt += datetime.timedelta(0, 0, 1000) return self._library.computeEPOCH(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000)) def epoch16_to_datetime(self, epoch0, epoch1): """ Converts a CDF epoch16 value to a datetime .. note:: The call signature has changed since SpacePy 0.1.2. Formerly this method took a single argument with two values; now it requires two arguments (one for each value). To convert existing code, replace ``epoch16_to_datetime(epoch)`` with ``epoch16_to_datetime(*epoch)``. Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. See Also ======== v_epoch16_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) usec = ctypes.c_long(0) nsec = ctypes.c_long(0) psec = ctypes.c_long(0) self._library.EPOCH16breakdown((ctypes.c_double * 2)(epoch0, epoch1), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec), ctypes.byref(psec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) micro = int(float(msec.value) * 1000 + float(usec.value) + float(nsec.value) / 1000 + float(psec.value) / 1e6 + 0.5) if micro < 1000000: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_epoch16(self, dt): """ Converts a Python datetime to a CDF Epoch16 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : list of float epoch16 corresponding to dt See Also ======== v_datetime_to_epoch16 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) #Default to "illegal epoch" epoch16 = (ctypes.c_double * 2)(-1., -1.) 
if self._library.computeEPOCH16(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0, 0, epoch16): return (-1., -1.) #Failure, so illegal epoch return (epoch16[0], epoch16[1]) def epoch_to_epoch16(self, epoch): """ Converts a CDF EPOCH to a CDF EPOCH16 value Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : (double, double) EPOCH16 corresponding to epoch """ e = numpy.require(epoch, numpy.float64) s = numpy.trunc(e / 1000.0) #ugly numpy stuff, probably a better way.... res = numpy.hstack((s, (e - s * 1000.0) * 1e9)) if len(res) <= 2: return res newshape = list(res.shape[0:-2]) newshape.append(res.shape[-1] // 2) newshape.append(2) return numpy.rollaxis(res.reshape(newshape), -1, -2) def epoch_to_num(self, epoch): """ Convert CDF EPOCH to matplotlib number. Same output as :func:`~matplotlib.dates.date2num` and useful for plotting large data sets without converting the times through datetime. Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : double Floating point number representing days since 0001-01-01. """ #date2num day 1 is 1/1/1 00UT #epoch 1/1/1 00UT is 31622400000.0 (millisecond) return (epoch - 31622400000.0) / (24 * 60 * 60 * 1000.0) + 1.0 def epoch16_to_epoch(self, epoch16): """ Converts a CDF EPOCH16 to a CDF EPOCH value Parameters ========== epoch16 : (double, double) EPOCH16 to convert. Lists and numpy arrays are acceptable. LAST dimension should be 2: the two pairs of EPOCH16 Returns ======= out : double EPOCH corresponding to epoch16 """ e = numpy.require(epoch16, numpy.float64) return e[..., 0] * 1000.0 + numpy.round(e[..., 1] / 1e9) def tt2000_to_datetime(self, tt2000): """ Converts a CDF TT2000 value to a datetime .. note:: Although TT2000 values support leapseconds, Python's datetime object does not. Any times after 23:59:59.999999 will be truncated to 23:59:59.999999. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
See Also ======== v_tt2000_to_datetime """ yyyy = ctypes.c_double(0) mm = ctypes.c_double(0) dd = ctypes.c_double(0) hh = ctypes.c_double(0) min = ctypes.c_double(0) sec = ctypes.c_double(0) msec = ctypes.c_double(0) usec = ctypes.c_double(0) nsec = ctypes.c_double(0) self._library.breakdownTT2000( ctypes.c_longlong(tt2000), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) sec = int(sec.value) if sec >= 60: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), 59, 999999) micro = int(msec.value * 1000 + usec.value + nsec.value / 1000 + 0.5) if micro < 1000000: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_tt2000(self, dt): """ Converts a Python datetime to a CDF TT2000 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0) def _datetime_to_tt2000_typepunned(self, dt): """ Converts a Python datetime to a CDF TT2000 value Typepunned version that passes doubles as longlongs, to get around ARM calling convention oddness. Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ c_ll_p = ctypes.POINTER(ctypes.c_longlong) if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( dt.year)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.month)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.day)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.hour)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.minute)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.second)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond // 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond % 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) def epoch_to_tt2000(self, epoch): """ Converts a CDF EPOCH to a CDF TT2000 value Parameters ========== epoch : double EPOCH to convert Returns ======= out : int tt2000 corresponding to epoch See Also ======== v_epoch_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH(epoch) def tt2000_to_epoch(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH .. note:: Although TT2000 values support leapseconds, CDF EPOCH values do not. 
Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double EPOCH corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch """ return self._library.CDF_TT2000_to_UTC_EPOCH(tt2000) def epoch16_to_tt2000(self, epoch0, epoch1): """ Converts a CDF epoch16 value to TT2000 .. note:: Because TT2000 does not support picoseconds, the picoseconds value in epoch is ignored (i.e., truncated.) Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : long TT2000 corresponding to epoch. See Also ======== v_epoch16_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH16( (ctypes.c_double * 2)(epoch0, epoch1)) def tt2000_to_epoch16(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH16 .. note:: Although TT2000 values support leapseconds, CDF EPOCH16 values do not. Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double, double EPOCH16 corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch16 """ #Default to "illegal epoch" if isn't populated epoch16 = (ctypes.c_double * 2)(-1., -1.) if self._library.CDF_TT2000_to_UTC_EPOCH16(tt2000, epoch16): return (-1., -1.) #Failure; illegal epoch return (epoch16[0], epoch16[1]) def _bad_tt2000(*args, **kwargs): """Convenience function for complaining that TT2000 not supported""" raise NotImplementedError( 'TT2000 functions require CDF library 3.4.0 or later') def download_library(): """Download and install the CDF library""" if sys.platform != 'win32': raise NotImplementedError( 'CDF library install only supported on Windows') try: import html.parser as HTMLParser except ImportError: import HTMLParser #https://stackoverflow.com/questions/1713038/super-fails-with-error-typeerror-argument-1-must-be-type-not-classobj class LinkParser(HTMLParser.HTMLParser, object): def __init__(self, *args, **kwargs): self.links_found = [] super(LinkParser, self).__init__(*args, **kwargs) def handle_starttag(self, tag, attrs): if tag != 'a' or attrs[0][0] != 'href': return self.links_found.append(attrs[0][1]) import re import subprocess try: import urllib.request as u except ImportError: import urllib as u # Removed reference to spacepy #import spacepy #if spacepy.config.get('user_agent', None): # class AppURLopener(u.FancyURLopener): # version = spacepy.config['user_agent'] # u._urlopener = AppURLopener() baseurl = 'https://spdf.sci.gsfc.nasa.gov/pub/software/cdf/dist/' url = u.urlopen(baseurl) listing = url.read() url.close() p = LinkParser() p.feed(listing) cdfdist = [l for l in p.links_found if re.match('^cdf3\d_\d(?:_\d)?/$', l)] if not cdfdist: raise RuntimeError( "Couldn't find CDF distribution directory to download") cdfdist.sort(key=lambda x: x.rstrip('/').split('_')) cdfverbase = cdfdist[-1].rstrip('/') instfname = cdfverbase + ('_0' if cdfverbase.count('_') == 1 else '') + \ '-setup-{0}.exe'.format(len('%x' % sys.maxsize)*4) insturl = baseurl + cdfverbase + '/windows/' + instfname tmpdir = tempfile.mkdtemp() try: fname, status = u.urlretrieve(insturl, os.path.join(tmpdir, instfname)) subprocess.check_call([fname, '/install', '/q1'], shell=False) finally: shutil.rmtree(tmpdir) _libpath, _library = Library._find_lib() 
if _library is None: raise Exception(('Cannot load CDF C library; checked {0}. ' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(_libpath))) from . import const lib = Library(_libpath, _library) """Module global library object. Initalized at module load time so all classes have ready access to the CDF library and a common state. E.g: >>> import pycdf >>> pycdf.lib.version (3, 3, 0, ' ') """ class CDFException(Exception): """ Base class for errors or warnings in the CDF library. Not normally used directly, but in subclasses :class:`CDFError` and :class:`CDFWarning`. Error messages provided by this class are looked up from the underlying C library. """ def __init__(self, status): """ Create a CDF Exception Uses CDF C library to look up an appropriate error message. Parameters ========== status : ctypes.c_long CDF status """ self.status = status self.string = 'CDF error ' + repr(status) + ', unable to get details.' message = ctypes.create_string_buffer(const.CDF_STATUSTEXT_LEN + 1) try: retval = lib._library.CDFlib(const.SELECT_, const.CDF_STATUS_, ctypes.c_long(status), const.GET_, const.STATUS_TEXT_, message, const.NULL_) if retval == const.CDF_OK: if isinstance(message.value, str): self.string = message.value elif isinstance(message.value, bytes): self.string = message.value.decode() except: pass def __str__(self): """ Error string associated with the library error. Returns ======= out : str Error message from the CDF library. """ return self.string class CDFError(CDFException): """Raised for an error in the CDF library.""" pass class CDFWarning(CDFException, UserWarning): """Used for a warning in the CDF library.""" def warn(self, level=4): """ Issues a warning based on the information stored in my exception Intended for use in check_status or similar wrapper function. Other Parameters ================ level : int optional (default 3), how far up the stack the warning should be reported. Passed directly to :class:`warnings.warn`. """ warnings.warn(self, self.__class__, level) class EpochError(Exception): """Used for errors in epoch routines""" pass def _compress(obj, comptype=None, param=None): """Set or check the compression of a :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param obj: object on which to set or check compression @type obj: :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param comptype: type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :py:mod:`pycdf.const`. If not specified, will not change compression. @type comptype: ctypes.c_long @param param: Compression parameter, see CDF CRM 4.10 and :py:mod:`pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
@type param: ctypes.c_long @return: (comptype, param) currently in effect @rtype: tuple """ if isinstance(obj, CDF): COMPRESSION_ = const.CDF_COMPRESSION_ elif isinstance(obj, Var): COMPRESSION_ = const.zVAR_COMPRESSION_ else: raise ValueError('Must specify a CDF or Var type.') validparams = {const.NO_COMPRESSION.value: [ctypes.c_long(0)], const.RLE_COMPRESSION.value: [const.RLE_OF_ZEROs], const.HUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.AHUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.GZIP_COMPRESSION.value: [ctypes.c_long(5), ctypes.c_long(1), ctypes.c_long(2), ctypes.c_long(3), ctypes.c_long(4), ctypes.c_long(6), ctypes.c_long(7), ctypes.c_long(8), ctypes.c_long(9), ], } comptypes = [const.NO_COMPRESSION, const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION, const.GZIP_COMPRESSION] comptypevalues = [i.value for i in comptypes] if comptype != None: if not hasattr(comptype, 'value'): comptype = ctypes.c_long(comptype) if param is None: if not comptype.value in validparams: raise CDFError(const.BAD_COMPRESSION) param = validparams[comptype.value][0] paramlist = (ctypes.c_long * 1)(param) obj._call(const.PUT_, COMPRESSION_, comptype, paramlist) params = (ctypes.c_long * const.CDF_MAX_PARMS)(*([0] * const.CDF_MAX_PARMS)) comptype = ctypes.c_long(0) percent = ctypes.c_long(0) obj._call(const.GET_, COMPRESSION_, ctypes.byref(comptype), ctypes.byref(params), ctypes.byref(percent)) param = params[0] if not comptype.value in comptypevalues: raise CDFError(const.BAD_COMPRESSION) validparamvalues = [i.value for i in validparams[comptype.value]] if not param in validparamvalues: raise CDFError(const.BAD_COMPRESSION_PARM) comptype = comptypes[comptypevalues.index(comptype.value)] if comptype in (const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION): param = validparams[comptype.value][validparamvalues.index(param)] return (comptype, param) class CDF(MutableMapping): """ Python object representing a CDF file. Open or create a CDF file by creating an object of this class. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. A readonly CDF with many variables may be slow to close. See :meth:`readonly`. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :meth:`close` or :meth:`save` when done. .. note:: Existing CDF files are opened read-only by default, see :meth:`readonly` to change. CDF supports the `with <http://docs.python.org/tutorial/inputoutput.html#methods-of-file-objects>`_ keyword, like other file objects, so: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... #do brilliant things with the CDF will open the CDF, execute the indented statements, and close the CDF when finished or when an error occurs. The `python docs <http://docs.python.org/reference/compound_stmts.html#with>`_ include more detail on this 'context manager' ability. 
CDF objects behave like a python `dictionary <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_, where the keys are names of variables in the CDF, and the values, :class:`Var` objects. As a dictionary, they are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_ and it is easy to loop over all of the variables in a file. Some examples: #. List the names of all variables in the open CDF ``cdffile``: >>> cdffile.keys() >>> for k in cdffile: #Alternate ... print(k) #. Get a :class:`Var` object for the variable named ``Epoch``: >>> epoch = cdffile['Epoch'] #. Determine if a CDF contains a variable named ``B_GSE``: >>> if 'B_GSE' in cdffile: ... print('B_GSE is in the file') ... else: ... print('B_GSE is not in the file') #. Find how many variables are in the file: >>> print(len(cdffile)) #. Delete the variable ``Epoch`` from the open CDF file ``cdffile``: >>> del cdffile['Epoch'] #. Display a summary of variables and types in open CDF file ``cdffile``: >>> print(cdffile) #. Open the CDF named ``cdf_filename.cdf``, read *all* the data from all variables into dictionary ``data``, and close it when done or if an error occurs: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... data = cdffile.copy() This last example can be very inefficient as it reads the entire CDF. Normally it's better to treat the CDF as a dictionary and access only the data needed, which will be pulled transparently from disc. See :class:`Var` for more subtle examples. Potentially useful dictionary methods and related functions: - `in <http://docs.python.org/reference/expressions.html#in>`_ - `keys <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_ - :py:func:`len` - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - :py:func:`sorted` - :py:func:`~spacepy.toolbox.dictree` The CDF user's guide section 2.2 has more background information on CDF files. The :attr:`~CDF.attrs` Python attribute acts as a dictionary referencing CDF attributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`gAttrList` for more on the dictionary of global attributes. Creating a new CDF from a master (skeleton) CDF has similar syntax to opening one: >>> cdffile = pycdf.CDF('cdf_filename.cdf', 'master_cdf_filename.cdf') This creates and opens ``cdf_filename.cdf`` as a copy of ``master_cdf_filename.cdf``. Using a skeleton CDF is recommended over making a CDF entirely from scratch, but this is possible by specifying a blank master: >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') When CDFs are created in this way, they are opened read-write, see :py:meth:`readonly` to change. By default, new CDFs (without a master) are created in version 2 (backward-compatible) format. To create a version 3 CDF, use :meth:`Library.set_backward`: >>> pycdf.lib.set_backward(False) >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') Add variables by direct assignment, which will automatically set type and dimension based on the data provided: >>> cdffile['new_variable_name'] = [1, 2, 3, 4] or, if more control is needed over the type and dimensions, use :py:meth:`new`. Although it is supported to assign Var objects to Python variables for convenience, there are some minor pitfalls that can arise when changing a CDF that will not affect most users. 
This is only a concern when assigning a zVar object to a Python variable, changing the CDF through some other variable, and then trying to use the zVar object via the originally assigned variable. Deleting a variable: >>> var = cdffile['Var1'] >>> del cdffile['Var1'] >>> var[0] #fail, no such variable Renaming a variable: >>> var = cdffile['Var1'] >>> cdffile['Var1'].rename('Var2') >>> var[0] #fail, no such variable Renaming via the same variable works: >>> var = cdffile['Var1'] >>> var.rename('Var2') >>> var[0] #succeeds, aware of new name Deleting a variable and then creating another variable with the same name may lead to some surprises: >>> var = cdffile['Var1'] >>> var[...] = [1, 2, 3, 4] >>> del cdffile['Var1'] >>> cdffile.new('Var1', data=[5, 6, 7, 8] >>> var[...] [5, 6, 7, 8] .. autosummary:: ~CDF.attr_num ~CDF.attrs ~CDF.add_attr_to_cache ~CDF.add_to_cache ~CDF.backward ~CDF.checksum ~CDF.clear_attr_from_cache ~CDF.clear_from_cache ~CDF.clone ~CDF.close ~CDF.col_major ~CDF.compress ~CDF.copy ~CDF.from_data ~CDF.new ~CDF.raw_var ~CDF.readonly ~CDF.save ~CDF.var_num ~CDF.version .. attribute:: CDF.attrs Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. .. attribute:: CDF.backward True if this CDF was created in backward-compatible mode (for opening with CDF library before 3.x) .. automethod:: add_to_cache .. automethod:: add_attr_to_cache .. automethod:: attr_num .. automethod:: checksum .. automethod:: clear_from_cache .. automethod:: clear_attr_from_cache .. automethod:: clone .. automethod:: close .. automethod:: col_major .. automethod:: compress .. automethod:: copy .. automethod:: from_data .. automethod:: new .. automethod:: raw_var .. automethod:: readonly .. automethod:: save .. automethod:: var_num .. automethod:: version """ def __init__(self, pathname, masterpath=None, create=None, readonly=None): """Open or create a CDF file. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. Raises ====== CDFError if CDF library reports an error CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save` when done. 
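To create a new CDF from a master (skeleton) CDF, pass its path as
``masterpath`` (the filenames here are purely illustrative):

>>> cdffile = pycdf.CDF('cdf_filename.cdf', 'master_cdf_filename.cdf')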
""" if masterpath is not None: #Looks like we want to create if create is False: raise ValueError('Cannot specify a master CDF without creating a CDF') if readonly is True: raise ValueError('Cannot create a CDF in readonly mode') if create and readonly: raise ValueError('Cannot create a CDF in readonly mode') try: self.pathname = pathname.encode() except AttributeError: raise ValueError( 'pathname must be string-like: {0}'.format(pathname)) self._handle = ctypes.c_void_p(None) self._opened = False if masterpath is None and not create: self._open(True if readonly is None else readonly) elif masterpath: self._from_master(masterpath.encode()) else: self._create() lib.call(const.SELECT_, const.CDF_zMODE_, ctypes.c_long(2)) self._attrlistref = weakref.ref(gAttrList(self)) self.backward = self.version()[0] < 3 self._var_nums = {} """Cache of name-to-number mappings for variables in this CDF""" self._attr_info = {} """Cache of name-to-(number, global) mappings for attributes in this CDF""" def __del__(self): """Destructor; called when CDF object is destroyed. Close CDF file if there is still a valid handle. .. note:: To avoid data loss, explicitly call :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save`. """ if self._opened: self.close() def __delitem__(self, name): """Delete a zVariable in this CDF, by name or number Parameters ========== name : string or int Name or number of the CDF variable .. note: Variable numbers may change if variables are added or removed. Examples ======== Delete the variable ``Epoch`` from the open CDF file ``cdffile``. >>> del cdffile['Epoch'] """ self[name]._delete() def __enter__(self): """Context manager entrance function.""" return self def __exit__(self, type, value, traceback): """Context manager exit function. Close CDF file. """ self.close() def __getitem__(self, name): """Gets a zVariable in this CDF, by name or number The CDF acts like a dict @param name: Name or number of the CDF variable @type name: string or int @return: CDF variable named or numbered L{name} @rtype: :py:class:`pycdf.Var` @raise KeyError: for pretty much any problem in lookup @note: variable numbers may change if variables are added or removed. """ try: return Var(self, name) except CDFException as e: raise KeyError('{0}: {1}'.format(name, e)) def __setitem__(self, name, data): """Writes data to a zVariable in this CDF If the zVariable does not exist, will create one matching L{data}. If it does exist, will attempt to write L{data} to it without changing the type or dimensions. @param name: name or number of the variable to write @type name: str or int @param data: data to write, or a :py:class:`pycdf.Var` to copy """ if isinstance(data, Var): self.clone(data, name) elif name in self: self[name][...] 
= data if hasattr(data, 'attrs'): self[name].attrs.clone(data.attrs) else: self.new(name, data) def __iter__(self, current = 0): """Iterates over zVars in CDF Iterators for dicts return keys @note: Returned in variable-number order """ while current < self.__len__(): name = self[current].name() value = (yield name) if value is None: current += 1 else: current = self[value]._num() current += 1 def __len__(self): """Implements 'length' of CDF (number of zVars) @return: number of zVars in the CDF @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, const.CDF_NUMzVARS_, ctypes.byref(count)) return count.value def __contains__(self, key): """Determines whether a particular variable name is in the CDF @note: Essentially an efficiency function; L{__iter__} is called if this isn't defined @param key: key/variable name to check @type key: string @return: True if L{key} is the name of a variable in CDF, else False @rtype: Boolean """ try: foo = self[key] return True except KeyError as e: expected = str(key) + \ ": NO_SUCH_VAR: Named variable not found in this CDF." if expected in e.args: return False raise def __repr__(self): """Returns representation of CDF Cannot return anything that can be eval'd to create a copy of the CDF, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<CDF:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the CDF This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.CDF`, just the names, types, and sizes of all variables. (Attributes are not listed.) @return: description of the variables in the CDF @rtype: str """ if self._opened: return '\n'.join([key + ': ' + str(value) for (key, value) in sorted(self.items())]) #can get away with this sort because second value in tuple isn't #compared unless first are different, and variable name is unique. else: if isinstance(self.pathname, str): return 'Closed CDF {0}'.format(self.pathname) else: return 'Closed CDF {0}'.format(self.pathname.decode('ascii')) def _open(self, readonly=True): """Opens the CDF file (called on init) Will open an existing CDF file read/write. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.OPEN_, const.CDF_, self.pathname, ctypes.byref(self._handle)) self._opened = True if readonly: #Default is RW self.readonly(readonly) def _create(self): """Creates (and opens) a new CDF file Created at ``pathname``. Assumes zero-dimension r variables Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.CREATE_, const.CDF_, self.pathname, ctypes.c_long(0), (ctypes.c_long * 1)(0), ctypes.byref(self._handle)) self._opened = True def _from_master(self, master_path): """Creates a new CDF from a master CDF file ``master_path`` is copied to ``pathname`` and opened. Parameters ========== master_path : string location of the master CDF file Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. 
note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ if os.path.exists(self.pathname): raise CDFError(const.CDF_EXISTS) shutil.copy2(master_path, self.pathname) self._open(False) def _call(self, *args, **kwargs): """Select this CDF as current and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. Parameters ========== args : various, see :py:mod:`ctypes`. Passed directly to the CDF library interface. Useful constants are defined in the :doc:`const <pycdf_const>` module of this package. Returns ======= out : ctypes.c_long CDF status from the library .. note: Terminal NULL_ is automatically added to ``args``. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. """ return lib.call(const.SELECT_, const.CDF_, self._handle, *args, **kwargs) def clone(self, zVar, name=None, data=True): """ Clone a zVariable (from another CDF or this) into this CDF Parameters ========== zVar : :py:class:`Var` variable to clone Other Parameters ================ name : str Name of the new variable (default: name of the original) data : boolean (optional) Copy data, or only type, dimensions, variance, attributes? (default: True, copy data as well) Returns ======= out : :py:class:`Var` The newly-created zVar in this CDF """ if name is None: name = zVar.name() if name in self: del self[name] self.new(name, type=zVar.type(), recVary=zVar.rv(), dimVarys=zVar.dv(), dims=zVar._dim_sizes(), n_elements=zVar._nelems()) self[name].compress(*zVar.compress()) self[name].attrs.clone(zVar.attrs) if data: r = zVar._raw zVar._raw = True self.raw_var(name)[...] = zVar[...] zVar._raw = r return zVar def col_major(self, new_col=None): """ Finds the majority of this CDF file Other Parameters ================ new_col : boolean Specify True to change to column-major, False to change to row major, or do not specify to check the majority rather than changing it. (default is check only) Returns ======= out : boolean True if column-major, false if row-major """ if new_col != None: new_maj = const.COLUMN_MAJOR if new_col else const.ROW_MAJOR self._call(const.PUT_, const.CDF_MAJORITY_, new_maj) maj = ctypes.c_long(0) self._call(const.GET_, const.CDF_MAJORITY_, ctypes.byref(maj)) if not maj.value in (const.ROW_MAJOR.value, const.COLUMN_MAJOR.value): raise CDFError(const.BAD_MAJORITY) return maj.value == const.COLUMN_MAJOR.value def readonly(self, ro=None): """ Sets or check the readonly status of this CDF If the CDF has been changed since opening, setting readonly mode will have no effect. .. note:: Closing a CDF that has been opened readonly, or setting readonly False, may take a substantial amount of time if there are many variables in the CDF, as a (potentially large) cache needs to be cleared. Consider specifying ``readonly=False`` when opening the file if this is an issue. However, this may make some reading operations slower. Other Parameters ================ ro : Boolean True to set the CDF readonly, False to set it read/write, or leave out to check only. 
Returns ======= out : Boolean True if CDF is read-only, else False Raises ====== CDFError : if bad mode is set """ if ro == True: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYon) elif ro == False: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYoff) mode = ctypes.c_long(0) self._call(const.CONFIRM_, const.CDF_READONLY_MODE_, ctypes.byref(mode)) if mode.value == const.READONLYon.value: return True elif mode.value == const.READONLYoff.value: return False else: raise CDFError(const.BAD_READONLY_MODE.value) def checksum(self, new_val=None): """ Set or check the checksum status of this CDF. If checksums are enabled, the checksum will be verified every time the file is opened. Other Parameters ================ new_val : boolean True to enable checksum, False to disable, or leave out to simply check. Returns ======= out : boolean True if the checksum is enabled or False if disabled """ if new_val != None: self._call(const.PUT_, const.CDF_CHECKSUM_, const.MD5_CHECKSUM if new_val else const.NO_CHECKSUM) chk = ctypes.c_long(0) self._call(const.GET_, const.CDF_CHECKSUM_, ctypes.byref(chk)) if not chk.value in (const.MD5_CHECKSUM.value, const.NO_CHECKSUM.value): raise CDFError(const.BAD_CHECKSUM) return chk.value == const.MD5_CHECKSUM.value def close(self): """ Closes the CDF file Although called on object destruction (:meth:`~CDF.__del__`), to ensure all data are saved, the user should explicitly call :meth:`~CDF.close` or :meth:`~CDF.save`. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.CLOSE_, const.CDF_) self._opened = False def compress(self, comptype=None, param=None): """ Set or check the compression of this CDF Sets compression on entire *file*, not per-variable. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) Returns ======= out : tuple (comptype, param) currently in effect See Also ======== :meth:`Var.compress` Examples ======== Set file ``cdffile`` to gzip compression, compression level 9: >>> cdffile.compress(pycdf.const.GZIP_COMPRESSION, 9) """ return _compress(self, comptype, param) def new(self, name, data=None, type=None, recVary=True, dimVarys=None, dims=None, n_elements=None, compress=None, compress_param=None): """ Create a new zVariable in this CDF .. note:: Either ``data`` or ``type`` must be specified. If type is not specified, it is guessed from ``data``. Parameters ========== name : str name of the new variable Other Parameters ================ data data to store in the new variable. If this has a an ``attrs`` attribute (e.g., :class:`~spacepy.datamodel.dmarray`), it will be used to populate attributes of the new variable. type : ctypes.c_long CDF type of the variable, from :mod:`~pycdf.const`. See section 2.5 of the CDF user's guide for more information on CDF data types. recVary : boolean record variance of the variable (default True) dimVarys : list of boolean dimension variance of each dimension, default True for all dimensions. 
dims : list of int size of each dimension of this variable, default zero-dimensional. Note this is the dimensionality as defined by CDF, i.e., for record-varying variables it excludes the leading record dimension. See :py:class:`Var`. n_elements : int number of elements, should be 1 except for CDF_CHAR, for which it's the length of the string. compress : ctypes.c_long Compression to apply to this variable, default None. See :py:meth:`Var.compress`. compress_param : ctypes.c_long Compression parameter if compression used; reasonable default is chosen. See :py:meth:`Var.compress`. Returns ======= out : :py:class:`Var` the newly-created zVariable Raises ====== ValueError : if neither data nor sufficient typing information is provided. Notes ===== Any given data may be representable by a range of CDF types; if the type is not specified, pycdf will guess which the CDF types which can represent this data. This breaks down to: #. If input data is a numpy array, match the type of that array #. Proper kind (numerical, string, time) #. Proper range (stores highest and lowest number provided) #. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: #. Type that matches precision of data first, then #. integer type before float type, then #. Smallest type first, then #. signed type first, then #. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if ``data`` specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: #. absolute values between 0 and 3e-39 #. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. """ if type in (const.CDF_EPOCH16, const.CDF_INT8, const.CDF_TIME_TT2000) \ and self.backward: raise ValueError('Cannot use EPOCH16, INT8, or TIME_TT2000 ' 'in backward-compatible CDF') if not lib.supports_int8 and \ type in (const.CDF_INT8, const.CDF_TIME_TT2000): raise ValueError('INT8 and TIME_TT2000 require CDF library 3.4.0') if data is None: if type is None: raise ValueError('Must provide either data or a CDF type.') if dims is None: dims = [] if n_elements is None: n_elements = 1 else: (guess_dims, guess_types, guess_elements) = _Hyperslice.types(data) if dims is None: if recVary: if guess_dims == (): raise ValueError( 'Record-varying data cannot be scalar. 
' 'Specify NRV with CDF.new() or put data in array.') dims = guess_dims[1:] else: dims = guess_dims if type is None: type = guess_types[0] if type == const.CDF_EPOCH16.value and self.backward: type = const.CDF_EPOCH if n_elements is None: n_elements = guess_elements if dimVarys is None: dimVarys = [True for i in dims] recVary = const.VARY if recVary else const.NOVARY dimVarys = [const.VARY if dimVary else const.NOVARY for dimVary in dimVarys] if not hasattr(type, 'value'): type = ctypes.c_long(type) if type.value == const.CDF_INT8.value and not lib.supports_int8: raise ValueError( '64-bit integer support require CDF library 3.4.0') if type.value in (const.CDF_EPOCH16.value, const.CDF_INT8.value, const.CDF_TIME_TT2000.value) \ and self.backward: raise ValueError('Data requires EPOCH16, INT8, or TIME_TT2000; ' 'incompatible with backward-compatible CDF') new_var = Var(self, name, type, n_elements, dims, recVary, dimVarys) if compress != None: new_var.compress(compress, compress_param) if data is not None: new_var[...] = data if hasattr(data, 'attrs'): new_var.attrs.clone(data.attrs) return new_var def raw_var(self, name): """ Get a "raw" :class:`Var` object. Normally a :class:`Var` will perform translation of values for certain types (to/from Unicode for CHAR variables on Py3k, and to/from datetime for all time types). A "raw" object does not perform this translation, on read or write. This does *not* affect the data on disk, and in fact it is possible to maintain multiple Python objects with access to the same zVariable. Parameters ========== name : str name or number of the zVariable """ v = self[name] v._raw = True return v def save(self): """ Saves the CDF file but leaves it open. If closing the CDF, :meth:`close` is sufficient; there is no need to call :meth:`save` before :meth:`close`. .. note:: Relies on an undocumented call of the CDF C library, which is also used in the Java interface. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.SAVE_, const.CDF_) def copy(self): """ Make a copy of all data and attributes in this CDF Returns ======= out : :py:class:`CDFCopy` :class:`~spacepy.datamodel.SpaceData`-like object of all data """ return CDFCopy(self) def version(self): """ Get version of library that created this CDF Returns ======= out : tuple version of CDF library, in form (version, release, increment) """ ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) self._call(const.GET_, const.CDF_VERSION_, ctypes.byref(ver), const.GET_, const.CDF_RELEASE_, ctypes.byref(rel), const.GET_, const.CDF_INCREMENT_, ctypes.byref(inc)) return (ver.value, rel.value, inc.value) def _get_attrs(self): """Get attribute list Provide access to the CDF's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = gAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. 
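A brief sketch of typical use (assuming an open, writable CDF ``cdffile``;
the attribute names are hypothetical):

>>> print(cdffile.attrs['Project'])
>>> cdffile.attrs['new_attr'] = 'new value'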
""") def var_num(self, varname): """Get the variable number of a particular variable name This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : int Variable number of this zvariable. """ num = self._var_nums.get(varname, None) if num is None: #Copied from Var._get, which can hopefully be thinned varNum = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMBER_, varname, ctypes.byref(varNum)) num = varNum.value self._var_nums[varname] = num return num def attr_num(self, attrname): """Get the attribute number and scope by attribute name This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : tuple attribute number, scope (True for global) of this attribute """ res = self._attr_info.get(attrname, None) if res is None: #Copied from Var._get, which can hopefully be thinned attrNum = ctypes.c_long(0) self._call(const.GET_, const.ATTR_NUMBER_, attrname, ctypes.byref(attrNum)) scope = ctypes.c_long(0) self._call(const.SELECT_, const.ATTR_, attrNum, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) res = (attrNum.value, scope) self._attr_info[attrname] = res return res def clear_attr_from_cache(self, attrname): """Mark an attribute deleted in the name-to-number cache Will remove an attribute, and all attributes with higher numbers, from the attribute cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the attribute. Not this is NOT a string in Python 3! """ num, scope = self.attr_num(attrname) #All numbers higher than this are renumbered for a, n in list(self._attr_info.items()): if n[0] >= num: del self._attr_info[a] def clear_from_cache(self, varname): """Mark a variable deleted in the name-to-number cache Will remove a variable, and all variables with higher numbers, from the variable cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! """ num = self.var_num(varname) #All numbers higher than this are renumbered for v, n in list(self._var_nums.items()): if n >= num: del self._var_nums[v] def add_attr_to_cache(self, attrname, num, scope): """Add an attribute to the name-to-number cache This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable scope : bool True if global scope; False if variable scope. 
""" self._attr_info[attrname] = (num, scope) def add_to_cache(self, varname, num): """Add a variable to the name-to-number cache This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable """ self._var_nums[varname] = num #Note there is no function for delete, currently handled in Var.rename #and Attr.rename by just deleting from the dict directly. Maybe this #should be differen (maybe should be possible to follow a variable across #a rename...) class Var(MutableSequence): """ A CDF variable. This object does not directly store the data from the CDF; rather, it provides access to the data in a format that much like a Python list or numpy :class:`~numpy.ndarray`. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. The CDF user's guide, section 2.3, provides background on variables. .. note:: Not intended to be created directly; use methods of :class:`CDF` to gain access to a variable. A record-varying variable's data are viewed as a hypercube of dimensions n_dims+1 (the extra dimension is the record number). They are indexed in row-major fashion, i.e. the last index changes most frequently / is contiguous in memory. If the CDF is column-major, the data are transformed to row-major before return. Non record-varying variables are similar, but do not have the extra dimension of record number. Variables can be subscripted by a multidimensional index to return the data. Indices are in row-major order with the first dimension representing the record number. If the CDF is column major, the data are reordered to row major. Each dimension is specified by standard Python `slice <http://docs.python.org/tutorial/introduction.html#strings>`_ notation, with dimensions separated by commas. The ellipsis fills in any missing dimensions with full slices. The returned data are lists; Python represents multidimensional arrays as nested lists. The innermost set of lists represents contiguous data. .. note:: numpy 'fancy indexing' is *not* supported. Degenerate dimensions are 'collapsed', i.e. no list of only one element will be returned if a single subscript is specified instead of a range. (To avoid this, specify a slice like 1:2, which starts with 1 and ends before 2). Two special cases: 1. requesting a single-dimension slice for a record-varying variable will return all data for that record number (or those record numbers) for that variable. 2. Requests for multi-dimensional variables may skip the record-number dimension and simply specify the slice on the array itself. In that case, the slice of the array will be returned for all records. In the event of ambiguity (e.g., single-dimension slice on a one-dimensional variable), case 1 takes priority. Otherwise, mismatch between the number of dimensions specified in the slice and the number of dimensions in the variable will cause an :exc:`~exceptions.IndexError` to be thrown. This all sounds very complicated but it is essentially attempting to do the 'right thing' for a range of slices. An unusual case is scalar (zero-dimensional) non-record-varying variables. Clearly they cannot be subscripted normally. 
In this case, use the ``[...]`` syntax meaning 'access all data.': >>> import pycdf >>> testcdf = pycdf.CDF('test.cdf', '') >>> variable = testcdf.new('variable', recVary=False, ... type=pycdf.const.CDF_INT4) >>> variable[...] = 10 >>> variable <Var: CDF_INT4 [] NRV > >>> variable[...] 10 Reading any empty non-record-varying variable will return an empty with the same *number* of dimensions, but all dimensions will be of zero length. The scalar is, again, a special case: due to the inability to have a numpy array which is both zero-dimensional and empty, reading an NRV scalar variable with no data will return an empty one-dimensional array. This is really not recommended. As a list type, variables are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_; iterating over a variable returns a single complete record at a time. This is all clearer with examples. Consider a variable ``B_GSM``, with three elements per record (x, y, z components) and fifty records in the CDF. Then: 1. ``B_GSM[0, 1]`` is the y component of the first record. 2. ``B_GSM[10, :]`` is a three-element list, containing x, y, and z components of the 11th record. As a shortcut, if only one dimension is specified, it is assumed to be the record number, so this could also be written ``B_GSM[10]``. 3. ``B_GSM[...]`` reads all data for ``B_GSM`` and returns it as a fifty-element list, each element itself being a three-element list of x, y, z components. Multidimensional example: consider fluxes stored as a function of pitch angle and energy. Such a variable may be called Flux and stored as a two-dimensional array, with the first dimension representing (say) ten energy steps and the second, eighteen pitch angle bins (ten degrees wide, centered from 5 to 175 degrees). Assume 100 records stored in the CDF (i.e. 100 different times). 1. ``Flux[4]`` is a list of ten elements, one per energy step, each element being a list of 18 fluxes, one per pitch bin. All are taken from the fifth record in the CDF. 2. ``Flux[4, :, 0:4]`` is the same record, all energies, but only the first four pitch bins (roughly, field-aligned). 3. ``Flux[..., 0:4]`` is a 100-element list (one per record), each element being a ten-element list (one per energy step), each containing fluxes for the first four pitch bins. This slicing notation is very flexible and allows reading specifically the desired data from the CDF. All data are, on read, converted to appropriate Python data types; EPOCH, EPOCH16, and TIME_TT2000 types are converted to :class:`~datetime.datetime`. Data are returned in numpy arrays. .. note:: Although pycdf supports TIME_TT2000 variables, the Python :class:`~datetime.datetime` object does not support leap seconds. Thus, on read, any seconds past 59 are truncated to 59.999999 (59 seconds, 999 milliseconds, 999 microseconds). Potentially useful list methods and related functions: - `count <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `in <http://docs.python.org/reference/expressions.html#in>`_ - `index <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `len <http://docs.python.org/library/functions.html#len>`_ - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - `sorted <http://docs.python.org/library/functions.html#sorted>`_ The topic of array majority can be very confusing; good background material is available at `IDL Array Storage and Indexing <http://www.idlcoyote.com/misc_tips/colrow_major.html>`_. 
In brief, *regardless of the majority stored in the CDF*, pycdf will always present the data in the native Python majority, row-major order, also known as C order. This is the default order in `NumPy <http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html #internal-memory-layout-of-an-ndarray>`_. However, packages that render image data may expect it in column-major order. If the axes seem 'swapped' this is likely the reason. The :attr:`~Var.attrs` Python attribute acts as a dictionary referencing zAttributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`zAttrList` for more on the dictionary of attributes. With writing, as with reading, every attempt has been made to match the behavior of Python lists. You can write one record, many records, or even certain elements of all records. There is one restriction: only the record dimension (i.e. dimension 0) can be resized by write, as all records in a variable must have the same dimensions. Similarly, only whole records can be deleted. .. note:: Unusual error messages on writing data usually mean that pycdf is unable to interpret the data as a regular array of a single type matching the type and shape of the variable being written. A 5x4 array is supported; an irregular array where one row has five columns and a different row has six columns is not. Error messages of this type include: - ``Data must be well-formed, regular array of number, string, or datetime`` - ``setting an array element with a sequence.`` - ``shape mismatch: objects cannot be broadcast to a single shape`` For these examples, assume Flux has 100 records and dimensions [2, 3]. Rewrite the first record without changing the rest: >>> Flux[0] = [[1, 2, 3], [4, 5, 6]] Writes a new first record and delete all the rest: >>> Flux[...] = [[1, 2, 3], [4, 5, 6]] Write a new record in the last position and add a new record after: >>> Flux[99:] = [[[1, 2, 3], [4, 5, 6]], ... [[11, 12, 13], [14, 15, 16]]] Insert two new records between the current number 5 and 6: >>> Flux[5:6] = [[[1, 2, 3], [4, 5, 6]], [[11, 12, 13], ... [14, 15, 16]]] This operation can be quite slow, as it requires reading and rewriting the entire variable. (CDF does not directly support record insertion.) Change the first element of the first two records but leave other elements alone: >>> Flux[0:2, 0, 0] = [1, 2] Remove the first record: >>> del Flux[0] Removes record 5 (the sixth): >>> del Flux[5] Due to the need to work around a bug in the CDF library, this operation can be quite slow. Delete *all data* from ``Flux``, but leave the variable definition intact: >>> del Flux[...] .. note:: Although this interface only directly supports zVariables, zMode is set on opening the CDF so rVars appear as zVars. See p.24 of the CDF user's guide; pyCDF uses zMode 2. .. autosummary:: ~Var.attrs ~Var.compress ~Var.copy ~Var.dtype ~Var.dv ~Var.insert ~Var.name ~Var.rename ~Var.rv ~Var.shape ~Var.type .. attribute:: Var.attrs zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. .. automethod:: compress .. automethod:: copy .. autoattribute:: dtype .. automethod:: dv .. automethod:: insert .. automethod:: name .. automethod:: rename .. automethod:: rv .. autoattribute:: shape .. automethod:: type """ # MASKED: __init__ function (lines 2766-2804) def __getitem__(self, key): """Returns a slice from the data array. Details under :py:class:`pycdf.Var`. @return: The data from this variable @rtype: list-of-lists of appropriate type. 
@raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) #Hyperslice mostly catches this sort of thing, but #an empty variable is a special case, since we might want to #WRITE to 0th record (which Hyperslice also supports) but #can't READ from it, and iterating over tries to read from it. if hslice.rv: if hslice.dimsizes[0] == 0 and hslice.degen[0] and \ hslice.starts[0] == 0: raise IndexError('record index out of range') #For NRV, again hslice will assume 0th record exists since we might #want to write. So ANY degenerate dim other than the glued-on 0th #suggests an explicit index that should fail. None degenerate suggests #make an empty array. #Note this is pulling a lot of hyperslice stuff into getitem! elif hslice.dimsizes[0] == 0: if len(hslice.degen) > 1 and max(hslice.degen[1:]): raise IndexError('record index out of range') else: #The zero-length dimension is degenerate so it gets chopped, #and you can't have a zero-length numpy array that still #maintains the size of all other dimensions. So just force #a zero-dim array and the rest will follow hslice.counts[...] = 0 #If this is a scalar, need to make a single non-degenerate #dimension so it can be empty. if len(hslice.counts) == 1: hslice.degen[0] = False result = hslice.create_array() if hslice.counts[0] != 0: hslice.select() lib.call(const.GET_, const.zVAR_HYPERDATA_, result.ctypes.data_as(ctypes.c_void_p)) return hslice.convert_input_array(result) def __delitem__(self, key): """Removes a record (or set of records) from the CDF Only whole records can be deleted, so the del call must either specify only one dimension or it must specify all elements of the non-record dimensions. This is *not* a way to resize a variable! Deleting records from the middle of a variable may be very slow in some circumstances. To work around a bug in CDF library versions 3.4.0 and before, all the data must be read in, the requested deletions done, and then all written back out. @param key: index or slice to delete @type key: int or slice @raise TypeError: if an attempt is made to delete from a non record-varying variable, or to delete below the record level """ if not self.rv(): raise TypeError('Cannot delete records from non-record-varying ' 'variable.') hslice = _Hyperslice(self, key) if hslice.dims > 1 and (hslice.counts[1:] != hslice.dimsizes[1:]).any(): raise TypeError('Can only delete entire records.') if hslice.counts[0] == 0: return start = hslice.starts[0] count = hslice.counts[0] interval = hslice.intervals[0] dimsize = hslice.dimsizes[0] self._call() dangerous_delete = False if lib._del_middle_rec_bug and \ (interval != 1 or (start != 0 and start + count < dimsize)): #delete from middle is dangerous if only have one index entry entries = ctypes.c_long(0) lib.call(const.GET_, const.zVAR_nINDEXENTRIES_, ctypes.byref(entries)) dangerous_delete = (entries.value == 1) if dangerous_delete: data = self[...] 
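# Workaround path: read the whole variable, drop the requested records in
# memory, rewrite the surviving records, then truncate the now-duplicate
# records from the end (see the library-bug note in the docstring above).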
data = numpy.delete( data, numpy.arange(start, start + count * interval, interval), 0) self[0:dimsize - count] = data first_rec = dimsize - count last_rec = dimsize - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif interval == 1: first_rec = ctypes.c_long(start) last_rec = ctypes.c_long(start + count - 1) lib.call(const.DELETE_, const.zVAR_RECORDS_, first_rec, last_rec) else: self._call() #delete from end to avoid renumbering of records for recno in range(start + (count - 1) * interval, start - 1, -1 * interval): lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(recno), ctypes.c_long(recno)) def __setitem__(self, key, data): """Puts a slice into the data array. Details under :py:class:`pycdf.Var`. @param key: index or slice to store @type key: int or slice @param data: data to store @type data: numpy.array @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. IndexError will @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) n_recs = hslice.counts[0] hslice.expand(data) cdf_type = self.type() if cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) else: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=self._np_type()) if cdf_type == const.CDF_EPOCH16.value: datashape = data.shape[:-1] else: datashape = data.shape #Check data sizes if datashape != tuple(hslice.expected_dims()): raise ValueError('attempt to assign data of dimensions ' + str(datashape) + ' to slice of dimensions ' + str(tuple(hslice.expected_dims()))) #Flip majority and reversed dimensions, see convert_input_array data = hslice.convert_output_array(data) #Handle insertions and similar weirdness if hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Specified slice ends before last record, so insert in middle saved_data = self[hslice.starts[0] + n_recs:] if hslice.counts[0] > 0: hslice.select() lib.call(const.PUT_, const.zVAR_HYPERDATA_, data.ctypes.data_as(ctypes.c_void_p)) if hslice.counts[0] < n_recs: first_rec = hslice.starts[0] + hslice.counts[0] last_rec = hslice.dimsizes[0] - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Put saved data in after inserted data self[hslice.starts[0] + hslice.counts[0]:] = saved_data def extend(self, data): """ Append multiple values to the end of this variable This is an efficiency function which overrides the base implementation in MutableSequence. 
Parameters ---------- data : the data to append """ self[len(self):] = data def insert(self, index, data): """ Inserts a *single* record before an index Parameters ---------- index : int index before which to insert the new record data : the record to insert """ self[index:index] = [data] def _create(self, var_name, datatype, n_elements = 1, dims = (), recVary = const.VARY, dimVarys = None): """Creates a new zVariable @param var_name: name of this variable @type var_name: string @param datatype: CDF data type @type datatype: ctypes.c_long @param n_elements: number of elements (should be 1 except for CDF_CHAR variables). @type n_elements: long @param dims: size of each dimension for multi-dimensional variable, or empty for a zero-dimensional @type dims: sequence of long @param recVary: record variance for this variable (VARY/NOVARY) @type recVary: long @param dimVarys: array of VARY or NOVARY, variance for each dimension @type dimVarys: sequence of long @return: new variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.new}. """ dim_array = (ctypes.c_long * len(dims))(*dims) enc_name = var_name.encode('ascii') if dimVarys is None: dim_vary_array = (ctypes.c_long * (len(dims) if len(dims) > 0 else 1))(const.VARY) else: dim_vary_array = (ctypes.c_long * len(dims))(*dimVarys) varNum = ctypes.c_long(0) self.cdf_file._call(const.CREATE_, const.zVAR_, enc_name, datatype, ctypes.c_long(n_elements), ctypes.c_long(len(dims)), dim_array, recVary, dim_vary_array, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) def _delete(self): """Removes this zVariable from the CDF @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ self._call(const.DELETE_, const.zVAR_) self.cdf_file.clear_from_cache(self._name) self._name = None def _get(self, var_name): """Gets an existing zVariable @param var_name: name of this variable @type var_name: string @return: variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.__getitem__}. 
""" if isinstance(var_name, str_classes): try: enc_name = var_name.encode('ascii').rstrip() except AttributeError: enc_name = var_name.rstrip() #already in ASCII #'touch' CDF to cause an error if the name isn't there; get number varNum = ctypes.c_long(0) self.cdf_file._call(const.GET_, const.zVAR_NUMBER_, enc_name, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) else: #Looking up by number name = ctypes.create_string_buffer(const.CDF_VAR_NAME_LEN256+1) self.cdf_file._call(const.SELECT_, const.zVAR_, ctypes.c_long(var_name), const.GET_, const.zVAR_NAME_, name) self._name = name.value.rstrip() self.cdf_file.add_to_cache(self._name, var_name) def _num(self): """Returns the zVar number for this variable @return: number of this zVar @rtype: int """ return self.cdf_file.var_num(self._name) def __len__(self): """Get number of records for this variable in this file @return: Number of records @rtype: long @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ count = ctypes.c_long(0) self._call(const.GET_, const.zVAR_MAXREC_, ctypes.byref(count)) return (count.value + 1) def __repr__(self): """Returns representation of the variable Cannot return anything that can be eval'd to create a copy, so just wrap the informal representation in angle brackets. @return: info on this zVar @rtype: str """ return '<Var:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the variable This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.Var`. @return: info on this zVar, CDFTYPE [dimensions] NRV (if not record-varying) @rtype: str """ if self.cdf_file._opened: cdftype = self.type() chartypes = (const.CDF_CHAR.value, const.CDF_UCHAR.value) rv = self.rv() typestr = lib.cdftypenames[cdftype] + \ ('*' + str(self._nelems()) if cdftype in chartypes else '' ) if rv: sizestr = str([len(self)] + self._dim_sizes()) else: sizestr = str(self._dim_sizes()) return typestr + ' ' + sizestr + ('' if rv else ' NRV') else: if isinstance(self._name, str): return 'zVar "{0}" in closed CDF {1}'.format( self._name, self.cdf_file.pathname) else: return 'zVar "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self.cdf_file.pathname.decode('ascii')) def _n_dims(self): """Get number of dimensions for this variable @return: the number of dimensions @rtype: long """ n_dims = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMDIMS_, ctypes.byref(n_dims)) return n_dims.value def _dim_sizes(self): """Get the dimension sizes for this variable @return: sequence of sizes @rtype: sequence of long @note: This will always be in Python order (i.e. row major, last index iterates most quickly), *regardless* of the majority of the CDF. """ sizes = (ctypes.c_long * const.CDF_MAX_DIMS)(0) self._call(const.GET_, const.zVAR_DIMSIZES_, sizes) sizes = sizes[0:self._n_dims()] return sizes def rv(self, new_rv=None): """ Gets or sets whether this variable has record variance If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Other Parameters ================ new_rv : boolean True to change to record variance, False to change to NRV, unspecified to simply check variance. 
Returns ======= out : Boolean True if record varying, False if NRV """ if new_rv != None: self._call(const.PUT_, const.zVAR_RECVARY_, const.VARY if new_rv else const.NOVARY) vary = ctypes.c_long(0) self._call(const.GET_, const.zVAR_RECVARY_, ctypes.byref(vary)) return vary.value != const.NOVARY.value def dv(self, new_dv=None): """ Gets or sets dimension variance of each dimension of variable. If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Parameters ========== new_dv : list of boolean Each element True to change that dimension to dimension variance, False to change to not dimension variance. (Unspecified to simply check variance.) Returns ======= out : list of boolean True if that dimension has variance, else false. """ ndims = self._n_dims() if new_dv != None: if len(new_dv) != ndims: raise ValueError('Must specify variance for ' + str(ndims) + 'dimensions.') varies = (ctypes.c_long * ndims)( *[const.VARY if dv else const.NOVARY for dv in new_dv]) self._call(const.PUT_, const.zVAR_DIMVARYS_, varies) if ndims == 0: return [] varies = (ctypes.c_long * const.CDF_MAX_DIMS)() self._call(const.GET_, const.zVAR_DIMVARYS_, varies) return [dv != const.NOVARY.value for dv in varies[0:ndims]] def _call(self, *args, **kwargs): """Select this CDF and variable and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. @param args: Passed directly to the CDF library interface. Useful constants are defined in the :py:mod:`pycdf.const` module of this package. @type args: various, see :py:mod:`ctypes`. @return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self.cdf_file._call( const.SELECT_, const.zVAR_, ctypes.c_long(self.cdf_file.var_num(self._name)), *args, **kwargs) def _np_type(self): """Returns the numpy type of this variable This is the numpy type that will come directly out of the CDF; see :meth:`dtype` for the representation post-conversion. Raises ====== CDFError : for library-reported error or failure to find numpy type Returns ======= out : dtype numpy dtype that will hold value from this variable """ cdftype = self.type() if cdftype == const.CDF_CHAR.value or cdftype == const.CDF_UCHAR.value: return numpy.dtype('S' + str(self._nelems())) try: return lib.numpytypedict[cdftype] except KeyError: raise CDFError(const.BAD_DATA_TYPE) def type(self, new_type=None): """ Returns or sets the CDF type of this variable Parameters ========== new_type : ctypes.c_long the new type from :mod:`~pycdf.const` Returns ======= out : int CDF type """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) n_elements = ctypes.c_long(self._nelems()) self._call(const.PUT_, const.zVAR_DATASPEC_, new_type, n_elements) self._type = None if self._type is None: cdftype = ctypes.c_long(0) self._call(const.GET_, const.zVAR_DATATYPE_, ctypes.byref(cdftype)) self._type = cdftype.value return self._type def _nelems(self): """Number of elements for each value in this variable This is the length of strings for CHAR and UCHAR, should be 1 otherwise. 
@return: length of strings @rtype: int """ nelems = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMELEMS_, ctypes.byref(nelems)) return nelems.value def name(self): """ Returns the name of this variable Returns ======= out : str variable's name """ if isinstance(self._name, str): return self._name elif isinstance(self._name, bytes): return self._name.decode() def compress(self, comptype=None, param=None): """ Set or check the compression of this variable Compression may not be changeable on variables with data already written; even deleting the data may not permit the change. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) Returns ======= out : tuple the (comptype, param) currently in effect """ return _compress(self, comptype, param) def copy(self): """ Copies all data and attributes from this variable Returns ======= out : :class:`VarCopy` list of all data in record order """ return VarCopy(self) def rename(self, new_name): """ Renames this variable Parameters ========== new_name : str the new name for this variable """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_VAR_NAME_LEN256: raise CDFError(const.BAD_VAR_NAME) self._call(const.PUT_, const.zVAR_NAME_, enc_name) self.cdf_file.add_to_cache( enc_name, self.cdf_file.var_num(self._name)) #Still in cache del self.cdf_file._var_nums[self._name] self._name = enc_name @property def shape(self): """ Provides the numpy array-like shape of this variable. Returns a tuple; first element is number of records (RV variable only) And the rest provide the dimensionality of the variable. .. note:: Assigning to this attribute will not change the shape. """ if self.rv(): return tuple([len(self)] + self._dim_sizes()) else: return tuple(self._dim_sizes()) @property def dtype(self): """ Provide the numpy dtype equivalent to the CDF type of this variable. Data from this variable will be returned in numpy arrays of this type. See Also -------- type """ cdftype = self.type() if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str is not bytes and not self._raw: return numpy.dtype('U' + str(self._nelems())) if cdftype in (const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value) and not self._raw: return numpy.dtype('O') return self._np_type() def _get_attrs(self): """Get attribute list Provide access to the zVar's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = zAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. 
""") class _Hyperslice(object): """Represents a CDF 'slice' used for the hyper CDF functions For internal module use only. @ivar dims: number of dimensions to this slice, usually number of dimensions to the variable plus one for the record, which represents the 0th (least rapidly varying) dimension. @type dims: int @ivar dimsizes: size of each dimension (0th is number of records) @type dimsizes: list of int @ivar starts: index of the start value for each dimension ('dimension indices' in CDF speak) @type starts: list of int @ivar counts: number of values to get from each dimension. Final result will be the product of everything in counts. ('dimension counts' in CDF speak) @type counts: numpy.array @ivar intervals: interval between successive indices to use for each dimension. ('dimension invervals' in CDF speak) @type intervals: list of int @ivar degen: is this dimension degenerate, i.e. should be removed in the returned dataset. A 3D array with one dimension degenerate will be returned as a 2D array (i.e. list-of-lists.) @type degen: numpy.array @ivar rev: should this dimension be returned in reverse order? @type rev: numpy.array @ivar column: is this slice in column-major mode (if false, row-major) @type column: boolean @ivar zvar: what CDF variable this object slices on @type zvar: :py:class:`pycdf.Var` @ivar expanded_key: fully-expanded version of the key passed to the constructor (all dimensions filled in) @type expanded_key: tuple @note: All dimension-related variables are stored row-major (Python order) """ def __init__(self, zvar, key): """Create a Hyperslice @param zvar: zVariable that this slices @type zvar: :py:class:`pycdf.Var` @param key: Python multi-dimensional slice as passed to __getitem__ @type key: tuple of slice and/or int @raise IndexError: if slice is out of range, mismatches dimensions, or otherwise unparsable. @raise ValueError: if slice has invalid values """ self.zvar = zvar self.rv = self.zvar.rv() #dim of records, + 1 record dim (NRV always is record 0) self.dims = zvar._n_dims() + 1 self.dimsizes = [len(zvar)] + \ zvar._dim_sizes() self.starts = [0] * self.dims self.counts = numpy.empty((self.dims,), dtype=numpy.int32) self.counts.fill(1) self.intervals = [1] * self.dims self.degen = numpy.zeros(self.dims, dtype=numpy.bool) self.rev = numpy.zeros(self.dims, dtype=numpy.bool) #key is: #1. a single value (integer or slice object) if called 1D #2. a tuple (of integers and/or slice objects) if called nD #3. Each item is either a single value (degenerate dim) # or a slice object. 
if not hasattr(key, '__len__'): #Not a container object, pack in tuple key = (key, ) if not self.rv: key = (0, ) + key #NRV, so always get 0th record (degenerate) key = self.expand_ellipsis(key, self.dims) if self.rv: #special-cases for RV variables if len(key) == 1: #get all data for this record(s) key = self.expand_ellipsis(key + (Ellipsis, ), self.dims) elif len(key) == self.dims - 1: #get same slice from each record key = (slice(None, None, None), ) + key if len(key) == self.dims: self.expanded_key = key for i in range(self.dims): idx = key[i] if hasattr(idx, 'start'): #slice (self.starts[i], self.counts[i], self.intervals[i], self.rev[i]) = \ self.convert_range(idx.start, idx.stop, idx.step, self.dimsizes[i]) else: #Single degenerate value if idx < 0: idx += self.dimsizes[i] if idx != 0 and (idx >= self.dimsizes[i] or idx < 0): raise IndexError('list index out of range') self.starts[i] = idx self.degen[i] = True else: raise IndexError('Slice does not match dimensions for zVar ' + str(zvar._name)) self.column = zvar.cdf_file.col_major() def expected_dims(self, data=None): """Calculate size of non-degenerate dimensions Figures out size, in each dimension, of expected input data @return: size of each dimension for this slice, excluding degenerate @rtype: list of int """ return [self.counts[i] for i in range(self.dims) if not self.degen[i]] def expand(self, data): """Expands the record dimension of this slice to hold a set of data If the length of data (outermost dimension) is larger than the record count (counts[0]) for this slice, expand the slice to hold all the data. This requires that the record dimension of the slice not be degenerate, and also that it not have been completely specified when the hyperslice was created (i.e. record dimension either ellipsis or no specified stop.) Does *not* expand any other dimension, since that's Very Hard in CDF. @param data: the data which are intended to be stored in this slice @type data: list """ rec_slice = self.expanded_key[0] if not self.rv or isinstance(data, str_classes) or self.degen[0] or \ not hasattr(rec_slice, 'stop'): return if len(data) < self.counts[0]: #Truncate to fit data if rec_slice.stop is None and rec_slice.step in (None, 1): self.counts[0] = len(data) elif len(data) > self.counts[0]: #Expand to fit data if rec_slice.step in (None, 1): self.counts[0] = len(data) def create_array(self): """Creates a numpy array to hold the data from this slice Returns ======= out : numpy.array array sized, typed, and dimensioned to hold data from this slice """ counts = self.counts degen = self.degen if self.column: counts = self.reorder(counts) degen = self.reorder(degen) #TODO: Forcing C order for now, revert to using self.column later array = numpy.empty( [counts[i] for i in range(len(counts)) if not degen[i]], self.zvar._np_type(), order='C') return numpy.require(array, requirements=('C', 'A', 'W')) def convert_input_array(self, buffer): """Converts a buffer of raw data from this slice EPOCH(16) variables always need to be converted. 
CHAR need converted to Unicode if py3k Parameters ========== buffer : numpy.array data as read from the CDF file Returns ======= out : numpy.array converted data """ result = self._flip_array(buffer) #Convert to derived types cdftype = self.zvar.type() if not self.zvar._raw: if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str != bytes: dt = numpy.dtype('U{0}'.format(result.dtype.itemsize)) result = numpy.require(numpy.char.array(result).decode(), dtype=dt) elif cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(result) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(result) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(result) return result def convert_output_array(self, buffer): """Convert a buffer of data that will go into this slice Parameters ========== buffer : numpy.array data to go into the CDF file Returns ======= out : numpy.array input with majority flipped and dimensions reversed to be suitable to pass directly to CDF library. """ buffer = self._flip_array(buffer) return numpy.require(buffer, requirements=('C', 'A', 'W')) def _flip_array(self, data): """ Operations for majority, etc. common between convert_input and _output """ cdftype = self.zvar.type() #Flip majority if any non-degenerate dimensions exist if self.column and not min(self.degen): #Record-number dim degen, swap whole thing if self.degen[0]: if cdftype == const.CDF_EPOCH16.value: #Maintain last dimension data = data.transpose( list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose() #Record-number dimension is not degenerate, so keep it first else: if cdftype == const.CDF_EPOCH16.value: data = data.transpose( [0] + list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose( [0] + list(range(len(data.shape) - 1, 0, -1))) #Reverse non-degenerate dimensions in rev #Remember that the degenerate indices are already gone! if self.rev.any(): sliced = [(slice(None, None, -1) if self.rev[i] else slice(None)) for i in range(self.dims) if not self.degen[i]] if cdftype == const.CDF_EPOCH16.value: #don't reverse last dim sliced.extend(slice(None)) data = operator.getitem(data, tuple(sliced)) return data def select(self): """Selects this hyperslice in the CDF Calls the CDF library to select the CDF, variable, records, and array elements corresponding to this slice. 
""" args = (const.SELECT_, const.zVAR_RECNUMBER_, ctypes.c_long(self.starts[0]), const.SELECT_, const.zVAR_RECCOUNT_, ctypes.c_long(self.counts[0]), const.SELECT_, const.zVAR_RECINTERVAL_, ctypes.c_long(self.intervals[0])) if self.dims > 1: dims = self.dims - 1 args += (const.SELECT_, const.zVAR_DIMINDICES_, (ctypes.c_long * dims)(*self.starts[1:]), const.SELECT_, const.zVAR_DIMCOUNTS_, (ctypes.c_long * dims)(*self.counts[1:]), const.SELECT_, const.zVAR_DIMINTERVALS_, (ctypes.c_long * dims)(*self.intervals[1:])) self.zvar._call(*args) @staticmethod def expand_ellipsis(slices, n_dims): """Expands any ellipses into correct number of full-size slices @param slices: tuple of slices, integers, or ellipse objects @type slices: tuple @param n_dims: number of dimensions this slice is over @type n_dims: int @return: L{slices} with ellipses replaced by appropriate number of full-dimension slices @rtype: tuple @raise IndexError: if ellipses specified when already have enough dimensions """ if slices is Ellipsis: return tuple([slice(None, None, None) for i in range(n_dims)]) #Elements might be numpy arrays, so can't use in/index idx = [i for i, v in enumerate(slices) if v is Ellipsis] if not idx: #no ellipsis return slices if len(idx) > 1: #multiples! raise IndexError('Ellipses can only be used once per slice.') idx = idx[0] #how many dims to expand ellipsis to #remember the ellipsis is in len(slices) and must be replaced! extra = n_dims - len(slices) + 1 if extra < 0: raise IndexError('too many indices') result = slices[0:idx] + (slice(None), ) * extra + slices[idx+1:] return result @staticmethod def check_well_formed(data): """Checks if input data is well-formed, regular array""" d = numpy.asanyarray(data) if d.dtype == numpy.object: #this is probably going to be bad try: len(d.flat[0]) except TypeError: #at least it's not a list pass else: raise ValueError( 'Data must be well-formed, regular array of number, ' 'string, or datetime') @staticmethod def dimensions(data): """Finds the dimensions of a nested list-of-lists @param data: data of which dimensions are desired @type data: list (of lists) @return: dimensions of L{data}, in order outside-in @rtype: list of int @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) _Hyperslice.check_well_formed(d) return d.shape @staticmethod def types(data, backward=False): """Find dimensions and valid types of a nested list-of-lists Any given data may be representable by a range of CDF types; infer the CDF types which can represent this data. This breaks down to: 1. Proper kind (numerical, string, time) 2. Proper range (stores highest and lowest number) 3. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: 1. Type that matches precision of data first, then 2. integer type before float type, then 3. Smallest type first, then 4. signed type first, then 5. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if L{data} specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: 1. absolute values between 0 and 3e-39 2. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. 
@param data: data for which dimensions and CDF types are desired @type data: list (of lists) @param backward: limit to pre-CDF3 types @type backward: bool @return: dimensions of L{data}, in order outside-in; CDF types which can represent this data; number of elements required (i.e. length of longest string) @rtype: 3-tuple of lists ([int], [ctypes.c_long], [int]) @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) dims = d.shape elements = 1 types = [] _Hyperslice.check_well_formed(d) if d.dtype.kind in ('S', 'U'): #it's a string types = [const.CDF_CHAR, const.CDF_UCHAR] elements = d.dtype.itemsize if d.dtype.kind == 'U': #UTF-8 uses 4 bytes per elements //= 4 elif d.size and hasattr(numpy.ma.getdata(d).flat[0], 'microsecond'): if max((dt.microsecond % 1000 for dt in d.flat)) > 0: types = [const.CDF_EPOCH16, const.CDF_EPOCH, const.CDF_TIME_TT2000] else: types = [const.CDF_EPOCH, const.CDF_EPOCH16, const.CDF_TIME_TT2000] if backward: del types[types.index(const.CDF_EPOCH16)] del types[-1] elif not lib.supports_int8: del types[-1] elif d is data or isinstance(data, numpy.generic): #numpy array came in, use its type (or byte-swapped) types = [k for k in lib.numpytypedict if (lib.numpytypedict[k] == d.dtype or lib.numpytypedict[k] == d.dtype.newbyteorder()) and not k in lib.timetypes] if (not lib.supports_int8 or backward) \ and const.CDF_INT8.value in types: del types[types.index(const.CDF_INT8.value)] #Maintain priority to match the ordered lists below: #float/double (44, 45) before real (21/22), and #byte (41) before int (1) before char (51). So hack. #Consider making typedict an ordered dict once 2.6 is dead. types.sort(key=lambda x: x % 50, reverse=True) if not types: #not a numpy array, or can't parse its type if d.dtype.kind == 'O': #Object. 
Try to make it numeric #Can't do safe casting from Object, so try and compare #Basically try most restrictive to least restrictive trytypes = (numpy.uint64, numpy.int64, numpy.float64) for t in trytypes: try: newd = d.astype(dtype=t) except: #Failure to cast, try next type continue if (newd == d).all(): #Values preserved, use this type d = newd #Continue with normal guessing, as if a list break else: #fell through without a match raise ValueError( 'Cannot convert generic objects to CDF type.') if d.dtype.kind in ('i', 'u'): #integer minval = numpy.min(d) maxval = numpy.max(d) if minval < 0: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_INT2, const.CDF_INT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 15, 2 ** 31, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] else: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_UINT1, const.CDF_INT2, const.CDF_UINT2, const.CDF_INT4, const.CDF_UINT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 8, 2 ** 15, 2 ** 16, 2 ** 31, 2 ** 32, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] types = [t for (t, c) in zip(types, cutoffs) if c > maxval and (minval >= 0 or minval >= -c)] if (not lib.supports_int8 or backward) \ and const.CDF_INT8 in types: del types[types.index(const.CDF_INT8)] else: #float if dims is (): if d != 0 and (abs(d) > 1.7e38 or abs(d) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] else: absolutes = numpy.abs(d[d != 0]) if len(absolutes) > 0 and \ (numpy.max(absolutes) > 1.7e38 or numpy.min(absolutes) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] types = [t.value if hasattr(t, 'value') else t for t in types] return (dims, types, elements) @staticmethod def reorder(seq): """Reorders seq to switch array majority Used to take an array of subscripts between row and column majority. First element is not touched, being the record number. @param seq: a sequence of *subscripts* @type seq: sequence of integers @return: seq with all but element 0 reversed in order @rtype: sequence of integers """ return numpy.concatenate((seq[0:1], numpy.flipud(seq)[:-1])) @staticmethod def convert_range(start, stop, step, size): """Converts a start/stop/step range to start/count/interval (i.e. changes from Python-style slice to CDF-style) @param start: index to start a slice at, may be none or negative @type start: int @param stop: index at end of slice (one-past, standard Python), may be none or negative @type stop: int @param step: interval for stepping through stlice @type step: int @param size: size of list to slice @type size: int @return: (start, count, interval, rev) where: 1. start is the start index, normalized to be within the size of the list and negatives handled 2. count is the number of records in the slice, guaranteed to stop before the end 3. interval is the skip between records 4. rev indicates whether the sequence should be reversed @rtype: (int, int, int, boolean) """ (start, stop, step) = slice(start, stop, step).indices(size) if step < 0: step *= -1 count = int((start - stop + step - 1) / step) start = start - (count - 1) * step rev = True else: count = int((stop - start + step - 1) / step) rev = False if count < 0: count = 0 start = 0 return (start, count, step, rev) class Attr(MutableSequence): """An attribute, g or z, for a CDF .. 
warning:: This class should not be used directly, but only in its subclasses, :class:`gAttr` and :class:`zAttr`. The methods listed here are safe to use in the subclasses. Represents a CDF attribute, providing access to the Entries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. An introduction to CDF attributes can be found in section 2.4 of the CDF user's guide. Each element of the list is a single Entry of the appropriate type. The index to the elements is the Entry number. Multi-dimensional slicing is *not* supported; an Entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry .. autosummary:: ~Attr.append ~Attr.has_entry ~Attr.insert ~Attr.max_idx ~Attr.new ~Attr.number ~Attr.rename ~Attr.type .. automethod:: append .. automethod:: has_entry .. automethod:: insert .. automethod:: max_idx .. automethod:: new .. automethod:: number .. automethod:: rename .. automethod:: type """ def __init__(self, cdf_file, attr_name, create=False): """Initialize this attribute @param cdf_file: CDF file containing this attribute @type cdf_file: :py:class:`pycdf.CDF` @param attr_name: Name of this attribute @type attr_name: str @param create: True to create attribute, False to look up existing. @type create: bool """ self._cdf_file = cdf_file self._raw = False if isinstance(attr_name, str_classes): try: self._name = attr_name.encode('ascii') except AttributeError: self._name = attr_name attrno = ctypes.c_long() if create: self._cdf_file._call(const.CREATE_, const.ATTR_, self._name, self.SCOPE, ctypes.byref(attrno)) self._cdf_file.add_attr_to_cache( self._name, attrno.value, self.SCOPE == const.GLOBAL_SCOPE) else: #Ensure exists, and populate cache. See scope note below attrno, scope = self._cdf_file.attr_num(self._name) else: name = ctypes.create_string_buffer(const.CDF_ATTR_NAME_LEN256 + 1) scope = ctypes.c_long(0) self._cdf_file._call(const.SELECT_, const.ATTR_, ctypes.c_long(attr_name)) #Because it's possible to create a gAttr Python objecting #referencing an Attribute with variable scope, and vice-versa, #do NOT assume the scope matches #(Higher level code checks for that being a bad thing.) self._cdf_file._call( const.GET_, const.ATTR_NAME_, name, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) self._name = name.value.rstrip() if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) self._cdf_file.add_attr_to_cache(self._name, attr_name, scope) def __getitem__(self, key): """Return a slice of Entries. Because Attributes may be sparse, a multi-element slice will return None for those elements which do not have associated Entries. @param key: index or range of Entry number to return @type key: slice or int @return: a list of entries, appropriate type. @raise IndexError: if L{key} is an int and that Entry number does not exist. 
""" if key is Ellipsis: key = slice(None, None, None) if hasattr(key, 'indices'): idx = range(*key.indices(self.max_idx() + 1)) return [self._get_entry(i) if self.has_entry(i) else None for i in idx] else: if self.has_entry(key): return self._get_entry(key) else: raise IndexError('list index ' + str(key) + ' out of range.') def _check_other_entries(self, types): """Try to get the type of this entry from others in the Attribute For zAttrs, checks if all other Entries are the same type, and at least one doesn't match its zVar, i.e. Entry type dominates (otherwise assumption is the Var type dominates). For gAttrs, checks all other Entries, and gives priority to the one that's earliest in the possible type list and exists in other Entries. This is only one component of Entry type guessing! :param list types: CDF types that are candidates (match the data) :return: The type discerned from other Entries, or None """ if self.ENTRY_ == const.zENTRY_: #If everything else is the same entry type, #and one is not the same as its var, probably #all entries should be of that type cand_et = None #The Entry type that might work one_var_diff = False #One Var has a type different from Entry for num in range(self.max_idx() + 1): if not self.has_entry(num): continue vartype = self._cdf_file[num].type() entrytype = self.type(num) if vartype != entrytype: one_var_diff = True if cand_et is None: if not entrytype in types: return None #One var has Entry with "impossible" type cand_et = entrytype elif cand_et != entrytype: return None #Two vars have Entries with different types if one_var_diff and cand_et is not None: return cand_et else: # Of those types which exist in other entries, # find the one which is earliest # in types, i.e. the preferred type entrytypes = [self.type(num) for num in range(self.max_idx() + 1) if self.has_entry(num)] entrytypes = [et for et in entrytypes if et in types] if entrytypes: return types[ min([types.index(et) for et in entrytypes])] return None def __setitem__(self, key, data): """Set a slice of Entries. @param key: index or range of Entry numbers to set @type key: slice or int @param data: the data to set these entries to. Normally each entry should be a sequence; if a scalar is provided, it is treated as a single-element list. @type data: scalar or list @raise ValueError: if size of {data} does not match size of L{key} @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. 
""" if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): #Single value, promote everything a dimension idx = (key, key + 1, 1) data = [data] else: idx = key.indices(self.max_idx() + 1) if key.step is None or key.step > 0: #Iterating forward, extend slice to match data if len(data) > len(range(*idx)): idx = (idx[0], idx[0] + idx[2] * len(data), idx[2]) #get, and check, types and sizes for all data #checks first so don't have error after changing half the Entries data_idx = -1 typelist = [] for i in range(*idx): data_idx += 1 if data_idx >= len(data): continue datum = data[data_idx] if datum is None: typelist[i] = (None, None, None) continue (dims, types, elements) = _Hyperslice.types( datum, backward=self._cdf_file.backward) if len(types) <= 0: raise ValueError('Cannot find a matching CDF type.') if len(dims) > 1: raise ValueError('Entries must be scalar or 1D.') elif len(dims) == 1 and isinstance(datum[0], str_classes): raise ValueError('Entry strings must be scalar.') entry_type = None if self.has_entry(i): #If the entry already exists, match its type entry_type = self.type(i) if not entry_type in types: entry_type = None if entry_type is None: #Check other entries for this attribute entry_type = self._check_other_entries(types) if entry_type is None and self.ENTRY_ == const.zENTRY_: #Fall back to zVar type vartype = self._cdf_file[i].type() if vartype in types: entry_type = vartype else: entry_type = types[0] elif entry_type is None: entry_type = types[0] if not entry_type in lib.numpytypedict: raise ValueError('Cannot find a matching numpy type.') typelist.append((dims, entry_type, elements)) data_idx = -1 for i in range(*idx): data_idx += 1 if data_idx >= len(data) or data[data_idx] is None: if self.has_entry(i): del self[i] continue datum = data[data_idx] (dims, entry_type, elements) = typelist[data_idx] self._write_entry(i, datum, entry_type, dims, elements) def __delitem__(self, key): """Delete a slice of Entries. @param key: index or range of Entry numbers to delete @type key: slice or int @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. """ if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): idx = (key, key + 1, 1) else: idx = key.indices(self.max_idx() + 1) for i in range(*idx): self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(i), const.DELETE_, self.ENTRY_) def __iter__(self, current=0): """Iterates over all entries in this Attribute Returns data from one entry at a time until reaches the end. @note: Returned in entry-number order. """ while current <= self.max_idx(): if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current += 1 def __reversed__(self, current=None): """Iterates over all entries in this Attribute Returns data from one entry at a time, starting at end and going to beginning. @note: Returned in entry-number order. """ if current is None: current = self.max_idx() while current >= 0: if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current -= 1 def __len__(self): """Number of Entries for this Attr. NOT same as max Entry number. 
@return: Number of Entries @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_NUMENTRIES_, ctypes.byref(count)) return count.value def __repr__(self): """Returns representation of an attribute Cannot return anything that can be eval'd to create a copy of the attribtute, so just wrap the informal representation in angle brackets. @return: all the data in this attribute @rtype: str """ return '<\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute This is an 'informal' representation in that it cannot be evaluated directly to create an L{Attr}. @return: all the data in this attribute @rtype: str """ if self._cdf_file._opened: return '\n'.join([str(item) for item in self]) else: if isinstance(self._name, str): return 'Attribute "{0}" in closed CDF {1}'.format( self._name, self._cdf_file.pathname) else: return 'Attribute "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self._cdf_file.pathname.decode('ascii')) def insert(self, index, data): """Insert an entry at a particular number Inserts entry at particular number while moving all subsequent entries to one entry number later. Does not close gaps. Parameters ========== index : int index where to put the new entry data : data for the new entry """ max_entry = self.max_idx() if index > max_entry: #Easy case self[index] = data return for i in range(max_entry, index - 1, -1): if self.has_entry(i+1): self.__delitem__(i+1) if self.has_entry(i): self.new(self.__getitem__(i), type=self.type(i), number=i+1) self[index] = data def append(self, data): """Add an entry to end of attribute Puts entry after last defined entry (does not fill gaps) Parameters ========== data : data for the new entry """ self[self.max_idx() + 1] = data def _call(self, *args, **kwargs): """Select this CDF and Attr and call the CDF internal interface @param args: Passed directly to the CDF library interface. @type args: various, see :py:mod:`ctypes`. @return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self._cdf_file._call( const.SELECT_, const.ATTR_, ctypes.c_long(self._cdf_file.attr_num(self._name)[0]), *args, **kwargs) def _entry_len(self, number): """Number of elements in an Entry @param number: number of Entry @type number: int @return: number of elements @rtype: int """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') count = ctypes.c_long(0) self._call( const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_NUMELEMS_, ctypes.byref(count)) return count.value def type(self, number, new_type=None): """Find or change the CDF type of a particular Entry number Parameters ========== number : int number of Entry to check or change Other Parameters ================ new_type type to change this Entry to, from :mod:`~pycdf.const`. Omit to only check type. Returns ======= out : int CDF variable type, see :mod:`~pycdf.const` Notes ===== If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) size = ctypes.c_long(self._entry_len(number)) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATASPEC_, new_type, size, ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') cdftype = ctypes.c_long(0) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATATYPE_, ctypes.byref(cdftype), ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') return cdftype.value def has_entry(self, number): """Check if this attribute has a particular Entry number Parameters ========== number : int number of Entry to check or change Returns ======= out : bool True if ``number`` is a valid entry number; False if not """ status = self._call(const.CONFIRM_, self.ENTRY_EXISTENCE_, ctypes.c_long(number), ignore=(const.NO_SUCH_ENTRY, )) return not status == const.NO_SUCH_ENTRY def max_idx(self): """Maximum index of Entries for this Attr Returns ======= out : int maximum Entry number """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_MAXENTRY_, ctypes.byref(count)) return count.value def new(self, data, type=None, number=None): """Create a new Entry in this Attribute .. note:: If ``number`` is provided and an Entry with that number already exists, it will be overwritten. Parameters ========== data data to put in the Entry Other Parameters ================ type : int type of the new Entry, from :mod:`~pycdf.const` (otherwise guessed from ``data``) number : int Entry number to write, default is lowest available number. """ if number is None: number = 0 while self.has_entry(number): number += 1 (dims, types, elements) = _Hyperslice.types( data, backward=self._cdf_file.backward) if type is None: #Guess based on other entries type = self._check_other_entries(types) if type is None and self.ENTRY_ == const.zENTRY_: #Try to match variable type vartype = self._cdf_file[number].type() if vartype in types: type = vartype if type is None: type = types[0] elif hasattr(type, 'value'): type = type.value self._write_entry(number, data, type, dims, elements) def number(self): """Find the attribute number for this attribute Returns ======= out : int attribute number """ no = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.ATTR_NUMBER_, self._name, ctypes.byref(no)) return no.value def global_scope(self): """Determine scope of this attribute. Returns ======= out : bool True if global (i.e. gAttr), False if zAttr """ return self._cdf_file.attr_num(self._name)[1] def rename(self, new_name): """Rename this attribute Renaming a zAttribute renames it for *all* zVariables in this CDF! 
Parameters ========== new_name : str the new name of the attribute """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_ATTR_NAME_LEN256: raise CDFError(const.BAD_ATTR_NAME) self._call(const.PUT_, const.ATTR_NAME_, enc_name) self._cdf_file.add_attr_to_cache( enc_name, *self._cdf_file.attr_num(self._name)) #still in cache del self._cdf_file._attr_info[self._name] self._name = enc_name def _get_entry(self, number): """Read an Entry associated with this L{Attr} @param number: number of Entry to return @type number: int @return: data from entry numbered L{number} @rtype: list or str """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') #Make a big enough buffer length = self._entry_len(number) cdftype = self.type(number) if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): buff = numpy.empty((), 'S{0}'.format(length), order='C') else: if not cdftype in lib.numpytypedict: raise CDFError(const.BAD_DATA_TYPE) buff = numpy.empty((length,), lib.numpytypedict[cdftype], order='C') buff = numpy.require(buff, requirements=('C', 'A', 'W')) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATA_, buff.ctypes.data_as(ctypes.c_void_p)) #decode if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): if str == bytes or self._raw: #Py2k, leave as bytes result = bytes(buff) else: #Py3k, make unicode result = str(numpy.char.array(buff).decode()) else: if not self._raw: if cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(buff) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(buff) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(buff) else: result = buff else: result = buff if length == 1: result = result[0] return result def _write_entry(self, number, data, cdf_type, dims, elements): """Write an Entry to this Attr. @param number: number of Entry to write @type number: int @param data: data to write @param cdf_type: the CDF type to write, from :py:mod:`pycdf.const` @param dims: dimensions of L{data} @type dims: list @param elements: number of elements in L{data}, 1 unless it is a string @type elements: int """ if len(dims) == 0: n_write = 1 else: n_write = dims[0] if cdf_type in (const.CDF_CHAR.value, const.CDF_UCHAR.value): data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.dtype('S' + str(elements))) n_write = elements elif cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data), except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) elif cdf_type in lib.numpytypedict: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=lib.numpytypedict[cdf_type]) else: raise CDFError(const.BAD_DATA_TYPE) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATA_, ctypes.c_long(cdf_type), ctypes.c_long(n_write), data.ctypes.data_as(ctypes.c_void_p)) def _delete(self): """Delete this Attribute Also deletes all Entries associated with it. 
""" self._call(const.DELETE_, const.ATTR_) self._cdf_file.clear_attr_from_cache(self._name) self._name = None class zAttr(Attr): """zAttribute for zVariables within a CDF. .. warning:: Because zAttributes are shared across all variables in a CDF, directly manipulating them may have unexpected consequences. It is safest to operate on zEntries via :class:`zAttrList`. .. note:: When accessing a zAttr, pyCDF exposes only the zEntry corresponding to the associated zVariable. See Also ======== :class:`Attr` """ ENTRY_ = const.zENTRY_ ENTRY_DATA_ = const.zENTRY_DATA_ SCOPE = const.VARIABLE_SCOPE ENTRY_EXISTENCE_ = const.zENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMzENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXzENTRY_ ENTRY_NUMELEMS_ = const.zENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.zENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.zENTRY_DATASPEC_ def insert(self, index, data): """Insert entry at particular index number Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError def append(self, index, data): """Add entry to end of attribute list Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError class gAttr(Attr): """Global Attribute for a CDF Represents a CDF attribute, providing access to the gEntries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. Normally accessed by providing a key to a :class:`gAttrList`: >>> attribute = cdffile.attrs['attribute_name'] >>> first_gentry = attribute[0] Each element of the list is a single gEntry of the appropriate type. The index to the elements is the gEntry number. A gEntry may be either a single string or a 1D array of numerical type. Entries of numerical type (everything but CDF_CHAR and CDF_UCHAR) with a single element are returned as scalars; multiple-element entries are returned as a list. No provision is made for accessing below the entry level; the whole list is returned at once (but Python's slicing syntax can be used to extract individual items from that list.) Multi-dimensional slicing is *not* supported; an entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry gEntries are *not* necessarily contiguous; a gAttribute may have an entry 0 and entry 2 without an entry 1. :meth:`~Attr.len` will return the *number* of gEntries; use :meth:`~Attr.max_idx` to find the highest defined gEntry number and :meth:`~Attr.has_entry` to determine if a particular gEntry number exists. Iterating over all entries is also supported:: >>> entrylist = [entry for entry in attribute] Deleting gEntries will leave a "hole": >>> attribute[0:3] = [1, 2, 3] >>> del attribute[1] >>> attribute.has_entry(1) False >>> attribute.has_entry(2) True >>> print attribute[0:3] [1, None, 3] Multi-element slices over nonexistent gEntries will return ``None`` where no entry exists. Single-element indices for nonexistent gEntries will raise ``IndexError``. Assigning ``None`` to a gEntry will delete it. 
When assigning to a gEntry, the type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing gEntry of the same number in this gAttribute #. other gEntries in this gAttribute #. data-matching constraints described in :meth:`CDF.new`. See Also ======== :class:`Attr` """ ENTRY_ = const.gENTRY_ ENTRY_DATA_ = const.gENTRY_DATA_ SCOPE = const.GLOBAL_SCOPE ENTRY_EXISTENCE_ = const.gENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMgENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXgENTRY_ ENTRY_NUMELEMS_ = const.gENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.gENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.gENTRY_DATASPEC_ class AttrList(MutableMapping): """Object representing a list of attributes. .. warning:: This class should not be used directly, but only via its subclasses, :class:`gAttrList` and :class:`zAttrList`. Methods listed here are safe to use from the subclasses. .. autosummary:: ~AttrList.clone ~AttrList.copy ~AttrList.from_dict ~AttrList.new ~AttrList.rename .. automethod:: clone .. automethod:: copy .. automethod:: from_dict .. automethod:: new .. automethod:: rename """ def __init__(self, cdf_file, special_entry=None): """Initialize the attribute collection @param cdf_file: CDF these attributes are in @type cdf_file: :py:class:`pycdf.CDF` @param special_entry: callable which returns a "special" entry number, used to limit results for zAttrs to those which match the zVar (i.e. the var number) @type special_entry: callable """ self._cdf_file = cdf_file self.special_entry = special_entry def __getitem__(self, name): """Find an Attribute by name @param name: name of the Attribute to return @type name: str @return: attribute named L{name} @rtype: L{Attr} @raise KeyError: if there is no attribute named L{name} @raise CDFError: other errors in CDF library """ try: attrib = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attrib.global_scope() != self.global_scope: raise KeyError(name + ': no ' + self.attr_name + ' by that name.') return attrib def __setitem__(self, name, data): """Create an Attribute or change its entries @param name: name of Attribute to change @type name: str @param data: Entries to populate this Attribute with. Any existing Entries will be deleted! Another C{Attr} may be specified, in which case all its entries are copied. @type data: scalar, list, or L{Attr} """ if isinstance(data, AttrList): if name in self: del self[name] attr = self._get_or_create(name) for entryno in range(data.max_idx()): if data.has_entry(entryno): attr.new(data[entryno], data.type(entryno), entryno) else: attr = self._get_or_create(name) if isinstance(data, str_classes): data = [data] else: try: junk = len(data) except TypeError: data = [data] attr[:] = data del attr[len(data):] def __delitem__(self, name): """Delete an Attribute (and all its entries) @param name: name of Attribute to delete @type name: str """ try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) attr._delete() def __iter__(self, current=0): """Iterates over all Attr in this CDF or variable Returns name of one L{Attr} at a time until reaches the end. @note: Returned in number order. 
""" count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) while current < count.value: candidate = self.AttrType(self._cdf_file, current) if candidate.global_scope() == self.global_scope: if self.special_entry is None or \ candidate.has_entry(self.special_entry()): if str == bytes: value = yield(candidate._name) else: value = yield(candidate._name.decode()) if value != None: current = self[value].number() current += 1 def __repr__(self): """Returns representation of attribute list Cannot return anything that can be eval'd to create a copy of the list, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<' + self.__class__.__name__ + ':\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute list This is an 'informal' representation in that it cannot be evaluated directly to create an L{AttrList}. @return: all the data in this list of attributes @rtype: str """ if self._cdf_file._opened: return '\n'.join([key + ': ' + ( ('\n' + ' ' * (len(key) + 2)).join( [str(value[i]) + ' [' + lib.cdftypenames[value.type(i)] + ']' for i in range(value.max_idx() + 1) if value.has_entry(i)]) if isinstance(value, Attr) else str(value) + ' [' + lib.cdftypenames[self.type(key)] + ']' ) for (key, value) in sorted(self.items())]) else: if isinstance(self._cdf_file.pathname, str): return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname) else: return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname.decode('ascii')) def clone(self, master, name=None, new_name=None): """ Clones another attribute list, or one attribute from it, into this list. Parameters ========== master : AttrList the attribute list to copy from. This can be any dict-like object. Other Parameters ================ name : str (optional) name of attribute to clone (default: clone entire list) new_name : str (optional) name of the new attribute, default ``name`` """ if name is None: self._clone_list(master) else: self._clone_attr(master, name, new_name) def copy(self): """ Create a copy of this attribute list Returns ======= out : dict copy of the entries for all attributes in this list """ return dict((key, value[:] if isinstance(value, Attr) else value) for (key, value) in self.items()) def new(self, name, data=None, type=None): """ Create a new Attr in this AttrList Parameters ========== name : str name of the new Attribute Other Parameters ================ data data to put into the first entry in the new Attribute type CDF type of the first entry from :mod:`~pycdf.const`. Only used if data are specified. Raises ====== KeyError : if the name already exists in this list """ if name in self: raise KeyError(name + ' already exists.') #A zAttr without an Entry in this zVar will be a "get" not "create" attr = self._get_or_create(name) if data is not None: if self.special_entry is None: attr.new(data, type) else: attr.new(data, type, self.special_entry()) def rename(self, old_name, new_name): """ Rename an attribute in this list Renaming a zAttribute renames it for *all* zVariables in this CDF! Parameters ========== old_name : str the current name of the attribute new_name : str the new name of the attribute """ AttrList.__getitem__(self, old_name).rename(new_name) def from_dict(self, in_dict): """ Fill this list of attributes from a dictionary .. deprecated:: 0.1.5 Use :meth:`~pycdf.AttrList.clone` instead; it supports cloning from dictionaries. 
Parameters ========== in_dict : dict Attribute list is populated entirely from this dictionary; all existing attributes are deleted. """ warnings.warn("from_dict is deprecated and will be removed. Use clone.", DeprecationWarning) for k in in_dict: self[k] = in_dict[k] for k in list(self): if not k in in_dict: del self[k] def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{AttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name self[new_name] = master[name] def _clone_list(self, master): """Clones this attribute list from another @param master: the attribute list to copy from @type master: L{AttrList} """ for name in master: self._clone_attr(master, name) for name in list(self): #Can't iterate over a list we're changing if not name in master: del self[name] def _get_or_create(self, name): """Retrieve L{Attr} or create it if it doesn't exist @param name: name of the attribute to look up or create @type name: str @return: attribute with this name @rtype: L{Attr} """ attr = None try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status != const.NO_SUCH_ATTR: raise if attr is None: attr = self.AttrType(self._cdf_file, name, True) elif attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) return attr class gAttrList(AttrList): """ Object representing *all* the gAttributes in a CDF. Normally accessed as an attribute of an open :class:`CDF`: >>> global_attribs = cdffile.attrs Appears as a dictionary: keys are attribute names; each value is an attribute represented by a :class:`gAttr` object. To access the global attribute TEXT: >>> text_attr = cdffile.attrs['TEXT'] See Also ======== :class:`AttrList` """ AttrType = gAttr attr_name = 'gAttribute' global_scope = True def __len__(self): """ Number of gAttributes in this CDF Returns ======= out : int number of gAttributes in the CDF """ count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMgATTRS_, ctypes.byref(count)) return count.value class zAttrList(AttrList): """Object representing *all* the zAttributes in a zVariable. Normally accessed as an attribute of a :class:`Var` in an open CDF: >>> epoch_attribs = cdffile['Epoch'].attrs Appears as a dictionary: keys are attribute names, values are the value of the zEntry associated with the appropriate zVariable. Each vAttribute in a CDF may only have a *single* entry associated with each variable. The entry may be a string, a single numerical value, or a series of numerical values. Entries with multiple values are returned as an entire list; direct access to the individual elements is not possible. Example: finding the first dependency of (ISTP-compliant) variable ``Flux``: >>> print cdffile['Flux'].attrs['DEPEND_0'] zAttributes are shared among zVariables, one zEntry allowed per zVariable. (pyCDF hides this detail.) Deleting the last zEntry for a zAttribute will delete the underlying zAttribute. zEntries are created and destroyed by the usual dict methods on the zAttrlist: >>> epoch_attribs['new_entry'] = [1, 2, 4] #assign a list to new zEntry >>> del epoch_attribs['new_entry'] #delete the zEntry The type of the zEntry is guessed from data provided. 
The type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing zEntry corresponding to this zVar #. other zEntries in this zAttribute #. the type of this zVar #. data-matching constraints described in :py:meth:`CDF.new` See Also ======== :class:`AttrList` """ AttrType = zAttr attr_name = 'zAttribute' global_scope = False def __init__(self, zvar): """Initialize the attribute collection @param zvar: zVariable these attributes are in @param zvar: :py:class:`pycdf.Var` """ super(zAttrList, self).__init__(zvar.cdf_file, zvar._num) self._zvar = zvar def __getitem__(self, name): """Find an zEntry by name @param name: name of the zAttribute to return @type name: str @return: attribute named L{name} @rtype: L{zAttr} @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if attrib.has_entry(zvar_num): attrib._raw = self._zvar._raw return attrib[zvar_num] else: raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) def __delitem__(self, name): """Delete an zEntry by name @param name: name of the zEntry to delete @type name: str @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library @note: If this is the only remaining entry, the Attribute will be deleted. """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(str(name) + ': no such attribute for variable ' + str(self._zvar._name)) del attrib[zvar_num] if len(attrib) == 0: attrib._delete() def __setitem__(self, name, data): """Sets a zEntry by name The type of the zEntry is guessed from L{data}. The type is chosen to match the data; subject to that constraint, it will try to match (in order): 1. existing zEntry corresponding to this zVar 2. other zEntries in this zAttribute 3. the type of this zVar 4. data-matching constraints described in L{_Hyperslice.types} @param name: name of zAttribute; zEntry for this zVariable will be set in zAttribute by this name @type name: str @raise CDFError: errors in CDF library @raise ValueError: if unable to find a valid CDF type matching L{data}, or if L{data} is the wrong dimensions. """ try: attr = super(zAttrList, self).__getitem__(name) except KeyError: attr = zAttr(self._cdf_file, name, True) attr._raw = self._zvar._raw attr[self._zvar._num()] = data def __len__(self): """Number of zAttributes in this variable @return: number of zAttributes in the CDF which have entries for this variable. @rtype: int """ length = 0 count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) current = 0 while current < count.value: candidate = zAttr(self._cdf_file, current) if not candidate.global_scope(): if candidate.has_entry(self._zvar._num()): length += 1 current += 1 return length def type(self, name, new_type=None): """Find or change the CDF type of a zEntry in this zVar @param name: name of the zAttr to check or change @type name: str @param new_type: type to change it to, see :py:mod:`pycdf.const` @type new_type: ctypes.c_long @return: CDF variable type, see :py:mod:`pycdf.const` @rtype: int @note: If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) return attrib.type(zvar_num, new_type) def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{zAttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name if new_name in self: del self[new_name] self.new(new_name, master[name], master.type(name) if hasattr(master, 'type') else None)
def __init__(self, cdf_file, var_name, *args): """Create or locate a variable Parameters ========== cdf_file : :py:class:`pycdf.CDF` CDF file containing this variable var_name : string name of this variable Other Parameters ================ args additional arguments passed to :py:meth:`_create`. If none, opens an existing variable. If provided, creates a new one. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning """ self.cdf_file = cdf_file #This is the definitive "identify" of variable self._name = None self._type = None #CDF type (long) self._raw = False #Raw access (skip all conversions) if len(args) == 0: self._get(var_name) else: self._create(var_name, *args) #Weak reference to attribute list (use attrs instead) #This avoids a reference loop self._attrlistref = weakref.ref(zAttrList(self))
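A hypothetical sketch of how this constructor is normally reached; user code goes through an open CDF object (here called ``cdffile``) rather than instantiating Var directly. With no creation arguments, __init__ calls _get() to look up an existing zVariable, while CDF.new() and dict-style assignment pass creation arguments through to _create():

existing = cdffile['Epoch']                         # lookup path: Var(self, name) with no extra args
created = cdffile.new('B_GSE', data=[1., 2., 3.])   # creation path via CDF.new()
cdffile['Counts'] = [1, 2, 3, 4]                    # dict-style assignment also creates a new zVariable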
2766
2804
#!/usr/bin/env python # -*- coding: utf-8 -*- """ das developers note: This a is modification of the original SpacePy pycdf package. All refereneces to the greater spacepy package have been removed to create a small standalone module. --cwp 2018-10-18 The libcdf.so location code has been changed to find the version installed in anaconda. --cwp 2020-04-06 This package provides a Python interface to the Common Data Format (CDF) library used for many NASA missions, available at http://cdf.gsfc.nasa.gov/. It is targeted at Python 2.6+ and should work without change on either Python 2 or Python 3. The interface is intended to be 'pythonic' rather than reproducing the C interface. To open or close a CDF and access its variables, see the :class:`CDF` class. Accessing data within the variables is via the :class:`Var` class. The :data:`lib` object provides access to some routines that affect the functionality of the library in general. The :mod:`~pycdf.const` module contains constants useful for accessing the underlying library. Authors: Jon Niehof Institution: University of New Hampshire Contact: [email protected] Copyright 2010-2015 Los Alamos National Security, LLC. """ __contact__ = 'Jon Niehof, [email protected]' try: from collections.abc import MutableMapping, MutableSequence except ImportError: from collections import MutableMapping, MutableSequence import ctypes import ctypes.util import datetime import operator import os import os.path import shutil import sys import tempfile import warnings import weakref import numpy import numpy.ma #Import const AFTER library loaded, so failed load doesn't leave half-imported #from . import const try: str_classes = (str, bytes, unicode) except NameError: str_classes = (str, bytes) class Library(object): """ Abstraction of the base CDF C library and its state. Not normally intended for end-user use. An instance of this class is created at package load time as the :data:`~pycdf.lib` variable, providing access to the underlying C library if necessary. The CDF library itself is described in section 2.1 of the CDF user's guide, as well as the CDF C reference manual. Calling the C library directly requires knowledge of :mod:`ctypes`. Instantiating this object loads the C library, see :doc:`/pycdf` docs for details. .. autosummary:: ~Library.call ~Library.check_status ~Library.datetime_to_epoch ~Library.datetime_to_epoch16 ~Library.datetime_to_tt2000 ~Library.epoch_to_datetime ~Library.epoch_to_epoch16 ~Library.epoch_to_num ~Library.epoch_to_tt2000 ~Library.epoch16_to_datetime ~Library.epoch16_to_epoch ~Library.epoch16_to_tt2000 ~Library.set_backward supports_int8 ~Library.tt2000_to_datetime ~Library.tt2000_to_epoch ~Library.tt2000_to_epoch16 v_datetime_to_epoch v_datetime_to_epoch16 v_datetime_to_tt2000 v_epoch_to_datetime v_epoch_to_tt2000 v_epoch16_to_datetime v_epoch16_to_tt2000 v_tt2000_to_datetime v_tt2000_to_epoch v_tt2000_to_epoch16 libpath version .. automethod:: call .. automethod:: check_status .. automethod:: datetime_to_epoch .. automethod:: datetime_to_epoch16 .. automethod:: datetime_to_tt2000 .. automethod:: epoch_to_datetime .. automethod:: epoch_to_epoch16 .. automethod:: epoch_to_num .. automethod:: epoch_to_tt2000 .. automethod:: epoch16_to_datetime .. automethod:: epoch16_to_epoch .. automethod:: epoch16_to_tt2000 .. automethod:: set_backward .. attribute:: supports_int8 True if this library supports INT8 and TIME_TT2000 types; else False. .. automethod:: tt2000_to_datetime .. automethod:: tt2000_to_epoch .. 
automethod:: tt2000_to_epoch16 .. method:: v_datetime_to_epoch(datetime) A vectorized version of :meth:`datetime_to_epoch` which takes a numpy array of datetimes as input and returns an array of epochs. .. method:: v_datetime_to_epoch16(datetime) A vectorized version of :meth:`datetime_to_epoch16` which takes a numpy array of datetimes as input and returns an array of epoch16. .. method:: v_datetime_to_tt2000(datetime) A vectorized version of :meth:`datetime_to_tt2000` which takes a numpy array of datetimes as input and returns an array of TT2000. .. method:: v_epoch_to_datetime(epoch) A vectorized version of :meth:`epoch_to_datetime` which takes a numpy array of epochs as input and returns an array of datetimes. .. method:: v_epoch_to_tt2000(epoch) A vectorized version of :meth:`epoch_to_tt2000` which takes a numpy array of epochs as input and returns an array of tt2000s. .. method:: v_epoch16_to_datetime(epoch0, epoch1) A vectorized version of :meth:`epoch16_to_datetime` which takes a numpy array of epoch16 as input and returns an array of datetimes. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_epoch16_to_tt2000(epoch16) A vectorized version of :meth:`epoch16_to_tt2000` which takes a numpy array of epoch16 as input and returns an array of tt2000s. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_tt2000_to_datetime(tt2000) A vectorized version of :meth:`tt2000_to_datetime` which takes a numpy array of tt2000 as input and returns an array of datetimes. .. method:: v_tt2000_to_epoch(tt2000) A vectorized version of :meth:`tt2000_to_epoch` which takes a numpy array of tt2000 as input and returns an array of epochs. .. method:: v_tt2000_to_epoch16(tt2000) A vectorized version of :meth:`tt2000_to_epoch16` which takes a numpy array of tt2000 as input and returns an array of epoch16. .. attribute:: libpath The path where pycdf found the CDF C library, potentially useful in debugging. If this contains just the name of a file (with no path information), then the system linker found the library for pycdf. On Linux, ``ldconfig -p`` may be useful for displaying the system's library resolution. .. attribute:: version Version of the CDF library, (version, release, increment, subincrement) """ def __init__(self, libpath=None, library=None): """Load the CDF C library. Searches for the library in the order: 1. Appropriately-named file in CDF_LIB 2. Appropriately-named file in CDF_BASE 3. Standard library search path @raise CDFError: BAD_DATA_TYPE if can't map types properly """ if not 'CDF_TMP' in os.environ: os.environ['CDF_TMP'] = tempfile.gettempdir() if not library: if not libpath: self.libpath, self._library = self._find_lib() if self._library is None: raise Exception(( 'Cannot load CDF C library; checked {0}. 
' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(self.libpath))) else: self._library = ctypes.CDLL(libpath) self.libpath = libpath else: self._library = library self.libpath = libpath self._library.CDFlib.restype = ctypes.c_long #commonly used, so set it up here self._library.EPOCHbreakdown.restype = ctypes.c_long self._library.computeEPOCH.restype = ctypes.c_double self._library.computeEPOCH.argtypes = [ctypes.c_long] * 7 self._library.computeEPOCH16.restype = ctypes.c_double self._library.computeEPOCH16.argtypes = [ctypes.c_long] * 10 + \ [ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDFsetFileBackward'): self._library.CDFsetFileBackward.restype = None self._library.CDFsetFileBackward.argtypes = [ctypes.c_long] #Map old name to the 3.7.1+ name if not hasattr(self._library, 'computeTT2000') \ and hasattr(self._library, 'CDF_TT2000_from_UTC_parts'): self._library.computeTT2000 \ = self._library.CDF_TT2000_from_UTC_parts if hasattr(self._library, 'computeTT2000'): self._library.computeTT2000.restype = ctypes.c_longlong self._library.computeTT2000.argtypes = \ [ctypes.c_double] *9 #Map old name to the 3.7.1+ name if not hasattr(self._library, 'breakdownTT2000') \ and hasattr(self._library, 'CDF_TT2000_to_UTC_parts'): self._library.breakdownTT2000 \ = self._library.CDF_TT2000_to_UTC_parts if hasattr(self._library, 'breakdownTT2000'): self._library.breakdownTT2000.restype = None self._library.breakdownTT2000.argtypes = \ [ctypes.c_longlong] + [ctypes.POINTER(ctypes.c_double)] * 9 if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH'): self._library.CDF_TT2000_to_UTC_EPOCH.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH.argtypes = [ctypes.c_longlong] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH'): self._library.CDF_TT2000_from_UTC_EPOCH.restype = ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH.argtypes = [ctypes.c_double] if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH16'): self._library.CDF_TT2000_to_UTC_EPOCH16.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH16.argtypes = \ [ctypes.c_longlong, ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH16'): self._library.CDF_TT2000_from_UTC_EPOCH16.restype = \ ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH16.argtypes = \ [ctypes.POINTER(ctypes.c_double * 2)] #Get CDF version information ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) sub = ctypes.c_char(b' ') self.call(const.GET_, const.LIB_VERSION_, ctypes.byref(ver), const.GET_, const.LIB_RELEASE_, ctypes.byref(rel), const.GET_, const.LIB_INCREMENT_, ctypes.byref(inc), const.GET_, const.LIB_subINCREMENT_, ctypes.byref(sub)) ver = ver.value rel = rel.value inc = inc.value sub = sub.value self.version = (ver, rel, inc, sub) self._del_middle_rec_bug = ver < 3 or (ver == 3 and (rel < 4 or (rel == 4 and inc < 1))) self.supports_int8 = (ver > 3 or (ver == 3 and rel >=4)) self.cdftypenames = {const.CDF_BYTE.value: 'CDF_BYTE', const.CDF_CHAR.value: 'CDF_CHAR', const.CDF_INT1.value: 'CDF_INT1', const.CDF_UCHAR.value: 'CDF_UCHAR', const.CDF_UINT1.value: 'CDF_UINT1', const.CDF_INT2.value: 'CDF_INT2', const.CDF_UINT2.value: 'CDF_UINT2', const.CDF_INT4.value: 'CDF_INT4', const.CDF_UINT4.value: 'CDF_UINT4', const.CDF_INT8.value: 'CDF_INT8', const.CDF_FLOAT.value: 'CDF_FLOAT', const.CDF_REAL4.value: 'CDF_REAL4', const.CDF_DOUBLE.value: 'CDF_DOUBLE', const.CDF_REAL8.value: 'CDF_REAL8', const.CDF_EPOCH.value: 'CDF_EPOCH', 
const.CDF_EPOCH16.value: 'CDF_EPOCH16', const.CDF_TIME_TT2000.value: 'CDF_TIME_TT2000', } self.numpytypedict = {const.CDF_BYTE.value: numpy.int8, const.CDF_CHAR.value: numpy.int8, const.CDF_INT1.value: numpy.int8, const.CDF_UCHAR.value: numpy.uint8, const.CDF_UINT1.value: numpy.uint8, const.CDF_INT2.value: numpy.int16, const.CDF_UINT2.value: numpy.uint16, const.CDF_INT4.value: numpy.int32, const.CDF_UINT4.value: numpy.uint32, const.CDF_INT8.value: numpy.int64, const.CDF_FLOAT.value: numpy.float32, const.CDF_REAL4.value: numpy.float32, const.CDF_DOUBLE.value: numpy.float64, const.CDF_REAL8.value: numpy.float64, const.CDF_EPOCH.value: numpy.float64, const.CDF_EPOCH16.value: numpy.dtype((numpy.float64, 2)), const.CDF_TIME_TT2000.value: numpy.int64, } self.timetypes = [const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value] if not self.supports_int8: del self.cdftypenames[const.CDF_INT8.value] del self.numpytypedict[const.CDF_INT8.value] del self.cdftypenames[const.CDF_TIME_TT2000.value] del self.numpytypedict[const.CDF_TIME_TT2000.value] elif sys.platform.startswith('linux') \ and os.uname()[4].startswith('arm') \ and hasattr(self._library, 'computeTT2000') \ and self._library.computeTT2000( 2010, 1, 1, 0, 0, 0, 0, 0, 0) != 315576066184000000: #TT2000 call failed, so probably need to type-pun #double arguments to variadic functions. #Calling convention for non-variadic functions with floats #is unique, but convention for ints is same as variadic. #So type-pun arguments to integers to force that calling #convention. if ctypes.sizeof(ctypes.c_longlong) != \ ctypes.sizeof(ctypes.c_double): warnings.warn('ARM with unknown type sizes; ' 'TT2000 functions will not work.') else: self._library.computeTT2000.argtypes = \ [ctypes.c_longlong] * 9 c_ll_p = ctypes.POINTER(ctypes.c_longlong) if self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( 2010)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) != 315576066184000000: warnings.warn('ARM with unknown calling convention; ' 'TT2000 functions will not work.') self.datetime_to_tt2000 = self._datetime_to_tt2000_typepunned v_epoch16_to_datetime = numpy.frompyfunc( self.epoch16_to_datetime, 2, 1) self.v_epoch16_to_datetime = \ lambda x: v_epoch16_to_datetime(x[..., 0], x[..., 1]) self.v_epoch_to_datetime = numpy.frompyfunc( self.epoch_to_datetime, 1, 1) self.v_tt2000_to_datetime = numpy.frompyfunc( self.tt2000_to_datetime, 1, 1) self.v_datetime_to_epoch = numpy.vectorize( self.datetime_to_epoch, otypes=[numpy.float64]) v_datetime_to_epoch16 = numpy.frompyfunc( self.datetime_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. 
We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_datetime_to_epoch16(x): retval = numpy.require(v_datetime_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_datetime_to_epoch16 = _v_datetime_to_epoch16 self.v_datetime_to_tt2000 = numpy.vectorize( self.datetime_to_tt2000, otypes=[numpy.int64]) self.v_epoch_to_tt2000 = numpy.vectorize( self.epoch_to_tt2000, otypes=[numpy.int64]) self.v_tt2000_to_epoch = numpy.vectorize( self.tt2000_to_epoch, otypes=[numpy.float64]) v_epoch16_to_tt2000 = numpy.frompyfunc( self.epoch16_to_tt2000, 2, 1) self.v_epoch16_to_tt2000 = \ lambda x: v_epoch16_to_tt2000(x[..., 0], x[..., 1]) v_tt2000_to_epoch16 = numpy.frompyfunc( self.tt2000_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_tt2000_to_epoch16(x): retval = numpy.require(v_tt2000_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_tt2000_to_epoch16 = _v_tt2000_to_epoch16 if not self.supports_int8: self.datetime_to_tt2000 = self._bad_tt2000 self.tt2000_to_datetime = self._bad_tt2000 self.v_datetime_to_tt2000 = self._bad_tt2000 self.v_tt2000_to_datetime = self._bad_tt2000 self.epoch_to_tt2000 = self._bad_tt2000 self.v_epoch_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch = self._bad_tt2000 self.v_tt2000_to_epoch = self._bad_tt2000 self.epoch_16_to_tt2000 = self._bad_tt2000 self.v_epoch16_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch16 = self._bad_tt2000 self.v_tt2000_to_epoch16 = self._bad_tt2000 #Default to V2 CDF self.set_backward(True) @staticmethod def _find_lib(): """ Search for the CDF library Searches in likely locations for CDF libraries and attempts to load them. Stops at first successful load and, if fails, reports all the files that were tried as libraries. Returns ======= out : tuple This is either (path to library, loaded library) or, in the event of failure, (None, list of libraries tried) """ failed = [] for libpath in Library._lib_paths(): try: lib = ctypes.CDLL(libpath) except: failed.append(libpath) else: return libpath, lib return (failed, None) @staticmethod def _lib_paths(): """Find candidate paths for the CDF library Does not check that the library is actually in any particular directory, just returns a list of possible locations, in priority order. Returns ======= out : generator of str paths that look like the CDF library """ #What the library might be named names = { 'win32': ['cdf.dll'], 'darwin': ['libcdf.dylib', 'cdf.dylib', 'libcdf.so'], 'linux2': ['libcdf.so'], 'linux': ['libcdf.so'], } names = names.get(sys.platform, ['libcdf.so']) #All existing CDF-library-like paths within a directory search_dir = lambda x: \ [os.path.join(x, fname) for fname in names if os.path.exists(os.path.join(x, fname))] # Only use anaconda locations... # Defined during builds ... if 'PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['PREFIX'], 'lib')): yield p # defined when conda is activated ... 
if 'CONDA_PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'lib')): yield p # Special subdirectory for anaconda unix packages on windows if 'LIBRARY_BIN' in os.environ: for p in search_dir(os.environ['LIBRARY_BIN']): yield p ctypespath = ctypes.util.find_library( 'cdf.dll' if sys.platform == 'win32' else 'cdf') if ctypespath: yield ctypespath def check_status(self, status, ignore=()): """ Raise exception or warning based on return status of CDF call Parameters ========== status : int status returned by the C library Other Parameters ================ ignore : sequence of ctypes.c_long CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. (Default none). Raises ====== CDFError : if status < CDF_WARN, indicating an error Warns ===== CDFWarning : if CDF_WARN <= status < CDF_OK, indicating a warning. Returns ======= out : int status (unchanged) """ if status == const.CDF_OK or status in ignore: return status if status < const.CDF_WARN: raise CDFError(status) else: warning = CDFWarning(status) warning.warn() return status def call(self, *args, **kwargs): """ Call the CDF internal interface Passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with :meth:`check_status`. Terminal NULL is automatically added to args. Parameters ========== args : various, see :mod:`ctypes` Passed directly to the CDF library interface. Useful constants are defined in the :mod:`~pycdf.const` module. Other Parameters ================ ignore : sequence of CDF statuses sequence of CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. Returns ======= out : int CDF status from the library Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ if 'ignore' in kwargs: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) ), kwargs['ignore']) else: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) )) def set_backward(self, backward=True): """ Set backward compatibility mode for new CDFs Unless backward compatible mode is set, CDF files created by the version 3 library can not be read by V2. Parameters ========== backward : boolean Set backward compatible mode if True; clear it if False. Raises ====== ValueError : if backward=False and underlying CDF library is V2 """ if self.version[0] < 3: if not backward: raise ValueError( 'Cannot disable backward-compatible mode for CDF version 2.') else: return self._library.CDFsetFileBackward(const.BACKWARDFILEon if backward else const.BACKWARDFILEoff) def epoch_to_datetime(self, epoch): """ Converts a CDF epoch value to a datetime Parameters ========== epoch : float epoch value from CDF Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
See Also ======== v_epoch_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) self._library.EPOCHbreakdown(ctypes.c_double(epoch), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999000) else: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, msec.value * 1000) def datetime_to_epoch(self, dt): """ Converts a Python datetime to a CDF Epoch value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : float epoch corresponding to dt See Also ======== v_datetime_to_epoch """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) micro = dt.microsecond % 1000 if micro >= 500 and dt.year < 9999: dt += datetime.timedelta(0, 0, 1000) return self._library.computeEPOCH(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000)) def epoch16_to_datetime(self, epoch0, epoch1): """ Converts a CDF epoch16 value to a datetime .. note:: The call signature has changed since SpacePy 0.1.2. Formerly this method took a single argument with two values; now it requires two arguments (one for each value). To convert existing code, replace ``epoch16_to_datetime(epoch)`` with ``epoch16_to_datetime(*epoch)``. Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. See Also ======== v_epoch16_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) usec = ctypes.c_long(0) nsec = ctypes.c_long(0) psec = ctypes.c_long(0) self._library.EPOCH16breakdown((ctypes.c_double * 2)(epoch0, epoch1), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec), ctypes.byref(psec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) micro = int(float(msec.value) * 1000 + float(usec.value) + float(nsec.value) / 1000 + float(psec.value) / 1e6 + 0.5) if micro < 1000000: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_epoch16(self, dt): """ Converts a Python datetime to a CDF Epoch16 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : list of float epoch16 corresponding to dt See Also ======== v_datetime_to_epoch16 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) #Default to "illegal epoch" epoch16 = (ctypes.c_double * 2)(-1., -1.) 
if self._library.computeEPOCH16(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0, 0, epoch16): return (-1., -1.) #Failure, so illegal epoch return (epoch16[0], epoch16[1]) def epoch_to_epoch16(self, epoch): """ Converts a CDF EPOCH to a CDF EPOCH16 value Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : (double, double) EPOCH16 corresponding to epoch """ e = numpy.require(epoch, numpy.float64) s = numpy.trunc(e / 1000.0) #ugly numpy stuff, probably a better way.... res = numpy.hstack((s, (e - s * 1000.0) * 1e9)) if len(res) <= 2: return res newshape = list(res.shape[0:-2]) newshape.append(res.shape[-1] // 2) newshape.append(2) return numpy.rollaxis(res.reshape(newshape), -1, -2) def epoch_to_num(self, epoch): """ Convert CDF EPOCH to matplotlib number. Same output as :func:`~matplotlib.dates.date2num` and useful for plotting large data sets without converting the times through datetime. Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : double Floating point number representing days since 0001-01-01. """ #date2num day 1 is 1/1/1 00UT #epoch 1/1/1 00UT is 31622400000.0 (millisecond) return (epoch - 31622400000.0) / (24 * 60 * 60 * 1000.0) + 1.0 def epoch16_to_epoch(self, epoch16): """ Converts a CDF EPOCH16 to a CDF EPOCH value Parameters ========== epoch16 : (double, double) EPOCH16 to convert. Lists and numpy arrays are acceptable. LAST dimension should be 2: the two pairs of EPOCH16 Returns ======= out : double EPOCH corresponding to epoch16 """ e = numpy.require(epoch16, numpy.float64) return e[..., 0] * 1000.0 + numpy.round(e[..., 1] / 1e9) def tt2000_to_datetime(self, tt2000): """ Converts a CDF TT2000 value to a datetime .. note:: Although TT2000 values support leapseconds, Python's datetime object does not. Any times after 23:59:59.999999 will be truncated to 23:59:59.999999. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
See Also ======== v_tt2000_to_datetime """ yyyy = ctypes.c_double(0) mm = ctypes.c_double(0) dd = ctypes.c_double(0) hh = ctypes.c_double(0) min = ctypes.c_double(0) sec = ctypes.c_double(0) msec = ctypes.c_double(0) usec = ctypes.c_double(0) nsec = ctypes.c_double(0) self._library.breakdownTT2000( ctypes.c_longlong(tt2000), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) sec = int(sec.value) if sec >= 60: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), 59, 999999) micro = int(msec.value * 1000 + usec.value + nsec.value / 1000 + 0.5) if micro < 1000000: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_tt2000(self, dt): """ Converts a Python datetime to a CDF TT2000 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0) def _datetime_to_tt2000_typepunned(self, dt): """ Converts a Python datetime to a CDF TT2000 value Typepunned version that passes doubles as longlongs, to get around ARM calling convention oddness. Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ c_ll_p = ctypes.POINTER(ctypes.c_longlong) if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( dt.year)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.month)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.day)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.hour)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.minute)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.second)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond // 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond % 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) def epoch_to_tt2000(self, epoch): """ Converts a CDF EPOCH to a CDF TT2000 value Parameters ========== epoch : double EPOCH to convert Returns ======= out : int tt2000 corresponding to epoch See Also ======== v_epoch_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH(epoch) def tt2000_to_epoch(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH .. note:: Although TT2000 values support leapseconds, CDF EPOCH values do not. 
Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double EPOCH corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch """ return self._library.CDF_TT2000_to_UTC_EPOCH(tt2000) def epoch16_to_tt2000(self, epoch0, epoch1): """ Converts a CDF epoch16 value to TT2000 .. note:: Because TT2000 does not support picoseconds, the picoseconds value in epoch is ignored (i.e., truncated.) Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : long TT2000 corresponding to epoch. See Also ======== v_epoch16_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH16( (ctypes.c_double * 2)(epoch0, epoch1)) def tt2000_to_epoch16(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH16 .. note:: Although TT2000 values support leapseconds, CDF EPOCH16 values do not. Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double, double EPOCH16 corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch16 """ #Default to "illegal epoch" if isn't populated epoch16 = (ctypes.c_double * 2)(-1., -1.) if self._library.CDF_TT2000_to_UTC_EPOCH16(tt2000, epoch16): return (-1., -1.) #Failure; illegal epoch return (epoch16[0], epoch16[1]) def _bad_tt2000(*args, **kwargs): """Convenience function for complaining that TT2000 not supported""" raise NotImplementedError( 'TT2000 functions require CDF library 3.4.0 or later') def download_library(): """Download and install the CDF library""" if sys.platform != 'win32': raise NotImplementedError( 'CDF library install only supported on Windows') try: import html.parser as HTMLParser except ImportError: import HTMLParser #https://stackoverflow.com/questions/1713038/super-fails-with-error-typeerror-argument-1-must-be-type-not-classobj class LinkParser(HTMLParser.HTMLParser, object): def __init__(self, *args, **kwargs): self.links_found = [] super(LinkParser, self).__init__(*args, **kwargs) def handle_starttag(self, tag, attrs): if tag != 'a' or attrs[0][0] != 'href': return self.links_found.append(attrs[0][1]) import re import subprocess try: import urllib.request as u except ImportError: import urllib as u # Removed reference to spacepy #import spacepy #if spacepy.config.get('user_agent', None): # class AppURLopener(u.FancyURLopener): # version = spacepy.config['user_agent'] # u._urlopener = AppURLopener() baseurl = 'https://spdf.sci.gsfc.nasa.gov/pub/software/cdf/dist/' url = u.urlopen(baseurl) listing = url.read() url.close() p = LinkParser() p.feed(listing) cdfdist = [l for l in p.links_found if re.match('^cdf3\d_\d(?:_\d)?/$', l)] if not cdfdist: raise RuntimeError( "Couldn't find CDF distribution directory to download") cdfdist.sort(key=lambda x: x.rstrip('/').split('_')) cdfverbase = cdfdist[-1].rstrip('/') instfname = cdfverbase + ('_0' if cdfverbase.count('_') == 1 else '') + \ '-setup-{0}.exe'.format(len('%x' % sys.maxsize)*4) insturl = baseurl + cdfverbase + '/windows/' + instfname tmpdir = tempfile.mkdtemp() try: fname, status = u.urlretrieve(insturl, os.path.join(tmpdir, instfname)) subprocess.check_call([fname, '/install', '/q1'], shell=False) finally: shutil.rmtree(tmpdir) _libpath, _library = Library._find_lib() 
if _library is None: raise Exception(('Cannot load CDF C library; checked {0}. ' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(_libpath))) from . import const lib = Library(_libpath, _library) """Module global library object. Initalized at module load time so all classes have ready access to the CDF library and a common state. E.g: >>> import pycdf >>> pycdf.lib.version (3, 3, 0, ' ') """ class CDFException(Exception): """ Base class for errors or warnings in the CDF library. Not normally used directly, but in subclasses :class:`CDFError` and :class:`CDFWarning`. Error messages provided by this class are looked up from the underlying C library. """ def __init__(self, status): """ Create a CDF Exception Uses CDF C library to look up an appropriate error message. Parameters ========== status : ctypes.c_long CDF status """ self.status = status self.string = 'CDF error ' + repr(status) + ', unable to get details.' message = ctypes.create_string_buffer(const.CDF_STATUSTEXT_LEN + 1) try: retval = lib._library.CDFlib(const.SELECT_, const.CDF_STATUS_, ctypes.c_long(status), const.GET_, const.STATUS_TEXT_, message, const.NULL_) if retval == const.CDF_OK: if isinstance(message.value, str): self.string = message.value elif isinstance(message.value, bytes): self.string = message.value.decode() except: pass def __str__(self): """ Error string associated with the library error. Returns ======= out : str Error message from the CDF library. """ return self.string class CDFError(CDFException): """Raised for an error in the CDF library.""" pass class CDFWarning(CDFException, UserWarning): """Used for a warning in the CDF library.""" def warn(self, level=4): """ Issues a warning based on the information stored in my exception Intended for use in check_status or similar wrapper function. Other Parameters ================ level : int optional (default 3), how far up the stack the warning should be reported. Passed directly to :class:`warnings.warn`. """ warnings.warn(self, self.__class__, level) class EpochError(Exception): """Used for errors in epoch routines""" pass def _compress(obj, comptype=None, param=None): """Set or check the compression of a :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param obj: object on which to set or check compression @type obj: :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param comptype: type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :py:mod:`pycdf.const`. If not specified, will not change compression. @type comptype: ctypes.c_long @param param: Compression parameter, see CDF CRM 4.10 and :py:mod:`pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
@type param: ctypes.c_long @return: (comptype, param) currently in effect @rtype: tuple """ if isinstance(obj, CDF): COMPRESSION_ = const.CDF_COMPRESSION_ elif isinstance(obj, Var): COMPRESSION_ = const.zVAR_COMPRESSION_ else: raise ValueError('Must specify a CDF or Var type.') validparams = {const.NO_COMPRESSION.value: [ctypes.c_long(0)], const.RLE_COMPRESSION.value: [const.RLE_OF_ZEROs], const.HUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.AHUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.GZIP_COMPRESSION.value: [ctypes.c_long(5), ctypes.c_long(1), ctypes.c_long(2), ctypes.c_long(3), ctypes.c_long(4), ctypes.c_long(6), ctypes.c_long(7), ctypes.c_long(8), ctypes.c_long(9), ], } comptypes = [const.NO_COMPRESSION, const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION, const.GZIP_COMPRESSION] comptypevalues = [i.value for i in comptypes] if comptype != None: if not hasattr(comptype, 'value'): comptype = ctypes.c_long(comptype) if param is None: if not comptype.value in validparams: raise CDFError(const.BAD_COMPRESSION) param = validparams[comptype.value][0] paramlist = (ctypes.c_long * 1)(param) obj._call(const.PUT_, COMPRESSION_, comptype, paramlist) params = (ctypes.c_long * const.CDF_MAX_PARMS)(*([0] * const.CDF_MAX_PARMS)) comptype = ctypes.c_long(0) percent = ctypes.c_long(0) obj._call(const.GET_, COMPRESSION_, ctypes.byref(comptype), ctypes.byref(params), ctypes.byref(percent)) param = params[0] if not comptype.value in comptypevalues: raise CDFError(const.BAD_COMPRESSION) validparamvalues = [i.value for i in validparams[comptype.value]] if not param in validparamvalues: raise CDFError(const.BAD_COMPRESSION_PARM) comptype = comptypes[comptypevalues.index(comptype.value)] if comptype in (const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION): param = validparams[comptype.value][validparamvalues.index(param)] return (comptype, param) class CDF(MutableMapping): """ Python object representing a CDF file. Open or create a CDF file by creating an object of this class. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. A readonly CDF with many variables may be slow to close. See :meth:`readonly`. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :meth:`close` or :meth:`save` when done. .. note:: Existing CDF files are opened read-only by default, see :meth:`readonly` to change. CDF supports the `with <http://docs.python.org/tutorial/inputoutput.html#methods-of-file-objects>`_ keyword, like other file objects, so: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... #do brilliant things with the CDF will open the CDF, execute the indented statements, and close the CDF when finished or when an error occurs. The `python docs <http://docs.python.org/reference/compound_stmts.html#with>`_ include more detail on this 'context manager' ability. 
CDF objects behave like a python `dictionary <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_, where the keys are names of variables in the CDF, and the values, :class:`Var` objects. As a dictionary, they are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_ and it is easy to loop over all of the variables in a file. Some examples: #. List the names of all variables in the open CDF ``cdffile``: >>> cdffile.keys() >>> for k in cdffile: #Alternate ... print(k) #. Get a :class:`Var` object for the variable named ``Epoch``: >>> epoch = cdffile['Epoch'] #. Determine if a CDF contains a variable named ``B_GSE``: >>> if 'B_GSE' in cdffile: ... print('B_GSE is in the file') ... else: ... print('B_GSE is not in the file') #. Find how many variables are in the file: >>> print(len(cdffile)) #. Delete the variable ``Epoch`` from the open CDF file ``cdffile``: >>> del cdffile['Epoch'] #. Display a summary of variables and types in open CDF file ``cdffile``: >>> print(cdffile) #. Open the CDF named ``cdf_filename.cdf``, read *all* the data from all variables into dictionary ``data``, and close it when done or if an error occurs: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... data = cdffile.copy() This last example can be very inefficient as it reads the entire CDF. Normally it's better to treat the CDF as a dictionary and access only the data needed, which will be pulled transparently from disc. See :class:`Var` for more subtle examples. Potentially useful dictionary methods and related functions: - `in <http://docs.python.org/reference/expressions.html#in>`_ - `keys <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_ - :py:func:`len` - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - :py:func:`sorted` - :py:func:`~spacepy.toolbox.dictree` The CDF user's guide section 2.2 has more background information on CDF files. The :attr:`~CDF.attrs` Python attribute acts as a dictionary referencing CDF attributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`gAttrList` for more on the dictionary of global attributes. Creating a new CDF from a master (skeleton) CDF has similar syntax to opening one: >>> cdffile = pycdf.CDF('cdf_filename.cdf', 'master_cdf_filename.cdf') This creates and opens ``cdf_filename.cdf`` as a copy of ``master_cdf_filename.cdf``. Using a skeleton CDF is recommended over making a CDF entirely from scratch, but this is possible by specifying a blank master: >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') When CDFs are created in this way, they are opened read-write, see :py:meth:`readonly` to change. By default, new CDFs (without a master) are created in version 2 (backward-compatible) format. To create a version 3 CDF, use :meth:`Library.set_backward`: >>> pycdf.lib.set_backward(False) >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') Add variables by direct assignment, which will automatically set type and dimension based on the data provided: >>> cdffile['new_variable_name'] = [1, 2, 3, 4] or, if more control is needed over the type and dimensions, use :py:meth:`new`. Although it is supported to assign Var objects to Python variables for convenience, there are some minor pitfalls that can arise when changing a CDF that will not affect most users. 
This is only a concern when assigning a zVar object to a Python variable, changing the CDF through some other variable, and then trying to use the zVar object via the originally assigned variable. Deleting a variable: >>> var = cdffile['Var1'] >>> del cdffile['Var1'] >>> var[0] #fail, no such variable Renaming a variable: >>> var = cdffile['Var1'] >>> cdffile['Var1'].rename('Var2') >>> var[0] #fail, no such variable Renaming via the same variable works: >>> var = cdffile['Var1'] >>> var.rename('Var2') >>> var[0] #succeeds, aware of new name Deleting a variable and then creating another variable with the same name may lead to some surprises: >>> var = cdffile['Var1'] >>> var[...] = [1, 2, 3, 4] >>> del cdffile['Var1'] >>> cdffile.new('Var1', data=[5, 6, 7, 8] >>> var[...] [5, 6, 7, 8] .. autosummary:: ~CDF.attr_num ~CDF.attrs ~CDF.add_attr_to_cache ~CDF.add_to_cache ~CDF.backward ~CDF.checksum ~CDF.clear_attr_from_cache ~CDF.clear_from_cache ~CDF.clone ~CDF.close ~CDF.col_major ~CDF.compress ~CDF.copy ~CDF.from_data ~CDF.new ~CDF.raw_var ~CDF.readonly ~CDF.save ~CDF.var_num ~CDF.version .. attribute:: CDF.attrs Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. .. attribute:: CDF.backward True if this CDF was created in backward-compatible mode (for opening with CDF library before 3.x) .. automethod:: add_to_cache .. automethod:: add_attr_to_cache .. automethod:: attr_num .. automethod:: checksum .. automethod:: clear_from_cache .. automethod:: clear_attr_from_cache .. automethod:: clone .. automethod:: close .. automethod:: col_major .. automethod:: compress .. automethod:: copy .. automethod:: from_data .. automethod:: new .. automethod:: raw_var .. automethod:: readonly .. automethod:: save .. automethod:: var_num .. automethod:: version """ def __init__(self, pathname, masterpath=None, create=None, readonly=None): """Open or create a CDF file. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. Raises ====== CDFError if CDF library reports an error CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save` when done. 
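A further short example (filenames are hypothetical): a CDF created from a blank master is opened read-write, so it can be populated immediately and then closed: >>> newfile = pycdf.CDF('new_filename.cdf', '') >>> newfile.readonly() False >>> newfile['data'] = [1, 2, 3] >>> newfile.close() 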
""" if masterpath is not None: #Looks like we want to create if create is False: raise ValueError('Cannot specify a master CDF without creating a CDF') if readonly is True: raise ValueError('Cannot create a CDF in readonly mode') if create and readonly: raise ValueError('Cannot create a CDF in readonly mode') try: self.pathname = pathname.encode() except AttributeError: raise ValueError( 'pathname must be string-like: {0}'.format(pathname)) self._handle = ctypes.c_void_p(None) self._opened = False if masterpath is None and not create: self._open(True if readonly is None else readonly) elif masterpath: self._from_master(masterpath.encode()) else: self._create() lib.call(const.SELECT_, const.CDF_zMODE_, ctypes.c_long(2)) self._attrlistref = weakref.ref(gAttrList(self)) self.backward = self.version()[0] < 3 self._var_nums = {} """Cache of name-to-number mappings for variables in this CDF""" self._attr_info = {} """Cache of name-to-(number, global) mappings for attributes in this CDF""" def __del__(self): """Destructor; called when CDF object is destroyed. Close CDF file if there is still a valid handle. .. note:: To avoid data loss, explicitly call :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save`. """ if self._opened: self.close() def __delitem__(self, name): """Delete a zVariable in this CDF, by name or number Parameters ========== name : string or int Name or number of the CDF variable .. note: Variable numbers may change if variables are added or removed. Examples ======== Delete the variable ``Epoch`` from the open CDF file ``cdffile``. >>> del cdffile['Epoch'] """ self[name]._delete() def __enter__(self): """Context manager entrance function.""" return self def __exit__(self, type, value, traceback): """Context manager exit function. Close CDF file. """ self.close() def __getitem__(self, name): """Gets a zVariable in this CDF, by name or number The CDF acts like a dict @param name: Name or number of the CDF variable @type name: string or int @return: CDF variable named or numbered L{name} @rtype: :py:class:`pycdf.Var` @raise KeyError: for pretty much any problem in lookup @note: variable numbers may change if variables are added or removed. """ try: return Var(self, name) except CDFException as e: raise KeyError('{0}: {1}'.format(name, e)) def __setitem__(self, name, data): """Writes data to a zVariable in this CDF If the zVariable does not exist, will create one matching L{data}. If it does exist, will attempt to write L{data} to it without changing the type or dimensions. @param name: name or number of the variable to write @type name: str or int @param data: data to write, or a :py:class:`pycdf.Var` to copy """ if isinstance(data, Var): self.clone(data, name) elif name in self: self[name][...] 
= data if hasattr(data, 'attrs'): self[name].attrs.clone(data.attrs) else: self.new(name, data) def __iter__(self, current = 0): """Iterates over zVars in CDF Iterators for dicts return keys @note: Returned in variable-number order """ while current < self.__len__(): name = self[current].name() value = (yield name) if value is None: current += 1 else: current = self[value]._num() current += 1 def __len__(self): """Implements 'length' of CDF (number of zVars) @return: number of zVars in the CDF @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, const.CDF_NUMzVARS_, ctypes.byref(count)) return count.value def __contains__(self, key): """Determines whether a particular variable name is in the CDF @note: Essentially an efficiency function; L{__iter__} is called if this isn't defined @param key: key/variable name to check @type key: string @return: True if L{key} is the name of a variable in CDF, else False @rtype: Boolean """ try: foo = self[key] return True except KeyError as e: expected = str(key) + \ ": NO_SUCH_VAR: Named variable not found in this CDF." if expected in e.args: return False raise def __repr__(self): """Returns representation of CDF Cannot return anything that can be eval'd to create a copy of the CDF, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<CDF:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the CDF This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.CDF`, just the names, types, and sizes of all variables. (Attributes are not listed.) @return: description of the variables in the CDF @rtype: str """ if self._opened: return '\n'.join([key + ': ' + str(value) for (key, value) in sorted(self.items())]) #can get away with this sort because second value in tuple isn't #compared unless first are different, and variable name is unique. else: if isinstance(self.pathname, str): return 'Closed CDF {0}'.format(self.pathname) else: return 'Closed CDF {0}'.format(self.pathname.decode('ascii')) def _open(self, readonly=True): """Opens the CDF file (called on init) Will open an existing CDF file read/write. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.OPEN_, const.CDF_, self.pathname, ctypes.byref(self._handle)) self._opened = True if readonly: #Default is RW self.readonly(readonly) def _create(self): """Creates (and opens) a new CDF file Created at ``pathname``. Assumes zero-dimension r variables Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.CREATE_, const.CDF_, self.pathname, ctypes.c_long(0), (ctypes.c_long * 1)(0), ctypes.byref(self._handle)) self._opened = True def _from_master(self, master_path): """Creates a new CDF from a master CDF file ``master_path`` is copied to ``pathname`` and opened. Parameters ========== master_path : string location of the master CDF file Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. 
note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ if os.path.exists(self.pathname): raise CDFError(const.CDF_EXISTS) shutil.copy2(master_path, self.pathname) self._open(False) def _call(self, *args, **kwargs): """Select this CDF as current and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. Parameters ========== args : various, see :py:mod:`ctypes`. Passed directly to the CDF library interface. Useful constants are defined in the :doc:`const <pycdf_const>` module of this package. Returns ======= out : ctypes.c_long CDF status from the library .. note: Terminal NULL_ is automatically added to ``args``. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. """ return lib.call(const.SELECT_, const.CDF_, self._handle, *args, **kwargs) def clone(self, zVar, name=None, data=True): """ Clone a zVariable (from another CDF or this) into this CDF Parameters ========== zVar : :py:class:`Var` variable to clone Other Parameters ================ name : str Name of the new variable (default: name of the original) data : boolean (optional) Copy data, or only type, dimensions, variance, attributes? (default: True, copy data as well) Returns ======= out : :py:class:`Var` The newly-created zVar in this CDF """ if name is None: name = zVar.name() if name in self: del self[name] self.new(name, type=zVar.type(), recVary=zVar.rv(), dimVarys=zVar.dv(), dims=zVar._dim_sizes(), n_elements=zVar._nelems()) self[name].compress(*zVar.compress()) self[name].attrs.clone(zVar.attrs) if data: r = zVar._raw zVar._raw = True self.raw_var(name)[...] = zVar[...] zVar._raw = r return zVar def col_major(self, new_col=None): """ Finds the majority of this CDF file Other Parameters ================ new_col : boolean Specify True to change to column-major, False to change to row major, or do not specify to check the majority rather than changing it. (default is check only) Returns ======= out : boolean True if column-major, false if row-major """ if new_col != None: new_maj = const.COLUMN_MAJOR if new_col else const.ROW_MAJOR self._call(const.PUT_, const.CDF_MAJORITY_, new_maj) maj = ctypes.c_long(0) self._call(const.GET_, const.CDF_MAJORITY_, ctypes.byref(maj)) if not maj.value in (const.ROW_MAJOR.value, const.COLUMN_MAJOR.value): raise CDFError(const.BAD_MAJORITY) return maj.value == const.COLUMN_MAJOR.value def readonly(self, ro=None): """ Sets or check the readonly status of this CDF If the CDF has been changed since opening, setting readonly mode will have no effect. .. note:: Closing a CDF that has been opened readonly, or setting readonly False, may take a substantial amount of time if there are many variables in the CDF, as a (potentially large) cache needs to be cleared. Consider specifying ``readonly=False`` when opening the file if this is an issue. However, this may make some reading operations slower. Other Parameters ================ ro : Boolean True to set the CDF readonly, False to set it read/write, or leave out to check only. 
Returns ======= out : Boolean True if CDF is read-only, else False Raises ====== CDFError : if bad mode is set """ if ro == True: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYon) elif ro == False: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYoff) mode = ctypes.c_long(0) self._call(const.CONFIRM_, const.CDF_READONLY_MODE_, ctypes.byref(mode)) if mode.value == const.READONLYon.value: return True elif mode.value == const.READONLYoff.value: return False else: raise CDFError(const.BAD_READONLY_MODE.value) def checksum(self, new_val=None): """ Set or check the checksum status of this CDF. If checksums are enabled, the checksum will be verified every time the file is opened. Other Parameters ================ new_val : boolean True to enable checksum, False to disable, or leave out to simply check. Returns ======= out : boolean True if the checksum is enabled or False if disabled """ if new_val != None: self._call(const.PUT_, const.CDF_CHECKSUM_, const.MD5_CHECKSUM if new_val else const.NO_CHECKSUM) chk = ctypes.c_long(0) self._call(const.GET_, const.CDF_CHECKSUM_, ctypes.byref(chk)) if not chk.value in (const.MD5_CHECKSUM.value, const.NO_CHECKSUM.value): raise CDFError(const.BAD_CHECKSUM) return chk.value == const.MD5_CHECKSUM.value def close(self): """ Closes the CDF file Although called on object destruction (:meth:`~CDF.__del__`), to ensure all data are saved, the user should explicitly call :meth:`~CDF.close` or :meth:`~CDF.save`. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.CLOSE_, const.CDF_) self._opened = False def compress(self, comptype=None, param=None): """ Set or check the compression of this CDF Sets compression on entire *file*, not per-variable. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) Returns ======= out : tuple (comptype, param) currently in effect See Also ======== :meth:`Var.compress` Examples ======== Set file ``cdffile`` to gzip compression, compression level 9: >>> cdffile.compress(pycdf.const.GZIP_COMPRESSION, 9) """ return _compress(self, comptype, param) def new(self, name, data=None, type=None, recVary=True, dimVarys=None, dims=None, n_elements=None, compress=None, compress_param=None): """ Create a new zVariable in this CDF .. note:: Either ``data`` or ``type`` must be specified. If type is not specified, it is guessed from ``data``. Parameters ========== name : str name of the new variable Other Parameters ================ data data to store in the new variable. If this has a an ``attrs`` attribute (e.g., :class:`~spacepy.datamodel.dmarray`), it will be used to populate attributes of the new variable. type : ctypes.c_long CDF type of the variable, from :mod:`~pycdf.const`. See section 2.5 of the CDF user's guide for more information on CDF data types. recVary : boolean record variance of the variable (default True) dimVarys : list of boolean dimension variance of each dimension, default True for all dimensions. 
dims : list of int size of each dimension of this variable, default zero-dimensional. Note this is the dimensionality as defined by CDF, i.e., for record-varying variables it excludes the leading record dimension. See :py:class:`Var`. n_elements : int number of elements, should be 1 except for CDF_CHAR, for which it's the length of the string. compress : ctypes.c_long Compression to apply to this variable, default None. See :py:meth:`Var.compress`. compress_param : ctypes.c_long Compression parameter if compression used; reasonable default is chosen. See :py:meth:`Var.compress`. Returns ======= out : :py:class:`Var` the newly-created zVariable Raises ====== ValueError : if neither data nor sufficient typing information is provided. Notes ===== Any given data may be representable by a range of CDF types; if the type is not specified, pycdf will guess which the CDF types which can represent this data. This breaks down to: #. If input data is a numpy array, match the type of that array #. Proper kind (numerical, string, time) #. Proper range (stores highest and lowest number provided) #. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: #. Type that matches precision of data first, then #. integer type before float type, then #. Smallest type first, then #. signed type first, then #. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if ``data`` specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: #. absolute values between 0 and 3e-39 #. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. """ if type in (const.CDF_EPOCH16, const.CDF_INT8, const.CDF_TIME_TT2000) \ and self.backward: raise ValueError('Cannot use EPOCH16, INT8, or TIME_TT2000 ' 'in backward-compatible CDF') if not lib.supports_int8 and \ type in (const.CDF_INT8, const.CDF_TIME_TT2000): raise ValueError('INT8 and TIME_TT2000 require CDF library 3.4.0') if data is None: if type is None: raise ValueError('Must provide either data or a CDF type.') if dims is None: dims = [] if n_elements is None: n_elements = 1 else: (guess_dims, guess_types, guess_elements) = _Hyperslice.types(data) if dims is None: if recVary: if guess_dims == (): raise ValueError( 'Record-varying data cannot be scalar. 
' 'Specify NRV with CDF.new() or put data in array.') dims = guess_dims[1:] else: dims = guess_dims if type is None: type = guess_types[0] if type == const.CDF_EPOCH16.value and self.backward: type = const.CDF_EPOCH if n_elements is None: n_elements = guess_elements if dimVarys is None: dimVarys = [True for i in dims] recVary = const.VARY if recVary else const.NOVARY dimVarys = [const.VARY if dimVary else const.NOVARY for dimVary in dimVarys] if not hasattr(type, 'value'): type = ctypes.c_long(type) if type.value == const.CDF_INT8.value and not lib.supports_int8: raise ValueError( '64-bit integer support require CDF library 3.4.0') if type.value in (const.CDF_EPOCH16.value, const.CDF_INT8.value, const.CDF_TIME_TT2000.value) \ and self.backward: raise ValueError('Data requires EPOCH16, INT8, or TIME_TT2000; ' 'incompatible with backward-compatible CDF') new_var = Var(self, name, type, n_elements, dims, recVary, dimVarys) if compress != None: new_var.compress(compress, compress_param) if data is not None: new_var[...] = data if hasattr(data, 'attrs'): new_var.attrs.clone(data.attrs) return new_var def raw_var(self, name): """ Get a "raw" :class:`Var` object. Normally a :class:`Var` will perform translation of values for certain types (to/from Unicode for CHAR variables on Py3k, and to/from datetime for all time types). A "raw" object does not perform this translation, on read or write. This does *not* affect the data on disk, and in fact it is possible to maintain multiple Python objects with access to the same zVariable. Parameters ========== name : str name or number of the zVariable """ v = self[name] v._raw = True return v def save(self): """ Saves the CDF file but leaves it open. If closing the CDF, :meth:`close` is sufficient; there is no need to call :meth:`save` before :meth:`close`. .. note:: Relies on an undocumented call of the CDF C library, which is also used in the Java interface. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.SAVE_, const.CDF_) def copy(self): """ Make a copy of all data and attributes in this CDF Returns ======= out : :py:class:`CDFCopy` :class:`~spacepy.datamodel.SpaceData`-like object of all data """ return CDFCopy(self) def version(self): """ Get version of library that created this CDF Returns ======= out : tuple version of CDF library, in form (version, release, increment) """ ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) self._call(const.GET_, const.CDF_VERSION_, ctypes.byref(ver), const.GET_, const.CDF_RELEASE_, ctypes.byref(rel), const.GET_, const.CDF_INCREMENT_, ctypes.byref(inc)) return (ver.value, rel.value, inc.value) def _get_attrs(self): """Get attribute list Provide access to the CDF's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = gAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. 
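Example (a sketch only; ``cdffile`` stands for any open, writable CDF, and the attribute name and value are illustrative):

>>> cdffile.attrs['Project'] = 'ISTP>International Solar-Terrestrial Physics'
>>> print(cdffile.attrs['Project'][0])
ISTP>International Solar-Terrestrial Physics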
""") def var_num(self, varname): """Get the variable number of a particular variable name This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : int Variable number of this zvariable. """ num = self._var_nums.get(varname, None) if num is None: #Copied from Var._get, which can hopefully be thinned varNum = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMBER_, varname, ctypes.byref(varNum)) num = varNum.value self._var_nums[varname] = num return num def attr_num(self, attrname): """Get the attribute number and scope by attribute name This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : tuple attribute number, scope (True for global) of this attribute """ res = self._attr_info.get(attrname, None) if res is None: #Copied from Var._get, which can hopefully be thinned attrNum = ctypes.c_long(0) self._call(const.GET_, const.ATTR_NUMBER_, attrname, ctypes.byref(attrNum)) scope = ctypes.c_long(0) self._call(const.SELECT_, const.ATTR_, attrNum, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) res = (attrNum.value, scope) self._attr_info[attrname] = res return res def clear_attr_from_cache(self, attrname): """Mark an attribute deleted in the name-to-number cache Will remove an attribute, and all attributes with higher numbers, from the attribute cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the attribute. Not this is NOT a string in Python 3! """ num, scope = self.attr_num(attrname) #All numbers higher than this are renumbered for a, n in list(self._attr_info.items()): if n[0] >= num: del self._attr_info[a] def clear_from_cache(self, varname): """Mark a variable deleted in the name-to-number cache Will remove a variable, and all variables with higher numbers, from the variable cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! """ num = self.var_num(varname) #All numbers higher than this are renumbered for v, n in list(self._var_nums.items()): if n >= num: del self._var_nums[v] def add_attr_to_cache(self, attrname, num, scope): """Add an attribute to the name-to-number cache This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable scope : bool True if global scope; False if variable scope. 
""" self._attr_info[attrname] = (num, scope) def add_to_cache(self, varname, num): """Add a variable to the name-to-number cache This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable """ self._var_nums[varname] = num #Note there is no function for delete, currently handled in Var.rename #and Attr.rename by just deleting from the dict directly. Maybe this #should be differen (maybe should be possible to follow a variable across #a rename...) class Var(MutableSequence): """ A CDF variable. This object does not directly store the data from the CDF; rather, it provides access to the data in a format that much like a Python list or numpy :class:`~numpy.ndarray`. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. The CDF user's guide, section 2.3, provides background on variables. .. note:: Not intended to be created directly; use methods of :class:`CDF` to gain access to a variable. A record-varying variable's data are viewed as a hypercube of dimensions n_dims+1 (the extra dimension is the record number). They are indexed in row-major fashion, i.e. the last index changes most frequently / is contiguous in memory. If the CDF is column-major, the data are transformed to row-major before return. Non record-varying variables are similar, but do not have the extra dimension of record number. Variables can be subscripted by a multidimensional index to return the data. Indices are in row-major order with the first dimension representing the record number. If the CDF is column major, the data are reordered to row major. Each dimension is specified by standard Python `slice <http://docs.python.org/tutorial/introduction.html#strings>`_ notation, with dimensions separated by commas. The ellipsis fills in any missing dimensions with full slices. The returned data are lists; Python represents multidimensional arrays as nested lists. The innermost set of lists represents contiguous data. .. note:: numpy 'fancy indexing' is *not* supported. Degenerate dimensions are 'collapsed', i.e. no list of only one element will be returned if a single subscript is specified instead of a range. (To avoid this, specify a slice like 1:2, which starts with 1 and ends before 2). Two special cases: 1. requesting a single-dimension slice for a record-varying variable will return all data for that record number (or those record numbers) for that variable. 2. Requests for multi-dimensional variables may skip the record-number dimension and simply specify the slice on the array itself. In that case, the slice of the array will be returned for all records. In the event of ambiguity (e.g., single-dimension slice on a one-dimensional variable), case 1 takes priority. Otherwise, mismatch between the number of dimensions specified in the slice and the number of dimensions in the variable will cause an :exc:`~exceptions.IndexError` to be thrown. This all sounds very complicated but it is essentially attempting to do the 'right thing' for a range of slices. An unusual case is scalar (zero-dimensional) non-record-varying variables. Clearly they cannot be subscripted normally. 
In this case, use the ``[...]`` syntax meaning 'access all data.': >>> import pycdf >>> testcdf = pycdf.CDF('test.cdf', '') >>> variable = testcdf.new('variable', recVary=False, ... type=pycdf.const.CDF_INT4) >>> variable[...] = 10 >>> variable <Var: CDF_INT4 [] NRV > >>> variable[...] 10 Reading any empty non-record-varying variable will return an empty with the same *number* of dimensions, but all dimensions will be of zero length. The scalar is, again, a special case: due to the inability to have a numpy array which is both zero-dimensional and empty, reading an NRV scalar variable with no data will return an empty one-dimensional array. This is really not recommended. As a list type, variables are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_; iterating over a variable returns a single complete record at a time. This is all clearer with examples. Consider a variable ``B_GSM``, with three elements per record (x, y, z components) and fifty records in the CDF. Then: 1. ``B_GSM[0, 1]`` is the y component of the first record. 2. ``B_GSM[10, :]`` is a three-element list, containing x, y, and z components of the 11th record. As a shortcut, if only one dimension is specified, it is assumed to be the record number, so this could also be written ``B_GSM[10]``. 3. ``B_GSM[...]`` reads all data for ``B_GSM`` and returns it as a fifty-element list, each element itself being a three-element list of x, y, z components. Multidimensional example: consider fluxes stored as a function of pitch angle and energy. Such a variable may be called Flux and stored as a two-dimensional array, with the first dimension representing (say) ten energy steps and the second, eighteen pitch angle bins (ten degrees wide, centered from 5 to 175 degrees). Assume 100 records stored in the CDF (i.e. 100 different times). 1. ``Flux[4]`` is a list of ten elements, one per energy step, each element being a list of 18 fluxes, one per pitch bin. All are taken from the fifth record in the CDF. 2. ``Flux[4, :, 0:4]`` is the same record, all energies, but only the first four pitch bins (roughly, field-aligned). 3. ``Flux[..., 0:4]`` is a 100-element list (one per record), each element being a ten-element list (one per energy step), each containing fluxes for the first four pitch bins. This slicing notation is very flexible and allows reading specifically the desired data from the CDF. All data are, on read, converted to appropriate Python data types; EPOCH, EPOCH16, and TIME_TT2000 types are converted to :class:`~datetime.datetime`. Data are returned in numpy arrays. .. note:: Although pycdf supports TIME_TT2000 variables, the Python :class:`~datetime.datetime` object does not support leap seconds. Thus, on read, any seconds past 59 are truncated to 59.999999 (59 seconds, 999 milliseconds, 999 microseconds). Potentially useful list methods and related functions: - `count <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `in <http://docs.python.org/reference/expressions.html#in>`_ - `index <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `len <http://docs.python.org/library/functions.html#len>`_ - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - `sorted <http://docs.python.org/library/functions.html#sorted>`_ The topic of array majority can be very confusing; good background material is available at `IDL Array Storage and Indexing <http://www.idlcoyote.com/misc_tips/colrow_major.html>`_. 
In brief, *regardless of the majority stored in the CDF*, pycdf will always present the data in the native Python majority, row-major order, also known as C order. This is the default order in `NumPy <http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html #internal-memory-layout-of-an-ndarray>`_. However, packages that render image data may expect it in column-major order. If the axes seem 'swapped' this is likely the reason. The :attr:`~Var.attrs` Python attribute acts as a dictionary referencing zAttributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`zAttrList` for more on the dictionary of attributes. With writing, as with reading, every attempt has been made to match the behavior of Python lists. You can write one record, many records, or even certain elements of all records. There is one restriction: only the record dimension (i.e. dimension 0) can be resized by write, as all records in a variable must have the same dimensions. Similarly, only whole records can be deleted. .. note:: Unusual error messages on writing data usually mean that pycdf is unable to interpret the data as a regular array of a single type matching the type and shape of the variable being written. A 5x4 array is supported; an irregular array where one row has five columns and a different row has six columns is not. Error messages of this type include: - ``Data must be well-formed, regular array of number, string, or datetime`` - ``setting an array element with a sequence.`` - ``shape mismatch: objects cannot be broadcast to a single shape`` For these examples, assume Flux has 100 records and dimensions [2, 3]. Rewrite the first record without changing the rest: >>> Flux[0] = [[1, 2, 3], [4, 5, 6]] Writes a new first record and delete all the rest: >>> Flux[...] = [[1, 2, 3], [4, 5, 6]] Write a new record in the last position and add a new record after: >>> Flux[99:] = [[[1, 2, 3], [4, 5, 6]], ... [[11, 12, 13], [14, 15, 16]]] Insert two new records between the current number 5 and 6: >>> Flux[5:6] = [[[1, 2, 3], [4, 5, 6]], [[11, 12, 13], ... [14, 15, 16]]] This operation can be quite slow, as it requires reading and rewriting the entire variable. (CDF does not directly support record insertion.) Change the first element of the first two records but leave other elements alone: >>> Flux[0:2, 0, 0] = [1, 2] Remove the first record: >>> del Flux[0] Removes record 5 (the sixth): >>> del Flux[5] Due to the need to work around a bug in the CDF library, this operation can be quite slow. Delete *all data* from ``Flux``, but leave the variable definition intact: >>> del Flux[...] .. note:: Although this interface only directly supports zVariables, zMode is set on opening the CDF so rVars appear as zVars. See p.24 of the CDF user's guide; pyCDF uses zMode 2. .. autosummary:: ~Var.attrs ~Var.compress ~Var.copy ~Var.dtype ~Var.dv ~Var.insert ~Var.name ~Var.rename ~Var.rv ~Var.shape ~Var.type .. attribute:: Var.attrs zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. .. automethod:: compress .. automethod:: copy .. autoattribute:: dtype .. automethod:: dv .. automethod:: insert .. automethod:: name .. automethod:: rename .. automethod:: rv .. autoattribute:: shape .. 
automethod:: type """ def __init__(self, cdf_file, var_name, *args): """Create or locate a variable Parameters ========== cdf_file : :py:class:`pycdf.CDF` CDF file containing this variable var_name : string name of this variable Other Parameters ================ args additional arguments passed to :py:meth:`_create`. If none, opens an existing variable. If provided, creates a new one. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning """ self.cdf_file = cdf_file #This is the definitive "identify" of variable self._name = None self._type = None #CDF type (long) self._raw = False #Raw access (skip all conversions) if len(args) == 0: self._get(var_name) else: self._create(var_name, *args) #Weak reference to attribute list (use attrs instead) #This avoids a reference loop self._attrlistref = weakref.ref(zAttrList(self)) def __getitem__(self, key): """Returns a slice from the data array. Details under :py:class:`pycdf.Var`. @return: The data from this variable @rtype: list-of-lists of appropriate type. @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) #Hyperslice mostly catches this sort of thing, but #an empty variable is a special case, since we might want to #WRITE to 0th record (which Hyperslice also supports) but #can't READ from it, and iterating over tries to read from it. if hslice.rv: if hslice.dimsizes[0] == 0 and hslice.degen[0] and \ hslice.starts[0] == 0: raise IndexError('record index out of range') #For NRV, again hslice will assume 0th record exists since we might #want to write. So ANY degenerate dim other than the glued-on 0th #suggests an explicit index that should fail. None degenerate suggests #make an empty array. #Note this is pulling a lot of hyperslice stuff into getitem! elif hslice.dimsizes[0] == 0: if len(hslice.degen) > 1 and max(hslice.degen[1:]): raise IndexError('record index out of range') else: #The zero-length dimension is degenerate so it gets chopped, #and you can't have a zero-length numpy array that still #maintains the size of all other dimensions. So just force #a zero-dim array and the rest will follow hslice.counts[...] = 0 #If this is a scalar, need to make a single non-degenerate #dimension so it can be empty. if len(hslice.counts) == 1: hslice.degen[0] = False result = hslice.create_array() if hslice.counts[0] != 0: hslice.select() lib.call(const.GET_, const.zVAR_HYPERDATA_, result.ctypes.data_as(ctypes.c_void_p)) return hslice.convert_input_array(result) def __delitem__(self, key): """Removes a record (or set of records) from the CDF Only whole records can be deleted, so the del call must either specify only one dimension or it must specify all elements of the non-record dimensions. This is *not* a way to resize a variable! Deleting records from the middle of a variable may be very slow in some circumstances. To work around a bug in CDF library versions 3.4.0 and before, all the data must be read in, the requested deletions done, and then all written back out. 
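Example (a sketch; ``flux`` stands for any record-varying zVariable):

>>> del flux[5]        #delete the sixth record
>>> del flux[0:10:2]   #delete every other record of the first ten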
@param key: index or slice to delete @type key: int or slice @raise TypeError: if an attempt is made to delete from a non record-varying variable, or to delete below the record level """ if not self.rv(): raise TypeError('Cannot delete records from non-record-varying ' 'variable.') hslice = _Hyperslice(self, key) if hslice.dims > 1 and (hslice.counts[1:] != hslice.dimsizes[1:]).any(): raise TypeError('Can only delete entire records.') if hslice.counts[0] == 0: return start = hslice.starts[0] count = hslice.counts[0] interval = hslice.intervals[0] dimsize = hslice.dimsizes[0] self._call() dangerous_delete = False if lib._del_middle_rec_bug and \ (interval != 1 or (start != 0 and start + count < dimsize)): #delete from middle is dangerous if only have one index entry entries = ctypes.c_long(0) lib.call(const.GET_, const.zVAR_nINDEXENTRIES_, ctypes.byref(entries)) dangerous_delete = (entries.value == 1) if dangerous_delete: data = self[...] data = numpy.delete( data, numpy.arange(start, start + count * interval, interval), 0) self[0:dimsize - count] = data first_rec = dimsize - count last_rec = dimsize - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif interval == 1: first_rec = ctypes.c_long(start) last_rec = ctypes.c_long(start + count - 1) lib.call(const.DELETE_, const.zVAR_RECORDS_, first_rec, last_rec) else: self._call() #delete from end to avoid renumbering of records for recno in range(start + (count - 1) * interval, start - 1, -1 * interval): lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(recno), ctypes.c_long(recno)) def __setitem__(self, key, data): """Puts a slice into the data array. Details under :py:class:`pycdf.Var`. @param key: index or slice to store @type key: int or slice @param data: data to store @type data: numpy.array @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. 
IndexError will @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) n_recs = hslice.counts[0] hslice.expand(data) cdf_type = self.type() if cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) else: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=self._np_type()) if cdf_type == const.CDF_EPOCH16.value: datashape = data.shape[:-1] else: datashape = data.shape #Check data sizes if datashape != tuple(hslice.expected_dims()): raise ValueError('attempt to assign data of dimensions ' + str(datashape) + ' to slice of dimensions ' + str(tuple(hslice.expected_dims()))) #Flip majority and reversed dimensions, see convert_input_array data = hslice.convert_output_array(data) #Handle insertions and similar weirdness if hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Specified slice ends before last record, so insert in middle saved_data = self[hslice.starts[0] + n_recs:] if hslice.counts[0] > 0: hslice.select() lib.call(const.PUT_, const.zVAR_HYPERDATA_, data.ctypes.data_as(ctypes.c_void_p)) if hslice.counts[0] < n_recs: first_rec = hslice.starts[0] + hslice.counts[0] last_rec = hslice.dimsizes[0] - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Put saved data in after inserted data self[hslice.starts[0] + hslice.counts[0]:] = saved_data def extend(self, data): """ Append multiple values to the end of this variable This is an efficiency function which overrides the base implementation in MutableSequence. Parameters ---------- data : the data to append """ self[len(self):] = data def insert(self, index, data): """ Inserts a *single* record before an index Parameters ---------- index : int index before which to insert the new record data : the record to insert """ self[index:index] = [data] def _create(self, var_name, datatype, n_elements = 1, dims = (), recVary = const.VARY, dimVarys = None): """Creates a new zVariable @param var_name: name of this variable @type var_name: string @param datatype: CDF data type @type datatype: ctypes.c_long @param n_elements: number of elements (should be 1 except for CDF_CHAR variables). @type n_elements: long @param dims: size of each dimension for multi-dimensional variable, or empty for a zero-dimensional @type dims: sequence of long @param recVary: record variance for this variable (VARY/NOVARY) @type recVary: long @param dimVarys: array of VARY or NOVARY, variance for each dimension @type dimVarys: sequence of long @return: new variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.new}. 
""" dim_array = (ctypes.c_long * len(dims))(*dims) enc_name = var_name.encode('ascii') if dimVarys is None: dim_vary_array = (ctypes.c_long * (len(dims) if len(dims) > 0 else 1))(const.VARY) else: dim_vary_array = (ctypes.c_long * len(dims))(*dimVarys) varNum = ctypes.c_long(0) self.cdf_file._call(const.CREATE_, const.zVAR_, enc_name, datatype, ctypes.c_long(n_elements), ctypes.c_long(len(dims)), dim_array, recVary, dim_vary_array, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) def _delete(self): """Removes this zVariable from the CDF @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ self._call(const.DELETE_, const.zVAR_) self.cdf_file.clear_from_cache(self._name) self._name = None def _get(self, var_name): """Gets an existing zVariable @param var_name: name of this variable @type var_name: string @return: variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.__getitem__}. """ if isinstance(var_name, str_classes): try: enc_name = var_name.encode('ascii').rstrip() except AttributeError: enc_name = var_name.rstrip() #already in ASCII #'touch' CDF to cause an error if the name isn't there; get number varNum = ctypes.c_long(0) self.cdf_file._call(const.GET_, const.zVAR_NUMBER_, enc_name, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) else: #Looking up by number name = ctypes.create_string_buffer(const.CDF_VAR_NAME_LEN256+1) self.cdf_file._call(const.SELECT_, const.zVAR_, ctypes.c_long(var_name), const.GET_, const.zVAR_NAME_, name) self._name = name.value.rstrip() self.cdf_file.add_to_cache(self._name, var_name) def _num(self): """Returns the zVar number for this variable @return: number of this zVar @rtype: int """ return self.cdf_file.var_num(self._name) def __len__(self): """Get number of records for this variable in this file @return: Number of records @rtype: long @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ count = ctypes.c_long(0) self._call(const.GET_, const.zVAR_MAXREC_, ctypes.byref(count)) return (count.value + 1) def __repr__(self): """Returns representation of the variable Cannot return anything that can be eval'd to create a copy, so just wrap the informal representation in angle brackets. @return: info on this zVar @rtype: str """ return '<Var:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the variable This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.Var`. 
@return: info on this zVar, CDFTYPE [dimensions] NRV (if not record-varying) @rtype: str """ if self.cdf_file._opened: cdftype = self.type() chartypes = (const.CDF_CHAR.value, const.CDF_UCHAR.value) rv = self.rv() typestr = lib.cdftypenames[cdftype] + \ ('*' + str(self._nelems()) if cdftype in chartypes else '' ) if rv: sizestr = str([len(self)] + self._dim_sizes()) else: sizestr = str(self._dim_sizes()) return typestr + ' ' + sizestr + ('' if rv else ' NRV') else: if isinstance(self._name, str): return 'zVar "{0}" in closed CDF {1}'.format( self._name, self.cdf_file.pathname) else: return 'zVar "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self.cdf_file.pathname.decode('ascii')) def _n_dims(self): """Get number of dimensions for this variable @return: the number of dimensions @rtype: long """ n_dims = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMDIMS_, ctypes.byref(n_dims)) return n_dims.value def _dim_sizes(self): """Get the dimension sizes for this variable @return: sequence of sizes @rtype: sequence of long @note: This will always be in Python order (i.e. row major, last index iterates most quickly), *regardless* of the majority of the CDF. """ sizes = (ctypes.c_long * const.CDF_MAX_DIMS)(0) self._call(const.GET_, const.zVAR_DIMSIZES_, sizes) sizes = sizes[0:self._n_dims()] return sizes def rv(self, new_rv=None): """ Gets or sets whether this variable has record variance If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Other Parameters ================ new_rv : boolean True to change to record variance, False to change to NRV, unspecified to simply check variance. Returns ======= out : Boolean True if record varying, False if NRV """ if new_rv != None: self._call(const.PUT_, const.zVAR_RECVARY_, const.VARY if new_rv else const.NOVARY) vary = ctypes.c_long(0) self._call(const.GET_, const.zVAR_RECVARY_, ctypes.byref(vary)) return vary.value != const.NOVARY.value def dv(self, new_dv=None): """ Gets or sets dimension variance of each dimension of variable. If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Parameters ========== new_dv : list of boolean Each element True to change that dimension to dimension variance, False to change to not dimension variance. (Unspecified to simply check variance.) Returns ======= out : list of boolean True if that dimension has variance, else false. """ ndims = self._n_dims() if new_dv != None: if len(new_dv) != ndims: raise ValueError('Must specify variance for ' + str(ndims) + 'dimensions.') varies = (ctypes.c_long * ndims)( *[const.VARY if dv else const.NOVARY for dv in new_dv]) self._call(const.PUT_, const.zVAR_DIMVARYS_, varies) if ndims == 0: return [] varies = (ctypes.c_long * const.CDF_MAX_DIMS)() self._call(const.GET_, const.zVAR_DIMVARYS_, varies) return [dv != const.NOVARY.value for dv in varies[0:ndims]] def _call(self, *args, **kwargs): """Select this CDF and variable and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. @param args: Passed directly to the CDF library interface. Useful constants are defined in the :py:mod:`pycdf.const` module of this package. @type args: various, see :py:mod:`ctypes`. 
@return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self.cdf_file._call( const.SELECT_, const.zVAR_, ctypes.c_long(self.cdf_file.var_num(self._name)), *args, **kwargs) def _np_type(self): """Returns the numpy type of this variable This is the numpy type that will come directly out of the CDF; see :meth:`dtype` for the representation post-conversion. Raises ====== CDFError : for library-reported error or failure to find numpy type Returns ======= out : dtype numpy dtype that will hold value from this variable """ cdftype = self.type() if cdftype == const.CDF_CHAR.value or cdftype == const.CDF_UCHAR.value: return numpy.dtype('S' + str(self._nelems())) try: return lib.numpytypedict[cdftype] except KeyError: raise CDFError(const.BAD_DATA_TYPE) def type(self, new_type=None): """ Returns or sets the CDF type of this variable Parameters ========== new_type : ctypes.c_long the new type from :mod:`~pycdf.const` Returns ======= out : int CDF type """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) n_elements = ctypes.c_long(self._nelems()) self._call(const.PUT_, const.zVAR_DATASPEC_, new_type, n_elements) self._type = None if self._type is None: cdftype = ctypes.c_long(0) self._call(const.GET_, const.zVAR_DATATYPE_, ctypes.byref(cdftype)) self._type = cdftype.value return self._type def _nelems(self): """Number of elements for each value in this variable This is the length of strings for CHAR and UCHAR, should be 1 otherwise. @return: length of strings @rtype: int """ nelems = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMELEMS_, ctypes.byref(nelems)) return nelems.value def name(self): """ Returns the name of this variable Returns ======= out : str variable's name """ if isinstance(self._name, str): return self._name elif isinstance(self._name, bytes): return self._name.decode() def compress(self, comptype=None, param=None): """ Set or check the compression of this variable Compression may not be changeable on variables with data already written; even deleting the data may not permit the change. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
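Examples
========
A sketch mirroring the :meth:`CDF.compress` example (assumes ``cdffile`` is an open, writable CDF containing a zVariable named ``Epoch``):

>>> cdffile['Epoch'].compress(pycdf.const.GZIP_COMPRESSION, 9)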
Returns ======= out : tuple the (comptype, param) currently in effect """ return _compress(self, comptype, param) def copy(self): """ Copies all data and attributes from this variable Returns ======= out : :class:`VarCopy` list of all data in record order """ return VarCopy(self) def rename(self, new_name): """ Renames this variable Parameters ========== new_name : str the new name for this variable """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_VAR_NAME_LEN256: raise CDFError(const.BAD_VAR_NAME) self._call(const.PUT_, const.zVAR_NAME_, enc_name) self.cdf_file.add_to_cache( enc_name, self.cdf_file.var_num(self._name)) #Still in cache del self.cdf_file._var_nums[self._name] self._name = enc_name @property def shape(self): """ Provides the numpy array-like shape of this variable. Returns a tuple; first element is number of records (RV variable only) And the rest provide the dimensionality of the variable. .. note:: Assigning to this attribute will not change the shape. """ if self.rv(): return tuple([len(self)] + self._dim_sizes()) else: return tuple(self._dim_sizes()) @property def dtype(self): """ Provide the numpy dtype equivalent to the CDF type of this variable. Data from this variable will be returned in numpy arrays of this type. See Also -------- type """ cdftype = self.type() if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str is not bytes and not self._raw: return numpy.dtype('U' + str(self._nelems())) if cdftype in (const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value) and not self._raw: return numpy.dtype('O') return self._np_type() def _get_attrs(self): """Get attribute list Provide access to the zVar's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = zAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. """) class _Hyperslice(object): """Represents a CDF 'slice' used for the hyper CDF functions For internal module use only. @ivar dims: number of dimensions to this slice, usually number of dimensions to the variable plus one for the record, which represents the 0th (least rapidly varying) dimension. @type dims: int @ivar dimsizes: size of each dimension (0th is number of records) @type dimsizes: list of int @ivar starts: index of the start value for each dimension ('dimension indices' in CDF speak) @type starts: list of int @ivar counts: number of values to get from each dimension. Final result will be the product of everything in counts. ('dimension counts' in CDF speak) @type counts: numpy.array @ivar intervals: interval between successive indices to use for each dimension. ('dimension invervals' in CDF speak) @type intervals: list of int @ivar degen: is this dimension degenerate, i.e. should be removed in the returned dataset. A 3D array with one dimension degenerate will be returned as a 2D array (i.e. list-of-lists.) @type degen: numpy.array @ivar rev: should this dimension be returned in reverse order? 
@type rev: numpy.array @ivar column: is this slice in column-major mode (if false, row-major) @type column: boolean @ivar zvar: what CDF variable this object slices on @type zvar: :py:class:`pycdf.Var` @ivar expanded_key: fully-expanded version of the key passed to the constructor (all dimensions filled in) @type expanded_key: tuple @note: All dimension-related variables are stored row-major (Python order) """ def __init__(self, zvar, key): """Create a Hyperslice @param zvar: zVariable that this slices @type zvar: :py:class:`pycdf.Var` @param key: Python multi-dimensional slice as passed to __getitem__ @type key: tuple of slice and/or int @raise IndexError: if slice is out of range, mismatches dimensions, or otherwise unparsable. @raise ValueError: if slice has invalid values """ self.zvar = zvar self.rv = self.zvar.rv() #dim of records, + 1 record dim (NRV always is record 0) self.dims = zvar._n_dims() + 1 self.dimsizes = [len(zvar)] + \ zvar._dim_sizes() self.starts = [0] * self.dims self.counts = numpy.empty((self.dims,), dtype=numpy.int32) self.counts.fill(1) self.intervals = [1] * self.dims self.degen = numpy.zeros(self.dims, dtype=numpy.bool) self.rev = numpy.zeros(self.dims, dtype=numpy.bool) #key is: #1. a single value (integer or slice object) if called 1D #2. a tuple (of integers and/or slice objects) if called nD #3. Each item is either a single value (degenerate dim) # or a slice object. if not hasattr(key, '__len__'): #Not a container object, pack in tuple key = (key, ) if not self.rv: key = (0, ) + key #NRV, so always get 0th record (degenerate) key = self.expand_ellipsis(key, self.dims) if self.rv: #special-cases for RV variables if len(key) == 1: #get all data for this record(s) key = self.expand_ellipsis(key + (Ellipsis, ), self.dims) elif len(key) == self.dims - 1: #get same slice from each record key = (slice(None, None, None), ) + key if len(key) == self.dims: self.expanded_key = key for i in range(self.dims): idx = key[i] if hasattr(idx, 'start'): #slice (self.starts[i], self.counts[i], self.intervals[i], self.rev[i]) = \ self.convert_range(idx.start, idx.stop, idx.step, self.dimsizes[i]) else: #Single degenerate value if idx < 0: idx += self.dimsizes[i] if idx != 0 and (idx >= self.dimsizes[i] or idx < 0): raise IndexError('list index out of range') self.starts[i] = idx self.degen[i] = True else: raise IndexError('Slice does not match dimensions for zVar ' + str(zvar._name)) self.column = zvar.cdf_file.col_major() def expected_dims(self, data=None): """Calculate size of non-degenerate dimensions Figures out size, in each dimension, of expected input data @return: size of each dimension for this slice, excluding degenerate @rtype: list of int """ return [self.counts[i] for i in range(self.dims) if not self.degen[i]] def expand(self, data): """Expands the record dimension of this slice to hold a set of data If the length of data (outermost dimension) is larger than the record count (counts[0]) for this slice, expand the slice to hold all the data. This requires that the record dimension of the slice not be degenerate, and also that it not have been completely specified when the hyperslice was created (i.e. record dimension either ellipsis or no specified stop.) Does *not* expand any other dimension, since that's Very Hard in CDF. 
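Example (illustrative): for a slice like ``var[2:]`` on a ten-record variable, counts[0] starts as 8; if twelve records of data are then assigned, counts[0] is expanded to 12 so all the data fit.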
@param data: the data which are intended to be stored in this slice @type data: list """ rec_slice = self.expanded_key[0] if not self.rv or isinstance(data, str_classes) or self.degen[0] or \ not hasattr(rec_slice, 'stop'): return if len(data) < self.counts[0]: #Truncate to fit data if rec_slice.stop is None and rec_slice.step in (None, 1): self.counts[0] = len(data) elif len(data) > self.counts[0]: #Expand to fit data if rec_slice.step in (None, 1): self.counts[0] = len(data) def create_array(self): """Creates a numpy array to hold the data from this slice Returns ======= out : numpy.array array sized, typed, and dimensioned to hold data from this slice """ counts = self.counts degen = self.degen if self.column: counts = self.reorder(counts) degen = self.reorder(degen) #TODO: Forcing C order for now, revert to using self.column later array = numpy.empty( [counts[i] for i in range(len(counts)) if not degen[i]], self.zvar._np_type(), order='C') return numpy.require(array, requirements=('C', 'A', 'W')) def convert_input_array(self, buffer): """Converts a buffer of raw data from this slice EPOCH(16) variables always need to be converted. CHAR need converted to Unicode if py3k Parameters ========== buffer : numpy.array data as read from the CDF file Returns ======= out : numpy.array converted data """ result = self._flip_array(buffer) #Convert to derived types cdftype = self.zvar.type() if not self.zvar._raw: if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str != bytes: dt = numpy.dtype('U{0}'.format(result.dtype.itemsize)) result = numpy.require(numpy.char.array(result).decode(), dtype=dt) elif cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(result) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(result) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(result) return result def convert_output_array(self, buffer): """Convert a buffer of data that will go into this slice Parameters ========== buffer : numpy.array data to go into the CDF file Returns ======= out : numpy.array input with majority flipped and dimensions reversed to be suitable to pass directly to CDF library. """ buffer = self._flip_array(buffer) return numpy.require(buffer, requirements=('C', 'A', 'W')) def _flip_array(self, data): """ Operations for majority, etc. common between convert_input and _output """ cdftype = self.zvar.type() #Flip majority if any non-degenerate dimensions exist if self.column and not min(self.degen): #Record-number dim degen, swap whole thing if self.degen[0]: if cdftype == const.CDF_EPOCH16.value: #Maintain last dimension data = data.transpose( list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose() #Record-number dimension is not degenerate, so keep it first else: if cdftype == const.CDF_EPOCH16.value: data = data.transpose( [0] + list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose( [0] + list(range(len(data.shape) - 1, 0, -1))) #Reverse non-degenerate dimensions in rev #Remember that the degenerate indices are already gone! 
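#(rev is set by convert_range for negative-step slices, e.g. var[::-1];
# the negative-step slices built below perform the actual reversal)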
if self.rev.any(): sliced = [(slice(None, None, -1) if self.rev[i] else slice(None)) for i in range(self.dims) if not self.degen[i]] if cdftype == const.CDF_EPOCH16.value: #don't reverse last dim sliced.extend(slice(None)) data = operator.getitem(data, tuple(sliced)) return data def select(self): """Selects this hyperslice in the CDF Calls the CDF library to select the CDF, variable, records, and array elements corresponding to this slice. """ args = (const.SELECT_, const.zVAR_RECNUMBER_, ctypes.c_long(self.starts[0]), const.SELECT_, const.zVAR_RECCOUNT_, ctypes.c_long(self.counts[0]), const.SELECT_, const.zVAR_RECINTERVAL_, ctypes.c_long(self.intervals[0])) if self.dims > 1: dims = self.dims - 1 args += (const.SELECT_, const.zVAR_DIMINDICES_, (ctypes.c_long * dims)(*self.starts[1:]), const.SELECT_, const.zVAR_DIMCOUNTS_, (ctypes.c_long * dims)(*self.counts[1:]), const.SELECT_, const.zVAR_DIMINTERVALS_, (ctypes.c_long * dims)(*self.intervals[1:])) self.zvar._call(*args) @staticmethod def expand_ellipsis(slices, n_dims): """Expands any ellipses into correct number of full-size slices @param slices: tuple of slices, integers, or ellipse objects @type slices: tuple @param n_dims: number of dimensions this slice is over @type n_dims: int @return: L{slices} with ellipses replaced by appropriate number of full-dimension slices @rtype: tuple @raise IndexError: if ellipses specified when already have enough dimensions """ if slices is Ellipsis: return tuple([slice(None, None, None) for i in range(n_dims)]) #Elements might be numpy arrays, so can't use in/index idx = [i for i, v in enumerate(slices) if v is Ellipsis] if not idx: #no ellipsis return slices if len(idx) > 1: #multiples! raise IndexError('Ellipses can only be used once per slice.') idx = idx[0] #how many dims to expand ellipsis to #remember the ellipsis is in len(slices) and must be replaced! extra = n_dims - len(slices) + 1 if extra < 0: raise IndexError('too many indices') result = slices[0:idx] + (slice(None), ) * extra + slices[idx+1:] return result @staticmethod def check_well_formed(data): """Checks if input data is well-formed, regular array""" d = numpy.asanyarray(data) if d.dtype == numpy.object: #this is probably going to be bad try: len(d.flat[0]) except TypeError: #at least it's not a list pass else: raise ValueError( 'Data must be well-formed, regular array of number, ' 'string, or datetime') @staticmethod def dimensions(data): """Finds the dimensions of a nested list-of-lists @param data: data of which dimensions are desired @type data: list (of lists) @return: dimensions of L{data}, in order outside-in @rtype: list of int @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) _Hyperslice.check_well_formed(d) return d.shape @staticmethod def types(data, backward=False): """Find dimensions and valid types of a nested list-of-lists Any given data may be representable by a range of CDF types; infer the CDF types which can represent this data. This breaks down to: 1. Proper kind (numerical, string, time) 2. Proper range (stores highest and lowest number) 3. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: 1. Type that matches precision of data first, then 2. integer type before float type, then 3. Smallest type first, then 4. signed type first, then 5. specifically-named (CDF_BYTE) vs. 
generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if L{data} specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: 1. absolute values between 0 and 3e-39 2. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. @param data: data for which dimensions and CDF types are desired @type data: list (of lists) @param backward: limit to pre-CDF3 types @type backward: bool @return: dimensions of L{data}, in order outside-in; CDF types which can represent this data; number of elements required (i.e. length of longest string) @rtype: 3-tuple of lists ([int], [ctypes.c_long], [int]) @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) dims = d.shape elements = 1 types = [] _Hyperslice.check_well_formed(d) if d.dtype.kind in ('S', 'U'): #it's a string types = [const.CDF_CHAR, const.CDF_UCHAR] elements = d.dtype.itemsize if d.dtype.kind == 'U': #UTF-8 uses 4 bytes per elements //= 4 elif d.size and hasattr(numpy.ma.getdata(d).flat[0], 'microsecond'): if max((dt.microsecond % 1000 for dt in d.flat)) > 0: types = [const.CDF_EPOCH16, const.CDF_EPOCH, const.CDF_TIME_TT2000] else: types = [const.CDF_EPOCH, const.CDF_EPOCH16, const.CDF_TIME_TT2000] if backward: del types[types.index(const.CDF_EPOCH16)] del types[-1] elif not lib.supports_int8: del types[-1] elif d is data or isinstance(data, numpy.generic): #numpy array came in, use its type (or byte-swapped) types = [k for k in lib.numpytypedict if (lib.numpytypedict[k] == d.dtype or lib.numpytypedict[k] == d.dtype.newbyteorder()) and not k in lib.timetypes] if (not lib.supports_int8 or backward) \ and const.CDF_INT8.value in types: del types[types.index(const.CDF_INT8.value)] #Maintain priority to match the ordered lists below: #float/double (44, 45) before real (21/22), and #byte (41) before int (1) before char (51). So hack. #Consider making typedict an ordered dict once 2.6 is dead. types.sort(key=lambda x: x % 50, reverse=True) if not types: #not a numpy array, or can't parse its type if d.dtype.kind == 'O': #Object. 
Try to make it numeric #Can't do safe casting from Object, so try and compare #Basically try most restrictive to least restrictive trytypes = (numpy.uint64, numpy.int64, numpy.float64) for t in trytypes: try: newd = d.astype(dtype=t) except: #Failure to cast, try next type continue if (newd == d).all(): #Values preserved, use this type d = newd #Continue with normal guessing, as if a list break else: #fell through without a match raise ValueError( 'Cannot convert generic objects to CDF type.') if d.dtype.kind in ('i', 'u'): #integer minval = numpy.min(d) maxval = numpy.max(d) if minval < 0: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_INT2, const.CDF_INT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 15, 2 ** 31, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] else: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_UINT1, const.CDF_INT2, const.CDF_UINT2, const.CDF_INT4, const.CDF_UINT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 8, 2 ** 15, 2 ** 16, 2 ** 31, 2 ** 32, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] types = [t for (t, c) in zip(types, cutoffs) if c > maxval and (minval >= 0 or minval >= -c)] if (not lib.supports_int8 or backward) \ and const.CDF_INT8 in types: del types[types.index(const.CDF_INT8)] else: #float if dims is (): if d != 0 and (abs(d) > 1.7e38 or abs(d) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] else: absolutes = numpy.abs(d[d != 0]) if len(absolutes) > 0 and \ (numpy.max(absolutes) > 1.7e38 or numpy.min(absolutes) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] types = [t.value if hasattr(t, 'value') else t for t in types] return (dims, types, elements) @staticmethod def reorder(seq): """Reorders seq to switch array majority Used to take an array of subscripts between row and column majority. First element is not touched, being the record number. @param seq: a sequence of *subscripts* @type seq: sequence of integers @return: seq with all but element 0 reversed in order @rtype: sequence of integers """ return numpy.concatenate((seq[0:1], numpy.flipud(seq)[:-1])) @staticmethod def convert_range(start, stop, step, size): """Converts a start/stop/step range to start/count/interval (i.e. changes from Python-style slice to CDF-style) @param start: index to start a slice at, may be none or negative @type start: int @param stop: index at end of slice (one-past, standard Python), may be none or negative @type stop: int @param step: interval for stepping through stlice @type step: int @param size: size of list to slice @type size: int @return: (start, count, interval, rev) where: 1. start is the start index, normalized to be within the size of the list and negatives handled 2. count is the number of records in the slice, guaranteed to stop before the end 3. interval is the skip between records 4. rev indicates whether the sequence should be reversed @rtype: (int, int, int, boolean) """ (start, stop, step) = slice(start, stop, step).indices(size) if step < 0: step *= -1 count = int((start - stop + step - 1) / step) start = start - (count - 1) * step rev = True else: count = int((stop - start + step - 1) / step) rev = False if count < 0: count = 0 start = 0 return (start, count, step, rev) class Attr(MutableSequence): """An attribute, g or z, for a CDF .. 
warning:: This class should not be used directly, but only in its subclasses, :class:`gAttr` and :class:`zAttr`. The methods listed here are safe to use in the subclasses. Represents a CDF attribute, providing access to the Entries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. An introduction to CDF attributes can be found in section 2.4 of the CDF user's guide. Each element of the list is a single Entry of the appropriate type. The index to the elements is the Entry number. Multi-dimensional slicing is *not* supported; an Entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry .. autosummary:: ~Attr.append ~Attr.has_entry ~Attr.insert ~Attr.max_idx ~Attr.new ~Attr.number ~Attr.rename ~Attr.type .. automethod:: append .. automethod:: has_entry .. automethod:: insert .. automethod:: max_idx .. automethod:: new .. automethod:: number .. automethod:: rename .. automethod:: type """ def __init__(self, cdf_file, attr_name, create=False): """Initialize this attribute @param cdf_file: CDF file containing this attribute @type cdf_file: :py:class:`pycdf.CDF` @param attr_name: Name of this attribute @type attr_name: str @param create: True to create attribute, False to look up existing. @type create: bool """ self._cdf_file = cdf_file self._raw = False if isinstance(attr_name, str_classes): try: self._name = attr_name.encode('ascii') except AttributeError: self._name = attr_name attrno = ctypes.c_long() if create: self._cdf_file._call(const.CREATE_, const.ATTR_, self._name, self.SCOPE, ctypes.byref(attrno)) self._cdf_file.add_attr_to_cache( self._name, attrno.value, self.SCOPE == const.GLOBAL_SCOPE) else: #Ensure exists, and populate cache. See scope note below attrno, scope = self._cdf_file.attr_num(self._name) else: name = ctypes.create_string_buffer(const.CDF_ATTR_NAME_LEN256 + 1) scope = ctypes.c_long(0) self._cdf_file._call(const.SELECT_, const.ATTR_, ctypes.c_long(attr_name)) #Because it's possible to create a gAttr Python objecting #referencing an Attribute with variable scope, and vice-versa, #do NOT assume the scope matches #(Higher level code checks for that being a bad thing.) self._cdf_file._call( const.GET_, const.ATTR_NAME_, name, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) self._name = name.value.rstrip() if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) self._cdf_file.add_attr_to_cache(self._name, attr_name, scope) def __getitem__(self, key): """Return a slice of Entries. Because Attributes may be sparse, a multi-element slice will return None for those elements which do not have associated Entries. @param key: index or range of Entry number to return @type key: slice or int @return: a list of entries, appropriate type. @raise IndexError: if L{key} is an int and that Entry number does not exist. 
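For example (a sketch; ``attribute`` is assumed to be an existing gAttr with Entries 0 and 2 defined but not 1):

>>> attribute[0]    #single existing Entry, returned directly
>>> attribute[0:3]  #slice; the missing Entry 1 is returned as None
>>> attribute[1]    #raises IndexError, no Entry 1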
""" if key is Ellipsis: key = slice(None, None, None) if hasattr(key, 'indices'): idx = range(*key.indices(self.max_idx() + 1)) return [self._get_entry(i) if self.has_entry(i) else None for i in idx] else: if self.has_entry(key): return self._get_entry(key) else: raise IndexError('list index ' + str(key) + ' out of range.') def _check_other_entries(self, types): """Try to get the type of this entry from others in the Attribute For zAttrs, checks if all other Entries are the same type, and at least one doesn't match its zVar, i.e. Entry type dominates (otherwise assumption is the Var type dominates). For gAttrs, checks all other Entries, and gives priority to the one that's earliest in the possible type list and exists in other Entries. This is only one component of Entry type guessing! :param list types: CDF types that are candidates (match the data) :return: The type discerned from other Entries, or None """ if self.ENTRY_ == const.zENTRY_: #If everything else is the same entry type, #and one is not the same as its var, probably #all entries should be of that type cand_et = None #The Entry type that might work one_var_diff = False #One Var has a type different from Entry for num in range(self.max_idx() + 1): if not self.has_entry(num): continue vartype = self._cdf_file[num].type() entrytype = self.type(num) if vartype != entrytype: one_var_diff = True if cand_et is None: if not entrytype in types: return None #One var has Entry with "impossible" type cand_et = entrytype elif cand_et != entrytype: return None #Two vars have Entries with different types if one_var_diff and cand_et is not None: return cand_et else: # Of those types which exist in other entries, # find the one which is earliest # in types, i.e. the preferred type entrytypes = [self.type(num) for num in range(self.max_idx() + 1) if self.has_entry(num)] entrytypes = [et for et in entrytypes if et in types] if entrytypes: return types[ min([types.index(et) for et in entrytypes])] return None def __setitem__(self, key, data): """Set a slice of Entries. @param key: index or range of Entry numbers to set @type key: slice or int @param data: the data to set these entries to. Normally each entry should be a sequence; if a scalar is provided, it is treated as a single-element list. @type data: scalar or list @raise ValueError: if size of {data} does not match size of L{key} @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. 
""" if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): #Single value, promote everything a dimension idx = (key, key + 1, 1) data = [data] else: idx = key.indices(self.max_idx() + 1) if key.step is None or key.step > 0: #Iterating forward, extend slice to match data if len(data) > len(range(*idx)): idx = (idx[0], idx[0] + idx[2] * len(data), idx[2]) #get, and check, types and sizes for all data #checks first so don't have error after changing half the Entries data_idx = -1 typelist = [] for i in range(*idx): data_idx += 1 if data_idx >= len(data): continue datum = data[data_idx] if datum is None: typelist[i] = (None, None, None) continue (dims, types, elements) = _Hyperslice.types( datum, backward=self._cdf_file.backward) if len(types) <= 0: raise ValueError('Cannot find a matching CDF type.') if len(dims) > 1: raise ValueError('Entries must be scalar or 1D.') elif len(dims) == 1 and isinstance(datum[0], str_classes): raise ValueError('Entry strings must be scalar.') entry_type = None if self.has_entry(i): #If the entry already exists, match its type entry_type = self.type(i) if not entry_type in types: entry_type = None if entry_type is None: #Check other entries for this attribute entry_type = self._check_other_entries(types) if entry_type is None and self.ENTRY_ == const.zENTRY_: #Fall back to zVar type vartype = self._cdf_file[i].type() if vartype in types: entry_type = vartype else: entry_type = types[0] elif entry_type is None: entry_type = types[0] if not entry_type in lib.numpytypedict: raise ValueError('Cannot find a matching numpy type.') typelist.append((dims, entry_type, elements)) data_idx = -1 for i in range(*idx): data_idx += 1 if data_idx >= len(data) or data[data_idx] is None: if self.has_entry(i): del self[i] continue datum = data[data_idx] (dims, entry_type, elements) = typelist[data_idx] self._write_entry(i, datum, entry_type, dims, elements) def __delitem__(self, key): """Delete a slice of Entries. @param key: index or range of Entry numbers to delete @type key: slice or int @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. """ if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): idx = (key, key + 1, 1) else: idx = key.indices(self.max_idx() + 1) for i in range(*idx): self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(i), const.DELETE_, self.ENTRY_) def __iter__(self, current=0): """Iterates over all entries in this Attribute Returns data from one entry at a time until reaches the end. @note: Returned in entry-number order. """ while current <= self.max_idx(): if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current += 1 def __reversed__(self, current=None): """Iterates over all entries in this Attribute Returns data from one entry at a time, starting at end and going to beginning. @note: Returned in entry-number order. """ if current is None: current = self.max_idx() while current >= 0: if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current -= 1 def __len__(self): """Number of Entries for this Attr. NOT same as max Entry number. 
@return: Number of Entries @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_NUMENTRIES_, ctypes.byref(count)) return count.value def __repr__(self): """Returns representation of an attribute Cannot return anything that can be eval'd to create a copy of the attribtute, so just wrap the informal representation in angle brackets. @return: all the data in this attribute @rtype: str """ return '<\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute This is an 'informal' representation in that it cannot be evaluated directly to create an L{Attr}. @return: all the data in this attribute @rtype: str """ if self._cdf_file._opened: return '\n'.join([str(item) for item in self]) else: if isinstance(self._name, str): return 'Attribute "{0}" in closed CDF {1}'.format( self._name, self._cdf_file.pathname) else: return 'Attribute "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self._cdf_file.pathname.decode('ascii')) def insert(self, index, data): """Insert an entry at a particular number Inserts entry at particular number while moving all subsequent entries to one entry number later. Does not close gaps. Parameters ========== index : int index where to put the new entry data : data for the new entry """ max_entry = self.max_idx() if index > max_entry: #Easy case self[index] = data return for i in range(max_entry, index - 1, -1): if self.has_entry(i+1): self.__delitem__(i+1) if self.has_entry(i): self.new(self.__getitem__(i), type=self.type(i), number=i+1) self[index] = data def append(self, data): """Add an entry to end of attribute Puts entry after last defined entry (does not fill gaps) Parameters ========== data : data for the new entry """ self[self.max_idx() + 1] = data def _call(self, *args, **kwargs): """Select this CDF and Attr and call the CDF internal interface @param args: Passed directly to the CDF library interface. @type args: various, see :py:mod:`ctypes`. @return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self._cdf_file._call( const.SELECT_, const.ATTR_, ctypes.c_long(self._cdf_file.attr_num(self._name)[0]), *args, **kwargs) def _entry_len(self, number): """Number of elements in an Entry @param number: number of Entry @type number: int @return: number of elements @rtype: int """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') count = ctypes.c_long(0) self._call( const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_NUMELEMS_, ctypes.byref(count)) return count.value def type(self, number, new_type=None): """Find or change the CDF type of a particular Entry number Parameters ========== number : int number of Entry to check or change Other Parameters ================ new_type type to change this Entry to, from :mod:`~pycdf.const`. Omit to only check type. Returns ======= out : int CDF variable type, see :mod:`~pycdf.const` Notes ===== If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) size = ctypes.c_long(self._entry_len(number)) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATASPEC_, new_type, size, ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') cdftype = ctypes.c_long(0) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATATYPE_, ctypes.byref(cdftype), ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') return cdftype.value def has_entry(self, number): """Check if this attribute has a particular Entry number Parameters ========== number : int number of Entry to check or change Returns ======= out : bool True if ``number`` is a valid entry number; False if not """ status = self._call(const.CONFIRM_, self.ENTRY_EXISTENCE_, ctypes.c_long(number), ignore=(const.NO_SUCH_ENTRY, )) return not status == const.NO_SUCH_ENTRY def max_idx(self): """Maximum index of Entries for this Attr Returns ======= out : int maximum Entry number """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_MAXENTRY_, ctypes.byref(count)) return count.value def new(self, data, type=None, number=None): """Create a new Entry in this Attribute .. note:: If ``number`` is provided and an Entry with that number already exists, it will be overwritten. Parameters ========== data data to put in the Entry Other Parameters ================ type : int type of the new Entry, from :mod:`~pycdf.const` (otherwise guessed from ``data``) number : int Entry number to write, default is lowest available number. """ if number is None: number = 0 while self.has_entry(number): number += 1 (dims, types, elements) = _Hyperslice.types( data, backward=self._cdf_file.backward) if type is None: #Guess based on other entries type = self._check_other_entries(types) if type is None and self.ENTRY_ == const.zENTRY_: #Try to match variable type vartype = self._cdf_file[number].type() if vartype in types: type = vartype if type is None: type = types[0] elif hasattr(type, 'value'): type = type.value self._write_entry(number, data, type, dims, elements) def number(self): """Find the attribute number for this attribute Returns ======= out : int attribute number """ no = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.ATTR_NUMBER_, self._name, ctypes.byref(no)) return no.value def global_scope(self): """Determine scope of this attribute. Returns ======= out : bool True if global (i.e. gAttr), False if zAttr """ return self._cdf_file.attr_num(self._name)[1] def rename(self, new_name): """Rename this attribute Renaming a zAttribute renames it for *all* zVariables in this CDF! 
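For example (a sketch; the new name is hypothetical and ``attribute`` is assumed to be an Attr in a writable CDF):

>>> attribute.rename('FLUX_LABL')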
Parameters ========== new_name : str the new name of the attribute """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_ATTR_NAME_LEN256: raise CDFError(const.BAD_ATTR_NAME) self._call(const.PUT_, const.ATTR_NAME_, enc_name) self._cdf_file.add_attr_to_cache( enc_name, *self._cdf_file.attr_num(self._name)) #still in cache del self._cdf_file._attr_info[self._name] self._name = enc_name def _get_entry(self, number): """Read an Entry associated with this L{Attr} @param number: number of Entry to return @type number: int @return: data from entry numbered L{number} @rtype: list or str """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') #Make a big enough buffer length = self._entry_len(number) cdftype = self.type(number) if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): buff = numpy.empty((), 'S{0}'.format(length), order='C') else: if not cdftype in lib.numpytypedict: raise CDFError(const.BAD_DATA_TYPE) buff = numpy.empty((length,), lib.numpytypedict[cdftype], order='C') buff = numpy.require(buff, requirements=('C', 'A', 'W')) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATA_, buff.ctypes.data_as(ctypes.c_void_p)) #decode if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): if str == bytes or self._raw: #Py2k, leave as bytes result = bytes(buff) else: #Py3k, make unicode result = str(numpy.char.array(buff).decode()) else: if not self._raw: if cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(buff) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(buff) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(buff) else: result = buff else: result = buff if length == 1: result = result[0] return result def _write_entry(self, number, data, cdf_type, dims, elements): """Write an Entry to this Attr. @param number: number of Entry to write @type number: int @param data: data to write @param cdf_type: the CDF type to write, from :py:mod:`pycdf.const` @param dims: dimensions of L{data} @type dims: list @param elements: number of elements in L{data}, 1 unless it is a string @type elements: int """ if len(dims) == 0: n_write = 1 else: n_write = dims[0] if cdf_type in (const.CDF_CHAR.value, const.CDF_UCHAR.value): data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.dtype('S' + str(elements))) n_write = elements elif cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data), except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) elif cdf_type in lib.numpytypedict: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=lib.numpytypedict[cdf_type]) else: raise CDFError(const.BAD_DATA_TYPE) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATA_, ctypes.c_long(cdf_type), ctypes.c_long(n_write), data.ctypes.data_as(ctypes.c_void_p)) def _delete(self): """Delete this Attribute Also deletes all Entries associated with it. 
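The public route to this is deleting from the attribute list, which removes the Attribute and all its Entries (a sketch; the attribute name is hypothetical and ``cdffile`` is assumed to be an open, writable CDF):

>>> del cdffile.attrs['Mission_group']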
""" self._call(const.DELETE_, const.ATTR_) self._cdf_file.clear_attr_from_cache(self._name) self._name = None class zAttr(Attr): """zAttribute for zVariables within a CDF. .. warning:: Because zAttributes are shared across all variables in a CDF, directly manipulating them may have unexpected consequences. It is safest to operate on zEntries via :class:`zAttrList`. .. note:: When accessing a zAttr, pyCDF exposes only the zEntry corresponding to the associated zVariable. See Also ======== :class:`Attr` """ ENTRY_ = const.zENTRY_ ENTRY_DATA_ = const.zENTRY_DATA_ SCOPE = const.VARIABLE_SCOPE ENTRY_EXISTENCE_ = const.zENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMzENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXzENTRY_ ENTRY_NUMELEMS_ = const.zENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.zENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.zENTRY_DATASPEC_ def insert(self, index, data): """Insert entry at particular index number Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError def append(self, index, data): """Add entry to end of attribute list Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError class gAttr(Attr): """Global Attribute for a CDF Represents a CDF attribute, providing access to the gEntries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. Normally accessed by providing a key to a :class:`gAttrList`: >>> attribute = cdffile.attrs['attribute_name'] >>> first_gentry = attribute[0] Each element of the list is a single gEntry of the appropriate type. The index to the elements is the gEntry number. A gEntry may be either a single string or a 1D array of numerical type. Entries of numerical type (everything but CDF_CHAR and CDF_UCHAR) with a single element are returned as scalars; multiple-element entries are returned as a list. No provision is made for accessing below the entry level; the whole list is returned at once (but Python's slicing syntax can be used to extract individual items from that list.) Multi-dimensional slicing is *not* supported; an entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry gEntries are *not* necessarily contiguous; a gAttribute may have an entry 0 and entry 2 without an entry 1. :meth:`~Attr.len` will return the *number* of gEntries; use :meth:`~Attr.max_idx` to find the highest defined gEntry number and :meth:`~Attr.has_entry` to determine if a particular gEntry number exists. Iterating over all entries is also supported:: >>> entrylist = [entry for entry in attribute] Deleting gEntries will leave a "hole": >>> attribute[0:3] = [1, 2, 3] >>> del attribute[1] >>> attribute.has_entry(1) False >>> attribute.has_entry(2) True >>> print attribute[0:3] [1, None, 3] Multi-element slices over nonexistent gEntries will return ``None`` where no entry exists. Single-element indices for nonexistent gEntries will raise ``IndexError``. Assigning ``None`` to a gEntry will delete it. 
When assigning to a gEntry, the type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing gEntry of the same number in this gAttribute #. other gEntries in this gAttribute #. data-matching constraints described in :meth:`CDF.new`. See Also ======== :class:`Attr` """ ENTRY_ = const.gENTRY_ ENTRY_DATA_ = const.gENTRY_DATA_ SCOPE = const.GLOBAL_SCOPE ENTRY_EXISTENCE_ = const.gENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMgENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXgENTRY_ ENTRY_NUMELEMS_ = const.gENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.gENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.gENTRY_DATASPEC_ class AttrList(MutableMapping): """Object representing a list of attributes. .. warning:: This class should not be used directly, but only via its subclasses, :class:`gAttrList` and :class:`zAttrList`. Methods listed here are safe to use from the subclasses. .. autosummary:: ~AttrList.clone ~AttrList.copy ~AttrList.from_dict ~AttrList.new ~AttrList.rename .. automethod:: clone .. automethod:: copy .. automethod:: from_dict .. automethod:: new .. automethod:: rename """ def __init__(self, cdf_file, special_entry=None): """Initialize the attribute collection @param cdf_file: CDF these attributes are in @type cdf_file: :py:class:`pycdf.CDF` @param special_entry: callable which returns a "special" entry number, used to limit results for zAttrs to those which match the zVar (i.e. the var number) @type special_entry: callable """ self._cdf_file = cdf_file self.special_entry = special_entry def __getitem__(self, name): """Find an Attribute by name @param name: name of the Attribute to return @type name: str @return: attribute named L{name} @rtype: L{Attr} @raise KeyError: if there is no attribute named L{name} @raise CDFError: other errors in CDF library """ try: attrib = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attrib.global_scope() != self.global_scope: raise KeyError(name + ': no ' + self.attr_name + ' by that name.') return attrib def __setitem__(self, name, data): """Create an Attribute or change its entries @param name: name of Attribute to change @type name: str @param data: Entries to populate this Attribute with. Any existing Entries will be deleted! Another C{Attr} may be specified, in which case all its entries are copied. @type data: scalar, list, or L{Attr} """ if isinstance(data, AttrList): if name in self: del self[name] attr = self._get_or_create(name) for entryno in range(data.max_idx()): if data.has_entry(entryno): attr.new(data[entryno], data.type(entryno), entryno) else: attr = self._get_or_create(name) if isinstance(data, str_classes): data = [data] else: try: junk = len(data) except TypeError: data = [data] attr[:] = data del attr[len(data):] def __delitem__(self, name): """Delete an Attribute (and all its entries) @param name: name of Attribute to delete @type name: str """ try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) attr._delete() def __iter__(self, current=0): """Iterates over all Attr in this CDF or variable Returns name of one L{Attr} at a time until reaches the end. @note: Returned in number order. 
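For example (a sketch; ``cdffile`` is assumed to be an open CDF and the attribute name and value are hypothetical):

>>> for name in cdffile.attrs:   #gAttribute names, in number order
...     print(name)
>>> cdffile.attrs['Project'] = 'ISTP'   #create or overwrite a gAttribute with a single gEntry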
""" count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) while current < count.value: candidate = self.AttrType(self._cdf_file, current) if candidate.global_scope() == self.global_scope: if self.special_entry is None or \ candidate.has_entry(self.special_entry()): if str == bytes: value = yield(candidate._name) else: value = yield(candidate._name.decode()) if value != None: current = self[value].number() current += 1 def __repr__(self): """Returns representation of attribute list Cannot return anything that can be eval'd to create a copy of the list, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<' + self.__class__.__name__ + ':\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute list This is an 'informal' representation in that it cannot be evaluated directly to create an L{AttrList}. @return: all the data in this list of attributes @rtype: str """ if self._cdf_file._opened: return '\n'.join([key + ': ' + ( ('\n' + ' ' * (len(key) + 2)).join( [str(value[i]) + ' [' + lib.cdftypenames[value.type(i)] + ']' for i in range(value.max_idx() + 1) if value.has_entry(i)]) if isinstance(value, Attr) else str(value) + ' [' + lib.cdftypenames[self.type(key)] + ']' ) for (key, value) in sorted(self.items())]) else: if isinstance(self._cdf_file.pathname, str): return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname) else: return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname.decode('ascii')) def clone(self, master, name=None, new_name=None): """ Clones another attribute list, or one attribute from it, into this list. Parameters ========== master : AttrList the attribute list to copy from. This can be any dict-like object. Other Parameters ================ name : str (optional) name of attribute to clone (default: clone entire list) new_name : str (optional) name of the new attribute, default ``name`` """ if name is None: self._clone_list(master) else: self._clone_attr(master, name, new_name) def copy(self): """ Create a copy of this attribute list Returns ======= out : dict copy of the entries for all attributes in this list """ return dict((key, value[:] if isinstance(value, Attr) else value) for (key, value) in self.items()) def new(self, name, data=None, type=None): """ Create a new Attr in this AttrList Parameters ========== name : str name of the new Attribute Other Parameters ================ data data to put into the first entry in the new Attribute type CDF type of the first entry from :mod:`~pycdf.const`. Only used if data are specified. Raises ====== KeyError : if the name already exists in this list """ if name in self: raise KeyError(name + ' already exists.') #A zAttr without an Entry in this zVar will be a "get" not "create" attr = self._get_or_create(name) if data is not None: if self.special_entry is None: attr.new(data, type) else: attr.new(data, type, self.special_entry()) def rename(self, old_name, new_name): """ Rename an attribute in this list Renaming a zAttribute renames it for *all* zVariables in this CDF! Parameters ========== old_name : str the current name of the attribute new_name : str the new name of the attribute """ AttrList.__getitem__(self, old_name).rename(new_name) def from_dict(self, in_dict): """ Fill this list of attributes from a dictionary .. deprecated:: 0.1.5 Use :meth:`~pycdf.AttrList.clone` instead; it supports cloning from dictionaries. 
Parameters ========== in_dict : dict Attribute list is populated entirely from this dictionary; all existing attributes are deleted. """ warnings.warn("from_dict is deprecated and will be removed. Use clone.", DeprecationWarning) for k in in_dict: self[k] = in_dict[k] for k in list(self): if not k in in_dict: del self[k] def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{AttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name self[new_name] = master[name] def _clone_list(self, master): """Clones this attribute list from another @param master: the attribute list to copy from @type master: L{AttrList} """ for name in master: self._clone_attr(master, name) for name in list(self): #Can't iterate over a list we're changing if not name in master: del self[name] def _get_or_create(self, name): """Retrieve L{Attr} or create it if it doesn't exist @param name: name of the attribute to look up or create @type name: str @return: attribute with this name @rtype: L{Attr} """ attr = None try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status != const.NO_SUCH_ATTR: raise if attr is None: attr = self.AttrType(self._cdf_file, name, True) elif attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) return attr class gAttrList(AttrList): """ Object representing *all* the gAttributes in a CDF. Normally accessed as an attribute of an open :class:`CDF`: >>> global_attribs = cdffile.attrs Appears as a dictionary: keys are attribute names; each value is an attribute represented by a :class:`gAttr` object. To access the global attribute TEXT: >>> text_attr = cdffile.attrs['TEXT'] See Also ======== :class:`AttrList` """ AttrType = gAttr attr_name = 'gAttribute' global_scope = True def __len__(self): """ Number of gAttributes in this CDF Returns ======= out : int number of gAttributes in the CDF """ count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMgATTRS_, ctypes.byref(count)) return count.value class zAttrList(AttrList): """Object representing *all* the zAttributes in a zVariable. Normally accessed as an attribute of a :class:`Var` in an open CDF: >>> epoch_attribs = cdffile['Epoch'].attrs Appears as a dictionary: keys are attribute names, values are the value of the zEntry associated with the appropriate zVariable. Each vAttribute in a CDF may only have a *single* entry associated with each variable. The entry may be a string, a single numerical value, or a series of numerical values. Entries with multiple values are returned as an entire list; direct access to the individual elements is not possible. Example: finding the first dependency of (ISTP-compliant) variable ``Flux``: >>> print cdffile['Flux'].attrs['DEPEND_0'] zAttributes are shared among zVariables, one zEntry allowed per zVariable. (pyCDF hides this detail.) Deleting the last zEntry for a zAttribute will delete the underlying zAttribute. zEntries are created and destroyed by the usual dict methods on the zAttrlist: >>> epoch_attribs['new_entry'] = [1, 2, 4] #assign a list to new zEntry >>> del epoch_attribs['new_entry'] #delete the zEntry The type of the zEntry is guessed from data provided. 
The type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing zEntry corresponding to this zVar #. other zEntries in this zAttribute #. the type of this zVar #. data-matching constraints described in :py:meth:`CDF.new` See Also ======== :class:`AttrList` """ AttrType = zAttr attr_name = 'zAttribute' global_scope = False def __init__(self, zvar): """Initialize the attribute collection @param zvar: zVariable these attributes are in @param zvar: :py:class:`pycdf.Var` """ super(zAttrList, self).__init__(zvar.cdf_file, zvar._num) self._zvar = zvar def __getitem__(self, name): """Find an zEntry by name @param name: name of the zAttribute to return @type name: str @return: attribute named L{name} @rtype: L{zAttr} @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if attrib.has_entry(zvar_num): attrib._raw = self._zvar._raw return attrib[zvar_num] else: raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) def __delitem__(self, name): """Delete an zEntry by name @param name: name of the zEntry to delete @type name: str @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library @note: If this is the only remaining entry, the Attribute will be deleted. """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(str(name) + ': no such attribute for variable ' + str(self._zvar._name)) del attrib[zvar_num] if len(attrib) == 0: attrib._delete() def __setitem__(self, name, data): """Sets a zEntry by name The type of the zEntry is guessed from L{data}. The type is chosen to match the data; subject to that constraint, it will try to match (in order): 1. existing zEntry corresponding to this zVar 2. other zEntries in this zAttribute 3. the type of this zVar 4. data-matching constraints described in L{_Hyperslice.types} @param name: name of zAttribute; zEntry for this zVariable will be set in zAttribute by this name @type name: str @raise CDFError: errors in CDF library @raise ValueError: if unable to find a valid CDF type matching L{data}, or if L{data} is the wrong dimensions. """ try: attr = super(zAttrList, self).__getitem__(name) except KeyError: attr = zAttr(self._cdf_file, name, True) attr._raw = self._zvar._raw attr[self._zvar._num()] = data def __len__(self): """Number of zAttributes in this variable @return: number of zAttributes in the CDF which have entries for this variable. @rtype: int """ length = 0 count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) current = 0 while current < count.value: candidate = zAttr(self._cdf_file, current) if not candidate.global_scope(): if candidate.has_entry(self._zvar._num()): length += 1 current += 1 return length def type(self, name, new_type=None): """Find or change the CDF type of a zEntry in this zVar @param name: name of the zAttr to check or change @type name: str @param new_type: type to change it to, see :py:mod:`pycdf.const` @type new_type: ctypes.c_long @return: CDF variable type, see :py:mod:`pycdf.const` @rtype: int @note: If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) return attrib.type(zvar_num, new_type) def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{zAttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name if new_name in self: del self[new_name] self.new(new_name, master[name], master.type(name) if hasattr(master, 'type') else None)
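#A short usage sketch for the zAttrList machinery above (not part of the
#library; 'Flux', 'DEPEND_0' and 'Epoch' are hypothetical names and ``cdffile``
#is assumed to be an open, writable CDF):
flux_attrs = cdffile['Flux'].attrs          #zAttrList for one zVariable
flux_attrs['DEPEND_0'] = 'Epoch'            #create or overwrite this variable's zEntry
depend_type = flux_attrs.type('DEPEND_0')   #CDF type of that zEntry
#Deleting the last remaining zEntry also deletes the underlying zAttribute
del flux_attrs['DEPEND_0']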
_create
Creates a new zVariable @param var_name: name of this variable @type var_name: string @param datatype: CDF data type @type datatype: ctypes.c_long @param n_elements: number of elements (should be 1 except for CDF_CHAR variables). @type n_elements: long @param dims: size of each dimension for multi-dimensional variable, or empty for a zero-dimensional @type dims: sequence of long @param recVary: record variance for this variable (VARY/NOVARY) @type recVary: long @param dimVarys: array of VARY or NOVARY, variance for each dimension @type dimVarys: sequence of long @return: new variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.new}.
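A sketch of the public route mentioned above (``cdffile`` is assumed to be an open, writable CDF; the variable names are hypothetical, and only the ``data`` and ``type`` arguments of :meth:`CDF.new` are shown):

>>> flux_model = cdffile.new('FluxModel', data=[1.0, 2.0, 3.0])   #type guessed from data
>>> counts = cdffile.new('Counts', data=[1, 2, 3], type=const.CDF_INT4)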
#!/usr/bin/env python # -*- coding: utf-8 -*- """ das developers note: This a is modification of the original SpacePy pycdf package. All refereneces to the greater spacepy package have been removed to create a small standalone module. --cwp 2018-10-18 The libcdf.so location code has been changed to find the version installed in anaconda. --cwp 2020-04-06 This package provides a Python interface to the Common Data Format (CDF) library used for many NASA missions, available at http://cdf.gsfc.nasa.gov/. It is targeted at Python 2.6+ and should work without change on either Python 2 or Python 3. The interface is intended to be 'pythonic' rather than reproducing the C interface. To open or close a CDF and access its variables, see the :class:`CDF` class. Accessing data within the variables is via the :class:`Var` class. The :data:`lib` object provides access to some routines that affect the functionality of the library in general. The :mod:`~pycdf.const` module contains constants useful for accessing the underlying library. Authors: Jon Niehof Institution: University of New Hampshire Contact: [email protected] Copyright 2010-2015 Los Alamos National Security, LLC. """ __contact__ = 'Jon Niehof, [email protected]' try: from collections.abc import MutableMapping, MutableSequence except ImportError: from collections import MutableMapping, MutableSequence import ctypes import ctypes.util import datetime import operator import os import os.path import shutil import sys import tempfile import warnings import weakref import numpy import numpy.ma #Import const AFTER library loaded, so failed load doesn't leave half-imported #from . import const try: str_classes = (str, bytes, unicode) except NameError: str_classes = (str, bytes) class Library(object): """ Abstraction of the base CDF C library and its state. Not normally intended for end-user use. An instance of this class is created at package load time as the :data:`~pycdf.lib` variable, providing access to the underlying C library if necessary. The CDF library itself is described in section 2.1 of the CDF user's guide, as well as the CDF C reference manual. Calling the C library directly requires knowledge of :mod:`ctypes`. Instantiating this object loads the C library, see :doc:`/pycdf` docs for details. .. autosummary:: ~Library.call ~Library.check_status ~Library.datetime_to_epoch ~Library.datetime_to_epoch16 ~Library.datetime_to_tt2000 ~Library.epoch_to_datetime ~Library.epoch_to_epoch16 ~Library.epoch_to_num ~Library.epoch_to_tt2000 ~Library.epoch16_to_datetime ~Library.epoch16_to_epoch ~Library.epoch16_to_tt2000 ~Library.set_backward supports_int8 ~Library.tt2000_to_datetime ~Library.tt2000_to_epoch ~Library.tt2000_to_epoch16 v_datetime_to_epoch v_datetime_to_epoch16 v_datetime_to_tt2000 v_epoch_to_datetime v_epoch_to_tt2000 v_epoch16_to_datetime v_epoch16_to_tt2000 v_tt2000_to_datetime v_tt2000_to_epoch v_tt2000_to_epoch16 libpath version .. automethod:: call .. automethod:: check_status .. automethod:: datetime_to_epoch .. automethod:: datetime_to_epoch16 .. automethod:: datetime_to_tt2000 .. automethod:: epoch_to_datetime .. automethod:: epoch_to_epoch16 .. automethod:: epoch_to_num .. automethod:: epoch_to_tt2000 .. automethod:: epoch16_to_datetime .. automethod:: epoch16_to_epoch .. automethod:: epoch16_to_tt2000 .. automethod:: set_backward .. attribute:: supports_int8 True if this library supports INT8 and TIME_TT2000 types; else False. .. automethod:: tt2000_to_datetime .. automethod:: tt2000_to_epoch .. 
automethod:: tt2000_to_epoch16 .. method:: v_datetime_to_epoch(datetime) A vectorized version of :meth:`datetime_to_epoch` which takes a numpy array of datetimes as input and returns an array of epochs. .. method:: v_datetime_to_epoch16(datetime) A vectorized version of :meth:`datetime_to_epoch16` which takes a numpy array of datetimes as input and returns an array of epoch16. .. method:: v_datetime_to_tt2000(datetime) A vectorized version of :meth:`datetime_to_tt2000` which takes a numpy array of datetimes as input and returns an array of TT2000. .. method:: v_epoch_to_datetime(epoch) A vectorized version of :meth:`epoch_to_datetime` which takes a numpy array of epochs as input and returns an array of datetimes. .. method:: v_epoch_to_tt2000(epoch) A vectorized version of :meth:`epoch_to_tt2000` which takes a numpy array of epochs as input and returns an array of tt2000s. .. method:: v_epoch16_to_datetime(epoch0, epoch1) A vectorized version of :meth:`epoch16_to_datetime` which takes a numpy array of epoch16 as input and returns an array of datetimes. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_epoch16_to_tt2000(epoch16) A vectorized version of :meth:`epoch16_to_tt2000` which takes a numpy array of epoch16 as input and returns an array of tt2000s. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_tt2000_to_datetime(tt2000) A vectorized version of :meth:`tt2000_to_datetime` which takes a numpy array of tt2000 as input and returns an array of datetimes. .. method:: v_tt2000_to_epoch(tt2000) A vectorized version of :meth:`tt2000_to_epoch` which takes a numpy array of tt2000 as input and returns an array of epochs. .. method:: v_tt2000_to_epoch16(tt2000) A vectorized version of :meth:`tt2000_to_epoch16` which takes a numpy array of tt2000 as input and returns an array of epoch16. .. attribute:: libpath The path where pycdf found the CDF C library, potentially useful in debugging. If this contains just the name of a file (with no path information), then the system linker found the library for pycdf. On Linux, ``ldconfig -p`` may be useful for displaying the system's library resolution. .. attribute:: version Version of the CDF library, (version, release, increment, subincrement) """ def __init__(self, libpath=None, library=None): """Load the CDF C library. Searches for the library in the order: 1. Appropriately-named file in CDF_LIB 2. Appropriately-named file in CDF_BASE 3. Standard library search path @raise CDFError: BAD_DATA_TYPE if can't map types properly """ if not 'CDF_TMP' in os.environ: os.environ['CDF_TMP'] = tempfile.gettempdir() if not library: if not libpath: self.libpath, self._library = self._find_lib() if self._library is None: raise Exception(( 'Cannot load CDF C library; checked {0}. 
' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(self.libpath))) else: self._library = ctypes.CDLL(libpath) self.libpath = libpath else: self._library = library self.libpath = libpath self._library.CDFlib.restype = ctypes.c_long #commonly used, so set it up here self._library.EPOCHbreakdown.restype = ctypes.c_long self._library.computeEPOCH.restype = ctypes.c_double self._library.computeEPOCH.argtypes = [ctypes.c_long] * 7 self._library.computeEPOCH16.restype = ctypes.c_double self._library.computeEPOCH16.argtypes = [ctypes.c_long] * 10 + \ [ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDFsetFileBackward'): self._library.CDFsetFileBackward.restype = None self._library.CDFsetFileBackward.argtypes = [ctypes.c_long] #Map old name to the 3.7.1+ name if not hasattr(self._library, 'computeTT2000') \ and hasattr(self._library, 'CDF_TT2000_from_UTC_parts'): self._library.computeTT2000 \ = self._library.CDF_TT2000_from_UTC_parts if hasattr(self._library, 'computeTT2000'): self._library.computeTT2000.restype = ctypes.c_longlong self._library.computeTT2000.argtypes = \ [ctypes.c_double] *9 #Map old name to the 3.7.1+ name if not hasattr(self._library, 'breakdownTT2000') \ and hasattr(self._library, 'CDF_TT2000_to_UTC_parts'): self._library.breakdownTT2000 \ = self._library.CDF_TT2000_to_UTC_parts if hasattr(self._library, 'breakdownTT2000'): self._library.breakdownTT2000.restype = None self._library.breakdownTT2000.argtypes = \ [ctypes.c_longlong] + [ctypes.POINTER(ctypes.c_double)] * 9 if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH'): self._library.CDF_TT2000_to_UTC_EPOCH.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH.argtypes = [ctypes.c_longlong] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH'): self._library.CDF_TT2000_from_UTC_EPOCH.restype = ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH.argtypes = [ctypes.c_double] if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH16'): self._library.CDF_TT2000_to_UTC_EPOCH16.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH16.argtypes = \ [ctypes.c_longlong, ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH16'): self._library.CDF_TT2000_from_UTC_EPOCH16.restype = \ ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH16.argtypes = \ [ctypes.POINTER(ctypes.c_double * 2)] #Get CDF version information ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) sub = ctypes.c_char(b' ') self.call(const.GET_, const.LIB_VERSION_, ctypes.byref(ver), const.GET_, const.LIB_RELEASE_, ctypes.byref(rel), const.GET_, const.LIB_INCREMENT_, ctypes.byref(inc), const.GET_, const.LIB_subINCREMENT_, ctypes.byref(sub)) ver = ver.value rel = rel.value inc = inc.value sub = sub.value self.version = (ver, rel, inc, sub) self._del_middle_rec_bug = ver < 3 or (ver == 3 and (rel < 4 or (rel == 4 and inc < 1))) self.supports_int8 = (ver > 3 or (ver == 3 and rel >=4)) self.cdftypenames = {const.CDF_BYTE.value: 'CDF_BYTE', const.CDF_CHAR.value: 'CDF_CHAR', const.CDF_INT1.value: 'CDF_INT1', const.CDF_UCHAR.value: 'CDF_UCHAR', const.CDF_UINT1.value: 'CDF_UINT1', const.CDF_INT2.value: 'CDF_INT2', const.CDF_UINT2.value: 'CDF_UINT2', const.CDF_INT4.value: 'CDF_INT4', const.CDF_UINT4.value: 'CDF_UINT4', const.CDF_INT8.value: 'CDF_INT8', const.CDF_FLOAT.value: 'CDF_FLOAT', const.CDF_REAL4.value: 'CDF_REAL4', const.CDF_DOUBLE.value: 'CDF_DOUBLE', const.CDF_REAL8.value: 'CDF_REAL8', const.CDF_EPOCH.value: 'CDF_EPOCH', 
const.CDF_EPOCH16.value: 'CDF_EPOCH16', const.CDF_TIME_TT2000.value: 'CDF_TIME_TT2000', } self.numpytypedict = {const.CDF_BYTE.value: numpy.int8, const.CDF_CHAR.value: numpy.int8, const.CDF_INT1.value: numpy.int8, const.CDF_UCHAR.value: numpy.uint8, const.CDF_UINT1.value: numpy.uint8, const.CDF_INT2.value: numpy.int16, const.CDF_UINT2.value: numpy.uint16, const.CDF_INT4.value: numpy.int32, const.CDF_UINT4.value: numpy.uint32, const.CDF_INT8.value: numpy.int64, const.CDF_FLOAT.value: numpy.float32, const.CDF_REAL4.value: numpy.float32, const.CDF_DOUBLE.value: numpy.float64, const.CDF_REAL8.value: numpy.float64, const.CDF_EPOCH.value: numpy.float64, const.CDF_EPOCH16.value: numpy.dtype((numpy.float64, 2)), const.CDF_TIME_TT2000.value: numpy.int64, } self.timetypes = [const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value] if not self.supports_int8: del self.cdftypenames[const.CDF_INT8.value] del self.numpytypedict[const.CDF_INT8.value] del self.cdftypenames[const.CDF_TIME_TT2000.value] del self.numpytypedict[const.CDF_TIME_TT2000.value] elif sys.platform.startswith('linux') \ and os.uname()[4].startswith('arm') \ and hasattr(self._library, 'computeTT2000') \ and self._library.computeTT2000( 2010, 1, 1, 0, 0, 0, 0, 0, 0) != 315576066184000000: #TT2000 call failed, so probably need to type-pun #double arguments to variadic functions. #Calling convention for non-variadic functions with floats #is unique, but convention for ints is same as variadic. #So type-pun arguments to integers to force that calling #convention. if ctypes.sizeof(ctypes.c_longlong) != \ ctypes.sizeof(ctypes.c_double): warnings.warn('ARM with unknown type sizes; ' 'TT2000 functions will not work.') else: self._library.computeTT2000.argtypes = \ [ctypes.c_longlong] * 9 c_ll_p = ctypes.POINTER(ctypes.c_longlong) if self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( 2010)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) != 315576066184000000: warnings.warn('ARM with unknown calling convention; ' 'TT2000 functions will not work.') self.datetime_to_tt2000 = self._datetime_to_tt2000_typepunned v_epoch16_to_datetime = numpy.frompyfunc( self.epoch16_to_datetime, 2, 1) self.v_epoch16_to_datetime = \ lambda x: v_epoch16_to_datetime(x[..., 0], x[..., 1]) self.v_epoch_to_datetime = numpy.frompyfunc( self.epoch_to_datetime, 1, 1) self.v_tt2000_to_datetime = numpy.frompyfunc( self.tt2000_to_datetime, 1, 1) self.v_datetime_to_epoch = numpy.vectorize( self.datetime_to_epoch, otypes=[numpy.float64]) v_datetime_to_epoch16 = numpy.frompyfunc( self.datetime_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. 
We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_datetime_to_epoch16(x): retval = numpy.require(v_datetime_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_datetime_to_epoch16 = _v_datetime_to_epoch16 self.v_datetime_to_tt2000 = numpy.vectorize( self.datetime_to_tt2000, otypes=[numpy.int64]) self.v_epoch_to_tt2000 = numpy.vectorize( self.epoch_to_tt2000, otypes=[numpy.int64]) self.v_tt2000_to_epoch = numpy.vectorize( self.tt2000_to_epoch, otypes=[numpy.float64]) v_epoch16_to_tt2000 = numpy.frompyfunc( self.epoch16_to_tt2000, 2, 1) self.v_epoch16_to_tt2000 = \ lambda x: v_epoch16_to_tt2000(x[..., 0], x[..., 1]) v_tt2000_to_epoch16 = numpy.frompyfunc( self.tt2000_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_tt2000_to_epoch16(x): retval = numpy.require(v_tt2000_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_tt2000_to_epoch16 = _v_tt2000_to_epoch16 if not self.supports_int8: self.datetime_to_tt2000 = self._bad_tt2000 self.tt2000_to_datetime = self._bad_tt2000 self.v_datetime_to_tt2000 = self._bad_tt2000 self.v_tt2000_to_datetime = self._bad_tt2000 self.epoch_to_tt2000 = self._bad_tt2000 self.v_epoch_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch = self._bad_tt2000 self.v_tt2000_to_epoch = self._bad_tt2000 self.epoch_16_to_tt2000 = self._bad_tt2000 self.v_epoch16_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch16 = self._bad_tt2000 self.v_tt2000_to_epoch16 = self._bad_tt2000 #Default to V2 CDF self.set_backward(True) @staticmethod def _find_lib(): """ Search for the CDF library Searches in likely locations for CDF libraries and attempts to load them. Stops at first successful load and, if fails, reports all the files that were tried as libraries. Returns ======= out : tuple This is either (path to library, loaded library) or, in the event of failure, (None, list of libraries tried) """ failed = [] for libpath in Library._lib_paths(): try: lib = ctypes.CDLL(libpath) except: failed.append(libpath) else: return libpath, lib return (failed, None) @staticmethod def _lib_paths(): """Find candidate paths for the CDF library Does not check that the library is actually in any particular directory, just returns a list of possible locations, in priority order. Returns ======= out : generator of str paths that look like the CDF library """ #What the library might be named names = { 'win32': ['cdf.dll'], 'darwin': ['libcdf.dylib', 'cdf.dylib', 'libcdf.so'], 'linux2': ['libcdf.so'], 'linux': ['libcdf.so'], } names = names.get(sys.platform, ['libcdf.so']) #All existing CDF-library-like paths within a directory search_dir = lambda x: \ [os.path.join(x, fname) for fname in names if os.path.exists(os.path.join(x, fname))] # Only use anaconda locations... # Defined during builds ... if 'PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['PREFIX'], 'lib')): yield p # defined when conda is activated ... 
if 'CONDA_PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'lib')): yield p # Special subdirectory for anaconda unix packages on windows if 'LIBRARY_BIN' in os.environ: for p in search_dir(os.environ['LIBRARY_BIN']): yield p ctypespath = ctypes.util.find_library( 'cdf.dll' if sys.platform == 'win32' else 'cdf') if ctypespath: yield ctypespath def check_status(self, status, ignore=()): """ Raise exception or warning based on return status of CDF call Parameters ========== status : int status returned by the C library Other Parameters ================ ignore : sequence of ctypes.c_long CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. (Default none). Raises ====== CDFError : if status < CDF_WARN, indicating an error Warns ===== CDFWarning : if CDF_WARN <= status < CDF_OK, indicating a warning. Returns ======= out : int status (unchanged) """ if status == const.CDF_OK or status in ignore: return status if status < const.CDF_WARN: raise CDFError(status) else: warning = CDFWarning(status) warning.warn() return status def call(self, *args, **kwargs): """ Call the CDF internal interface Passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with :meth:`check_status`. Terminal NULL is automatically added to args. Parameters ========== args : various, see :mod:`ctypes` Passed directly to the CDF library interface. Useful constants are defined in the :mod:`~pycdf.const` module. Other Parameters ================ ignore : sequence of CDF statuses sequence of CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. Returns ======= out : int CDF status from the library Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ if 'ignore' in kwargs: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) ), kwargs['ignore']) else: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) )) def set_backward(self, backward=True): """ Set backward compatibility mode for new CDFs Unless backward compatible mode is set, CDF files created by the version 3 library can not be read by V2. Parameters ========== backward : boolean Set backward compatible mode if True; clear it if False. Raises ====== ValueError : if backward=False and underlying CDF library is V2 """ if self.version[0] < 3: if not backward: raise ValueError( 'Cannot disable backward-compatible mode for CDF version 2.') else: return self._library.CDFsetFileBackward(const.BACKWARDFILEon if backward else const.BACKWARDFILEoff) def epoch_to_datetime(self, epoch): """ Converts a CDF epoch value to a datetime Parameters ========== epoch : float epoch value from CDF Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
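For example, a round trip through the module-level :data:`lib` instance (a sketch):

>>> import datetime
>>> epoch = lib.datetime_to_epoch(datetime.datetime(2010, 1, 1))
>>> lib.epoch_to_datetime(epoch)
datetime.datetime(2010, 1, 1, 0, 0)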
See Also ======== v_epoch_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) self._library.EPOCHbreakdown(ctypes.c_double(epoch), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999000) else: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, msec.value * 1000) def datetime_to_epoch(self, dt): """ Converts a Python datetime to a CDF Epoch value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : float epoch corresponding to dt See Also ======== v_datetime_to_epoch """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) micro = dt.microsecond % 1000 if micro >= 500 and dt.year < 9999: dt += datetime.timedelta(0, 0, 1000) return self._library.computeEPOCH(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000)) def epoch16_to_datetime(self, epoch0, epoch1): """ Converts a CDF epoch16 value to a datetime .. note:: The call signature has changed since SpacePy 0.1.2. Formerly this method took a single argument with two values; now it requires two arguments (one for each value). To convert existing code, replace ``epoch16_to_datetime(epoch)`` with ``epoch16_to_datetime(*epoch)``. Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. See Also ======== v_epoch16_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) usec = ctypes.c_long(0) nsec = ctypes.c_long(0) psec = ctypes.c_long(0) self._library.EPOCH16breakdown((ctypes.c_double * 2)(epoch0, epoch1), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec), ctypes.byref(psec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) micro = int(float(msec.value) * 1000 + float(usec.value) + float(nsec.value) / 1000 + float(psec.value) / 1e6 + 0.5) if micro < 1000000: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_epoch16(self, dt): """ Converts a Python datetime to a CDF Epoch16 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : list of float epoch16 corresponding to dt See Also ======== v_datetime_to_epoch16 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) #Default to "illegal epoch" epoch16 = (ctypes.c_double * 2)(-1., -1.) 
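#computeEPOCH16 (below) fills epoch16 in place; a non-zero return is treated as failure, so the "illegal epoch" pair (-1., -1.) is returned unchanged.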
if self._library.computeEPOCH16(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0, 0, epoch16): return (-1., -1.) #Failure, so illegal epoch return (epoch16[0], epoch16[1]) def epoch_to_epoch16(self, epoch): """ Converts a CDF EPOCH to a CDF EPOCH16 value Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : (double, double) EPOCH16 corresponding to epoch """ e = numpy.require(epoch, numpy.float64) s = numpy.trunc(e / 1000.0) #ugly numpy stuff, probably a better way.... res = numpy.hstack((s, (e - s * 1000.0) * 1e9)) if len(res) <= 2: return res newshape = list(res.shape[0:-2]) newshape.append(res.shape[-1] // 2) newshape.append(2) return numpy.rollaxis(res.reshape(newshape), -1, -2) def epoch_to_num(self, epoch): """ Convert CDF EPOCH to matplotlib number. Same output as :func:`~matplotlib.dates.date2num` and useful for plotting large data sets without converting the times through datetime. Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : double Floating point number representing days since 0001-01-01. """ #date2num day 1 is 1/1/1 00UT #epoch 1/1/1 00UT is 31622400000.0 (millisecond) return (epoch - 31622400000.0) / (24 * 60 * 60 * 1000.0) + 1.0 def epoch16_to_epoch(self, epoch16): """ Converts a CDF EPOCH16 to a CDF EPOCH value Parameters ========== epoch16 : (double, double) EPOCH16 to convert. Lists and numpy arrays are acceptable. LAST dimension should be 2: the two pairs of EPOCH16 Returns ======= out : double EPOCH corresponding to epoch16 """ e = numpy.require(epoch16, numpy.float64) return e[..., 0] * 1000.0 + numpy.round(e[..., 1] / 1e9) def tt2000_to_datetime(self, tt2000): """ Converts a CDF TT2000 value to a datetime .. note:: Although TT2000 values support leapseconds, Python's datetime object does not. Any times after 23:59:59.999999 will be truncated to 23:59:59.999999. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
See Also ======== v_tt2000_to_datetime """ yyyy = ctypes.c_double(0) mm = ctypes.c_double(0) dd = ctypes.c_double(0) hh = ctypes.c_double(0) min = ctypes.c_double(0) sec = ctypes.c_double(0) msec = ctypes.c_double(0) usec = ctypes.c_double(0) nsec = ctypes.c_double(0) self._library.breakdownTT2000( ctypes.c_longlong(tt2000), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) sec = int(sec.value) if sec >= 60: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), 59, 999999) micro = int(msec.value * 1000 + usec.value + nsec.value / 1000 + 0.5) if micro < 1000000: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_tt2000(self, dt): """ Converts a Python datetime to a CDF TT2000 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0) def _datetime_to_tt2000_typepunned(self, dt): """ Converts a Python datetime to a CDF TT2000 value Typepunned version that passes doubles as longlongs, to get around ARM calling convention oddness. Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ c_ll_p = ctypes.POINTER(ctypes.c_longlong) if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( dt.year)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.month)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.day)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.hour)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.minute)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.second)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond // 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond % 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) def epoch_to_tt2000(self, epoch): """ Converts a CDF EPOCH to a CDF TT2000 value Parameters ========== epoch : double EPOCH to convert Returns ======= out : int tt2000 corresponding to epoch See Also ======== v_epoch_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH(epoch) def tt2000_to_epoch(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH .. note:: Although TT2000 values support leapseconds, CDF EPOCH values do not. 
Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double EPOCH corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch """ return self._library.CDF_TT2000_to_UTC_EPOCH(tt2000) def epoch16_to_tt2000(self, epoch0, epoch1): """ Converts a CDF epoch16 value to TT2000 .. note:: Because TT2000 does not support picoseconds, the picoseconds value in epoch is ignored (i.e., truncated.) Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : long TT2000 corresponding to epoch. See Also ======== v_epoch16_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH16( (ctypes.c_double * 2)(epoch0, epoch1)) def tt2000_to_epoch16(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH16 .. note:: Although TT2000 values support leapseconds, CDF EPOCH16 values do not. Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double, double EPOCH16 corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch16 """ #Default to "illegal epoch" if isn't populated epoch16 = (ctypes.c_double * 2)(-1., -1.) if self._library.CDF_TT2000_to_UTC_EPOCH16(tt2000, epoch16): return (-1., -1.) #Failure; illegal epoch return (epoch16[0], epoch16[1]) def _bad_tt2000(*args, **kwargs): """Convenience function for complaining that TT2000 not supported""" raise NotImplementedError( 'TT2000 functions require CDF library 3.4.0 or later') def download_library(): """Download and install the CDF library""" if sys.platform != 'win32': raise NotImplementedError( 'CDF library install only supported on Windows') try: import html.parser as HTMLParser except ImportError: import HTMLParser #https://stackoverflow.com/questions/1713038/super-fails-with-error-typeerror-argument-1-must-be-type-not-classobj class LinkParser(HTMLParser.HTMLParser, object): def __init__(self, *args, **kwargs): self.links_found = [] super(LinkParser, self).__init__(*args, **kwargs) def handle_starttag(self, tag, attrs): if tag != 'a' or attrs[0][0] != 'href': return self.links_found.append(attrs[0][1]) import re import subprocess try: import urllib.request as u except ImportError: import urllib as u # Removed reference to spacepy #import spacepy #if spacepy.config.get('user_agent', None): # class AppURLopener(u.FancyURLopener): # version = spacepy.config['user_agent'] # u._urlopener = AppURLopener() baseurl = 'https://spdf.sci.gsfc.nasa.gov/pub/software/cdf/dist/' url = u.urlopen(baseurl) listing = url.read() url.close() p = LinkParser() p.feed(listing) cdfdist = [l for l in p.links_found if re.match('^cdf3\d_\d(?:_\d)?/$', l)] if not cdfdist: raise RuntimeError( "Couldn't find CDF distribution directory to download") cdfdist.sort(key=lambda x: x.rstrip('/').split('_')) cdfverbase = cdfdist[-1].rstrip('/') instfname = cdfverbase + ('_0' if cdfverbase.count('_') == 1 else '') + \ '-setup-{0}.exe'.format(len('%x' % sys.maxsize)*4) insturl = baseurl + cdfverbase + '/windows/' + instfname tmpdir = tempfile.mkdtemp() try: fname, status = u.urlretrieve(insturl, os.path.join(tmpdir, instfname)) subprocess.check_call([fname, '/install', '/q1'], shell=False) finally: shutil.rmtree(tmpdir) _libpath, _library = Library._find_lib() 
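#_find_lib returns (path to library, loaded library) on success, or (list of paths tried, None) on failure; the check below turns a failure into an exception listing every path that was attempted.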
if _library is None: raise Exception(('Cannot load CDF C library; checked {0}. ' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(_libpath))) from . import const lib = Library(_libpath, _library) """Module global library object. Initialized at module load time so all classes have ready access to the CDF library and a common state. E.g.: >>> import pycdf >>> pycdf.lib.version (3, 3, 0, ' ') """ class CDFException(Exception): """ Base class for errors or warnings in the CDF library. Not normally used directly, but in subclasses :class:`CDFError` and :class:`CDFWarning`. Error messages provided by this class are looked up from the underlying C library. """ def __init__(self, status): """ Create a CDF Exception Uses CDF C library to look up an appropriate error message. Parameters ========== status : ctypes.c_long CDF status """ self.status = status self.string = 'CDF error ' + repr(status) + ', unable to get details.' message = ctypes.create_string_buffer(const.CDF_STATUSTEXT_LEN + 1) try: retval = lib._library.CDFlib(const.SELECT_, const.CDF_STATUS_, ctypes.c_long(status), const.GET_, const.STATUS_TEXT_, message, const.NULL_) if retval == const.CDF_OK: if isinstance(message.value, str): self.string = message.value elif isinstance(message.value, bytes): self.string = message.value.decode() except: pass def __str__(self): """ Error string associated with the library error. Returns ======= out : str Error message from the CDF library. """ return self.string class CDFError(CDFException): """Raised for an error in the CDF library.""" pass class CDFWarning(CDFException, UserWarning): """Used for a warning in the CDF library.""" def warn(self, level=4): """ Issues a warning based on the information stored in this exception Intended for use in check_status or similar wrapper function. Other Parameters ================ level : int optional (default 4), how far up the stack the warning should be reported. Passed directly to :func:`warnings.warn`. """ warnings.warn(self, self.__class__, level) class EpochError(Exception): """Used for errors in epoch routines""" pass def _compress(obj, comptype=None, param=None): """Set or check the compression of a :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param obj: object on which to set or check compression @type obj: :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param comptype: type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :py:mod:`pycdf.const`. If not specified, will not change compression. @type comptype: ctypes.c_long @param param: Compression parameter, see CDF CRM 4.10 and :py:mod:`pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
@type param: ctypes.c_long @return: (comptype, param) currently in effect @rtype: tuple """ if isinstance(obj, CDF): COMPRESSION_ = const.CDF_COMPRESSION_ elif isinstance(obj, Var): COMPRESSION_ = const.zVAR_COMPRESSION_ else: raise ValueError('Must specify a CDF or Var type.') validparams = {const.NO_COMPRESSION.value: [ctypes.c_long(0)], const.RLE_COMPRESSION.value: [const.RLE_OF_ZEROs], const.HUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.AHUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.GZIP_COMPRESSION.value: [ctypes.c_long(5), ctypes.c_long(1), ctypes.c_long(2), ctypes.c_long(3), ctypes.c_long(4), ctypes.c_long(6), ctypes.c_long(7), ctypes.c_long(8), ctypes.c_long(9), ], } comptypes = [const.NO_COMPRESSION, const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION, const.GZIP_COMPRESSION] comptypevalues = [i.value for i in comptypes] if comptype != None: if not hasattr(comptype, 'value'): comptype = ctypes.c_long(comptype) if param is None: if not comptype.value in validparams: raise CDFError(const.BAD_COMPRESSION) param = validparams[comptype.value][0] paramlist = (ctypes.c_long * 1)(param) obj._call(const.PUT_, COMPRESSION_, comptype, paramlist) params = (ctypes.c_long * const.CDF_MAX_PARMS)(*([0] * const.CDF_MAX_PARMS)) comptype = ctypes.c_long(0) percent = ctypes.c_long(0) obj._call(const.GET_, COMPRESSION_, ctypes.byref(comptype), ctypes.byref(params), ctypes.byref(percent)) param = params[0] if not comptype.value in comptypevalues: raise CDFError(const.BAD_COMPRESSION) validparamvalues = [i.value for i in validparams[comptype.value]] if not param in validparamvalues: raise CDFError(const.BAD_COMPRESSION_PARM) comptype = comptypes[comptypevalues.index(comptype.value)] if comptype in (const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION): param = validparams[comptype.value][validparamvalues.index(param)] return (comptype, param) class CDF(MutableMapping): """ Python object representing a CDF file. Open or create a CDF file by creating an object of this class. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. A readonly CDF with many variables may be slow to close. See :meth:`readonly`. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :meth:`close` or :meth:`save` when done. .. note:: Existing CDF files are opened read-only by default, see :meth:`readonly` to change. CDF supports the `with <http://docs.python.org/tutorial/inputoutput.html#methods-of-file-objects>`_ keyword, like other file objects, so: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... #do brilliant things with the CDF will open the CDF, execute the indented statements, and close the CDF when finished or when an error occurs. The `python docs <http://docs.python.org/reference/compound_stmts.html#with>`_ include more detail on this 'context manager' ability. 
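A minimal sketch combining the context manager with the dictionary-style access described just below (the file name and the variable name ``Epoch`` are placeholders, not part of any particular file): >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ...     data = cdffile['Epoch'][...] #read every record of one variable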
CDF objects behave like a python `dictionary <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_, where the keys are names of variables in the CDF, and the values, :class:`Var` objects. As a dictionary, they are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_ and it is easy to loop over all of the variables in a file. Some examples: #. List the names of all variables in the open CDF ``cdffile``: >>> cdffile.keys() >>> for k in cdffile: #Alternate ... print(k) #. Get a :class:`Var` object for the variable named ``Epoch``: >>> epoch = cdffile['Epoch'] #. Determine if a CDF contains a variable named ``B_GSE``: >>> if 'B_GSE' in cdffile: ... print('B_GSE is in the file') ... else: ... print('B_GSE is not in the file') #. Find how many variables are in the file: >>> print(len(cdffile)) #. Delete the variable ``Epoch`` from the open CDF file ``cdffile``: >>> del cdffile['Epoch'] #. Display a summary of variables and types in open CDF file ``cdffile``: >>> print(cdffile) #. Open the CDF named ``cdf_filename.cdf``, read *all* the data from all variables into dictionary ``data``, and close it when done or if an error occurs: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... data = cdffile.copy() This last example can be very inefficient as it reads the entire CDF. Normally it's better to treat the CDF as a dictionary and access only the data needed, which will be pulled transparently from disc. See :class:`Var` for more subtle examples. Potentially useful dictionary methods and related functions: - `in <http://docs.python.org/reference/expressions.html#in>`_ - `keys <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_ - :py:func:`len` - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - :py:func:`sorted` - :py:func:`~spacepy.toolbox.dictree` The CDF user's guide section 2.2 has more background information on CDF files. The :attr:`~CDF.attrs` Python attribute acts as a dictionary referencing CDF attributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`gAttrList` for more on the dictionary of global attributes. Creating a new CDF from a master (skeleton) CDF has similar syntax to opening one: >>> cdffile = pycdf.CDF('cdf_filename.cdf', 'master_cdf_filename.cdf') This creates and opens ``cdf_filename.cdf`` as a copy of ``master_cdf_filename.cdf``. Using a skeleton CDF is recommended over making a CDF entirely from scratch, but this is possible by specifying a blank master: >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') When CDFs are created in this way, they are opened read-write, see :py:meth:`readonly` to change. By default, new CDFs (without a master) are created in version 2 (backward-compatible) format. To create a version 3 CDF, use :meth:`Library.set_backward`: >>> pycdf.lib.set_backward(False) >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') Add variables by direct assignment, which will automatically set type and dimension based on the data provided: >>> cdffile['new_variable_name'] = [1, 2, 3, 4] or, if more control is needed over the type and dimensions, use :py:meth:`new`. Although it is supported to assign Var objects to Python variables for convenience, there are some minor pitfalls that can arise when changing a CDF that will not affect most users. 
This is only a concern when assigning a zVar object to a Python variable, changing the CDF through some other variable, and then trying to use the zVar object via the originally assigned variable. Deleting a variable: >>> var = cdffile['Var1'] >>> del cdffile['Var1'] >>> var[0] #fail, no such variable Renaming a variable: >>> var = cdffile['Var1'] >>> cdffile['Var1'].rename('Var2') >>> var[0] #fail, no such variable Renaming via the same variable works: >>> var = cdffile['Var1'] >>> var.rename('Var2') >>> var[0] #succeeds, aware of new name Deleting a variable and then creating another variable with the same name may lead to some surprises: >>> var = cdffile['Var1'] >>> var[...] = [1, 2, 3, 4] >>> del cdffile['Var1'] >>> cdffile.new('Var1', data=[5, 6, 7, 8]) >>> var[...] [5, 6, 7, 8] .. autosummary:: ~CDF.attr_num ~CDF.attrs ~CDF.add_attr_to_cache ~CDF.add_to_cache ~CDF.backward ~CDF.checksum ~CDF.clear_attr_from_cache ~CDF.clear_from_cache ~CDF.clone ~CDF.close ~CDF.col_major ~CDF.compress ~CDF.copy ~CDF.from_data ~CDF.new ~CDF.raw_var ~CDF.readonly ~CDF.save ~CDF.var_num ~CDF.version .. attribute:: CDF.attrs Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. .. attribute:: CDF.backward True if this CDF was created in backward-compatible mode (for opening with CDF library before 3.x) .. automethod:: add_to_cache .. automethod:: add_attr_to_cache .. automethod:: attr_num .. automethod:: checksum .. automethod:: clear_from_cache .. automethod:: clear_attr_from_cache .. automethod:: clone .. automethod:: close .. automethod:: col_major .. automethod:: compress .. automethod:: copy .. automethod:: from_data .. automethod:: new .. automethod:: raw_var .. automethod:: readonly .. automethod:: save .. automethod:: var_num .. automethod:: version """ def __init__(self, pathname, masterpath=None, create=None, readonly=None): """Open or create a CDF file. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. Raises ====== CDFError if CDF library reports an error CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save` when done. 
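An existing CDF can also be opened read/write by overriding the default (a sketch; the file name is a placeholder): >>> cdffile = pycdf.CDF('cdf_filename.cdf', readonly=False)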
""" if masterpath is not None: #Looks like we want to create if create is False: raise ValueError('Cannot specify a master CDF without creating a CDF') if readonly is True: raise ValueError('Cannot create a CDF in readonly mode') if create and readonly: raise ValueError('Cannot create a CDF in readonly mode') try: self.pathname = pathname.encode() except AttributeError: raise ValueError( 'pathname must be string-like: {0}'.format(pathname)) self._handle = ctypes.c_void_p(None) self._opened = False if masterpath is None and not create: self._open(True if readonly is None else readonly) elif masterpath: self._from_master(masterpath.encode()) else: self._create() lib.call(const.SELECT_, const.CDF_zMODE_, ctypes.c_long(2)) self._attrlistref = weakref.ref(gAttrList(self)) self.backward = self.version()[0] < 3 self._var_nums = {} """Cache of name-to-number mappings for variables in this CDF""" self._attr_info = {} """Cache of name-to-(number, global) mappings for attributes in this CDF""" def __del__(self): """Destructor; called when CDF object is destroyed. Close CDF file if there is still a valid handle. .. note:: To avoid data loss, explicitly call :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save`. """ if self._opened: self.close() def __delitem__(self, name): """Delete a zVariable in this CDF, by name or number Parameters ========== name : string or int Name or number of the CDF variable .. note: Variable numbers may change if variables are added or removed. Examples ======== Delete the variable ``Epoch`` from the open CDF file ``cdffile``. >>> del cdffile['Epoch'] """ self[name]._delete() def __enter__(self): """Context manager entrance function.""" return self def __exit__(self, type, value, traceback): """Context manager exit function. Close CDF file. """ self.close() def __getitem__(self, name): """Gets a zVariable in this CDF, by name or number The CDF acts like a dict @param name: Name or number of the CDF variable @type name: string or int @return: CDF variable named or numbered L{name} @rtype: :py:class:`pycdf.Var` @raise KeyError: for pretty much any problem in lookup @note: variable numbers may change if variables are added or removed. """ try: return Var(self, name) except CDFException as e: raise KeyError('{0}: {1}'.format(name, e)) def __setitem__(self, name, data): """Writes data to a zVariable in this CDF If the zVariable does not exist, will create one matching L{data}. If it does exist, will attempt to write L{data} to it without changing the type or dimensions. @param name: name or number of the variable to write @type name: str or int @param data: data to write, or a :py:class:`pycdf.Var` to copy """ if isinstance(data, Var): self.clone(data, name) elif name in self: self[name][...] 
= data if hasattr(data, 'attrs'): self[name].attrs.clone(data.attrs) else: self.new(name, data) def __iter__(self, current = 0): """Iterates over zVars in CDF Iterators for dicts return keys @note: Returned in variable-number order """ while current < self.__len__(): name = self[current].name() value = (yield name) if value is None: current += 1 else: current = self[value]._num() current += 1 def __len__(self): """Implements 'length' of CDF (number of zVars) @return: number of zVars in the CDF @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, const.CDF_NUMzVARS_, ctypes.byref(count)) return count.value def __contains__(self, key): """Determines whether a particular variable name is in the CDF @note: Essentially an efficiency function; L{__iter__} is called if this isn't defined @param key: key/variable name to check @type key: string @return: True if L{key} is the name of a variable in CDF, else False @rtype: Boolean """ try: foo = self[key] return True except KeyError as e: expected = str(key) + \ ": NO_SUCH_VAR: Named variable not found in this CDF." if expected in e.args: return False raise def __repr__(self): """Returns representation of CDF Cannot return anything that can be eval'd to create a copy of the CDF, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<CDF:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the CDF This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.CDF`, just the names, types, and sizes of all variables. (Attributes are not listed.) @return: description of the variables in the CDF @rtype: str """ if self._opened: return '\n'.join([key + ': ' + str(value) for (key, value) in sorted(self.items())]) #can get away with this sort because second value in tuple isn't #compared unless first are different, and variable name is unique. else: if isinstance(self.pathname, str): return 'Closed CDF {0}'.format(self.pathname) else: return 'Closed CDF {0}'.format(self.pathname.decode('ascii')) def _open(self, readonly=True): """Opens the CDF file (called on init) Will open an existing CDF file read/write. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.OPEN_, const.CDF_, self.pathname, ctypes.byref(self._handle)) self._opened = True if readonly: #Default is RW self.readonly(readonly) def _create(self): """Creates (and opens) a new CDF file Created at ``pathname``. Assumes zero-dimension r variables Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.CREATE_, const.CDF_, self.pathname, ctypes.c_long(0), (ctypes.c_long * 1)(0), ctypes.byref(self._handle)) self._opened = True def _from_master(self, master_path): """Creates a new CDF from a master CDF file ``master_path`` is copied to ``pathname`` and opened. Parameters ========== master_path : string location of the master CDF file Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. 
note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ if os.path.exists(self.pathname): raise CDFError(const.CDF_EXISTS) shutil.copy2(master_path, self.pathname) self._open(False) def _call(self, *args, **kwargs): """Select this CDF as current and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. Parameters ========== args : various, see :py:mod:`ctypes`. Passed directly to the CDF library interface. Useful constants are defined in the :doc:`const <pycdf_const>` module of this package. Returns ======= out : ctypes.c_long CDF status from the library .. note: Terminal NULL_ is automatically added to ``args``. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. """ return lib.call(const.SELECT_, const.CDF_, self._handle, *args, **kwargs) def clone(self, zVar, name=None, data=True): """ Clone a zVariable (from another CDF or this) into this CDF Parameters ========== zVar : :py:class:`Var` variable to clone Other Parameters ================ name : str Name of the new variable (default: name of the original) data : boolean (optional) Copy data, or only type, dimensions, variance, attributes? (default: True, copy data as well) Returns ======= out : :py:class:`Var` The newly-created zVar in this CDF """ if name is None: name = zVar.name() if name in self: del self[name] self.new(name, type=zVar.type(), recVary=zVar.rv(), dimVarys=zVar.dv(), dims=zVar._dim_sizes(), n_elements=zVar._nelems()) self[name].compress(*zVar.compress()) self[name].attrs.clone(zVar.attrs) if data: r = zVar._raw zVar._raw = True self.raw_var(name)[...] = zVar[...] zVar._raw = r return zVar def col_major(self, new_col=None): """ Finds the majority of this CDF file Other Parameters ================ new_col : boolean Specify True to change to column-major, False to change to row major, or do not specify to check the majority rather than changing it. (default is check only) Returns ======= out : boolean True if column-major, false if row-major """ if new_col != None: new_maj = const.COLUMN_MAJOR if new_col else const.ROW_MAJOR self._call(const.PUT_, const.CDF_MAJORITY_, new_maj) maj = ctypes.c_long(0) self._call(const.GET_, const.CDF_MAJORITY_, ctypes.byref(maj)) if not maj.value in (const.ROW_MAJOR.value, const.COLUMN_MAJOR.value): raise CDFError(const.BAD_MAJORITY) return maj.value == const.COLUMN_MAJOR.value def readonly(self, ro=None): """ Sets or check the readonly status of this CDF If the CDF has been changed since opening, setting readonly mode will have no effect. .. note:: Closing a CDF that has been opened readonly, or setting readonly False, may take a substantial amount of time if there are many variables in the CDF, as a (potentially large) cache needs to be cleared. Consider specifying ``readonly=False`` when opening the file if this is an issue. However, this may make some reading operations slower. Other Parameters ================ ro : Boolean True to set the CDF readonly, False to set it read/write, or leave out to check only. 
Returns ======= out : Boolean True if CDF is read-only, else False Raises ====== CDFError : if bad mode is set """ if ro == True: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYon) elif ro == False: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYoff) mode = ctypes.c_long(0) self._call(const.CONFIRM_, const.CDF_READONLY_MODE_, ctypes.byref(mode)) if mode.value == const.READONLYon.value: return True elif mode.value == const.READONLYoff.value: return False else: raise CDFError(const.BAD_READONLY_MODE.value) def checksum(self, new_val=None): """ Set or check the checksum status of this CDF. If checksums are enabled, the checksum will be verified every time the file is opened. Other Parameters ================ new_val : boolean True to enable checksum, False to disable, or leave out to simply check. Returns ======= out : boolean True if the checksum is enabled or False if disabled """ if new_val != None: self._call(const.PUT_, const.CDF_CHECKSUM_, const.MD5_CHECKSUM if new_val else const.NO_CHECKSUM) chk = ctypes.c_long(0) self._call(const.GET_, const.CDF_CHECKSUM_, ctypes.byref(chk)) if not chk.value in (const.MD5_CHECKSUM.value, const.NO_CHECKSUM.value): raise CDFError(const.BAD_CHECKSUM) return chk.value == const.MD5_CHECKSUM.value def close(self): """ Closes the CDF file Although called on object destruction (:meth:`~CDF.__del__`), to ensure all data are saved, the user should explicitly call :meth:`~CDF.close` or :meth:`~CDF.save`. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.CLOSE_, const.CDF_) self._opened = False def compress(self, comptype=None, param=None): """ Set or check the compression of this CDF Sets compression on entire *file*, not per-variable. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) Returns ======= out : tuple (comptype, param) currently in effect See Also ======== :meth:`Var.compress` Examples ======== Set file ``cdffile`` to gzip compression, compression level 9: >>> cdffile.compress(pycdf.const.GZIP_COMPRESSION, 9) """ return _compress(self, comptype, param) def new(self, name, data=None, type=None, recVary=True, dimVarys=None, dims=None, n_elements=None, compress=None, compress_param=None): """ Create a new zVariable in this CDF .. note:: Either ``data`` or ``type`` must be specified. If type is not specified, it is guessed from ``data``. Parameters ========== name : str name of the new variable Other Parameters ================ data data to store in the new variable. If this has a an ``attrs`` attribute (e.g., :class:`~spacepy.datamodel.dmarray`), it will be used to populate attributes of the new variable. type : ctypes.c_long CDF type of the variable, from :mod:`~pycdf.const`. See section 2.5 of the CDF user's guide for more information on CDF data types. recVary : boolean record variance of the variable (default True) dimVarys : list of boolean dimension variance of each dimension, default True for all dimensions. 
dims : list of int size of each dimension of this variable, default zero-dimensional. Note this is the dimensionality as defined by CDF, i.e., for record-varying variables it excludes the leading record dimension. See :py:class:`Var`. n_elements : int number of elements, should be 1 except for CDF_CHAR, for which it's the length of the string. compress : ctypes.c_long Compression to apply to this variable, default None. See :py:meth:`Var.compress`. compress_param : ctypes.c_long Compression parameter if compression used; reasonable default is chosen. See :py:meth:`Var.compress`. Returns ======= out : :py:class:`Var` the newly-created zVariable Raises ====== ValueError : if neither data nor sufficient typing information is provided. Notes ===== Any given data may be representable by a range of CDF types; if the type is not specified, pycdf will guess which the CDF types which can represent this data. This breaks down to: #. If input data is a numpy array, match the type of that array #. Proper kind (numerical, string, time) #. Proper range (stores highest and lowest number provided) #. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: #. Type that matches precision of data first, then #. integer type before float type, then #. Smallest type first, then #. signed type first, then #. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if ``data`` specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: #. absolute values between 0 and 3e-39 #. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. """ if type in (const.CDF_EPOCH16, const.CDF_INT8, const.CDF_TIME_TT2000) \ and self.backward: raise ValueError('Cannot use EPOCH16, INT8, or TIME_TT2000 ' 'in backward-compatible CDF') if not lib.supports_int8 and \ type in (const.CDF_INT8, const.CDF_TIME_TT2000): raise ValueError('INT8 and TIME_TT2000 require CDF library 3.4.0') if data is None: if type is None: raise ValueError('Must provide either data or a CDF type.') if dims is None: dims = [] if n_elements is None: n_elements = 1 else: (guess_dims, guess_types, guess_elements) = _Hyperslice.types(data) if dims is None: if recVary: if guess_dims == (): raise ValueError( 'Record-varying data cannot be scalar. 
' 'Specify NRV with CDF.new() or put data in array.') dims = guess_dims[1:] else: dims = guess_dims if type is None: type = guess_types[0] if type == const.CDF_EPOCH16.value and self.backward: type = const.CDF_EPOCH if n_elements is None: n_elements = guess_elements if dimVarys is None: dimVarys = [True for i in dims] recVary = const.VARY if recVary else const.NOVARY dimVarys = [const.VARY if dimVary else const.NOVARY for dimVary in dimVarys] if not hasattr(type, 'value'): type = ctypes.c_long(type) if type.value == const.CDF_INT8.value and not lib.supports_int8: raise ValueError( '64-bit integer support requires CDF library 3.4.0') if type.value in (const.CDF_EPOCH16.value, const.CDF_INT8.value, const.CDF_TIME_TT2000.value) \ and self.backward: raise ValueError('Data requires EPOCH16, INT8, or TIME_TT2000; ' 'incompatible with backward-compatible CDF') new_var = Var(self, name, type, n_elements, dims, recVary, dimVarys) if compress != None: new_var.compress(compress, compress_param) if data is not None: new_var[...] = data if hasattr(data, 'attrs'): new_var.attrs.clone(data.attrs) return new_var def raw_var(self, name): """ Get a "raw" :class:`Var` object. Normally a :class:`Var` will perform translation of values for certain types (to/from Unicode for CHAR variables on Py3k, and to/from datetime for all time types). A "raw" object does not perform this translation, on read or write. This does *not* affect the data on disk, and in fact it is possible to maintain multiple Python objects with access to the same zVariable. Parameters ========== name : str name or number of the zVariable """ v = self[name] v._raw = True return v def save(self): """ Saves the CDF file but leaves it open. If closing the CDF, :meth:`close` is sufficient; there is no need to call :meth:`save` before :meth:`close`. .. note:: Relies on an undocumented call of the CDF C library, which is also used in the Java interface. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.SAVE_, const.CDF_) def copy(self): """ Make a copy of all data and attributes in this CDF Returns ======= out : :py:class:`CDFCopy` :class:`~spacepy.datamodel.SpaceData`-like object of all data """ return CDFCopy(self) def version(self): """ Get version of library that created this CDF Returns ======= out : tuple version of CDF library, in form (version, release, increment) """ ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) self._call(const.GET_, const.CDF_VERSION_, ctypes.byref(ver), const.GET_, const.CDF_RELEASE_, ctypes.byref(rel), const.GET_, const.CDF_INCREMENT_, ctypes.byref(inc)) return (ver.value, rel.value, inc.value) def _get_attrs(self): """Get attribute list Provide access to the CDF's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = gAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. 
""") def var_num(self, varname): """Get the variable number of a particular variable name This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Note this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : int Variable number of this zVariable. """ num = self._var_nums.get(varname, None) if num is None: #Copied from Var._get, which can hopefully be thinned varNum = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMBER_, varname, ctypes.byref(varNum)) num = varNum.value self._var_nums[varname] = num return num def attr_num(self, attrname): """Get the attribute number and scope by attribute name This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the attribute. Note this is NOT a string in Python 3! Raises ====== CDFError : if attribute is not found Returns ======= out : tuple attribute number, scope (True for global) of this attribute """ res = self._attr_info.get(attrname, None) if res is None: #Copied from Var._get, which can hopefully be thinned attrNum = ctypes.c_long(0) self._call(const.GET_, const.ATTR_NUMBER_, attrname, ctypes.byref(attrNum)) scope = ctypes.c_long(0) self._call(const.SELECT_, const.ATTR_, attrNum, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) res = (attrNum.value, scope) self._attr_info[attrname] = res return res def clear_attr_from_cache(self, attrname): """Mark an attribute deleted in the name-to-number cache Will remove an attribute, and all attributes with higher numbers, from the attribute cache. Does NOT delete the attribute! This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the attribute. Note this is NOT a string in Python 3! """ num, scope = self.attr_num(attrname) #All numbers higher than this are renumbered for a, n in list(self._attr_info.items()): if n[0] >= num: del self._attr_info[a] def clear_from_cache(self, varname): """Mark a variable deleted in the name-to-number cache Will remove a variable, and all variables with higher numbers, from the variable cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Note this is NOT a string in Python 3! """ num = self.var_num(varname) #All numbers higher than this are renumbered for v, n in list(self._var_nums.items()): if n >= num: del self._var_nums[v] def add_attr_to_cache(self, attrname, num, scope): """Add an attribute to the name-to-number cache This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the attribute. Note this is NOT a string in Python 3! num : int number of the attribute scope : bool True if global scope; False if variable scope. 
""" self._attr_info[attrname] = (num, scope) def add_to_cache(self, varname, num): """Add a variable to the name-to-number cache This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable """ self._var_nums[varname] = num #Note there is no function for delete, currently handled in Var.rename #and Attr.rename by just deleting from the dict directly. Maybe this #should be differen (maybe should be possible to follow a variable across #a rename...) class Var(MutableSequence): """ A CDF variable. This object does not directly store the data from the CDF; rather, it provides access to the data in a format that much like a Python list or numpy :class:`~numpy.ndarray`. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. The CDF user's guide, section 2.3, provides background on variables. .. note:: Not intended to be created directly; use methods of :class:`CDF` to gain access to a variable. A record-varying variable's data are viewed as a hypercube of dimensions n_dims+1 (the extra dimension is the record number). They are indexed in row-major fashion, i.e. the last index changes most frequently / is contiguous in memory. If the CDF is column-major, the data are transformed to row-major before return. Non record-varying variables are similar, but do not have the extra dimension of record number. Variables can be subscripted by a multidimensional index to return the data. Indices are in row-major order with the first dimension representing the record number. If the CDF is column major, the data are reordered to row major. Each dimension is specified by standard Python `slice <http://docs.python.org/tutorial/introduction.html#strings>`_ notation, with dimensions separated by commas. The ellipsis fills in any missing dimensions with full slices. The returned data are lists; Python represents multidimensional arrays as nested lists. The innermost set of lists represents contiguous data. .. note:: numpy 'fancy indexing' is *not* supported. Degenerate dimensions are 'collapsed', i.e. no list of only one element will be returned if a single subscript is specified instead of a range. (To avoid this, specify a slice like 1:2, which starts with 1 and ends before 2). Two special cases: 1. requesting a single-dimension slice for a record-varying variable will return all data for that record number (or those record numbers) for that variable. 2. Requests for multi-dimensional variables may skip the record-number dimension and simply specify the slice on the array itself. In that case, the slice of the array will be returned for all records. In the event of ambiguity (e.g., single-dimension slice on a one-dimensional variable), case 1 takes priority. Otherwise, mismatch between the number of dimensions specified in the slice and the number of dimensions in the variable will cause an :exc:`~exceptions.IndexError` to be thrown. This all sounds very complicated but it is essentially attempting to do the 'right thing' for a range of slices. An unusual case is scalar (zero-dimensional) non-record-varying variables. Clearly they cannot be subscripted normally. 
In this case, use the ``[...]`` syntax meaning 'access all data.': >>> import pycdf >>> testcdf = pycdf.CDF('test.cdf', '') >>> variable = testcdf.new('variable', recVary=False, ... type=pycdf.const.CDF_INT4) >>> variable[...] = 10 >>> variable <Var: CDF_INT4 [] NRV > >>> variable[...] 10 Reading any empty non-record-varying variable will return an empty with the same *number* of dimensions, but all dimensions will be of zero length. The scalar is, again, a special case: due to the inability to have a numpy array which is both zero-dimensional and empty, reading an NRV scalar variable with no data will return an empty one-dimensional array. This is really not recommended. As a list type, variables are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_; iterating over a variable returns a single complete record at a time. This is all clearer with examples. Consider a variable ``B_GSM``, with three elements per record (x, y, z components) and fifty records in the CDF. Then: 1. ``B_GSM[0, 1]`` is the y component of the first record. 2. ``B_GSM[10, :]`` is a three-element list, containing x, y, and z components of the 11th record. As a shortcut, if only one dimension is specified, it is assumed to be the record number, so this could also be written ``B_GSM[10]``. 3. ``B_GSM[...]`` reads all data for ``B_GSM`` and returns it as a fifty-element list, each element itself being a three-element list of x, y, z components. Multidimensional example: consider fluxes stored as a function of pitch angle and energy. Such a variable may be called Flux and stored as a two-dimensional array, with the first dimension representing (say) ten energy steps and the second, eighteen pitch angle bins (ten degrees wide, centered from 5 to 175 degrees). Assume 100 records stored in the CDF (i.e. 100 different times). 1. ``Flux[4]`` is a list of ten elements, one per energy step, each element being a list of 18 fluxes, one per pitch bin. All are taken from the fifth record in the CDF. 2. ``Flux[4, :, 0:4]`` is the same record, all energies, but only the first four pitch bins (roughly, field-aligned). 3. ``Flux[..., 0:4]`` is a 100-element list (one per record), each element being a ten-element list (one per energy step), each containing fluxes for the first four pitch bins. This slicing notation is very flexible and allows reading specifically the desired data from the CDF. All data are, on read, converted to appropriate Python data types; EPOCH, EPOCH16, and TIME_TT2000 types are converted to :class:`~datetime.datetime`. Data are returned in numpy arrays. .. note:: Although pycdf supports TIME_TT2000 variables, the Python :class:`~datetime.datetime` object does not support leap seconds. Thus, on read, any seconds past 59 are truncated to 59.999999 (59 seconds, 999 milliseconds, 999 microseconds). Potentially useful list methods and related functions: - `count <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `in <http://docs.python.org/reference/expressions.html#in>`_ - `index <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `len <http://docs.python.org/library/functions.html#len>`_ - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - `sorted <http://docs.python.org/library/functions.html#sorted>`_ The topic of array majority can be very confusing; good background material is available at `IDL Array Storage and Indexing <http://www.idlcoyote.com/misc_tips/colrow_major.html>`_. 
In brief, *regardless of the majority stored in the CDF*, pycdf will always present the data in the native Python majority, row-major order, also known as C order. This is the default order in `NumPy <http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html #internal-memory-layout-of-an-ndarray>`_. However, packages that render image data may expect it in column-major order. If the axes seem 'swapped' this is likely the reason. The :attr:`~Var.attrs` Python attribute acts as a dictionary referencing zAttributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`zAttrList` for more on the dictionary of attributes. With writing, as with reading, every attempt has been made to match the behavior of Python lists. You can write one record, many records, or even certain elements of all records. There is one restriction: only the record dimension (i.e. dimension 0) can be resized by write, as all records in a variable must have the same dimensions. Similarly, only whole records can be deleted. .. note:: Unusual error messages on writing data usually mean that pycdf is unable to interpret the data as a regular array of a single type matching the type and shape of the variable being written. A 5x4 array is supported; an irregular array where one row has five columns and a different row has six columns is not. Error messages of this type include: - ``Data must be well-formed, regular array of number, string, or datetime`` - ``setting an array element with a sequence.`` - ``shape mismatch: objects cannot be broadcast to a single shape`` For these examples, assume Flux has 100 records and dimensions [2, 3]. Rewrite the first record without changing the rest: >>> Flux[0] = [[1, 2, 3], [4, 5, 6]] Writes a new first record and delete all the rest: >>> Flux[...] = [[1, 2, 3], [4, 5, 6]] Write a new record in the last position and add a new record after: >>> Flux[99:] = [[[1, 2, 3], [4, 5, 6]], ... [[11, 12, 13], [14, 15, 16]]] Insert two new records between the current number 5 and 6: >>> Flux[5:6] = [[[1, 2, 3], [4, 5, 6]], [[11, 12, 13], ... [14, 15, 16]]] This operation can be quite slow, as it requires reading and rewriting the entire variable. (CDF does not directly support record insertion.) Change the first element of the first two records but leave other elements alone: >>> Flux[0:2, 0, 0] = [1, 2] Remove the first record: >>> del Flux[0] Removes record 5 (the sixth): >>> del Flux[5] Due to the need to work around a bug in the CDF library, this operation can be quite slow. Delete *all data* from ``Flux``, but leave the variable definition intact: >>> del Flux[...] .. note:: Although this interface only directly supports zVariables, zMode is set on opening the CDF so rVars appear as zVars. See p.24 of the CDF user's guide; pyCDF uses zMode 2. .. autosummary:: ~Var.attrs ~Var.compress ~Var.copy ~Var.dtype ~Var.dv ~Var.insert ~Var.name ~Var.rename ~Var.rv ~Var.shape ~Var.type .. attribute:: Var.attrs zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. .. automethod:: compress .. automethod:: copy .. autoattribute:: dtype .. automethod:: dv .. automethod:: insert .. automethod:: name .. automethod:: rename .. automethod:: rv .. autoattribute:: shape .. 
automethod:: type """ def __init__(self, cdf_file, var_name, *args): """Create or locate a variable Parameters ========== cdf_file : :py:class:`pycdf.CDF` CDF file containing this variable var_name : string name of this variable Other Parameters ================ args additional arguments passed to :py:meth:`_create`. If none, opens an existing variable. If provided, creates a new one. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning """ self.cdf_file = cdf_file #This is the definitive "identify" of variable self._name = None self._type = None #CDF type (long) self._raw = False #Raw access (skip all conversions) if len(args) == 0: self._get(var_name) else: self._create(var_name, *args) #Weak reference to attribute list (use attrs instead) #This avoids a reference loop self._attrlistref = weakref.ref(zAttrList(self)) def __getitem__(self, key): """Returns a slice from the data array. Details under :py:class:`pycdf.Var`. @return: The data from this variable @rtype: list-of-lists of appropriate type. @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) #Hyperslice mostly catches this sort of thing, but #an empty variable is a special case, since we might want to #WRITE to 0th record (which Hyperslice also supports) but #can't READ from it, and iterating over tries to read from it. if hslice.rv: if hslice.dimsizes[0] == 0 and hslice.degen[0] and \ hslice.starts[0] == 0: raise IndexError('record index out of range') #For NRV, again hslice will assume 0th record exists since we might #want to write. So ANY degenerate dim other than the glued-on 0th #suggests an explicit index that should fail. None degenerate suggests #make an empty array. #Note this is pulling a lot of hyperslice stuff into getitem! elif hslice.dimsizes[0] == 0: if len(hslice.degen) > 1 and max(hslice.degen[1:]): raise IndexError('record index out of range') else: #The zero-length dimension is degenerate so it gets chopped, #and you can't have a zero-length numpy array that still #maintains the size of all other dimensions. So just force #a zero-dim array and the rest will follow hslice.counts[...] = 0 #If this is a scalar, need to make a single non-degenerate #dimension so it can be empty. if len(hslice.counts) == 1: hslice.degen[0] = False result = hslice.create_array() if hslice.counts[0] != 0: hslice.select() lib.call(const.GET_, const.zVAR_HYPERDATA_, result.ctypes.data_as(ctypes.c_void_p)) return hslice.convert_input_array(result) def __delitem__(self, key): """Removes a record (or set of records) from the CDF Only whole records can be deleted, so the del call must either specify only one dimension or it must specify all elements of the non-record dimensions. This is *not* a way to resize a variable! Deleting records from the middle of a variable may be very slow in some circumstances. To work around a bug in CDF library versions 3.4.0 and before, all the data must be read in, the requested deletions done, and then all written back out. 
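        A minimal illustration, reusing the hypothetical record-varying
        ``Flux`` variable from the class documentation above:

        >>> del Flux[5:10]   # remove records 5 through 9
        >>> del Flux[...]    # remove all records, keep the variable defined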
@param key: index or slice to delete @type key: int or slice @raise TypeError: if an attempt is made to delete from a non record-varying variable, or to delete below the record level """ if not self.rv(): raise TypeError('Cannot delete records from non-record-varying ' 'variable.') hslice = _Hyperslice(self, key) if hslice.dims > 1 and (hslice.counts[1:] != hslice.dimsizes[1:]).any(): raise TypeError('Can only delete entire records.') if hslice.counts[0] == 0: return start = hslice.starts[0] count = hslice.counts[0] interval = hslice.intervals[0] dimsize = hslice.dimsizes[0] self._call() dangerous_delete = False if lib._del_middle_rec_bug and \ (interval != 1 or (start != 0 and start + count < dimsize)): #delete from middle is dangerous if only have one index entry entries = ctypes.c_long(0) lib.call(const.GET_, const.zVAR_nINDEXENTRIES_, ctypes.byref(entries)) dangerous_delete = (entries.value == 1) if dangerous_delete: data = self[...] data = numpy.delete( data, numpy.arange(start, start + count * interval, interval), 0) self[0:dimsize - count] = data first_rec = dimsize - count last_rec = dimsize - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif interval == 1: first_rec = ctypes.c_long(start) last_rec = ctypes.c_long(start + count - 1) lib.call(const.DELETE_, const.zVAR_RECORDS_, first_rec, last_rec) else: self._call() #delete from end to avoid renumbering of records for recno in range(start + (count - 1) * interval, start - 1, -1 * interval): lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(recno), ctypes.c_long(recno)) def __setitem__(self, key, data): """Puts a slice into the data array. Details under :py:class:`pycdf.Var`. @param key: index or slice to store @type key: int or slice @param data: data to store @type data: numpy.array @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. 
IndexError will @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) n_recs = hslice.counts[0] hslice.expand(data) cdf_type = self.type() if cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) else: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=self._np_type()) if cdf_type == const.CDF_EPOCH16.value: datashape = data.shape[:-1] else: datashape = data.shape #Check data sizes if datashape != tuple(hslice.expected_dims()): raise ValueError('attempt to assign data of dimensions ' + str(datashape) + ' to slice of dimensions ' + str(tuple(hslice.expected_dims()))) #Flip majority and reversed dimensions, see convert_input_array data = hslice.convert_output_array(data) #Handle insertions and similar weirdness if hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Specified slice ends before last record, so insert in middle saved_data = self[hslice.starts[0] + n_recs:] if hslice.counts[0] > 0: hslice.select() lib.call(const.PUT_, const.zVAR_HYPERDATA_, data.ctypes.data_as(ctypes.c_void_p)) if hslice.counts[0] < n_recs: first_rec = hslice.starts[0] + hslice.counts[0] last_rec = hslice.dimsizes[0] - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Put saved data in after inserted data self[hslice.starts[0] + hslice.counts[0]:] = saved_data def extend(self, data): """ Append multiple values to the end of this variable This is an efficiency function which overrides the base implementation in MutableSequence. Parameters ---------- data : the data to append """ self[len(self):] = data def insert(self, index, data): """ Inserts a *single* record before an index Parameters ---------- index : int index before which to insert the new record data : the record to insert """ self[index:index] = [data] # MASKED: _create function (lines 3013-3050) def _delete(self): """Removes this zVariable from the CDF @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ self._call(const.DELETE_, const.zVAR_) self.cdf_file.clear_from_cache(self._name) self._name = None def _get(self, var_name): """Gets an existing zVariable @param var_name: name of this variable @type var_name: string @return: variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.__getitem__}. 
""" if isinstance(var_name, str_classes): try: enc_name = var_name.encode('ascii').rstrip() except AttributeError: enc_name = var_name.rstrip() #already in ASCII #'touch' CDF to cause an error if the name isn't there; get number varNum = ctypes.c_long(0) self.cdf_file._call(const.GET_, const.zVAR_NUMBER_, enc_name, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) else: #Looking up by number name = ctypes.create_string_buffer(const.CDF_VAR_NAME_LEN256+1) self.cdf_file._call(const.SELECT_, const.zVAR_, ctypes.c_long(var_name), const.GET_, const.zVAR_NAME_, name) self._name = name.value.rstrip() self.cdf_file.add_to_cache(self._name, var_name) def _num(self): """Returns the zVar number for this variable @return: number of this zVar @rtype: int """ return self.cdf_file.var_num(self._name) def __len__(self): """Get number of records for this variable in this file @return: Number of records @rtype: long @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ count = ctypes.c_long(0) self._call(const.GET_, const.zVAR_MAXREC_, ctypes.byref(count)) return (count.value + 1) def __repr__(self): """Returns representation of the variable Cannot return anything that can be eval'd to create a copy, so just wrap the informal representation in angle brackets. @return: info on this zVar @rtype: str """ return '<Var:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the variable This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.Var`. @return: info on this zVar, CDFTYPE [dimensions] NRV (if not record-varying) @rtype: str """ if self.cdf_file._opened: cdftype = self.type() chartypes = (const.CDF_CHAR.value, const.CDF_UCHAR.value) rv = self.rv() typestr = lib.cdftypenames[cdftype] + \ ('*' + str(self._nelems()) if cdftype in chartypes else '' ) if rv: sizestr = str([len(self)] + self._dim_sizes()) else: sizestr = str(self._dim_sizes()) return typestr + ' ' + sizestr + ('' if rv else ' NRV') else: if isinstance(self._name, str): return 'zVar "{0}" in closed CDF {1}'.format( self._name, self.cdf_file.pathname) else: return 'zVar "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self.cdf_file.pathname.decode('ascii')) def _n_dims(self): """Get number of dimensions for this variable @return: the number of dimensions @rtype: long """ n_dims = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMDIMS_, ctypes.byref(n_dims)) return n_dims.value def _dim_sizes(self): """Get the dimension sizes for this variable @return: sequence of sizes @rtype: sequence of long @note: This will always be in Python order (i.e. row major, last index iterates most quickly), *regardless* of the majority of the CDF. """ sizes = (ctypes.c_long * const.CDF_MAX_DIMS)(0) self._call(const.GET_, const.zVAR_DIMSIZES_, sizes) sizes = sizes[0:self._n_dims()] return sizes def rv(self, new_rv=None): """ Gets or sets whether this variable has record variance If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Other Parameters ================ new_rv : boolean True to change to record variance, False to change to NRV, unspecified to simply check variance. 
Returns ======= out : Boolean True if record varying, False if NRV """ if new_rv != None: self._call(const.PUT_, const.zVAR_RECVARY_, const.VARY if new_rv else const.NOVARY) vary = ctypes.c_long(0) self._call(const.GET_, const.zVAR_RECVARY_, ctypes.byref(vary)) return vary.value != const.NOVARY.value def dv(self, new_dv=None): """ Gets or sets dimension variance of each dimension of variable. If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Parameters ========== new_dv : list of boolean Each element True to change that dimension to dimension variance, False to change to not dimension variance. (Unspecified to simply check variance.) Returns ======= out : list of boolean True if that dimension has variance, else false. """ ndims = self._n_dims() if new_dv != None: if len(new_dv) != ndims: raise ValueError('Must specify variance for ' + str(ndims) + 'dimensions.') varies = (ctypes.c_long * ndims)( *[const.VARY if dv else const.NOVARY for dv in new_dv]) self._call(const.PUT_, const.zVAR_DIMVARYS_, varies) if ndims == 0: return [] varies = (ctypes.c_long * const.CDF_MAX_DIMS)() self._call(const.GET_, const.zVAR_DIMVARYS_, varies) return [dv != const.NOVARY.value for dv in varies[0:ndims]] def _call(self, *args, **kwargs): """Select this CDF and variable and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. @param args: Passed directly to the CDF library interface. Useful constants are defined in the :py:mod:`pycdf.const` module of this package. @type args: various, see :py:mod:`ctypes`. @return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self.cdf_file._call( const.SELECT_, const.zVAR_, ctypes.c_long(self.cdf_file.var_num(self._name)), *args, **kwargs) def _np_type(self): """Returns the numpy type of this variable This is the numpy type that will come directly out of the CDF; see :meth:`dtype` for the representation post-conversion. Raises ====== CDFError : for library-reported error or failure to find numpy type Returns ======= out : dtype numpy dtype that will hold value from this variable """ cdftype = self.type() if cdftype == const.CDF_CHAR.value or cdftype == const.CDF_UCHAR.value: return numpy.dtype('S' + str(self._nelems())) try: return lib.numpytypedict[cdftype] except KeyError: raise CDFError(const.BAD_DATA_TYPE) def type(self, new_type=None): """ Returns or sets the CDF type of this variable Parameters ========== new_type : ctypes.c_long the new type from :mod:`~pycdf.const` Returns ======= out : int CDF type """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) n_elements = ctypes.c_long(self._nelems()) self._call(const.PUT_, const.zVAR_DATASPEC_, new_type, n_elements) self._type = None if self._type is None: cdftype = ctypes.c_long(0) self._call(const.GET_, const.zVAR_DATATYPE_, ctypes.byref(cdftype)) self._type = cdftype.value return self._type def _nelems(self): """Number of elements for each value in this variable This is the length of strings for CHAR and UCHAR, should be 1 otherwise. 
@return: length of strings @rtype: int """ nelems = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMELEMS_, ctypes.byref(nelems)) return nelems.value def name(self): """ Returns the name of this variable Returns ======= out : str variable's name """ if isinstance(self._name, str): return self._name elif isinstance(self._name, bytes): return self._name.decode() def compress(self, comptype=None, param=None): """ Set or check the compression of this variable Compression may not be changeable on variables with data already written; even deleting the data may not permit the change. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) Returns ======= out : tuple the (comptype, param) currently in effect """ return _compress(self, comptype, param) def copy(self): """ Copies all data and attributes from this variable Returns ======= out : :class:`VarCopy` list of all data in record order """ return VarCopy(self) def rename(self, new_name): """ Renames this variable Parameters ========== new_name : str the new name for this variable """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_VAR_NAME_LEN256: raise CDFError(const.BAD_VAR_NAME) self._call(const.PUT_, const.zVAR_NAME_, enc_name) self.cdf_file.add_to_cache( enc_name, self.cdf_file.var_num(self._name)) #Still in cache del self.cdf_file._var_nums[self._name] self._name = enc_name @property def shape(self): """ Provides the numpy array-like shape of this variable. Returns a tuple; first element is number of records (RV variable only) And the rest provide the dimensionality of the variable. .. note:: Assigning to this attribute will not change the shape. """ if self.rv(): return tuple([len(self)] + self._dim_sizes()) else: return tuple(self._dim_sizes()) @property def dtype(self): """ Provide the numpy dtype equivalent to the CDF type of this variable. Data from this variable will be returned in numpy arrays of this type. See Also -------- type """ cdftype = self.type() if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str is not bytes and not self._raw: return numpy.dtype('U' + str(self._nelems())) if cdftype in (const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value) and not self._raw: return numpy.dtype('O') return self._np_type() def _get_attrs(self): """Get attribute list Provide access to the zVar's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = zAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. 
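    For example (illustrative only; ``FILLVAL`` is simply a conventional
    ISTP attribute name, not something guaranteed to exist in any CDF):

    >>> var.attrs['FILLVAL'] = -1e31
    >>> 'FILLVAL' in var.attrs
    True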
""") class _Hyperslice(object): """Represents a CDF 'slice' used for the hyper CDF functions For internal module use only. @ivar dims: number of dimensions to this slice, usually number of dimensions to the variable plus one for the record, which represents the 0th (least rapidly varying) dimension. @type dims: int @ivar dimsizes: size of each dimension (0th is number of records) @type dimsizes: list of int @ivar starts: index of the start value for each dimension ('dimension indices' in CDF speak) @type starts: list of int @ivar counts: number of values to get from each dimension. Final result will be the product of everything in counts. ('dimension counts' in CDF speak) @type counts: numpy.array @ivar intervals: interval between successive indices to use for each dimension. ('dimension invervals' in CDF speak) @type intervals: list of int @ivar degen: is this dimension degenerate, i.e. should be removed in the returned dataset. A 3D array with one dimension degenerate will be returned as a 2D array (i.e. list-of-lists.) @type degen: numpy.array @ivar rev: should this dimension be returned in reverse order? @type rev: numpy.array @ivar column: is this slice in column-major mode (if false, row-major) @type column: boolean @ivar zvar: what CDF variable this object slices on @type zvar: :py:class:`pycdf.Var` @ivar expanded_key: fully-expanded version of the key passed to the constructor (all dimensions filled in) @type expanded_key: tuple @note: All dimension-related variables are stored row-major (Python order) """ def __init__(self, zvar, key): """Create a Hyperslice @param zvar: zVariable that this slices @type zvar: :py:class:`pycdf.Var` @param key: Python multi-dimensional slice as passed to __getitem__ @type key: tuple of slice and/or int @raise IndexError: if slice is out of range, mismatches dimensions, or otherwise unparsable. @raise ValueError: if slice has invalid values """ self.zvar = zvar self.rv = self.zvar.rv() #dim of records, + 1 record dim (NRV always is record 0) self.dims = zvar._n_dims() + 1 self.dimsizes = [len(zvar)] + \ zvar._dim_sizes() self.starts = [0] * self.dims self.counts = numpy.empty((self.dims,), dtype=numpy.int32) self.counts.fill(1) self.intervals = [1] * self.dims self.degen = numpy.zeros(self.dims, dtype=numpy.bool) self.rev = numpy.zeros(self.dims, dtype=numpy.bool) #key is: #1. a single value (integer or slice object) if called 1D #2. a tuple (of integers and/or slice objects) if called nD #3. Each item is either a single value (degenerate dim) # or a slice object. 
if not hasattr(key, '__len__'): #Not a container object, pack in tuple key = (key, ) if not self.rv: key = (0, ) + key #NRV, so always get 0th record (degenerate) key = self.expand_ellipsis(key, self.dims) if self.rv: #special-cases for RV variables if len(key) == 1: #get all data for this record(s) key = self.expand_ellipsis(key + (Ellipsis, ), self.dims) elif len(key) == self.dims - 1: #get same slice from each record key = (slice(None, None, None), ) + key if len(key) == self.dims: self.expanded_key = key for i in range(self.dims): idx = key[i] if hasattr(idx, 'start'): #slice (self.starts[i], self.counts[i], self.intervals[i], self.rev[i]) = \ self.convert_range(idx.start, idx.stop, idx.step, self.dimsizes[i]) else: #Single degenerate value if idx < 0: idx += self.dimsizes[i] if idx != 0 and (idx >= self.dimsizes[i] or idx < 0): raise IndexError('list index out of range') self.starts[i] = idx self.degen[i] = True else: raise IndexError('Slice does not match dimensions for zVar ' + str(zvar._name)) self.column = zvar.cdf_file.col_major() def expected_dims(self, data=None): """Calculate size of non-degenerate dimensions Figures out size, in each dimension, of expected input data @return: size of each dimension for this slice, excluding degenerate @rtype: list of int """ return [self.counts[i] for i in range(self.dims) if not self.degen[i]] def expand(self, data): """Expands the record dimension of this slice to hold a set of data If the length of data (outermost dimension) is larger than the record count (counts[0]) for this slice, expand the slice to hold all the data. This requires that the record dimension of the slice not be degenerate, and also that it not have been completely specified when the hyperslice was created (i.e. record dimension either ellipsis or no specified stop.) Does *not* expand any other dimension, since that's Very Hard in CDF. @param data: the data which are intended to be stored in this slice @type data: list """ rec_slice = self.expanded_key[0] if not self.rv or isinstance(data, str_classes) or self.degen[0] or \ not hasattr(rec_slice, 'stop'): return if len(data) < self.counts[0]: #Truncate to fit data if rec_slice.stop is None and rec_slice.step in (None, 1): self.counts[0] = len(data) elif len(data) > self.counts[0]: #Expand to fit data if rec_slice.step in (None, 1): self.counts[0] = len(data) def create_array(self): """Creates a numpy array to hold the data from this slice Returns ======= out : numpy.array array sized, typed, and dimensioned to hold data from this slice """ counts = self.counts degen = self.degen if self.column: counts = self.reorder(counts) degen = self.reorder(degen) #TODO: Forcing C order for now, revert to using self.column later array = numpy.empty( [counts[i] for i in range(len(counts)) if not degen[i]], self.zvar._np_type(), order='C') return numpy.require(array, requirements=('C', 'A', 'W')) def convert_input_array(self, buffer): """Converts a buffer of raw data from this slice EPOCH(16) variables always need to be converted. 
CHAR need converted to Unicode if py3k Parameters ========== buffer : numpy.array data as read from the CDF file Returns ======= out : numpy.array converted data """ result = self._flip_array(buffer) #Convert to derived types cdftype = self.zvar.type() if not self.zvar._raw: if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str != bytes: dt = numpy.dtype('U{0}'.format(result.dtype.itemsize)) result = numpy.require(numpy.char.array(result).decode(), dtype=dt) elif cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(result) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(result) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(result) return result def convert_output_array(self, buffer): """Convert a buffer of data that will go into this slice Parameters ========== buffer : numpy.array data to go into the CDF file Returns ======= out : numpy.array input with majority flipped and dimensions reversed to be suitable to pass directly to CDF library. """ buffer = self._flip_array(buffer) return numpy.require(buffer, requirements=('C', 'A', 'W')) def _flip_array(self, data): """ Operations for majority, etc. common between convert_input and _output """ cdftype = self.zvar.type() #Flip majority if any non-degenerate dimensions exist if self.column and not min(self.degen): #Record-number dim degen, swap whole thing if self.degen[0]: if cdftype == const.CDF_EPOCH16.value: #Maintain last dimension data = data.transpose( list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose() #Record-number dimension is not degenerate, so keep it first else: if cdftype == const.CDF_EPOCH16.value: data = data.transpose( [0] + list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose( [0] + list(range(len(data.shape) - 1, 0, -1))) #Reverse non-degenerate dimensions in rev #Remember that the degenerate indices are already gone! if self.rev.any(): sliced = [(slice(None, None, -1) if self.rev[i] else slice(None)) for i in range(self.dims) if not self.degen[i]] if cdftype == const.CDF_EPOCH16.value: #don't reverse last dim sliced.extend(slice(None)) data = operator.getitem(data, tuple(sliced)) return data def select(self): """Selects this hyperslice in the CDF Calls the CDF library to select the CDF, variable, records, and array elements corresponding to this slice. 
""" args = (const.SELECT_, const.zVAR_RECNUMBER_, ctypes.c_long(self.starts[0]), const.SELECT_, const.zVAR_RECCOUNT_, ctypes.c_long(self.counts[0]), const.SELECT_, const.zVAR_RECINTERVAL_, ctypes.c_long(self.intervals[0])) if self.dims > 1: dims = self.dims - 1 args += (const.SELECT_, const.zVAR_DIMINDICES_, (ctypes.c_long * dims)(*self.starts[1:]), const.SELECT_, const.zVAR_DIMCOUNTS_, (ctypes.c_long * dims)(*self.counts[1:]), const.SELECT_, const.zVAR_DIMINTERVALS_, (ctypes.c_long * dims)(*self.intervals[1:])) self.zvar._call(*args) @staticmethod def expand_ellipsis(slices, n_dims): """Expands any ellipses into correct number of full-size slices @param slices: tuple of slices, integers, or ellipse objects @type slices: tuple @param n_dims: number of dimensions this slice is over @type n_dims: int @return: L{slices} with ellipses replaced by appropriate number of full-dimension slices @rtype: tuple @raise IndexError: if ellipses specified when already have enough dimensions """ if slices is Ellipsis: return tuple([slice(None, None, None) for i in range(n_dims)]) #Elements might be numpy arrays, so can't use in/index idx = [i for i, v in enumerate(slices) if v is Ellipsis] if not idx: #no ellipsis return slices if len(idx) > 1: #multiples! raise IndexError('Ellipses can only be used once per slice.') idx = idx[0] #how many dims to expand ellipsis to #remember the ellipsis is in len(slices) and must be replaced! extra = n_dims - len(slices) + 1 if extra < 0: raise IndexError('too many indices') result = slices[0:idx] + (slice(None), ) * extra + slices[idx+1:] return result @staticmethod def check_well_formed(data): """Checks if input data is well-formed, regular array""" d = numpy.asanyarray(data) if d.dtype == numpy.object: #this is probably going to be bad try: len(d.flat[0]) except TypeError: #at least it's not a list pass else: raise ValueError( 'Data must be well-formed, regular array of number, ' 'string, or datetime') @staticmethod def dimensions(data): """Finds the dimensions of a nested list-of-lists @param data: data of which dimensions are desired @type data: list (of lists) @return: dimensions of L{data}, in order outside-in @rtype: list of int @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) _Hyperslice.check_well_formed(d) return d.shape @staticmethod def types(data, backward=False): """Find dimensions and valid types of a nested list-of-lists Any given data may be representable by a range of CDF types; infer the CDF types which can represent this data. This breaks down to: 1. Proper kind (numerical, string, time) 2. Proper range (stores highest and lowest number) 3. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: 1. Type that matches precision of data first, then 2. integer type before float type, then 3. Smallest type first, then 4. signed type first, then 5. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if L{data} specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: 1. absolute values between 0 and 3e-39 2. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. 
@param data: data for which dimensions and CDF types are desired @type data: list (of lists) @param backward: limit to pre-CDF3 types @type backward: bool @return: dimensions of L{data}, in order outside-in; CDF types which can represent this data; number of elements required (i.e. length of longest string) @rtype: 3-tuple of lists ([int], [ctypes.c_long], [int]) @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) dims = d.shape elements = 1 types = [] _Hyperslice.check_well_formed(d) if d.dtype.kind in ('S', 'U'): #it's a string types = [const.CDF_CHAR, const.CDF_UCHAR] elements = d.dtype.itemsize if d.dtype.kind == 'U': #UTF-8 uses 4 bytes per elements //= 4 elif d.size and hasattr(numpy.ma.getdata(d).flat[0], 'microsecond'): if max((dt.microsecond % 1000 for dt in d.flat)) > 0: types = [const.CDF_EPOCH16, const.CDF_EPOCH, const.CDF_TIME_TT2000] else: types = [const.CDF_EPOCH, const.CDF_EPOCH16, const.CDF_TIME_TT2000] if backward: del types[types.index(const.CDF_EPOCH16)] del types[-1] elif not lib.supports_int8: del types[-1] elif d is data or isinstance(data, numpy.generic): #numpy array came in, use its type (or byte-swapped) types = [k for k in lib.numpytypedict if (lib.numpytypedict[k] == d.dtype or lib.numpytypedict[k] == d.dtype.newbyteorder()) and not k in lib.timetypes] if (not lib.supports_int8 or backward) \ and const.CDF_INT8.value in types: del types[types.index(const.CDF_INT8.value)] #Maintain priority to match the ordered lists below: #float/double (44, 45) before real (21/22), and #byte (41) before int (1) before char (51). So hack. #Consider making typedict an ordered dict once 2.6 is dead. types.sort(key=lambda x: x % 50, reverse=True) if not types: #not a numpy array, or can't parse its type if d.dtype.kind == 'O': #Object. 
Try to make it numeric #Can't do safe casting from Object, so try and compare #Basically try most restrictive to least restrictive trytypes = (numpy.uint64, numpy.int64, numpy.float64) for t in trytypes: try: newd = d.astype(dtype=t) except: #Failure to cast, try next type continue if (newd == d).all(): #Values preserved, use this type d = newd #Continue with normal guessing, as if a list break else: #fell through without a match raise ValueError( 'Cannot convert generic objects to CDF type.') if d.dtype.kind in ('i', 'u'): #integer minval = numpy.min(d) maxval = numpy.max(d) if minval < 0: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_INT2, const.CDF_INT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 15, 2 ** 31, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] else: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_UINT1, const.CDF_INT2, const.CDF_UINT2, const.CDF_INT4, const.CDF_UINT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 8, 2 ** 15, 2 ** 16, 2 ** 31, 2 ** 32, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] types = [t for (t, c) in zip(types, cutoffs) if c > maxval and (minval >= 0 or minval >= -c)] if (not lib.supports_int8 or backward) \ and const.CDF_INT8 in types: del types[types.index(const.CDF_INT8)] else: #float if dims is (): if d != 0 and (abs(d) > 1.7e38 or abs(d) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] else: absolutes = numpy.abs(d[d != 0]) if len(absolutes) > 0 and \ (numpy.max(absolutes) > 1.7e38 or numpy.min(absolutes) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] types = [t.value if hasattr(t, 'value') else t for t in types] return (dims, types, elements) @staticmethod def reorder(seq): """Reorders seq to switch array majority Used to take an array of subscripts between row and column majority. First element is not touched, being the record number. @param seq: a sequence of *subscripts* @type seq: sequence of integers @return: seq with all but element 0 reversed in order @rtype: sequence of integers """ return numpy.concatenate((seq[0:1], numpy.flipud(seq)[:-1])) @staticmethod def convert_range(start, stop, step, size): """Converts a start/stop/step range to start/count/interval (i.e. changes from Python-style slice to CDF-style) @param start: index to start a slice at, may be none or negative @type start: int @param stop: index at end of slice (one-past, standard Python), may be none or negative @type stop: int @param step: interval for stepping through stlice @type step: int @param size: size of list to slice @type size: int @return: (start, count, interval, rev) where: 1. start is the start index, normalized to be within the size of the list and negatives handled 2. count is the number of records in the slice, guaranteed to stop before the end 3. interval is the skip between records 4. rev indicates whether the sequence should be reversed @rtype: (int, int, int, boolean) """ (start, stop, step) = slice(start, stop, step).indices(size) if step < 0: step *= -1 count = int((start - stop + step - 1) / step) start = start - (count - 1) * step rev = True else: count = int((stop - start + step - 1) / step) rev = False if count < 0: count = 0 start = 0 return (start, count, step, rev) class Attr(MutableSequence): """An attribute, g or z, for a CDF .. 
warning:: This class should not be used directly, but only in its subclasses, :class:`gAttr` and :class:`zAttr`. The methods listed here are safe to use in the subclasses. Represents a CDF attribute, providing access to the Entries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. An introduction to CDF attributes can be found in section 2.4 of the CDF user's guide. Each element of the list is a single Entry of the appropriate type. The index to the elements is the Entry number. Multi-dimensional slicing is *not* supported; an Entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry .. autosummary:: ~Attr.append ~Attr.has_entry ~Attr.insert ~Attr.max_idx ~Attr.new ~Attr.number ~Attr.rename ~Attr.type .. automethod:: append .. automethod:: has_entry .. automethod:: insert .. automethod:: max_idx .. automethod:: new .. automethod:: number .. automethod:: rename .. automethod:: type """ def __init__(self, cdf_file, attr_name, create=False): """Initialize this attribute @param cdf_file: CDF file containing this attribute @type cdf_file: :py:class:`pycdf.CDF` @param attr_name: Name of this attribute @type attr_name: str @param create: True to create attribute, False to look up existing. @type create: bool """ self._cdf_file = cdf_file self._raw = False if isinstance(attr_name, str_classes): try: self._name = attr_name.encode('ascii') except AttributeError: self._name = attr_name attrno = ctypes.c_long() if create: self._cdf_file._call(const.CREATE_, const.ATTR_, self._name, self.SCOPE, ctypes.byref(attrno)) self._cdf_file.add_attr_to_cache( self._name, attrno.value, self.SCOPE == const.GLOBAL_SCOPE) else: #Ensure exists, and populate cache. See scope note below attrno, scope = self._cdf_file.attr_num(self._name) else: name = ctypes.create_string_buffer(const.CDF_ATTR_NAME_LEN256 + 1) scope = ctypes.c_long(0) self._cdf_file._call(const.SELECT_, const.ATTR_, ctypes.c_long(attr_name)) #Because it's possible to create a gAttr Python objecting #referencing an Attribute with variable scope, and vice-versa, #do NOT assume the scope matches #(Higher level code checks for that being a bad thing.) self._cdf_file._call( const.GET_, const.ATTR_NAME_, name, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) self._name = name.value.rstrip() if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) self._cdf_file.add_attr_to_cache(self._name, attr_name, scope) def __getitem__(self, key): """Return a slice of Entries. Because Attributes may be sparse, a multi-element slice will return None for those elements which do not have associated Entries. @param key: index or range of Entry number to return @type key: slice or int @return: a list of entries, appropriate type. @raise IndexError: if L{key} is an int and that Entry number does not exist. 
""" if key is Ellipsis: key = slice(None, None, None) if hasattr(key, 'indices'): idx = range(*key.indices(self.max_idx() + 1)) return [self._get_entry(i) if self.has_entry(i) else None for i in idx] else: if self.has_entry(key): return self._get_entry(key) else: raise IndexError('list index ' + str(key) + ' out of range.') def _check_other_entries(self, types): """Try to get the type of this entry from others in the Attribute For zAttrs, checks if all other Entries are the same type, and at least one doesn't match its zVar, i.e. Entry type dominates (otherwise assumption is the Var type dominates). For gAttrs, checks all other Entries, and gives priority to the one that's earliest in the possible type list and exists in other Entries. This is only one component of Entry type guessing! :param list types: CDF types that are candidates (match the data) :return: The type discerned from other Entries, or None """ if self.ENTRY_ == const.zENTRY_: #If everything else is the same entry type, #and one is not the same as its var, probably #all entries should be of that type cand_et = None #The Entry type that might work one_var_diff = False #One Var has a type different from Entry for num in range(self.max_idx() + 1): if not self.has_entry(num): continue vartype = self._cdf_file[num].type() entrytype = self.type(num) if vartype != entrytype: one_var_diff = True if cand_et is None: if not entrytype in types: return None #One var has Entry with "impossible" type cand_et = entrytype elif cand_et != entrytype: return None #Two vars have Entries with different types if one_var_diff and cand_et is not None: return cand_et else: # Of those types which exist in other entries, # find the one which is earliest # in types, i.e. the preferred type entrytypes = [self.type(num) for num in range(self.max_idx() + 1) if self.has_entry(num)] entrytypes = [et for et in entrytypes if et in types] if entrytypes: return types[ min([types.index(et) for et in entrytypes])] return None def __setitem__(self, key, data): """Set a slice of Entries. @param key: index or range of Entry numbers to set @type key: slice or int @param data: the data to set these entries to. Normally each entry should be a sequence; if a scalar is provided, it is treated as a single-element list. @type data: scalar or list @raise ValueError: if size of {data} does not match size of L{key} @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. 
""" if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): #Single value, promote everything a dimension idx = (key, key + 1, 1) data = [data] else: idx = key.indices(self.max_idx() + 1) if key.step is None or key.step > 0: #Iterating forward, extend slice to match data if len(data) > len(range(*idx)): idx = (idx[0], idx[0] + idx[2] * len(data), idx[2]) #get, and check, types and sizes for all data #checks first so don't have error after changing half the Entries data_idx = -1 typelist = [] for i in range(*idx): data_idx += 1 if data_idx >= len(data): continue datum = data[data_idx] if datum is None: typelist[i] = (None, None, None) continue (dims, types, elements) = _Hyperslice.types( datum, backward=self._cdf_file.backward) if len(types) <= 0: raise ValueError('Cannot find a matching CDF type.') if len(dims) > 1: raise ValueError('Entries must be scalar or 1D.') elif len(dims) == 1 and isinstance(datum[0], str_classes): raise ValueError('Entry strings must be scalar.') entry_type = None if self.has_entry(i): #If the entry already exists, match its type entry_type = self.type(i) if not entry_type in types: entry_type = None if entry_type is None: #Check other entries for this attribute entry_type = self._check_other_entries(types) if entry_type is None and self.ENTRY_ == const.zENTRY_: #Fall back to zVar type vartype = self._cdf_file[i].type() if vartype in types: entry_type = vartype else: entry_type = types[0] elif entry_type is None: entry_type = types[0] if not entry_type in lib.numpytypedict: raise ValueError('Cannot find a matching numpy type.') typelist.append((dims, entry_type, elements)) data_idx = -1 for i in range(*idx): data_idx += 1 if data_idx >= len(data) or data[data_idx] is None: if self.has_entry(i): del self[i] continue datum = data[data_idx] (dims, entry_type, elements) = typelist[data_idx] self._write_entry(i, datum, entry_type, dims, elements) def __delitem__(self, key): """Delete a slice of Entries. @param key: index or range of Entry numbers to delete @type key: slice or int @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. """ if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): idx = (key, key + 1, 1) else: idx = key.indices(self.max_idx() + 1) for i in range(*idx): self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(i), const.DELETE_, self.ENTRY_) def __iter__(self, current=0): """Iterates over all entries in this Attribute Returns data from one entry at a time until reaches the end. @note: Returned in entry-number order. """ while current <= self.max_idx(): if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current += 1 def __reversed__(self, current=None): """Iterates over all entries in this Attribute Returns data from one entry at a time, starting at end and going to beginning. @note: Returned in entry-number order. """ if current is None: current = self.max_idx() while current >= 0: if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current -= 1 def __len__(self): """Number of Entries for this Attr. NOT same as max Entry number. 
@return: Number of Entries @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_NUMENTRIES_, ctypes.byref(count)) return count.value def __repr__(self): """Returns representation of an attribute Cannot return anything that can be eval'd to create a copy of the attribtute, so just wrap the informal representation in angle brackets. @return: all the data in this attribute @rtype: str """ return '<\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute This is an 'informal' representation in that it cannot be evaluated directly to create an L{Attr}. @return: all the data in this attribute @rtype: str """ if self._cdf_file._opened: return '\n'.join([str(item) for item in self]) else: if isinstance(self._name, str): return 'Attribute "{0}" in closed CDF {1}'.format( self._name, self._cdf_file.pathname) else: return 'Attribute "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self._cdf_file.pathname.decode('ascii')) def insert(self, index, data): """Insert an entry at a particular number Inserts entry at particular number while moving all subsequent entries to one entry number later. Does not close gaps. Parameters ========== index : int index where to put the new entry data : data for the new entry """ max_entry = self.max_idx() if index > max_entry: #Easy case self[index] = data return for i in range(max_entry, index - 1, -1): if self.has_entry(i+1): self.__delitem__(i+1) if self.has_entry(i): self.new(self.__getitem__(i), type=self.type(i), number=i+1) self[index] = data def append(self, data): """Add an entry to end of attribute Puts entry after last defined entry (does not fill gaps) Parameters ========== data : data for the new entry """ self[self.max_idx() + 1] = data def _call(self, *args, **kwargs): """Select this CDF and Attr and call the CDF internal interface @param args: Passed directly to the CDF library interface. @type args: various, see :py:mod:`ctypes`. @return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self._cdf_file._call( const.SELECT_, const.ATTR_, ctypes.c_long(self._cdf_file.attr_num(self._name)[0]), *args, **kwargs) def _entry_len(self, number): """Number of elements in an Entry @param number: number of Entry @type number: int @return: number of elements @rtype: int """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') count = ctypes.c_long(0) self._call( const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_NUMELEMS_, ctypes.byref(count)) return count.value def type(self, number, new_type=None): """Find or change the CDF type of a particular Entry number Parameters ========== number : int number of Entry to check or change Other Parameters ================ new_type type to change this Entry to, from :mod:`~pycdf.const`. Omit to only check type. Returns ======= out : int CDF variable type, see :mod:`~pycdf.const` Notes ===== If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) size = ctypes.c_long(self._entry_len(number)) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATASPEC_, new_type, size, ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') cdftype = ctypes.c_long(0) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATATYPE_, ctypes.byref(cdftype), ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') return cdftype.value def has_entry(self, number): """Check if this attribute has a particular Entry number Parameters ========== number : int number of Entry to check or change Returns ======= out : bool True if ``number`` is a valid entry number; False if not """ status = self._call(const.CONFIRM_, self.ENTRY_EXISTENCE_, ctypes.c_long(number), ignore=(const.NO_SUCH_ENTRY, )) return not status == const.NO_SUCH_ENTRY def max_idx(self): """Maximum index of Entries for this Attr Returns ======= out : int maximum Entry number """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_MAXENTRY_, ctypes.byref(count)) return count.value def new(self, data, type=None, number=None): """Create a new Entry in this Attribute .. note:: If ``number`` is provided and an Entry with that number already exists, it will be overwritten. Parameters ========== data data to put in the Entry Other Parameters ================ type : int type of the new Entry, from :mod:`~pycdf.const` (otherwise guessed from ``data``) number : int Entry number to write, default is lowest available number. """ if number is None: number = 0 while self.has_entry(number): number += 1 (dims, types, elements) = _Hyperslice.types( data, backward=self._cdf_file.backward) if type is None: #Guess based on other entries type = self._check_other_entries(types) if type is None and self.ENTRY_ == const.zENTRY_: #Try to match variable type vartype = self._cdf_file[number].type() if vartype in types: type = vartype if type is None: type = types[0] elif hasattr(type, 'value'): type = type.value self._write_entry(number, data, type, dims, elements) def number(self): """Find the attribute number for this attribute Returns ======= out : int attribute number """ no = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.ATTR_NUMBER_, self._name, ctypes.byref(no)) return no.value def global_scope(self): """Determine scope of this attribute. Returns ======= out : bool True if global (i.e. gAttr), False if zAttr """ return self._cdf_file.attr_num(self._name)[1] def rename(self, new_name): """Rename this attribute Renaming a zAttribute renames it for *all* zVariables in this CDF! 
Parameters ========== new_name : str the new name of the attribute """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_ATTR_NAME_LEN256: raise CDFError(const.BAD_ATTR_NAME) self._call(const.PUT_, const.ATTR_NAME_, enc_name) self._cdf_file.add_attr_to_cache( enc_name, *self._cdf_file.attr_num(self._name)) #still in cache del self._cdf_file._attr_info[self._name] self._name = enc_name def _get_entry(self, number): """Read an Entry associated with this L{Attr} @param number: number of Entry to return @type number: int @return: data from entry numbered L{number} @rtype: list or str """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') #Make a big enough buffer length = self._entry_len(number) cdftype = self.type(number) if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): buff = numpy.empty((), 'S{0}'.format(length), order='C') else: if not cdftype in lib.numpytypedict: raise CDFError(const.BAD_DATA_TYPE) buff = numpy.empty((length,), lib.numpytypedict[cdftype], order='C') buff = numpy.require(buff, requirements=('C', 'A', 'W')) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATA_, buff.ctypes.data_as(ctypes.c_void_p)) #decode if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): if str == bytes or self._raw: #Py2k, leave as bytes result = bytes(buff) else: #Py3k, make unicode result = str(numpy.char.array(buff).decode()) else: if not self._raw: if cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(buff) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(buff) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(buff) else: result = buff else: result = buff if length == 1: result = result[0] return result def _write_entry(self, number, data, cdf_type, dims, elements): """Write an Entry to this Attr. @param number: number of Entry to write @type number: int @param data: data to write @param cdf_type: the CDF type to write, from :py:mod:`pycdf.const` @param dims: dimensions of L{data} @type dims: list @param elements: number of elements in L{data}, 1 unless it is a string @type elements: int """ if len(dims) == 0: n_write = 1 else: n_write = dims[0] if cdf_type in (const.CDF_CHAR.value, const.CDF_UCHAR.value): data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.dtype('S' + str(elements))) n_write = elements elif cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data), except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) elif cdf_type in lib.numpytypedict: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=lib.numpytypedict[cdf_type]) else: raise CDFError(const.BAD_DATA_TYPE) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATA_, ctypes.c_long(cdf_type), ctypes.c_long(n_write), data.ctypes.data_as(ctypes.c_void_p)) def _delete(self): """Delete this Attribute Also deletes all Entries associated with it. 
""" self._call(const.DELETE_, const.ATTR_) self._cdf_file.clear_attr_from_cache(self._name) self._name = None class zAttr(Attr): """zAttribute for zVariables within a CDF. .. warning:: Because zAttributes are shared across all variables in a CDF, directly manipulating them may have unexpected consequences. It is safest to operate on zEntries via :class:`zAttrList`. .. note:: When accessing a zAttr, pyCDF exposes only the zEntry corresponding to the associated zVariable. See Also ======== :class:`Attr` """ ENTRY_ = const.zENTRY_ ENTRY_DATA_ = const.zENTRY_DATA_ SCOPE = const.VARIABLE_SCOPE ENTRY_EXISTENCE_ = const.zENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMzENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXzENTRY_ ENTRY_NUMELEMS_ = const.zENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.zENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.zENTRY_DATASPEC_ def insert(self, index, data): """Insert entry at particular index number Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError def append(self, index, data): """Add entry to end of attribute list Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError class gAttr(Attr): """Global Attribute for a CDF Represents a CDF attribute, providing access to the gEntries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. Normally accessed by providing a key to a :class:`gAttrList`: >>> attribute = cdffile.attrs['attribute_name'] >>> first_gentry = attribute[0] Each element of the list is a single gEntry of the appropriate type. The index to the elements is the gEntry number. A gEntry may be either a single string or a 1D array of numerical type. Entries of numerical type (everything but CDF_CHAR and CDF_UCHAR) with a single element are returned as scalars; multiple-element entries are returned as a list. No provision is made for accessing below the entry level; the whole list is returned at once (but Python's slicing syntax can be used to extract individual items from that list.) Multi-dimensional slicing is *not* supported; an entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry gEntries are *not* necessarily contiguous; a gAttribute may have an entry 0 and entry 2 without an entry 1. :meth:`~Attr.len` will return the *number* of gEntries; use :meth:`~Attr.max_idx` to find the highest defined gEntry number and :meth:`~Attr.has_entry` to determine if a particular gEntry number exists. Iterating over all entries is also supported:: >>> entrylist = [entry for entry in attribute] Deleting gEntries will leave a "hole": >>> attribute[0:3] = [1, 2, 3] >>> del attribute[1] >>> attribute.has_entry(1) False >>> attribute.has_entry(2) True >>> print attribute[0:3] [1, None, 3] Multi-element slices over nonexistent gEntries will return ``None`` where no entry exists. Single-element indices for nonexistent gEntries will raise ``IndexError``. Assigning ``None`` to a gEntry will delete it. 
When assigning to a gEntry, the type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing gEntry of the same number in this gAttribute #. other gEntries in this gAttribute #. data-matching constraints described in :meth:`CDF.new`. See Also ======== :class:`Attr` """ ENTRY_ = const.gENTRY_ ENTRY_DATA_ = const.gENTRY_DATA_ SCOPE = const.GLOBAL_SCOPE ENTRY_EXISTENCE_ = const.gENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMgENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXgENTRY_ ENTRY_NUMELEMS_ = const.gENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.gENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.gENTRY_DATASPEC_ class AttrList(MutableMapping): """Object representing a list of attributes. .. warning:: This class should not be used directly, but only via its subclasses, :class:`gAttrList` and :class:`zAttrList`. Methods listed here are safe to use from the subclasses. .. autosummary:: ~AttrList.clone ~AttrList.copy ~AttrList.from_dict ~AttrList.new ~AttrList.rename .. automethod:: clone .. automethod:: copy .. automethod:: from_dict .. automethod:: new .. automethod:: rename """ def __init__(self, cdf_file, special_entry=None): """Initialize the attribute collection @param cdf_file: CDF these attributes are in @type cdf_file: :py:class:`pycdf.CDF` @param special_entry: callable which returns a "special" entry number, used to limit results for zAttrs to those which match the zVar (i.e. the var number) @type special_entry: callable """ self._cdf_file = cdf_file self.special_entry = special_entry def __getitem__(self, name): """Find an Attribute by name @param name: name of the Attribute to return @type name: str @return: attribute named L{name} @rtype: L{Attr} @raise KeyError: if there is no attribute named L{name} @raise CDFError: other errors in CDF library """ try: attrib = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attrib.global_scope() != self.global_scope: raise KeyError(name + ': no ' + self.attr_name + ' by that name.') return attrib def __setitem__(self, name, data): """Create an Attribute or change its entries @param name: name of Attribute to change @type name: str @param data: Entries to populate this Attribute with. Any existing Entries will be deleted! Another C{Attr} may be specified, in which case all its entries are copied. @type data: scalar, list, or L{Attr} """ if isinstance(data, AttrList): if name in self: del self[name] attr = self._get_or_create(name) for entryno in range(data.max_idx()): if data.has_entry(entryno): attr.new(data[entryno], data.type(entryno), entryno) else: attr = self._get_or_create(name) if isinstance(data, str_classes): data = [data] else: try: junk = len(data) except TypeError: data = [data] attr[:] = data del attr[len(data):] def __delitem__(self, name): """Delete an Attribute (and all its entries) @param name: name of Attribute to delete @type name: str """ try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) attr._delete() def __iter__(self, current=0): """Iterates over all Attr in this CDF or variable Returns name of one L{Attr} at a time until reaches the end. @note: Returned in number order. 
""" count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) while current < count.value: candidate = self.AttrType(self._cdf_file, current) if candidate.global_scope() == self.global_scope: if self.special_entry is None or \ candidate.has_entry(self.special_entry()): if str == bytes: value = yield(candidate._name) else: value = yield(candidate._name.decode()) if value != None: current = self[value].number() current += 1 def __repr__(self): """Returns representation of attribute list Cannot return anything that can be eval'd to create a copy of the list, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<' + self.__class__.__name__ + ':\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute list This is an 'informal' representation in that it cannot be evaluated directly to create an L{AttrList}. @return: all the data in this list of attributes @rtype: str """ if self._cdf_file._opened: return '\n'.join([key + ': ' + ( ('\n' + ' ' * (len(key) + 2)).join( [str(value[i]) + ' [' + lib.cdftypenames[value.type(i)] + ']' for i in range(value.max_idx() + 1) if value.has_entry(i)]) if isinstance(value, Attr) else str(value) + ' [' + lib.cdftypenames[self.type(key)] + ']' ) for (key, value) in sorted(self.items())]) else: if isinstance(self._cdf_file.pathname, str): return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname) else: return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname.decode('ascii')) def clone(self, master, name=None, new_name=None): """ Clones another attribute list, or one attribute from it, into this list. Parameters ========== master : AttrList the attribute list to copy from. This can be any dict-like object. Other Parameters ================ name : str (optional) name of attribute to clone (default: clone entire list) new_name : str (optional) name of the new attribute, default ``name`` """ if name is None: self._clone_list(master) else: self._clone_attr(master, name, new_name) def copy(self): """ Create a copy of this attribute list Returns ======= out : dict copy of the entries for all attributes in this list """ return dict((key, value[:] if isinstance(value, Attr) else value) for (key, value) in self.items()) def new(self, name, data=None, type=None): """ Create a new Attr in this AttrList Parameters ========== name : str name of the new Attribute Other Parameters ================ data data to put into the first entry in the new Attribute type CDF type of the first entry from :mod:`~pycdf.const`. Only used if data are specified. Raises ====== KeyError : if the name already exists in this list """ if name in self: raise KeyError(name + ' already exists.') #A zAttr without an Entry in this zVar will be a "get" not "create" attr = self._get_or_create(name) if data is not None: if self.special_entry is None: attr.new(data, type) else: attr.new(data, type, self.special_entry()) def rename(self, old_name, new_name): """ Rename an attribute in this list Renaming a zAttribute renames it for *all* zVariables in this CDF! Parameters ========== old_name : str the current name of the attribute new_name : str the new name of the attribute """ AttrList.__getitem__(self, old_name).rename(new_name) def from_dict(self, in_dict): """ Fill this list of attributes from a dictionary .. deprecated:: 0.1.5 Use :meth:`~pycdf.AttrList.clone` instead; it supports cloning from dictionaries. 
Parameters ========== in_dict : dict Attribute list is populated entirely from this dictionary; all existing attributes are deleted. """ warnings.warn("from_dict is deprecated and will be removed. Use clone.", DeprecationWarning) for k in in_dict: self[k] = in_dict[k] for k in list(self): if not k in in_dict: del self[k] def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{AttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name self[new_name] = master[name] def _clone_list(self, master): """Clones this attribute list from another @param master: the attribute list to copy from @type master: L{AttrList} """ for name in master: self._clone_attr(master, name) for name in list(self): #Can't iterate over a list we're changing if not name in master: del self[name] def _get_or_create(self, name): """Retrieve L{Attr} or create it if it doesn't exist @param name: name of the attribute to look up or create @type name: str @return: attribute with this name @rtype: L{Attr} """ attr = None try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status != const.NO_SUCH_ATTR: raise if attr is None: attr = self.AttrType(self._cdf_file, name, True) elif attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) return attr class gAttrList(AttrList): """ Object representing *all* the gAttributes in a CDF. Normally accessed as an attribute of an open :class:`CDF`: >>> global_attribs = cdffile.attrs Appears as a dictionary: keys are attribute names; each value is an attribute represented by a :class:`gAttr` object. To access the global attribute TEXT: >>> text_attr = cdffile.attrs['TEXT'] See Also ======== :class:`AttrList` """ AttrType = gAttr attr_name = 'gAttribute' global_scope = True def __len__(self): """ Number of gAttributes in this CDF Returns ======= out : int number of gAttributes in the CDF """ count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMgATTRS_, ctypes.byref(count)) return count.value class zAttrList(AttrList): """Object representing *all* the zAttributes in a zVariable. Normally accessed as an attribute of a :class:`Var` in an open CDF: >>> epoch_attribs = cdffile['Epoch'].attrs Appears as a dictionary: keys are attribute names, values are the value of the zEntry associated with the appropriate zVariable. Each vAttribute in a CDF may only have a *single* entry associated with each variable. The entry may be a string, a single numerical value, or a series of numerical values. Entries with multiple values are returned as an entire list; direct access to the individual elements is not possible. Example: finding the first dependency of (ISTP-compliant) variable ``Flux``: >>> print cdffile['Flux'].attrs['DEPEND_0'] zAttributes are shared among zVariables, one zEntry allowed per zVariable. (pyCDF hides this detail.) Deleting the last zEntry for a zAttribute will delete the underlying zAttribute. zEntries are created and destroyed by the usual dict methods on the zAttrlist: >>> epoch_attribs['new_entry'] = [1, 2, 4] #assign a list to new zEntry >>> del epoch_attribs['new_entry'] #delete the zEntry The type of the zEntry is guessed from data provided. 
The type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing zEntry corresponding to this zVar #. other zEntries in this zAttribute #. the type of this zVar #. data-matching constraints described in :py:meth:`CDF.new` See Also ======== :class:`AttrList` """ AttrType = zAttr attr_name = 'zAttribute' global_scope = False def __init__(self, zvar): """Initialize the attribute collection @param zvar: zVariable these attributes are in @param zvar: :py:class:`pycdf.Var` """ super(zAttrList, self).__init__(zvar.cdf_file, zvar._num) self._zvar = zvar def __getitem__(self, name): """Find an zEntry by name @param name: name of the zAttribute to return @type name: str @return: attribute named L{name} @rtype: L{zAttr} @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if attrib.has_entry(zvar_num): attrib._raw = self._zvar._raw return attrib[zvar_num] else: raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) def __delitem__(self, name): """Delete an zEntry by name @param name: name of the zEntry to delete @type name: str @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library @note: If this is the only remaining entry, the Attribute will be deleted. """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(str(name) + ': no such attribute for variable ' + str(self._zvar._name)) del attrib[zvar_num] if len(attrib) == 0: attrib._delete() def __setitem__(self, name, data): """Sets a zEntry by name The type of the zEntry is guessed from L{data}. The type is chosen to match the data; subject to that constraint, it will try to match (in order): 1. existing zEntry corresponding to this zVar 2. other zEntries in this zAttribute 3. the type of this zVar 4. data-matching constraints described in L{_Hyperslice.types} @param name: name of zAttribute; zEntry for this zVariable will be set in zAttribute by this name @type name: str @raise CDFError: errors in CDF library @raise ValueError: if unable to find a valid CDF type matching L{data}, or if L{data} is the wrong dimensions. """ try: attr = super(zAttrList, self).__getitem__(name) except KeyError: attr = zAttr(self._cdf_file, name, True) attr._raw = self._zvar._raw attr[self._zvar._num()] = data def __len__(self): """Number of zAttributes in this variable @return: number of zAttributes in the CDF which have entries for this variable. @rtype: int """ length = 0 count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) current = 0 while current < count.value: candidate = zAttr(self._cdf_file, current) if not candidate.global_scope(): if candidate.has_entry(self._zvar._num()): length += 1 current += 1 return length def type(self, name, new_type=None): """Find or change the CDF type of a zEntry in this zVar @param name: name of the zAttr to check or change @type name: str @param new_type: type to change it to, see :py:mod:`pycdf.const` @type new_type: ctypes.c_long @return: CDF variable type, see :py:mod:`pycdf.const` @rtype: int @note: If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) return attrib.type(zvar_num, new_type) def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{zAttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name if new_name in self: del self[new_name] self.new(new_name, master[name], master.type(name) if hasattr(master, 'type') else None)
    def _create(self, var_name, datatype, n_elements = 1, dims = (),
                recVary = const.VARY, dimVarys = None):
        """Creates a new zVariable

        @param var_name: name of this variable
        @type var_name: string
        @param datatype: CDF data type
        @type datatype: ctypes.c_long
        @param n_elements: number of elements (should be 1 except for
                           CDF_CHAR variables).
        @type n_elements: long
        @param dims: size of each dimension for multi-dimensional variable,
                     or empty for a zero-dimensional
        @type dims: sequence of long
        @param recVary: record variance for this variable (VARY/NOVARY)
        @type recVary: long
        @param dimVarys: array of VARY or NOVARY, variance for each dimension
        @type dimVarys: sequence of long
        @return: new variable with this name
        @rtype: :py:class:`pycdf.Var`
        @raise CDFError: if CDF library reports an error
        @raise CDFWarning: if CDF library reports a warning and interpreter
                           is set to error on warnings.
        @note: Not intended to be used directly; use L{CDF.new}.
        """
        dim_array = (ctypes.c_long * len(dims))(*dims)
        enc_name = var_name.encode('ascii')
        if dimVarys is None:
            dim_vary_array = (ctypes.c_long *
                              (len(dims) if len(dims) > 0 else 1))(const.VARY)
        else:
            dim_vary_array = (ctypes.c_long * len(dims))(*dimVarys)
        varNum = ctypes.c_long(0)
        self.cdf_file._call(const.CREATE_, const.zVAR_, enc_name, datatype,
                            ctypes.c_long(n_elements), ctypes.c_long(len(dims)),
                            dim_array, recVary, dim_vary_array,
                            ctypes.byref(varNum))
        self._name = enc_name
        self.cdf_file.add_to_cache(enc_name, varNum.value)
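# Hedged example (not from the pycdf sources) of the public route to _create:
# CDF.new() picks the CDF type and dimensions from its arguments and calls
# _create internally. The package/const import, file name, variable name and
# the explicit const.CDF_DOUBLE choice are illustrative assumptions.
import pycdf
from pycdf import const

with pycdf.CDF('newfile.cdf', '') as cdffile:        # create a new, empty CDF
    # 2 records of 3 elements each; explicit type instead of letting
    # pycdf guess it from the data
    var = cdffile.new('B_GSE', data=[[1., 2., 3.], [4., 5., 6.]],
                      type=const.CDF_DOUBLE)
    print(var[...])                                  # read all records back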
3013
3050
#!/usr/bin/env python # -*- coding: utf-8 -*- """ das developers note: This a is modification of the original SpacePy pycdf package. All refereneces to the greater spacepy package have been removed to create a small standalone module. --cwp 2018-10-18 The libcdf.so location code has been changed to find the version installed in anaconda. --cwp 2020-04-06 This package provides a Python interface to the Common Data Format (CDF) library used for many NASA missions, available at http://cdf.gsfc.nasa.gov/. It is targeted at Python 2.6+ and should work without change on either Python 2 or Python 3. The interface is intended to be 'pythonic' rather than reproducing the C interface. To open or close a CDF and access its variables, see the :class:`CDF` class. Accessing data within the variables is via the :class:`Var` class. The :data:`lib` object provides access to some routines that affect the functionality of the library in general. The :mod:`~pycdf.const` module contains constants useful for accessing the underlying library. Authors: Jon Niehof Institution: University of New Hampshire Contact: [email protected] Copyright 2010-2015 Los Alamos National Security, LLC. """ __contact__ = 'Jon Niehof, [email protected]' try: from collections.abc import MutableMapping, MutableSequence except ImportError: from collections import MutableMapping, MutableSequence import ctypes import ctypes.util import datetime import operator import os import os.path import shutil import sys import tempfile import warnings import weakref import numpy import numpy.ma #Import const AFTER library loaded, so failed load doesn't leave half-imported #from . import const try: str_classes = (str, bytes, unicode) except NameError: str_classes = (str, bytes) class Library(object): """ Abstraction of the base CDF C library and its state. Not normally intended for end-user use. An instance of this class is created at package load time as the :data:`~pycdf.lib` variable, providing access to the underlying C library if necessary. The CDF library itself is described in section 2.1 of the CDF user's guide, as well as the CDF C reference manual. Calling the C library directly requires knowledge of :mod:`ctypes`. Instantiating this object loads the C library, see :doc:`/pycdf` docs for details. .. autosummary:: ~Library.call ~Library.check_status ~Library.datetime_to_epoch ~Library.datetime_to_epoch16 ~Library.datetime_to_tt2000 ~Library.epoch_to_datetime ~Library.epoch_to_epoch16 ~Library.epoch_to_num ~Library.epoch_to_tt2000 ~Library.epoch16_to_datetime ~Library.epoch16_to_epoch ~Library.epoch16_to_tt2000 ~Library.set_backward supports_int8 ~Library.tt2000_to_datetime ~Library.tt2000_to_epoch ~Library.tt2000_to_epoch16 v_datetime_to_epoch v_datetime_to_epoch16 v_datetime_to_tt2000 v_epoch_to_datetime v_epoch_to_tt2000 v_epoch16_to_datetime v_epoch16_to_tt2000 v_tt2000_to_datetime v_tt2000_to_epoch v_tt2000_to_epoch16 libpath version .. automethod:: call .. automethod:: check_status .. automethod:: datetime_to_epoch .. automethod:: datetime_to_epoch16 .. automethod:: datetime_to_tt2000 .. automethod:: epoch_to_datetime .. automethod:: epoch_to_epoch16 .. automethod:: epoch_to_num .. automethod:: epoch_to_tt2000 .. automethod:: epoch16_to_datetime .. automethod:: epoch16_to_epoch .. automethod:: epoch16_to_tt2000 .. automethod:: set_backward .. attribute:: supports_int8 True if this library supports INT8 and TIME_TT2000 types; else False. .. automethod:: tt2000_to_datetime .. automethod:: tt2000_to_epoch .. 
automethod:: tt2000_to_epoch16 .. method:: v_datetime_to_epoch(datetime) A vectorized version of :meth:`datetime_to_epoch` which takes a numpy array of datetimes as input and returns an array of epochs. .. method:: v_datetime_to_epoch16(datetime) A vectorized version of :meth:`datetime_to_epoch16` which takes a numpy array of datetimes as input and returns an array of epoch16. .. method:: v_datetime_to_tt2000(datetime) A vectorized version of :meth:`datetime_to_tt2000` which takes a numpy array of datetimes as input and returns an array of TT2000. .. method:: v_epoch_to_datetime(epoch) A vectorized version of :meth:`epoch_to_datetime` which takes a numpy array of epochs as input and returns an array of datetimes. .. method:: v_epoch_to_tt2000(epoch) A vectorized version of :meth:`epoch_to_tt2000` which takes a numpy array of epochs as input and returns an array of tt2000s. .. method:: v_epoch16_to_datetime(epoch0, epoch1) A vectorized version of :meth:`epoch16_to_datetime` which takes a numpy array of epoch16 as input and returns an array of datetimes. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_epoch16_to_tt2000(epoch16) A vectorized version of :meth:`epoch16_to_tt2000` which takes a numpy array of epoch16 as input and returns an array of tt2000s. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_tt2000_to_datetime(tt2000) A vectorized version of :meth:`tt2000_to_datetime` which takes a numpy array of tt2000 as input and returns an array of datetimes. .. method:: v_tt2000_to_epoch(tt2000) A vectorized version of :meth:`tt2000_to_epoch` which takes a numpy array of tt2000 as input and returns an array of epochs. .. method:: v_tt2000_to_epoch16(tt2000) A vectorized version of :meth:`tt2000_to_epoch16` which takes a numpy array of tt2000 as input and returns an array of epoch16. .. attribute:: libpath The path where pycdf found the CDF C library, potentially useful in debugging. If this contains just the name of a file (with no path information), then the system linker found the library for pycdf. On Linux, ``ldconfig -p`` may be useful for displaying the system's library resolution. .. attribute:: version Version of the CDF library, (version, release, increment, subincrement) """ def __init__(self, libpath=None, library=None): """Load the CDF C library. Searches for the library in the order: 1. Appropriately-named file in CDF_LIB 2. Appropriately-named file in CDF_BASE 3. Standard library search path @raise CDFError: BAD_DATA_TYPE if can't map types properly """ if not 'CDF_TMP' in os.environ: os.environ['CDF_TMP'] = tempfile.gettempdir() if not library: if not libpath: self.libpath, self._library = self._find_lib() if self._library is None: raise Exception(( 'Cannot load CDF C library; checked {0}. 
' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(self.libpath))) else: self._library = ctypes.CDLL(libpath) self.libpath = libpath else: self._library = library self.libpath = libpath self._library.CDFlib.restype = ctypes.c_long #commonly used, so set it up here self._library.EPOCHbreakdown.restype = ctypes.c_long self._library.computeEPOCH.restype = ctypes.c_double self._library.computeEPOCH.argtypes = [ctypes.c_long] * 7 self._library.computeEPOCH16.restype = ctypes.c_double self._library.computeEPOCH16.argtypes = [ctypes.c_long] * 10 + \ [ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDFsetFileBackward'): self._library.CDFsetFileBackward.restype = None self._library.CDFsetFileBackward.argtypes = [ctypes.c_long] #Map old name to the 3.7.1+ name if not hasattr(self._library, 'computeTT2000') \ and hasattr(self._library, 'CDF_TT2000_from_UTC_parts'): self._library.computeTT2000 \ = self._library.CDF_TT2000_from_UTC_parts if hasattr(self._library, 'computeTT2000'): self._library.computeTT2000.restype = ctypes.c_longlong self._library.computeTT2000.argtypes = \ [ctypes.c_double] *9 #Map old name to the 3.7.1+ name if not hasattr(self._library, 'breakdownTT2000') \ and hasattr(self._library, 'CDF_TT2000_to_UTC_parts'): self._library.breakdownTT2000 \ = self._library.CDF_TT2000_to_UTC_parts if hasattr(self._library, 'breakdownTT2000'): self._library.breakdownTT2000.restype = None self._library.breakdownTT2000.argtypes = \ [ctypes.c_longlong] + [ctypes.POINTER(ctypes.c_double)] * 9 if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH'): self._library.CDF_TT2000_to_UTC_EPOCH.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH.argtypes = [ctypes.c_longlong] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH'): self._library.CDF_TT2000_from_UTC_EPOCH.restype = ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH.argtypes = [ctypes.c_double] if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH16'): self._library.CDF_TT2000_to_UTC_EPOCH16.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH16.argtypes = \ [ctypes.c_longlong, ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH16'): self._library.CDF_TT2000_from_UTC_EPOCH16.restype = \ ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH16.argtypes = \ [ctypes.POINTER(ctypes.c_double * 2)] #Get CDF version information ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) sub = ctypes.c_char(b' ') self.call(const.GET_, const.LIB_VERSION_, ctypes.byref(ver), const.GET_, const.LIB_RELEASE_, ctypes.byref(rel), const.GET_, const.LIB_INCREMENT_, ctypes.byref(inc), const.GET_, const.LIB_subINCREMENT_, ctypes.byref(sub)) ver = ver.value rel = rel.value inc = inc.value sub = sub.value self.version = (ver, rel, inc, sub) self._del_middle_rec_bug = ver < 3 or (ver == 3 and (rel < 4 or (rel == 4 and inc < 1))) self.supports_int8 = (ver > 3 or (ver == 3 and rel >=4)) self.cdftypenames = {const.CDF_BYTE.value: 'CDF_BYTE', const.CDF_CHAR.value: 'CDF_CHAR', const.CDF_INT1.value: 'CDF_INT1', const.CDF_UCHAR.value: 'CDF_UCHAR', const.CDF_UINT1.value: 'CDF_UINT1', const.CDF_INT2.value: 'CDF_INT2', const.CDF_UINT2.value: 'CDF_UINT2', const.CDF_INT4.value: 'CDF_INT4', const.CDF_UINT4.value: 'CDF_UINT4', const.CDF_INT8.value: 'CDF_INT8', const.CDF_FLOAT.value: 'CDF_FLOAT', const.CDF_REAL4.value: 'CDF_REAL4', const.CDF_DOUBLE.value: 'CDF_DOUBLE', const.CDF_REAL8.value: 'CDF_REAL8', const.CDF_EPOCH.value: 'CDF_EPOCH', 
const.CDF_EPOCH16.value: 'CDF_EPOCH16', const.CDF_TIME_TT2000.value: 'CDF_TIME_TT2000', } self.numpytypedict = {const.CDF_BYTE.value: numpy.int8, const.CDF_CHAR.value: numpy.int8, const.CDF_INT1.value: numpy.int8, const.CDF_UCHAR.value: numpy.uint8, const.CDF_UINT1.value: numpy.uint8, const.CDF_INT2.value: numpy.int16, const.CDF_UINT2.value: numpy.uint16, const.CDF_INT4.value: numpy.int32, const.CDF_UINT4.value: numpy.uint32, const.CDF_INT8.value: numpy.int64, const.CDF_FLOAT.value: numpy.float32, const.CDF_REAL4.value: numpy.float32, const.CDF_DOUBLE.value: numpy.float64, const.CDF_REAL8.value: numpy.float64, const.CDF_EPOCH.value: numpy.float64, const.CDF_EPOCH16.value: numpy.dtype((numpy.float64, 2)), const.CDF_TIME_TT2000.value: numpy.int64, } self.timetypes = [const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value] if not self.supports_int8: del self.cdftypenames[const.CDF_INT8.value] del self.numpytypedict[const.CDF_INT8.value] del self.cdftypenames[const.CDF_TIME_TT2000.value] del self.numpytypedict[const.CDF_TIME_TT2000.value] elif sys.platform.startswith('linux') \ and os.uname()[4].startswith('arm') \ and hasattr(self._library, 'computeTT2000') \ and self._library.computeTT2000( 2010, 1, 1, 0, 0, 0, 0, 0, 0) != 315576066184000000: #TT2000 call failed, so probably need to type-pun #double arguments to variadic functions. #Calling convention for non-variadic functions with floats #is unique, but convention for ints is same as variadic. #So type-pun arguments to integers to force that calling #convention. if ctypes.sizeof(ctypes.c_longlong) != \ ctypes.sizeof(ctypes.c_double): warnings.warn('ARM with unknown type sizes; ' 'TT2000 functions will not work.') else: self._library.computeTT2000.argtypes = \ [ctypes.c_longlong] * 9 c_ll_p = ctypes.POINTER(ctypes.c_longlong) if self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( 2010)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) != 315576066184000000: warnings.warn('ARM with unknown calling convention; ' 'TT2000 functions will not work.') self.datetime_to_tt2000 = self._datetime_to_tt2000_typepunned v_epoch16_to_datetime = numpy.frompyfunc( self.epoch16_to_datetime, 2, 1) self.v_epoch16_to_datetime = \ lambda x: v_epoch16_to_datetime(x[..., 0], x[..., 1]) self.v_epoch_to_datetime = numpy.frompyfunc( self.epoch_to_datetime, 1, 1) self.v_tt2000_to_datetime = numpy.frompyfunc( self.tt2000_to_datetime, 1, 1) self.v_datetime_to_epoch = numpy.vectorize( self.datetime_to_epoch, otypes=[numpy.float64]) v_datetime_to_epoch16 = numpy.frompyfunc( self.datetime_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. 
We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_datetime_to_epoch16(x): retval = numpy.require(v_datetime_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_datetime_to_epoch16 = _v_datetime_to_epoch16 self.v_datetime_to_tt2000 = numpy.vectorize( self.datetime_to_tt2000, otypes=[numpy.int64]) self.v_epoch_to_tt2000 = numpy.vectorize( self.epoch_to_tt2000, otypes=[numpy.int64]) self.v_tt2000_to_epoch = numpy.vectorize( self.tt2000_to_epoch, otypes=[numpy.float64]) v_epoch16_to_tt2000 = numpy.frompyfunc( self.epoch16_to_tt2000, 2, 1) self.v_epoch16_to_tt2000 = \ lambda x: v_epoch16_to_tt2000(x[..., 0], x[..., 1]) v_tt2000_to_epoch16 = numpy.frompyfunc( self.tt2000_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_tt2000_to_epoch16(x): retval = numpy.require(v_tt2000_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_tt2000_to_epoch16 = _v_tt2000_to_epoch16 if not self.supports_int8: self.datetime_to_tt2000 = self._bad_tt2000 self.tt2000_to_datetime = self._bad_tt2000 self.v_datetime_to_tt2000 = self._bad_tt2000 self.v_tt2000_to_datetime = self._bad_tt2000 self.epoch_to_tt2000 = self._bad_tt2000 self.v_epoch_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch = self._bad_tt2000 self.v_tt2000_to_epoch = self._bad_tt2000 self.epoch_16_to_tt2000 = self._bad_tt2000 self.v_epoch16_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch16 = self._bad_tt2000 self.v_tt2000_to_epoch16 = self._bad_tt2000 #Default to V2 CDF self.set_backward(True) @staticmethod def _find_lib(): """ Search for the CDF library Searches in likely locations for CDF libraries and attempts to load them. Stops at first successful load and, if fails, reports all the files that were tried as libraries. Returns ======= out : tuple This is either (path to library, loaded library) or, in the event of failure, (None, list of libraries tried) """ failed = [] for libpath in Library._lib_paths(): try: lib = ctypes.CDLL(libpath) except: failed.append(libpath) else: return libpath, lib return (failed, None) @staticmethod def _lib_paths(): """Find candidate paths for the CDF library Does not check that the library is actually in any particular directory, just returns a list of possible locations, in priority order. Returns ======= out : generator of str paths that look like the CDF library """ #What the library might be named names = { 'win32': ['cdf.dll'], 'darwin': ['libcdf.dylib', 'cdf.dylib', 'libcdf.so'], 'linux2': ['libcdf.so'], 'linux': ['libcdf.so'], } names = names.get(sys.platform, ['libcdf.so']) #All existing CDF-library-like paths within a directory search_dir = lambda x: \ [os.path.join(x, fname) for fname in names if os.path.exists(os.path.join(x, fname))] # Only use anaconda locations... # Defined during builds ... if 'PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['PREFIX'], 'lib')): yield p # defined when conda is activated ... 
if 'CONDA_PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'lib')): yield p # Special subdirectory for anaconda unix packages on windows if 'LIBRARY_BIN' in os.environ: for p in search_dir(os.environ['LIBRARY_BIN']): yield p ctypespath = ctypes.util.find_library( 'cdf.dll' if sys.platform == 'win32' else 'cdf') if ctypespath: yield ctypespath def check_status(self, status, ignore=()): """ Raise exception or warning based on return status of CDF call Parameters ========== status : int status returned by the C library Other Parameters ================ ignore : sequence of ctypes.c_long CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. (Default none). Raises ====== CDFError : if status < CDF_WARN, indicating an error Warns ===== CDFWarning : if CDF_WARN <= status < CDF_OK, indicating a warning. Returns ======= out : int status (unchanged) """ if status == const.CDF_OK or status in ignore: return status if status < const.CDF_WARN: raise CDFError(status) else: warning = CDFWarning(status) warning.warn() return status def call(self, *args, **kwargs): """ Call the CDF internal interface Passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with :meth:`check_status`. Terminal NULL is automatically added to args. Parameters ========== args : various, see :mod:`ctypes` Passed directly to the CDF library interface. Useful constants are defined in the :mod:`~pycdf.const` module. Other Parameters ================ ignore : sequence of CDF statuses sequence of CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. Returns ======= out : int CDF status from the library Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ if 'ignore' in kwargs: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) ), kwargs['ignore']) else: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) )) def set_backward(self, backward=True): """ Set backward compatibility mode for new CDFs Unless backward compatible mode is set, CDF files created by the version 3 library can not be read by V2. Parameters ========== backward : boolean Set backward compatible mode if True; clear it if False. Raises ====== ValueError : if backward=False and underlying CDF library is V2 """ if self.version[0] < 3: if not backward: raise ValueError( 'Cannot disable backward-compatible mode for CDF version 2.') else: return self._library.CDFsetFileBackward(const.BACKWARDFILEon if backward else const.BACKWARDFILEoff) def epoch_to_datetime(self, epoch): """ Converts a CDF epoch value to a datetime Parameters ========== epoch : float epoch value from CDF Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
See Also ======== v_epoch_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) self._library.EPOCHbreakdown(ctypes.c_double(epoch), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999000) else: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, msec.value * 1000) def datetime_to_epoch(self, dt): """ Converts a Python datetime to a CDF Epoch value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : float epoch corresponding to dt See Also ======== v_datetime_to_epoch """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) micro = dt.microsecond % 1000 if micro >= 500 and dt.year < 9999: dt += datetime.timedelta(0, 0, 1000) return self._library.computeEPOCH(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000)) def epoch16_to_datetime(self, epoch0, epoch1): """ Converts a CDF epoch16 value to a datetime .. note:: The call signature has changed since SpacePy 0.1.2. Formerly this method took a single argument with two values; now it requires two arguments (one for each value). To convert existing code, replace ``epoch16_to_datetime(epoch)`` with ``epoch16_to_datetime(*epoch)``. Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. See Also ======== v_epoch16_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) usec = ctypes.c_long(0) nsec = ctypes.c_long(0) psec = ctypes.c_long(0) self._library.EPOCH16breakdown((ctypes.c_double * 2)(epoch0, epoch1), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec), ctypes.byref(psec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) micro = int(float(msec.value) * 1000 + float(usec.value) + float(nsec.value) / 1000 + float(psec.value) / 1e6 + 0.5) if micro < 1000000: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_epoch16(self, dt): """ Converts a Python datetime to a CDF Epoch16 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : list of float epoch16 corresponding to dt See Also ======== v_datetime_to_epoch16 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) #Default to "illegal epoch" epoch16 = (ctypes.c_double * 2)(-1., -1.) 
if self._library.computeEPOCH16(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0, 0, epoch16): return (-1., -1.) #Failure, so illegal epoch return (epoch16[0], epoch16[1]) def epoch_to_epoch16(self, epoch): """ Converts a CDF EPOCH to a CDF EPOCH16 value Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : (double, double) EPOCH16 corresponding to epoch """ e = numpy.require(epoch, numpy.float64) s = numpy.trunc(e / 1000.0) #ugly numpy stuff, probably a better way.... res = numpy.hstack((s, (e - s * 1000.0) * 1e9)) if len(res) <= 2: return res newshape = list(res.shape[0:-2]) newshape.append(res.shape[-1] // 2) newshape.append(2) return numpy.rollaxis(res.reshape(newshape), -1, -2) def epoch_to_num(self, epoch): """ Convert CDF EPOCH to matplotlib number. Same output as :func:`~matplotlib.dates.date2num` and useful for plotting large data sets without converting the times through datetime. Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : double Floating point number representing days since 0001-01-01. """ #date2num day 1 is 1/1/1 00UT #epoch 1/1/1 00UT is 31622400000.0 (millisecond) return (epoch - 31622400000.0) / (24 * 60 * 60 * 1000.0) + 1.0 def epoch16_to_epoch(self, epoch16): """ Converts a CDF EPOCH16 to a CDF EPOCH value Parameters ========== epoch16 : (double, double) EPOCH16 to convert. Lists and numpy arrays are acceptable. LAST dimension should be 2: the two pairs of EPOCH16 Returns ======= out : double EPOCH corresponding to epoch16 """ e = numpy.require(epoch16, numpy.float64) return e[..., 0] * 1000.0 + numpy.round(e[..., 1] / 1e9) def tt2000_to_datetime(self, tt2000): """ Converts a CDF TT2000 value to a datetime .. note:: Although TT2000 values support leapseconds, Python's datetime object does not. Any times after 23:59:59.999999 will be truncated to 23:59:59.999999. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
See Also ======== v_tt2000_to_datetime """ yyyy = ctypes.c_double(0) mm = ctypes.c_double(0) dd = ctypes.c_double(0) hh = ctypes.c_double(0) min = ctypes.c_double(0) sec = ctypes.c_double(0) msec = ctypes.c_double(0) usec = ctypes.c_double(0) nsec = ctypes.c_double(0) self._library.breakdownTT2000( ctypes.c_longlong(tt2000), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) sec = int(sec.value) if sec >= 60: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), 59, 999999) micro = int(msec.value * 1000 + usec.value + nsec.value / 1000 + 0.5) if micro < 1000000: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_tt2000(self, dt): """ Converts a Python datetime to a CDF TT2000 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0) def _datetime_to_tt2000_typepunned(self, dt): """ Converts a Python datetime to a CDF TT2000 value Typepunned version that passes doubles as longlongs, to get around ARM calling convention oddness. Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ c_ll_p = ctypes.POINTER(ctypes.c_longlong) if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( dt.year)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.month)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.day)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.hour)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.minute)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.second)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond // 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond % 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) def epoch_to_tt2000(self, epoch): """ Converts a CDF EPOCH to a CDF TT2000 value Parameters ========== epoch : double EPOCH to convert Returns ======= out : int tt2000 corresponding to epoch See Also ======== v_epoch_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH(epoch) def tt2000_to_epoch(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH .. note:: Although TT2000 values support leapseconds, CDF EPOCH values do not. 
Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double EPOCH corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch """ return self._library.CDF_TT2000_to_UTC_EPOCH(tt2000) def epoch16_to_tt2000(self, epoch0, epoch1): """ Converts a CDF epoch16 value to TT2000 .. note:: Because TT2000 does not support picoseconds, the picoseconds value in epoch is ignored (i.e., truncated.) Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : long TT2000 corresponding to epoch. See Also ======== v_epoch16_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH16( (ctypes.c_double * 2)(epoch0, epoch1)) def tt2000_to_epoch16(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH16 .. note:: Although TT2000 values support leapseconds, CDF EPOCH16 values do not. Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double, double EPOCH16 corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch16 """ #Default to "illegal epoch" if isn't populated epoch16 = (ctypes.c_double * 2)(-1., -1.) if self._library.CDF_TT2000_to_UTC_EPOCH16(tt2000, epoch16): return (-1., -1.) #Failure; illegal epoch return (epoch16[0], epoch16[1]) def _bad_tt2000(*args, **kwargs): """Convenience function for complaining that TT2000 not supported""" raise NotImplementedError( 'TT2000 functions require CDF library 3.4.0 or later') def download_library(): """Download and install the CDF library""" if sys.platform != 'win32': raise NotImplementedError( 'CDF library install only supported on Windows') try: import html.parser as HTMLParser except ImportError: import HTMLParser #https://stackoverflow.com/questions/1713038/super-fails-with-error-typeerror-argument-1-must-be-type-not-classobj class LinkParser(HTMLParser.HTMLParser, object): def __init__(self, *args, **kwargs): self.links_found = [] super(LinkParser, self).__init__(*args, **kwargs) def handle_starttag(self, tag, attrs): if tag != 'a' or attrs[0][0] != 'href': return self.links_found.append(attrs[0][1]) import re import subprocess try: import urllib.request as u except ImportError: import urllib as u # Removed reference to spacepy #import spacepy #if spacepy.config.get('user_agent', None): # class AppURLopener(u.FancyURLopener): # version = spacepy.config['user_agent'] # u._urlopener = AppURLopener() baseurl = 'https://spdf.sci.gsfc.nasa.gov/pub/software/cdf/dist/' url = u.urlopen(baseurl) listing = url.read() url.close() p = LinkParser() p.feed(listing) cdfdist = [l for l in p.links_found if re.match('^cdf3\d_\d(?:_\d)?/$', l)] if not cdfdist: raise RuntimeError( "Couldn't find CDF distribution directory to download") cdfdist.sort(key=lambda x: x.rstrip('/').split('_')) cdfverbase = cdfdist[-1].rstrip('/') instfname = cdfverbase + ('_0' if cdfverbase.count('_') == 1 else '') + \ '-setup-{0}.exe'.format(len('%x' % sys.maxsize)*4) insturl = baseurl + cdfverbase + '/windows/' + instfname tmpdir = tempfile.mkdtemp() try: fname, status = u.urlretrieve(insturl, os.path.join(tmpdir, instfname)) subprocess.check_call([fname, '/install', '/q1'], shell=False) finally: shutil.rmtree(tmpdir) _libpath, _library = Library._find_lib() 
if _library is None: raise Exception(('Cannot load CDF C library; checked {0}. ' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(_libpath))) from . import const lib = Library(_libpath, _library) """Module global library object. Initalized at module load time so all classes have ready access to the CDF library and a common state. E.g: >>> import pycdf >>> pycdf.lib.version (3, 3, 0, ' ') """ class CDFException(Exception): """ Base class for errors or warnings in the CDF library. Not normally used directly, but in subclasses :class:`CDFError` and :class:`CDFWarning`. Error messages provided by this class are looked up from the underlying C library. """ def __init__(self, status): """ Create a CDF Exception Uses CDF C library to look up an appropriate error message. Parameters ========== status : ctypes.c_long CDF status """ self.status = status self.string = 'CDF error ' + repr(status) + ', unable to get details.' message = ctypes.create_string_buffer(const.CDF_STATUSTEXT_LEN + 1) try: retval = lib._library.CDFlib(const.SELECT_, const.CDF_STATUS_, ctypes.c_long(status), const.GET_, const.STATUS_TEXT_, message, const.NULL_) if retval == const.CDF_OK: if isinstance(message.value, str): self.string = message.value elif isinstance(message.value, bytes): self.string = message.value.decode() except: pass def __str__(self): """ Error string associated with the library error. Returns ======= out : str Error message from the CDF library. """ return self.string class CDFError(CDFException): """Raised for an error in the CDF library.""" pass class CDFWarning(CDFException, UserWarning): """Used for a warning in the CDF library.""" def warn(self, level=4): """ Issues a warning based on the information stored in my exception Intended for use in check_status or similar wrapper function. Other Parameters ================ level : int optional (default 3), how far up the stack the warning should be reported. Passed directly to :class:`warnings.warn`. """ warnings.warn(self, self.__class__, level) class EpochError(Exception): """Used for errors in epoch routines""" pass def _compress(obj, comptype=None, param=None): """Set or check the compression of a :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param obj: object on which to set or check compression @type obj: :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param comptype: type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :py:mod:`pycdf.const`. If not specified, will not change compression. @type comptype: ctypes.c_long @param param: Compression parameter, see CDF CRM 4.10 and :py:mod:`pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
@type param: ctypes.c_long @return: (comptype, param) currently in effect @rtype: tuple """ if isinstance(obj, CDF): COMPRESSION_ = const.CDF_COMPRESSION_ elif isinstance(obj, Var): COMPRESSION_ = const.zVAR_COMPRESSION_ else: raise ValueError('Must specify a CDF or Var type.') validparams = {const.NO_COMPRESSION.value: [ctypes.c_long(0)], const.RLE_COMPRESSION.value: [const.RLE_OF_ZEROs], const.HUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.AHUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.GZIP_COMPRESSION.value: [ctypes.c_long(5), ctypes.c_long(1), ctypes.c_long(2), ctypes.c_long(3), ctypes.c_long(4), ctypes.c_long(6), ctypes.c_long(7), ctypes.c_long(8), ctypes.c_long(9), ], } comptypes = [const.NO_COMPRESSION, const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION, const.GZIP_COMPRESSION] comptypevalues = [i.value for i in comptypes] if comptype != None: if not hasattr(comptype, 'value'): comptype = ctypes.c_long(comptype) if param is None: if not comptype.value in validparams: raise CDFError(const.BAD_COMPRESSION) param = validparams[comptype.value][0] paramlist = (ctypes.c_long * 1)(param) obj._call(const.PUT_, COMPRESSION_, comptype, paramlist) params = (ctypes.c_long * const.CDF_MAX_PARMS)(*([0] * const.CDF_MAX_PARMS)) comptype = ctypes.c_long(0) percent = ctypes.c_long(0) obj._call(const.GET_, COMPRESSION_, ctypes.byref(comptype), ctypes.byref(params), ctypes.byref(percent)) param = params[0] if not comptype.value in comptypevalues: raise CDFError(const.BAD_COMPRESSION) validparamvalues = [i.value for i in validparams[comptype.value]] if not param in validparamvalues: raise CDFError(const.BAD_COMPRESSION_PARM) comptype = comptypes[comptypevalues.index(comptype.value)] if comptype in (const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION): param = validparams[comptype.value][validparamvalues.index(param)] return (comptype, param) class CDF(MutableMapping): """ Python object representing a CDF file. Open or create a CDF file by creating an object of this class. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. A readonly CDF with many variables may be slow to close. See :meth:`readonly`. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :meth:`close` or :meth:`save` when done. .. note:: Existing CDF files are opened read-only by default, see :meth:`readonly` to change. CDF supports the `with <http://docs.python.org/tutorial/inputoutput.html#methods-of-file-objects>`_ keyword, like other file objects, so: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... #do brilliant things with the CDF will open the CDF, execute the indented statements, and close the CDF when finished or when an error occurs. The `python docs <http://docs.python.org/reference/compound_stmts.html#with>`_ include more detail on this 'context manager' ability. 
CDF objects behave like a python `dictionary <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_, where the keys are names of variables in the CDF, and the values, :class:`Var` objects. As a dictionary, they are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_ and it is easy to loop over all of the variables in a file. Some examples: #. List the names of all variables in the open CDF ``cdffile``: >>> cdffile.keys() >>> for k in cdffile: #Alternate ... print(k) #. Get a :class:`Var` object for the variable named ``Epoch``: >>> epoch = cdffile['Epoch'] #. Determine if a CDF contains a variable named ``B_GSE``: >>> if 'B_GSE' in cdffile: ... print('B_GSE is in the file') ... else: ... print('B_GSE is not in the file') #. Find how many variables are in the file: >>> print(len(cdffile)) #. Delete the variable ``Epoch`` from the open CDF file ``cdffile``: >>> del cdffile['Epoch'] #. Display a summary of variables and types in open CDF file ``cdffile``: >>> print(cdffile) #. Open the CDF named ``cdf_filename.cdf``, read *all* the data from all variables into dictionary ``data``, and close it when done or if an error occurs: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... data = cdffile.copy() This last example can be very inefficient as it reads the entire CDF. Normally it's better to treat the CDF as a dictionary and access only the data needed, which will be pulled transparently from disc. See :class:`Var` for more subtle examples. Potentially useful dictionary methods and related functions: - `in <http://docs.python.org/reference/expressions.html#in>`_ - `keys <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_ - :py:func:`len` - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - :py:func:`sorted` - :py:func:`~spacepy.toolbox.dictree` The CDF user's guide section 2.2 has more background information on CDF files. The :attr:`~CDF.attrs` Python attribute acts as a dictionary referencing CDF attributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`gAttrList` for more on the dictionary of global attributes. Creating a new CDF from a master (skeleton) CDF has similar syntax to opening one: >>> cdffile = pycdf.CDF('cdf_filename.cdf', 'master_cdf_filename.cdf') This creates and opens ``cdf_filename.cdf`` as a copy of ``master_cdf_filename.cdf``. Using a skeleton CDF is recommended over making a CDF entirely from scratch, but this is possible by specifying a blank master: >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') When CDFs are created in this way, they are opened read-write, see :py:meth:`readonly` to change. By default, new CDFs (without a master) are created in version 2 (backward-compatible) format. To create a version 3 CDF, use :meth:`Library.set_backward`: >>> pycdf.lib.set_backward(False) >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') Add variables by direct assignment, which will automatically set type and dimension based on the data provided: >>> cdffile['new_variable_name'] = [1, 2, 3, 4] or, if more control is needed over the type and dimensions, use :py:meth:`new`. Although it is supported to assign Var objects to Python variables for convenience, there are some minor pitfalls that can arise when changing a CDF that will not affect most users. 
    This is only a concern when assigning a zVar object to a Python
    variable, changing the CDF through some other variable, and then
    trying to use the zVar object via the originally assigned variable.

    Deleting a variable:

        >>> var = cdffile['Var1']
        >>> del cdffile['Var1']
        >>> var[0] #fail, no such variable

    Renaming a variable:

        >>> var = cdffile['Var1']
        >>> cdffile['Var1'].rename('Var2')
        >>> var[0] #fail, no such variable

    Renaming via the same variable works:

        >>> var = cdffile['Var1']
        >>> var.rename('Var2')
        >>> var[0] #succeeds, aware of new name

    Deleting a variable and then creating another variable with the same
    name may lead to some surprises:

        >>> var = cdffile['Var1']
        >>> var[...] = [1, 2, 3, 4]
        >>> del cdffile['Var1']
        >>> cdffile.new('Var1', data=[5, 6, 7, 8])
        >>> var[...]
        [5, 6, 7, 8]

    .. autosummary::

        ~CDF.attr_num
        ~CDF.attrs
        ~CDF.add_attr_to_cache
        ~CDF.add_to_cache
        ~CDF.backward
        ~CDF.checksum
        ~CDF.clear_attr_from_cache
        ~CDF.clear_from_cache
        ~CDF.clone
        ~CDF.close
        ~CDF.col_major
        ~CDF.compress
        ~CDF.copy
        ~CDF.from_data
        ~CDF.new
        ~CDF.raw_var
        ~CDF.readonly
        ~CDF.save
        ~CDF.var_num
        ~CDF.version

    .. attribute:: CDF.attrs

        Global attributes for this CDF in a dict-like format.
        See :class:`gAttrList` for details.

    .. attribute:: CDF.backward

        True if this CDF was created in backward-compatible mode
        (for opening with CDF library before 3.x)

    .. automethod:: add_to_cache
    .. automethod:: add_attr_to_cache
    .. automethod:: attr_num
    .. automethod:: checksum
    .. automethod:: clear_from_cache
    .. automethod:: clear_attr_from_cache
    .. automethod:: clone
    .. automethod:: close
    .. automethod:: col_major
    .. automethod:: compress
    .. automethod:: copy
    .. automethod:: from_data
    .. automethod:: new
    .. automethod:: raw_var
    .. automethod:: readonly
    .. automethod:: save
    .. automethod:: var_num
    .. automethod:: version
    """
    def __init__(self, pathname, masterpath=None, create=None, readonly=None):
        """Open or create a CDF file.

        Parameters
        ==========
        pathname : string
            name of the file to open or create
        masterpath : string
            name of the master CDF file to use in creating a new file.
            If not provided, an existing file is opened; if provided but
            evaluates to ``False`` (e.g., ``''``), an empty new CDF is
            created.
        create : bool
            Create a new CDF even if masterpath isn't provided
        readonly : bool
            Open the CDF read-only. Default True if opening an existing
            CDF; False if creating a new one.

        Raises
        ======
        CDFError
            if CDF library reports an error
        CDFWarning
            if CDF library reports a warning and interpreter is set to
            error on warnings.

        Examples
        ========
        Open a CDF by creating a CDF object, e.g.:

            >>> cdffile = pycdf.CDF('cdf_filename.cdf')

        Be sure to :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save`
        when done.
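
        Creating a new CDF uses the same constructor; as a minimal sketch,
        to make an empty CDF from a blank master (the output filename here
        is purely illustrative):

            >>> newfile = pycdf.CDF('new_filename.cdf', '')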
""" if masterpath is not None: #Looks like we want to create if create is False: raise ValueError('Cannot specify a master CDF without creating a CDF') if readonly is True: raise ValueError('Cannot create a CDF in readonly mode') if create and readonly: raise ValueError('Cannot create a CDF in readonly mode') try: self.pathname = pathname.encode() except AttributeError: raise ValueError( 'pathname must be string-like: {0}'.format(pathname)) self._handle = ctypes.c_void_p(None) self._opened = False if masterpath is None and not create: self._open(True if readonly is None else readonly) elif masterpath: self._from_master(masterpath.encode()) else: self._create() lib.call(const.SELECT_, const.CDF_zMODE_, ctypes.c_long(2)) self._attrlistref = weakref.ref(gAttrList(self)) self.backward = self.version()[0] < 3 self._var_nums = {} """Cache of name-to-number mappings for variables in this CDF""" self._attr_info = {} """Cache of name-to-(number, global) mappings for attributes in this CDF""" def __del__(self): """Destructor; called when CDF object is destroyed. Close CDF file if there is still a valid handle. .. note:: To avoid data loss, explicitly call :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save`. """ if self._opened: self.close() def __delitem__(self, name): """Delete a zVariable in this CDF, by name or number Parameters ========== name : string or int Name or number of the CDF variable .. note: Variable numbers may change if variables are added or removed. Examples ======== Delete the variable ``Epoch`` from the open CDF file ``cdffile``. >>> del cdffile['Epoch'] """ self[name]._delete() def __enter__(self): """Context manager entrance function.""" return self def __exit__(self, type, value, traceback): """Context manager exit function. Close CDF file. """ self.close() def __getitem__(self, name): """Gets a zVariable in this CDF, by name or number The CDF acts like a dict @param name: Name or number of the CDF variable @type name: string or int @return: CDF variable named or numbered L{name} @rtype: :py:class:`pycdf.Var` @raise KeyError: for pretty much any problem in lookup @note: variable numbers may change if variables are added or removed. """ try: return Var(self, name) except CDFException as e: raise KeyError('{0}: {1}'.format(name, e)) def __setitem__(self, name, data): """Writes data to a zVariable in this CDF If the zVariable does not exist, will create one matching L{data}. If it does exist, will attempt to write L{data} to it without changing the type or dimensions. @param name: name or number of the variable to write @type name: str or int @param data: data to write, or a :py:class:`pycdf.Var` to copy """ if isinstance(data, Var): self.clone(data, name) elif name in self: self[name][...] 
= data if hasattr(data, 'attrs'): self[name].attrs.clone(data.attrs) else: self.new(name, data) def __iter__(self, current = 0): """Iterates over zVars in CDF Iterators for dicts return keys @note: Returned in variable-number order """ while current < self.__len__(): name = self[current].name() value = (yield name) if value is None: current += 1 else: current = self[value]._num() current += 1 def __len__(self): """Implements 'length' of CDF (number of zVars) @return: number of zVars in the CDF @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, const.CDF_NUMzVARS_, ctypes.byref(count)) return count.value def __contains__(self, key): """Determines whether a particular variable name is in the CDF @note: Essentially an efficiency function; L{__iter__} is called if this isn't defined @param key: key/variable name to check @type key: string @return: True if L{key} is the name of a variable in CDF, else False @rtype: Boolean """ try: foo = self[key] return True except KeyError as e: expected = str(key) + \ ": NO_SUCH_VAR: Named variable not found in this CDF." if expected in e.args: return False raise def __repr__(self): """Returns representation of CDF Cannot return anything that can be eval'd to create a copy of the CDF, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<CDF:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the CDF This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.CDF`, just the names, types, and sizes of all variables. (Attributes are not listed.) @return: description of the variables in the CDF @rtype: str """ if self._opened: return '\n'.join([key + ': ' + str(value) for (key, value) in sorted(self.items())]) #can get away with this sort because second value in tuple isn't #compared unless first are different, and variable name is unique. else: if isinstance(self.pathname, str): return 'Closed CDF {0}'.format(self.pathname) else: return 'Closed CDF {0}'.format(self.pathname.decode('ascii')) def _open(self, readonly=True): """Opens the CDF file (called on init) Will open an existing CDF file read/write. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.OPEN_, const.CDF_, self.pathname, ctypes.byref(self._handle)) self._opened = True if readonly: #Default is RW self.readonly(readonly) def _create(self): """Creates (and opens) a new CDF file Created at ``pathname``. Assumes zero-dimension r variables Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.CREATE_, const.CDF_, self.pathname, ctypes.c_long(0), (ctypes.c_long * 1)(0), ctypes.byref(self._handle)) self._opened = True def _from_master(self, master_path): """Creates a new CDF from a master CDF file ``master_path`` is copied to ``pathname`` and opened. Parameters ========== master_path : string location of the master CDF file Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. 
note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ if os.path.exists(self.pathname): raise CDFError(const.CDF_EXISTS) shutil.copy2(master_path, self.pathname) self._open(False) def _call(self, *args, **kwargs): """Select this CDF as current and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. Parameters ========== args : various, see :py:mod:`ctypes`. Passed directly to the CDF library interface. Useful constants are defined in the :doc:`const <pycdf_const>` module of this package. Returns ======= out : ctypes.c_long CDF status from the library .. note: Terminal NULL_ is automatically added to ``args``. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. """ return lib.call(const.SELECT_, const.CDF_, self._handle, *args, **kwargs) def clone(self, zVar, name=None, data=True): """ Clone a zVariable (from another CDF or this) into this CDF Parameters ========== zVar : :py:class:`Var` variable to clone Other Parameters ================ name : str Name of the new variable (default: name of the original) data : boolean (optional) Copy data, or only type, dimensions, variance, attributes? (default: True, copy data as well) Returns ======= out : :py:class:`Var` The newly-created zVar in this CDF """ if name is None: name = zVar.name() if name in self: del self[name] self.new(name, type=zVar.type(), recVary=zVar.rv(), dimVarys=zVar.dv(), dims=zVar._dim_sizes(), n_elements=zVar._nelems()) self[name].compress(*zVar.compress()) self[name].attrs.clone(zVar.attrs) if data: r = zVar._raw zVar._raw = True self.raw_var(name)[...] = zVar[...] zVar._raw = r return zVar def col_major(self, new_col=None): """ Finds the majority of this CDF file Other Parameters ================ new_col : boolean Specify True to change to column-major, False to change to row major, or do not specify to check the majority rather than changing it. (default is check only) Returns ======= out : boolean True if column-major, false if row-major """ if new_col != None: new_maj = const.COLUMN_MAJOR if new_col else const.ROW_MAJOR self._call(const.PUT_, const.CDF_MAJORITY_, new_maj) maj = ctypes.c_long(0) self._call(const.GET_, const.CDF_MAJORITY_, ctypes.byref(maj)) if not maj.value in (const.ROW_MAJOR.value, const.COLUMN_MAJOR.value): raise CDFError(const.BAD_MAJORITY) return maj.value == const.COLUMN_MAJOR.value def readonly(self, ro=None): """ Sets or check the readonly status of this CDF If the CDF has been changed since opening, setting readonly mode will have no effect. .. note:: Closing a CDF that has been opened readonly, or setting readonly False, may take a substantial amount of time if there are many variables in the CDF, as a (potentially large) cache needs to be cleared. Consider specifying ``readonly=False`` when opening the file if this is an issue. However, this may make some reading operations slower. Other Parameters ================ ro : Boolean True to set the CDF readonly, False to set it read/write, or leave out to check only. 
Returns ======= out : Boolean True if CDF is read-only, else False Raises ====== CDFError : if bad mode is set """ if ro == True: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYon) elif ro == False: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYoff) mode = ctypes.c_long(0) self._call(const.CONFIRM_, const.CDF_READONLY_MODE_, ctypes.byref(mode)) if mode.value == const.READONLYon.value: return True elif mode.value == const.READONLYoff.value: return False else: raise CDFError(const.BAD_READONLY_MODE.value) def checksum(self, new_val=None): """ Set or check the checksum status of this CDF. If checksums are enabled, the checksum will be verified every time the file is opened. Other Parameters ================ new_val : boolean True to enable checksum, False to disable, or leave out to simply check. Returns ======= out : boolean True if the checksum is enabled or False if disabled """ if new_val != None: self._call(const.PUT_, const.CDF_CHECKSUM_, const.MD5_CHECKSUM if new_val else const.NO_CHECKSUM) chk = ctypes.c_long(0) self._call(const.GET_, const.CDF_CHECKSUM_, ctypes.byref(chk)) if not chk.value in (const.MD5_CHECKSUM.value, const.NO_CHECKSUM.value): raise CDFError(const.BAD_CHECKSUM) return chk.value == const.MD5_CHECKSUM.value def close(self): """ Closes the CDF file Although called on object destruction (:meth:`~CDF.__del__`), to ensure all data are saved, the user should explicitly call :meth:`~CDF.close` or :meth:`~CDF.save`. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.CLOSE_, const.CDF_) self._opened = False def compress(self, comptype=None, param=None): """ Set or check the compression of this CDF Sets compression on entire *file*, not per-variable. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) Returns ======= out : tuple (comptype, param) currently in effect See Also ======== :meth:`Var.compress` Examples ======== Set file ``cdffile`` to gzip compression, compression level 9: >>> cdffile.compress(pycdf.const.GZIP_COMPRESSION, 9) """ return _compress(self, comptype, param) def new(self, name, data=None, type=None, recVary=True, dimVarys=None, dims=None, n_elements=None, compress=None, compress_param=None): """ Create a new zVariable in this CDF .. note:: Either ``data`` or ``type`` must be specified. If type is not specified, it is guessed from ``data``. Parameters ========== name : str name of the new variable Other Parameters ================ data data to store in the new variable. If this has a an ``attrs`` attribute (e.g., :class:`~spacepy.datamodel.dmarray`), it will be used to populate attributes of the new variable. type : ctypes.c_long CDF type of the variable, from :mod:`~pycdf.const`. See section 2.5 of the CDF user's guide for more information on CDF data types. recVary : boolean record variance of the variable (default True) dimVarys : list of boolean dimension variance of each dimension, default True for all dimensions. 
dims : list of int size of each dimension of this variable, default zero-dimensional. Note this is the dimensionality as defined by CDF, i.e., for record-varying variables it excludes the leading record dimension. See :py:class:`Var`. n_elements : int number of elements, should be 1 except for CDF_CHAR, for which it's the length of the string. compress : ctypes.c_long Compression to apply to this variable, default None. See :py:meth:`Var.compress`. compress_param : ctypes.c_long Compression parameter if compression used; reasonable default is chosen. See :py:meth:`Var.compress`. Returns ======= out : :py:class:`Var` the newly-created zVariable Raises ====== ValueError : if neither data nor sufficient typing information is provided. Notes ===== Any given data may be representable by a range of CDF types; if the type is not specified, pycdf will guess which the CDF types which can represent this data. This breaks down to: #. If input data is a numpy array, match the type of that array #. Proper kind (numerical, string, time) #. Proper range (stores highest and lowest number provided) #. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: #. Type that matches precision of data first, then #. integer type before float type, then #. Smallest type first, then #. signed type first, then #. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if ``data`` specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: #. absolute values between 0 and 3e-39 #. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. """ if type in (const.CDF_EPOCH16, const.CDF_INT8, const.CDF_TIME_TT2000) \ and self.backward: raise ValueError('Cannot use EPOCH16, INT8, or TIME_TT2000 ' 'in backward-compatible CDF') if not lib.supports_int8 and \ type in (const.CDF_INT8, const.CDF_TIME_TT2000): raise ValueError('INT8 and TIME_TT2000 require CDF library 3.4.0') if data is None: if type is None: raise ValueError('Must provide either data or a CDF type.') if dims is None: dims = [] if n_elements is None: n_elements = 1 else: (guess_dims, guess_types, guess_elements) = _Hyperslice.types(data) if dims is None: if recVary: if guess_dims == (): raise ValueError( 'Record-varying data cannot be scalar. 
' 'Specify NRV with CDF.new() or put data in array.') dims = guess_dims[1:] else: dims = guess_dims if type is None: type = guess_types[0] if type == const.CDF_EPOCH16.value and self.backward: type = const.CDF_EPOCH if n_elements is None: n_elements = guess_elements if dimVarys is None: dimVarys = [True for i in dims] recVary = const.VARY if recVary else const.NOVARY dimVarys = [const.VARY if dimVary else const.NOVARY for dimVary in dimVarys] if not hasattr(type, 'value'): type = ctypes.c_long(type) if type.value == const.CDF_INT8.value and not lib.supports_int8: raise ValueError( '64-bit integer support require CDF library 3.4.0') if type.value in (const.CDF_EPOCH16.value, const.CDF_INT8.value, const.CDF_TIME_TT2000.value) \ and self.backward: raise ValueError('Data requires EPOCH16, INT8, or TIME_TT2000; ' 'incompatible with backward-compatible CDF') new_var = Var(self, name, type, n_elements, dims, recVary, dimVarys) if compress != None: new_var.compress(compress, compress_param) if data is not None: new_var[...] = data if hasattr(data, 'attrs'): new_var.attrs.clone(data.attrs) return new_var def raw_var(self, name): """ Get a "raw" :class:`Var` object. Normally a :class:`Var` will perform translation of values for certain types (to/from Unicode for CHAR variables on Py3k, and to/from datetime for all time types). A "raw" object does not perform this translation, on read or write. This does *not* affect the data on disk, and in fact it is possible to maintain multiple Python objects with access to the same zVariable. Parameters ========== name : str name or number of the zVariable """ v = self[name] v._raw = True return v def save(self): """ Saves the CDF file but leaves it open. If closing the CDF, :meth:`close` is sufficient; there is no need to call :meth:`save` before :meth:`close`. .. note:: Relies on an undocumented call of the CDF C library, which is also used in the Java interface. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.SAVE_, const.CDF_) def copy(self): """ Make a copy of all data and attributes in this CDF Returns ======= out : :py:class:`CDFCopy` :class:`~spacepy.datamodel.SpaceData`-like object of all data """ return CDFCopy(self) def version(self): """ Get version of library that created this CDF Returns ======= out : tuple version of CDF library, in form (version, release, increment) """ ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) self._call(const.GET_, const.CDF_VERSION_, ctypes.byref(ver), const.GET_, const.CDF_RELEASE_, ctypes.byref(rel), const.GET_, const.CDF_INCREMENT_, ctypes.byref(inc)) return (ver.value, rel.value, inc.value) def _get_attrs(self): """Get attribute list Provide access to the CDF's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = gAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. 
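
        For example, global attributes can be assigned like dictionary
        entries (the attribute name below is only illustrative):

            >>> cdffile.attrs['Project'] = 'My project name'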
""") def var_num(self, varname): """Get the variable number of a particular variable name This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : int Variable number of this zvariable. """ num = self._var_nums.get(varname, None) if num is None: #Copied from Var._get, which can hopefully be thinned varNum = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMBER_, varname, ctypes.byref(varNum)) num = varNum.value self._var_nums[varname] = num return num def attr_num(self, attrname): """Get the attribute number and scope by attribute name This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : tuple attribute number, scope (True for global) of this attribute """ res = self._attr_info.get(attrname, None) if res is None: #Copied from Var._get, which can hopefully be thinned attrNum = ctypes.c_long(0) self._call(const.GET_, const.ATTR_NUMBER_, attrname, ctypes.byref(attrNum)) scope = ctypes.c_long(0) self._call(const.SELECT_, const.ATTR_, attrNum, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) res = (attrNum.value, scope) self._attr_info[attrname] = res return res def clear_attr_from_cache(self, attrname): """Mark an attribute deleted in the name-to-number cache Will remove an attribute, and all attributes with higher numbers, from the attribute cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the attribute. Not this is NOT a string in Python 3! """ num, scope = self.attr_num(attrname) #All numbers higher than this are renumbered for a, n in list(self._attr_info.items()): if n[0] >= num: del self._attr_info[a] def clear_from_cache(self, varname): """Mark a variable deleted in the name-to-number cache Will remove a variable, and all variables with higher numbers, from the variable cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! """ num = self.var_num(varname) #All numbers higher than this are renumbered for v, n in list(self._var_nums.items()): if n >= num: del self._var_nums[v] def add_attr_to_cache(self, attrname, num, scope): """Add an attribute to the name-to-number cache This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable scope : bool True if global scope; False if variable scope. 
""" self._attr_info[attrname] = (num, scope) def add_to_cache(self, varname, num): """Add a variable to the name-to-number cache This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable """ self._var_nums[varname] = num #Note there is no function for delete, currently handled in Var.rename #and Attr.rename by just deleting from the dict directly. Maybe this #should be differen (maybe should be possible to follow a variable across #a rename...) class Var(MutableSequence): """ A CDF variable. This object does not directly store the data from the CDF; rather, it provides access to the data in a format that much like a Python list or numpy :class:`~numpy.ndarray`. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. The CDF user's guide, section 2.3, provides background on variables. .. note:: Not intended to be created directly; use methods of :class:`CDF` to gain access to a variable. A record-varying variable's data are viewed as a hypercube of dimensions n_dims+1 (the extra dimension is the record number). They are indexed in row-major fashion, i.e. the last index changes most frequently / is contiguous in memory. If the CDF is column-major, the data are transformed to row-major before return. Non record-varying variables are similar, but do not have the extra dimension of record number. Variables can be subscripted by a multidimensional index to return the data. Indices are in row-major order with the first dimension representing the record number. If the CDF is column major, the data are reordered to row major. Each dimension is specified by standard Python `slice <http://docs.python.org/tutorial/introduction.html#strings>`_ notation, with dimensions separated by commas. The ellipsis fills in any missing dimensions with full slices. The returned data are lists; Python represents multidimensional arrays as nested lists. The innermost set of lists represents contiguous data. .. note:: numpy 'fancy indexing' is *not* supported. Degenerate dimensions are 'collapsed', i.e. no list of only one element will be returned if a single subscript is specified instead of a range. (To avoid this, specify a slice like 1:2, which starts with 1 and ends before 2). Two special cases: 1. requesting a single-dimension slice for a record-varying variable will return all data for that record number (or those record numbers) for that variable. 2. Requests for multi-dimensional variables may skip the record-number dimension and simply specify the slice on the array itself. In that case, the slice of the array will be returned for all records. In the event of ambiguity (e.g., single-dimension slice on a one-dimensional variable), case 1 takes priority. Otherwise, mismatch between the number of dimensions specified in the slice and the number of dimensions in the variable will cause an :exc:`~exceptions.IndexError` to be thrown. This all sounds very complicated but it is essentially attempting to do the 'right thing' for a range of slices. An unusual case is scalar (zero-dimensional) non-record-varying variables. Clearly they cannot be subscripted normally. 
In this case, use the ``[...]`` syntax meaning 'access all data.': >>> import pycdf >>> testcdf = pycdf.CDF('test.cdf', '') >>> variable = testcdf.new('variable', recVary=False, ... type=pycdf.const.CDF_INT4) >>> variable[...] = 10 >>> variable <Var: CDF_INT4 [] NRV > >>> variable[...] 10 Reading any empty non-record-varying variable will return an empty with the same *number* of dimensions, but all dimensions will be of zero length. The scalar is, again, a special case: due to the inability to have a numpy array which is both zero-dimensional and empty, reading an NRV scalar variable with no data will return an empty one-dimensional array. This is really not recommended. As a list type, variables are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_; iterating over a variable returns a single complete record at a time. This is all clearer with examples. Consider a variable ``B_GSM``, with three elements per record (x, y, z components) and fifty records in the CDF. Then: 1. ``B_GSM[0, 1]`` is the y component of the first record. 2. ``B_GSM[10, :]`` is a three-element list, containing x, y, and z components of the 11th record. As a shortcut, if only one dimension is specified, it is assumed to be the record number, so this could also be written ``B_GSM[10]``. 3. ``B_GSM[...]`` reads all data for ``B_GSM`` and returns it as a fifty-element list, each element itself being a three-element list of x, y, z components. Multidimensional example: consider fluxes stored as a function of pitch angle and energy. Such a variable may be called Flux and stored as a two-dimensional array, with the first dimension representing (say) ten energy steps and the second, eighteen pitch angle bins (ten degrees wide, centered from 5 to 175 degrees). Assume 100 records stored in the CDF (i.e. 100 different times). 1. ``Flux[4]`` is a list of ten elements, one per energy step, each element being a list of 18 fluxes, one per pitch bin. All are taken from the fifth record in the CDF. 2. ``Flux[4, :, 0:4]`` is the same record, all energies, but only the first four pitch bins (roughly, field-aligned). 3. ``Flux[..., 0:4]`` is a 100-element list (one per record), each element being a ten-element list (one per energy step), each containing fluxes for the first four pitch bins. This slicing notation is very flexible and allows reading specifically the desired data from the CDF. All data are, on read, converted to appropriate Python data types; EPOCH, EPOCH16, and TIME_TT2000 types are converted to :class:`~datetime.datetime`. Data are returned in numpy arrays. .. note:: Although pycdf supports TIME_TT2000 variables, the Python :class:`~datetime.datetime` object does not support leap seconds. Thus, on read, any seconds past 59 are truncated to 59.999999 (59 seconds, 999 milliseconds, 999 microseconds). Potentially useful list methods and related functions: - `count <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `in <http://docs.python.org/reference/expressions.html#in>`_ - `index <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `len <http://docs.python.org/library/functions.html#len>`_ - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - `sorted <http://docs.python.org/library/functions.html#sorted>`_ The topic of array majority can be very confusing; good background material is available at `IDL Array Storage and Indexing <http://www.idlcoyote.com/misc_tips/colrow_major.html>`_. 
In brief, *regardless of the majority stored in the CDF*, pycdf will always present the data in the native Python majority, row-major order, also known as C order. This is the default order in `NumPy <http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html #internal-memory-layout-of-an-ndarray>`_. However, packages that render image data may expect it in column-major order. If the axes seem 'swapped' this is likely the reason. The :attr:`~Var.attrs` Python attribute acts as a dictionary referencing zAttributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`zAttrList` for more on the dictionary of attributes. With writing, as with reading, every attempt has been made to match the behavior of Python lists. You can write one record, many records, or even certain elements of all records. There is one restriction: only the record dimension (i.e. dimension 0) can be resized by write, as all records in a variable must have the same dimensions. Similarly, only whole records can be deleted. .. note:: Unusual error messages on writing data usually mean that pycdf is unable to interpret the data as a regular array of a single type matching the type and shape of the variable being written. A 5x4 array is supported; an irregular array where one row has five columns and a different row has six columns is not. Error messages of this type include: - ``Data must be well-formed, regular array of number, string, or datetime`` - ``setting an array element with a sequence.`` - ``shape mismatch: objects cannot be broadcast to a single shape`` For these examples, assume Flux has 100 records and dimensions [2, 3]. Rewrite the first record without changing the rest: >>> Flux[0] = [[1, 2, 3], [4, 5, 6]] Writes a new first record and delete all the rest: >>> Flux[...] = [[1, 2, 3], [4, 5, 6]] Write a new record in the last position and add a new record after: >>> Flux[99:] = [[[1, 2, 3], [4, 5, 6]], ... [[11, 12, 13], [14, 15, 16]]] Insert two new records between the current number 5 and 6: >>> Flux[5:6] = [[[1, 2, 3], [4, 5, 6]], [[11, 12, 13], ... [14, 15, 16]]] This operation can be quite slow, as it requires reading and rewriting the entire variable. (CDF does not directly support record insertion.) Change the first element of the first two records but leave other elements alone: >>> Flux[0:2, 0, 0] = [1, 2] Remove the first record: >>> del Flux[0] Removes record 5 (the sixth): >>> del Flux[5] Due to the need to work around a bug in the CDF library, this operation can be quite slow. Delete *all data* from ``Flux``, but leave the variable definition intact: >>> del Flux[...] .. note:: Although this interface only directly supports zVariables, zMode is set on opening the CDF so rVars appear as zVars. See p.24 of the CDF user's guide; pyCDF uses zMode 2. .. autosummary:: ~Var.attrs ~Var.compress ~Var.copy ~Var.dtype ~Var.dv ~Var.insert ~Var.name ~Var.rename ~Var.rv ~Var.shape ~Var.type .. attribute:: Var.attrs zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. .. automethod:: compress .. automethod:: copy .. autoattribute:: dtype .. automethod:: dv .. automethod:: insert .. automethod:: name .. automethod:: rename .. automethod:: rv .. autoattribute:: shape .. 
automethod:: type """ def __init__(self, cdf_file, var_name, *args): """Create or locate a variable Parameters ========== cdf_file : :py:class:`pycdf.CDF` CDF file containing this variable var_name : string name of this variable Other Parameters ================ args additional arguments passed to :py:meth:`_create`. If none, opens an existing variable. If provided, creates a new one. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning """ self.cdf_file = cdf_file #This is the definitive "identify" of variable self._name = None self._type = None #CDF type (long) self._raw = False #Raw access (skip all conversions) if len(args) == 0: self._get(var_name) else: self._create(var_name, *args) #Weak reference to attribute list (use attrs instead) #This avoids a reference loop self._attrlistref = weakref.ref(zAttrList(self)) def __getitem__(self, key): """Returns a slice from the data array. Details under :py:class:`pycdf.Var`. @return: The data from this variable @rtype: list-of-lists of appropriate type. @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) #Hyperslice mostly catches this sort of thing, but #an empty variable is a special case, since we might want to #WRITE to 0th record (which Hyperslice also supports) but #can't READ from it, and iterating over tries to read from it. if hslice.rv: if hslice.dimsizes[0] == 0 and hslice.degen[0] and \ hslice.starts[0] == 0: raise IndexError('record index out of range') #For NRV, again hslice will assume 0th record exists since we might #want to write. So ANY degenerate dim other than the glued-on 0th #suggests an explicit index that should fail. None degenerate suggests #make an empty array. #Note this is pulling a lot of hyperslice stuff into getitem! elif hslice.dimsizes[0] == 0: if len(hslice.degen) > 1 and max(hslice.degen[1:]): raise IndexError('record index out of range') else: #The zero-length dimension is degenerate so it gets chopped, #and you can't have a zero-length numpy array that still #maintains the size of all other dimensions. So just force #a zero-dim array and the rest will follow hslice.counts[...] = 0 #If this is a scalar, need to make a single non-degenerate #dimension so it can be empty. if len(hslice.counts) == 1: hslice.degen[0] = False result = hslice.create_array() if hslice.counts[0] != 0: hslice.select() lib.call(const.GET_, const.zVAR_HYPERDATA_, result.ctypes.data_as(ctypes.c_void_p)) return hslice.convert_input_array(result) def __delitem__(self, key): """Removes a record (or set of records) from the CDF Only whole records can be deleted, so the del call must either specify only one dimension or it must specify all elements of the non-record dimensions. This is *not* a way to resize a variable! Deleting records from the middle of a variable may be very slow in some circumstances. To work around a bug in CDF library versions 3.4.0 and before, all the data must be read in, the requested deletions done, and then all written back out. 
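        For example, ``del variable[5]`` removes only the sixth record,
        while ``del variable[...]`` removes every record but leaves the
        variable definition in place (see the examples in
        :py:class:`pycdf.Var`).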
@param key: index or slice to delete @type key: int or slice @raise TypeError: if an attempt is made to delete from a non record-varying variable, or to delete below the record level """ if not self.rv(): raise TypeError('Cannot delete records from non-record-varying ' 'variable.') hslice = _Hyperslice(self, key) if hslice.dims > 1 and (hslice.counts[1:] != hslice.dimsizes[1:]).any(): raise TypeError('Can only delete entire records.') if hslice.counts[0] == 0: return start = hslice.starts[0] count = hslice.counts[0] interval = hslice.intervals[0] dimsize = hslice.dimsizes[0] self._call() dangerous_delete = False if lib._del_middle_rec_bug and \ (interval != 1 or (start != 0 and start + count < dimsize)): #delete from middle is dangerous if only have one index entry entries = ctypes.c_long(0) lib.call(const.GET_, const.zVAR_nINDEXENTRIES_, ctypes.byref(entries)) dangerous_delete = (entries.value == 1) if dangerous_delete: data = self[...] data = numpy.delete( data, numpy.arange(start, start + count * interval, interval), 0) self[0:dimsize - count] = data first_rec = dimsize - count last_rec = dimsize - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif interval == 1: first_rec = ctypes.c_long(start) last_rec = ctypes.c_long(start + count - 1) lib.call(const.DELETE_, const.zVAR_RECORDS_, first_rec, last_rec) else: self._call() #delete from end to avoid renumbering of records for recno in range(start + (count - 1) * interval, start - 1, -1 * interval): lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(recno), ctypes.c_long(recno)) def __setitem__(self, key, data): """Puts a slice into the data array. Details under :py:class:`pycdf.Var`. @param key: index or slice to store @type key: int or slice @param data: data to store @type data: numpy.array @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. 
IndexError will @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) n_recs = hslice.counts[0] hslice.expand(data) cdf_type = self.type() if cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) else: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=self._np_type()) if cdf_type == const.CDF_EPOCH16.value: datashape = data.shape[:-1] else: datashape = data.shape #Check data sizes if datashape != tuple(hslice.expected_dims()): raise ValueError('attempt to assign data of dimensions ' + str(datashape) + ' to slice of dimensions ' + str(tuple(hslice.expected_dims()))) #Flip majority and reversed dimensions, see convert_input_array data = hslice.convert_output_array(data) #Handle insertions and similar weirdness if hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Specified slice ends before last record, so insert in middle saved_data = self[hslice.starts[0] + n_recs:] if hslice.counts[0] > 0: hslice.select() lib.call(const.PUT_, const.zVAR_HYPERDATA_, data.ctypes.data_as(ctypes.c_void_p)) if hslice.counts[0] < n_recs: first_rec = hslice.starts[0] + hslice.counts[0] last_rec = hslice.dimsizes[0] - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Put saved data in after inserted data self[hslice.starts[0] + hslice.counts[0]:] = saved_data def extend(self, data): """ Append multiple values to the end of this variable This is an efficiency function which overrides the base implementation in MutableSequence. Parameters ---------- data : the data to append """ self[len(self):] = data def insert(self, index, data): """ Inserts a *single* record before an index Parameters ---------- index : int index before which to insert the new record data : the record to insert """ self[index:index] = [data] def _create(self, var_name, datatype, n_elements = 1, dims = (), recVary = const.VARY, dimVarys = None): """Creates a new zVariable @param var_name: name of this variable @type var_name: string @param datatype: CDF data type @type datatype: ctypes.c_long @param n_elements: number of elements (should be 1 except for CDF_CHAR variables). @type n_elements: long @param dims: size of each dimension for multi-dimensional variable, or empty for a zero-dimensional @type dims: sequence of long @param recVary: record variance for this variable (VARY/NOVARY) @type recVary: long @param dimVarys: array of VARY or NOVARY, variance for each dimension @type dimVarys: sequence of long @return: new variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.new}. 
""" dim_array = (ctypes.c_long * len(dims))(*dims) enc_name = var_name.encode('ascii') if dimVarys is None: dim_vary_array = (ctypes.c_long * (len(dims) if len(dims) > 0 else 1))(const.VARY) else: dim_vary_array = (ctypes.c_long * len(dims))(*dimVarys) varNum = ctypes.c_long(0) self.cdf_file._call(const.CREATE_, const.zVAR_, enc_name, datatype, ctypes.c_long(n_elements), ctypes.c_long(len(dims)), dim_array, recVary, dim_vary_array, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) def _delete(self): """Removes this zVariable from the CDF @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ self._call(const.DELETE_, const.zVAR_) self.cdf_file.clear_from_cache(self._name) self._name = None def _get(self, var_name): """Gets an existing zVariable @param var_name: name of this variable @type var_name: string @return: variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.__getitem__}. """ if isinstance(var_name, str_classes): try: enc_name = var_name.encode('ascii').rstrip() except AttributeError: enc_name = var_name.rstrip() #already in ASCII #'touch' CDF to cause an error if the name isn't there; get number varNum = ctypes.c_long(0) self.cdf_file._call(const.GET_, const.zVAR_NUMBER_, enc_name, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) else: #Looking up by number name = ctypes.create_string_buffer(const.CDF_VAR_NAME_LEN256+1) self.cdf_file._call(const.SELECT_, const.zVAR_, ctypes.c_long(var_name), const.GET_, const.zVAR_NAME_, name) self._name = name.value.rstrip() self.cdf_file.add_to_cache(self._name, var_name) def _num(self): """Returns the zVar number for this variable @return: number of this zVar @rtype: int """ return self.cdf_file.var_num(self._name) def __len__(self): """Get number of records for this variable in this file @return: Number of records @rtype: long @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ count = ctypes.c_long(0) self._call(const.GET_, const.zVAR_MAXREC_, ctypes.byref(count)) return (count.value + 1) def __repr__(self): """Returns representation of the variable Cannot return anything that can be eval'd to create a copy, so just wrap the informal representation in angle brackets. @return: info on this zVar @rtype: str """ return '<Var:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the variable This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.Var`. 
@return: info on this zVar, CDFTYPE [dimensions] NRV (if not record-varying) @rtype: str """ if self.cdf_file._opened: cdftype = self.type() chartypes = (const.CDF_CHAR.value, const.CDF_UCHAR.value) rv = self.rv() typestr = lib.cdftypenames[cdftype] + \ ('*' + str(self._nelems()) if cdftype in chartypes else '' ) if rv: sizestr = str([len(self)] + self._dim_sizes()) else: sizestr = str(self._dim_sizes()) return typestr + ' ' + sizestr + ('' if rv else ' NRV') else: if isinstance(self._name, str): return 'zVar "{0}" in closed CDF {1}'.format( self._name, self.cdf_file.pathname) else: return 'zVar "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self.cdf_file.pathname.decode('ascii')) def _n_dims(self): """Get number of dimensions for this variable @return: the number of dimensions @rtype: long """ n_dims = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMDIMS_, ctypes.byref(n_dims)) return n_dims.value def _dim_sizes(self): """Get the dimension sizes for this variable @return: sequence of sizes @rtype: sequence of long @note: This will always be in Python order (i.e. row major, last index iterates most quickly), *regardless* of the majority of the CDF. """ sizes = (ctypes.c_long * const.CDF_MAX_DIMS)(0) self._call(const.GET_, const.zVAR_DIMSIZES_, sizes) sizes = sizes[0:self._n_dims()] return sizes def rv(self, new_rv=None): """ Gets or sets whether this variable has record variance If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Other Parameters ================ new_rv : boolean True to change to record variance, False to change to NRV, unspecified to simply check variance. Returns ======= out : Boolean True if record varying, False if NRV """ if new_rv != None: self._call(const.PUT_, const.zVAR_RECVARY_, const.VARY if new_rv else const.NOVARY) vary = ctypes.c_long(0) self._call(const.GET_, const.zVAR_RECVARY_, ctypes.byref(vary)) return vary.value != const.NOVARY.value def dv(self, new_dv=None): """ Gets or sets dimension variance of each dimension of variable. If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Parameters ========== new_dv : list of boolean Each element True to change that dimension to dimension variance, False to change to not dimension variance. (Unspecified to simply check variance.) Returns ======= out : list of boolean True if that dimension has variance, else false. """ ndims = self._n_dims() if new_dv != None: if len(new_dv) != ndims: raise ValueError('Must specify variance for ' + str(ndims) + 'dimensions.') varies = (ctypes.c_long * ndims)( *[const.VARY if dv else const.NOVARY for dv in new_dv]) self._call(const.PUT_, const.zVAR_DIMVARYS_, varies) if ndims == 0: return [] varies = (ctypes.c_long * const.CDF_MAX_DIMS)() self._call(const.GET_, const.zVAR_DIMVARYS_, varies) return [dv != const.NOVARY.value for dv in varies[0:ndims]] def _call(self, *args, **kwargs): """Select this CDF and variable and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. @param args: Passed directly to the CDF library interface. Useful constants are defined in the :py:mod:`pycdf.const` module of this package. @type args: various, see :py:mod:`ctypes`. 
@return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self.cdf_file._call( const.SELECT_, const.zVAR_, ctypes.c_long(self.cdf_file.var_num(self._name)), *args, **kwargs) def _np_type(self): """Returns the numpy type of this variable This is the numpy type that will come directly out of the CDF; see :meth:`dtype` for the representation post-conversion. Raises ====== CDFError : for library-reported error or failure to find numpy type Returns ======= out : dtype numpy dtype that will hold value from this variable """ cdftype = self.type() if cdftype == const.CDF_CHAR.value or cdftype == const.CDF_UCHAR.value: return numpy.dtype('S' + str(self._nelems())) try: return lib.numpytypedict[cdftype] except KeyError: raise CDFError(const.BAD_DATA_TYPE) def type(self, new_type=None): """ Returns or sets the CDF type of this variable Parameters ========== new_type : ctypes.c_long the new type from :mod:`~pycdf.const` Returns ======= out : int CDF type """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) n_elements = ctypes.c_long(self._nelems()) self._call(const.PUT_, const.zVAR_DATASPEC_, new_type, n_elements) self._type = None if self._type is None: cdftype = ctypes.c_long(0) self._call(const.GET_, const.zVAR_DATATYPE_, ctypes.byref(cdftype)) self._type = cdftype.value return self._type def _nelems(self): """Number of elements for each value in this variable This is the length of strings for CHAR and UCHAR, should be 1 otherwise. @return: length of strings @rtype: int """ nelems = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMELEMS_, ctypes.byref(nelems)) return nelems.value def name(self): """ Returns the name of this variable Returns ======= out : str variable's name """ if isinstance(self._name, str): return self._name elif isinstance(self._name, bytes): return self._name.decode() def compress(self, comptype=None, param=None): """ Set or check the compression of this variable Compression may not be changeable on variables with data already written; even deleting the data may not permit the change. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
Returns ======= out : tuple the (comptype, param) currently in effect """ return _compress(self, comptype, param) def copy(self): """ Copies all data and attributes from this variable Returns ======= out : :class:`VarCopy` list of all data in record order """ return VarCopy(self) def rename(self, new_name): """ Renames this variable Parameters ========== new_name : str the new name for this variable """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_VAR_NAME_LEN256: raise CDFError(const.BAD_VAR_NAME) self._call(const.PUT_, const.zVAR_NAME_, enc_name) self.cdf_file.add_to_cache( enc_name, self.cdf_file.var_num(self._name)) #Still in cache del self.cdf_file._var_nums[self._name] self._name = enc_name @property def shape(self): """ Provides the numpy array-like shape of this variable. Returns a tuple; first element is number of records (RV variable only) And the rest provide the dimensionality of the variable. .. note:: Assigning to this attribute will not change the shape. """ if self.rv(): return tuple([len(self)] + self._dim_sizes()) else: return tuple(self._dim_sizes()) @property def dtype(self): """ Provide the numpy dtype equivalent to the CDF type of this variable. Data from this variable will be returned in numpy arrays of this type. See Also -------- type """ cdftype = self.type() if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str is not bytes and not self._raw: return numpy.dtype('U' + str(self._nelems())) if cdftype in (const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value) and not self._raw: return numpy.dtype('O') return self._np_type() def _get_attrs(self): """Get attribute list Provide access to the zVar's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = zAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. """) class _Hyperslice(object): """Represents a CDF 'slice' used for the hyper CDF functions For internal module use only. @ivar dims: number of dimensions to this slice, usually number of dimensions to the variable plus one for the record, which represents the 0th (least rapidly varying) dimension. @type dims: int @ivar dimsizes: size of each dimension (0th is number of records) @type dimsizes: list of int @ivar starts: index of the start value for each dimension ('dimension indices' in CDF speak) @type starts: list of int @ivar counts: number of values to get from each dimension. Final result will be the product of everything in counts. ('dimension counts' in CDF speak) @type counts: numpy.array @ivar intervals: interval between successive indices to use for each dimension. ('dimension invervals' in CDF speak) @type intervals: list of int @ivar degen: is this dimension degenerate, i.e. should be removed in the returned dataset. A 3D array with one dimension degenerate will be returned as a 2D array (i.e. list-of-lists.) @type degen: numpy.array @ivar rev: should this dimension be returned in reverse order? 
@type rev: numpy.array @ivar column: is this slice in column-major mode (if false, row-major) @type column: boolean @ivar zvar: what CDF variable this object slices on @type zvar: :py:class:`pycdf.Var` @ivar expanded_key: fully-expanded version of the key passed to the constructor (all dimensions filled in) @type expanded_key: tuple @note: All dimension-related variables are stored row-major (Python order) """ def __init__(self, zvar, key): """Create a Hyperslice @param zvar: zVariable that this slices @type zvar: :py:class:`pycdf.Var` @param key: Python multi-dimensional slice as passed to __getitem__ @type key: tuple of slice and/or int @raise IndexError: if slice is out of range, mismatches dimensions, or otherwise unparsable. @raise ValueError: if slice has invalid values """ self.zvar = zvar self.rv = self.zvar.rv() #dim of records, + 1 record dim (NRV always is record 0) self.dims = zvar._n_dims() + 1 self.dimsizes = [len(zvar)] + \ zvar._dim_sizes() self.starts = [0] * self.dims self.counts = numpy.empty((self.dims,), dtype=numpy.int32) self.counts.fill(1) self.intervals = [1] * self.dims self.degen = numpy.zeros(self.dims, dtype=numpy.bool) self.rev = numpy.zeros(self.dims, dtype=numpy.bool) #key is: #1. a single value (integer or slice object) if called 1D #2. a tuple (of integers and/or slice objects) if called nD #3. Each item is either a single value (degenerate dim) # or a slice object. if not hasattr(key, '__len__'): #Not a container object, pack in tuple key = (key, ) if not self.rv: key = (0, ) + key #NRV, so always get 0th record (degenerate) key = self.expand_ellipsis(key, self.dims) if self.rv: #special-cases for RV variables if len(key) == 1: #get all data for this record(s) key = self.expand_ellipsis(key + (Ellipsis, ), self.dims) elif len(key) == self.dims - 1: #get same slice from each record key = (slice(None, None, None), ) + key if len(key) == self.dims: self.expanded_key = key for i in range(self.dims): idx = key[i] if hasattr(idx, 'start'): #slice (self.starts[i], self.counts[i], self.intervals[i], self.rev[i]) = \ self.convert_range(idx.start, idx.stop, idx.step, self.dimsizes[i]) else: #Single degenerate value if idx < 0: idx += self.dimsizes[i] if idx != 0 and (idx >= self.dimsizes[i] or idx < 0): raise IndexError('list index out of range') self.starts[i] = idx self.degen[i] = True else: raise IndexError('Slice does not match dimensions for zVar ' + str(zvar._name)) self.column = zvar.cdf_file.col_major() def expected_dims(self, data=None): """Calculate size of non-degenerate dimensions Figures out size, in each dimension, of expected input data @return: size of each dimension for this slice, excluding degenerate @rtype: list of int """ return [self.counts[i] for i in range(self.dims) if not self.degen[i]] def expand(self, data): """Expands the record dimension of this slice to hold a set of data If the length of data (outermost dimension) is larger than the record count (counts[0]) for this slice, expand the slice to hold all the data. This requires that the record dimension of the slice not be degenerate, and also that it not have been completely specified when the hyperslice was created (i.e. record dimension either ellipsis or no specified stop.) Does *not* expand any other dimension, since that's Very Hard in CDF. 
@param data: the data which are intended to be stored in this slice @type data: list """ rec_slice = self.expanded_key[0] if not self.rv or isinstance(data, str_classes) or self.degen[0] or \ not hasattr(rec_slice, 'stop'): return if len(data) < self.counts[0]: #Truncate to fit data if rec_slice.stop is None and rec_slice.step in (None, 1): self.counts[0] = len(data) elif len(data) > self.counts[0]: #Expand to fit data if rec_slice.step in (None, 1): self.counts[0] = len(data) def create_array(self): """Creates a numpy array to hold the data from this slice Returns ======= out : numpy.array array sized, typed, and dimensioned to hold data from this slice """ counts = self.counts degen = self.degen if self.column: counts = self.reorder(counts) degen = self.reorder(degen) #TODO: Forcing C order for now, revert to using self.column later array = numpy.empty( [counts[i] for i in range(len(counts)) if not degen[i]], self.zvar._np_type(), order='C') return numpy.require(array, requirements=('C', 'A', 'W')) def convert_input_array(self, buffer): """Converts a buffer of raw data from this slice EPOCH(16) variables always need to be converted. CHAR need converted to Unicode if py3k Parameters ========== buffer : numpy.array data as read from the CDF file Returns ======= out : numpy.array converted data """ result = self._flip_array(buffer) #Convert to derived types cdftype = self.zvar.type() if not self.zvar._raw: if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str != bytes: dt = numpy.dtype('U{0}'.format(result.dtype.itemsize)) result = numpy.require(numpy.char.array(result).decode(), dtype=dt) elif cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(result) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(result) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(result) return result def convert_output_array(self, buffer): """Convert a buffer of data that will go into this slice Parameters ========== buffer : numpy.array data to go into the CDF file Returns ======= out : numpy.array input with majority flipped and dimensions reversed to be suitable to pass directly to CDF library. """ buffer = self._flip_array(buffer) return numpy.require(buffer, requirements=('C', 'A', 'W')) def _flip_array(self, data): """ Operations for majority, etc. common between convert_input and _output """ cdftype = self.zvar.type() #Flip majority if any non-degenerate dimensions exist if self.column and not min(self.degen): #Record-number dim degen, swap whole thing if self.degen[0]: if cdftype == const.CDF_EPOCH16.value: #Maintain last dimension data = data.transpose( list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose() #Record-number dimension is not degenerate, so keep it first else: if cdftype == const.CDF_EPOCH16.value: data = data.transpose( [0] + list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose( [0] + list(range(len(data.shape) - 1, 0, -1))) #Reverse non-degenerate dimensions in rev #Remember that the degenerate indices are already gone! 
if self.rev.any(): sliced = [(slice(None, None, -1) if self.rev[i] else slice(None)) for i in range(self.dims) if not self.degen[i]] if cdftype == const.CDF_EPOCH16.value: #don't reverse last dim sliced.extend(slice(None)) data = operator.getitem(data, tuple(sliced)) return data def select(self): """Selects this hyperslice in the CDF Calls the CDF library to select the CDF, variable, records, and array elements corresponding to this slice. """ args = (const.SELECT_, const.zVAR_RECNUMBER_, ctypes.c_long(self.starts[0]), const.SELECT_, const.zVAR_RECCOUNT_, ctypes.c_long(self.counts[0]), const.SELECT_, const.zVAR_RECINTERVAL_, ctypes.c_long(self.intervals[0])) if self.dims > 1: dims = self.dims - 1 args += (const.SELECT_, const.zVAR_DIMINDICES_, (ctypes.c_long * dims)(*self.starts[1:]), const.SELECT_, const.zVAR_DIMCOUNTS_, (ctypes.c_long * dims)(*self.counts[1:]), const.SELECT_, const.zVAR_DIMINTERVALS_, (ctypes.c_long * dims)(*self.intervals[1:])) self.zvar._call(*args) @staticmethod def expand_ellipsis(slices, n_dims): """Expands any ellipses into correct number of full-size slices @param slices: tuple of slices, integers, or ellipse objects @type slices: tuple @param n_dims: number of dimensions this slice is over @type n_dims: int @return: L{slices} with ellipses replaced by appropriate number of full-dimension slices @rtype: tuple @raise IndexError: if ellipses specified when already have enough dimensions """ if slices is Ellipsis: return tuple([slice(None, None, None) for i in range(n_dims)]) #Elements might be numpy arrays, so can't use in/index idx = [i for i, v in enumerate(slices) if v is Ellipsis] if not idx: #no ellipsis return slices if len(idx) > 1: #multiples! raise IndexError('Ellipses can only be used once per slice.') idx = idx[0] #how many dims to expand ellipsis to #remember the ellipsis is in len(slices) and must be replaced! extra = n_dims - len(slices) + 1 if extra < 0: raise IndexError('too many indices') result = slices[0:idx] + (slice(None), ) * extra + slices[idx+1:] return result @staticmethod def check_well_formed(data): """Checks if input data is well-formed, regular array""" d = numpy.asanyarray(data) if d.dtype == numpy.object: #this is probably going to be bad try: len(d.flat[0]) except TypeError: #at least it's not a list pass else: raise ValueError( 'Data must be well-formed, regular array of number, ' 'string, or datetime') @staticmethod def dimensions(data): """Finds the dimensions of a nested list-of-lists @param data: data of which dimensions are desired @type data: list (of lists) @return: dimensions of L{data}, in order outside-in @rtype: list of int @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) _Hyperslice.check_well_formed(d) return d.shape @staticmethod def types(data, backward=False): """Find dimensions and valid types of a nested list-of-lists Any given data may be representable by a range of CDF types; infer the CDF types which can represent this data. This breaks down to: 1. Proper kind (numerical, string, time) 2. Proper range (stores highest and lowest number) 3. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: 1. Type that matches precision of data first, then 2. integer type before float type, then 3. Smallest type first, then 4. signed type first, then 5. specifically-named (CDF_BYTE) vs. 
generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if L{data} specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: 1. absolute values between 0 and 3e-39 2. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. @param data: data for which dimensions and CDF types are desired @type data: list (of lists) @param backward: limit to pre-CDF3 types @type backward: bool @return: dimensions of L{data}, in order outside-in; CDF types which can represent this data; number of elements required (i.e. length of longest string) @rtype: 3-tuple of lists ([int], [ctypes.c_long], [int]) @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) dims = d.shape elements = 1 types = [] _Hyperslice.check_well_formed(d) if d.dtype.kind in ('S', 'U'): #it's a string types = [const.CDF_CHAR, const.CDF_UCHAR] elements = d.dtype.itemsize if d.dtype.kind == 'U': #UTF-8 uses 4 bytes per elements //= 4 elif d.size and hasattr(numpy.ma.getdata(d).flat[0], 'microsecond'): if max((dt.microsecond % 1000 for dt in d.flat)) > 0: types = [const.CDF_EPOCH16, const.CDF_EPOCH, const.CDF_TIME_TT2000] else: types = [const.CDF_EPOCH, const.CDF_EPOCH16, const.CDF_TIME_TT2000] if backward: del types[types.index(const.CDF_EPOCH16)] del types[-1] elif not lib.supports_int8: del types[-1] elif d is data or isinstance(data, numpy.generic): #numpy array came in, use its type (or byte-swapped) types = [k for k in lib.numpytypedict if (lib.numpytypedict[k] == d.dtype or lib.numpytypedict[k] == d.dtype.newbyteorder()) and not k in lib.timetypes] if (not lib.supports_int8 or backward) \ and const.CDF_INT8.value in types: del types[types.index(const.CDF_INT8.value)] #Maintain priority to match the ordered lists below: #float/double (44, 45) before real (21/22), and #byte (41) before int (1) before char (51). So hack. #Consider making typedict an ordered dict once 2.6 is dead. types.sort(key=lambda x: x % 50, reverse=True) if not types: #not a numpy array, or can't parse its type if d.dtype.kind == 'O': #Object. 
Try to make it numeric #Can't do safe casting from Object, so try and compare #Basically try most restrictive to least restrictive trytypes = (numpy.uint64, numpy.int64, numpy.float64) for t in trytypes: try: newd = d.astype(dtype=t) except: #Failure to cast, try next type continue if (newd == d).all(): #Values preserved, use this type d = newd #Continue with normal guessing, as if a list break else: #fell through without a match raise ValueError( 'Cannot convert generic objects to CDF type.') if d.dtype.kind in ('i', 'u'): #integer minval = numpy.min(d) maxval = numpy.max(d) if minval < 0: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_INT2, const.CDF_INT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 15, 2 ** 31, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] else: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_UINT1, const.CDF_INT2, const.CDF_UINT2, const.CDF_INT4, const.CDF_UINT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 8, 2 ** 15, 2 ** 16, 2 ** 31, 2 ** 32, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] types = [t for (t, c) in zip(types, cutoffs) if c > maxval and (minval >= 0 or minval >= -c)] if (not lib.supports_int8 or backward) \ and const.CDF_INT8 in types: del types[types.index(const.CDF_INT8)] else: #float if dims is (): if d != 0 and (abs(d) > 1.7e38 or abs(d) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] else: absolutes = numpy.abs(d[d != 0]) if len(absolutes) > 0 and \ (numpy.max(absolutes) > 1.7e38 or numpy.min(absolutes) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] types = [t.value if hasattr(t, 'value') else t for t in types] return (dims, types, elements) @staticmethod def reorder(seq): """Reorders seq to switch array majority Used to take an array of subscripts between row and column majority. First element is not touched, being the record number. @param seq: a sequence of *subscripts* @type seq: sequence of integers @return: seq with all but element 0 reversed in order @rtype: sequence of integers """ return numpy.concatenate((seq[0:1], numpy.flipud(seq)[:-1])) @staticmethod def convert_range(start, stop, step, size): """Converts a start/stop/step range to start/count/interval (i.e. changes from Python-style slice to CDF-style) @param start: index to start a slice at, may be none or negative @type start: int @param stop: index at end of slice (one-past, standard Python), may be none or negative @type stop: int @param step: interval for stepping through stlice @type step: int @param size: size of list to slice @type size: int @return: (start, count, interval, rev) where: 1. start is the start index, normalized to be within the size of the list and negatives handled 2. count is the number of records in the slice, guaranteed to stop before the end 3. interval is the skip between records 4. rev indicates whether the sequence should be reversed @rtype: (int, int, int, boolean) """ (start, stop, step) = slice(start, stop, step).indices(size) if step < 0: step *= -1 count = int((start - stop + step - 1) / step) start = start - (count - 1) * step rev = True else: count = int((stop - start + step - 1) / step) rev = False if count < 0: count = 0 start = 0 return (start, count, step, rev) class Attr(MutableSequence): """An attribute, g or z, for a CDF .. 
warning:: This class should not be used directly, but only in its subclasses, :class:`gAttr` and :class:`zAttr`. The methods listed here are safe to use in the subclasses. Represents a CDF attribute, providing access to the Entries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. An introduction to CDF attributes can be found in section 2.4 of the CDF user's guide. Each element of the list is a single Entry of the appropriate type. The index to the elements is the Entry number. Multi-dimensional slicing is *not* supported; an Entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry .. autosummary:: ~Attr.append ~Attr.has_entry ~Attr.insert ~Attr.max_idx ~Attr.new ~Attr.number ~Attr.rename ~Attr.type .. automethod:: append .. automethod:: has_entry .. automethod:: insert .. automethod:: max_idx .. automethod:: new .. automethod:: number .. automethod:: rename .. automethod:: type """ def __init__(self, cdf_file, attr_name, create=False): """Initialize this attribute @param cdf_file: CDF file containing this attribute @type cdf_file: :py:class:`pycdf.CDF` @param attr_name: Name of this attribute @type attr_name: str @param create: True to create attribute, False to look up existing. @type create: bool """ self._cdf_file = cdf_file self._raw = False if isinstance(attr_name, str_classes): try: self._name = attr_name.encode('ascii') except AttributeError: self._name = attr_name attrno = ctypes.c_long() if create: self._cdf_file._call(const.CREATE_, const.ATTR_, self._name, self.SCOPE, ctypes.byref(attrno)) self._cdf_file.add_attr_to_cache( self._name, attrno.value, self.SCOPE == const.GLOBAL_SCOPE) else: #Ensure exists, and populate cache. See scope note below attrno, scope = self._cdf_file.attr_num(self._name) else: name = ctypes.create_string_buffer(const.CDF_ATTR_NAME_LEN256 + 1) scope = ctypes.c_long(0) self._cdf_file._call(const.SELECT_, const.ATTR_, ctypes.c_long(attr_name)) #Because it's possible to create a gAttr Python objecting #referencing an Attribute with variable scope, and vice-versa, #do NOT assume the scope matches #(Higher level code checks for that being a bad thing.) self._cdf_file._call( const.GET_, const.ATTR_NAME_, name, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) self._name = name.value.rstrip() if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) self._cdf_file.add_attr_to_cache(self._name, attr_name, scope) def __getitem__(self, key): """Return a slice of Entries. Because Attributes may be sparse, a multi-element slice will return None for those elements which do not have associated Entries. @param key: index or range of Entry number to return @type key: slice or int @return: a list of entries, appropriate type. @raise IndexError: if L{key} is an int and that Entry number does not exist. 
""" if key is Ellipsis: key = slice(None, None, None) if hasattr(key, 'indices'): idx = range(*key.indices(self.max_idx() + 1)) return [self._get_entry(i) if self.has_entry(i) else None for i in idx] else: if self.has_entry(key): return self._get_entry(key) else: raise IndexError('list index ' + str(key) + ' out of range.') def _check_other_entries(self, types): """Try to get the type of this entry from others in the Attribute For zAttrs, checks if all other Entries are the same type, and at least one doesn't match its zVar, i.e. Entry type dominates (otherwise assumption is the Var type dominates). For gAttrs, checks all other Entries, and gives priority to the one that's earliest in the possible type list and exists in other Entries. This is only one component of Entry type guessing! :param list types: CDF types that are candidates (match the data) :return: The type discerned from other Entries, or None """ if self.ENTRY_ == const.zENTRY_: #If everything else is the same entry type, #and one is not the same as its var, probably #all entries should be of that type cand_et = None #The Entry type that might work one_var_diff = False #One Var has a type different from Entry for num in range(self.max_idx() + 1): if not self.has_entry(num): continue vartype = self._cdf_file[num].type() entrytype = self.type(num) if vartype != entrytype: one_var_diff = True if cand_et is None: if not entrytype in types: return None #One var has Entry with "impossible" type cand_et = entrytype elif cand_et != entrytype: return None #Two vars have Entries with different types if one_var_diff and cand_et is not None: return cand_et else: # Of those types which exist in other entries, # find the one which is earliest # in types, i.e. the preferred type entrytypes = [self.type(num) for num in range(self.max_idx() + 1) if self.has_entry(num)] entrytypes = [et for et in entrytypes if et in types] if entrytypes: return types[ min([types.index(et) for et in entrytypes])] return None def __setitem__(self, key, data): """Set a slice of Entries. @param key: index or range of Entry numbers to set @type key: slice or int @param data: the data to set these entries to. Normally each entry should be a sequence; if a scalar is provided, it is treated as a single-element list. @type data: scalar or list @raise ValueError: if size of {data} does not match size of L{key} @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. 
""" if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): #Single value, promote everything a dimension idx = (key, key + 1, 1) data = [data] else: idx = key.indices(self.max_idx() + 1) if key.step is None or key.step > 0: #Iterating forward, extend slice to match data if len(data) > len(range(*idx)): idx = (idx[0], idx[0] + idx[2] * len(data), idx[2]) #get, and check, types and sizes for all data #checks first so don't have error after changing half the Entries data_idx = -1 typelist = [] for i in range(*idx): data_idx += 1 if data_idx >= len(data): continue datum = data[data_idx] if datum is None: typelist[i] = (None, None, None) continue (dims, types, elements) = _Hyperslice.types( datum, backward=self._cdf_file.backward) if len(types) <= 0: raise ValueError('Cannot find a matching CDF type.') if len(dims) > 1: raise ValueError('Entries must be scalar or 1D.') elif len(dims) == 1 and isinstance(datum[0], str_classes): raise ValueError('Entry strings must be scalar.') entry_type = None if self.has_entry(i): #If the entry already exists, match its type entry_type = self.type(i) if not entry_type in types: entry_type = None if entry_type is None: #Check other entries for this attribute entry_type = self._check_other_entries(types) if entry_type is None and self.ENTRY_ == const.zENTRY_: #Fall back to zVar type vartype = self._cdf_file[i].type() if vartype in types: entry_type = vartype else: entry_type = types[0] elif entry_type is None: entry_type = types[0] if not entry_type in lib.numpytypedict: raise ValueError('Cannot find a matching numpy type.') typelist.append((dims, entry_type, elements)) data_idx = -1 for i in range(*idx): data_idx += 1 if data_idx >= len(data) or data[data_idx] is None: if self.has_entry(i): del self[i] continue datum = data[data_idx] (dims, entry_type, elements) = typelist[data_idx] self._write_entry(i, datum, entry_type, dims, elements) def __delitem__(self, key): """Delete a slice of Entries. @param key: index or range of Entry numbers to delete @type key: slice or int @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. """ if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): idx = (key, key + 1, 1) else: idx = key.indices(self.max_idx() + 1) for i in range(*idx): self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(i), const.DELETE_, self.ENTRY_) def __iter__(self, current=0): """Iterates over all entries in this Attribute Returns data from one entry at a time until reaches the end. @note: Returned in entry-number order. """ while current <= self.max_idx(): if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current += 1 def __reversed__(self, current=None): """Iterates over all entries in this Attribute Returns data from one entry at a time, starting at end and going to beginning. @note: Returned in entry-number order. """ if current is None: current = self.max_idx() while current >= 0: if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current -= 1 def __len__(self): """Number of Entries for this Attr. NOT same as max Entry number. 
@return: Number of Entries @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_NUMENTRIES_, ctypes.byref(count)) return count.value def __repr__(self): """Returns representation of an attribute Cannot return anything that can be eval'd to create a copy of the attribtute, so just wrap the informal representation in angle brackets. @return: all the data in this attribute @rtype: str """ return '<\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute This is an 'informal' representation in that it cannot be evaluated directly to create an L{Attr}. @return: all the data in this attribute @rtype: str """ if self._cdf_file._opened: return '\n'.join([str(item) for item in self]) else: if isinstance(self._name, str): return 'Attribute "{0}" in closed CDF {1}'.format( self._name, self._cdf_file.pathname) else: return 'Attribute "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self._cdf_file.pathname.decode('ascii')) def insert(self, index, data): """Insert an entry at a particular number Inserts entry at particular number while moving all subsequent entries to one entry number later. Does not close gaps. Parameters ========== index : int index where to put the new entry data : data for the new entry """ max_entry = self.max_idx() if index > max_entry: #Easy case self[index] = data return for i in range(max_entry, index - 1, -1): if self.has_entry(i+1): self.__delitem__(i+1) if self.has_entry(i): self.new(self.__getitem__(i), type=self.type(i), number=i+1) self[index] = data def append(self, data): """Add an entry to end of attribute Puts entry after last defined entry (does not fill gaps) Parameters ========== data : data for the new entry """ self[self.max_idx() + 1] = data def _call(self, *args, **kwargs): """Select this CDF and Attr and call the CDF internal interface @param args: Passed directly to the CDF library interface. @type args: various, see :py:mod:`ctypes`. @return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self._cdf_file._call( const.SELECT_, const.ATTR_, ctypes.c_long(self._cdf_file.attr_num(self._name)[0]), *args, **kwargs) def _entry_len(self, number): """Number of elements in an Entry @param number: number of Entry @type number: int @return: number of elements @rtype: int """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') count = ctypes.c_long(0) self._call( const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_NUMELEMS_, ctypes.byref(count)) return count.value def type(self, number, new_type=None): """Find or change the CDF type of a particular Entry number Parameters ========== number : int number of Entry to check or change Other Parameters ================ new_type type to change this Entry to, from :mod:`~pycdf.const`. Omit to only check type. Returns ======= out : int CDF variable type, see :mod:`~pycdf.const` Notes ===== If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) size = ctypes.c_long(self._entry_len(number)) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATASPEC_, new_type, size, ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') cdftype = ctypes.c_long(0) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATATYPE_, ctypes.byref(cdftype), ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') return cdftype.value def has_entry(self, number): """Check if this attribute has a particular Entry number Parameters ========== number : int number of Entry to check or change Returns ======= out : bool True if ``number`` is a valid entry number; False if not """ status = self._call(const.CONFIRM_, self.ENTRY_EXISTENCE_, ctypes.c_long(number), ignore=(const.NO_SUCH_ENTRY, )) return not status == const.NO_SUCH_ENTRY def max_idx(self): """Maximum index of Entries for this Attr Returns ======= out : int maximum Entry number """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_MAXENTRY_, ctypes.byref(count)) return count.value def new(self, data, type=None, number=None): """Create a new Entry in this Attribute .. note:: If ``number`` is provided and an Entry with that number already exists, it will be overwritten. Parameters ========== data data to put in the Entry Other Parameters ================ type : int type of the new Entry, from :mod:`~pycdf.const` (otherwise guessed from ``data``) number : int Entry number to write, default is lowest available number. """ if number is None: number = 0 while self.has_entry(number): number += 1 (dims, types, elements) = _Hyperslice.types( data, backward=self._cdf_file.backward) if type is None: #Guess based on other entries type = self._check_other_entries(types) if type is None and self.ENTRY_ == const.zENTRY_: #Try to match variable type vartype = self._cdf_file[number].type() if vartype in types: type = vartype if type is None: type = types[0] elif hasattr(type, 'value'): type = type.value self._write_entry(number, data, type, dims, elements) def number(self): """Find the attribute number for this attribute Returns ======= out : int attribute number """ no = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.ATTR_NUMBER_, self._name, ctypes.byref(no)) return no.value def global_scope(self): """Determine scope of this attribute. Returns ======= out : bool True if global (i.e. gAttr), False if zAttr """ return self._cdf_file.attr_num(self._name)[1] def rename(self, new_name): """Rename this attribute Renaming a zAttribute renames it for *all* zVariables in this CDF! 
Parameters ========== new_name : str the new name of the attribute """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_ATTR_NAME_LEN256: raise CDFError(const.BAD_ATTR_NAME) self._call(const.PUT_, const.ATTR_NAME_, enc_name) self._cdf_file.add_attr_to_cache( enc_name, *self._cdf_file.attr_num(self._name)) #still in cache del self._cdf_file._attr_info[self._name] self._name = enc_name def _get_entry(self, number): """Read an Entry associated with this L{Attr} @param number: number of Entry to return @type number: int @return: data from entry numbered L{number} @rtype: list or str """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') #Make a big enough buffer length = self._entry_len(number) cdftype = self.type(number) if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): buff = numpy.empty((), 'S{0}'.format(length), order='C') else: if not cdftype in lib.numpytypedict: raise CDFError(const.BAD_DATA_TYPE) buff = numpy.empty((length,), lib.numpytypedict[cdftype], order='C') buff = numpy.require(buff, requirements=('C', 'A', 'W')) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATA_, buff.ctypes.data_as(ctypes.c_void_p)) #decode if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): if str == bytes or self._raw: #Py2k, leave as bytes result = bytes(buff) else: #Py3k, make unicode result = str(numpy.char.array(buff).decode()) else: if not self._raw: if cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(buff) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(buff) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(buff) else: result = buff else: result = buff if length == 1: result = result[0] return result def _write_entry(self, number, data, cdf_type, dims, elements): """Write an Entry to this Attr. @param number: number of Entry to write @type number: int @param data: data to write @param cdf_type: the CDF type to write, from :py:mod:`pycdf.const` @param dims: dimensions of L{data} @type dims: list @param elements: number of elements in L{data}, 1 unless it is a string @type elements: int """ if len(dims) == 0: n_write = 1 else: n_write = dims[0] if cdf_type in (const.CDF_CHAR.value, const.CDF_UCHAR.value): data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.dtype('S' + str(elements))) n_write = elements elif cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data), except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) elif cdf_type in lib.numpytypedict: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=lib.numpytypedict[cdf_type]) else: raise CDFError(const.BAD_DATA_TYPE) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATA_, ctypes.c_long(cdf_type), ctypes.c_long(n_write), data.ctypes.data_as(ctypes.c_void_p)) def _delete(self): """Delete this Attribute Also deletes all Entries associated with it. 
""" self._call(const.DELETE_, const.ATTR_) self._cdf_file.clear_attr_from_cache(self._name) self._name = None class zAttr(Attr): """zAttribute for zVariables within a CDF. .. warning:: Because zAttributes are shared across all variables in a CDF, directly manipulating them may have unexpected consequences. It is safest to operate on zEntries via :class:`zAttrList`. .. note:: When accessing a zAttr, pyCDF exposes only the zEntry corresponding to the associated zVariable. See Also ======== :class:`Attr` """ ENTRY_ = const.zENTRY_ ENTRY_DATA_ = const.zENTRY_DATA_ SCOPE = const.VARIABLE_SCOPE ENTRY_EXISTENCE_ = const.zENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMzENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXzENTRY_ ENTRY_NUMELEMS_ = const.zENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.zENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.zENTRY_DATASPEC_ def insert(self, index, data): """Insert entry at particular index number Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError def append(self, index, data): """Add entry to end of attribute list Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError class gAttr(Attr): """Global Attribute for a CDF Represents a CDF attribute, providing access to the gEntries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. Normally accessed by providing a key to a :class:`gAttrList`: >>> attribute = cdffile.attrs['attribute_name'] >>> first_gentry = attribute[0] Each element of the list is a single gEntry of the appropriate type. The index to the elements is the gEntry number. A gEntry may be either a single string or a 1D array of numerical type. Entries of numerical type (everything but CDF_CHAR and CDF_UCHAR) with a single element are returned as scalars; multiple-element entries are returned as a list. No provision is made for accessing below the entry level; the whole list is returned at once (but Python's slicing syntax can be used to extract individual items from that list.) Multi-dimensional slicing is *not* supported; an entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry gEntries are *not* necessarily contiguous; a gAttribute may have an entry 0 and entry 2 without an entry 1. :meth:`~Attr.len` will return the *number* of gEntries; use :meth:`~Attr.max_idx` to find the highest defined gEntry number and :meth:`~Attr.has_entry` to determine if a particular gEntry number exists. Iterating over all entries is also supported:: >>> entrylist = [entry for entry in attribute] Deleting gEntries will leave a "hole": >>> attribute[0:3] = [1, 2, 3] >>> del attribute[1] >>> attribute.has_entry(1) False >>> attribute.has_entry(2) True >>> print attribute[0:3] [1, None, 3] Multi-element slices over nonexistent gEntries will return ``None`` where no entry exists. Single-element indices for nonexistent gEntries will raise ``IndexError``. Assigning ``None`` to a gEntry will delete it. 
When assigning to a gEntry, the type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing gEntry of the same number in this gAttribute #. other gEntries in this gAttribute #. data-matching constraints described in :meth:`CDF.new`. See Also ======== :class:`Attr` """ ENTRY_ = const.gENTRY_ ENTRY_DATA_ = const.gENTRY_DATA_ SCOPE = const.GLOBAL_SCOPE ENTRY_EXISTENCE_ = const.gENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMgENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXgENTRY_ ENTRY_NUMELEMS_ = const.gENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.gENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.gENTRY_DATASPEC_ class AttrList(MutableMapping): """Object representing a list of attributes. .. warning:: This class should not be used directly, but only via its subclasses, :class:`gAttrList` and :class:`zAttrList`. Methods listed here are safe to use from the subclasses. .. autosummary:: ~AttrList.clone ~AttrList.copy ~AttrList.from_dict ~AttrList.new ~AttrList.rename .. automethod:: clone .. automethod:: copy .. automethod:: from_dict .. automethod:: new .. automethod:: rename """ def __init__(self, cdf_file, special_entry=None): """Initialize the attribute collection @param cdf_file: CDF these attributes are in @type cdf_file: :py:class:`pycdf.CDF` @param special_entry: callable which returns a "special" entry number, used to limit results for zAttrs to those which match the zVar (i.e. the var number) @type special_entry: callable """ self._cdf_file = cdf_file self.special_entry = special_entry def __getitem__(self, name): """Find an Attribute by name @param name: name of the Attribute to return @type name: str @return: attribute named L{name} @rtype: L{Attr} @raise KeyError: if there is no attribute named L{name} @raise CDFError: other errors in CDF library """ try: attrib = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attrib.global_scope() != self.global_scope: raise KeyError(name + ': no ' + self.attr_name + ' by that name.') return attrib def __setitem__(self, name, data): """Create an Attribute or change its entries @param name: name of Attribute to change @type name: str @param data: Entries to populate this Attribute with. Any existing Entries will be deleted! Another C{Attr} may be specified, in which case all its entries are copied. @type data: scalar, list, or L{Attr} """ if isinstance(data, AttrList): if name in self: del self[name] attr = self._get_or_create(name) for entryno in range(data.max_idx()): if data.has_entry(entryno): attr.new(data[entryno], data.type(entryno), entryno) else: attr = self._get_or_create(name) if isinstance(data, str_classes): data = [data] else: try: junk = len(data) except TypeError: data = [data] attr[:] = data del attr[len(data):] def __delitem__(self, name): """Delete an Attribute (and all its entries) @param name: name of Attribute to delete @type name: str """ try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) attr._delete() def __iter__(self, current=0): """Iterates over all Attr in this CDF or variable Returns name of one L{Attr} at a time until reaches the end. @note: Returned in number order. 
""" count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) while current < count.value: candidate = self.AttrType(self._cdf_file, current) if candidate.global_scope() == self.global_scope: if self.special_entry is None or \ candidate.has_entry(self.special_entry()): if str == bytes: value = yield(candidate._name) else: value = yield(candidate._name.decode()) if value != None: current = self[value].number() current += 1 def __repr__(self): """Returns representation of attribute list Cannot return anything that can be eval'd to create a copy of the list, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<' + self.__class__.__name__ + ':\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute list This is an 'informal' representation in that it cannot be evaluated directly to create an L{AttrList}. @return: all the data in this list of attributes @rtype: str """ if self._cdf_file._opened: return '\n'.join([key + ': ' + ( ('\n' + ' ' * (len(key) + 2)).join( [str(value[i]) + ' [' + lib.cdftypenames[value.type(i)] + ']' for i in range(value.max_idx() + 1) if value.has_entry(i)]) if isinstance(value, Attr) else str(value) + ' [' + lib.cdftypenames[self.type(key)] + ']' ) for (key, value) in sorted(self.items())]) else: if isinstance(self._cdf_file.pathname, str): return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname) else: return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname.decode('ascii')) def clone(self, master, name=None, new_name=None): """ Clones another attribute list, or one attribute from it, into this list. Parameters ========== master : AttrList the attribute list to copy from. This can be any dict-like object. Other Parameters ================ name : str (optional) name of attribute to clone (default: clone entire list) new_name : str (optional) name of the new attribute, default ``name`` """ if name is None: self._clone_list(master) else: self._clone_attr(master, name, new_name) def copy(self): """ Create a copy of this attribute list Returns ======= out : dict copy of the entries for all attributes in this list """ return dict((key, value[:] if isinstance(value, Attr) else value) for (key, value) in self.items()) def new(self, name, data=None, type=None): """ Create a new Attr in this AttrList Parameters ========== name : str name of the new Attribute Other Parameters ================ data data to put into the first entry in the new Attribute type CDF type of the first entry from :mod:`~pycdf.const`. Only used if data are specified. Raises ====== KeyError : if the name already exists in this list """ if name in self: raise KeyError(name + ' already exists.') #A zAttr without an Entry in this zVar will be a "get" not "create" attr = self._get_or_create(name) if data is not None: if self.special_entry is None: attr.new(data, type) else: attr.new(data, type, self.special_entry()) def rename(self, old_name, new_name): """ Rename an attribute in this list Renaming a zAttribute renames it for *all* zVariables in this CDF! Parameters ========== old_name : str the current name of the attribute new_name : str the new name of the attribute """ AttrList.__getitem__(self, old_name).rename(new_name) def from_dict(self, in_dict): """ Fill this list of attributes from a dictionary .. deprecated:: 0.1.5 Use :meth:`~pycdf.AttrList.clone` instead; it supports cloning from dictionaries. 
Parameters ========== in_dict : dict Attribute list is populated entirely from this dictionary; all existing attributes are deleted. """ warnings.warn("from_dict is deprecated and will be removed. Use clone.", DeprecationWarning) for k in in_dict: self[k] = in_dict[k] for k in list(self): if not k in in_dict: del self[k] def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{AttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name self[new_name] = master[name] def _clone_list(self, master): """Clones this attribute list from another @param master: the attribute list to copy from @type master: L{AttrList} """ for name in master: self._clone_attr(master, name) for name in list(self): #Can't iterate over a list we're changing if not name in master: del self[name] def _get_or_create(self, name): """Retrieve L{Attr} or create it if it doesn't exist @param name: name of the attribute to look up or create @type name: str @return: attribute with this name @rtype: L{Attr} """ attr = None try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status != const.NO_SUCH_ATTR: raise if attr is None: attr = self.AttrType(self._cdf_file, name, True) elif attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) return attr class gAttrList(AttrList): """ Object representing *all* the gAttributes in a CDF. Normally accessed as an attribute of an open :class:`CDF`: >>> global_attribs = cdffile.attrs Appears as a dictionary: keys are attribute names; each value is an attribute represented by a :class:`gAttr` object. To access the global attribute TEXT: >>> text_attr = cdffile.attrs['TEXT'] See Also ======== :class:`AttrList` """ AttrType = gAttr attr_name = 'gAttribute' global_scope = True def __len__(self): """ Number of gAttributes in this CDF Returns ======= out : int number of gAttributes in the CDF """ count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMgATTRS_, ctypes.byref(count)) return count.value class zAttrList(AttrList): """Object representing *all* the zAttributes in a zVariable. Normally accessed as an attribute of a :class:`Var` in an open CDF: >>> epoch_attribs = cdffile['Epoch'].attrs Appears as a dictionary: keys are attribute names, values are the value of the zEntry associated with the appropriate zVariable. Each vAttribute in a CDF may only have a *single* entry associated with each variable. The entry may be a string, a single numerical value, or a series of numerical values. Entries with multiple values are returned as an entire list; direct access to the individual elements is not possible. Example: finding the first dependency of (ISTP-compliant) variable ``Flux``: >>> print cdffile['Flux'].attrs['DEPEND_0'] zAttributes are shared among zVariables, one zEntry allowed per zVariable. (pyCDF hides this detail.) Deleting the last zEntry for a zAttribute will delete the underlying zAttribute. zEntries are created and destroyed by the usual dict methods on the zAttrlist: >>> epoch_attribs['new_entry'] = [1, 2, 4] #assign a list to new zEntry >>> del epoch_attribs['new_entry'] #delete the zEntry The type of the zEntry is guessed from data provided. 
The type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing zEntry corresponding to this zVar #. other zEntries in this zAttribute #. the type of this zVar #. data-matching constraints described in :py:meth:`CDF.new` See Also ======== :class:`AttrList` """ AttrType = zAttr attr_name = 'zAttribute' global_scope = False def __init__(self, zvar): """Initialize the attribute collection @param zvar: zVariable these attributes are in @param zvar: :py:class:`pycdf.Var` """ super(zAttrList, self).__init__(zvar.cdf_file, zvar._num) self._zvar = zvar def __getitem__(self, name): """Find an zEntry by name @param name: name of the zAttribute to return @type name: str @return: attribute named L{name} @rtype: L{zAttr} @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if attrib.has_entry(zvar_num): attrib._raw = self._zvar._raw return attrib[zvar_num] else: raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) def __delitem__(self, name): """Delete an zEntry by name @param name: name of the zEntry to delete @type name: str @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library @note: If this is the only remaining entry, the Attribute will be deleted. """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(str(name) + ': no such attribute for variable ' + str(self._zvar._name)) del attrib[zvar_num] if len(attrib) == 0: attrib._delete() def __setitem__(self, name, data): """Sets a zEntry by name The type of the zEntry is guessed from L{data}. The type is chosen to match the data; subject to that constraint, it will try to match (in order): 1. existing zEntry corresponding to this zVar 2. other zEntries in this zAttribute 3. the type of this zVar 4. data-matching constraints described in L{_Hyperslice.types} @param name: name of zAttribute; zEntry for this zVariable will be set in zAttribute by this name @type name: str @raise CDFError: errors in CDF library @raise ValueError: if unable to find a valid CDF type matching L{data}, or if L{data} is the wrong dimensions. """ try: attr = super(zAttrList, self).__getitem__(name) except KeyError: attr = zAttr(self._cdf_file, name, True) attr._raw = self._zvar._raw attr[self._zvar._num()] = data def __len__(self): """Number of zAttributes in this variable @return: number of zAttributes in the CDF which have entries for this variable. @rtype: int """ length = 0 count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) current = 0 while current < count.value: candidate = zAttr(self._cdf_file, current) if not candidate.global_scope(): if candidate.has_entry(self._zvar._num()): length += 1 current += 1 return length def type(self, name, new_type=None): """Find or change the CDF type of a zEntry in this zVar @param name: name of the zAttr to check or change @type name: str @param new_type: type to change it to, see :py:mod:`pycdf.const` @type new_type: ctypes.c_long @return: CDF variable type, see :py:mod:`pycdf.const` @rtype: int @note: If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) return attrib.type(zvar_num, new_type) def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{zAttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name if new_name in self: del self[new_name] self.new(new_name, master[name], master.type(name) if hasattr(master, 'type') else None)
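The zAttrList and gAttrList classes above expose CDF attributes through a dict-like interface; the short sketch below (an editor's addition, not part of the record above) shows how that interface might be exercised. It assumes the standalone module imports as pycdf, and that 'example.cdf', the variable 'Flux', and the attribute names all exist -- every name here is hypothetical.

import pycdf

with pycdf.CDF('example.cdf') as cdffile:
    cdffile.readonly(False)               # assumed: existing files open read-only
    print(cdffile.attrs['TEXT'])          # gAttr: list-like access to gEntries
    flux = cdffile['Flux']
    print(flux.attrs['DEPEND_0'])         # zAttr: the single zEntry for this zVar
    flux.attrs['note'] = 'edited copy'    # new zEntry; CDF type guessed per the
                                          # matching rules in zAttrList.__setitem__
    del flux.attrs['note']                # deleting the last zEntry also deletes
                                          # the underlying zAttribute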
rv
Gets or sets whether this variable has record variance. If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Other Parameters ================ new_rv : bool True to change to record variance, False to change to NRV; unspecified to simply check variance. Returns ======= out : bool True if record varying, False if NRV
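A brief sketch (editor's addition) of how the rv getter/setter described above might be used; the module import name pycdf, the file name, and the variable names are assumptions, not part of the original source.

import pycdf

with pycdf.CDF('new.cdf', '') as cdf:         # '' master => create a new, empty CDF
    cdf['counts'] = [[1, 2, 3], [4, 5, 6]]    # created record-varying by default
    print(cdf['counts'].rv())                 # True
    cdf.new('lookup', [10, 20, 30], recVary=False)
    print(cdf['lookup'].rv())                 # False: non-record-varying (NRV)
    # cdf['lookup'].rv(True) would request record variance; the underlying CDF
    # library may refuse the change once records have been written.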
#!/usr/bin/env python # -*- coding: utf-8 -*- """ das developers note: This a is modification of the original SpacePy pycdf package. All refereneces to the greater spacepy package have been removed to create a small standalone module. --cwp 2018-10-18 The libcdf.so location code has been changed to find the version installed in anaconda. --cwp 2020-04-06 This package provides a Python interface to the Common Data Format (CDF) library used for many NASA missions, available at http://cdf.gsfc.nasa.gov/. It is targeted at Python 2.6+ and should work without change on either Python 2 or Python 3. The interface is intended to be 'pythonic' rather than reproducing the C interface. To open or close a CDF and access its variables, see the :class:`CDF` class. Accessing data within the variables is via the :class:`Var` class. The :data:`lib` object provides access to some routines that affect the functionality of the library in general. The :mod:`~pycdf.const` module contains constants useful for accessing the underlying library. Authors: Jon Niehof Institution: University of New Hampshire Contact: [email protected] Copyright 2010-2015 Los Alamos National Security, LLC. """ __contact__ = 'Jon Niehof, [email protected]' try: from collections.abc import MutableMapping, MutableSequence except ImportError: from collections import MutableMapping, MutableSequence import ctypes import ctypes.util import datetime import operator import os import os.path import shutil import sys import tempfile import warnings import weakref import numpy import numpy.ma #Import const AFTER library loaded, so failed load doesn't leave half-imported #from . import const try: str_classes = (str, bytes, unicode) except NameError: str_classes = (str, bytes) class Library(object): """ Abstraction of the base CDF C library and its state. Not normally intended for end-user use. An instance of this class is created at package load time as the :data:`~pycdf.lib` variable, providing access to the underlying C library if necessary. The CDF library itself is described in section 2.1 of the CDF user's guide, as well as the CDF C reference manual. Calling the C library directly requires knowledge of :mod:`ctypes`. Instantiating this object loads the C library, see :doc:`/pycdf` docs for details. .. autosummary:: ~Library.call ~Library.check_status ~Library.datetime_to_epoch ~Library.datetime_to_epoch16 ~Library.datetime_to_tt2000 ~Library.epoch_to_datetime ~Library.epoch_to_epoch16 ~Library.epoch_to_num ~Library.epoch_to_tt2000 ~Library.epoch16_to_datetime ~Library.epoch16_to_epoch ~Library.epoch16_to_tt2000 ~Library.set_backward supports_int8 ~Library.tt2000_to_datetime ~Library.tt2000_to_epoch ~Library.tt2000_to_epoch16 v_datetime_to_epoch v_datetime_to_epoch16 v_datetime_to_tt2000 v_epoch_to_datetime v_epoch_to_tt2000 v_epoch16_to_datetime v_epoch16_to_tt2000 v_tt2000_to_datetime v_tt2000_to_epoch v_tt2000_to_epoch16 libpath version .. automethod:: call .. automethod:: check_status .. automethod:: datetime_to_epoch .. automethod:: datetime_to_epoch16 .. automethod:: datetime_to_tt2000 .. automethod:: epoch_to_datetime .. automethod:: epoch_to_epoch16 .. automethod:: epoch_to_num .. automethod:: epoch_to_tt2000 .. automethod:: epoch16_to_datetime .. automethod:: epoch16_to_epoch .. automethod:: epoch16_to_tt2000 .. automethod:: set_backward .. attribute:: supports_int8 True if this library supports INT8 and TIME_TT2000 types; else False. .. automethod:: tt2000_to_datetime .. automethod:: tt2000_to_epoch .. 
automethod:: tt2000_to_epoch16 .. method:: v_datetime_to_epoch(datetime) A vectorized version of :meth:`datetime_to_epoch` which takes a numpy array of datetimes as input and returns an array of epochs. .. method:: v_datetime_to_epoch16(datetime) A vectorized version of :meth:`datetime_to_epoch16` which takes a numpy array of datetimes as input and returns an array of epoch16. .. method:: v_datetime_to_tt2000(datetime) A vectorized version of :meth:`datetime_to_tt2000` which takes a numpy array of datetimes as input and returns an array of TT2000. .. method:: v_epoch_to_datetime(epoch) A vectorized version of :meth:`epoch_to_datetime` which takes a numpy array of epochs as input and returns an array of datetimes. .. method:: v_epoch_to_tt2000(epoch) A vectorized version of :meth:`epoch_to_tt2000` which takes a numpy array of epochs as input and returns an array of tt2000s. .. method:: v_epoch16_to_datetime(epoch0, epoch1) A vectorized version of :meth:`epoch16_to_datetime` which takes a numpy array of epoch16 as input and returns an array of datetimes. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_epoch16_to_tt2000(epoch16) A vectorized version of :meth:`epoch16_to_tt2000` which takes a numpy array of epoch16 as input and returns an array of tt2000s. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_tt2000_to_datetime(tt2000) A vectorized version of :meth:`tt2000_to_datetime` which takes a numpy array of tt2000 as input and returns an array of datetimes. .. method:: v_tt2000_to_epoch(tt2000) A vectorized version of :meth:`tt2000_to_epoch` which takes a numpy array of tt2000 as input and returns an array of epochs. .. method:: v_tt2000_to_epoch16(tt2000) A vectorized version of :meth:`tt2000_to_epoch16` which takes a numpy array of tt2000 as input and returns an array of epoch16. .. attribute:: libpath The path where pycdf found the CDF C library, potentially useful in debugging. If this contains just the name of a file (with no path information), then the system linker found the library for pycdf. On Linux, ``ldconfig -p`` may be useful for displaying the system's library resolution. .. attribute:: version Version of the CDF library, (version, release, increment, subincrement) """ def __init__(self, libpath=None, library=None): """Load the CDF C library. Searches for the library in the order: 1. Appropriately-named file in CDF_LIB 2. Appropriately-named file in CDF_BASE 3. Standard library search path @raise CDFError: BAD_DATA_TYPE if can't map types properly """ if not 'CDF_TMP' in os.environ: os.environ['CDF_TMP'] = tempfile.gettempdir() if not library: if not libpath: self.libpath, self._library = self._find_lib() if self._library is None: raise Exception(( 'Cannot load CDF C library; checked {0}. 
' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(self.libpath))) else: self._library = ctypes.CDLL(libpath) self.libpath = libpath else: self._library = library self.libpath = libpath self._library.CDFlib.restype = ctypes.c_long #commonly used, so set it up here self._library.EPOCHbreakdown.restype = ctypes.c_long self._library.computeEPOCH.restype = ctypes.c_double self._library.computeEPOCH.argtypes = [ctypes.c_long] * 7 self._library.computeEPOCH16.restype = ctypes.c_double self._library.computeEPOCH16.argtypes = [ctypes.c_long] * 10 + \ [ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDFsetFileBackward'): self._library.CDFsetFileBackward.restype = None self._library.CDFsetFileBackward.argtypes = [ctypes.c_long] #Map old name to the 3.7.1+ name if not hasattr(self._library, 'computeTT2000') \ and hasattr(self._library, 'CDF_TT2000_from_UTC_parts'): self._library.computeTT2000 \ = self._library.CDF_TT2000_from_UTC_parts if hasattr(self._library, 'computeTT2000'): self._library.computeTT2000.restype = ctypes.c_longlong self._library.computeTT2000.argtypes = \ [ctypes.c_double] *9 #Map old name to the 3.7.1+ name if not hasattr(self._library, 'breakdownTT2000') \ and hasattr(self._library, 'CDF_TT2000_to_UTC_parts'): self._library.breakdownTT2000 \ = self._library.CDF_TT2000_to_UTC_parts if hasattr(self._library, 'breakdownTT2000'): self._library.breakdownTT2000.restype = None self._library.breakdownTT2000.argtypes = \ [ctypes.c_longlong] + [ctypes.POINTER(ctypes.c_double)] * 9 if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH'): self._library.CDF_TT2000_to_UTC_EPOCH.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH.argtypes = [ctypes.c_longlong] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH'): self._library.CDF_TT2000_from_UTC_EPOCH.restype = ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH.argtypes = [ctypes.c_double] if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH16'): self._library.CDF_TT2000_to_UTC_EPOCH16.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH16.argtypes = \ [ctypes.c_longlong, ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH16'): self._library.CDF_TT2000_from_UTC_EPOCH16.restype = \ ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH16.argtypes = \ [ctypes.POINTER(ctypes.c_double * 2)] #Get CDF version information ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) sub = ctypes.c_char(b' ') self.call(const.GET_, const.LIB_VERSION_, ctypes.byref(ver), const.GET_, const.LIB_RELEASE_, ctypes.byref(rel), const.GET_, const.LIB_INCREMENT_, ctypes.byref(inc), const.GET_, const.LIB_subINCREMENT_, ctypes.byref(sub)) ver = ver.value rel = rel.value inc = inc.value sub = sub.value self.version = (ver, rel, inc, sub) self._del_middle_rec_bug = ver < 3 or (ver == 3 and (rel < 4 or (rel == 4 and inc < 1))) self.supports_int8 = (ver > 3 or (ver == 3 and rel >=4)) self.cdftypenames = {const.CDF_BYTE.value: 'CDF_BYTE', const.CDF_CHAR.value: 'CDF_CHAR', const.CDF_INT1.value: 'CDF_INT1', const.CDF_UCHAR.value: 'CDF_UCHAR', const.CDF_UINT1.value: 'CDF_UINT1', const.CDF_INT2.value: 'CDF_INT2', const.CDF_UINT2.value: 'CDF_UINT2', const.CDF_INT4.value: 'CDF_INT4', const.CDF_UINT4.value: 'CDF_UINT4', const.CDF_INT8.value: 'CDF_INT8', const.CDF_FLOAT.value: 'CDF_FLOAT', const.CDF_REAL4.value: 'CDF_REAL4', const.CDF_DOUBLE.value: 'CDF_DOUBLE', const.CDF_REAL8.value: 'CDF_REAL8', const.CDF_EPOCH.value: 'CDF_EPOCH', 
const.CDF_EPOCH16.value: 'CDF_EPOCH16', const.CDF_TIME_TT2000.value: 'CDF_TIME_TT2000', } self.numpytypedict = {const.CDF_BYTE.value: numpy.int8, const.CDF_CHAR.value: numpy.int8, const.CDF_INT1.value: numpy.int8, const.CDF_UCHAR.value: numpy.uint8, const.CDF_UINT1.value: numpy.uint8, const.CDF_INT2.value: numpy.int16, const.CDF_UINT2.value: numpy.uint16, const.CDF_INT4.value: numpy.int32, const.CDF_UINT4.value: numpy.uint32, const.CDF_INT8.value: numpy.int64, const.CDF_FLOAT.value: numpy.float32, const.CDF_REAL4.value: numpy.float32, const.CDF_DOUBLE.value: numpy.float64, const.CDF_REAL8.value: numpy.float64, const.CDF_EPOCH.value: numpy.float64, const.CDF_EPOCH16.value: numpy.dtype((numpy.float64, 2)), const.CDF_TIME_TT2000.value: numpy.int64, } self.timetypes = [const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value] if not self.supports_int8: del self.cdftypenames[const.CDF_INT8.value] del self.numpytypedict[const.CDF_INT8.value] del self.cdftypenames[const.CDF_TIME_TT2000.value] del self.numpytypedict[const.CDF_TIME_TT2000.value] elif sys.platform.startswith('linux') \ and os.uname()[4].startswith('arm') \ and hasattr(self._library, 'computeTT2000') \ and self._library.computeTT2000( 2010, 1, 1, 0, 0, 0, 0, 0, 0) != 315576066184000000: #TT2000 call failed, so probably need to type-pun #double arguments to variadic functions. #Calling convention for non-variadic functions with floats #is unique, but convention for ints is same as variadic. #So type-pun arguments to integers to force that calling #convention. if ctypes.sizeof(ctypes.c_longlong) != \ ctypes.sizeof(ctypes.c_double): warnings.warn('ARM with unknown type sizes; ' 'TT2000 functions will not work.') else: self._library.computeTT2000.argtypes = \ [ctypes.c_longlong] * 9 c_ll_p = ctypes.POINTER(ctypes.c_longlong) if self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( 2010)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) != 315576066184000000: warnings.warn('ARM with unknown calling convention; ' 'TT2000 functions will not work.') self.datetime_to_tt2000 = self._datetime_to_tt2000_typepunned v_epoch16_to_datetime = numpy.frompyfunc( self.epoch16_to_datetime, 2, 1) self.v_epoch16_to_datetime = \ lambda x: v_epoch16_to_datetime(x[..., 0], x[..., 1]) self.v_epoch_to_datetime = numpy.frompyfunc( self.epoch_to_datetime, 1, 1) self.v_tt2000_to_datetime = numpy.frompyfunc( self.tt2000_to_datetime, 1, 1) self.v_datetime_to_epoch = numpy.vectorize( self.datetime_to_epoch, otypes=[numpy.float64]) v_datetime_to_epoch16 = numpy.frompyfunc( self.datetime_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. 
We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_datetime_to_epoch16(x): retval = numpy.require(v_datetime_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_datetime_to_epoch16 = _v_datetime_to_epoch16 self.v_datetime_to_tt2000 = numpy.vectorize( self.datetime_to_tt2000, otypes=[numpy.int64]) self.v_epoch_to_tt2000 = numpy.vectorize( self.epoch_to_tt2000, otypes=[numpy.int64]) self.v_tt2000_to_epoch = numpy.vectorize( self.tt2000_to_epoch, otypes=[numpy.float64]) v_epoch16_to_tt2000 = numpy.frompyfunc( self.epoch16_to_tt2000, 2, 1) self.v_epoch16_to_tt2000 = \ lambda x: v_epoch16_to_tt2000(x[..., 0], x[..., 1]) v_tt2000_to_epoch16 = numpy.frompyfunc( self.tt2000_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_tt2000_to_epoch16(x): retval = numpy.require(v_tt2000_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_tt2000_to_epoch16 = _v_tt2000_to_epoch16 if not self.supports_int8: self.datetime_to_tt2000 = self._bad_tt2000 self.tt2000_to_datetime = self._bad_tt2000 self.v_datetime_to_tt2000 = self._bad_tt2000 self.v_tt2000_to_datetime = self._bad_tt2000 self.epoch_to_tt2000 = self._bad_tt2000 self.v_epoch_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch = self._bad_tt2000 self.v_tt2000_to_epoch = self._bad_tt2000 self.epoch_16_to_tt2000 = self._bad_tt2000 self.v_epoch16_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch16 = self._bad_tt2000 self.v_tt2000_to_epoch16 = self._bad_tt2000 #Default to V2 CDF self.set_backward(True) @staticmethod def _find_lib(): """ Search for the CDF library Searches in likely locations for CDF libraries and attempts to load them. Stops at first successful load and, if fails, reports all the files that were tried as libraries. Returns ======= out : tuple This is either (path to library, loaded library) or, in the event of failure, (None, list of libraries tried) """ failed = [] for libpath in Library._lib_paths(): try: lib = ctypes.CDLL(libpath) except: failed.append(libpath) else: return libpath, lib return (failed, None) @staticmethod def _lib_paths(): """Find candidate paths for the CDF library Does not check that the library is actually in any particular directory, just returns a list of possible locations, in priority order. Returns ======= out : generator of str paths that look like the CDF library """ #What the library might be named names = { 'win32': ['cdf.dll'], 'darwin': ['libcdf.dylib', 'cdf.dylib', 'libcdf.so'], 'linux2': ['libcdf.so'], 'linux': ['libcdf.so'], } names = names.get(sys.platform, ['libcdf.so']) #All existing CDF-library-like paths within a directory search_dir = lambda x: \ [os.path.join(x, fname) for fname in names if os.path.exists(os.path.join(x, fname))] # Only use anaconda locations... # Defined during builds ... if 'PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['PREFIX'], 'lib')): yield p # defined when conda is activated ... 
if 'CONDA_PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'lib')): yield p # Special subdirectory for anaconda unix packages on windows if 'LIBRARY_BIN' in os.environ: for p in search_dir(os.environ['LIBRARY_BIN']): yield p ctypespath = ctypes.util.find_library( 'cdf.dll' if sys.platform == 'win32' else 'cdf') if ctypespath: yield ctypespath def check_status(self, status, ignore=()): """ Raise exception or warning based on return status of CDF call Parameters ========== status : int status returned by the C library Other Parameters ================ ignore : sequence of ctypes.c_long CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. (Default none). Raises ====== CDFError : if status < CDF_WARN, indicating an error Warns ===== CDFWarning : if CDF_WARN <= status < CDF_OK, indicating a warning. Returns ======= out : int status (unchanged) """ if status == const.CDF_OK or status in ignore: return status if status < const.CDF_WARN: raise CDFError(status) else: warning = CDFWarning(status) warning.warn() return status def call(self, *args, **kwargs): """ Call the CDF internal interface Passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with :meth:`check_status`. Terminal NULL is automatically added to args. Parameters ========== args : various, see :mod:`ctypes` Passed directly to the CDF library interface. Useful constants are defined in the :mod:`~pycdf.const` module. Other Parameters ================ ignore : sequence of CDF statuses sequence of CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. Returns ======= out : int CDF status from the library Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ if 'ignore' in kwargs: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) ), kwargs['ignore']) else: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) )) def set_backward(self, backward=True): """ Set backward compatibility mode for new CDFs Unless backward compatible mode is set, CDF files created by the version 3 library can not be read by V2. Parameters ========== backward : boolean Set backward compatible mode if True; clear it if False. Raises ====== ValueError : if backward=False and underlying CDF library is V2 """ if self.version[0] < 3: if not backward: raise ValueError( 'Cannot disable backward-compatible mode for CDF version 2.') else: return self._library.CDFsetFileBackward(const.BACKWARDFILEon if backward else const.BACKWARDFILEoff) def epoch_to_datetime(self, epoch): """ Converts a CDF epoch value to a datetime Parameters ========== epoch : float epoch value from CDF Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
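Examples
========
A minimal round-trip sketch (assumes the C library loaded successfully at
import time; the input epoch is simply whatever :meth:`datetime_to_epoch`
produces for the same datetime):

>>> import datetime
>>> import pycdf
>>> dt = datetime.datetime(2010, 1, 1)
>>> pycdf.lib.epoch_to_datetime(pycdf.lib.datetime_to_epoch(dt))
datetime.datetime(2010, 1, 1, 0, 0)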
See Also ======== v_epoch_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) self._library.EPOCHbreakdown(ctypes.c_double(epoch), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999000) else: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, msec.value * 1000) def datetime_to_epoch(self, dt): """ Converts a Python datetime to a CDF Epoch value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : float epoch corresponding to dt See Also ======== v_datetime_to_epoch """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) micro = dt.microsecond % 1000 if micro >= 500 and dt.year < 9999: dt += datetime.timedelta(0, 0, 1000) return self._library.computeEPOCH(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000)) def epoch16_to_datetime(self, epoch0, epoch1): """ Converts a CDF epoch16 value to a datetime .. note:: The call signature has changed since SpacePy 0.1.2. Formerly this method took a single argument with two values; now it requires two arguments (one for each value). To convert existing code, replace ``epoch16_to_datetime(epoch)`` with ``epoch16_to_datetime(*epoch)``. Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. See Also ======== v_epoch16_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) usec = ctypes.c_long(0) nsec = ctypes.c_long(0) psec = ctypes.c_long(0) self._library.EPOCH16breakdown((ctypes.c_double * 2)(epoch0, epoch1), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec), ctypes.byref(psec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) micro = int(float(msec.value) * 1000 + float(usec.value) + float(nsec.value) / 1000 + float(psec.value) / 1e6 + 0.5) if micro < 1000000: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_epoch16(self, dt): """ Converts a Python datetime to a CDF Epoch16 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : list of float epoch16 corresponding to dt See Also ======== v_datetime_to_epoch16 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) #Default to "illegal epoch" epoch16 = (ctypes.c_double * 2)(-1., -1.) 
if self._library.computeEPOCH16(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0, 0, epoch16): return (-1., -1.) #Failure, so illegal epoch return (epoch16[0], epoch16[1]) def epoch_to_epoch16(self, epoch): """ Converts a CDF EPOCH to a CDF EPOCH16 value Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : (double, double) EPOCH16 corresponding to epoch """ e = numpy.require(epoch, numpy.float64) s = numpy.trunc(e / 1000.0) #ugly numpy stuff, probably a better way.... res = numpy.hstack((s, (e - s * 1000.0) * 1e9)) if len(res) <= 2: return res newshape = list(res.shape[0:-2]) newshape.append(res.shape[-1] // 2) newshape.append(2) return numpy.rollaxis(res.reshape(newshape), -1, -2) def epoch_to_num(self, epoch): """ Convert CDF EPOCH to matplotlib number. Same output as :func:`~matplotlib.dates.date2num` and useful for plotting large data sets without converting the times through datetime. Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : double Floating point number representing days since 0001-01-01. """ #date2num day 1 is 1/1/1 00UT #epoch 1/1/1 00UT is 31622400000.0 (millisecond) return (epoch - 31622400000.0) / (24 * 60 * 60 * 1000.0) + 1.0 def epoch16_to_epoch(self, epoch16): """ Converts a CDF EPOCH16 to a CDF EPOCH value Parameters ========== epoch16 : (double, double) EPOCH16 to convert. Lists and numpy arrays are acceptable. LAST dimension should be 2: the two pairs of EPOCH16 Returns ======= out : double EPOCH corresponding to epoch16 """ e = numpy.require(epoch16, numpy.float64) return e[..., 0] * 1000.0 + numpy.round(e[..., 1] / 1e9) def tt2000_to_datetime(self, tt2000): """ Converts a CDF TT2000 value to a datetime .. note:: Although TT2000 values support leapseconds, Python's datetime object does not. Any times after 23:59:59.999999 will be truncated to 23:59:59.999999. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
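Examples
========
A minimal round-trip sketch (assumes a CDF C library new enough to provide
TT2000 support, i.e. version 3.4.0 or later):

>>> import datetime
>>> import pycdf
>>> dt = datetime.datetime(2010, 1, 1)
>>> pycdf.lib.tt2000_to_datetime(pycdf.lib.datetime_to_tt2000(dt))
datetime.datetime(2010, 1, 1, 0, 0)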
See Also ======== v_tt2000_to_datetime """ yyyy = ctypes.c_double(0) mm = ctypes.c_double(0) dd = ctypes.c_double(0) hh = ctypes.c_double(0) min = ctypes.c_double(0) sec = ctypes.c_double(0) msec = ctypes.c_double(0) usec = ctypes.c_double(0) nsec = ctypes.c_double(0) self._library.breakdownTT2000( ctypes.c_longlong(tt2000), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) sec = int(sec.value) if sec >= 60: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), 59, 999999) micro = int(msec.value * 1000 + usec.value + nsec.value / 1000 + 0.5) if micro < 1000000: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_tt2000(self, dt): """ Converts a Python datetime to a CDF TT2000 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0) def _datetime_to_tt2000_typepunned(self, dt): """ Converts a Python datetime to a CDF TT2000 value Typepunned version that passes doubles as longlongs, to get around ARM calling convention oddness. Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ c_ll_p = ctypes.POINTER(ctypes.c_longlong) if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( dt.year)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.month)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.day)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.hour)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.minute)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.second)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond // 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond % 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) def epoch_to_tt2000(self, epoch): """ Converts a CDF EPOCH to a CDF TT2000 value Parameters ========== epoch : double EPOCH to convert Returns ======= out : int tt2000 corresponding to epoch See Also ======== v_epoch_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH(epoch) def tt2000_to_epoch(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH .. note:: Although TT2000 values support leapseconds, CDF EPOCH values do not. 
Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double EPOCH corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch """ return self._library.CDF_TT2000_to_UTC_EPOCH(tt2000) def epoch16_to_tt2000(self, epoch0, epoch1): """ Converts a CDF epoch16 value to TT2000 .. note:: Because TT2000 does not support picoseconds, the picoseconds value in epoch is ignored (i.e., truncated.) Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : long TT2000 corresponding to epoch. See Also ======== v_epoch16_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH16( (ctypes.c_double * 2)(epoch0, epoch1)) def tt2000_to_epoch16(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH16 .. note:: Although TT2000 values support leapseconds, CDF EPOCH16 values do not. Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double, double EPOCH16 corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch16 """ #Default to "illegal epoch" if isn't populated epoch16 = (ctypes.c_double * 2)(-1., -1.) if self._library.CDF_TT2000_to_UTC_EPOCH16(tt2000, epoch16): return (-1., -1.) #Failure; illegal epoch return (epoch16[0], epoch16[1]) def _bad_tt2000(*args, **kwargs): """Convenience function for complaining that TT2000 not supported""" raise NotImplementedError( 'TT2000 functions require CDF library 3.4.0 or later') def download_library(): """Download and install the CDF library""" if sys.platform != 'win32': raise NotImplementedError( 'CDF library install only supported on Windows') try: import html.parser as HTMLParser except ImportError: import HTMLParser #https://stackoverflow.com/questions/1713038/super-fails-with-error-typeerror-argument-1-must-be-type-not-classobj class LinkParser(HTMLParser.HTMLParser, object): def __init__(self, *args, **kwargs): self.links_found = [] super(LinkParser, self).__init__(*args, **kwargs) def handle_starttag(self, tag, attrs): if tag != 'a' or attrs[0][0] != 'href': return self.links_found.append(attrs[0][1]) import re import subprocess try: import urllib.request as u except ImportError: import urllib as u # Removed reference to spacepy #import spacepy #if spacepy.config.get('user_agent', None): # class AppURLopener(u.FancyURLopener): # version = spacepy.config['user_agent'] # u._urlopener = AppURLopener() baseurl = 'https://spdf.sci.gsfc.nasa.gov/pub/software/cdf/dist/' url = u.urlopen(baseurl) listing = url.read() url.close() p = LinkParser() p.feed(listing) cdfdist = [l for l in p.links_found if re.match('^cdf3\d_\d(?:_\d)?/$', l)] if not cdfdist: raise RuntimeError( "Couldn't find CDF distribution directory to download") cdfdist.sort(key=lambda x: x.rstrip('/').split('_')) cdfverbase = cdfdist[-1].rstrip('/') instfname = cdfverbase + ('_0' if cdfverbase.count('_') == 1 else '') + \ '-setup-{0}.exe'.format(len('%x' % sys.maxsize)*4) insturl = baseurl + cdfverbase + '/windows/' + instfname tmpdir = tempfile.mkdtemp() try: fname, status = u.urlretrieve(insturl, os.path.join(tmpdir, instfname)) subprocess.check_call([fname, '/install', '/q1'], shell=False) finally: shutil.rmtree(tmpdir) _libpath, _library = Library._find_lib() 
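#Illustrative fallback (a sketch only, not executed here): if automatic
#discovery fails, a Library object can also be constructed with an explicit
#path to the CDF shared library, per the libpath parameter of
#Library.__init__. The path below is a placeholder, not a shipped default:
#    custom_lib = Library(libpath='/path/to/libcdf.so')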
if _library is None: raise Exception(('Cannot load CDF C library; checked {0}. ' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(_libpath))) from . import const lib = Library(_libpath, _library) """Module global library object. Initalized at module load time so all classes have ready access to the CDF library and a common state. E.g: >>> import pycdf >>> pycdf.lib.version (3, 3, 0, ' ') """ class CDFException(Exception): """ Base class for errors or warnings in the CDF library. Not normally used directly, but in subclasses :class:`CDFError` and :class:`CDFWarning`. Error messages provided by this class are looked up from the underlying C library. """ def __init__(self, status): """ Create a CDF Exception Uses CDF C library to look up an appropriate error message. Parameters ========== status : ctypes.c_long CDF status """ self.status = status self.string = 'CDF error ' + repr(status) + ', unable to get details.' message = ctypes.create_string_buffer(const.CDF_STATUSTEXT_LEN + 1) try: retval = lib._library.CDFlib(const.SELECT_, const.CDF_STATUS_, ctypes.c_long(status), const.GET_, const.STATUS_TEXT_, message, const.NULL_) if retval == const.CDF_OK: if isinstance(message.value, str): self.string = message.value elif isinstance(message.value, bytes): self.string = message.value.decode() except: pass def __str__(self): """ Error string associated with the library error. Returns ======= out : str Error message from the CDF library. """ return self.string class CDFError(CDFException): """Raised for an error in the CDF library.""" pass class CDFWarning(CDFException, UserWarning): """Used for a warning in the CDF library.""" def warn(self, level=4): """ Issues a warning based on the information stored in my exception Intended for use in check_status or similar wrapper function. Other Parameters ================ level : int optional (default 3), how far up the stack the warning should be reported. Passed directly to :class:`warnings.warn`. """ warnings.warn(self, self.__class__, level) class EpochError(Exception): """Used for errors in epoch routines""" pass def _compress(obj, comptype=None, param=None): """Set or check the compression of a :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param obj: object on which to set or check compression @type obj: :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param comptype: type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :py:mod:`pycdf.const`. If not specified, will not change compression. @type comptype: ctypes.c_long @param param: Compression parameter, see CDF CRM 4.10 and :py:mod:`pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
@type param: ctypes.c_long @return: (comptype, param) currently in effect @rtype: tuple """ if isinstance(obj, CDF): COMPRESSION_ = const.CDF_COMPRESSION_ elif isinstance(obj, Var): COMPRESSION_ = const.zVAR_COMPRESSION_ else: raise ValueError('Must specify a CDF or Var type.') validparams = {const.NO_COMPRESSION.value: [ctypes.c_long(0)], const.RLE_COMPRESSION.value: [const.RLE_OF_ZEROs], const.HUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.AHUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.GZIP_COMPRESSION.value: [ctypes.c_long(5), ctypes.c_long(1), ctypes.c_long(2), ctypes.c_long(3), ctypes.c_long(4), ctypes.c_long(6), ctypes.c_long(7), ctypes.c_long(8), ctypes.c_long(9), ], } comptypes = [const.NO_COMPRESSION, const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION, const.GZIP_COMPRESSION] comptypevalues = [i.value for i in comptypes] if comptype != None: if not hasattr(comptype, 'value'): comptype = ctypes.c_long(comptype) if param is None: if not comptype.value in validparams: raise CDFError(const.BAD_COMPRESSION) param = validparams[comptype.value][0] paramlist = (ctypes.c_long * 1)(param) obj._call(const.PUT_, COMPRESSION_, comptype, paramlist) params = (ctypes.c_long * const.CDF_MAX_PARMS)(*([0] * const.CDF_MAX_PARMS)) comptype = ctypes.c_long(0) percent = ctypes.c_long(0) obj._call(const.GET_, COMPRESSION_, ctypes.byref(comptype), ctypes.byref(params), ctypes.byref(percent)) param = params[0] if not comptype.value in comptypevalues: raise CDFError(const.BAD_COMPRESSION) validparamvalues = [i.value for i in validparams[comptype.value]] if not param in validparamvalues: raise CDFError(const.BAD_COMPRESSION_PARM) comptype = comptypes[comptypevalues.index(comptype.value)] if comptype in (const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION): param = validparams[comptype.value][validparamvalues.index(param)] return (comptype, param) class CDF(MutableMapping): """ Python object representing a CDF file. Open or create a CDF file by creating an object of this class. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. A readonly CDF with many variables may be slow to close. See :meth:`readonly`. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :meth:`close` or :meth:`save` when done. .. note:: Existing CDF files are opened read-only by default, see :meth:`readonly` to change. CDF supports the `with <http://docs.python.org/tutorial/inputoutput.html#methods-of-file-objects>`_ keyword, like other file objects, so: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... #do brilliant things with the CDF will open the CDF, execute the indented statements, and close the CDF when finished or when an error occurs. The `python docs <http://docs.python.org/reference/compound_stmts.html#with>`_ include more detail on this 'context manager' ability. 
CDF objects behave like a python `dictionary <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_, where the keys are names of variables in the CDF, and the values, :class:`Var` objects. As a dictionary, they are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_ and it is easy to loop over all of the variables in a file. Some examples: #. List the names of all variables in the open CDF ``cdffile``: >>> cdffile.keys() >>> for k in cdffile: #Alternate ... print(k) #. Get a :class:`Var` object for the variable named ``Epoch``: >>> epoch = cdffile['Epoch'] #. Determine if a CDF contains a variable named ``B_GSE``: >>> if 'B_GSE' in cdffile: ... print('B_GSE is in the file') ... else: ... print('B_GSE is not in the file') #. Find how many variables are in the file: >>> print(len(cdffile)) #. Delete the variable ``Epoch`` from the open CDF file ``cdffile``: >>> del cdffile['Epoch'] #. Display a summary of variables and types in open CDF file ``cdffile``: >>> print(cdffile) #. Open the CDF named ``cdf_filename.cdf``, read *all* the data from all variables into dictionary ``data``, and close it when done or if an error occurs: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... data = cdffile.copy() This last example can be very inefficient as it reads the entire CDF. Normally it's better to treat the CDF as a dictionary and access only the data needed, which will be pulled transparently from disc. See :class:`Var` for more subtle examples. Potentially useful dictionary methods and related functions: - `in <http://docs.python.org/reference/expressions.html#in>`_ - `keys <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_ - :py:func:`len` - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - :py:func:`sorted` - :py:func:`~spacepy.toolbox.dictree` The CDF user's guide section 2.2 has more background information on CDF files. The :attr:`~CDF.attrs` Python attribute acts as a dictionary referencing CDF attributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`gAttrList` for more on the dictionary of global attributes. Creating a new CDF from a master (skeleton) CDF has similar syntax to opening one: >>> cdffile = pycdf.CDF('cdf_filename.cdf', 'master_cdf_filename.cdf') This creates and opens ``cdf_filename.cdf`` as a copy of ``master_cdf_filename.cdf``. Using a skeleton CDF is recommended over making a CDF entirely from scratch, but this is possible by specifying a blank master: >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') When CDFs are created in this way, they are opened read-write, see :py:meth:`readonly` to change. By default, new CDFs (without a master) are created in version 2 (backward-compatible) format. To create a version 3 CDF, use :meth:`Library.set_backward`: >>> pycdf.lib.set_backward(False) >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') Add variables by direct assignment, which will automatically set type and dimension based on the data provided: >>> cdffile['new_variable_name'] = [1, 2, 3, 4] or, if more control is needed over the type and dimensions, use :py:meth:`new`. Although it is supported to assign Var objects to Python variables for convenience, there are some minor pitfalls that can arise when changing a CDF that will not affect most users. 
This is only a concern when assigning a zVar object to a Python variable, changing the CDF through some other variable, and then trying to use the zVar object via the originally assigned variable. Deleting a variable: >>> var = cdffile['Var1'] >>> del cdffile['Var1'] >>> var[0] #fail, no such variable Renaming a variable: >>> var = cdffile['Var1'] >>> cdffile['Var1'].rename('Var2') >>> var[0] #fail, no such variable Renaming via the same variable works: >>> var = cdffile['Var1'] >>> var.rename('Var2') >>> var[0] #succeeds, aware of new name Deleting a variable and then creating another variable with the same name may lead to some surprises: >>> var = cdffile['Var1'] >>> var[...] = [1, 2, 3, 4] >>> del cdffile['Var1'] >>> cdffile.new('Var1', data=[5, 6, 7, 8] >>> var[...] [5, 6, 7, 8] .. autosummary:: ~CDF.attr_num ~CDF.attrs ~CDF.add_attr_to_cache ~CDF.add_to_cache ~CDF.backward ~CDF.checksum ~CDF.clear_attr_from_cache ~CDF.clear_from_cache ~CDF.clone ~CDF.close ~CDF.col_major ~CDF.compress ~CDF.copy ~CDF.from_data ~CDF.new ~CDF.raw_var ~CDF.readonly ~CDF.save ~CDF.var_num ~CDF.version .. attribute:: CDF.attrs Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. .. attribute:: CDF.backward True if this CDF was created in backward-compatible mode (for opening with CDF library before 3.x) .. automethod:: add_to_cache .. automethod:: add_attr_to_cache .. automethod:: attr_num .. automethod:: checksum .. automethod:: clear_from_cache .. automethod:: clear_attr_from_cache .. automethod:: clone .. automethod:: close .. automethod:: col_major .. automethod:: compress .. automethod:: copy .. automethod:: from_data .. automethod:: new .. automethod:: raw_var .. automethod:: readonly .. automethod:: save .. automethod:: var_num .. automethod:: version """ def __init__(self, pathname, masterpath=None, create=None, readonly=None): """Open or create a CDF file. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. Raises ====== CDFError if CDF library reports an error CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save` when done. 
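An existing file can also be opened for modification by passing
``readonly=False`` (the filename here is only a placeholder):

>>> cdffile = pycdf.CDF('cdf_filename.cdf', readonly=False)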
""" if masterpath is not None: #Looks like we want to create if create is False: raise ValueError('Cannot specify a master CDF without creating a CDF') if readonly is True: raise ValueError('Cannot create a CDF in readonly mode') if create and readonly: raise ValueError('Cannot create a CDF in readonly mode') try: self.pathname = pathname.encode() except AttributeError: raise ValueError( 'pathname must be string-like: {0}'.format(pathname)) self._handle = ctypes.c_void_p(None) self._opened = False if masterpath is None and not create: self._open(True if readonly is None else readonly) elif masterpath: self._from_master(masterpath.encode()) else: self._create() lib.call(const.SELECT_, const.CDF_zMODE_, ctypes.c_long(2)) self._attrlistref = weakref.ref(gAttrList(self)) self.backward = self.version()[0] < 3 self._var_nums = {} """Cache of name-to-number mappings for variables in this CDF""" self._attr_info = {} """Cache of name-to-(number, global) mappings for attributes in this CDF""" def __del__(self): """Destructor; called when CDF object is destroyed. Close CDF file if there is still a valid handle. .. note:: To avoid data loss, explicitly call :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save`. """ if self._opened: self.close() def __delitem__(self, name): """Delete a zVariable in this CDF, by name or number Parameters ========== name : string or int Name or number of the CDF variable .. note: Variable numbers may change if variables are added or removed. Examples ======== Delete the variable ``Epoch`` from the open CDF file ``cdffile``. >>> del cdffile['Epoch'] """ self[name]._delete() def __enter__(self): """Context manager entrance function.""" return self def __exit__(self, type, value, traceback): """Context manager exit function. Close CDF file. """ self.close() def __getitem__(self, name): """Gets a zVariable in this CDF, by name or number The CDF acts like a dict @param name: Name or number of the CDF variable @type name: string or int @return: CDF variable named or numbered L{name} @rtype: :py:class:`pycdf.Var` @raise KeyError: for pretty much any problem in lookup @note: variable numbers may change if variables are added or removed. """ try: return Var(self, name) except CDFException as e: raise KeyError('{0}: {1}'.format(name, e)) def __setitem__(self, name, data): """Writes data to a zVariable in this CDF If the zVariable does not exist, will create one matching L{data}. If it does exist, will attempt to write L{data} to it without changing the type or dimensions. @param name: name or number of the variable to write @type name: str or int @param data: data to write, or a :py:class:`pycdf.Var` to copy """ if isinstance(data, Var): self.clone(data, name) elif name in self: self[name][...] 
= data if hasattr(data, 'attrs'): self[name].attrs.clone(data.attrs) else: self.new(name, data) def __iter__(self, current = 0): """Iterates over zVars in CDF Iterators for dicts return keys @note: Returned in variable-number order """ while current < self.__len__(): name = self[current].name() value = (yield name) if value is None: current += 1 else: current = self[value]._num() current += 1 def __len__(self): """Implements 'length' of CDF (number of zVars) @return: number of zVars in the CDF @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, const.CDF_NUMzVARS_, ctypes.byref(count)) return count.value def __contains__(self, key): """Determines whether a particular variable name is in the CDF @note: Essentially an efficiency function; L{__iter__} is called if this isn't defined @param key: key/variable name to check @type key: string @return: True if L{key} is the name of a variable in CDF, else False @rtype: Boolean """ try: foo = self[key] return True except KeyError as e: expected = str(key) + \ ": NO_SUCH_VAR: Named variable not found in this CDF." if expected in e.args: return False raise def __repr__(self): """Returns representation of CDF Cannot return anything that can be eval'd to create a copy of the CDF, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<CDF:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the CDF This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.CDF`, just the names, types, and sizes of all variables. (Attributes are not listed.) @return: description of the variables in the CDF @rtype: str """ if self._opened: return '\n'.join([key + ': ' + str(value) for (key, value) in sorted(self.items())]) #can get away with this sort because second value in tuple isn't #compared unless first are different, and variable name is unique. else: if isinstance(self.pathname, str): return 'Closed CDF {0}'.format(self.pathname) else: return 'Closed CDF {0}'.format(self.pathname.decode('ascii')) def _open(self, readonly=True): """Opens the CDF file (called on init) Will open an existing CDF file read/write. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.OPEN_, const.CDF_, self.pathname, ctypes.byref(self._handle)) self._opened = True if readonly: #Default is RW self.readonly(readonly) def _create(self): """Creates (and opens) a new CDF file Created at ``pathname``. Assumes zero-dimension r variables Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.CREATE_, const.CDF_, self.pathname, ctypes.c_long(0), (ctypes.c_long * 1)(0), ctypes.byref(self._handle)) self._opened = True def _from_master(self, master_path): """Creates a new CDF from a master CDF file ``master_path`` is copied to ``pathname`` and opened. Parameters ========== master_path : string location of the master CDF file Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. 
note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ if os.path.exists(self.pathname): raise CDFError(const.CDF_EXISTS) shutil.copy2(master_path, self.pathname) self._open(False) def _call(self, *args, **kwargs): """Select this CDF as current and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. Parameters ========== args : various, see :py:mod:`ctypes`. Passed directly to the CDF library interface. Useful constants are defined in the :doc:`const <pycdf_const>` module of this package. Returns ======= out : ctypes.c_long CDF status from the library .. note: Terminal NULL_ is automatically added to ``args``. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. """ return lib.call(const.SELECT_, const.CDF_, self._handle, *args, **kwargs) def clone(self, zVar, name=None, data=True): """ Clone a zVariable (from another CDF or this) into this CDF Parameters ========== zVar : :py:class:`Var` variable to clone Other Parameters ================ name : str Name of the new variable (default: name of the original) data : boolean (optional) Copy data, or only type, dimensions, variance, attributes? (default: True, copy data as well) Returns ======= out : :py:class:`Var` The newly-created zVar in this CDF """ if name is None: name = zVar.name() if name in self: del self[name] self.new(name, type=zVar.type(), recVary=zVar.rv(), dimVarys=zVar.dv(), dims=zVar._dim_sizes(), n_elements=zVar._nelems()) self[name].compress(*zVar.compress()) self[name].attrs.clone(zVar.attrs) if data: r = zVar._raw zVar._raw = True self.raw_var(name)[...] = zVar[...] zVar._raw = r return zVar def col_major(self, new_col=None): """ Finds the majority of this CDF file Other Parameters ================ new_col : boolean Specify True to change to column-major, False to change to row major, or do not specify to check the majority rather than changing it. (default is check only) Returns ======= out : boolean True if column-major, false if row-major """ if new_col != None: new_maj = const.COLUMN_MAJOR if new_col else const.ROW_MAJOR self._call(const.PUT_, const.CDF_MAJORITY_, new_maj) maj = ctypes.c_long(0) self._call(const.GET_, const.CDF_MAJORITY_, ctypes.byref(maj)) if not maj.value in (const.ROW_MAJOR.value, const.COLUMN_MAJOR.value): raise CDFError(const.BAD_MAJORITY) return maj.value == const.COLUMN_MAJOR.value def readonly(self, ro=None): """ Sets or check the readonly status of this CDF If the CDF has been changed since opening, setting readonly mode will have no effect. .. note:: Closing a CDF that has been opened readonly, or setting readonly False, may take a substantial amount of time if there are many variables in the CDF, as a (potentially large) cache needs to be cleared. Consider specifying ``readonly=False`` when opening the file if this is an issue. However, this may make some reading operations slower. Other Parameters ================ ro : Boolean True to set the CDF readonly, False to set it read/write, or leave out to check only. 
Returns ======= out : Boolean True if CDF is read-only, else False Raises ====== CDFError : if bad mode is set """ if ro == True: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYon) elif ro == False: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYoff) mode = ctypes.c_long(0) self._call(const.CONFIRM_, const.CDF_READONLY_MODE_, ctypes.byref(mode)) if mode.value == const.READONLYon.value: return True elif mode.value == const.READONLYoff.value: return False else: raise CDFError(const.BAD_READONLY_MODE.value) def checksum(self, new_val=None): """ Set or check the checksum status of this CDF. If checksums are enabled, the checksum will be verified every time the file is opened. Other Parameters ================ new_val : boolean True to enable checksum, False to disable, or leave out to simply check. Returns ======= out : boolean True if the checksum is enabled or False if disabled """ if new_val != None: self._call(const.PUT_, const.CDF_CHECKSUM_, const.MD5_CHECKSUM if new_val else const.NO_CHECKSUM) chk = ctypes.c_long(0) self._call(const.GET_, const.CDF_CHECKSUM_, ctypes.byref(chk)) if not chk.value in (const.MD5_CHECKSUM.value, const.NO_CHECKSUM.value): raise CDFError(const.BAD_CHECKSUM) return chk.value == const.MD5_CHECKSUM.value def close(self): """ Closes the CDF file Although called on object destruction (:meth:`~CDF.__del__`), to ensure all data are saved, the user should explicitly call :meth:`~CDF.close` or :meth:`~CDF.save`. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.CLOSE_, const.CDF_) self._opened = False def compress(self, comptype=None, param=None): """ Set or check the compression of this CDF Sets compression on entire *file*, not per-variable. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) Returns ======= out : tuple (comptype, param) currently in effect See Also ======== :meth:`Var.compress` Examples ======== Set file ``cdffile`` to gzip compression, compression level 9: >>> cdffile.compress(pycdf.const.GZIP_COMPRESSION, 9) """ return _compress(self, comptype, param) def new(self, name, data=None, type=None, recVary=True, dimVarys=None, dims=None, n_elements=None, compress=None, compress_param=None): """ Create a new zVariable in this CDF .. note:: Either ``data`` or ``type`` must be specified. If type is not specified, it is guessed from ``data``. Parameters ========== name : str name of the new variable Other Parameters ================ data data to store in the new variable. If this has a an ``attrs`` attribute (e.g., :class:`~spacepy.datamodel.dmarray`), it will be used to populate attributes of the new variable. type : ctypes.c_long CDF type of the variable, from :mod:`~pycdf.const`. See section 2.5 of the CDF user's guide for more information on CDF data types. recVary : boolean record variance of the variable (default True) dimVarys : list of boolean dimension variance of each dimension, default True for all dimensions. 
dims : list of int size of each dimension of this variable, default zero-dimensional. Note this is the dimensionality as defined by CDF, i.e., for record-varying variables it excludes the leading record dimension. See :py:class:`Var`. n_elements : int number of elements, should be 1 except for CDF_CHAR, for which it's the length of the string. compress : ctypes.c_long Compression to apply to this variable, default None. See :py:meth:`Var.compress`. compress_param : ctypes.c_long Compression parameter if compression used; reasonable default is chosen. See :py:meth:`Var.compress`. Returns ======= out : :py:class:`Var` the newly-created zVariable Raises ====== ValueError : if neither data nor sufficient typing information is provided. Notes ===== Any given data may be representable by a range of CDF types; if the type is not specified, pycdf will guess which the CDF types which can represent this data. This breaks down to: #. If input data is a numpy array, match the type of that array #. Proper kind (numerical, string, time) #. Proper range (stores highest and lowest number provided) #. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: #. Type that matches precision of data first, then #. integer type before float type, then #. Smallest type first, then #. signed type first, then #. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if ``data`` specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: #. absolute values between 0 and 3e-39 #. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. """ if type in (const.CDF_EPOCH16, const.CDF_INT8, const.CDF_TIME_TT2000) \ and self.backward: raise ValueError('Cannot use EPOCH16, INT8, or TIME_TT2000 ' 'in backward-compatible CDF') if not lib.supports_int8 and \ type in (const.CDF_INT8, const.CDF_TIME_TT2000): raise ValueError('INT8 and TIME_TT2000 require CDF library 3.4.0') if data is None: if type is None: raise ValueError('Must provide either data or a CDF type.') if dims is None: dims = [] if n_elements is None: n_elements = 1 else: (guess_dims, guess_types, guess_elements) = _Hyperslice.types(data) if dims is None: if recVary: if guess_dims == (): raise ValueError( 'Record-varying data cannot be scalar. 
' 'Specify NRV with CDF.new() or put data in array.') dims = guess_dims[1:] else: dims = guess_dims if type is None: type = guess_types[0] if type == const.CDF_EPOCH16.value and self.backward: type = const.CDF_EPOCH if n_elements is None: n_elements = guess_elements if dimVarys is None: dimVarys = [True for i in dims] recVary = const.VARY if recVary else const.NOVARY dimVarys = [const.VARY if dimVary else const.NOVARY for dimVary in dimVarys] if not hasattr(type, 'value'): type = ctypes.c_long(type) if type.value == const.CDF_INT8.value and not lib.supports_int8: raise ValueError( '64-bit integer support require CDF library 3.4.0') if type.value in (const.CDF_EPOCH16.value, const.CDF_INT8.value, const.CDF_TIME_TT2000.value) \ and self.backward: raise ValueError('Data requires EPOCH16, INT8, or TIME_TT2000; ' 'incompatible with backward-compatible CDF') new_var = Var(self, name, type, n_elements, dims, recVary, dimVarys) if compress != None: new_var.compress(compress, compress_param) if data is not None: new_var[...] = data if hasattr(data, 'attrs'): new_var.attrs.clone(data.attrs) return new_var def raw_var(self, name): """ Get a "raw" :class:`Var` object. Normally a :class:`Var` will perform translation of values for certain types (to/from Unicode for CHAR variables on Py3k, and to/from datetime for all time types). A "raw" object does not perform this translation, on read or write. This does *not* affect the data on disk, and in fact it is possible to maintain multiple Python objects with access to the same zVariable. Parameters ========== name : str name or number of the zVariable """ v = self[name] v._raw = True return v def save(self): """ Saves the CDF file but leaves it open. If closing the CDF, :meth:`close` is sufficient; there is no need to call :meth:`save` before :meth:`close`. .. note:: Relies on an undocumented call of the CDF C library, which is also used in the Java interface. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.SAVE_, const.CDF_) def copy(self): """ Make a copy of all data and attributes in this CDF Returns ======= out : :py:class:`CDFCopy` :class:`~spacepy.datamodel.SpaceData`-like object of all data """ return CDFCopy(self) def version(self): """ Get version of library that created this CDF Returns ======= out : tuple version of CDF library, in form (version, release, increment) """ ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) self._call(const.GET_, const.CDF_VERSION_, ctypes.byref(ver), const.GET_, const.CDF_RELEASE_, ctypes.byref(rel), const.GET_, const.CDF_INCREMENT_, ctypes.byref(inc)) return (ver.value, rel.value, inc.value) def _get_attrs(self): """Get attribute list Provide access to the CDF's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = gAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. 
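A brief illustrative sketch (the attribute name and value are arbitrary
examples, not a required convention):

>>> cdffile.attrs['Project'] = 'My mission'
>>> project = cdffile.attrs['Project'][0]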
""") def var_num(self, varname): """Get the variable number of a particular variable name This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : int Variable number of this zvariable. """ num = self._var_nums.get(varname, None) if num is None: #Copied from Var._get, which can hopefully be thinned varNum = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMBER_, varname, ctypes.byref(varNum)) num = varNum.value self._var_nums[varname] = num return num def attr_num(self, attrname): """Get the attribute number and scope by attribute name This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : tuple attribute number, scope (True for global) of this attribute """ res = self._attr_info.get(attrname, None) if res is None: #Copied from Var._get, which can hopefully be thinned attrNum = ctypes.c_long(0) self._call(const.GET_, const.ATTR_NUMBER_, attrname, ctypes.byref(attrNum)) scope = ctypes.c_long(0) self._call(const.SELECT_, const.ATTR_, attrNum, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) res = (attrNum.value, scope) self._attr_info[attrname] = res return res def clear_attr_from_cache(self, attrname): """Mark an attribute deleted in the name-to-number cache Will remove an attribute, and all attributes with higher numbers, from the attribute cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the attribute. Not this is NOT a string in Python 3! """ num, scope = self.attr_num(attrname) #All numbers higher than this are renumbered for a, n in list(self._attr_info.items()): if n[0] >= num: del self._attr_info[a] def clear_from_cache(self, varname): """Mark a variable deleted in the name-to-number cache Will remove a variable, and all variables with higher numbers, from the variable cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! """ num = self.var_num(varname) #All numbers higher than this are renumbered for v, n in list(self._var_nums.items()): if n >= num: del self._var_nums[v] def add_attr_to_cache(self, attrname, num, scope): """Add an attribute to the name-to-number cache This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable scope : bool True if global scope; False if variable scope. 
""" self._attr_info[attrname] = (num, scope) def add_to_cache(self, varname, num): """Add a variable to the name-to-number cache This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable """ self._var_nums[varname] = num #Note there is no function for delete, currently handled in Var.rename #and Attr.rename by just deleting from the dict directly. Maybe this #should be differen (maybe should be possible to follow a variable across #a rename...) class Var(MutableSequence): """ A CDF variable. This object does not directly store the data from the CDF; rather, it provides access to the data in a format that much like a Python list or numpy :class:`~numpy.ndarray`. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. The CDF user's guide, section 2.3, provides background on variables. .. note:: Not intended to be created directly; use methods of :class:`CDF` to gain access to a variable. A record-varying variable's data are viewed as a hypercube of dimensions n_dims+1 (the extra dimension is the record number). They are indexed in row-major fashion, i.e. the last index changes most frequently / is contiguous in memory. If the CDF is column-major, the data are transformed to row-major before return. Non record-varying variables are similar, but do not have the extra dimension of record number. Variables can be subscripted by a multidimensional index to return the data. Indices are in row-major order with the first dimension representing the record number. If the CDF is column major, the data are reordered to row major. Each dimension is specified by standard Python `slice <http://docs.python.org/tutorial/introduction.html#strings>`_ notation, with dimensions separated by commas. The ellipsis fills in any missing dimensions with full slices. The returned data are lists; Python represents multidimensional arrays as nested lists. The innermost set of lists represents contiguous data. .. note:: numpy 'fancy indexing' is *not* supported. Degenerate dimensions are 'collapsed', i.e. no list of only one element will be returned if a single subscript is specified instead of a range. (To avoid this, specify a slice like 1:2, which starts with 1 and ends before 2). Two special cases: 1. requesting a single-dimension slice for a record-varying variable will return all data for that record number (or those record numbers) for that variable. 2. Requests for multi-dimensional variables may skip the record-number dimension and simply specify the slice on the array itself. In that case, the slice of the array will be returned for all records. In the event of ambiguity (e.g., single-dimension slice on a one-dimensional variable), case 1 takes priority. Otherwise, mismatch between the number of dimensions specified in the slice and the number of dimensions in the variable will cause an :exc:`~exceptions.IndexError` to be thrown. This all sounds very complicated but it is essentially attempting to do the 'right thing' for a range of slices. An unusual case is scalar (zero-dimensional) non-record-varying variables. Clearly they cannot be subscripted normally. 
In this case, use the ``[...]`` syntax meaning 'access all data.': >>> import pycdf >>> testcdf = pycdf.CDF('test.cdf', '') >>> variable = testcdf.new('variable', recVary=False, ... type=pycdf.const.CDF_INT4) >>> variable[...] = 10 >>> variable <Var: CDF_INT4 [] NRV > >>> variable[...] 10 Reading any empty non-record-varying variable will return an empty with the same *number* of dimensions, but all dimensions will be of zero length. The scalar is, again, a special case: due to the inability to have a numpy array which is both zero-dimensional and empty, reading an NRV scalar variable with no data will return an empty one-dimensional array. This is really not recommended. As a list type, variables are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_; iterating over a variable returns a single complete record at a time. This is all clearer with examples. Consider a variable ``B_GSM``, with three elements per record (x, y, z components) and fifty records in the CDF. Then: 1. ``B_GSM[0, 1]`` is the y component of the first record. 2. ``B_GSM[10, :]`` is a three-element list, containing x, y, and z components of the 11th record. As a shortcut, if only one dimension is specified, it is assumed to be the record number, so this could also be written ``B_GSM[10]``. 3. ``B_GSM[...]`` reads all data for ``B_GSM`` and returns it as a fifty-element list, each element itself being a three-element list of x, y, z components. Multidimensional example: consider fluxes stored as a function of pitch angle and energy. Such a variable may be called Flux and stored as a two-dimensional array, with the first dimension representing (say) ten energy steps and the second, eighteen pitch angle bins (ten degrees wide, centered from 5 to 175 degrees). Assume 100 records stored in the CDF (i.e. 100 different times). 1. ``Flux[4]`` is a list of ten elements, one per energy step, each element being a list of 18 fluxes, one per pitch bin. All are taken from the fifth record in the CDF. 2. ``Flux[4, :, 0:4]`` is the same record, all energies, but only the first four pitch bins (roughly, field-aligned). 3. ``Flux[..., 0:4]`` is a 100-element list (one per record), each element being a ten-element list (one per energy step), each containing fluxes for the first four pitch bins. This slicing notation is very flexible and allows reading specifically the desired data from the CDF. All data are, on read, converted to appropriate Python data types; EPOCH, EPOCH16, and TIME_TT2000 types are converted to :class:`~datetime.datetime`. Data are returned in numpy arrays. .. note:: Although pycdf supports TIME_TT2000 variables, the Python :class:`~datetime.datetime` object does not support leap seconds. Thus, on read, any seconds past 59 are truncated to 59.999999 (59 seconds, 999 milliseconds, 999 microseconds). Potentially useful list methods and related functions: - `count <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `in <http://docs.python.org/reference/expressions.html#in>`_ - `index <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `len <http://docs.python.org/library/functions.html#len>`_ - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - `sorted <http://docs.python.org/library/functions.html#sorted>`_ The topic of array majority can be very confusing; good background material is available at `IDL Array Storage and Indexing <http://www.idlcoyote.com/misc_tips/colrow_major.html>`_. 
In brief, *regardless of the majority stored in the CDF*, pycdf will always present the data in the native Python majority, row-major order, also known as C order. This is the default order in `NumPy <http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html #internal-memory-layout-of-an-ndarray>`_. However, packages that render image data may expect it in column-major order. If the axes seem 'swapped' this is likely the reason. The :attr:`~Var.attrs` Python attribute acts as a dictionary referencing zAttributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`zAttrList` for more on the dictionary of attributes. With writing, as with reading, every attempt has been made to match the behavior of Python lists. You can write one record, many records, or even certain elements of all records. There is one restriction: only the record dimension (i.e. dimension 0) can be resized by write, as all records in a variable must have the same dimensions. Similarly, only whole records can be deleted. .. note:: Unusual error messages on writing data usually mean that pycdf is unable to interpret the data as a regular array of a single type matching the type and shape of the variable being written. A 5x4 array is supported; an irregular array where one row has five columns and a different row has six columns is not. Error messages of this type include: - ``Data must be well-formed, regular array of number, string, or datetime`` - ``setting an array element with a sequence.`` - ``shape mismatch: objects cannot be broadcast to a single shape`` For these examples, assume Flux has 100 records and dimensions [2, 3]. Rewrite the first record without changing the rest: >>> Flux[0] = [[1, 2, 3], [4, 5, 6]] Writes a new first record and delete all the rest: >>> Flux[...] = [[1, 2, 3], [4, 5, 6]] Write a new record in the last position and add a new record after: >>> Flux[99:] = [[[1, 2, 3], [4, 5, 6]], ... [[11, 12, 13], [14, 15, 16]]] Insert two new records between the current number 5 and 6: >>> Flux[5:6] = [[[1, 2, 3], [4, 5, 6]], [[11, 12, 13], ... [14, 15, 16]]] This operation can be quite slow, as it requires reading and rewriting the entire variable. (CDF does not directly support record insertion.) Change the first element of the first two records but leave other elements alone: >>> Flux[0:2, 0, 0] = [1, 2] Remove the first record: >>> del Flux[0] Removes record 5 (the sixth): >>> del Flux[5] Due to the need to work around a bug in the CDF library, this operation can be quite slow. Delete *all data* from ``Flux``, but leave the variable definition intact: >>> del Flux[...] .. note:: Although this interface only directly supports zVariables, zMode is set on opening the CDF so rVars appear as zVars. See p.24 of the CDF user's guide; pyCDF uses zMode 2. .. autosummary:: ~Var.attrs ~Var.compress ~Var.copy ~Var.dtype ~Var.dv ~Var.insert ~Var.name ~Var.rename ~Var.rv ~Var.shape ~Var.type .. attribute:: Var.attrs zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. .. automethod:: compress .. automethod:: copy .. autoattribute:: dtype .. automethod:: dv .. automethod:: insert .. automethod:: name .. automethod:: rename .. automethod:: rv .. autoattribute:: shape .. 
automethod:: type """ def __init__(self, cdf_file, var_name, *args): """Create or locate a variable Parameters ========== cdf_file : :py:class:`pycdf.CDF` CDF file containing this variable var_name : string name of this variable Other Parameters ================ args additional arguments passed to :py:meth:`_create`. If none, opens an existing variable. If provided, creates a new one. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning """ self.cdf_file = cdf_file #This is the definitive "identify" of variable self._name = None self._type = None #CDF type (long) self._raw = False #Raw access (skip all conversions) if len(args) == 0: self._get(var_name) else: self._create(var_name, *args) #Weak reference to attribute list (use attrs instead) #This avoids a reference loop self._attrlistref = weakref.ref(zAttrList(self)) def __getitem__(self, key): """Returns a slice from the data array. Details under :py:class:`pycdf.Var`. @return: The data from this variable @rtype: list-of-lists of appropriate type. @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) #Hyperslice mostly catches this sort of thing, but #an empty variable is a special case, since we might want to #WRITE to 0th record (which Hyperslice also supports) but #can't READ from it, and iterating over tries to read from it. if hslice.rv: if hslice.dimsizes[0] == 0 and hslice.degen[0] and \ hslice.starts[0] == 0: raise IndexError('record index out of range') #For NRV, again hslice will assume 0th record exists since we might #want to write. So ANY degenerate dim other than the glued-on 0th #suggests an explicit index that should fail. None degenerate suggests #make an empty array. #Note this is pulling a lot of hyperslice stuff into getitem! elif hslice.dimsizes[0] == 0: if len(hslice.degen) > 1 and max(hslice.degen[1:]): raise IndexError('record index out of range') else: #The zero-length dimension is degenerate so it gets chopped, #and you can't have a zero-length numpy array that still #maintains the size of all other dimensions. So just force #a zero-dim array and the rest will follow hslice.counts[...] = 0 #If this is a scalar, need to make a single non-degenerate #dimension so it can be empty. if len(hslice.counts) == 1: hslice.degen[0] = False result = hslice.create_array() if hslice.counts[0] != 0: hslice.select() lib.call(const.GET_, const.zVAR_HYPERDATA_, result.ctypes.data_as(ctypes.c_void_p)) return hslice.convert_input_array(result) def __delitem__(self, key): """Removes a record (or set of records) from the CDF Only whole records can be deleted, so the del call must either specify only one dimension or it must specify all elements of the non-record dimensions. This is *not* a way to resize a variable! Deleting records from the middle of a variable may be very slow in some circumstances. To work around a bug in CDF library versions 3.4.0 and before, all the data must be read in, the requested deletions done, and then all written back out. 
@param key: index or slice to delete @type key: int or slice @raise TypeError: if an attempt is made to delete from a non record-varying variable, or to delete below the record level """ if not self.rv(): raise TypeError('Cannot delete records from non-record-varying ' 'variable.') hslice = _Hyperslice(self, key) if hslice.dims > 1 and (hslice.counts[1:] != hslice.dimsizes[1:]).any(): raise TypeError('Can only delete entire records.') if hslice.counts[0] == 0: return start = hslice.starts[0] count = hslice.counts[0] interval = hslice.intervals[0] dimsize = hslice.dimsizes[0] self._call() dangerous_delete = False if lib._del_middle_rec_bug and \ (interval != 1 or (start != 0 and start + count < dimsize)): #delete from middle is dangerous if only have one index entry entries = ctypes.c_long(0) lib.call(const.GET_, const.zVAR_nINDEXENTRIES_, ctypes.byref(entries)) dangerous_delete = (entries.value == 1) if dangerous_delete: data = self[...] data = numpy.delete( data, numpy.arange(start, start + count * interval, interval), 0) self[0:dimsize - count] = data first_rec = dimsize - count last_rec = dimsize - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif interval == 1: first_rec = ctypes.c_long(start) last_rec = ctypes.c_long(start + count - 1) lib.call(const.DELETE_, const.zVAR_RECORDS_, first_rec, last_rec) else: self._call() #delete from end to avoid renumbering of records for recno in range(start + (count - 1) * interval, start - 1, -1 * interval): lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(recno), ctypes.c_long(recno)) def __setitem__(self, key, data): """Puts a slice into the data array. Details under :py:class:`pycdf.Var`. @param key: index or slice to store @type key: int or slice @param data: data to store @type data: numpy.array @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. 
IndexError will @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) n_recs = hslice.counts[0] hslice.expand(data) cdf_type = self.type() if cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) else: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=self._np_type()) if cdf_type == const.CDF_EPOCH16.value: datashape = data.shape[:-1] else: datashape = data.shape #Check data sizes if datashape != tuple(hslice.expected_dims()): raise ValueError('attempt to assign data of dimensions ' + str(datashape) + ' to slice of dimensions ' + str(tuple(hslice.expected_dims()))) #Flip majority and reversed dimensions, see convert_input_array data = hslice.convert_output_array(data) #Handle insertions and similar weirdness if hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Specified slice ends before last record, so insert in middle saved_data = self[hslice.starts[0] + n_recs:] if hslice.counts[0] > 0: hslice.select() lib.call(const.PUT_, const.zVAR_HYPERDATA_, data.ctypes.data_as(ctypes.c_void_p)) if hslice.counts[0] < n_recs: first_rec = hslice.starts[0] + hslice.counts[0] last_rec = hslice.dimsizes[0] - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Put saved data in after inserted data self[hslice.starts[0] + hslice.counts[0]:] = saved_data def extend(self, data): """ Append multiple values to the end of this variable This is an efficiency function which overrides the base implementation in MutableSequence. Parameters ---------- data : the data to append """ self[len(self):] = data def insert(self, index, data): """ Inserts a *single* record before an index Parameters ---------- index : int index before which to insert the new record data : the record to insert """ self[index:index] = [data] def _create(self, var_name, datatype, n_elements = 1, dims = (), recVary = const.VARY, dimVarys = None): """Creates a new zVariable @param var_name: name of this variable @type var_name: string @param datatype: CDF data type @type datatype: ctypes.c_long @param n_elements: number of elements (should be 1 except for CDF_CHAR variables). @type n_elements: long @param dims: size of each dimension for multi-dimensional variable, or empty for a zero-dimensional @type dims: sequence of long @param recVary: record variance for this variable (VARY/NOVARY) @type recVary: long @param dimVarys: array of VARY or NOVARY, variance for each dimension @type dimVarys: sequence of long @return: new variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.new}. 
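        A hedged sketch of the public route to this method; the variable name
        and type below are hypothetical:

        >>> newvar = cdf.new('Flux', type=const.CDF_FLOAT, dims=[2, 3])

        L{CDF.new} constructs a L{Var}, whose constructor in turn calls this
        method with the corresponding arguments.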
""" dim_array = (ctypes.c_long * len(dims))(*dims) enc_name = var_name.encode('ascii') if dimVarys is None: dim_vary_array = (ctypes.c_long * (len(dims) if len(dims) > 0 else 1))(const.VARY) else: dim_vary_array = (ctypes.c_long * len(dims))(*dimVarys) varNum = ctypes.c_long(0) self.cdf_file._call(const.CREATE_, const.zVAR_, enc_name, datatype, ctypes.c_long(n_elements), ctypes.c_long(len(dims)), dim_array, recVary, dim_vary_array, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) def _delete(self): """Removes this zVariable from the CDF @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ self._call(const.DELETE_, const.zVAR_) self.cdf_file.clear_from_cache(self._name) self._name = None def _get(self, var_name): """Gets an existing zVariable @param var_name: name of this variable @type var_name: string @return: variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.__getitem__}. """ if isinstance(var_name, str_classes): try: enc_name = var_name.encode('ascii').rstrip() except AttributeError: enc_name = var_name.rstrip() #already in ASCII #'touch' CDF to cause an error if the name isn't there; get number varNum = ctypes.c_long(0) self.cdf_file._call(const.GET_, const.zVAR_NUMBER_, enc_name, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) else: #Looking up by number name = ctypes.create_string_buffer(const.CDF_VAR_NAME_LEN256+1) self.cdf_file._call(const.SELECT_, const.zVAR_, ctypes.c_long(var_name), const.GET_, const.zVAR_NAME_, name) self._name = name.value.rstrip() self.cdf_file.add_to_cache(self._name, var_name) def _num(self): """Returns the zVar number for this variable @return: number of this zVar @rtype: int """ return self.cdf_file.var_num(self._name) def __len__(self): """Get number of records for this variable in this file @return: Number of records @rtype: long @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ count = ctypes.c_long(0) self._call(const.GET_, const.zVAR_MAXREC_, ctypes.byref(count)) return (count.value + 1) def __repr__(self): """Returns representation of the variable Cannot return anything that can be eval'd to create a copy, so just wrap the informal representation in angle brackets. @return: info on this zVar @rtype: str """ return '<Var:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the variable This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.Var`. 
@return: info on this zVar, CDFTYPE [dimensions] NRV (if not record-varying) @rtype: str """ if self.cdf_file._opened: cdftype = self.type() chartypes = (const.CDF_CHAR.value, const.CDF_UCHAR.value) rv = self.rv() typestr = lib.cdftypenames[cdftype] + \ ('*' + str(self._nelems()) if cdftype in chartypes else '' ) if rv: sizestr = str([len(self)] + self._dim_sizes()) else: sizestr = str(self._dim_sizes()) return typestr + ' ' + sizestr + ('' if rv else ' NRV') else: if isinstance(self._name, str): return 'zVar "{0}" in closed CDF {1}'.format( self._name, self.cdf_file.pathname) else: return 'zVar "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self.cdf_file.pathname.decode('ascii')) def _n_dims(self): """Get number of dimensions for this variable @return: the number of dimensions @rtype: long """ n_dims = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMDIMS_, ctypes.byref(n_dims)) return n_dims.value def _dim_sizes(self): """Get the dimension sizes for this variable @return: sequence of sizes @rtype: sequence of long @note: This will always be in Python order (i.e. row major, last index iterates most quickly), *regardless* of the majority of the CDF. """ sizes = (ctypes.c_long * const.CDF_MAX_DIMS)(0) self._call(const.GET_, const.zVAR_DIMSIZES_, sizes) sizes = sizes[0:self._n_dims()] return sizes # MASKED: rv function (lines 3177-3201) def dv(self, new_dv=None): """ Gets or sets dimension variance of each dimension of variable. If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Parameters ========== new_dv : list of boolean Each element True to change that dimension to dimension variance, False to change to not dimension variance. (Unspecified to simply check variance.) Returns ======= out : list of boolean True if that dimension has variance, else false. """ ndims = self._n_dims() if new_dv != None: if len(new_dv) != ndims: raise ValueError('Must specify variance for ' + str(ndims) + 'dimensions.') varies = (ctypes.c_long * ndims)( *[const.VARY if dv else const.NOVARY for dv in new_dv]) self._call(const.PUT_, const.zVAR_DIMVARYS_, varies) if ndims == 0: return [] varies = (ctypes.c_long * const.CDF_MAX_DIMS)() self._call(const.GET_, const.zVAR_DIMVARYS_, varies) return [dv != const.NOVARY.value for dv in varies[0:ndims]] def _call(self, *args, **kwargs): """Select this CDF and variable and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. @param args: Passed directly to the CDF library interface. Useful constants are defined in the :py:mod:`pycdf.const` module of this package. @type args: various, see :py:mod:`ctypes`. @return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self.cdf_file._call( const.SELECT_, const.zVAR_, ctypes.c_long(self.cdf_file.var_num(self._name)), *args, **kwargs) def _np_type(self): """Returns the numpy type of this variable This is the numpy type that will come directly out of the CDF; see :meth:`dtype` for the representation post-conversion. 
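        Hedged illustration (``epoch_var`` is a hypothetical CDF_EPOCH variable):

        >>> epoch_var._np_type()   # 8-byte float, the raw on-disk representation
        >>> epoch_var.dtype        # object dtype, datetimes after conversion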
Raises ====== CDFError : for library-reported error or failure to find numpy type Returns ======= out : dtype numpy dtype that will hold value from this variable """ cdftype = self.type() if cdftype == const.CDF_CHAR.value or cdftype == const.CDF_UCHAR.value: return numpy.dtype('S' + str(self._nelems())) try: return lib.numpytypedict[cdftype] except KeyError: raise CDFError(const.BAD_DATA_TYPE) def type(self, new_type=None): """ Returns or sets the CDF type of this variable Parameters ========== new_type : ctypes.c_long the new type from :mod:`~pycdf.const` Returns ======= out : int CDF type """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) n_elements = ctypes.c_long(self._nelems()) self._call(const.PUT_, const.zVAR_DATASPEC_, new_type, n_elements) self._type = None if self._type is None: cdftype = ctypes.c_long(0) self._call(const.GET_, const.zVAR_DATATYPE_, ctypes.byref(cdftype)) self._type = cdftype.value return self._type def _nelems(self): """Number of elements for each value in this variable This is the length of strings for CHAR and UCHAR, should be 1 otherwise. @return: length of strings @rtype: int """ nelems = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMELEMS_, ctypes.byref(nelems)) return nelems.value def name(self): """ Returns the name of this variable Returns ======= out : str variable's name """ if isinstance(self._name, str): return self._name elif isinstance(self._name, bytes): return self._name.decode() def compress(self, comptype=None, param=None): """ Set or check the compression of this variable Compression may not be changeable on variables with data already written; even deleting the data may not permit the change. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) Returns ======= out : tuple the (comptype, param) currently in effect """ return _compress(self, comptype, param) def copy(self): """ Copies all data and attributes from this variable Returns ======= out : :class:`VarCopy` list of all data in record order """ return VarCopy(self) def rename(self, new_name): """ Renames this variable Parameters ========== new_name : str the new name for this variable """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_VAR_NAME_LEN256: raise CDFError(const.BAD_VAR_NAME) self._call(const.PUT_, const.zVAR_NAME_, enc_name) self.cdf_file.add_to_cache( enc_name, self.cdf_file.var_num(self._name)) #Still in cache del self.cdf_file._var_nums[self._name] self._name = enc_name @property def shape(self): """ Provides the numpy array-like shape of this variable. Returns a tuple; first element is number of records (RV variable only) And the rest provide the dimensionality of the variable. .. note:: Assigning to this attribute will not change the shape. """ if self.rv(): return tuple([len(self)] + self._dim_sizes()) else: return tuple(self._dim_sizes()) @property def dtype(self): """ Provide the numpy dtype equivalent to the CDF type of this variable. 
Data from this variable will be returned in numpy arrays of this type. See Also -------- type """ cdftype = self.type() if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str is not bytes and not self._raw: return numpy.dtype('U' + str(self._nelems())) if cdftype in (const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value) and not self._raw: return numpy.dtype('O') return self._np_type() def _get_attrs(self): """Get attribute list Provide access to the zVar's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = zAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. """) class _Hyperslice(object): """Represents a CDF 'slice' used for the hyper CDF functions For internal module use only. @ivar dims: number of dimensions to this slice, usually number of dimensions to the variable plus one for the record, which represents the 0th (least rapidly varying) dimension. @type dims: int @ivar dimsizes: size of each dimension (0th is number of records) @type dimsizes: list of int @ivar starts: index of the start value for each dimension ('dimension indices' in CDF speak) @type starts: list of int @ivar counts: number of values to get from each dimension. Final result will be the product of everything in counts. ('dimension counts' in CDF speak) @type counts: numpy.array @ivar intervals: interval between successive indices to use for each dimension. ('dimension invervals' in CDF speak) @type intervals: list of int @ivar degen: is this dimension degenerate, i.e. should be removed in the returned dataset. A 3D array with one dimension degenerate will be returned as a 2D array (i.e. list-of-lists.) @type degen: numpy.array @ivar rev: should this dimension be returned in reverse order? @type rev: numpy.array @ivar column: is this slice in column-major mode (if false, row-major) @type column: boolean @ivar zvar: what CDF variable this object slices on @type zvar: :py:class:`pycdf.Var` @ivar expanded_key: fully-expanded version of the key passed to the constructor (all dimensions filled in) @type expanded_key: tuple @note: All dimension-related variables are stored row-major (Python order) """ def __init__(self, zvar, key): """Create a Hyperslice @param zvar: zVariable that this slices @type zvar: :py:class:`pycdf.Var` @param key: Python multi-dimensional slice as passed to __getitem__ @type key: tuple of slice and/or int @raise IndexError: if slice is out of range, mismatches dimensions, or otherwise unparsable. @raise ValueError: if slice has invalid values """ self.zvar = zvar self.rv = self.zvar.rv() #dim of records, + 1 record dim (NRV always is record 0) self.dims = zvar._n_dims() + 1 self.dimsizes = [len(zvar)] + \ zvar._dim_sizes() self.starts = [0] * self.dims self.counts = numpy.empty((self.dims,), dtype=numpy.int32) self.counts.fill(1) self.intervals = [1] * self.dims self.degen = numpy.zeros(self.dims, dtype=numpy.bool) self.rev = numpy.zeros(self.dims, dtype=numpy.bool) #key is: #1. 
a single value (integer or slice object) if called 1D #2. a tuple (of integers and/or slice objects) if called nD #3. Each item is either a single value (degenerate dim) # or a slice object. if not hasattr(key, '__len__'): #Not a container object, pack in tuple key = (key, ) if not self.rv: key = (0, ) + key #NRV, so always get 0th record (degenerate) key = self.expand_ellipsis(key, self.dims) if self.rv: #special-cases for RV variables if len(key) == 1: #get all data for this record(s) key = self.expand_ellipsis(key + (Ellipsis, ), self.dims) elif len(key) == self.dims - 1: #get same slice from each record key = (slice(None, None, None), ) + key if len(key) == self.dims: self.expanded_key = key for i in range(self.dims): idx = key[i] if hasattr(idx, 'start'): #slice (self.starts[i], self.counts[i], self.intervals[i], self.rev[i]) = \ self.convert_range(idx.start, idx.stop, idx.step, self.dimsizes[i]) else: #Single degenerate value if idx < 0: idx += self.dimsizes[i] if idx != 0 and (idx >= self.dimsizes[i] or idx < 0): raise IndexError('list index out of range') self.starts[i] = idx self.degen[i] = True else: raise IndexError('Slice does not match dimensions for zVar ' + str(zvar._name)) self.column = zvar.cdf_file.col_major() def expected_dims(self, data=None): """Calculate size of non-degenerate dimensions Figures out size, in each dimension, of expected input data @return: size of each dimension for this slice, excluding degenerate @rtype: list of int """ return [self.counts[i] for i in range(self.dims) if not self.degen[i]] def expand(self, data): """Expands the record dimension of this slice to hold a set of data If the length of data (outermost dimension) is larger than the record count (counts[0]) for this slice, expand the slice to hold all the data. This requires that the record dimension of the slice not be degenerate, and also that it not have been completely specified when the hyperslice was created (i.e. record dimension either ellipsis or no specified stop.) Does *not* expand any other dimension, since that's Very Hard in CDF. @param data: the data which are intended to be stored in this slice @type data: list """ rec_slice = self.expanded_key[0] if not self.rv or isinstance(data, str_classes) or self.degen[0] or \ not hasattr(rec_slice, 'stop'): return if len(data) < self.counts[0]: #Truncate to fit data if rec_slice.stop is None and rec_slice.step in (None, 1): self.counts[0] = len(data) elif len(data) > self.counts[0]: #Expand to fit data if rec_slice.step in (None, 1): self.counts[0] = len(data) def create_array(self): """Creates a numpy array to hold the data from this slice Returns ======= out : numpy.array array sized, typed, and dimensioned to hold data from this slice """ counts = self.counts degen = self.degen if self.column: counts = self.reorder(counts) degen = self.reorder(degen) #TODO: Forcing C order for now, revert to using self.column later array = numpy.empty( [counts[i] for i in range(len(counts)) if not degen[i]], self.zvar._np_type(), order='C') return numpy.require(array, requirements=('C', 'A', 'W')) def convert_input_array(self, buffer): """Converts a buffer of raw data from this slice EPOCH(16) variables always need to be converted. 
CHAR need converted to Unicode if py3k Parameters ========== buffer : numpy.array data as read from the CDF file Returns ======= out : numpy.array converted data """ result = self._flip_array(buffer) #Convert to derived types cdftype = self.zvar.type() if not self.zvar._raw: if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str != bytes: dt = numpy.dtype('U{0}'.format(result.dtype.itemsize)) result = numpy.require(numpy.char.array(result).decode(), dtype=dt) elif cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(result) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(result) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(result) return result def convert_output_array(self, buffer): """Convert a buffer of data that will go into this slice Parameters ========== buffer : numpy.array data to go into the CDF file Returns ======= out : numpy.array input with majority flipped and dimensions reversed to be suitable to pass directly to CDF library. """ buffer = self._flip_array(buffer) return numpy.require(buffer, requirements=('C', 'A', 'W')) def _flip_array(self, data): """ Operations for majority, etc. common between convert_input and _output """ cdftype = self.zvar.type() #Flip majority if any non-degenerate dimensions exist if self.column and not min(self.degen): #Record-number dim degen, swap whole thing if self.degen[0]: if cdftype == const.CDF_EPOCH16.value: #Maintain last dimension data = data.transpose( list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose() #Record-number dimension is not degenerate, so keep it first else: if cdftype == const.CDF_EPOCH16.value: data = data.transpose( [0] + list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose( [0] + list(range(len(data.shape) - 1, 0, -1))) #Reverse non-degenerate dimensions in rev #Remember that the degenerate indices are already gone! if self.rev.any(): sliced = [(slice(None, None, -1) if self.rev[i] else slice(None)) for i in range(self.dims) if not self.degen[i]] if cdftype == const.CDF_EPOCH16.value: #don't reverse last dim sliced.extend(slice(None)) data = operator.getitem(data, tuple(sliced)) return data def select(self): """Selects this hyperslice in the CDF Calls the CDF library to select the CDF, variable, records, and array elements corresponding to this slice. 
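        Simplified sketch of the internal read path that relies on this method,
        mirroring :meth:`Var.__getitem__` (error and empty-slice handling
        omitted)::

            hslice = _Hyperslice(zvar, key)
            result = hslice.create_array()
            hslice.select()
            lib.call(const.GET_, const.zVAR_HYPERDATA_,
                     result.ctypes.data_as(ctypes.c_void_p))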
""" args = (const.SELECT_, const.zVAR_RECNUMBER_, ctypes.c_long(self.starts[0]), const.SELECT_, const.zVAR_RECCOUNT_, ctypes.c_long(self.counts[0]), const.SELECT_, const.zVAR_RECINTERVAL_, ctypes.c_long(self.intervals[0])) if self.dims > 1: dims = self.dims - 1 args += (const.SELECT_, const.zVAR_DIMINDICES_, (ctypes.c_long * dims)(*self.starts[1:]), const.SELECT_, const.zVAR_DIMCOUNTS_, (ctypes.c_long * dims)(*self.counts[1:]), const.SELECT_, const.zVAR_DIMINTERVALS_, (ctypes.c_long * dims)(*self.intervals[1:])) self.zvar._call(*args) @staticmethod def expand_ellipsis(slices, n_dims): """Expands any ellipses into correct number of full-size slices @param slices: tuple of slices, integers, or ellipse objects @type slices: tuple @param n_dims: number of dimensions this slice is over @type n_dims: int @return: L{slices} with ellipses replaced by appropriate number of full-dimension slices @rtype: tuple @raise IndexError: if ellipses specified when already have enough dimensions """ if slices is Ellipsis: return tuple([slice(None, None, None) for i in range(n_dims)]) #Elements might be numpy arrays, so can't use in/index idx = [i for i, v in enumerate(slices) if v is Ellipsis] if not idx: #no ellipsis return slices if len(idx) > 1: #multiples! raise IndexError('Ellipses can only be used once per slice.') idx = idx[0] #how many dims to expand ellipsis to #remember the ellipsis is in len(slices) and must be replaced! extra = n_dims - len(slices) + 1 if extra < 0: raise IndexError('too many indices') result = slices[0:idx] + (slice(None), ) * extra + slices[idx+1:] return result @staticmethod def check_well_formed(data): """Checks if input data is well-formed, regular array""" d = numpy.asanyarray(data) if d.dtype == numpy.object: #this is probably going to be bad try: len(d.flat[0]) except TypeError: #at least it's not a list pass else: raise ValueError( 'Data must be well-formed, regular array of number, ' 'string, or datetime') @staticmethod def dimensions(data): """Finds the dimensions of a nested list-of-lists @param data: data of which dimensions are desired @type data: list (of lists) @return: dimensions of L{data}, in order outside-in @rtype: list of int @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) _Hyperslice.check_well_formed(d) return d.shape @staticmethod def types(data, backward=False): """Find dimensions and valid types of a nested list-of-lists Any given data may be representable by a range of CDF types; infer the CDF types which can represent this data. This breaks down to: 1. Proper kind (numerical, string, time) 2. Proper range (stores highest and lowest number) 3. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: 1. Type that matches precision of data first, then 2. integer type before float type, then 3. Smallest type first, then 4. signed type first, then 5. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if L{data} specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: 1. absolute values between 0 and 3e-39 2. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. 
@param data: data for which dimensions and CDF types are desired @type data: list (of lists) @param backward: limit to pre-CDF3 types @type backward: bool @return: dimensions of L{data}, in order outside-in; CDF types which can represent this data; number of elements required (i.e. length of longest string) @rtype: 3-tuple of lists ([int], [ctypes.c_long], [int]) @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) dims = d.shape elements = 1 types = [] _Hyperslice.check_well_formed(d) if d.dtype.kind in ('S', 'U'): #it's a string types = [const.CDF_CHAR, const.CDF_UCHAR] elements = d.dtype.itemsize if d.dtype.kind == 'U': #UTF-8 uses 4 bytes per elements //= 4 elif d.size and hasattr(numpy.ma.getdata(d).flat[0], 'microsecond'): if max((dt.microsecond % 1000 for dt in d.flat)) > 0: types = [const.CDF_EPOCH16, const.CDF_EPOCH, const.CDF_TIME_TT2000] else: types = [const.CDF_EPOCH, const.CDF_EPOCH16, const.CDF_TIME_TT2000] if backward: del types[types.index(const.CDF_EPOCH16)] del types[-1] elif not lib.supports_int8: del types[-1] elif d is data or isinstance(data, numpy.generic): #numpy array came in, use its type (or byte-swapped) types = [k for k in lib.numpytypedict if (lib.numpytypedict[k] == d.dtype or lib.numpytypedict[k] == d.dtype.newbyteorder()) and not k in lib.timetypes] if (not lib.supports_int8 or backward) \ and const.CDF_INT8.value in types: del types[types.index(const.CDF_INT8.value)] #Maintain priority to match the ordered lists below: #float/double (44, 45) before real (21/22), and #byte (41) before int (1) before char (51). So hack. #Consider making typedict an ordered dict once 2.6 is dead. types.sort(key=lambda x: x % 50, reverse=True) if not types: #not a numpy array, or can't parse its type if d.dtype.kind == 'O': #Object. 
Try to make it numeric #Can't do safe casting from Object, so try and compare #Basically try most restrictive to least restrictive trytypes = (numpy.uint64, numpy.int64, numpy.float64) for t in trytypes: try: newd = d.astype(dtype=t) except: #Failure to cast, try next type continue if (newd == d).all(): #Values preserved, use this type d = newd #Continue with normal guessing, as if a list break else: #fell through without a match raise ValueError( 'Cannot convert generic objects to CDF type.') if d.dtype.kind in ('i', 'u'): #integer minval = numpy.min(d) maxval = numpy.max(d) if minval < 0: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_INT2, const.CDF_INT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 15, 2 ** 31, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] else: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_UINT1, const.CDF_INT2, const.CDF_UINT2, const.CDF_INT4, const.CDF_UINT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 8, 2 ** 15, 2 ** 16, 2 ** 31, 2 ** 32, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] types = [t for (t, c) in zip(types, cutoffs) if c > maxval and (minval >= 0 or minval >= -c)] if (not lib.supports_int8 or backward) \ and const.CDF_INT8 in types: del types[types.index(const.CDF_INT8)] else: #float if dims is (): if d != 0 and (abs(d) > 1.7e38 or abs(d) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] else: absolutes = numpy.abs(d[d != 0]) if len(absolutes) > 0 and \ (numpy.max(absolutes) > 1.7e38 or numpy.min(absolutes) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] types = [t.value if hasattr(t, 'value') else t for t in types] return (dims, types, elements) @staticmethod def reorder(seq): """Reorders seq to switch array majority Used to take an array of subscripts between row and column majority. First element is not touched, being the record number. @param seq: a sequence of *subscripts* @type seq: sequence of integers @return: seq with all but element 0 reversed in order @rtype: sequence of integers """ return numpy.concatenate((seq[0:1], numpy.flipud(seq)[:-1])) @staticmethod def convert_range(start, stop, step, size): """Converts a start/stop/step range to start/count/interval (i.e. changes from Python-style slice to CDF-style) @param start: index to start a slice at, may be none or negative @type start: int @param stop: index at end of slice (one-past, standard Python), may be none or negative @type stop: int @param step: interval for stepping through stlice @type step: int @param size: size of list to slice @type size: int @return: (start, count, interval, rev) where: 1. start is the start index, normalized to be within the size of the list and negatives handled 2. count is the number of records in the slice, guaranteed to stop before the end 3. interval is the skip between records 4. rev indicates whether the sequence should be reversed @rtype: (int, int, int, boolean) """ (start, stop, step) = slice(start, stop, step).indices(size) if step < 0: step *= -1 count = int((start - stop + step - 1) / step) start = start - (count - 1) * step rev = True else: count = int((stop - start + step - 1) / step) rev = False if count < 0: count = 0 start = 0 return (start, count, step, rev) class Attr(MutableSequence): """An attribute, g or z, for a CDF .. 
warning:: This class should not be used directly, but only in its subclasses, :class:`gAttr` and :class:`zAttr`. The methods listed here are safe to use in the subclasses. Represents a CDF attribute, providing access to the Entries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. An introduction to CDF attributes can be found in section 2.4 of the CDF user's guide. Each element of the list is a single Entry of the appropriate type. The index to the elements is the Entry number. Multi-dimensional slicing is *not* supported; an Entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry .. autosummary:: ~Attr.append ~Attr.has_entry ~Attr.insert ~Attr.max_idx ~Attr.new ~Attr.number ~Attr.rename ~Attr.type .. automethod:: append .. automethod:: has_entry .. automethod:: insert .. automethod:: max_idx .. automethod:: new .. automethod:: number .. automethod:: rename .. automethod:: type """ def __init__(self, cdf_file, attr_name, create=False): """Initialize this attribute @param cdf_file: CDF file containing this attribute @type cdf_file: :py:class:`pycdf.CDF` @param attr_name: Name of this attribute @type attr_name: str @param create: True to create attribute, False to look up existing. @type create: bool """ self._cdf_file = cdf_file self._raw = False if isinstance(attr_name, str_classes): try: self._name = attr_name.encode('ascii') except AttributeError: self._name = attr_name attrno = ctypes.c_long() if create: self._cdf_file._call(const.CREATE_, const.ATTR_, self._name, self.SCOPE, ctypes.byref(attrno)) self._cdf_file.add_attr_to_cache( self._name, attrno.value, self.SCOPE == const.GLOBAL_SCOPE) else: #Ensure exists, and populate cache. See scope note below attrno, scope = self._cdf_file.attr_num(self._name) else: name = ctypes.create_string_buffer(const.CDF_ATTR_NAME_LEN256 + 1) scope = ctypes.c_long(0) self._cdf_file._call(const.SELECT_, const.ATTR_, ctypes.c_long(attr_name)) #Because it's possible to create a gAttr Python objecting #referencing an Attribute with variable scope, and vice-versa, #do NOT assume the scope matches #(Higher level code checks for that being a bad thing.) self._cdf_file._call( const.GET_, const.ATTR_NAME_, name, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) self._name = name.value.rstrip() if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) self._cdf_file.add_attr_to_cache(self._name, attr_name, scope) def __getitem__(self, key): """Return a slice of Entries. Because Attributes may be sparse, a multi-element slice will return None for those elements which do not have associated Entries. @param key: index or range of Entry number to return @type key: slice or int @return: a list of entries, appropriate type. @raise IndexError: if L{key} is an int and that Entry number does not exist. 
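        Hedged example (``attribute`` is a hypothetical L{Attr} with Entries
        at numbers 0 and 2 only):

        >>> attribute[0]      # Entry 0 exists; its value is returned
        >>> attribute[0:3]    # sparse slice; missing Entries appear as None
        >>> attribute[1]      # no Entry 1; raises IndexError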
""" if key is Ellipsis: key = slice(None, None, None) if hasattr(key, 'indices'): idx = range(*key.indices(self.max_idx() + 1)) return [self._get_entry(i) if self.has_entry(i) else None for i in idx] else: if self.has_entry(key): return self._get_entry(key) else: raise IndexError('list index ' + str(key) + ' out of range.') def _check_other_entries(self, types): """Try to get the type of this entry from others in the Attribute For zAttrs, checks if all other Entries are the same type, and at least one doesn't match its zVar, i.e. Entry type dominates (otherwise assumption is the Var type dominates). For gAttrs, checks all other Entries, and gives priority to the one that's earliest in the possible type list and exists in other Entries. This is only one component of Entry type guessing! :param list types: CDF types that are candidates (match the data) :return: The type discerned from other Entries, or None """ if self.ENTRY_ == const.zENTRY_: #If everything else is the same entry type, #and one is not the same as its var, probably #all entries should be of that type cand_et = None #The Entry type that might work one_var_diff = False #One Var has a type different from Entry for num in range(self.max_idx() + 1): if not self.has_entry(num): continue vartype = self._cdf_file[num].type() entrytype = self.type(num) if vartype != entrytype: one_var_diff = True if cand_et is None: if not entrytype in types: return None #One var has Entry with "impossible" type cand_et = entrytype elif cand_et != entrytype: return None #Two vars have Entries with different types if one_var_diff and cand_et is not None: return cand_et else: # Of those types which exist in other entries, # find the one which is earliest # in types, i.e. the preferred type entrytypes = [self.type(num) for num in range(self.max_idx() + 1) if self.has_entry(num)] entrytypes = [et for et in entrytypes if et in types] if entrytypes: return types[ min([types.index(et) for et in entrytypes])] return None def __setitem__(self, key, data): """Set a slice of Entries. @param key: index or range of Entry numbers to set @type key: slice or int @param data: the data to set these entries to. Normally each entry should be a sequence; if a scalar is provided, it is treated as a single-element list. @type data: scalar or list @raise ValueError: if size of {data} does not match size of L{key} @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. 
""" if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): #Single value, promote everything a dimension idx = (key, key + 1, 1) data = [data] else: idx = key.indices(self.max_idx() + 1) if key.step is None or key.step > 0: #Iterating forward, extend slice to match data if len(data) > len(range(*idx)): idx = (idx[0], idx[0] + idx[2] * len(data), idx[2]) #get, and check, types and sizes for all data #checks first so don't have error after changing half the Entries data_idx = -1 typelist = [] for i in range(*idx): data_idx += 1 if data_idx >= len(data): continue datum = data[data_idx] if datum is None: typelist[i] = (None, None, None) continue (dims, types, elements) = _Hyperslice.types( datum, backward=self._cdf_file.backward) if len(types) <= 0: raise ValueError('Cannot find a matching CDF type.') if len(dims) > 1: raise ValueError('Entries must be scalar or 1D.') elif len(dims) == 1 and isinstance(datum[0], str_classes): raise ValueError('Entry strings must be scalar.') entry_type = None if self.has_entry(i): #If the entry already exists, match its type entry_type = self.type(i) if not entry_type in types: entry_type = None if entry_type is None: #Check other entries for this attribute entry_type = self._check_other_entries(types) if entry_type is None and self.ENTRY_ == const.zENTRY_: #Fall back to zVar type vartype = self._cdf_file[i].type() if vartype in types: entry_type = vartype else: entry_type = types[0] elif entry_type is None: entry_type = types[0] if not entry_type in lib.numpytypedict: raise ValueError('Cannot find a matching numpy type.') typelist.append((dims, entry_type, elements)) data_idx = -1 for i in range(*idx): data_idx += 1 if data_idx >= len(data) or data[data_idx] is None: if self.has_entry(i): del self[i] continue datum = data[data_idx] (dims, entry_type, elements) = typelist[data_idx] self._write_entry(i, datum, entry_type, dims, elements) def __delitem__(self, key): """Delete a slice of Entries. @param key: index or range of Entry numbers to delete @type key: slice or int @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. """ if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): idx = (key, key + 1, 1) else: idx = key.indices(self.max_idx() + 1) for i in range(*idx): self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(i), const.DELETE_, self.ENTRY_) def __iter__(self, current=0): """Iterates over all entries in this Attribute Returns data from one entry at a time until reaches the end. @note: Returned in entry-number order. """ while current <= self.max_idx(): if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current += 1 def __reversed__(self, current=None): """Iterates over all entries in this Attribute Returns data from one entry at a time, starting at end and going to beginning. @note: Returned in entry-number order. """ if current is None: current = self.max_idx() while current >= 0: if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current -= 1 def __len__(self): """Number of Entries for this Attr. NOT same as max Entry number. 
@return: Number of Entries @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_NUMENTRIES_, ctypes.byref(count)) return count.value def __repr__(self): """Returns representation of an attribute Cannot return anything that can be eval'd to create a copy of the attribtute, so just wrap the informal representation in angle brackets. @return: all the data in this attribute @rtype: str """ return '<\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute This is an 'informal' representation in that it cannot be evaluated directly to create an L{Attr}. @return: all the data in this attribute @rtype: str """ if self._cdf_file._opened: return '\n'.join([str(item) for item in self]) else: if isinstance(self._name, str): return 'Attribute "{0}" in closed CDF {1}'.format( self._name, self._cdf_file.pathname) else: return 'Attribute "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self._cdf_file.pathname.decode('ascii')) def insert(self, index, data): """Insert an entry at a particular number Inserts entry at particular number while moving all subsequent entries to one entry number later. Does not close gaps. Parameters ========== index : int index where to put the new entry data : data for the new entry """ max_entry = self.max_idx() if index > max_entry: #Easy case self[index] = data return for i in range(max_entry, index - 1, -1): if self.has_entry(i+1): self.__delitem__(i+1) if self.has_entry(i): self.new(self.__getitem__(i), type=self.type(i), number=i+1) self[index] = data def append(self, data): """Add an entry to end of attribute Puts entry after last defined entry (does not fill gaps) Parameters ========== data : data for the new entry """ self[self.max_idx() + 1] = data def _call(self, *args, **kwargs): """Select this CDF and Attr and call the CDF internal interface @param args: Passed directly to the CDF library interface. @type args: various, see :py:mod:`ctypes`. @return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self._cdf_file._call( const.SELECT_, const.ATTR_, ctypes.c_long(self._cdf_file.attr_num(self._name)[0]), *args, **kwargs) def _entry_len(self, number): """Number of elements in an Entry @param number: number of Entry @type number: int @return: number of elements @rtype: int """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') count = ctypes.c_long(0) self._call( const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_NUMELEMS_, ctypes.byref(count)) return count.value def type(self, number, new_type=None): """Find or change the CDF type of a particular Entry number Parameters ========== number : int number of Entry to check or change Other Parameters ================ new_type type to change this Entry to, from :mod:`~pycdf.const`. Omit to only check type. Returns ======= out : int CDF variable type, see :mod:`~pycdf.const` Notes ===== If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) size = ctypes.c_long(self._entry_len(number)) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATASPEC_, new_type, size, ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') cdftype = ctypes.c_long(0) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATATYPE_, ctypes.byref(cdftype), ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') return cdftype.value def has_entry(self, number): """Check if this attribute has a particular Entry number Parameters ========== number : int number of Entry to check or change Returns ======= out : bool True if ``number`` is a valid entry number; False if not """ status = self._call(const.CONFIRM_, self.ENTRY_EXISTENCE_, ctypes.c_long(number), ignore=(const.NO_SUCH_ENTRY, )) return not status == const.NO_SUCH_ENTRY def max_idx(self): """Maximum index of Entries for this Attr Returns ======= out : int maximum Entry number """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_MAXENTRY_, ctypes.byref(count)) return count.value def new(self, data, type=None, number=None): """Create a new Entry in this Attribute .. note:: If ``number`` is provided and an Entry with that number already exists, it will be overwritten. Parameters ========== data data to put in the Entry Other Parameters ================ type : int type of the new Entry, from :mod:`~pycdf.const` (otherwise guessed from ``data``) number : int Entry number to write, default is lowest available number. """ if number is None: number = 0 while self.has_entry(number): number += 1 (dims, types, elements) = _Hyperslice.types( data, backward=self._cdf_file.backward) if type is None: #Guess based on other entries type = self._check_other_entries(types) if type is None and self.ENTRY_ == const.zENTRY_: #Try to match variable type vartype = self._cdf_file[number].type() if vartype in types: type = vartype if type is None: type = types[0] elif hasattr(type, 'value'): type = type.value self._write_entry(number, data, type, dims, elements) def number(self): """Find the attribute number for this attribute Returns ======= out : int attribute number """ no = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.ATTR_NUMBER_, self._name, ctypes.byref(no)) return no.value def global_scope(self): """Determine scope of this attribute. Returns ======= out : bool True if global (i.e. gAttr), False if zAttr """ return self._cdf_file.attr_num(self._name)[1] def rename(self, new_name): """Rename this attribute Renaming a zAttribute renames it for *all* zVariables in this CDF! 
Parameters ========== new_name : str the new name of the attribute """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_ATTR_NAME_LEN256: raise CDFError(const.BAD_ATTR_NAME) self._call(const.PUT_, const.ATTR_NAME_, enc_name) self._cdf_file.add_attr_to_cache( enc_name, *self._cdf_file.attr_num(self._name)) #still in cache del self._cdf_file._attr_info[self._name] self._name = enc_name def _get_entry(self, number): """Read an Entry associated with this L{Attr} @param number: number of Entry to return @type number: int @return: data from entry numbered L{number} @rtype: list or str """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') #Make a big enough buffer length = self._entry_len(number) cdftype = self.type(number) if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): buff = numpy.empty((), 'S{0}'.format(length), order='C') else: if not cdftype in lib.numpytypedict: raise CDFError(const.BAD_DATA_TYPE) buff = numpy.empty((length,), lib.numpytypedict[cdftype], order='C') buff = numpy.require(buff, requirements=('C', 'A', 'W')) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATA_, buff.ctypes.data_as(ctypes.c_void_p)) #decode if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): if str == bytes or self._raw: #Py2k, leave as bytes result = bytes(buff) else: #Py3k, make unicode result = str(numpy.char.array(buff).decode()) else: if not self._raw: if cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(buff) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(buff) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(buff) else: result = buff else: result = buff if length == 1: result = result[0] return result def _write_entry(self, number, data, cdf_type, dims, elements): """Write an Entry to this Attr. @param number: number of Entry to write @type number: int @param data: data to write @param cdf_type: the CDF type to write, from :py:mod:`pycdf.const` @param dims: dimensions of L{data} @type dims: list @param elements: number of elements in L{data}, 1 unless it is a string @type elements: int """ if len(dims) == 0: n_write = 1 else: n_write = dims[0] if cdf_type in (const.CDF_CHAR.value, const.CDF_UCHAR.value): data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.dtype('S' + str(elements))) n_write = elements elif cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data), except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) elif cdf_type in lib.numpytypedict: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=lib.numpytypedict[cdf_type]) else: raise CDFError(const.BAD_DATA_TYPE) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATA_, ctypes.c_long(cdf_type), ctypes.c_long(n_write), data.ctypes.data_as(ctypes.c_void_p)) def _delete(self): """Delete this Attribute Also deletes all Entries associated with it. 
""" self._call(const.DELETE_, const.ATTR_) self._cdf_file.clear_attr_from_cache(self._name) self._name = None class zAttr(Attr): """zAttribute for zVariables within a CDF. .. warning:: Because zAttributes are shared across all variables in a CDF, directly manipulating them may have unexpected consequences. It is safest to operate on zEntries via :class:`zAttrList`. .. note:: When accessing a zAttr, pyCDF exposes only the zEntry corresponding to the associated zVariable. See Also ======== :class:`Attr` """ ENTRY_ = const.zENTRY_ ENTRY_DATA_ = const.zENTRY_DATA_ SCOPE = const.VARIABLE_SCOPE ENTRY_EXISTENCE_ = const.zENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMzENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXzENTRY_ ENTRY_NUMELEMS_ = const.zENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.zENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.zENTRY_DATASPEC_ def insert(self, index, data): """Insert entry at particular index number Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError def append(self, index, data): """Add entry to end of attribute list Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError class gAttr(Attr): """Global Attribute for a CDF Represents a CDF attribute, providing access to the gEntries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. Normally accessed by providing a key to a :class:`gAttrList`: >>> attribute = cdffile.attrs['attribute_name'] >>> first_gentry = attribute[0] Each element of the list is a single gEntry of the appropriate type. The index to the elements is the gEntry number. A gEntry may be either a single string or a 1D array of numerical type. Entries of numerical type (everything but CDF_CHAR and CDF_UCHAR) with a single element are returned as scalars; multiple-element entries are returned as a list. No provision is made for accessing below the entry level; the whole list is returned at once (but Python's slicing syntax can be used to extract individual items from that list.) Multi-dimensional slicing is *not* supported; an entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry gEntries are *not* necessarily contiguous; a gAttribute may have an entry 0 and entry 2 without an entry 1. :meth:`~Attr.len` will return the *number* of gEntries; use :meth:`~Attr.max_idx` to find the highest defined gEntry number and :meth:`~Attr.has_entry` to determine if a particular gEntry number exists. Iterating over all entries is also supported:: >>> entrylist = [entry for entry in attribute] Deleting gEntries will leave a "hole": >>> attribute[0:3] = [1, 2, 3] >>> del attribute[1] >>> attribute.has_entry(1) False >>> attribute.has_entry(2) True >>> print attribute[0:3] [1, None, 3] Multi-element slices over nonexistent gEntries will return ``None`` where no entry exists. Single-element indices for nonexistent gEntries will raise ``IndexError``. Assigning ``None`` to a gEntry will delete it. 
When assigning to a gEntry, the type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing gEntry of the same number in this gAttribute #. other gEntries in this gAttribute #. data-matching constraints described in :meth:`CDF.new`. See Also ======== :class:`Attr` """ ENTRY_ = const.gENTRY_ ENTRY_DATA_ = const.gENTRY_DATA_ SCOPE = const.GLOBAL_SCOPE ENTRY_EXISTENCE_ = const.gENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMgENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXgENTRY_ ENTRY_NUMELEMS_ = const.gENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.gENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.gENTRY_DATASPEC_ class AttrList(MutableMapping): """Object representing a list of attributes. .. warning:: This class should not be used directly, but only via its subclasses, :class:`gAttrList` and :class:`zAttrList`. Methods listed here are safe to use from the subclasses. .. autosummary:: ~AttrList.clone ~AttrList.copy ~AttrList.from_dict ~AttrList.new ~AttrList.rename .. automethod:: clone .. automethod:: copy .. automethod:: from_dict .. automethod:: new .. automethod:: rename """ def __init__(self, cdf_file, special_entry=None): """Initialize the attribute collection @param cdf_file: CDF these attributes are in @type cdf_file: :py:class:`pycdf.CDF` @param special_entry: callable which returns a "special" entry number, used to limit results for zAttrs to those which match the zVar (i.e. the var number) @type special_entry: callable """ self._cdf_file = cdf_file self.special_entry = special_entry def __getitem__(self, name): """Find an Attribute by name @param name: name of the Attribute to return @type name: str @return: attribute named L{name} @rtype: L{Attr} @raise KeyError: if there is no attribute named L{name} @raise CDFError: other errors in CDF library """ try: attrib = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attrib.global_scope() != self.global_scope: raise KeyError(name + ': no ' + self.attr_name + ' by that name.') return attrib def __setitem__(self, name, data): """Create an Attribute or change its entries @param name: name of Attribute to change @type name: str @param data: Entries to populate this Attribute with. Any existing Entries will be deleted! Another C{Attr} may be specified, in which case all its entries are copied. @type data: scalar, list, or L{Attr} """ if isinstance(data, AttrList): if name in self: del self[name] attr = self._get_or_create(name) for entryno in range(data.max_idx()): if data.has_entry(entryno): attr.new(data[entryno], data.type(entryno), entryno) else: attr = self._get_or_create(name) if isinstance(data, str_classes): data = [data] else: try: junk = len(data) except TypeError: data = [data] attr[:] = data del attr[len(data):] def __delitem__(self, name): """Delete an Attribute (and all its entries) @param name: name of Attribute to delete @type name: str """ try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) attr._delete() def __iter__(self, current=0): """Iterates over all Attr in this CDF or variable Returns name of one L{Attr} at a time until reaches the end. @note: Returned in number order. 
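        A sketch of typical use (assumes ``cdffile`` is an open :class:`CDF`,
        so ``cdffile.attrs`` is a :class:`gAttrList`):

        >>> gattr_names = [name for name in cdffile.attrs]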
""" count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) while current < count.value: candidate = self.AttrType(self._cdf_file, current) if candidate.global_scope() == self.global_scope: if self.special_entry is None or \ candidate.has_entry(self.special_entry()): if str == bytes: value = yield(candidate._name) else: value = yield(candidate._name.decode()) if value != None: current = self[value].number() current += 1 def __repr__(self): """Returns representation of attribute list Cannot return anything that can be eval'd to create a copy of the list, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<' + self.__class__.__name__ + ':\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute list This is an 'informal' representation in that it cannot be evaluated directly to create an L{AttrList}. @return: all the data in this list of attributes @rtype: str """ if self._cdf_file._opened: return '\n'.join([key + ': ' + ( ('\n' + ' ' * (len(key) + 2)).join( [str(value[i]) + ' [' + lib.cdftypenames[value.type(i)] + ']' for i in range(value.max_idx() + 1) if value.has_entry(i)]) if isinstance(value, Attr) else str(value) + ' [' + lib.cdftypenames[self.type(key)] + ']' ) for (key, value) in sorted(self.items())]) else: if isinstance(self._cdf_file.pathname, str): return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname) else: return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname.decode('ascii')) def clone(self, master, name=None, new_name=None): """ Clones another attribute list, or one attribute from it, into this list. Parameters ========== master : AttrList the attribute list to copy from. This can be any dict-like object. Other Parameters ================ name : str (optional) name of attribute to clone (default: clone entire list) new_name : str (optional) name of the new attribute, default ``name`` """ if name is None: self._clone_list(master) else: self._clone_attr(master, name, new_name) def copy(self): """ Create a copy of this attribute list Returns ======= out : dict copy of the entries for all attributes in this list """ return dict((key, value[:] if isinstance(value, Attr) else value) for (key, value) in self.items()) def new(self, name, data=None, type=None): """ Create a new Attr in this AttrList Parameters ========== name : str name of the new Attribute Other Parameters ================ data data to put into the first entry in the new Attribute type CDF type of the first entry from :mod:`~pycdf.const`. Only used if data are specified. Raises ====== KeyError : if the name already exists in this list """ if name in self: raise KeyError(name + ' already exists.') #A zAttr without an Entry in this zVar will be a "get" not "create" attr = self._get_or_create(name) if data is not None: if self.special_entry is None: attr.new(data, type) else: attr.new(data, type, self.special_entry()) def rename(self, old_name, new_name): """ Rename an attribute in this list Renaming a zAttribute renames it for *all* zVariables in this CDF! Parameters ========== old_name : str the current name of the attribute new_name : str the new name of the attribute """ AttrList.__getitem__(self, old_name).rename(new_name) def from_dict(self, in_dict): """ Fill this list of attributes from a dictionary .. deprecated:: 0.1.5 Use :meth:`~pycdf.AttrList.clone` instead; it supports cloning from dictionaries. 
Parameters ========== in_dict : dict Attribute list is populated entirely from this dictionary; all existing attributes are deleted. """ warnings.warn("from_dict is deprecated and will be removed. Use clone.", DeprecationWarning) for k in in_dict: self[k] = in_dict[k] for k in list(self): if not k in in_dict: del self[k] def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{AttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name self[new_name] = master[name] def _clone_list(self, master): """Clones this attribute list from another @param master: the attribute list to copy from @type master: L{AttrList} """ for name in master: self._clone_attr(master, name) for name in list(self): #Can't iterate over a list we're changing if not name in master: del self[name] def _get_or_create(self, name): """Retrieve L{Attr} or create it if it doesn't exist @param name: name of the attribute to look up or create @type name: str @return: attribute with this name @rtype: L{Attr} """ attr = None try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status != const.NO_SUCH_ATTR: raise if attr is None: attr = self.AttrType(self._cdf_file, name, True) elif attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) return attr class gAttrList(AttrList): """ Object representing *all* the gAttributes in a CDF. Normally accessed as an attribute of an open :class:`CDF`: >>> global_attribs = cdffile.attrs Appears as a dictionary: keys are attribute names; each value is an attribute represented by a :class:`gAttr` object. To access the global attribute TEXT: >>> text_attr = cdffile.attrs['TEXT'] See Also ======== :class:`AttrList` """ AttrType = gAttr attr_name = 'gAttribute' global_scope = True def __len__(self): """ Number of gAttributes in this CDF Returns ======= out : int number of gAttributes in the CDF """ count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMgATTRS_, ctypes.byref(count)) return count.value class zAttrList(AttrList): """Object representing *all* the zAttributes in a zVariable. Normally accessed as an attribute of a :class:`Var` in an open CDF: >>> epoch_attribs = cdffile['Epoch'].attrs Appears as a dictionary: keys are attribute names, values are the value of the zEntry associated with the appropriate zVariable. Each vAttribute in a CDF may only have a *single* entry associated with each variable. The entry may be a string, a single numerical value, or a series of numerical values. Entries with multiple values are returned as an entire list; direct access to the individual elements is not possible. Example: finding the first dependency of (ISTP-compliant) variable ``Flux``: >>> print cdffile['Flux'].attrs['DEPEND_0'] zAttributes are shared among zVariables, one zEntry allowed per zVariable. (pyCDF hides this detail.) Deleting the last zEntry for a zAttribute will delete the underlying zAttribute. zEntries are created and destroyed by the usual dict methods on the zAttrlist: >>> epoch_attribs['new_entry'] = [1, 2, 4] #assign a list to new zEntry >>> del epoch_attribs['new_entry'] #delete the zEntry The type of the zEntry is guessed from data provided. 
The type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing zEntry corresponding to this zVar #. other zEntries in this zAttribute #. the type of this zVar #. data-matching constraints described in :py:meth:`CDF.new` See Also ======== :class:`AttrList` """ AttrType = zAttr attr_name = 'zAttribute' global_scope = False def __init__(self, zvar): """Initialize the attribute collection @param zvar: zVariable these attributes are in @param zvar: :py:class:`pycdf.Var` """ super(zAttrList, self).__init__(zvar.cdf_file, zvar._num) self._zvar = zvar def __getitem__(self, name): """Find an zEntry by name @param name: name of the zAttribute to return @type name: str @return: attribute named L{name} @rtype: L{zAttr} @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if attrib.has_entry(zvar_num): attrib._raw = self._zvar._raw return attrib[zvar_num] else: raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) def __delitem__(self, name): """Delete an zEntry by name @param name: name of the zEntry to delete @type name: str @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library @note: If this is the only remaining entry, the Attribute will be deleted. """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(str(name) + ': no such attribute for variable ' + str(self._zvar._name)) del attrib[zvar_num] if len(attrib) == 0: attrib._delete() def __setitem__(self, name, data): """Sets a zEntry by name The type of the zEntry is guessed from L{data}. The type is chosen to match the data; subject to that constraint, it will try to match (in order): 1. existing zEntry corresponding to this zVar 2. other zEntries in this zAttribute 3. the type of this zVar 4. data-matching constraints described in L{_Hyperslice.types} @param name: name of zAttribute; zEntry for this zVariable will be set in zAttribute by this name @type name: str @raise CDFError: errors in CDF library @raise ValueError: if unable to find a valid CDF type matching L{data}, or if L{data} is the wrong dimensions. """ try: attr = super(zAttrList, self).__getitem__(name) except KeyError: attr = zAttr(self._cdf_file, name, True) attr._raw = self._zvar._raw attr[self._zvar._num()] = data def __len__(self): """Number of zAttributes in this variable @return: number of zAttributes in the CDF which have entries for this variable. @rtype: int """ length = 0 count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) current = 0 while current < count.value: candidate = zAttr(self._cdf_file, current) if not candidate.global_scope(): if candidate.has_entry(self._zvar._num()): length += 1 current += 1 return length def type(self, name, new_type=None): """Find or change the CDF type of a zEntry in this zVar @param name: name of the zAttr to check or change @type name: str @param new_type: type to change it to, see :py:mod:`pycdf.const` @type new_type: ctypes.c_long @return: CDF variable type, see :py:mod:`pycdf.const` @rtype: int @note: If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) return attrib.type(zvar_num, new_type) def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{zAttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name if new_name in self: del self[new_name] self.new(new_name, master[name], master.type(name) if hasattr(master, 'type') else None)
def rv(self, new_rv=None): """ Gets or sets whether this variable has record variance If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Other Parameters ================ new_rv : boolean True to change to record variance, False to change to NRV, unspecified to simply check variance. Returns ======= out : Boolean True if record varying, False if NRV """ if new_rv != None: self._call(const.PUT_, const.zVAR_RECVARY_, const.VARY if new_rv else const.NOVARY) vary = ctypes.c_long(0) self._call(const.GET_, const.zVAR_RECVARY_, ctypes.byref(vary)) return vary.value != const.NOVARY.value
3177
3201
#!/usr/bin/env python # -*- coding: utf-8 -*- """ das developers note: This a is modification of the original SpacePy pycdf package. All refereneces to the greater spacepy package have been removed to create a small standalone module. --cwp 2018-10-18 The libcdf.so location code has been changed to find the version installed in anaconda. --cwp 2020-04-06 This package provides a Python interface to the Common Data Format (CDF) library used for many NASA missions, available at http://cdf.gsfc.nasa.gov/. It is targeted at Python 2.6+ and should work without change on either Python 2 or Python 3. The interface is intended to be 'pythonic' rather than reproducing the C interface. To open or close a CDF and access its variables, see the :class:`CDF` class. Accessing data within the variables is via the :class:`Var` class. The :data:`lib` object provides access to some routines that affect the functionality of the library in general. The :mod:`~pycdf.const` module contains constants useful for accessing the underlying library. Authors: Jon Niehof Institution: University of New Hampshire Contact: [email protected] Copyright 2010-2015 Los Alamos National Security, LLC. """ __contact__ = 'Jon Niehof, [email protected]' try: from collections.abc import MutableMapping, MutableSequence except ImportError: from collections import MutableMapping, MutableSequence import ctypes import ctypes.util import datetime import operator import os import os.path import shutil import sys import tempfile import warnings import weakref import numpy import numpy.ma #Import const AFTER library loaded, so failed load doesn't leave half-imported #from . import const try: str_classes = (str, bytes, unicode) except NameError: str_classes = (str, bytes) class Library(object): """ Abstraction of the base CDF C library and its state. Not normally intended for end-user use. An instance of this class is created at package load time as the :data:`~pycdf.lib` variable, providing access to the underlying C library if necessary. The CDF library itself is described in section 2.1 of the CDF user's guide, as well as the CDF C reference manual. Calling the C library directly requires knowledge of :mod:`ctypes`. Instantiating this object loads the C library, see :doc:`/pycdf` docs for details. .. autosummary:: ~Library.call ~Library.check_status ~Library.datetime_to_epoch ~Library.datetime_to_epoch16 ~Library.datetime_to_tt2000 ~Library.epoch_to_datetime ~Library.epoch_to_epoch16 ~Library.epoch_to_num ~Library.epoch_to_tt2000 ~Library.epoch16_to_datetime ~Library.epoch16_to_epoch ~Library.epoch16_to_tt2000 ~Library.set_backward supports_int8 ~Library.tt2000_to_datetime ~Library.tt2000_to_epoch ~Library.tt2000_to_epoch16 v_datetime_to_epoch v_datetime_to_epoch16 v_datetime_to_tt2000 v_epoch_to_datetime v_epoch_to_tt2000 v_epoch16_to_datetime v_epoch16_to_tt2000 v_tt2000_to_datetime v_tt2000_to_epoch v_tt2000_to_epoch16 libpath version .. automethod:: call .. automethod:: check_status .. automethod:: datetime_to_epoch .. automethod:: datetime_to_epoch16 .. automethod:: datetime_to_tt2000 .. automethod:: epoch_to_datetime .. automethod:: epoch_to_epoch16 .. automethod:: epoch_to_num .. automethod:: epoch_to_tt2000 .. automethod:: epoch16_to_datetime .. automethod:: epoch16_to_epoch .. automethod:: epoch16_to_tt2000 .. automethod:: set_backward .. attribute:: supports_int8 True if this library supports INT8 and TIME_TT2000 types; else False. .. automethod:: tt2000_to_datetime .. automethod:: tt2000_to_epoch .. 
automethod:: tt2000_to_epoch16 .. method:: v_datetime_to_epoch(datetime) A vectorized version of :meth:`datetime_to_epoch` which takes a numpy array of datetimes as input and returns an array of epochs. .. method:: v_datetime_to_epoch16(datetime) A vectorized version of :meth:`datetime_to_epoch16` which takes a numpy array of datetimes as input and returns an array of epoch16. .. method:: v_datetime_to_tt2000(datetime) A vectorized version of :meth:`datetime_to_tt2000` which takes a numpy array of datetimes as input and returns an array of TT2000. .. method:: v_epoch_to_datetime(epoch) A vectorized version of :meth:`epoch_to_datetime` which takes a numpy array of epochs as input and returns an array of datetimes. .. method:: v_epoch_to_tt2000(epoch) A vectorized version of :meth:`epoch_to_tt2000` which takes a numpy array of epochs as input and returns an array of tt2000s. .. method:: v_epoch16_to_datetime(epoch0, epoch1) A vectorized version of :meth:`epoch16_to_datetime` which takes a numpy array of epoch16 as input and returns an array of datetimes. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_epoch16_to_tt2000(epoch16) A vectorized version of :meth:`epoch16_to_tt2000` which takes a numpy array of epoch16 as input and returns an array of tt2000s. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_tt2000_to_datetime(tt2000) A vectorized version of :meth:`tt2000_to_datetime` which takes a numpy array of tt2000 as input and returns an array of datetimes. .. method:: v_tt2000_to_epoch(tt2000) A vectorized version of :meth:`tt2000_to_epoch` which takes a numpy array of tt2000 as input and returns an array of epochs. .. method:: v_tt2000_to_epoch16(tt2000) A vectorized version of :meth:`tt2000_to_epoch16` which takes a numpy array of tt2000 as input and returns an array of epoch16. .. attribute:: libpath The path where pycdf found the CDF C library, potentially useful in debugging. If this contains just the name of a file (with no path information), then the system linker found the library for pycdf. On Linux, ``ldconfig -p`` may be useful for displaying the system's library resolution. .. attribute:: version Version of the CDF library, (version, release, increment, subincrement) """ def __init__(self, libpath=None, library=None): """Load the CDF C library. Searches for the library in the order: 1. Appropriately-named file in CDF_LIB 2. Appropriately-named file in CDF_BASE 3. Standard library search path @raise CDFError: BAD_DATA_TYPE if can't map types properly """ if not 'CDF_TMP' in os.environ: os.environ['CDF_TMP'] = tempfile.gettempdir() if not library: if not libpath: self.libpath, self._library = self._find_lib() if self._library is None: raise Exception(( 'Cannot load CDF C library; checked {0}. 
' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(self.libpath))) else: self._library = ctypes.CDLL(libpath) self.libpath = libpath else: self._library = library self.libpath = libpath self._library.CDFlib.restype = ctypes.c_long #commonly used, so set it up here self._library.EPOCHbreakdown.restype = ctypes.c_long self._library.computeEPOCH.restype = ctypes.c_double self._library.computeEPOCH.argtypes = [ctypes.c_long] * 7 self._library.computeEPOCH16.restype = ctypes.c_double self._library.computeEPOCH16.argtypes = [ctypes.c_long] * 10 + \ [ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDFsetFileBackward'): self._library.CDFsetFileBackward.restype = None self._library.CDFsetFileBackward.argtypes = [ctypes.c_long] #Map old name to the 3.7.1+ name if not hasattr(self._library, 'computeTT2000') \ and hasattr(self._library, 'CDF_TT2000_from_UTC_parts'): self._library.computeTT2000 \ = self._library.CDF_TT2000_from_UTC_parts if hasattr(self._library, 'computeTT2000'): self._library.computeTT2000.restype = ctypes.c_longlong self._library.computeTT2000.argtypes = \ [ctypes.c_double] *9 #Map old name to the 3.7.1+ name if not hasattr(self._library, 'breakdownTT2000') \ and hasattr(self._library, 'CDF_TT2000_to_UTC_parts'): self._library.breakdownTT2000 \ = self._library.CDF_TT2000_to_UTC_parts if hasattr(self._library, 'breakdownTT2000'): self._library.breakdownTT2000.restype = None self._library.breakdownTT2000.argtypes = \ [ctypes.c_longlong] + [ctypes.POINTER(ctypes.c_double)] * 9 if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH'): self._library.CDF_TT2000_to_UTC_EPOCH.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH.argtypes = [ctypes.c_longlong] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH'): self._library.CDF_TT2000_from_UTC_EPOCH.restype = ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH.argtypes = [ctypes.c_double] if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH16'): self._library.CDF_TT2000_to_UTC_EPOCH16.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH16.argtypes = \ [ctypes.c_longlong, ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH16'): self._library.CDF_TT2000_from_UTC_EPOCH16.restype = \ ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH16.argtypes = \ [ctypes.POINTER(ctypes.c_double * 2)] #Get CDF version information ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) sub = ctypes.c_char(b' ') self.call(const.GET_, const.LIB_VERSION_, ctypes.byref(ver), const.GET_, const.LIB_RELEASE_, ctypes.byref(rel), const.GET_, const.LIB_INCREMENT_, ctypes.byref(inc), const.GET_, const.LIB_subINCREMENT_, ctypes.byref(sub)) ver = ver.value rel = rel.value inc = inc.value sub = sub.value self.version = (ver, rel, inc, sub) self._del_middle_rec_bug = ver < 3 or (ver == 3 and (rel < 4 or (rel == 4 and inc < 1))) self.supports_int8 = (ver > 3 or (ver == 3 and rel >=4)) self.cdftypenames = {const.CDF_BYTE.value: 'CDF_BYTE', const.CDF_CHAR.value: 'CDF_CHAR', const.CDF_INT1.value: 'CDF_INT1', const.CDF_UCHAR.value: 'CDF_UCHAR', const.CDF_UINT1.value: 'CDF_UINT1', const.CDF_INT2.value: 'CDF_INT2', const.CDF_UINT2.value: 'CDF_UINT2', const.CDF_INT4.value: 'CDF_INT4', const.CDF_UINT4.value: 'CDF_UINT4', const.CDF_INT8.value: 'CDF_INT8', const.CDF_FLOAT.value: 'CDF_FLOAT', const.CDF_REAL4.value: 'CDF_REAL4', const.CDF_DOUBLE.value: 'CDF_DOUBLE', const.CDF_REAL8.value: 'CDF_REAL8', const.CDF_EPOCH.value: 'CDF_EPOCH', 
const.CDF_EPOCH16.value: 'CDF_EPOCH16', const.CDF_TIME_TT2000.value: 'CDF_TIME_TT2000', } self.numpytypedict = {const.CDF_BYTE.value: numpy.int8, const.CDF_CHAR.value: numpy.int8, const.CDF_INT1.value: numpy.int8, const.CDF_UCHAR.value: numpy.uint8, const.CDF_UINT1.value: numpy.uint8, const.CDF_INT2.value: numpy.int16, const.CDF_UINT2.value: numpy.uint16, const.CDF_INT4.value: numpy.int32, const.CDF_UINT4.value: numpy.uint32, const.CDF_INT8.value: numpy.int64, const.CDF_FLOAT.value: numpy.float32, const.CDF_REAL4.value: numpy.float32, const.CDF_DOUBLE.value: numpy.float64, const.CDF_REAL8.value: numpy.float64, const.CDF_EPOCH.value: numpy.float64, const.CDF_EPOCH16.value: numpy.dtype((numpy.float64, 2)), const.CDF_TIME_TT2000.value: numpy.int64, } self.timetypes = [const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value] if not self.supports_int8: del self.cdftypenames[const.CDF_INT8.value] del self.numpytypedict[const.CDF_INT8.value] del self.cdftypenames[const.CDF_TIME_TT2000.value] del self.numpytypedict[const.CDF_TIME_TT2000.value] elif sys.platform.startswith('linux') \ and os.uname()[4].startswith('arm') \ and hasattr(self._library, 'computeTT2000') \ and self._library.computeTT2000( 2010, 1, 1, 0, 0, 0, 0, 0, 0) != 315576066184000000: #TT2000 call failed, so probably need to type-pun #double arguments to variadic functions. #Calling convention for non-variadic functions with floats #is unique, but convention for ints is same as variadic. #So type-pun arguments to integers to force that calling #convention. if ctypes.sizeof(ctypes.c_longlong) != \ ctypes.sizeof(ctypes.c_double): warnings.warn('ARM with unknown type sizes; ' 'TT2000 functions will not work.') else: self._library.computeTT2000.argtypes = \ [ctypes.c_longlong] * 9 c_ll_p = ctypes.POINTER(ctypes.c_longlong) if self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( 2010)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) != 315576066184000000: warnings.warn('ARM with unknown calling convention; ' 'TT2000 functions will not work.') self.datetime_to_tt2000 = self._datetime_to_tt2000_typepunned v_epoch16_to_datetime = numpy.frompyfunc( self.epoch16_to_datetime, 2, 1) self.v_epoch16_to_datetime = \ lambda x: v_epoch16_to_datetime(x[..., 0], x[..., 1]) self.v_epoch_to_datetime = numpy.frompyfunc( self.epoch_to_datetime, 1, 1) self.v_tt2000_to_datetime = numpy.frompyfunc( self.tt2000_to_datetime, 1, 1) self.v_datetime_to_epoch = numpy.vectorize( self.datetime_to_epoch, otypes=[numpy.float64]) v_datetime_to_epoch16 = numpy.frompyfunc( self.datetime_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. 
We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_datetime_to_epoch16(x): retval = numpy.require(v_datetime_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_datetime_to_epoch16 = _v_datetime_to_epoch16 self.v_datetime_to_tt2000 = numpy.vectorize( self.datetime_to_tt2000, otypes=[numpy.int64]) self.v_epoch_to_tt2000 = numpy.vectorize( self.epoch_to_tt2000, otypes=[numpy.int64]) self.v_tt2000_to_epoch = numpy.vectorize( self.tt2000_to_epoch, otypes=[numpy.float64]) v_epoch16_to_tt2000 = numpy.frompyfunc( self.epoch16_to_tt2000, 2, 1) self.v_epoch16_to_tt2000 = \ lambda x: v_epoch16_to_tt2000(x[..., 0], x[..., 1]) v_tt2000_to_epoch16 = numpy.frompyfunc( self.tt2000_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_tt2000_to_epoch16(x): retval = numpy.require(v_tt2000_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_tt2000_to_epoch16 = _v_tt2000_to_epoch16 if not self.supports_int8: self.datetime_to_tt2000 = self._bad_tt2000 self.tt2000_to_datetime = self._bad_tt2000 self.v_datetime_to_tt2000 = self._bad_tt2000 self.v_tt2000_to_datetime = self._bad_tt2000 self.epoch_to_tt2000 = self._bad_tt2000 self.v_epoch_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch = self._bad_tt2000 self.v_tt2000_to_epoch = self._bad_tt2000 self.epoch_16_to_tt2000 = self._bad_tt2000 self.v_epoch16_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch16 = self._bad_tt2000 self.v_tt2000_to_epoch16 = self._bad_tt2000 #Default to V2 CDF self.set_backward(True) @staticmethod def _find_lib(): """ Search for the CDF library Searches in likely locations for CDF libraries and attempts to load them. Stops at first successful load and, if fails, reports all the files that were tried as libraries. Returns ======= out : tuple This is either (path to library, loaded library) or, in the event of failure, (None, list of libraries tried) """ failed = [] for libpath in Library._lib_paths(): try: lib = ctypes.CDLL(libpath) except: failed.append(libpath) else: return libpath, lib return (failed, None) @staticmethod def _lib_paths(): """Find candidate paths for the CDF library Does not check that the library is actually in any particular directory, just returns a list of possible locations, in priority order. Returns ======= out : generator of str paths that look like the CDF library """ #What the library might be named names = { 'win32': ['cdf.dll'], 'darwin': ['libcdf.dylib', 'cdf.dylib', 'libcdf.so'], 'linux2': ['libcdf.so'], 'linux': ['libcdf.so'], } names = names.get(sys.platform, ['libcdf.so']) #All existing CDF-library-like paths within a directory search_dir = lambda x: \ [os.path.join(x, fname) for fname in names if os.path.exists(os.path.join(x, fname))] # Only use anaconda locations... # Defined during builds ... if 'PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['PREFIX'], 'lib')): yield p # defined when conda is activated ... 
if 'CONDA_PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'lib')): yield p # Special subdirectory for anaconda unix packages on windows if 'LIBRARY_BIN' in os.environ: for p in search_dir(os.environ['LIBRARY_BIN']): yield p ctypespath = ctypes.util.find_library( 'cdf.dll' if sys.platform == 'win32' else 'cdf') if ctypespath: yield ctypespath def check_status(self, status, ignore=()): """ Raise exception or warning based on return status of CDF call Parameters ========== status : int status returned by the C library Other Parameters ================ ignore : sequence of ctypes.c_long CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. (Default none). Raises ====== CDFError : if status < CDF_WARN, indicating an error Warns ===== CDFWarning : if CDF_WARN <= status < CDF_OK, indicating a warning. Returns ======= out : int status (unchanged) """ if status == const.CDF_OK or status in ignore: return status if status < const.CDF_WARN: raise CDFError(status) else: warning = CDFWarning(status) warning.warn() return status def call(self, *args, **kwargs): """ Call the CDF internal interface Passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with :meth:`check_status`. Terminal NULL is automatically added to args. Parameters ========== args : various, see :mod:`ctypes` Passed directly to the CDF library interface. Useful constants are defined in the :mod:`~pycdf.const` module. Other Parameters ================ ignore : sequence of CDF statuses sequence of CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. Returns ======= out : int CDF status from the library Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ if 'ignore' in kwargs: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) ), kwargs['ignore']) else: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) )) def set_backward(self, backward=True): """ Set backward compatibility mode for new CDFs Unless backward compatible mode is set, CDF files created by the version 3 library can not be read by V2. Parameters ========== backward : boolean Set backward compatible mode if True; clear it if False. Raises ====== ValueError : if backward=False and underlying CDF library is V2 """ if self.version[0] < 3: if not backward: raise ValueError( 'Cannot disable backward-compatible mode for CDF version 2.') else: return self._library.CDFsetFileBackward(const.BACKWARDFILEon if backward else const.BACKWARDFILEoff) def epoch_to_datetime(self, epoch): """ Converts a CDF epoch value to a datetime Parameters ========== epoch : float epoch value from CDF Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
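        A round-trip sketch (assumes the module-level :data:`~pycdf.lib`
        instance; the intermediate numeric epoch value is not shown):

        >>> epoch = lib.datetime_to_epoch(datetime.datetime(2010, 1, 1))
        >>> lib.epoch_to_datetime(epoch)
        datetime.datetime(2010, 1, 1, 0, 0)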
See Also ======== v_epoch_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) self._library.EPOCHbreakdown(ctypes.c_double(epoch), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999000) else: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, msec.value * 1000) def datetime_to_epoch(self, dt): """ Converts a Python datetime to a CDF Epoch value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : float epoch corresponding to dt See Also ======== v_datetime_to_epoch """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) micro = dt.microsecond % 1000 if micro >= 500 and dt.year < 9999: dt += datetime.timedelta(0, 0, 1000) return self._library.computeEPOCH(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000)) def epoch16_to_datetime(self, epoch0, epoch1): """ Converts a CDF epoch16 value to a datetime .. note:: The call signature has changed since SpacePy 0.1.2. Formerly this method took a single argument with two values; now it requires two arguments (one for each value). To convert existing code, replace ``epoch16_to_datetime(epoch)`` with ``epoch16_to_datetime(*epoch)``. Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. See Also ======== v_epoch16_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) usec = ctypes.c_long(0) nsec = ctypes.c_long(0) psec = ctypes.c_long(0) self._library.EPOCH16breakdown((ctypes.c_double * 2)(epoch0, epoch1), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec), ctypes.byref(psec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) micro = int(float(msec.value) * 1000 + float(usec.value) + float(nsec.value) / 1000 + float(psec.value) / 1e6 + 0.5) if micro < 1000000: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_epoch16(self, dt): """ Converts a Python datetime to a CDF Epoch16 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : list of float epoch16 corresponding to dt See Also ======== v_datetime_to_epoch16 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) #Default to "illegal epoch" epoch16 = (ctypes.c_double * 2)(-1., -1.) 
if self._library.computeEPOCH16(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0, 0, epoch16): return (-1., -1.) #Failure, so illegal epoch return (epoch16[0], epoch16[1]) def epoch_to_epoch16(self, epoch): """ Converts a CDF EPOCH to a CDF EPOCH16 value Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : (double, double) EPOCH16 corresponding to epoch """ e = numpy.require(epoch, numpy.float64) s = numpy.trunc(e / 1000.0) #ugly numpy stuff, probably a better way.... res = numpy.hstack((s, (e - s * 1000.0) * 1e9)) if len(res) <= 2: return res newshape = list(res.shape[0:-2]) newshape.append(res.shape[-1] // 2) newshape.append(2) return numpy.rollaxis(res.reshape(newshape), -1, -2) def epoch_to_num(self, epoch): """ Convert CDF EPOCH to matplotlib number. Same output as :func:`~matplotlib.dates.date2num` and useful for plotting large data sets without converting the times through datetime. Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : double Floating point number representing days since 0001-01-01. """ #date2num day 1 is 1/1/1 00UT #epoch 1/1/1 00UT is 31622400000.0 (millisecond) return (epoch - 31622400000.0) / (24 * 60 * 60 * 1000.0) + 1.0 def epoch16_to_epoch(self, epoch16): """ Converts a CDF EPOCH16 to a CDF EPOCH value Parameters ========== epoch16 : (double, double) EPOCH16 to convert. Lists and numpy arrays are acceptable. LAST dimension should be 2: the two pairs of EPOCH16 Returns ======= out : double EPOCH corresponding to epoch16 """ e = numpy.require(epoch16, numpy.float64) return e[..., 0] * 1000.0 + numpy.round(e[..., 1] / 1e9) def tt2000_to_datetime(self, tt2000): """ Converts a CDF TT2000 value to a datetime .. note:: Although TT2000 values support leapseconds, Python's datetime object does not. Any times after 23:59:59.999999 will be truncated to 23:59:59.999999. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
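        A round-trip sketch (assumes a CDF library with TT2000 support and the
        module-level :data:`~pycdf.lib` instance):

        >>> tt = lib.datetime_to_tt2000(datetime.datetime(2010, 1, 1))
        >>> lib.tt2000_to_datetime(tt)
        datetime.datetime(2010, 1, 1, 0, 0)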
See Also ======== v_tt2000_to_datetime """ yyyy = ctypes.c_double(0) mm = ctypes.c_double(0) dd = ctypes.c_double(0) hh = ctypes.c_double(0) min = ctypes.c_double(0) sec = ctypes.c_double(0) msec = ctypes.c_double(0) usec = ctypes.c_double(0) nsec = ctypes.c_double(0) self._library.breakdownTT2000( ctypes.c_longlong(tt2000), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) sec = int(sec.value) if sec >= 60: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), 59, 999999) micro = int(msec.value * 1000 + usec.value + nsec.value / 1000 + 0.5) if micro < 1000000: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_tt2000(self, dt): """ Converts a Python datetime to a CDF TT2000 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0) def _datetime_to_tt2000_typepunned(self, dt): """ Converts a Python datetime to a CDF TT2000 value Typepunned version that passes doubles as longlongs, to get around ARM calling convention oddness. Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ c_ll_p = ctypes.POINTER(ctypes.c_longlong) if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( dt.year)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.month)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.day)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.hour)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.minute)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.second)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond // 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond % 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) def epoch_to_tt2000(self, epoch): """ Converts a CDF EPOCH to a CDF TT2000 value Parameters ========== epoch : double EPOCH to convert Returns ======= out : int tt2000 corresponding to epoch See Also ======== v_epoch_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH(epoch) def tt2000_to_epoch(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH .. note:: Although TT2000 values support leapseconds, CDF EPOCH values do not. 
Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double EPOCH corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch """ return self._library.CDF_TT2000_to_UTC_EPOCH(tt2000) def epoch16_to_tt2000(self, epoch0, epoch1): """ Converts a CDF epoch16 value to TT2000 .. note:: Because TT2000 does not support picoseconds, the picoseconds value in epoch is ignored (i.e., truncated.) Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : long TT2000 corresponding to epoch. See Also ======== v_epoch16_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH16( (ctypes.c_double * 2)(epoch0, epoch1)) def tt2000_to_epoch16(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH16 .. note:: Although TT2000 values support leapseconds, CDF EPOCH16 values do not. Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double, double EPOCH16 corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch16 """ #Default to "illegal epoch" if isn't populated epoch16 = (ctypes.c_double * 2)(-1., -1.) if self._library.CDF_TT2000_to_UTC_EPOCH16(tt2000, epoch16): return (-1., -1.) #Failure; illegal epoch return (epoch16[0], epoch16[1]) def _bad_tt2000(*args, **kwargs): """Convenience function for complaining that TT2000 not supported""" raise NotImplementedError( 'TT2000 functions require CDF library 3.4.0 or later') def download_library(): """Download and install the CDF library""" if sys.platform != 'win32': raise NotImplementedError( 'CDF library install only supported on Windows') try: import html.parser as HTMLParser except ImportError: import HTMLParser #https://stackoverflow.com/questions/1713038/super-fails-with-error-typeerror-argument-1-must-be-type-not-classobj class LinkParser(HTMLParser.HTMLParser, object): def __init__(self, *args, **kwargs): self.links_found = [] super(LinkParser, self).__init__(*args, **kwargs) def handle_starttag(self, tag, attrs): if tag != 'a' or attrs[0][0] != 'href': return self.links_found.append(attrs[0][1]) import re import subprocess try: import urllib.request as u except ImportError: import urllib as u # Removed reference to spacepy #import spacepy #if spacepy.config.get('user_agent', None): # class AppURLopener(u.FancyURLopener): # version = spacepy.config['user_agent'] # u._urlopener = AppURLopener() baseurl = 'https://spdf.sci.gsfc.nasa.gov/pub/software/cdf/dist/' url = u.urlopen(baseurl) listing = url.read() url.close() p = LinkParser() p.feed(listing) cdfdist = [l for l in p.links_found if re.match('^cdf3\d_\d(?:_\d)?/$', l)] if not cdfdist: raise RuntimeError( "Couldn't find CDF distribution directory to download") cdfdist.sort(key=lambda x: x.rstrip('/').split('_')) cdfverbase = cdfdist[-1].rstrip('/') instfname = cdfverbase + ('_0' if cdfverbase.count('_') == 1 else '') + \ '-setup-{0}.exe'.format(len('%x' % sys.maxsize)*4) insturl = baseurl + cdfverbase + '/windows/' + instfname tmpdir = tempfile.mkdtemp() try: fname, status = u.urlretrieve(insturl, os.path.join(tmpdir, instfname)) subprocess.check_call([fname, '/install', '/q1'], shell=False) finally: shutil.rmtree(tmpdir) _libpath, _library = Library._find_lib() 
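#If the search above turned up no usable CDF shared library, fail now (at
#import time) with a hint rather than on first use; as the message says,
#setting os.environ["CDF_LIB"] to the directory holding the library before
#importing this module is the usual fix.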
if _library is None: raise Exception(('Cannot load CDF C library; checked {0}. ' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(_libpath))) from . import const lib = Library(_libpath, _library) """Module global library object. Initalized at module load time so all classes have ready access to the CDF library and a common state. E.g: >>> import pycdf >>> pycdf.lib.version (3, 3, 0, ' ') """ class CDFException(Exception): """ Base class for errors or warnings in the CDF library. Not normally used directly, but in subclasses :class:`CDFError` and :class:`CDFWarning`. Error messages provided by this class are looked up from the underlying C library. """ def __init__(self, status): """ Create a CDF Exception Uses CDF C library to look up an appropriate error message. Parameters ========== status : ctypes.c_long CDF status """ self.status = status self.string = 'CDF error ' + repr(status) + ', unable to get details.' message = ctypes.create_string_buffer(const.CDF_STATUSTEXT_LEN + 1) try: retval = lib._library.CDFlib(const.SELECT_, const.CDF_STATUS_, ctypes.c_long(status), const.GET_, const.STATUS_TEXT_, message, const.NULL_) if retval == const.CDF_OK: if isinstance(message.value, str): self.string = message.value elif isinstance(message.value, bytes): self.string = message.value.decode() except: pass def __str__(self): """ Error string associated with the library error. Returns ======= out : str Error message from the CDF library. """ return self.string class CDFError(CDFException): """Raised for an error in the CDF library.""" pass class CDFWarning(CDFException, UserWarning): """Used for a warning in the CDF library.""" def warn(self, level=4): """ Issues a warning based on the information stored in my exception Intended for use in check_status or similar wrapper function. Other Parameters ================ level : int optional (default 3), how far up the stack the warning should be reported. Passed directly to :class:`warnings.warn`. """ warnings.warn(self, self.__class__, level) class EpochError(Exception): """Used for errors in epoch routines""" pass def _compress(obj, comptype=None, param=None): """Set or check the compression of a :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param obj: object on which to set or check compression @type obj: :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param comptype: type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :py:mod:`pycdf.const`. If not specified, will not change compression. @type comptype: ctypes.c_long @param param: Compression parameter, see CDF CRM 4.10 and :py:mod:`pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
@type param: ctypes.c_long @return: (comptype, param) currently in effect @rtype: tuple """ if isinstance(obj, CDF): COMPRESSION_ = const.CDF_COMPRESSION_ elif isinstance(obj, Var): COMPRESSION_ = const.zVAR_COMPRESSION_ else: raise ValueError('Must specify a CDF or Var type.') validparams = {const.NO_COMPRESSION.value: [ctypes.c_long(0)], const.RLE_COMPRESSION.value: [const.RLE_OF_ZEROs], const.HUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.AHUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.GZIP_COMPRESSION.value: [ctypes.c_long(5), ctypes.c_long(1), ctypes.c_long(2), ctypes.c_long(3), ctypes.c_long(4), ctypes.c_long(6), ctypes.c_long(7), ctypes.c_long(8), ctypes.c_long(9), ], } comptypes = [const.NO_COMPRESSION, const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION, const.GZIP_COMPRESSION] comptypevalues = [i.value for i in comptypes] if comptype != None: if not hasattr(comptype, 'value'): comptype = ctypes.c_long(comptype) if param is None: if not comptype.value in validparams: raise CDFError(const.BAD_COMPRESSION) param = validparams[comptype.value][0] paramlist = (ctypes.c_long * 1)(param) obj._call(const.PUT_, COMPRESSION_, comptype, paramlist) params = (ctypes.c_long * const.CDF_MAX_PARMS)(*([0] * const.CDF_MAX_PARMS)) comptype = ctypes.c_long(0) percent = ctypes.c_long(0) obj._call(const.GET_, COMPRESSION_, ctypes.byref(comptype), ctypes.byref(params), ctypes.byref(percent)) param = params[0] if not comptype.value in comptypevalues: raise CDFError(const.BAD_COMPRESSION) validparamvalues = [i.value for i in validparams[comptype.value]] if not param in validparamvalues: raise CDFError(const.BAD_COMPRESSION_PARM) comptype = comptypes[comptypevalues.index(comptype.value)] if comptype in (const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION): param = validparams[comptype.value][validparamvalues.index(param)] return (comptype, param) class CDF(MutableMapping): """ Python object representing a CDF file. Open or create a CDF file by creating an object of this class. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. A readonly CDF with many variables may be slow to close. See :meth:`readonly`. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :meth:`close` or :meth:`save` when done. .. note:: Existing CDF files are opened read-only by default, see :meth:`readonly` to change. CDF supports the `with <http://docs.python.org/tutorial/inputoutput.html#methods-of-file-objects>`_ keyword, like other file objects, so: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... #do brilliant things with the CDF will open the CDF, execute the indented statements, and close the CDF when finished or when an error occurs. The `python docs <http://docs.python.org/reference/compound_stmts.html#with>`_ include more detail on this 'context manager' ability. 
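    For example, to read one variable and have the file closed automatically
    even if an error occurs (a sketch; the file and variable names are
    hypothetical):

    >>> with pycdf.CDF('cdf_filename.cdf') as cdffile:
    ...     epoch = cdffile['Epoch'][...]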
CDF objects behave like a python `dictionary <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_, where the keys are names of variables in the CDF, and the values, :class:`Var` objects. As a dictionary, they are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_ and it is easy to loop over all of the variables in a file. Some examples: #. List the names of all variables in the open CDF ``cdffile``: >>> cdffile.keys() >>> for k in cdffile: #Alternate ... print(k) #. Get a :class:`Var` object for the variable named ``Epoch``: >>> epoch = cdffile['Epoch'] #. Determine if a CDF contains a variable named ``B_GSE``: >>> if 'B_GSE' in cdffile: ... print('B_GSE is in the file') ... else: ... print('B_GSE is not in the file') #. Find how many variables are in the file: >>> print(len(cdffile)) #. Delete the variable ``Epoch`` from the open CDF file ``cdffile``: >>> del cdffile['Epoch'] #. Display a summary of variables and types in open CDF file ``cdffile``: >>> print(cdffile) #. Open the CDF named ``cdf_filename.cdf``, read *all* the data from all variables into dictionary ``data``, and close it when done or if an error occurs: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... data = cdffile.copy() This last example can be very inefficient as it reads the entire CDF. Normally it's better to treat the CDF as a dictionary and access only the data needed, which will be pulled transparently from disc. See :class:`Var` for more subtle examples. Potentially useful dictionary methods and related functions: - `in <http://docs.python.org/reference/expressions.html#in>`_ - `keys <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_ - :py:func:`len` - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - :py:func:`sorted` - :py:func:`~spacepy.toolbox.dictree` The CDF user's guide section 2.2 has more background information on CDF files. The :attr:`~CDF.attrs` Python attribute acts as a dictionary referencing CDF attributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`gAttrList` for more on the dictionary of global attributes. Creating a new CDF from a master (skeleton) CDF has similar syntax to opening one: >>> cdffile = pycdf.CDF('cdf_filename.cdf', 'master_cdf_filename.cdf') This creates and opens ``cdf_filename.cdf`` as a copy of ``master_cdf_filename.cdf``. Using a skeleton CDF is recommended over making a CDF entirely from scratch, but this is possible by specifying a blank master: >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') When CDFs are created in this way, they are opened read-write, see :py:meth:`readonly` to change. By default, new CDFs (without a master) are created in version 2 (backward-compatible) format. To create a version 3 CDF, use :meth:`Library.set_backward`: >>> pycdf.lib.set_backward(False) >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') Add variables by direct assignment, which will automatically set type and dimension based on the data provided: >>> cdffile['new_variable_name'] = [1, 2, 3, 4] or, if more control is needed over the type and dimensions, use :py:meth:`new`. Although it is supported to assign Var objects to Python variables for convenience, there are some minor pitfalls that can arise when changing a CDF that will not affect most users. 
This is only a concern when assigning a zVar object to a Python variable, changing the CDF through some other variable, and then trying to use the zVar object via the originally assigned variable. Deleting a variable: >>> var = cdffile['Var1'] >>> del cdffile['Var1'] >>> var[0] #fail, no such variable Renaming a variable: >>> var = cdffile['Var1'] >>> cdffile['Var1'].rename('Var2') >>> var[0] #fail, no such variable Renaming via the same variable works: >>> var = cdffile['Var1'] >>> var.rename('Var2') >>> var[0] #succeeds, aware of new name Deleting a variable and then creating another variable with the same name may lead to some surprises: >>> var = cdffile['Var1'] >>> var[...] = [1, 2, 3, 4] >>> del cdffile['Var1'] >>> cdffile.new('Var1', data=[5, 6, 7, 8] >>> var[...] [5, 6, 7, 8] .. autosummary:: ~CDF.attr_num ~CDF.attrs ~CDF.add_attr_to_cache ~CDF.add_to_cache ~CDF.backward ~CDF.checksum ~CDF.clear_attr_from_cache ~CDF.clear_from_cache ~CDF.clone ~CDF.close ~CDF.col_major ~CDF.compress ~CDF.copy ~CDF.from_data ~CDF.new ~CDF.raw_var ~CDF.readonly ~CDF.save ~CDF.var_num ~CDF.version .. attribute:: CDF.attrs Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. .. attribute:: CDF.backward True if this CDF was created in backward-compatible mode (for opening with CDF library before 3.x) .. automethod:: add_to_cache .. automethod:: add_attr_to_cache .. automethod:: attr_num .. automethod:: checksum .. automethod:: clear_from_cache .. automethod:: clear_attr_from_cache .. automethod:: clone .. automethod:: close .. automethod:: col_major .. automethod:: compress .. automethod:: copy .. automethod:: from_data .. automethod:: new .. automethod:: raw_var .. automethod:: readonly .. automethod:: save .. automethod:: var_num .. automethod:: version """ def __init__(self, pathname, masterpath=None, create=None, readonly=None): """Open or create a CDF file. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. Raises ====== CDFError if CDF library reports an error CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save` when done. 
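        To open an existing file for modification rather than reading, pass
        ``readonly=False`` (a sketch; the file name is hypothetical):

        >>> cdffile = pycdf.CDF('cdf_filename.cdf', readonly=False)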
""" if masterpath is not None: #Looks like we want to create if create is False: raise ValueError('Cannot specify a master CDF without creating a CDF') if readonly is True: raise ValueError('Cannot create a CDF in readonly mode') if create and readonly: raise ValueError('Cannot create a CDF in readonly mode') try: self.pathname = pathname.encode() except AttributeError: raise ValueError( 'pathname must be string-like: {0}'.format(pathname)) self._handle = ctypes.c_void_p(None) self._opened = False if masterpath is None and not create: self._open(True if readonly is None else readonly) elif masterpath: self._from_master(masterpath.encode()) else: self._create() lib.call(const.SELECT_, const.CDF_zMODE_, ctypes.c_long(2)) self._attrlistref = weakref.ref(gAttrList(self)) self.backward = self.version()[0] < 3 self._var_nums = {} """Cache of name-to-number mappings for variables in this CDF""" self._attr_info = {} """Cache of name-to-(number, global) mappings for attributes in this CDF""" def __del__(self): """Destructor; called when CDF object is destroyed. Close CDF file if there is still a valid handle. .. note:: To avoid data loss, explicitly call :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save`. """ if self._opened: self.close() def __delitem__(self, name): """Delete a zVariable in this CDF, by name or number Parameters ========== name : string or int Name or number of the CDF variable .. note: Variable numbers may change if variables are added or removed. Examples ======== Delete the variable ``Epoch`` from the open CDF file ``cdffile``. >>> del cdffile['Epoch'] """ self[name]._delete() def __enter__(self): """Context manager entrance function.""" return self def __exit__(self, type, value, traceback): """Context manager exit function. Close CDF file. """ self.close() def __getitem__(self, name): """Gets a zVariable in this CDF, by name or number The CDF acts like a dict @param name: Name or number of the CDF variable @type name: string or int @return: CDF variable named or numbered L{name} @rtype: :py:class:`pycdf.Var` @raise KeyError: for pretty much any problem in lookup @note: variable numbers may change if variables are added or removed. """ try: return Var(self, name) except CDFException as e: raise KeyError('{0}: {1}'.format(name, e)) def __setitem__(self, name, data): """Writes data to a zVariable in this CDF If the zVariable does not exist, will create one matching L{data}. If it does exist, will attempt to write L{data} to it without changing the type or dimensions. @param name: name or number of the variable to write @type name: str or int @param data: data to write, or a :py:class:`pycdf.Var` to copy """ if isinstance(data, Var): self.clone(data, name) elif name in self: self[name][...] 
= data if hasattr(data, 'attrs'): self[name].attrs.clone(data.attrs) else: self.new(name, data) def __iter__(self, current = 0): """Iterates over zVars in CDF Iterators for dicts return keys @note: Returned in variable-number order """ while current < self.__len__(): name = self[current].name() value = (yield name) if value is None: current += 1 else: current = self[value]._num() current += 1 def __len__(self): """Implements 'length' of CDF (number of zVars) @return: number of zVars in the CDF @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, const.CDF_NUMzVARS_, ctypes.byref(count)) return count.value def __contains__(self, key): """Determines whether a particular variable name is in the CDF @note: Essentially an efficiency function; L{__iter__} is called if this isn't defined @param key: key/variable name to check @type key: string @return: True if L{key} is the name of a variable in CDF, else False @rtype: Boolean """ try: foo = self[key] return True except KeyError as e: expected = str(key) + \ ": NO_SUCH_VAR: Named variable not found in this CDF." if expected in e.args: return False raise def __repr__(self): """Returns representation of CDF Cannot return anything that can be eval'd to create a copy of the CDF, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<CDF:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the CDF This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.CDF`, just the names, types, and sizes of all variables. (Attributes are not listed.) @return: description of the variables in the CDF @rtype: str """ if self._opened: return '\n'.join([key + ': ' + str(value) for (key, value) in sorted(self.items())]) #can get away with this sort because second value in tuple isn't #compared unless first are different, and variable name is unique. else: if isinstance(self.pathname, str): return 'Closed CDF {0}'.format(self.pathname) else: return 'Closed CDF {0}'.format(self.pathname.decode('ascii')) def _open(self, readonly=True): """Opens the CDF file (called on init) Will open an existing CDF file read/write. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.OPEN_, const.CDF_, self.pathname, ctypes.byref(self._handle)) self._opened = True if readonly: #Default is RW self.readonly(readonly) def _create(self): """Creates (and opens) a new CDF file Created at ``pathname``. Assumes zero-dimension r variables Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.CREATE_, const.CDF_, self.pathname, ctypes.c_long(0), (ctypes.c_long * 1)(0), ctypes.byref(self._handle)) self._opened = True def _from_master(self, master_path): """Creates a new CDF from a master CDF file ``master_path`` is copied to ``pathname`` and opened. Parameters ========== master_path : string location of the master CDF file Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. 
note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ if os.path.exists(self.pathname): raise CDFError(const.CDF_EXISTS) shutil.copy2(master_path, self.pathname) self._open(False) def _call(self, *args, **kwargs): """Select this CDF as current and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. Parameters ========== args : various, see :py:mod:`ctypes`. Passed directly to the CDF library interface. Useful constants are defined in the :doc:`const <pycdf_const>` module of this package. Returns ======= out : ctypes.c_long CDF status from the library .. note: Terminal NULL_ is automatically added to ``args``. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. """ return lib.call(const.SELECT_, const.CDF_, self._handle, *args, **kwargs) def clone(self, zVar, name=None, data=True): """ Clone a zVariable (from another CDF or this) into this CDF Parameters ========== zVar : :py:class:`Var` variable to clone Other Parameters ================ name : str Name of the new variable (default: name of the original) data : boolean (optional) Copy data, or only type, dimensions, variance, attributes? (default: True, copy data as well) Returns ======= out : :py:class:`Var` The newly-created zVar in this CDF """ if name is None: name = zVar.name() if name in self: del self[name] self.new(name, type=zVar.type(), recVary=zVar.rv(), dimVarys=zVar.dv(), dims=zVar._dim_sizes(), n_elements=zVar._nelems()) self[name].compress(*zVar.compress()) self[name].attrs.clone(zVar.attrs) if data: r = zVar._raw zVar._raw = True self.raw_var(name)[...] = zVar[...] zVar._raw = r return zVar def col_major(self, new_col=None): """ Finds the majority of this CDF file Other Parameters ================ new_col : boolean Specify True to change to column-major, False to change to row major, or do not specify to check the majority rather than changing it. (default is check only) Returns ======= out : boolean True if column-major, false if row-major """ if new_col != None: new_maj = const.COLUMN_MAJOR if new_col else const.ROW_MAJOR self._call(const.PUT_, const.CDF_MAJORITY_, new_maj) maj = ctypes.c_long(0) self._call(const.GET_, const.CDF_MAJORITY_, ctypes.byref(maj)) if not maj.value in (const.ROW_MAJOR.value, const.COLUMN_MAJOR.value): raise CDFError(const.BAD_MAJORITY) return maj.value == const.COLUMN_MAJOR.value def readonly(self, ro=None): """ Sets or check the readonly status of this CDF If the CDF has been changed since opening, setting readonly mode will have no effect. .. note:: Closing a CDF that has been opened readonly, or setting readonly False, may take a substantial amount of time if there are many variables in the CDF, as a (potentially large) cache needs to be cleared. Consider specifying ``readonly=False`` when opening the file if this is an issue. However, this may make some reading operations slower. Other Parameters ================ ro : Boolean True to set the CDF readonly, False to set it read/write, or leave out to check only. 
Returns ======= out : Boolean True if CDF is read-only, else False Raises ====== CDFError : if bad mode is set """ if ro == True: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYon) elif ro == False: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYoff) mode = ctypes.c_long(0) self._call(const.CONFIRM_, const.CDF_READONLY_MODE_, ctypes.byref(mode)) if mode.value == const.READONLYon.value: return True elif mode.value == const.READONLYoff.value: return False else: raise CDFError(const.BAD_READONLY_MODE.value) def checksum(self, new_val=None): """ Set or check the checksum status of this CDF. If checksums are enabled, the checksum will be verified every time the file is opened. Other Parameters ================ new_val : boolean True to enable checksum, False to disable, or leave out to simply check. Returns ======= out : boolean True if the checksum is enabled or False if disabled """ if new_val != None: self._call(const.PUT_, const.CDF_CHECKSUM_, const.MD5_CHECKSUM if new_val else const.NO_CHECKSUM) chk = ctypes.c_long(0) self._call(const.GET_, const.CDF_CHECKSUM_, ctypes.byref(chk)) if not chk.value in (const.MD5_CHECKSUM.value, const.NO_CHECKSUM.value): raise CDFError(const.BAD_CHECKSUM) return chk.value == const.MD5_CHECKSUM.value def close(self): """ Closes the CDF file Although called on object destruction (:meth:`~CDF.__del__`), to ensure all data are saved, the user should explicitly call :meth:`~CDF.close` or :meth:`~CDF.save`. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.CLOSE_, const.CDF_) self._opened = False def compress(self, comptype=None, param=None): """ Set or check the compression of this CDF Sets compression on entire *file*, not per-variable. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) Returns ======= out : tuple (comptype, param) currently in effect See Also ======== :meth:`Var.compress` Examples ======== Set file ``cdffile`` to gzip compression, compression level 9: >>> cdffile.compress(pycdf.const.GZIP_COMPRESSION, 9) """ return _compress(self, comptype, param) def new(self, name, data=None, type=None, recVary=True, dimVarys=None, dims=None, n_elements=None, compress=None, compress_param=None): """ Create a new zVariable in this CDF .. note:: Either ``data`` or ``type`` must be specified. If type is not specified, it is guessed from ``data``. Parameters ========== name : str name of the new variable Other Parameters ================ data data to store in the new variable. If this has a an ``attrs`` attribute (e.g., :class:`~spacepy.datamodel.dmarray`), it will be used to populate attributes of the new variable. type : ctypes.c_long CDF type of the variable, from :mod:`~pycdf.const`. See section 2.5 of the CDF user's guide for more information on CDF data types. recVary : boolean record variance of the variable (default True) dimVarys : list of boolean dimension variance of each dimension, default True for all dimensions. 
dims : list of int size of each dimension of this variable, default zero-dimensional. Note this is the dimensionality as defined by CDF, i.e., for record-varying variables it excludes the leading record dimension. See :py:class:`Var`. n_elements : int number of elements, should be 1 except for CDF_CHAR, for which it's the length of the string. compress : ctypes.c_long Compression to apply to this variable, default None. See :py:meth:`Var.compress`. compress_param : ctypes.c_long Compression parameter if compression used; reasonable default is chosen. See :py:meth:`Var.compress`. Returns ======= out : :py:class:`Var` the newly-created zVariable Raises ====== ValueError : if neither data nor sufficient typing information is provided. Notes ===== Any given data may be representable by a range of CDF types; if the type is not specified, pycdf will guess which the CDF types which can represent this data. This breaks down to: #. If input data is a numpy array, match the type of that array #. Proper kind (numerical, string, time) #. Proper range (stores highest and lowest number provided) #. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: #. Type that matches precision of data first, then #. integer type before float type, then #. Smallest type first, then #. signed type first, then #. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if ``data`` specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: #. absolute values between 0 and 3e-39 #. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. """ if type in (const.CDF_EPOCH16, const.CDF_INT8, const.CDF_TIME_TT2000) \ and self.backward: raise ValueError('Cannot use EPOCH16, INT8, or TIME_TT2000 ' 'in backward-compatible CDF') if not lib.supports_int8 and \ type in (const.CDF_INT8, const.CDF_TIME_TT2000): raise ValueError('INT8 and TIME_TT2000 require CDF library 3.4.0') if data is None: if type is None: raise ValueError('Must provide either data or a CDF type.') if dims is None: dims = [] if n_elements is None: n_elements = 1 else: (guess_dims, guess_types, guess_elements) = _Hyperslice.types(data) if dims is None: if recVary: if guess_dims == (): raise ValueError( 'Record-varying data cannot be scalar. 
' 'Specify NRV with CDF.new() or put data in array.') dims = guess_dims[1:] else: dims = guess_dims if type is None: type = guess_types[0] if type == const.CDF_EPOCH16.value and self.backward: type = const.CDF_EPOCH if n_elements is None: n_elements = guess_elements if dimVarys is None: dimVarys = [True for i in dims] recVary = const.VARY if recVary else const.NOVARY dimVarys = [const.VARY if dimVary else const.NOVARY for dimVary in dimVarys] if not hasattr(type, 'value'): type = ctypes.c_long(type) if type.value == const.CDF_INT8.value and not lib.supports_int8: raise ValueError( '64-bit integer support require CDF library 3.4.0') if type.value in (const.CDF_EPOCH16.value, const.CDF_INT8.value, const.CDF_TIME_TT2000.value) \ and self.backward: raise ValueError('Data requires EPOCH16, INT8, or TIME_TT2000; ' 'incompatible with backward-compatible CDF') new_var = Var(self, name, type, n_elements, dims, recVary, dimVarys) if compress != None: new_var.compress(compress, compress_param) if data is not None: new_var[...] = data if hasattr(data, 'attrs'): new_var.attrs.clone(data.attrs) return new_var def raw_var(self, name): """ Get a "raw" :class:`Var` object. Normally a :class:`Var` will perform translation of values for certain types (to/from Unicode for CHAR variables on Py3k, and to/from datetime for all time types). A "raw" object does not perform this translation, on read or write. This does *not* affect the data on disk, and in fact it is possible to maintain multiple Python objects with access to the same zVariable. Parameters ========== name : str name or number of the zVariable """ v = self[name] v._raw = True return v def save(self): """ Saves the CDF file but leaves it open. If closing the CDF, :meth:`close` is sufficient; there is no need to call :meth:`save` before :meth:`close`. .. note:: Relies on an undocumented call of the CDF C library, which is also used in the Java interface. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.SAVE_, const.CDF_) def copy(self): """ Make a copy of all data and attributes in this CDF Returns ======= out : :py:class:`CDFCopy` :class:`~spacepy.datamodel.SpaceData`-like object of all data """ return CDFCopy(self) def version(self): """ Get version of library that created this CDF Returns ======= out : tuple version of CDF library, in form (version, release, increment) """ ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) self._call(const.GET_, const.CDF_VERSION_, ctypes.byref(ver), const.GET_, const.CDF_RELEASE_, ctypes.byref(rel), const.GET_, const.CDF_INCREMENT_, ctypes.byref(inc)) return (ver.value, rel.value, inc.value) def _get_attrs(self): """Get attribute list Provide access to the CDF's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = gAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. 
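        For example, assuming the file defines a global attribute named
        ``Project`` (a hypothetical name), it reads like a dictionary entry
        and, if the CDF is open read/write, can be assigned the same way:

        >>> print(cdffile.attrs['Project'])
        >>> cdffile.attrs['Project'] = 'My project'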
""") def var_num(self, varname): """Get the variable number of a particular variable name This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : int Variable number of this zvariable. """ num = self._var_nums.get(varname, None) if num is None: #Copied from Var._get, which can hopefully be thinned varNum = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMBER_, varname, ctypes.byref(varNum)) num = varNum.value self._var_nums[varname] = num return num def attr_num(self, attrname): """Get the attribute number and scope by attribute name This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : tuple attribute number, scope (True for global) of this attribute """ res = self._attr_info.get(attrname, None) if res is None: #Copied from Var._get, which can hopefully be thinned attrNum = ctypes.c_long(0) self._call(const.GET_, const.ATTR_NUMBER_, attrname, ctypes.byref(attrNum)) scope = ctypes.c_long(0) self._call(const.SELECT_, const.ATTR_, attrNum, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) res = (attrNum.value, scope) self._attr_info[attrname] = res return res def clear_attr_from_cache(self, attrname): """Mark an attribute deleted in the name-to-number cache Will remove an attribute, and all attributes with higher numbers, from the attribute cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the attribute. Not this is NOT a string in Python 3! """ num, scope = self.attr_num(attrname) #All numbers higher than this are renumbered for a, n in list(self._attr_info.items()): if n[0] >= num: del self._attr_info[a] def clear_from_cache(self, varname): """Mark a variable deleted in the name-to-number cache Will remove a variable, and all variables with higher numbers, from the variable cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! """ num = self.var_num(varname) #All numbers higher than this are renumbered for v, n in list(self._var_nums.items()): if n >= num: del self._var_nums[v] def add_attr_to_cache(self, attrname, num, scope): """Add an attribute to the name-to-number cache This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable scope : bool True if global scope; False if variable scope. 
""" self._attr_info[attrname] = (num, scope) def add_to_cache(self, varname, num): """Add a variable to the name-to-number cache This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable """ self._var_nums[varname] = num #Note there is no function for delete, currently handled in Var.rename #and Attr.rename by just deleting from the dict directly. Maybe this #should be differen (maybe should be possible to follow a variable across #a rename...) class Var(MutableSequence): """ A CDF variable. This object does not directly store the data from the CDF; rather, it provides access to the data in a format that much like a Python list or numpy :class:`~numpy.ndarray`. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. The CDF user's guide, section 2.3, provides background on variables. .. note:: Not intended to be created directly; use methods of :class:`CDF` to gain access to a variable. A record-varying variable's data are viewed as a hypercube of dimensions n_dims+1 (the extra dimension is the record number). They are indexed in row-major fashion, i.e. the last index changes most frequently / is contiguous in memory. If the CDF is column-major, the data are transformed to row-major before return. Non record-varying variables are similar, but do not have the extra dimension of record number. Variables can be subscripted by a multidimensional index to return the data. Indices are in row-major order with the first dimension representing the record number. If the CDF is column major, the data are reordered to row major. Each dimension is specified by standard Python `slice <http://docs.python.org/tutorial/introduction.html#strings>`_ notation, with dimensions separated by commas. The ellipsis fills in any missing dimensions with full slices. The returned data are lists; Python represents multidimensional arrays as nested lists. The innermost set of lists represents contiguous data. .. note:: numpy 'fancy indexing' is *not* supported. Degenerate dimensions are 'collapsed', i.e. no list of only one element will be returned if a single subscript is specified instead of a range. (To avoid this, specify a slice like 1:2, which starts with 1 and ends before 2). Two special cases: 1. requesting a single-dimension slice for a record-varying variable will return all data for that record number (or those record numbers) for that variable. 2. Requests for multi-dimensional variables may skip the record-number dimension and simply specify the slice on the array itself. In that case, the slice of the array will be returned for all records. In the event of ambiguity (e.g., single-dimension slice on a one-dimensional variable), case 1 takes priority. Otherwise, mismatch between the number of dimensions specified in the slice and the number of dimensions in the variable will cause an :exc:`~exceptions.IndexError` to be thrown. This all sounds very complicated but it is essentially attempting to do the 'right thing' for a range of slices. An unusual case is scalar (zero-dimensional) non-record-varying variables. Clearly they cannot be subscripted normally. 
In this case, use the ``[...]`` syntax meaning 'access all data.': >>> import pycdf >>> testcdf = pycdf.CDF('test.cdf', '') >>> variable = testcdf.new('variable', recVary=False, ... type=pycdf.const.CDF_INT4) >>> variable[...] = 10 >>> variable <Var: CDF_INT4 [] NRV > >>> variable[...] 10 Reading any empty non-record-varying variable will return an empty with the same *number* of dimensions, but all dimensions will be of zero length. The scalar is, again, a special case: due to the inability to have a numpy array which is both zero-dimensional and empty, reading an NRV scalar variable with no data will return an empty one-dimensional array. This is really not recommended. As a list type, variables are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_; iterating over a variable returns a single complete record at a time. This is all clearer with examples. Consider a variable ``B_GSM``, with three elements per record (x, y, z components) and fifty records in the CDF. Then: 1. ``B_GSM[0, 1]`` is the y component of the first record. 2. ``B_GSM[10, :]`` is a three-element list, containing x, y, and z components of the 11th record. As a shortcut, if only one dimension is specified, it is assumed to be the record number, so this could also be written ``B_GSM[10]``. 3. ``B_GSM[...]`` reads all data for ``B_GSM`` and returns it as a fifty-element list, each element itself being a three-element list of x, y, z components. Multidimensional example: consider fluxes stored as a function of pitch angle and energy. Such a variable may be called Flux and stored as a two-dimensional array, with the first dimension representing (say) ten energy steps and the second, eighteen pitch angle bins (ten degrees wide, centered from 5 to 175 degrees). Assume 100 records stored in the CDF (i.e. 100 different times). 1. ``Flux[4]`` is a list of ten elements, one per energy step, each element being a list of 18 fluxes, one per pitch bin. All are taken from the fifth record in the CDF. 2. ``Flux[4, :, 0:4]`` is the same record, all energies, but only the first four pitch bins (roughly, field-aligned). 3. ``Flux[..., 0:4]`` is a 100-element list (one per record), each element being a ten-element list (one per energy step), each containing fluxes for the first four pitch bins. This slicing notation is very flexible and allows reading specifically the desired data from the CDF. All data are, on read, converted to appropriate Python data types; EPOCH, EPOCH16, and TIME_TT2000 types are converted to :class:`~datetime.datetime`. Data are returned in numpy arrays. .. note:: Although pycdf supports TIME_TT2000 variables, the Python :class:`~datetime.datetime` object does not support leap seconds. Thus, on read, any seconds past 59 are truncated to 59.999999 (59 seconds, 999 milliseconds, 999 microseconds). Potentially useful list methods and related functions: - `count <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `in <http://docs.python.org/reference/expressions.html#in>`_ - `index <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `len <http://docs.python.org/library/functions.html#len>`_ - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - `sorted <http://docs.python.org/library/functions.html#sorted>`_ The topic of array majority can be very confusing; good background material is available at `IDL Array Storage and Indexing <http://www.idlcoyote.com/misc_tips/colrow_major.html>`_. 
In brief, *regardless of the majority stored in the CDF*, pycdf will always present the data in the native Python majority, row-major order, also known as C order. This is the default order in `NumPy <http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html #internal-memory-layout-of-an-ndarray>`_. However, packages that render image data may expect it in column-major order. If the axes seem 'swapped' this is likely the reason. The :attr:`~Var.attrs` Python attribute acts as a dictionary referencing zAttributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`zAttrList` for more on the dictionary of attributes. With writing, as with reading, every attempt has been made to match the behavior of Python lists. You can write one record, many records, or even certain elements of all records. There is one restriction: only the record dimension (i.e. dimension 0) can be resized by write, as all records in a variable must have the same dimensions. Similarly, only whole records can be deleted. .. note:: Unusual error messages on writing data usually mean that pycdf is unable to interpret the data as a regular array of a single type matching the type and shape of the variable being written. A 5x4 array is supported; an irregular array where one row has five columns and a different row has six columns is not. Error messages of this type include: - ``Data must be well-formed, regular array of number, string, or datetime`` - ``setting an array element with a sequence.`` - ``shape mismatch: objects cannot be broadcast to a single shape`` For these examples, assume Flux has 100 records and dimensions [2, 3]. Rewrite the first record without changing the rest: >>> Flux[0] = [[1, 2, 3], [4, 5, 6]] Writes a new first record and delete all the rest: >>> Flux[...] = [[1, 2, 3], [4, 5, 6]] Write a new record in the last position and add a new record after: >>> Flux[99:] = [[[1, 2, 3], [4, 5, 6]], ... [[11, 12, 13], [14, 15, 16]]] Insert two new records between the current number 5 and 6: >>> Flux[5:6] = [[[1, 2, 3], [4, 5, 6]], [[11, 12, 13], ... [14, 15, 16]]] This operation can be quite slow, as it requires reading and rewriting the entire variable. (CDF does not directly support record insertion.) Change the first element of the first two records but leave other elements alone: >>> Flux[0:2, 0, 0] = [1, 2] Remove the first record: >>> del Flux[0] Removes record 5 (the sixth): >>> del Flux[5] Due to the need to work around a bug in the CDF library, this operation can be quite slow. Delete *all data* from ``Flux``, but leave the variable definition intact: >>> del Flux[...] .. note:: Although this interface only directly supports zVariables, zMode is set on opening the CDF so rVars appear as zVars. See p.24 of the CDF user's guide; pyCDF uses zMode 2. .. autosummary:: ~Var.attrs ~Var.compress ~Var.copy ~Var.dtype ~Var.dv ~Var.insert ~Var.name ~Var.rename ~Var.rv ~Var.shape ~Var.type .. attribute:: Var.attrs zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. .. automethod:: compress .. automethod:: copy .. autoattribute:: dtype .. automethod:: dv .. automethod:: insert .. automethod:: name .. automethod:: rename .. automethod:: rv .. autoattribute:: shape .. 
automethod:: type """ def __init__(self, cdf_file, var_name, *args): """Create or locate a variable Parameters ========== cdf_file : :py:class:`pycdf.CDF` CDF file containing this variable var_name : string name of this variable Other Parameters ================ args additional arguments passed to :py:meth:`_create`. If none, opens an existing variable. If provided, creates a new one. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning """ self.cdf_file = cdf_file #This is the definitive "identify" of variable self._name = None self._type = None #CDF type (long) self._raw = False #Raw access (skip all conversions) if len(args) == 0: self._get(var_name) else: self._create(var_name, *args) #Weak reference to attribute list (use attrs instead) #This avoids a reference loop self._attrlistref = weakref.ref(zAttrList(self)) def __getitem__(self, key): """Returns a slice from the data array. Details under :py:class:`pycdf.Var`. @return: The data from this variable @rtype: list-of-lists of appropriate type. @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) #Hyperslice mostly catches this sort of thing, but #an empty variable is a special case, since we might want to #WRITE to 0th record (which Hyperslice also supports) but #can't READ from it, and iterating over tries to read from it. if hslice.rv: if hslice.dimsizes[0] == 0 and hslice.degen[0] and \ hslice.starts[0] == 0: raise IndexError('record index out of range') #For NRV, again hslice will assume 0th record exists since we might #want to write. So ANY degenerate dim other than the glued-on 0th #suggests an explicit index that should fail. None degenerate suggests #make an empty array. #Note this is pulling a lot of hyperslice stuff into getitem! elif hslice.dimsizes[0] == 0: if len(hslice.degen) > 1 and max(hslice.degen[1:]): raise IndexError('record index out of range') else: #The zero-length dimension is degenerate so it gets chopped, #and you can't have a zero-length numpy array that still #maintains the size of all other dimensions. So just force #a zero-dim array and the rest will follow hslice.counts[...] = 0 #If this is a scalar, need to make a single non-degenerate #dimension so it can be empty. if len(hslice.counts) == 1: hslice.degen[0] = False result = hslice.create_array() if hslice.counts[0] != 0: hslice.select() lib.call(const.GET_, const.zVAR_HYPERDATA_, result.ctypes.data_as(ctypes.c_void_p)) return hslice.convert_input_array(result) def __delitem__(self, key): """Removes a record (or set of records) from the CDF Only whole records can be deleted, so the del call must either specify only one dimension or it must specify all elements of the non-record dimensions. This is *not* a way to resize a variable! Deleting records from the middle of a variable may be very slow in some circumstances. To work around a bug in CDF library versions 3.4.0 and before, all the data must be read in, the requested deletions done, and then all written back out. 
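        For example, ``del var[0]`` removes the first record of a
        record-varying zVariable ``var``, and ``del var[...]`` removes all
        records while leaving the variable definition in place (a sketch;
        ``var`` being any record-varying :py:class:`pycdf.Var`).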
@param key: index or slice to delete @type key: int or slice @raise TypeError: if an attempt is made to delete from a non record-varying variable, or to delete below the record level """ if not self.rv(): raise TypeError('Cannot delete records from non-record-varying ' 'variable.') hslice = _Hyperslice(self, key) if hslice.dims > 1 and (hslice.counts[1:] != hslice.dimsizes[1:]).any(): raise TypeError('Can only delete entire records.') if hslice.counts[0] == 0: return start = hslice.starts[0] count = hslice.counts[0] interval = hslice.intervals[0] dimsize = hslice.dimsizes[0] self._call() dangerous_delete = False if lib._del_middle_rec_bug and \ (interval != 1 or (start != 0 and start + count < dimsize)): #delete from middle is dangerous if only have one index entry entries = ctypes.c_long(0) lib.call(const.GET_, const.zVAR_nINDEXENTRIES_, ctypes.byref(entries)) dangerous_delete = (entries.value == 1) if dangerous_delete: data = self[...] data = numpy.delete( data, numpy.arange(start, start + count * interval, interval), 0) self[0:dimsize - count] = data first_rec = dimsize - count last_rec = dimsize - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif interval == 1: first_rec = ctypes.c_long(start) last_rec = ctypes.c_long(start + count - 1) lib.call(const.DELETE_, const.zVAR_RECORDS_, first_rec, last_rec) else: self._call() #delete from end to avoid renumbering of records for recno in range(start + (count - 1) * interval, start - 1, -1 * interval): lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(recno), ctypes.c_long(recno)) def __setitem__(self, key, data): """Puts a slice into the data array. Details under :py:class:`pycdf.Var`. @param key: index or slice to store @type key: int or slice @param data: data to store @type data: numpy.array @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. 
IndexError will @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) n_recs = hslice.counts[0] hslice.expand(data) cdf_type = self.type() if cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) else: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=self._np_type()) if cdf_type == const.CDF_EPOCH16.value: datashape = data.shape[:-1] else: datashape = data.shape #Check data sizes if datashape != tuple(hslice.expected_dims()): raise ValueError('attempt to assign data of dimensions ' + str(datashape) + ' to slice of dimensions ' + str(tuple(hslice.expected_dims()))) #Flip majority and reversed dimensions, see convert_input_array data = hslice.convert_output_array(data) #Handle insertions and similar weirdness if hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Specified slice ends before last record, so insert in middle saved_data = self[hslice.starts[0] + n_recs:] if hslice.counts[0] > 0: hslice.select() lib.call(const.PUT_, const.zVAR_HYPERDATA_, data.ctypes.data_as(ctypes.c_void_p)) if hslice.counts[0] < n_recs: first_rec = hslice.starts[0] + hslice.counts[0] last_rec = hslice.dimsizes[0] - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Put saved data in after inserted data self[hslice.starts[0] + hslice.counts[0]:] = saved_data def extend(self, data): """ Append multiple values to the end of this variable This is an efficiency function which overrides the base implementation in MutableSequence. Parameters ---------- data : the data to append """ self[len(self):] = data def insert(self, index, data): """ Inserts a *single* record before an index Parameters ---------- index : int index before which to insert the new record data : the record to insert """ self[index:index] = [data] def _create(self, var_name, datatype, n_elements = 1, dims = (), recVary = const.VARY, dimVarys = None): """Creates a new zVariable @param var_name: name of this variable @type var_name: string @param datatype: CDF data type @type datatype: ctypes.c_long @param n_elements: number of elements (should be 1 except for CDF_CHAR variables). @type n_elements: long @param dims: size of each dimension for multi-dimensional variable, or empty for a zero-dimensional @type dims: sequence of long @param recVary: record variance for this variable (VARY/NOVARY) @type recVary: long @param dimVarys: array of VARY or NOVARY, variance for each dimension @type dimVarys: sequence of long @return: new variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.new}. 
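        For example, ``cdf.new('B_GSM', type=const.CDF_INT4, dims=[3])``
        ultimately reaches this method to create a record-varying zVariable
        with a single three-element dimension (a sketch; the variable name
        is hypothetical).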
""" dim_array = (ctypes.c_long * len(dims))(*dims) enc_name = var_name.encode('ascii') if dimVarys is None: dim_vary_array = (ctypes.c_long * (len(dims) if len(dims) > 0 else 1))(const.VARY) else: dim_vary_array = (ctypes.c_long * len(dims))(*dimVarys) varNum = ctypes.c_long(0) self.cdf_file._call(const.CREATE_, const.zVAR_, enc_name, datatype, ctypes.c_long(n_elements), ctypes.c_long(len(dims)), dim_array, recVary, dim_vary_array, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) def _delete(self): """Removes this zVariable from the CDF @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ self._call(const.DELETE_, const.zVAR_) self.cdf_file.clear_from_cache(self._name) self._name = None def _get(self, var_name): """Gets an existing zVariable @param var_name: name of this variable @type var_name: string @return: variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.__getitem__}. """ if isinstance(var_name, str_classes): try: enc_name = var_name.encode('ascii').rstrip() except AttributeError: enc_name = var_name.rstrip() #already in ASCII #'touch' CDF to cause an error if the name isn't there; get number varNum = ctypes.c_long(0) self.cdf_file._call(const.GET_, const.zVAR_NUMBER_, enc_name, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) else: #Looking up by number name = ctypes.create_string_buffer(const.CDF_VAR_NAME_LEN256+1) self.cdf_file._call(const.SELECT_, const.zVAR_, ctypes.c_long(var_name), const.GET_, const.zVAR_NAME_, name) self._name = name.value.rstrip() self.cdf_file.add_to_cache(self._name, var_name) def _num(self): """Returns the zVar number for this variable @return: number of this zVar @rtype: int """ return self.cdf_file.var_num(self._name) def __len__(self): """Get number of records for this variable in this file @return: Number of records @rtype: long @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ count = ctypes.c_long(0) self._call(const.GET_, const.zVAR_MAXREC_, ctypes.byref(count)) return (count.value + 1) def __repr__(self): """Returns representation of the variable Cannot return anything that can be eval'd to create a copy, so just wrap the informal representation in angle brackets. @return: info on this zVar @rtype: str """ return '<Var:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the variable This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.Var`. 
@return: info on this zVar, CDFTYPE [dimensions] NRV (if not record-varying) @rtype: str """ if self.cdf_file._opened: cdftype = self.type() chartypes = (const.CDF_CHAR.value, const.CDF_UCHAR.value) rv = self.rv() typestr = lib.cdftypenames[cdftype] + \ ('*' + str(self._nelems()) if cdftype in chartypes else '' ) if rv: sizestr = str([len(self)] + self._dim_sizes()) else: sizestr = str(self._dim_sizes()) return typestr + ' ' + sizestr + ('' if rv else ' NRV') else: if isinstance(self._name, str): return 'zVar "{0}" in closed CDF {1}'.format( self._name, self.cdf_file.pathname) else: return 'zVar "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self.cdf_file.pathname.decode('ascii')) def _n_dims(self): """Get number of dimensions for this variable @return: the number of dimensions @rtype: long """ n_dims = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMDIMS_, ctypes.byref(n_dims)) return n_dims.value def _dim_sizes(self): """Get the dimension sizes for this variable @return: sequence of sizes @rtype: sequence of long @note: This will always be in Python order (i.e. row major, last index iterates most quickly), *regardless* of the majority of the CDF. """ sizes = (ctypes.c_long * const.CDF_MAX_DIMS)(0) self._call(const.GET_, const.zVAR_DIMSIZES_, sizes) sizes = sizes[0:self._n_dims()] return sizes def rv(self, new_rv=None): """ Gets or sets whether this variable has record variance If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Other Parameters ================ new_rv : boolean True to change to record variance, False to change to NRV, unspecified to simply check variance. Returns ======= out : Boolean True if record varying, False if NRV """ if new_rv != None: self._call(const.PUT_, const.zVAR_RECVARY_, const.VARY if new_rv else const.NOVARY) vary = ctypes.c_long(0) self._call(const.GET_, const.zVAR_RECVARY_, ctypes.byref(vary)) return vary.value != const.NOVARY.value def dv(self, new_dv=None): """ Gets or sets dimension variance of each dimension of variable. If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Parameters ========== new_dv : list of boolean Each element True to change that dimension to dimension variance, False to change to not dimension variance. (Unspecified to simply check variance.) Returns ======= out : list of boolean True if that dimension has variance, else false. """ ndims = self._n_dims() if new_dv != None: if len(new_dv) != ndims: raise ValueError('Must specify variance for ' + str(ndims) + 'dimensions.') varies = (ctypes.c_long * ndims)( *[const.VARY if dv else const.NOVARY for dv in new_dv]) self._call(const.PUT_, const.zVAR_DIMVARYS_, varies) if ndims == 0: return [] varies = (ctypes.c_long * const.CDF_MAX_DIMS)() self._call(const.GET_, const.zVAR_DIMVARYS_, varies) return [dv != const.NOVARY.value for dv in varies[0:ndims]] def _call(self, *args, **kwargs): """Select this CDF and variable and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. @param args: Passed directly to the CDF library interface. Useful constants are defined in the :py:mod:`pycdf.const` module of this package. @type args: various, see :py:mod:`ctypes`. 
@return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self.cdf_file._call( const.SELECT_, const.zVAR_, ctypes.c_long(self.cdf_file.var_num(self._name)), *args, **kwargs) def _np_type(self): """Returns the numpy type of this variable This is the numpy type that will come directly out of the CDF; see :meth:`dtype` for the representation post-conversion. Raises ====== CDFError : for library-reported error or failure to find numpy type Returns ======= out : dtype numpy dtype that will hold value from this variable """ cdftype = self.type() if cdftype == const.CDF_CHAR.value or cdftype == const.CDF_UCHAR.value: return numpy.dtype('S' + str(self._nelems())) try: return lib.numpytypedict[cdftype] except KeyError: raise CDFError(const.BAD_DATA_TYPE) def type(self, new_type=None): """ Returns or sets the CDF type of this variable Parameters ========== new_type : ctypes.c_long the new type from :mod:`~pycdf.const` Returns ======= out : int CDF type """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) n_elements = ctypes.c_long(self._nelems()) self._call(const.PUT_, const.zVAR_DATASPEC_, new_type, n_elements) self._type = None if self._type is None: cdftype = ctypes.c_long(0) self._call(const.GET_, const.zVAR_DATATYPE_, ctypes.byref(cdftype)) self._type = cdftype.value return self._type def _nelems(self): """Number of elements for each value in this variable This is the length of strings for CHAR and UCHAR, should be 1 otherwise. @return: length of strings @rtype: int """ nelems = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMELEMS_, ctypes.byref(nelems)) return nelems.value def name(self): """ Returns the name of this variable Returns ======= out : str variable's name """ if isinstance(self._name, str): return self._name elif isinstance(self._name, bytes): return self._name.decode() def compress(self, comptype=None, param=None): """ Set or check the compression of this variable Compression may not be changeable on variables with data already written; even deleting the data may not permit the change. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
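As an illustrative sketch (assuming the compression constants exposed in :mod:`~pycdf.const`), ``var.compress(const.GZIP_COMPRESSION, param=5)`` would request gzip at level 5, while ``var.compress()`` with no arguments only reports the current setting.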
Returns ======= out : tuple the (comptype, param) currently in effect """ return _compress(self, comptype, param) def copy(self): """ Copies all data and attributes from this variable Returns ======= out : :class:`VarCopy` list of all data in record order """ return VarCopy(self) def rename(self, new_name): """ Renames this variable Parameters ========== new_name : str the new name for this variable """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_VAR_NAME_LEN256: raise CDFError(const.BAD_VAR_NAME) self._call(const.PUT_, const.zVAR_NAME_, enc_name) self.cdf_file.add_to_cache( enc_name, self.cdf_file.var_num(self._name)) #Still in cache del self.cdf_file._var_nums[self._name] self._name = enc_name @property def shape(self): """ Provides the numpy array-like shape of this variable. Returns a tuple; first element is number of records (RV variable only) And the rest provide the dimensionality of the variable. .. note:: Assigning to this attribute will not change the shape. """ if self.rv(): return tuple([len(self)] + self._dim_sizes()) else: return tuple(self._dim_sizes()) @property def dtype(self): """ Provide the numpy dtype equivalent to the CDF type of this variable. Data from this variable will be returned in numpy arrays of this type. See Also -------- type """ cdftype = self.type() if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str is not bytes and not self._raw: return numpy.dtype('U' + str(self._nelems())) if cdftype in (const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value) and not self._raw: return numpy.dtype('O') return self._np_type() def _get_attrs(self): """Get attribute list Provide access to the zVar's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = zAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. """) class _Hyperslice(object): """Represents a CDF 'slice' used for the hyper CDF functions For internal module use only. @ivar dims: number of dimensions to this slice, usually number of dimensions to the variable plus one for the record, which represents the 0th (least rapidly varying) dimension. @type dims: int @ivar dimsizes: size of each dimension (0th is number of records) @type dimsizes: list of int @ivar starts: index of the start value for each dimension ('dimension indices' in CDF speak) @type starts: list of int @ivar counts: number of values to get from each dimension. Final result will be the product of everything in counts. ('dimension counts' in CDF speak) @type counts: numpy.array @ivar intervals: interval between successive indices to use for each dimension. ('dimension invervals' in CDF speak) @type intervals: list of int @ivar degen: is this dimension degenerate, i.e. should be removed in the returned dataset. A 3D array with one dimension degenerate will be returned as a 2D array (i.e. list-of-lists.) @type degen: numpy.array @ivar rev: should this dimension be returned in reverse order? 
@type rev: numpy.array @ivar column: is this slice in column-major mode (if false, row-major) @type column: boolean @ivar zvar: what CDF variable this object slices on @type zvar: :py:class:`pycdf.Var` @ivar expanded_key: fully-expanded version of the key passed to the constructor (all dimensions filled in) @type expanded_key: tuple @note: All dimension-related variables are stored row-major (Python order) """ def __init__(self, zvar, key): """Create a Hyperslice @param zvar: zVariable that this slices @type zvar: :py:class:`pycdf.Var` @param key: Python multi-dimensional slice as passed to __getitem__ @type key: tuple of slice and/or int @raise IndexError: if slice is out of range, mismatches dimensions, or otherwise unparsable. @raise ValueError: if slice has invalid values """ self.zvar = zvar self.rv = self.zvar.rv() #dim of records, + 1 record dim (NRV always is record 0) self.dims = zvar._n_dims() + 1 self.dimsizes = [len(zvar)] + \ zvar._dim_sizes() self.starts = [0] * self.dims self.counts = numpy.empty((self.dims,), dtype=numpy.int32) self.counts.fill(1) self.intervals = [1] * self.dims self.degen = numpy.zeros(self.dims, dtype=numpy.bool) self.rev = numpy.zeros(self.dims, dtype=numpy.bool) #key is: #1. a single value (integer or slice object) if called 1D #2. a tuple (of integers and/or slice objects) if called nD #3. Each item is either a single value (degenerate dim) # or a slice object. if not hasattr(key, '__len__'): #Not a container object, pack in tuple key = (key, ) if not self.rv: key = (0, ) + key #NRV, so always get 0th record (degenerate) key = self.expand_ellipsis(key, self.dims) if self.rv: #special-cases for RV variables if len(key) == 1: #get all data for this record(s) key = self.expand_ellipsis(key + (Ellipsis, ), self.dims) elif len(key) == self.dims - 1: #get same slice from each record key = (slice(None, None, None), ) + key if len(key) == self.dims: self.expanded_key = key for i in range(self.dims): idx = key[i] if hasattr(idx, 'start'): #slice (self.starts[i], self.counts[i], self.intervals[i], self.rev[i]) = \ self.convert_range(idx.start, idx.stop, idx.step, self.dimsizes[i]) else: #Single degenerate value if idx < 0: idx += self.dimsizes[i] if idx != 0 and (idx >= self.dimsizes[i] or idx < 0): raise IndexError('list index out of range') self.starts[i] = idx self.degen[i] = True else: raise IndexError('Slice does not match dimensions for zVar ' + str(zvar._name)) self.column = zvar.cdf_file.col_major() def expected_dims(self, data=None): """Calculate size of non-degenerate dimensions Figures out size, in each dimension, of expected input data @return: size of each dimension for this slice, excluding degenerate @rtype: list of int """ return [self.counts[i] for i in range(self.dims) if not self.degen[i]] def expand(self, data): """Expands the record dimension of this slice to hold a set of data If the length of data (outermost dimension) is larger than the record count (counts[0]) for this slice, expand the slice to hold all the data. This requires that the record dimension of the slice not be degenerate, and also that it not have been completely specified when the hyperslice was created (i.e. record dimension either ellipsis or no specified stop.) Does *not* expand any other dimension, since that's Very Hard in CDF. 
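For example (an illustrative sketch): if the variable currently holds 8 records and ``var[5:] = data`` is given 10 records of data, the record count of this slice grows from 3 to 10 so that records 5 through 14 are written; non-record-varying variables and single-integer (degenerate) record indices are never expanded.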
@param data: the data which are intended to be stored in this slice @type data: list """ rec_slice = self.expanded_key[0] if not self.rv or isinstance(data, str_classes) or self.degen[0] or \ not hasattr(rec_slice, 'stop'): return if len(data) < self.counts[0]: #Truncate to fit data if rec_slice.stop is None and rec_slice.step in (None, 1): self.counts[0] = len(data) elif len(data) > self.counts[0]: #Expand to fit data if rec_slice.step in (None, 1): self.counts[0] = len(data) def create_array(self): """Creates a numpy array to hold the data from this slice Returns ======= out : numpy.array array sized, typed, and dimensioned to hold data from this slice """ counts = self.counts degen = self.degen if self.column: counts = self.reorder(counts) degen = self.reorder(degen) #TODO: Forcing C order for now, revert to using self.column later array = numpy.empty( [counts[i] for i in range(len(counts)) if not degen[i]], self.zvar._np_type(), order='C') return numpy.require(array, requirements=('C', 'A', 'W')) def convert_input_array(self, buffer): """Converts a buffer of raw data from this slice EPOCH(16) variables always need to be converted. CHAR need converted to Unicode if py3k Parameters ========== buffer : numpy.array data as read from the CDF file Returns ======= out : numpy.array converted data """ result = self._flip_array(buffer) #Convert to derived types cdftype = self.zvar.type() if not self.zvar._raw: if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str != bytes: dt = numpy.dtype('U{0}'.format(result.dtype.itemsize)) result = numpy.require(numpy.char.array(result).decode(), dtype=dt) elif cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(result) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(result) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(result) return result def convert_output_array(self, buffer): """Convert a buffer of data that will go into this slice Parameters ========== buffer : numpy.array data to go into the CDF file Returns ======= out : numpy.array input with majority flipped and dimensions reversed to be suitable to pass directly to CDF library. """ buffer = self._flip_array(buffer) return numpy.require(buffer, requirements=('C', 'A', 'W')) def _flip_array(self, data): """ Operations for majority, etc. common between convert_input and _output """ cdftype = self.zvar.type() #Flip majority if any non-degenerate dimensions exist if self.column and not min(self.degen): #Record-number dim degen, swap whole thing if self.degen[0]: if cdftype == const.CDF_EPOCH16.value: #Maintain last dimension data = data.transpose( list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose() #Record-number dimension is not degenerate, so keep it first else: if cdftype == const.CDF_EPOCH16.value: data = data.transpose( [0] + list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose( [0] + list(range(len(data.shape) - 1, 0, -1))) #Reverse non-degenerate dimensions in rev #Remember that the degenerate indices are already gone! 
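#Build one slice per remaining (non-degenerate) dimension: step -1 where rev
#is set, a plain full slice otherwise; EPOCH16 gets an extra full slice so its
#trailing pair-of-doubles dimension is never reversed.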
if self.rev.any(): sliced = [(slice(None, None, -1) if self.rev[i] else slice(None)) for i in range(self.dims) if not self.degen[i]] if cdftype == const.CDF_EPOCH16.value: #don't reverse last dim sliced.extend(slice(None)) data = operator.getitem(data, tuple(sliced)) return data def select(self): """Selects this hyperslice in the CDF Calls the CDF library to select the CDF, variable, records, and array elements corresponding to this slice. """ args = (const.SELECT_, const.zVAR_RECNUMBER_, ctypes.c_long(self.starts[0]), const.SELECT_, const.zVAR_RECCOUNT_, ctypes.c_long(self.counts[0]), const.SELECT_, const.zVAR_RECINTERVAL_, ctypes.c_long(self.intervals[0])) if self.dims > 1: dims = self.dims - 1 args += (const.SELECT_, const.zVAR_DIMINDICES_, (ctypes.c_long * dims)(*self.starts[1:]), const.SELECT_, const.zVAR_DIMCOUNTS_, (ctypes.c_long * dims)(*self.counts[1:]), const.SELECT_, const.zVAR_DIMINTERVALS_, (ctypes.c_long * dims)(*self.intervals[1:])) self.zvar._call(*args) @staticmethod def expand_ellipsis(slices, n_dims): """Expands any ellipses into correct number of full-size slices @param slices: tuple of slices, integers, or ellipse objects @type slices: tuple @param n_dims: number of dimensions this slice is over @type n_dims: int @return: L{slices} with ellipses replaced by appropriate number of full-dimension slices @rtype: tuple @raise IndexError: if ellipses specified when already have enough dimensions """ if slices is Ellipsis: return tuple([slice(None, None, None) for i in range(n_dims)]) #Elements might be numpy arrays, so can't use in/index idx = [i for i, v in enumerate(slices) if v is Ellipsis] if not idx: #no ellipsis return slices if len(idx) > 1: #multiples! raise IndexError('Ellipses can only be used once per slice.') idx = idx[0] #how many dims to expand ellipsis to #remember the ellipsis is in len(slices) and must be replaced! extra = n_dims - len(slices) + 1 if extra < 0: raise IndexError('too many indices') result = slices[0:idx] + (slice(None), ) * extra + slices[idx+1:] return result @staticmethod def check_well_formed(data): """Checks if input data is well-formed, regular array""" d = numpy.asanyarray(data) if d.dtype == numpy.object: #this is probably going to be bad try: len(d.flat[0]) except TypeError: #at least it's not a list pass else: raise ValueError( 'Data must be well-formed, regular array of number, ' 'string, or datetime') @staticmethod def dimensions(data): """Finds the dimensions of a nested list-of-lists @param data: data of which dimensions are desired @type data: list (of lists) @return: dimensions of L{data}, in order outside-in @rtype: list of int @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) _Hyperslice.check_well_formed(d) return d.shape @staticmethod def types(data, backward=False): """Find dimensions and valid types of a nested list-of-lists Any given data may be representable by a range of CDF types; infer the CDF types which can represent this data. This breaks down to: 1. Proper kind (numerical, string, time) 2. Proper range (stores highest and lowest number) 3. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: 1. Type that matches precision of data first, then 2. integer type before float type, then 3. Smallest type first, then 4. signed type first, then 5. specifically-named (CDF_BYTE) vs. 
generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if L{data} specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: 1. absolute values between 0 and 3e-39 2. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. @param data: data for which dimensions and CDF types are desired @type data: list (of lists) @param backward: limit to pre-CDF3 types @type backward: bool @return: dimensions of L{data}, in order outside-in; CDF types which can represent this data; number of elements required (i.e. length of longest string) @rtype: 3-tuple of lists ([int], [ctypes.c_long], [int]) @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) dims = d.shape elements = 1 types = [] _Hyperslice.check_well_formed(d) if d.dtype.kind in ('S', 'U'): #it's a string types = [const.CDF_CHAR, const.CDF_UCHAR] elements = d.dtype.itemsize if d.dtype.kind == 'U': #UTF-8 uses 4 bytes per elements //= 4 elif d.size and hasattr(numpy.ma.getdata(d).flat[0], 'microsecond'): if max((dt.microsecond % 1000 for dt in d.flat)) > 0: types = [const.CDF_EPOCH16, const.CDF_EPOCH, const.CDF_TIME_TT2000] else: types = [const.CDF_EPOCH, const.CDF_EPOCH16, const.CDF_TIME_TT2000] if backward: del types[types.index(const.CDF_EPOCH16)] del types[-1] elif not lib.supports_int8: del types[-1] elif d is data or isinstance(data, numpy.generic): #numpy array came in, use its type (or byte-swapped) types = [k for k in lib.numpytypedict if (lib.numpytypedict[k] == d.dtype or lib.numpytypedict[k] == d.dtype.newbyteorder()) and not k in lib.timetypes] if (not lib.supports_int8 or backward) \ and const.CDF_INT8.value in types: del types[types.index(const.CDF_INT8.value)] #Maintain priority to match the ordered lists below: #float/double (44, 45) before real (21/22), and #byte (41) before int (1) before char (51). So hack. #Consider making typedict an ordered dict once 2.6 is dead. types.sort(key=lambda x: x % 50, reverse=True) if not types: #not a numpy array, or can't parse its type if d.dtype.kind == 'O': #Object. 
Try to make it numeric #Can't do safe casting from Object, so try and compare #Basically try most restrictive to least restrictive trytypes = (numpy.uint64, numpy.int64, numpy.float64) for t in trytypes: try: newd = d.astype(dtype=t) except: #Failure to cast, try next type continue if (newd == d).all(): #Values preserved, use this type d = newd #Continue with normal guessing, as if a list break else: #fell through without a match raise ValueError( 'Cannot convert generic objects to CDF type.') if d.dtype.kind in ('i', 'u'): #integer minval = numpy.min(d) maxval = numpy.max(d) if minval < 0: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_INT2, const.CDF_INT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 15, 2 ** 31, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] else: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_UINT1, const.CDF_INT2, const.CDF_UINT2, const.CDF_INT4, const.CDF_UINT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 8, 2 ** 15, 2 ** 16, 2 ** 31, 2 ** 32, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] types = [t for (t, c) in zip(types, cutoffs) if c > maxval and (minval >= 0 or minval >= -c)] if (not lib.supports_int8 or backward) \ and const.CDF_INT8 in types: del types[types.index(const.CDF_INT8)] else: #float if dims is (): if d != 0 and (abs(d) > 1.7e38 or abs(d) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] else: absolutes = numpy.abs(d[d != 0]) if len(absolutes) > 0 and \ (numpy.max(absolutes) > 1.7e38 or numpy.min(absolutes) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] types = [t.value if hasattr(t, 'value') else t for t in types] return (dims, types, elements) @staticmethod def reorder(seq): """Reorders seq to switch array majority Used to take an array of subscripts between row and column majority. First element is not touched, being the record number. @param seq: a sequence of *subscripts* @type seq: sequence of integers @return: seq with all but element 0 reversed in order @rtype: sequence of integers """ return numpy.concatenate((seq[0:1], numpy.flipud(seq)[:-1])) @staticmethod def convert_range(start, stop, step, size): """Converts a start/stop/step range to start/count/interval (i.e. changes from Python-style slice to CDF-style) @param start: index to start a slice at, may be none or negative @type start: int @param stop: index at end of slice (one-past, standard Python), may be none or negative @type stop: int @param step: interval for stepping through stlice @type step: int @param size: size of list to slice @type size: int @return: (start, count, interval, rev) where: 1. start is the start index, normalized to be within the size of the list and negatives handled 2. count is the number of records in the slice, guaranteed to stop before the end 3. interval is the skip between records 4. rev indicates whether the sequence should be reversed @rtype: (int, int, int, boolean) """ (start, stop, step) = slice(start, stop, step).indices(size) if step < 0: step *= -1 count = int((start - stop + step - 1) / step) start = start - (count - 1) * step rev = True else: count = int((stop - start + step - 1) / step) rev = False if count < 0: count = 0 start = 0 return (start, count, step, rev) class Attr(MutableSequence): """An attribute, g or z, for a CDF .. 
warning:: This class should not be used directly, but only in its subclasses, :class:`gAttr` and :class:`zAttr`. The methods listed here are safe to use in the subclasses. Represents a CDF attribute, providing access to the Entries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. An introduction to CDF attributes can be found in section 2.4 of the CDF user's guide. Each element of the list is a single Entry of the appropriate type. The index to the elements is the Entry number. Multi-dimensional slicing is *not* supported; an Entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry .. autosummary:: ~Attr.append ~Attr.has_entry ~Attr.insert ~Attr.max_idx ~Attr.new ~Attr.number ~Attr.rename ~Attr.type .. automethod:: append .. automethod:: has_entry .. automethod:: insert .. automethod:: max_idx .. automethod:: new .. automethod:: number .. automethod:: rename .. automethod:: type """ def __init__(self, cdf_file, attr_name, create=False): """Initialize this attribute @param cdf_file: CDF file containing this attribute @type cdf_file: :py:class:`pycdf.CDF` @param attr_name: Name of this attribute @type attr_name: str @param create: True to create attribute, False to look up existing. @type create: bool """ self._cdf_file = cdf_file self._raw = False if isinstance(attr_name, str_classes): try: self._name = attr_name.encode('ascii') except AttributeError: self._name = attr_name attrno = ctypes.c_long() if create: self._cdf_file._call(const.CREATE_, const.ATTR_, self._name, self.SCOPE, ctypes.byref(attrno)) self._cdf_file.add_attr_to_cache( self._name, attrno.value, self.SCOPE == const.GLOBAL_SCOPE) else: #Ensure exists, and populate cache. See scope note below attrno, scope = self._cdf_file.attr_num(self._name) else: name = ctypes.create_string_buffer(const.CDF_ATTR_NAME_LEN256 + 1) scope = ctypes.c_long(0) self._cdf_file._call(const.SELECT_, const.ATTR_, ctypes.c_long(attr_name)) #Because it's possible to create a gAttr Python objecting #referencing an Attribute with variable scope, and vice-versa, #do NOT assume the scope matches #(Higher level code checks for that being a bad thing.) self._cdf_file._call( const.GET_, const.ATTR_NAME_, name, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) self._name = name.value.rstrip() if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) self._cdf_file.add_attr_to_cache(self._name, attr_name, scope) def __getitem__(self, key): """Return a slice of Entries. Because Attributes may be sparse, a multi-element slice will return None for those elements which do not have associated Entries. @param key: index or range of Entry number to return @type key: slice or int @return: a list of entries, appropriate type. @raise IndexError: if L{key} is an int and that Entry number does not exist. 
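Example (sketch, with C{attribute} an existing L{Attr}): C{attribute[0:3]} returns a three-element list with C{None} in place of any missing Entries, while C{attribute[1]} raises IndexError if Entry 1 does not exist.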
""" if key is Ellipsis: key = slice(None, None, None) if hasattr(key, 'indices'): idx = range(*key.indices(self.max_idx() + 1)) return [self._get_entry(i) if self.has_entry(i) else None for i in idx] else: if self.has_entry(key): return self._get_entry(key) else: raise IndexError('list index ' + str(key) + ' out of range.') def _check_other_entries(self, types): """Try to get the type of this entry from others in the Attribute For zAttrs, checks if all other Entries are the same type, and at least one doesn't match its zVar, i.e. Entry type dominates (otherwise assumption is the Var type dominates). For gAttrs, checks all other Entries, and gives priority to the one that's earliest in the possible type list and exists in other Entries. This is only one component of Entry type guessing! :param list types: CDF types that are candidates (match the data) :return: The type discerned from other Entries, or None """ if self.ENTRY_ == const.zENTRY_: #If everything else is the same entry type, #and one is not the same as its var, probably #all entries should be of that type cand_et = None #The Entry type that might work one_var_diff = False #One Var has a type different from Entry for num in range(self.max_idx() + 1): if not self.has_entry(num): continue vartype = self._cdf_file[num].type() entrytype = self.type(num) if vartype != entrytype: one_var_diff = True if cand_et is None: if not entrytype in types: return None #One var has Entry with "impossible" type cand_et = entrytype elif cand_et != entrytype: return None #Two vars have Entries with different types if one_var_diff and cand_et is not None: return cand_et else: # Of those types which exist in other entries, # find the one which is earliest # in types, i.e. the preferred type entrytypes = [self.type(num) for num in range(self.max_idx() + 1) if self.has_entry(num)] entrytypes = [et for et in entrytypes if et in types] if entrytypes: return types[ min([types.index(et) for et in entrytypes])] return None def __setitem__(self, key, data): """Set a slice of Entries. @param key: index or range of Entry numbers to set @type key: slice or int @param data: the data to set these entries to. Normally each entry should be a sequence; if a scalar is provided, it is treated as a single-element list. @type data: scalar or list @raise ValueError: if size of {data} does not match size of L{key} @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. 
""" if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): #Single value, promote everything a dimension idx = (key, key + 1, 1) data = [data] else: idx = key.indices(self.max_idx() + 1) if key.step is None or key.step > 0: #Iterating forward, extend slice to match data if len(data) > len(range(*idx)): idx = (idx[0], idx[0] + idx[2] * len(data), idx[2]) #get, and check, types and sizes for all data #checks first so don't have error after changing half the Entries data_idx = -1 typelist = [] for i in range(*idx): data_idx += 1 if data_idx >= len(data): continue datum = data[data_idx] if datum is None: typelist[i] = (None, None, None) continue (dims, types, elements) = _Hyperslice.types( datum, backward=self._cdf_file.backward) if len(types) <= 0: raise ValueError('Cannot find a matching CDF type.') if len(dims) > 1: raise ValueError('Entries must be scalar or 1D.') elif len(dims) == 1 and isinstance(datum[0], str_classes): raise ValueError('Entry strings must be scalar.') entry_type = None if self.has_entry(i): #If the entry already exists, match its type entry_type = self.type(i) if not entry_type in types: entry_type = None if entry_type is None: #Check other entries for this attribute entry_type = self._check_other_entries(types) if entry_type is None and self.ENTRY_ == const.zENTRY_: #Fall back to zVar type vartype = self._cdf_file[i].type() if vartype in types: entry_type = vartype else: entry_type = types[0] elif entry_type is None: entry_type = types[0] if not entry_type in lib.numpytypedict: raise ValueError('Cannot find a matching numpy type.') typelist.append((dims, entry_type, elements)) data_idx = -1 for i in range(*idx): data_idx += 1 if data_idx >= len(data) or data[data_idx] is None: if self.has_entry(i): del self[i] continue datum = data[data_idx] (dims, entry_type, elements) = typelist[data_idx] self._write_entry(i, datum, entry_type, dims, elements) def __delitem__(self, key): """Delete a slice of Entries. @param key: index or range of Entry numbers to delete @type key: slice or int @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. """ if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): idx = (key, key + 1, 1) else: idx = key.indices(self.max_idx() + 1) for i in range(*idx): self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(i), const.DELETE_, self.ENTRY_) def __iter__(self, current=0): """Iterates over all entries in this Attribute Returns data from one entry at a time until reaches the end. @note: Returned in entry-number order. """ while current <= self.max_idx(): if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current += 1 def __reversed__(self, current=None): """Iterates over all entries in this Attribute Returns data from one entry at a time, starting at end and going to beginning. @note: Returned in entry-number order. """ if current is None: current = self.max_idx() while current >= 0: if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current -= 1 def __len__(self): """Number of Entries for this Attr. NOT same as max Entry number. 
@return: Number of Entries @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_NUMENTRIES_, ctypes.byref(count)) return count.value def __repr__(self): """Returns representation of an attribute Cannot return anything that can be eval'd to create a copy of the attribtute, so just wrap the informal representation in angle brackets. @return: all the data in this attribute @rtype: str """ return '<\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute This is an 'informal' representation in that it cannot be evaluated directly to create an L{Attr}. @return: all the data in this attribute @rtype: str """ if self._cdf_file._opened: return '\n'.join([str(item) for item in self]) else: if isinstance(self._name, str): return 'Attribute "{0}" in closed CDF {1}'.format( self._name, self._cdf_file.pathname) else: return 'Attribute "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self._cdf_file.pathname.decode('ascii')) def insert(self, index, data): """Insert an entry at a particular number Inserts entry at particular number while moving all subsequent entries to one entry number later. Does not close gaps. Parameters ========== index : int index where to put the new entry data : data for the new entry """ max_entry = self.max_idx() if index > max_entry: #Easy case self[index] = data return for i in range(max_entry, index - 1, -1): if self.has_entry(i+1): self.__delitem__(i+1) if self.has_entry(i): self.new(self.__getitem__(i), type=self.type(i), number=i+1) self[index] = data def append(self, data): """Add an entry to end of attribute Puts entry after last defined entry (does not fill gaps) Parameters ========== data : data for the new entry """ self[self.max_idx() + 1] = data def _call(self, *args, **kwargs): """Select this CDF and Attr and call the CDF internal interface @param args: Passed directly to the CDF library interface. @type args: various, see :py:mod:`ctypes`. @return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self._cdf_file._call( const.SELECT_, const.ATTR_, ctypes.c_long(self._cdf_file.attr_num(self._name)[0]), *args, **kwargs) def _entry_len(self, number): """Number of elements in an Entry @param number: number of Entry @type number: int @return: number of elements @rtype: int """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') count = ctypes.c_long(0) self._call( const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_NUMELEMS_, ctypes.byref(count)) return count.value def type(self, number, new_type=None): """Find or change the CDF type of a particular Entry number Parameters ========== number : int number of Entry to check or change Other Parameters ================ new_type type to change this Entry to, from :mod:`~pycdf.const`. Omit to only check type. Returns ======= out : int CDF variable type, see :mod:`~pycdf.const` Notes ===== If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) size = ctypes.c_long(self._entry_len(number)) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATASPEC_, new_type, size, ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') cdftype = ctypes.c_long(0) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATATYPE_, ctypes.byref(cdftype), ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') return cdftype.value def has_entry(self, number): """Check if this attribute has a particular Entry number Parameters ========== number : int number of Entry to check or change Returns ======= out : bool True if ``number`` is a valid entry number; False if not """ status = self._call(const.CONFIRM_, self.ENTRY_EXISTENCE_, ctypes.c_long(number), ignore=(const.NO_SUCH_ENTRY, )) return not status == const.NO_SUCH_ENTRY def max_idx(self): """Maximum index of Entries for this Attr Returns ======= out : int maximum Entry number """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_MAXENTRY_, ctypes.byref(count)) return count.value def new(self, data, type=None, number=None): """Create a new Entry in this Attribute .. note:: If ``number`` is provided and an Entry with that number already exists, it will be overwritten. Parameters ========== data data to put in the Entry Other Parameters ================ type : int type of the new Entry, from :mod:`~pycdf.const` (otherwise guessed from ``data``) number : int Entry number to write, default is lowest available number. """ if number is None: number = 0 while self.has_entry(number): number += 1 (dims, types, elements) = _Hyperslice.types( data, backward=self._cdf_file.backward) if type is None: #Guess based on other entries type = self._check_other_entries(types) if type is None and self.ENTRY_ == const.zENTRY_: #Try to match variable type vartype = self._cdf_file[number].type() if vartype in types: type = vartype if type is None: type = types[0] elif hasattr(type, 'value'): type = type.value self._write_entry(number, data, type, dims, elements) def number(self): """Find the attribute number for this attribute Returns ======= out : int attribute number """ no = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.ATTR_NUMBER_, self._name, ctypes.byref(no)) return no.value def global_scope(self): """Determine scope of this attribute. Returns ======= out : bool True if global (i.e. gAttr), False if zAttr """ return self._cdf_file.attr_num(self._name)[1] def rename(self, new_name): """Rename this attribute Renaming a zAttribute renames it for *all* zVariables in this CDF! 
Parameters ========== new_name : str the new name of the attribute """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_ATTR_NAME_LEN256: raise CDFError(const.BAD_ATTR_NAME) self._call(const.PUT_, const.ATTR_NAME_, enc_name) self._cdf_file.add_attr_to_cache( enc_name, *self._cdf_file.attr_num(self._name)) #still in cache del self._cdf_file._attr_info[self._name] self._name = enc_name def _get_entry(self, number): """Read an Entry associated with this L{Attr} @param number: number of Entry to return @type number: int @return: data from entry numbered L{number} @rtype: list or str """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') #Make a big enough buffer length = self._entry_len(number) cdftype = self.type(number) if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): buff = numpy.empty((), 'S{0}'.format(length), order='C') else: if not cdftype in lib.numpytypedict: raise CDFError(const.BAD_DATA_TYPE) buff = numpy.empty((length,), lib.numpytypedict[cdftype], order='C') buff = numpy.require(buff, requirements=('C', 'A', 'W')) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATA_, buff.ctypes.data_as(ctypes.c_void_p)) #decode if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): if str == bytes or self._raw: #Py2k, leave as bytes result = bytes(buff) else: #Py3k, make unicode result = str(numpy.char.array(buff).decode()) else: if not self._raw: if cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(buff) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(buff) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(buff) else: result = buff else: result = buff if length == 1: result = result[0] return result def _write_entry(self, number, data, cdf_type, dims, elements): """Write an Entry to this Attr. @param number: number of Entry to write @type number: int @param data: data to write @param cdf_type: the CDF type to write, from :py:mod:`pycdf.const` @param dims: dimensions of L{data} @type dims: list @param elements: number of elements in L{data}, 1 unless it is a string @type elements: int """ if len(dims) == 0: n_write = 1 else: n_write = dims[0] if cdf_type in (const.CDF_CHAR.value, const.CDF_UCHAR.value): data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.dtype('S' + str(elements))) n_write = elements elif cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data), except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) elif cdf_type in lib.numpytypedict: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=lib.numpytypedict[cdf_type]) else: raise CDFError(const.BAD_DATA_TYPE) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATA_, ctypes.c_long(cdf_type), ctypes.c_long(n_write), data.ctypes.data_as(ctypes.c_void_p)) def _delete(self): """Delete this Attribute Also deletes all Entries associated with it. 
""" self._call(const.DELETE_, const.ATTR_) self._cdf_file.clear_attr_from_cache(self._name) self._name = None class zAttr(Attr): """zAttribute for zVariables within a CDF. .. warning:: Because zAttributes are shared across all variables in a CDF, directly manipulating them may have unexpected consequences. It is safest to operate on zEntries via :class:`zAttrList`. .. note:: When accessing a zAttr, pyCDF exposes only the zEntry corresponding to the associated zVariable. See Also ======== :class:`Attr` """ ENTRY_ = const.zENTRY_ ENTRY_DATA_ = const.zENTRY_DATA_ SCOPE = const.VARIABLE_SCOPE ENTRY_EXISTENCE_ = const.zENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMzENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXzENTRY_ ENTRY_NUMELEMS_ = const.zENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.zENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.zENTRY_DATASPEC_ def insert(self, index, data): """Insert entry at particular index number Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError def append(self, index, data): """Add entry to end of attribute list Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError class gAttr(Attr): """Global Attribute for a CDF Represents a CDF attribute, providing access to the gEntries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. Normally accessed by providing a key to a :class:`gAttrList`: >>> attribute = cdffile.attrs['attribute_name'] >>> first_gentry = attribute[0] Each element of the list is a single gEntry of the appropriate type. The index to the elements is the gEntry number. A gEntry may be either a single string or a 1D array of numerical type. Entries of numerical type (everything but CDF_CHAR and CDF_UCHAR) with a single element are returned as scalars; multiple-element entries are returned as a list. No provision is made for accessing below the entry level; the whole list is returned at once (but Python's slicing syntax can be used to extract individual items from that list.) Multi-dimensional slicing is *not* supported; an entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry gEntries are *not* necessarily contiguous; a gAttribute may have an entry 0 and entry 2 without an entry 1. :meth:`~Attr.len` will return the *number* of gEntries; use :meth:`~Attr.max_idx` to find the highest defined gEntry number and :meth:`~Attr.has_entry` to determine if a particular gEntry number exists. Iterating over all entries is also supported:: >>> entrylist = [entry for entry in attribute] Deleting gEntries will leave a "hole": >>> attribute[0:3] = [1, 2, 3] >>> del attribute[1] >>> attribute.has_entry(1) False >>> attribute.has_entry(2) True >>> print attribute[0:3] [1, None, 3] Multi-element slices over nonexistent gEntries will return ``None`` where no entry exists. Single-element indices for nonexistent gEntries will raise ``IndexError``. Assigning ``None`` to a gEntry will delete it. 
When assigning to a gEntry, the type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing gEntry of the same number in this gAttribute #. other gEntries in this gAttribute #. data-matching constraints described in :meth:`CDF.new`. See Also ======== :class:`Attr` """ ENTRY_ = const.gENTRY_ ENTRY_DATA_ = const.gENTRY_DATA_ SCOPE = const.GLOBAL_SCOPE ENTRY_EXISTENCE_ = const.gENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMgENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXgENTRY_ ENTRY_NUMELEMS_ = const.gENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.gENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.gENTRY_DATASPEC_ class AttrList(MutableMapping): """Object representing a list of attributes. .. warning:: This class should not be used directly, but only via its subclasses, :class:`gAttrList` and :class:`zAttrList`. Methods listed here are safe to use from the subclasses. .. autosummary:: ~AttrList.clone ~AttrList.copy ~AttrList.from_dict ~AttrList.new ~AttrList.rename .. automethod:: clone .. automethod:: copy .. automethod:: from_dict .. automethod:: new .. automethod:: rename """ def __init__(self, cdf_file, special_entry=None): """Initialize the attribute collection @param cdf_file: CDF these attributes are in @type cdf_file: :py:class:`pycdf.CDF` @param special_entry: callable which returns a "special" entry number, used to limit results for zAttrs to those which match the zVar (i.e. the var number) @type special_entry: callable """ self._cdf_file = cdf_file self.special_entry = special_entry def __getitem__(self, name): """Find an Attribute by name @param name: name of the Attribute to return @type name: str @return: attribute named L{name} @rtype: L{Attr} @raise KeyError: if there is no attribute named L{name} @raise CDFError: other errors in CDF library """ try: attrib = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attrib.global_scope() != self.global_scope: raise KeyError(name + ': no ' + self.attr_name + ' by that name.') return attrib def __setitem__(self, name, data): """Create an Attribute or change its entries @param name: name of Attribute to change @type name: str @param data: Entries to populate this Attribute with. Any existing Entries will be deleted! Another C{Attr} may be specified, in which case all its entries are copied. @type data: scalar, list, or L{Attr} """ if isinstance(data, AttrList): if name in self: del self[name] attr = self._get_or_create(name) for entryno in range(data.max_idx()): if data.has_entry(entryno): attr.new(data[entryno], data.type(entryno), entryno) else: attr = self._get_or_create(name) if isinstance(data, str_classes): data = [data] else: try: junk = len(data) except TypeError: data = [data] attr[:] = data del attr[len(data):] def __delitem__(self, name): """Delete an Attribute (and all its entries) @param name: name of Attribute to delete @type name: str """ try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) attr._delete() def __iter__(self, current=0): """Iterates over all Attr in this CDF or variable Returns name of one L{Attr} at a time until reaches the end. @note: Returned in number order. 
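Example (sketch): for the global list, ``names = [name for name in cdffile.attrs]`` collects the names of all gAttributes in attribute-number order.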
""" count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) while current < count.value: candidate = self.AttrType(self._cdf_file, current) if candidate.global_scope() == self.global_scope: if self.special_entry is None or \ candidate.has_entry(self.special_entry()): if str == bytes: value = yield(candidate._name) else: value = yield(candidate._name.decode()) if value != None: current = self[value].number() current += 1 def __repr__(self): """Returns representation of attribute list Cannot return anything that can be eval'd to create a copy of the list, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<' + self.__class__.__name__ + ':\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute list This is an 'informal' representation in that it cannot be evaluated directly to create an L{AttrList}. @return: all the data in this list of attributes @rtype: str """ if self._cdf_file._opened: return '\n'.join([key + ': ' + ( ('\n' + ' ' * (len(key) + 2)).join( [str(value[i]) + ' [' + lib.cdftypenames[value.type(i)] + ']' for i in range(value.max_idx() + 1) if value.has_entry(i)]) if isinstance(value, Attr) else str(value) + ' [' + lib.cdftypenames[self.type(key)] + ']' ) for (key, value) in sorted(self.items())]) else: if isinstance(self._cdf_file.pathname, str): return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname) else: return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname.decode('ascii')) def clone(self, master, name=None, new_name=None): """ Clones another attribute list, or one attribute from it, into this list. Parameters ========== master : AttrList the attribute list to copy from. This can be any dict-like object. Other Parameters ================ name : str (optional) name of attribute to clone (default: clone entire list) new_name : str (optional) name of the new attribute, default ``name`` """ if name is None: self._clone_list(master) else: self._clone_attr(master, name, new_name) def copy(self): """ Create a copy of this attribute list Returns ======= out : dict copy of the entries for all attributes in this list """ return dict((key, value[:] if isinstance(value, Attr) else value) for (key, value) in self.items()) def new(self, name, data=None, type=None): """ Create a new Attr in this AttrList Parameters ========== name : str name of the new Attribute Other Parameters ================ data data to put into the first entry in the new Attribute type CDF type of the first entry from :mod:`~pycdf.const`. Only used if data are specified. Raises ====== KeyError : if the name already exists in this list """ if name in self: raise KeyError(name + ' already exists.') #A zAttr without an Entry in this zVar will be a "get" not "create" attr = self._get_or_create(name) if data is not None: if self.special_entry is None: attr.new(data, type) else: attr.new(data, type, self.special_entry()) def rename(self, old_name, new_name): """ Rename an attribute in this list Renaming a zAttribute renames it for *all* zVariables in this CDF! Parameters ========== old_name : str the current name of the attribute new_name : str the new name of the attribute """ AttrList.__getitem__(self, old_name).rename(new_name) def from_dict(self, in_dict): """ Fill this list of attributes from a dictionary .. deprecated:: 0.1.5 Use :meth:`~pycdf.AttrList.clone` instead; it supports cloning from dictionaries. 
Parameters ========== in_dict : dict Attribute list is populated entirely from this dictionary; all existing attributes are deleted. """ warnings.warn("from_dict is deprecated and will be removed. Use clone.", DeprecationWarning) for k in in_dict: self[k] = in_dict[k] for k in list(self): if not k in in_dict: del self[k] def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{AttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name self[new_name] = master[name] def _clone_list(self, master): """Clones this attribute list from another @param master: the attribute list to copy from @type master: L{AttrList} """ for name in master: self._clone_attr(master, name) for name in list(self): #Can't iterate over a list we're changing if not name in master: del self[name] def _get_or_create(self, name): """Retrieve L{Attr} or create it if it doesn't exist @param name: name of the attribute to look up or create @type name: str @return: attribute with this name @rtype: L{Attr} """ attr = None try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status != const.NO_SUCH_ATTR: raise if attr is None: attr = self.AttrType(self._cdf_file, name, True) elif attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) return attr class gAttrList(AttrList): """ Object representing *all* the gAttributes in a CDF. Normally accessed as an attribute of an open :class:`CDF`: >>> global_attribs = cdffile.attrs Appears as a dictionary: keys are attribute names; each value is an attribute represented by a :class:`gAttr` object. To access the global attribute TEXT: >>> text_attr = cdffile.attrs['TEXT'] See Also ======== :class:`AttrList` """ AttrType = gAttr attr_name = 'gAttribute' global_scope = True def __len__(self): """ Number of gAttributes in this CDF Returns ======= out : int number of gAttributes in the CDF """ count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMgATTRS_, ctypes.byref(count)) return count.value class zAttrList(AttrList): """Object representing *all* the zAttributes in a zVariable. Normally accessed as an attribute of a :class:`Var` in an open CDF: >>> epoch_attribs = cdffile['Epoch'].attrs Appears as a dictionary: keys are attribute names, values are the value of the zEntry associated with the appropriate zVariable. Each vAttribute in a CDF may only have a *single* entry associated with each variable. The entry may be a string, a single numerical value, or a series of numerical values. Entries with multiple values are returned as an entire list; direct access to the individual elements is not possible. Example: finding the first dependency of (ISTP-compliant) variable ``Flux``: >>> print cdffile['Flux'].attrs['DEPEND_0'] zAttributes are shared among zVariables, one zEntry allowed per zVariable. (pyCDF hides this detail.) Deleting the last zEntry for a zAttribute will delete the underlying zAttribute. zEntries are created and destroyed by the usual dict methods on the zAttrlist: >>> epoch_attribs['new_entry'] = [1, 2, 4] #assign a list to new zEntry >>> del epoch_attribs['new_entry'] #delete the zEntry The type of the zEntry is guessed from data provided. 
The type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing zEntry corresponding to this zVar #. other zEntries in this zAttribute #. the type of this zVar #. data-matching constraints described in :py:meth:`CDF.new` See Also ======== :class:`AttrList` """ AttrType = zAttr attr_name = 'zAttribute' global_scope = False def __init__(self, zvar): """Initialize the attribute collection @param zvar: zVariable these attributes are in @param zvar: :py:class:`pycdf.Var` """ super(zAttrList, self).__init__(zvar.cdf_file, zvar._num) self._zvar = zvar def __getitem__(self, name): """Find an zEntry by name @param name: name of the zAttribute to return @type name: str @return: attribute named L{name} @rtype: L{zAttr} @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if attrib.has_entry(zvar_num): attrib._raw = self._zvar._raw return attrib[zvar_num] else: raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) def __delitem__(self, name): """Delete an zEntry by name @param name: name of the zEntry to delete @type name: str @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library @note: If this is the only remaining entry, the Attribute will be deleted. """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(str(name) + ': no such attribute for variable ' + str(self._zvar._name)) del attrib[zvar_num] if len(attrib) == 0: attrib._delete() def __setitem__(self, name, data): """Sets a zEntry by name The type of the zEntry is guessed from L{data}. The type is chosen to match the data; subject to that constraint, it will try to match (in order): 1. existing zEntry corresponding to this zVar 2. other zEntries in this zAttribute 3. the type of this zVar 4. data-matching constraints described in L{_Hyperslice.types} @param name: name of zAttribute; zEntry for this zVariable will be set in zAttribute by this name @type name: str @raise CDFError: errors in CDF library @raise ValueError: if unable to find a valid CDF type matching L{data}, or if L{data} is the wrong dimensions. """ try: attr = super(zAttrList, self).__getitem__(name) except KeyError: attr = zAttr(self._cdf_file, name, True) attr._raw = self._zvar._raw attr[self._zvar._num()] = data def __len__(self): """Number of zAttributes in this variable @return: number of zAttributes in the CDF which have entries for this variable. @rtype: int """ length = 0 count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) current = 0 while current < count.value: candidate = zAttr(self._cdf_file, current) if not candidate.global_scope(): if candidate.has_entry(self._zvar._num()): length += 1 current += 1 return length def type(self, name, new_type=None): """Find or change the CDF type of a zEntry in this zVar @param name: name of the zAttr to check or change @type name: str @param new_type: type to change it to, see :py:mod:`pycdf.const` @type new_type: ctypes.c_long @return: CDF variable type, see :py:mod:`pycdf.const` @rtype: int @note: If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) return attrib.type(zvar_num, new_type) def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{zAttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name if new_name in self: del self[new_name] self.new(new_name, master[name], master.type(name) if hasattr(master, 'type') else None)
dv
Gets or sets the dimension variance of each dimension of the variable. If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Parameters ========== new_dv : list of boolean Each element True to make that dimension vary, False to make it non-varying. (Omit to simply check the variance.) Returns ======= out : list of boolean True if that dimension has variance, else False.
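A minimal usage sketch (hedged; the variable name and dimensionality are illustrative):

>>> var = cdffile['Flux']    # a 2-D zVariable
>>> var.dv()                 # e.g. [True, True]
>>> var.dv([True, False])    # second dimension no longer varies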
#!/usr/bin/env python # -*- coding: utf-8 -*- """ das developers note: This a is modification of the original SpacePy pycdf package. All refereneces to the greater spacepy package have been removed to create a small standalone module. --cwp 2018-10-18 The libcdf.so location code has been changed to find the version installed in anaconda. --cwp 2020-04-06 This package provides a Python interface to the Common Data Format (CDF) library used for many NASA missions, available at http://cdf.gsfc.nasa.gov/. It is targeted at Python 2.6+ and should work without change on either Python 2 or Python 3. The interface is intended to be 'pythonic' rather than reproducing the C interface. To open or close a CDF and access its variables, see the :class:`CDF` class. Accessing data within the variables is via the :class:`Var` class. The :data:`lib` object provides access to some routines that affect the functionality of the library in general. The :mod:`~pycdf.const` module contains constants useful for accessing the underlying library. Authors: Jon Niehof Institution: University of New Hampshire Contact: [email protected] Copyright 2010-2015 Los Alamos National Security, LLC. """ __contact__ = 'Jon Niehof, [email protected]' try: from collections.abc import MutableMapping, MutableSequence except ImportError: from collections import MutableMapping, MutableSequence import ctypes import ctypes.util import datetime import operator import os import os.path import shutil import sys import tempfile import warnings import weakref import numpy import numpy.ma #Import const AFTER library loaded, so failed load doesn't leave half-imported #from . import const try: str_classes = (str, bytes, unicode) except NameError: str_classes = (str, bytes) class Library(object): """ Abstraction of the base CDF C library and its state. Not normally intended for end-user use. An instance of this class is created at package load time as the :data:`~pycdf.lib` variable, providing access to the underlying C library if necessary. The CDF library itself is described in section 2.1 of the CDF user's guide, as well as the CDF C reference manual. Calling the C library directly requires knowledge of :mod:`ctypes`. Instantiating this object loads the C library, see :doc:`/pycdf` docs for details. .. autosummary:: ~Library.call ~Library.check_status ~Library.datetime_to_epoch ~Library.datetime_to_epoch16 ~Library.datetime_to_tt2000 ~Library.epoch_to_datetime ~Library.epoch_to_epoch16 ~Library.epoch_to_num ~Library.epoch_to_tt2000 ~Library.epoch16_to_datetime ~Library.epoch16_to_epoch ~Library.epoch16_to_tt2000 ~Library.set_backward supports_int8 ~Library.tt2000_to_datetime ~Library.tt2000_to_epoch ~Library.tt2000_to_epoch16 v_datetime_to_epoch v_datetime_to_epoch16 v_datetime_to_tt2000 v_epoch_to_datetime v_epoch_to_tt2000 v_epoch16_to_datetime v_epoch16_to_tt2000 v_tt2000_to_datetime v_tt2000_to_epoch v_tt2000_to_epoch16 libpath version .. automethod:: call .. automethod:: check_status .. automethod:: datetime_to_epoch .. automethod:: datetime_to_epoch16 .. automethod:: datetime_to_tt2000 .. automethod:: epoch_to_datetime .. automethod:: epoch_to_epoch16 .. automethod:: epoch_to_num .. automethod:: epoch_to_tt2000 .. automethod:: epoch16_to_datetime .. automethod:: epoch16_to_epoch .. automethod:: epoch16_to_tt2000 .. automethod:: set_backward .. attribute:: supports_int8 True if this library supports INT8 and TIME_TT2000 types; else False. .. automethod:: tt2000_to_datetime .. automethod:: tt2000_to_epoch .. 
automethod:: tt2000_to_epoch16 .. method:: v_datetime_to_epoch(datetime) A vectorized version of :meth:`datetime_to_epoch` which takes a numpy array of datetimes as input and returns an array of epochs. .. method:: v_datetime_to_epoch16(datetime) A vectorized version of :meth:`datetime_to_epoch16` which takes a numpy array of datetimes as input and returns an array of epoch16. .. method:: v_datetime_to_tt2000(datetime) A vectorized version of :meth:`datetime_to_tt2000` which takes a numpy array of datetimes as input and returns an array of TT2000. .. method:: v_epoch_to_datetime(epoch) A vectorized version of :meth:`epoch_to_datetime` which takes a numpy array of epochs as input and returns an array of datetimes. .. method:: v_epoch_to_tt2000(epoch) A vectorized version of :meth:`epoch_to_tt2000` which takes a numpy array of epochs as input and returns an array of tt2000s. .. method:: v_epoch16_to_datetime(epoch0, epoch1) A vectorized version of :meth:`epoch16_to_datetime` which takes a numpy array of epoch16 as input and returns an array of datetimes. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_epoch16_to_tt2000(epoch16) A vectorized version of :meth:`epoch16_to_tt2000` which takes a numpy array of epoch16 as input and returns an array of tt2000s. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_tt2000_to_datetime(tt2000) A vectorized version of :meth:`tt2000_to_datetime` which takes a numpy array of tt2000 as input and returns an array of datetimes. .. method:: v_tt2000_to_epoch(tt2000) A vectorized version of :meth:`tt2000_to_epoch` which takes a numpy array of tt2000 as input and returns an array of epochs. .. method:: v_tt2000_to_epoch16(tt2000) A vectorized version of :meth:`tt2000_to_epoch16` which takes a numpy array of tt2000 as input and returns an array of epoch16. .. attribute:: libpath The path where pycdf found the CDF C library, potentially useful in debugging. If this contains just the name of a file (with no path information), then the system linker found the library for pycdf. On Linux, ``ldconfig -p`` may be useful for displaying the system's library resolution. .. attribute:: version Version of the CDF library, (version, release, increment, subincrement) """ def __init__(self, libpath=None, library=None): """Load the CDF C library. Searches for the library in the order: 1. Appropriately-named file in CDF_LIB 2. Appropriately-named file in CDF_BASE 3. Standard library search path @raise CDFError: BAD_DATA_TYPE if can't map types properly """ if not 'CDF_TMP' in os.environ: os.environ['CDF_TMP'] = tempfile.gettempdir() if not library: if not libpath: self.libpath, self._library = self._find_lib() if self._library is None: raise Exception(( 'Cannot load CDF C library; checked {0}. 
' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(self.libpath))) else: self._library = ctypes.CDLL(libpath) self.libpath = libpath else: self._library = library self.libpath = libpath self._library.CDFlib.restype = ctypes.c_long #commonly used, so set it up here self._library.EPOCHbreakdown.restype = ctypes.c_long self._library.computeEPOCH.restype = ctypes.c_double self._library.computeEPOCH.argtypes = [ctypes.c_long] * 7 self._library.computeEPOCH16.restype = ctypes.c_double self._library.computeEPOCH16.argtypes = [ctypes.c_long] * 10 + \ [ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDFsetFileBackward'): self._library.CDFsetFileBackward.restype = None self._library.CDFsetFileBackward.argtypes = [ctypes.c_long] #Map old name to the 3.7.1+ name if not hasattr(self._library, 'computeTT2000') \ and hasattr(self._library, 'CDF_TT2000_from_UTC_parts'): self._library.computeTT2000 \ = self._library.CDF_TT2000_from_UTC_parts if hasattr(self._library, 'computeTT2000'): self._library.computeTT2000.restype = ctypes.c_longlong self._library.computeTT2000.argtypes = \ [ctypes.c_double] *9 #Map old name to the 3.7.1+ name if not hasattr(self._library, 'breakdownTT2000') \ and hasattr(self._library, 'CDF_TT2000_to_UTC_parts'): self._library.breakdownTT2000 \ = self._library.CDF_TT2000_to_UTC_parts if hasattr(self._library, 'breakdownTT2000'): self._library.breakdownTT2000.restype = None self._library.breakdownTT2000.argtypes = \ [ctypes.c_longlong] + [ctypes.POINTER(ctypes.c_double)] * 9 if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH'): self._library.CDF_TT2000_to_UTC_EPOCH.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH.argtypes = [ctypes.c_longlong] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH'): self._library.CDF_TT2000_from_UTC_EPOCH.restype = ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH.argtypes = [ctypes.c_double] if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH16'): self._library.CDF_TT2000_to_UTC_EPOCH16.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH16.argtypes = \ [ctypes.c_longlong, ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH16'): self._library.CDF_TT2000_from_UTC_EPOCH16.restype = \ ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH16.argtypes = \ [ctypes.POINTER(ctypes.c_double * 2)] #Get CDF version information ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) sub = ctypes.c_char(b' ') self.call(const.GET_, const.LIB_VERSION_, ctypes.byref(ver), const.GET_, const.LIB_RELEASE_, ctypes.byref(rel), const.GET_, const.LIB_INCREMENT_, ctypes.byref(inc), const.GET_, const.LIB_subINCREMENT_, ctypes.byref(sub)) ver = ver.value rel = rel.value inc = inc.value sub = sub.value self.version = (ver, rel, inc, sub) self._del_middle_rec_bug = ver < 3 or (ver == 3 and (rel < 4 or (rel == 4 and inc < 1))) self.supports_int8 = (ver > 3 or (ver == 3 and rel >=4)) self.cdftypenames = {const.CDF_BYTE.value: 'CDF_BYTE', const.CDF_CHAR.value: 'CDF_CHAR', const.CDF_INT1.value: 'CDF_INT1', const.CDF_UCHAR.value: 'CDF_UCHAR', const.CDF_UINT1.value: 'CDF_UINT1', const.CDF_INT2.value: 'CDF_INT2', const.CDF_UINT2.value: 'CDF_UINT2', const.CDF_INT4.value: 'CDF_INT4', const.CDF_UINT4.value: 'CDF_UINT4', const.CDF_INT8.value: 'CDF_INT8', const.CDF_FLOAT.value: 'CDF_FLOAT', const.CDF_REAL4.value: 'CDF_REAL4', const.CDF_DOUBLE.value: 'CDF_DOUBLE', const.CDF_REAL8.value: 'CDF_REAL8', const.CDF_EPOCH.value: 'CDF_EPOCH', 
const.CDF_EPOCH16.value: 'CDF_EPOCH16', const.CDF_TIME_TT2000.value: 'CDF_TIME_TT2000', } self.numpytypedict = {const.CDF_BYTE.value: numpy.int8, const.CDF_CHAR.value: numpy.int8, const.CDF_INT1.value: numpy.int8, const.CDF_UCHAR.value: numpy.uint8, const.CDF_UINT1.value: numpy.uint8, const.CDF_INT2.value: numpy.int16, const.CDF_UINT2.value: numpy.uint16, const.CDF_INT4.value: numpy.int32, const.CDF_UINT4.value: numpy.uint32, const.CDF_INT8.value: numpy.int64, const.CDF_FLOAT.value: numpy.float32, const.CDF_REAL4.value: numpy.float32, const.CDF_DOUBLE.value: numpy.float64, const.CDF_REAL8.value: numpy.float64, const.CDF_EPOCH.value: numpy.float64, const.CDF_EPOCH16.value: numpy.dtype((numpy.float64, 2)), const.CDF_TIME_TT2000.value: numpy.int64, } self.timetypes = [const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value] if not self.supports_int8: del self.cdftypenames[const.CDF_INT8.value] del self.numpytypedict[const.CDF_INT8.value] del self.cdftypenames[const.CDF_TIME_TT2000.value] del self.numpytypedict[const.CDF_TIME_TT2000.value] elif sys.platform.startswith('linux') \ and os.uname()[4].startswith('arm') \ and hasattr(self._library, 'computeTT2000') \ and self._library.computeTT2000( 2010, 1, 1, 0, 0, 0, 0, 0, 0) != 315576066184000000: #TT2000 call failed, so probably need to type-pun #double arguments to variadic functions. #Calling convention for non-variadic functions with floats #is unique, but convention for ints is same as variadic. #So type-pun arguments to integers to force that calling #convention. if ctypes.sizeof(ctypes.c_longlong) != \ ctypes.sizeof(ctypes.c_double): warnings.warn('ARM with unknown type sizes; ' 'TT2000 functions will not work.') else: self._library.computeTT2000.argtypes = \ [ctypes.c_longlong] * 9 c_ll_p = ctypes.POINTER(ctypes.c_longlong) if self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( 2010)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) != 315576066184000000: warnings.warn('ARM with unknown calling convention; ' 'TT2000 functions will not work.') self.datetime_to_tt2000 = self._datetime_to_tt2000_typepunned v_epoch16_to_datetime = numpy.frompyfunc( self.epoch16_to_datetime, 2, 1) self.v_epoch16_to_datetime = \ lambda x: v_epoch16_to_datetime(x[..., 0], x[..., 1]) self.v_epoch_to_datetime = numpy.frompyfunc( self.epoch_to_datetime, 1, 1) self.v_tt2000_to_datetime = numpy.frompyfunc( self.tt2000_to_datetime, 1, 1) self.v_datetime_to_epoch = numpy.vectorize( self.datetime_to_epoch, otypes=[numpy.float64]) v_datetime_to_epoch16 = numpy.frompyfunc( self.datetime_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. 
We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_datetime_to_epoch16(x): retval = numpy.require(v_datetime_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_datetime_to_epoch16 = _v_datetime_to_epoch16 self.v_datetime_to_tt2000 = numpy.vectorize( self.datetime_to_tt2000, otypes=[numpy.int64]) self.v_epoch_to_tt2000 = numpy.vectorize( self.epoch_to_tt2000, otypes=[numpy.int64]) self.v_tt2000_to_epoch = numpy.vectorize( self.tt2000_to_epoch, otypes=[numpy.float64]) v_epoch16_to_tt2000 = numpy.frompyfunc( self.epoch16_to_tt2000, 2, 1) self.v_epoch16_to_tt2000 = \ lambda x: v_epoch16_to_tt2000(x[..., 0], x[..., 1]) v_tt2000_to_epoch16 = numpy.frompyfunc( self.tt2000_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_tt2000_to_epoch16(x): retval = numpy.require(v_tt2000_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_tt2000_to_epoch16 = _v_tt2000_to_epoch16 if not self.supports_int8: self.datetime_to_tt2000 = self._bad_tt2000 self.tt2000_to_datetime = self._bad_tt2000 self.v_datetime_to_tt2000 = self._bad_tt2000 self.v_tt2000_to_datetime = self._bad_tt2000 self.epoch_to_tt2000 = self._bad_tt2000 self.v_epoch_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch = self._bad_tt2000 self.v_tt2000_to_epoch = self._bad_tt2000 self.epoch_16_to_tt2000 = self._bad_tt2000 self.v_epoch16_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch16 = self._bad_tt2000 self.v_tt2000_to_epoch16 = self._bad_tt2000 #Default to V2 CDF self.set_backward(True) @staticmethod def _find_lib(): """ Search for the CDF library Searches in likely locations for CDF libraries and attempts to load them. Stops at first successful load and, if fails, reports all the files that were tried as libraries. Returns ======= out : tuple This is either (path to library, loaded library) or, in the event of failure, (None, list of libraries tried) """ failed = [] for libpath in Library._lib_paths(): try: lib = ctypes.CDLL(libpath) except: failed.append(libpath) else: return libpath, lib return (failed, None) @staticmethod def _lib_paths(): """Find candidate paths for the CDF library Does not check that the library is actually in any particular directory, just returns a list of possible locations, in priority order. Returns ======= out : generator of str paths that look like the CDF library """ #What the library might be named names = { 'win32': ['cdf.dll'], 'darwin': ['libcdf.dylib', 'cdf.dylib', 'libcdf.so'], 'linux2': ['libcdf.so'], 'linux': ['libcdf.so'], } names = names.get(sys.platform, ['libcdf.so']) #All existing CDF-library-like paths within a directory search_dir = lambda x: \ [os.path.join(x, fname) for fname in names if os.path.exists(os.path.join(x, fname))] # Only use anaconda locations... # Defined during builds ... if 'PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['PREFIX'], 'lib')): yield p # defined when conda is activated ... 
if 'CONDA_PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'lib')): yield p # Special subdirectory for anaconda unix packages on windows if 'LIBRARY_BIN' in os.environ: for p in search_dir(os.environ['LIBRARY_BIN']): yield p ctypespath = ctypes.util.find_library( 'cdf.dll' if sys.platform == 'win32' else 'cdf') if ctypespath: yield ctypespath def check_status(self, status, ignore=()): """ Raise exception or warning based on return status of CDF call Parameters ========== status : int status returned by the C library Other Parameters ================ ignore : sequence of ctypes.c_long CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. (Default none). Raises ====== CDFError : if status < CDF_WARN, indicating an error Warns ===== CDFWarning : if CDF_WARN <= status < CDF_OK, indicating a warning. Returns ======= out : int status (unchanged) """ if status == const.CDF_OK or status in ignore: return status if status < const.CDF_WARN: raise CDFError(status) else: warning = CDFWarning(status) warning.warn() return status def call(self, *args, **kwargs): """ Call the CDF internal interface Passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with :meth:`check_status`. Terminal NULL is automatically added to args. Parameters ========== args : various, see :mod:`ctypes` Passed directly to the CDF library interface. Useful constants are defined in the :mod:`~pycdf.const` module. Other Parameters ================ ignore : sequence of CDF statuses sequence of CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. Returns ======= out : int CDF status from the library Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ if 'ignore' in kwargs: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) ), kwargs['ignore']) else: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) )) def set_backward(self, backward=True): """ Set backward compatibility mode for new CDFs Unless backward compatible mode is set, CDF files created by the version 3 library can not be read by V2. Parameters ========== backward : boolean Set backward compatible mode if True; clear it if False. Raises ====== ValueError : if backward=False and underlying CDF library is V2 """ if self.version[0] < 3: if not backward: raise ValueError( 'Cannot disable backward-compatible mode for CDF version 2.') else: return self._library.CDFsetFileBackward(const.BACKWARDFILEon if backward else const.BACKWARDFILEoff) def epoch_to_datetime(self, epoch): """ Converts a CDF epoch value to a datetime Parameters ========== epoch : float epoch value from CDF Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
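Example (a hedged round-trip sketch; ``lib`` is the module-level :data:`~pycdf.lib` object):

>>> import datetime
>>> ep = lib.datetime_to_epoch(datetime.datetime(2010, 1, 1))
>>> lib.epoch_to_datetime(ep)
datetime.datetime(2010, 1, 1, 0, 0)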
See Also ======== v_epoch_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) self._library.EPOCHbreakdown(ctypes.c_double(epoch), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999000) else: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, msec.value * 1000) def datetime_to_epoch(self, dt): """ Converts a Python datetime to a CDF Epoch value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : float epoch corresponding to dt See Also ======== v_datetime_to_epoch """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) micro = dt.microsecond % 1000 if micro >= 500 and dt.year < 9999: dt += datetime.timedelta(0, 0, 1000) return self._library.computeEPOCH(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000)) def epoch16_to_datetime(self, epoch0, epoch1): """ Converts a CDF epoch16 value to a datetime .. note:: The call signature has changed since SpacePy 0.1.2. Formerly this method took a single argument with two values; now it requires two arguments (one for each value). To convert existing code, replace ``epoch16_to_datetime(epoch)`` with ``epoch16_to_datetime(*epoch)``. Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. See Also ======== v_epoch16_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) usec = ctypes.c_long(0) nsec = ctypes.c_long(0) psec = ctypes.c_long(0) self._library.EPOCH16breakdown((ctypes.c_double * 2)(epoch0, epoch1), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec), ctypes.byref(psec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) micro = int(float(msec.value) * 1000 + float(usec.value) + float(nsec.value) / 1000 + float(psec.value) / 1e6 + 0.5) if micro < 1000000: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_epoch16(self, dt): """ Converts a Python datetime to a CDF Epoch16 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : list of float epoch16 corresponding to dt See Also ======== v_datetime_to_epoch16 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) #Default to "illegal epoch" epoch16 = (ctypes.c_double * 2)(-1., -1.) 
if self._library.computeEPOCH16(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0, 0, epoch16): return (-1., -1.) #Failure, so illegal epoch return (epoch16[0], epoch16[1]) def epoch_to_epoch16(self, epoch): """ Converts a CDF EPOCH to a CDF EPOCH16 value Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : (double, double) EPOCH16 corresponding to epoch """ e = numpy.require(epoch, numpy.float64) s = numpy.trunc(e / 1000.0) #ugly numpy stuff, probably a better way.... res = numpy.hstack((s, (e - s * 1000.0) * 1e9)) if len(res) <= 2: return res newshape = list(res.shape[0:-2]) newshape.append(res.shape[-1] // 2) newshape.append(2) return numpy.rollaxis(res.reshape(newshape), -1, -2) def epoch_to_num(self, epoch): """ Convert CDF EPOCH to matplotlib number. Same output as :func:`~matplotlib.dates.date2num` and useful for plotting large data sets without converting the times through datetime. Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : double Floating point number representing days since 0001-01-01. """ #date2num day 1 is 1/1/1 00UT #epoch 1/1/1 00UT is 31622400000.0 (millisecond) return (epoch - 31622400000.0) / (24 * 60 * 60 * 1000.0) + 1.0 def epoch16_to_epoch(self, epoch16): """ Converts a CDF EPOCH16 to a CDF EPOCH value Parameters ========== epoch16 : (double, double) EPOCH16 to convert. Lists and numpy arrays are acceptable. LAST dimension should be 2: the two pairs of EPOCH16 Returns ======= out : double EPOCH corresponding to epoch16 """ e = numpy.require(epoch16, numpy.float64) return e[..., 0] * 1000.0 + numpy.round(e[..., 1] / 1e9) def tt2000_to_datetime(self, tt2000): """ Converts a CDF TT2000 value to a datetime .. note:: Although TT2000 values support leapseconds, Python's datetime object does not. Any times after 23:59:59.999999 will be truncated to 23:59:59.999999. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
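Example (a hedged round-trip sketch; requires a CDF library with TT2000 support, see ``supports_int8``):

>>> import datetime
>>> t = lib.datetime_to_tt2000(datetime.datetime(2010, 1, 1))
>>> lib.tt2000_to_datetime(t)
datetime.datetime(2010, 1, 1, 0, 0)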
See Also ======== v_tt2000_to_datetime """ yyyy = ctypes.c_double(0) mm = ctypes.c_double(0) dd = ctypes.c_double(0) hh = ctypes.c_double(0) min = ctypes.c_double(0) sec = ctypes.c_double(0) msec = ctypes.c_double(0) usec = ctypes.c_double(0) nsec = ctypes.c_double(0) self._library.breakdownTT2000( ctypes.c_longlong(tt2000), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) sec = int(sec.value) if sec >= 60: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), 59, 999999) micro = int(msec.value * 1000 + usec.value + nsec.value / 1000 + 0.5) if micro < 1000000: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_tt2000(self, dt): """ Converts a Python datetime to a CDF TT2000 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0) def _datetime_to_tt2000_typepunned(self, dt): """ Converts a Python datetime to a CDF TT2000 value Typepunned version that passes doubles as longlongs, to get around ARM calling convention oddness. Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ c_ll_p = ctypes.POINTER(ctypes.c_longlong) if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( dt.year)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.month)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.day)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.hour)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.minute)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.second)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond // 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond % 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) def epoch_to_tt2000(self, epoch): """ Converts a CDF EPOCH to a CDF TT2000 value Parameters ========== epoch : double EPOCH to convert Returns ======= out : int tt2000 corresponding to epoch See Also ======== v_epoch_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH(epoch) def tt2000_to_epoch(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH .. note:: Although TT2000 values support leapseconds, CDF EPOCH values do not. 
Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double EPOCH corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch """ return self._library.CDF_TT2000_to_UTC_EPOCH(tt2000) def epoch16_to_tt2000(self, epoch0, epoch1): """ Converts a CDF epoch16 value to TT2000 .. note:: Because TT2000 does not support picoseconds, the picoseconds value in epoch is ignored (i.e., truncated.) Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : long TT2000 corresponding to epoch. See Also ======== v_epoch16_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH16( (ctypes.c_double * 2)(epoch0, epoch1)) def tt2000_to_epoch16(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH16 .. note:: Although TT2000 values support leapseconds, CDF EPOCH16 values do not. Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double, double EPOCH16 corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch16 """ #Default to "illegal epoch" if isn't populated epoch16 = (ctypes.c_double * 2)(-1., -1.) if self._library.CDF_TT2000_to_UTC_EPOCH16(tt2000, epoch16): return (-1., -1.) #Failure; illegal epoch return (epoch16[0], epoch16[1]) def _bad_tt2000(*args, **kwargs): """Convenience function for complaining that TT2000 not supported""" raise NotImplementedError( 'TT2000 functions require CDF library 3.4.0 or later') def download_library(): """Download and install the CDF library""" if sys.platform != 'win32': raise NotImplementedError( 'CDF library install only supported on Windows') try: import html.parser as HTMLParser except ImportError: import HTMLParser #https://stackoverflow.com/questions/1713038/super-fails-with-error-typeerror-argument-1-must-be-type-not-classobj class LinkParser(HTMLParser.HTMLParser, object): def __init__(self, *args, **kwargs): self.links_found = [] super(LinkParser, self).__init__(*args, **kwargs) def handle_starttag(self, tag, attrs): if tag != 'a' or attrs[0][0] != 'href': return self.links_found.append(attrs[0][1]) import re import subprocess try: import urllib.request as u except ImportError: import urllib as u # Removed reference to spacepy #import spacepy #if spacepy.config.get('user_agent', None): # class AppURLopener(u.FancyURLopener): # version = spacepy.config['user_agent'] # u._urlopener = AppURLopener() baseurl = 'https://spdf.sci.gsfc.nasa.gov/pub/software/cdf/dist/' url = u.urlopen(baseurl) listing = url.read() url.close() p = LinkParser() p.feed(listing) cdfdist = [l for l in p.links_found if re.match('^cdf3\d_\d(?:_\d)?/$', l)] if not cdfdist: raise RuntimeError( "Couldn't find CDF distribution directory to download") cdfdist.sort(key=lambda x: x.rstrip('/').split('_')) cdfverbase = cdfdist[-1].rstrip('/') instfname = cdfverbase + ('_0' if cdfverbase.count('_') == 1 else '') + \ '-setup-{0}.exe'.format(len('%x' % sys.maxsize)*4) insturl = baseurl + cdfverbase + '/windows/' + instfname tmpdir = tempfile.mkdtemp() try: fname, status = u.urlretrieve(insturl, os.path.join(tmpdir, instfname)) subprocess.check_call([fname, '/install', '/q1'], shell=False) finally: shutil.rmtree(tmpdir) _libpath, _library = Library._find_lib() 
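#Library search runs once, at import time; if nothing loadable was found,
#_library is None and the import fails just below, listing every path tried.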
if _library is None: raise Exception(('Cannot load CDF C library; checked {0}. ' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(_libpath))) from . import const lib = Library(_libpath, _library) """Module global library object. Initalized at module load time so all classes have ready access to the CDF library and a common state. E.g: >>> import pycdf >>> pycdf.lib.version (3, 3, 0, ' ') """ class CDFException(Exception): """ Base class for errors or warnings in the CDF library. Not normally used directly, but in subclasses :class:`CDFError` and :class:`CDFWarning`. Error messages provided by this class are looked up from the underlying C library. """ def __init__(self, status): """ Create a CDF Exception Uses CDF C library to look up an appropriate error message. Parameters ========== status : ctypes.c_long CDF status """ self.status = status self.string = 'CDF error ' + repr(status) + ', unable to get details.' message = ctypes.create_string_buffer(const.CDF_STATUSTEXT_LEN + 1) try: retval = lib._library.CDFlib(const.SELECT_, const.CDF_STATUS_, ctypes.c_long(status), const.GET_, const.STATUS_TEXT_, message, const.NULL_) if retval == const.CDF_OK: if isinstance(message.value, str): self.string = message.value elif isinstance(message.value, bytes): self.string = message.value.decode() except: pass def __str__(self): """ Error string associated with the library error. Returns ======= out : str Error message from the CDF library. """ return self.string class CDFError(CDFException): """Raised for an error in the CDF library.""" pass class CDFWarning(CDFException, UserWarning): """Used for a warning in the CDF library.""" def warn(self, level=4): """ Issues a warning based on the information stored in my exception Intended for use in check_status or similar wrapper function. Other Parameters ================ level : int optional (default 3), how far up the stack the warning should be reported. Passed directly to :class:`warnings.warn`. """ warnings.warn(self, self.__class__, level) class EpochError(Exception): """Used for errors in epoch routines""" pass def _compress(obj, comptype=None, param=None): """Set or check the compression of a :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param obj: object on which to set or check compression @type obj: :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param comptype: type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :py:mod:`pycdf.const`. If not specified, will not change compression. @type comptype: ctypes.c_long @param param: Compression parameter, see CDF CRM 4.10 and :py:mod:`pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
@type param: ctypes.c_long @return: (comptype, param) currently in effect @rtype: tuple """ if isinstance(obj, CDF): COMPRESSION_ = const.CDF_COMPRESSION_ elif isinstance(obj, Var): COMPRESSION_ = const.zVAR_COMPRESSION_ else: raise ValueError('Must specify a CDF or Var type.') validparams = {const.NO_COMPRESSION.value: [ctypes.c_long(0)], const.RLE_COMPRESSION.value: [const.RLE_OF_ZEROs], const.HUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.AHUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.GZIP_COMPRESSION.value: [ctypes.c_long(5), ctypes.c_long(1), ctypes.c_long(2), ctypes.c_long(3), ctypes.c_long(4), ctypes.c_long(6), ctypes.c_long(7), ctypes.c_long(8), ctypes.c_long(9), ], } comptypes = [const.NO_COMPRESSION, const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION, const.GZIP_COMPRESSION] comptypevalues = [i.value for i in comptypes] if comptype != None: if not hasattr(comptype, 'value'): comptype = ctypes.c_long(comptype) if param is None: if not comptype.value in validparams: raise CDFError(const.BAD_COMPRESSION) param = validparams[comptype.value][0] paramlist = (ctypes.c_long * 1)(param) obj._call(const.PUT_, COMPRESSION_, comptype, paramlist) params = (ctypes.c_long * const.CDF_MAX_PARMS)(*([0] * const.CDF_MAX_PARMS)) comptype = ctypes.c_long(0) percent = ctypes.c_long(0) obj._call(const.GET_, COMPRESSION_, ctypes.byref(comptype), ctypes.byref(params), ctypes.byref(percent)) param = params[0] if not comptype.value in comptypevalues: raise CDFError(const.BAD_COMPRESSION) validparamvalues = [i.value for i in validparams[comptype.value]] if not param in validparamvalues: raise CDFError(const.BAD_COMPRESSION_PARM) comptype = comptypes[comptypevalues.index(comptype.value)] if comptype in (const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION): param = validparams[comptype.value][validparamvalues.index(param)] return (comptype, param) class CDF(MutableMapping): """ Python object representing a CDF file. Open or create a CDF file by creating an object of this class. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. A readonly CDF with many variables may be slow to close. See :meth:`readonly`. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :meth:`close` or :meth:`save` when done. .. note:: Existing CDF files are opened read-only by default, see :meth:`readonly` to change. CDF supports the `with <http://docs.python.org/tutorial/inputoutput.html#methods-of-file-objects>`_ keyword, like other file objects, so: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... #do brilliant things with the CDF will open the CDF, execute the indented statements, and close the CDF when finished or when an error occurs. The `python docs <http://docs.python.org/reference/compound_stmts.html#with>`_ include more detail on this 'context manager' ability. 
CDF objects behave like a python `dictionary <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_, where the keys are names of variables in the CDF, and the values, :class:`Var` objects. As a dictionary, they are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_ and it is easy to loop over all of the variables in a file. Some examples: #. List the names of all variables in the open CDF ``cdffile``: >>> cdffile.keys() >>> for k in cdffile: #Alternate ... print(k) #. Get a :class:`Var` object for the variable named ``Epoch``: >>> epoch = cdffile['Epoch'] #. Determine if a CDF contains a variable named ``B_GSE``: >>> if 'B_GSE' in cdffile: ... print('B_GSE is in the file') ... else: ... print('B_GSE is not in the file') #. Find how many variables are in the file: >>> print(len(cdffile)) #. Delete the variable ``Epoch`` from the open CDF file ``cdffile``: >>> del cdffile['Epoch'] #. Display a summary of variables and types in open CDF file ``cdffile``: >>> print(cdffile) #. Open the CDF named ``cdf_filename.cdf``, read *all* the data from all variables into dictionary ``data``, and close it when done or if an error occurs: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... data = cdffile.copy() This last example can be very inefficient as it reads the entire CDF. Normally it's better to treat the CDF as a dictionary and access only the data needed, which will be pulled transparently from disc. See :class:`Var` for more subtle examples. Potentially useful dictionary methods and related functions: - `in <http://docs.python.org/reference/expressions.html#in>`_ - `keys <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_ - :py:func:`len` - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - :py:func:`sorted` - :py:func:`~spacepy.toolbox.dictree` The CDF user's guide section 2.2 has more background information on CDF files. The :attr:`~CDF.attrs` Python attribute acts as a dictionary referencing CDF attributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`gAttrList` for more on the dictionary of global attributes. Creating a new CDF from a master (skeleton) CDF has similar syntax to opening one: >>> cdffile = pycdf.CDF('cdf_filename.cdf', 'master_cdf_filename.cdf') This creates and opens ``cdf_filename.cdf`` as a copy of ``master_cdf_filename.cdf``. Using a skeleton CDF is recommended over making a CDF entirely from scratch, but this is possible by specifying a blank master: >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') When CDFs are created in this way, they are opened read-write, see :py:meth:`readonly` to change. By default, new CDFs (without a master) are created in version 2 (backward-compatible) format. To create a version 3 CDF, use :meth:`Library.set_backward`: >>> pycdf.lib.set_backward(False) >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') Add variables by direct assignment, which will automatically set type and dimension based on the data provided: >>> cdffile['new_variable_name'] = [1, 2, 3, 4] or, if more control is needed over the type and dimensions, use :py:meth:`new`. Although it is supported to assign Var objects to Python variables for convenience, there are some minor pitfalls that can arise when changing a CDF that will not affect most users. 
This is only a concern when assigning a zVar object to a Python variable, changing the CDF through some other variable, and then trying to use the zVar object via the originally assigned variable. Deleting a variable: >>> var = cdffile['Var1'] >>> del cdffile['Var1'] >>> var[0] #fail, no such variable Renaming a variable: >>> var = cdffile['Var1'] >>> cdffile['Var1'].rename('Var2') >>> var[0] #fail, no such variable Renaming via the same variable works: >>> var = cdffile['Var1'] >>> var.rename('Var2') >>> var[0] #succeeds, aware of new name Deleting a variable and then creating another variable with the same name may lead to some surprises: >>> var = cdffile['Var1'] >>> var[...] = [1, 2, 3, 4] >>> del cdffile['Var1'] >>> cdffile.new('Var1', data=[5, 6, 7, 8] >>> var[...] [5, 6, 7, 8] .. autosummary:: ~CDF.attr_num ~CDF.attrs ~CDF.add_attr_to_cache ~CDF.add_to_cache ~CDF.backward ~CDF.checksum ~CDF.clear_attr_from_cache ~CDF.clear_from_cache ~CDF.clone ~CDF.close ~CDF.col_major ~CDF.compress ~CDF.copy ~CDF.from_data ~CDF.new ~CDF.raw_var ~CDF.readonly ~CDF.save ~CDF.var_num ~CDF.version .. attribute:: CDF.attrs Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. .. attribute:: CDF.backward True if this CDF was created in backward-compatible mode (for opening with CDF library before 3.x) .. automethod:: add_to_cache .. automethod:: add_attr_to_cache .. automethod:: attr_num .. automethod:: checksum .. automethod:: clear_from_cache .. automethod:: clear_attr_from_cache .. automethod:: clone .. automethod:: close .. automethod:: col_major .. automethod:: compress .. automethod:: copy .. automethod:: from_data .. automethod:: new .. automethod:: raw_var .. automethod:: readonly .. automethod:: save .. automethod:: var_num .. automethod:: version """ def __init__(self, pathname, masterpath=None, create=None, readonly=None): """Open or create a CDF file. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. Raises ====== CDFError if CDF library reports an error CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save` when done. 
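A hedged sketch of creating a brand-new (empty) CDF rather than opening an existing one; the filename is illustrative:

>>> newcdf = pycdf.CDF('new_filename.cdf', '')  # empty masterpath: create from scratch, opened read/write
>>> newcdf.close()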
""" if masterpath is not None: #Looks like we want to create if create is False: raise ValueError('Cannot specify a master CDF without creating a CDF') if readonly is True: raise ValueError('Cannot create a CDF in readonly mode') if create and readonly: raise ValueError('Cannot create a CDF in readonly mode') try: self.pathname = pathname.encode() except AttributeError: raise ValueError( 'pathname must be string-like: {0}'.format(pathname)) self._handle = ctypes.c_void_p(None) self._opened = False if masterpath is None and not create: self._open(True if readonly is None else readonly) elif masterpath: self._from_master(masterpath.encode()) else: self._create() lib.call(const.SELECT_, const.CDF_zMODE_, ctypes.c_long(2)) self._attrlistref = weakref.ref(gAttrList(self)) self.backward = self.version()[0] < 3 self._var_nums = {} """Cache of name-to-number mappings for variables in this CDF""" self._attr_info = {} """Cache of name-to-(number, global) mappings for attributes in this CDF""" def __del__(self): """Destructor; called when CDF object is destroyed. Close CDF file if there is still a valid handle. .. note:: To avoid data loss, explicitly call :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save`. """ if self._opened: self.close() def __delitem__(self, name): """Delete a zVariable in this CDF, by name or number Parameters ========== name : string or int Name or number of the CDF variable .. note: Variable numbers may change if variables are added or removed. Examples ======== Delete the variable ``Epoch`` from the open CDF file ``cdffile``. >>> del cdffile['Epoch'] """ self[name]._delete() def __enter__(self): """Context manager entrance function.""" return self def __exit__(self, type, value, traceback): """Context manager exit function. Close CDF file. """ self.close() def __getitem__(self, name): """Gets a zVariable in this CDF, by name or number The CDF acts like a dict @param name: Name or number of the CDF variable @type name: string or int @return: CDF variable named or numbered L{name} @rtype: :py:class:`pycdf.Var` @raise KeyError: for pretty much any problem in lookup @note: variable numbers may change if variables are added or removed. """ try: return Var(self, name) except CDFException as e: raise KeyError('{0}: {1}'.format(name, e)) def __setitem__(self, name, data): """Writes data to a zVariable in this CDF If the zVariable does not exist, will create one matching L{data}. If it does exist, will attempt to write L{data} to it without changing the type or dimensions. @param name: name or number of the variable to write @type name: str or int @param data: data to write, or a :py:class:`pycdf.Var` to copy """ if isinstance(data, Var): self.clone(data, name) elif name in self: self[name][...] 
= data if hasattr(data, 'attrs'): self[name].attrs.clone(data.attrs) else: self.new(name, data) def __iter__(self, current = 0): """Iterates over zVars in CDF Iterators for dicts return keys @note: Returned in variable-number order """ while current < self.__len__(): name = self[current].name() value = (yield name) if value is None: current += 1 else: current = self[value]._num() current += 1 def __len__(self): """Implements 'length' of CDF (number of zVars) @return: number of zVars in the CDF @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, const.CDF_NUMzVARS_, ctypes.byref(count)) return count.value def __contains__(self, key): """Determines whether a particular variable name is in the CDF @note: Essentially an efficiency function; L{__iter__} is called if this isn't defined @param key: key/variable name to check @type key: string @return: True if L{key} is the name of a variable in CDF, else False @rtype: Boolean """ try: foo = self[key] return True except KeyError as e: expected = str(key) + \ ": NO_SUCH_VAR: Named variable not found in this CDF." if expected in e.args: return False raise def __repr__(self): """Returns representation of CDF Cannot return anything that can be eval'd to create a copy of the CDF, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<CDF:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the CDF This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.CDF`, just the names, types, and sizes of all variables. (Attributes are not listed.) @return: description of the variables in the CDF @rtype: str """ if self._opened: return '\n'.join([key + ': ' + str(value) for (key, value) in sorted(self.items())]) #can get away with this sort because second value in tuple isn't #compared unless first are different, and variable name is unique. else: if isinstance(self.pathname, str): return 'Closed CDF {0}'.format(self.pathname) else: return 'Closed CDF {0}'.format(self.pathname.decode('ascii')) def _open(self, readonly=True): """Opens the CDF file (called on init) Will open an existing CDF file read/write. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.OPEN_, const.CDF_, self.pathname, ctypes.byref(self._handle)) self._opened = True if readonly: #Default is RW self.readonly(readonly) def _create(self): """Creates (and opens) a new CDF file Created at ``pathname``. Assumes zero-dimension r variables Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.CREATE_, const.CDF_, self.pathname, ctypes.c_long(0), (ctypes.c_long * 1)(0), ctypes.byref(self._handle)) self._opened = True def _from_master(self, master_path): """Creates a new CDF from a master CDF file ``master_path`` is copied to ``pathname`` and opened. Parameters ========== master_path : string location of the master CDF file Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. 
note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ if os.path.exists(self.pathname): raise CDFError(const.CDF_EXISTS) shutil.copy2(master_path, self.pathname) self._open(False) def _call(self, *args, **kwargs): """Select this CDF as current and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. Parameters ========== args : various, see :py:mod:`ctypes`. Passed directly to the CDF library interface. Useful constants are defined in the :doc:`const <pycdf_const>` module of this package. Returns ======= out : ctypes.c_long CDF status from the library .. note: Terminal NULL_ is automatically added to ``args``. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. """ return lib.call(const.SELECT_, const.CDF_, self._handle, *args, **kwargs) def clone(self, zVar, name=None, data=True): """ Clone a zVariable (from another CDF or this) into this CDF Parameters ========== zVar : :py:class:`Var` variable to clone Other Parameters ================ name : str Name of the new variable (default: name of the original) data : boolean (optional) Copy data, or only type, dimensions, variance, attributes? (default: True, copy data as well) Returns ======= out : :py:class:`Var` The newly-created zVar in this CDF """ if name is None: name = zVar.name() if name in self: del self[name] self.new(name, type=zVar.type(), recVary=zVar.rv(), dimVarys=zVar.dv(), dims=zVar._dim_sizes(), n_elements=zVar._nelems()) self[name].compress(*zVar.compress()) self[name].attrs.clone(zVar.attrs) if data: r = zVar._raw zVar._raw = True self.raw_var(name)[...] = zVar[...] zVar._raw = r return zVar def col_major(self, new_col=None): """ Finds the majority of this CDF file Other Parameters ================ new_col : boolean Specify True to change to column-major, False to change to row major, or do not specify to check the majority rather than changing it. (default is check only) Returns ======= out : boolean True if column-major, false if row-major """ if new_col != None: new_maj = const.COLUMN_MAJOR if new_col else const.ROW_MAJOR self._call(const.PUT_, const.CDF_MAJORITY_, new_maj) maj = ctypes.c_long(0) self._call(const.GET_, const.CDF_MAJORITY_, ctypes.byref(maj)) if not maj.value in (const.ROW_MAJOR.value, const.COLUMN_MAJOR.value): raise CDFError(const.BAD_MAJORITY) return maj.value == const.COLUMN_MAJOR.value def readonly(self, ro=None): """ Sets or check the readonly status of this CDF If the CDF has been changed since opening, setting readonly mode will have no effect. .. note:: Closing a CDF that has been opened readonly, or setting readonly False, may take a substantial amount of time if there are many variables in the CDF, as a (potentially large) cache needs to be cleared. Consider specifying ``readonly=False`` when opening the file if this is an issue. However, this may make some reading operations slower. Other Parameters ================ ro : Boolean True to set the CDF readonly, False to set it read/write, or leave out to check only. 
Returns ======= out : Boolean True if CDF is read-only, else False Raises ====== CDFError : if bad mode is set """ if ro == True: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYon) elif ro == False: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYoff) mode = ctypes.c_long(0) self._call(const.CONFIRM_, const.CDF_READONLY_MODE_, ctypes.byref(mode)) if mode.value == const.READONLYon.value: return True elif mode.value == const.READONLYoff.value: return False else: raise CDFError(const.BAD_READONLY_MODE.value) def checksum(self, new_val=None): """ Set or check the checksum status of this CDF. If checksums are enabled, the checksum will be verified every time the file is opened. Other Parameters ================ new_val : boolean True to enable checksum, False to disable, or leave out to simply check. Returns ======= out : boolean True if the checksum is enabled or False if disabled """ if new_val != None: self._call(const.PUT_, const.CDF_CHECKSUM_, const.MD5_CHECKSUM if new_val else const.NO_CHECKSUM) chk = ctypes.c_long(0) self._call(const.GET_, const.CDF_CHECKSUM_, ctypes.byref(chk)) if not chk.value in (const.MD5_CHECKSUM.value, const.NO_CHECKSUM.value): raise CDFError(const.BAD_CHECKSUM) return chk.value == const.MD5_CHECKSUM.value def close(self): """ Closes the CDF file Although called on object destruction (:meth:`~CDF.__del__`), to ensure all data are saved, the user should explicitly call :meth:`~CDF.close` or :meth:`~CDF.save`. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.CLOSE_, const.CDF_) self._opened = False def compress(self, comptype=None, param=None): """ Set or check the compression of this CDF Sets compression on entire *file*, not per-variable. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) Returns ======= out : tuple (comptype, param) currently in effect See Also ======== :meth:`Var.compress` Examples ======== Set file ``cdffile`` to gzip compression, compression level 9: >>> cdffile.compress(pycdf.const.GZIP_COMPRESSION, 9) """ return _compress(self, comptype, param) def new(self, name, data=None, type=None, recVary=True, dimVarys=None, dims=None, n_elements=None, compress=None, compress_param=None): """ Create a new zVariable in this CDF .. note:: Either ``data`` or ``type`` must be specified. If type is not specified, it is guessed from ``data``. Parameters ========== name : str name of the new variable Other Parameters ================ data data to store in the new variable. If this has a an ``attrs`` attribute (e.g., :class:`~spacepy.datamodel.dmarray`), it will be used to populate attributes of the new variable. type : ctypes.c_long CDF type of the variable, from :mod:`~pycdf.const`. See section 2.5 of the CDF user's guide for more information on CDF data types. recVary : boolean record variance of the variable (default True) dimVarys : list of boolean dimension variance of each dimension, default True for all dimensions. 
dims : list of int size of each dimension of this variable, default zero-dimensional. Note this is the dimensionality as defined by CDF, i.e., for record-varying variables it excludes the leading record dimension. See :py:class:`Var`. n_elements : int number of elements, should be 1 except for CDF_CHAR, for which it's the length of the string. compress : ctypes.c_long Compression to apply to this variable, default None. See :py:meth:`Var.compress`. compress_param : ctypes.c_long Compression parameter if compression used; reasonable default is chosen. See :py:meth:`Var.compress`. Returns ======= out : :py:class:`Var` the newly-created zVariable Raises ====== ValueError : if neither data nor sufficient typing information is provided. Notes ===== Any given data may be representable by a range of CDF types; if the type is not specified, pycdf will guess which the CDF types which can represent this data. This breaks down to: #. If input data is a numpy array, match the type of that array #. Proper kind (numerical, string, time) #. Proper range (stores highest and lowest number provided) #. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: #. Type that matches precision of data first, then #. integer type before float type, then #. Smallest type first, then #. signed type first, then #. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if ``data`` specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: #. absolute values between 0 and 3e-39 #. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. """ if type in (const.CDF_EPOCH16, const.CDF_INT8, const.CDF_TIME_TT2000) \ and self.backward: raise ValueError('Cannot use EPOCH16, INT8, or TIME_TT2000 ' 'in backward-compatible CDF') if not lib.supports_int8 and \ type in (const.CDF_INT8, const.CDF_TIME_TT2000): raise ValueError('INT8 and TIME_TT2000 require CDF library 3.4.0') if data is None: if type is None: raise ValueError('Must provide either data or a CDF type.') if dims is None: dims = [] if n_elements is None: n_elements = 1 else: (guess_dims, guess_types, guess_elements) = _Hyperslice.types(data) if dims is None: if recVary: if guess_dims == (): raise ValueError( 'Record-varying data cannot be scalar. 
' 'Specify NRV with CDF.new() or put data in array.') dims = guess_dims[1:] else: dims = guess_dims if type is None: type = guess_types[0] if type == const.CDF_EPOCH16.value and self.backward: type = const.CDF_EPOCH if n_elements is None: n_elements = guess_elements if dimVarys is None: dimVarys = [True for i in dims] recVary = const.VARY if recVary else const.NOVARY dimVarys = [const.VARY if dimVary else const.NOVARY for dimVary in dimVarys] if not hasattr(type, 'value'): type = ctypes.c_long(type) if type.value == const.CDF_INT8.value and not lib.supports_int8: raise ValueError( '64-bit integer support require CDF library 3.4.0') if type.value in (const.CDF_EPOCH16.value, const.CDF_INT8.value, const.CDF_TIME_TT2000.value) \ and self.backward: raise ValueError('Data requires EPOCH16, INT8, or TIME_TT2000; ' 'incompatible with backward-compatible CDF') new_var = Var(self, name, type, n_elements, dims, recVary, dimVarys) if compress != None: new_var.compress(compress, compress_param) if data is not None: new_var[...] = data if hasattr(data, 'attrs'): new_var.attrs.clone(data.attrs) return new_var def raw_var(self, name): """ Get a "raw" :class:`Var` object. Normally a :class:`Var` will perform translation of values for certain types (to/from Unicode for CHAR variables on Py3k, and to/from datetime for all time types). A "raw" object does not perform this translation, on read or write. This does *not* affect the data on disk, and in fact it is possible to maintain multiple Python objects with access to the same zVariable. Parameters ========== name : str name or number of the zVariable """ v = self[name] v._raw = True return v def save(self): """ Saves the CDF file but leaves it open. If closing the CDF, :meth:`close` is sufficient; there is no need to call :meth:`save` before :meth:`close`. .. note:: Relies on an undocumented call of the CDF C library, which is also used in the Java interface. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.SAVE_, const.CDF_) def copy(self): """ Make a copy of all data and attributes in this CDF Returns ======= out : :py:class:`CDFCopy` :class:`~spacepy.datamodel.SpaceData`-like object of all data """ return CDFCopy(self) def version(self): """ Get version of library that created this CDF Returns ======= out : tuple version of CDF library, in form (version, release, increment) """ ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) self._call(const.GET_, const.CDF_VERSION_, ctypes.byref(ver), const.GET_, const.CDF_RELEASE_, ctypes.byref(rel), const.GET_, const.CDF_INCREMENT_, ctypes.byref(inc)) return (ver.value, rel.value, inc.value) def _get_attrs(self): """Get attribute list Provide access to the CDF's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = gAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. 
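For example (an illustrative sketch; ``cdffile`` is assumed to be an open, writable :class:`CDF`):

>>> cdffile.attrs['Project'] = 'ISTP'
>>> cdffile.attrs['Project'][0]
'ISTP'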
""") def var_num(self, varname): """Get the variable number of a particular variable name This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : int Variable number of this zvariable. """ num = self._var_nums.get(varname, None) if num is None: #Copied from Var._get, which can hopefully be thinned varNum = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMBER_, varname, ctypes.byref(varNum)) num = varNum.value self._var_nums[varname] = num return num def attr_num(self, attrname): """Get the attribute number and scope by attribute name This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : tuple attribute number, scope (True for global) of this attribute """ res = self._attr_info.get(attrname, None) if res is None: #Copied from Var._get, which can hopefully be thinned attrNum = ctypes.c_long(0) self._call(const.GET_, const.ATTR_NUMBER_, attrname, ctypes.byref(attrNum)) scope = ctypes.c_long(0) self._call(const.SELECT_, const.ATTR_, attrNum, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) res = (attrNum.value, scope) self._attr_info[attrname] = res return res def clear_attr_from_cache(self, attrname): """Mark an attribute deleted in the name-to-number cache Will remove an attribute, and all attributes with higher numbers, from the attribute cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the attribute. Not this is NOT a string in Python 3! """ num, scope = self.attr_num(attrname) #All numbers higher than this are renumbered for a, n in list(self._attr_info.items()): if n[0] >= num: del self._attr_info[a] def clear_from_cache(self, varname): """Mark a variable deleted in the name-to-number cache Will remove a variable, and all variables with higher numbers, from the variable cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! """ num = self.var_num(varname) #All numbers higher than this are renumbered for v, n in list(self._var_nums.items()): if n >= num: del self._var_nums[v] def add_attr_to_cache(self, attrname, num, scope): """Add an attribute to the name-to-number cache This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable scope : bool True if global scope; False if variable scope. 
""" self._attr_info[attrname] = (num, scope) def add_to_cache(self, varname, num): """Add a variable to the name-to-number cache This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable """ self._var_nums[varname] = num #Note there is no function for delete, currently handled in Var.rename #and Attr.rename by just deleting from the dict directly. Maybe this #should be differen (maybe should be possible to follow a variable across #a rename...) class Var(MutableSequence): """ A CDF variable. This object does not directly store the data from the CDF; rather, it provides access to the data in a format that much like a Python list or numpy :class:`~numpy.ndarray`. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. The CDF user's guide, section 2.3, provides background on variables. .. note:: Not intended to be created directly; use methods of :class:`CDF` to gain access to a variable. A record-varying variable's data are viewed as a hypercube of dimensions n_dims+1 (the extra dimension is the record number). They are indexed in row-major fashion, i.e. the last index changes most frequently / is contiguous in memory. If the CDF is column-major, the data are transformed to row-major before return. Non record-varying variables are similar, but do not have the extra dimension of record number. Variables can be subscripted by a multidimensional index to return the data. Indices are in row-major order with the first dimension representing the record number. If the CDF is column major, the data are reordered to row major. Each dimension is specified by standard Python `slice <http://docs.python.org/tutorial/introduction.html#strings>`_ notation, with dimensions separated by commas. The ellipsis fills in any missing dimensions with full slices. The returned data are lists; Python represents multidimensional arrays as nested lists. The innermost set of lists represents contiguous data. .. note:: numpy 'fancy indexing' is *not* supported. Degenerate dimensions are 'collapsed', i.e. no list of only one element will be returned if a single subscript is specified instead of a range. (To avoid this, specify a slice like 1:2, which starts with 1 and ends before 2). Two special cases: 1. requesting a single-dimension slice for a record-varying variable will return all data for that record number (or those record numbers) for that variable. 2. Requests for multi-dimensional variables may skip the record-number dimension and simply specify the slice on the array itself. In that case, the slice of the array will be returned for all records. In the event of ambiguity (e.g., single-dimension slice on a one-dimensional variable), case 1 takes priority. Otherwise, mismatch between the number of dimensions specified in the slice and the number of dimensions in the variable will cause an :exc:`~exceptions.IndexError` to be thrown. This all sounds very complicated but it is essentially attempting to do the 'right thing' for a range of slices. An unusual case is scalar (zero-dimensional) non-record-varying variables. Clearly they cannot be subscripted normally. 
In this case, use the ``[...]`` syntax meaning 'access all data.': >>> import pycdf >>> testcdf = pycdf.CDF('test.cdf', '') >>> variable = testcdf.new('variable', recVary=False, ... type=pycdf.const.CDF_INT4) >>> variable[...] = 10 >>> variable <Var: CDF_INT4 [] NRV > >>> variable[...] 10 Reading any empty non-record-varying variable will return an empty with the same *number* of dimensions, but all dimensions will be of zero length. The scalar is, again, a special case: due to the inability to have a numpy array which is both zero-dimensional and empty, reading an NRV scalar variable with no data will return an empty one-dimensional array. This is really not recommended. As a list type, variables are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_; iterating over a variable returns a single complete record at a time. This is all clearer with examples. Consider a variable ``B_GSM``, with three elements per record (x, y, z components) and fifty records in the CDF. Then: 1. ``B_GSM[0, 1]`` is the y component of the first record. 2. ``B_GSM[10, :]`` is a three-element list, containing x, y, and z components of the 11th record. As a shortcut, if only one dimension is specified, it is assumed to be the record number, so this could also be written ``B_GSM[10]``. 3. ``B_GSM[...]`` reads all data for ``B_GSM`` and returns it as a fifty-element list, each element itself being a three-element list of x, y, z components. Multidimensional example: consider fluxes stored as a function of pitch angle and energy. Such a variable may be called Flux and stored as a two-dimensional array, with the first dimension representing (say) ten energy steps and the second, eighteen pitch angle bins (ten degrees wide, centered from 5 to 175 degrees). Assume 100 records stored in the CDF (i.e. 100 different times). 1. ``Flux[4]`` is a list of ten elements, one per energy step, each element being a list of 18 fluxes, one per pitch bin. All are taken from the fifth record in the CDF. 2. ``Flux[4, :, 0:4]`` is the same record, all energies, but only the first four pitch bins (roughly, field-aligned). 3. ``Flux[..., 0:4]`` is a 100-element list (one per record), each element being a ten-element list (one per energy step), each containing fluxes for the first four pitch bins. This slicing notation is very flexible and allows reading specifically the desired data from the CDF. All data are, on read, converted to appropriate Python data types; EPOCH, EPOCH16, and TIME_TT2000 types are converted to :class:`~datetime.datetime`. Data are returned in numpy arrays. .. note:: Although pycdf supports TIME_TT2000 variables, the Python :class:`~datetime.datetime` object does not support leap seconds. Thus, on read, any seconds past 59 are truncated to 59.999999 (59 seconds, 999 milliseconds, 999 microseconds). Potentially useful list methods and related functions: - `count <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `in <http://docs.python.org/reference/expressions.html#in>`_ - `index <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `len <http://docs.python.org/library/functions.html#len>`_ - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - `sorted <http://docs.python.org/library/functions.html#sorted>`_ The topic of array majority can be very confusing; good background material is available at `IDL Array Storage and Indexing <http://www.idlcoyote.com/misc_tips/colrow_major.html>`_. 
In brief, *regardless of the majority stored in the CDF*, pycdf will always present the data in the native Python majority, row-major order, also known as C order. This is the default order in `NumPy <http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html #internal-memory-layout-of-an-ndarray>`_. However, packages that render image data may expect it in column-major order. If the axes seem 'swapped' this is likely the reason. The :attr:`~Var.attrs` Python attribute acts as a dictionary referencing zAttributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`zAttrList` for more on the dictionary of attributes. With writing, as with reading, every attempt has been made to match the behavior of Python lists. You can write one record, many records, or even certain elements of all records. There is one restriction: only the record dimension (i.e. dimension 0) can be resized by write, as all records in a variable must have the same dimensions. Similarly, only whole records can be deleted. .. note:: Unusual error messages on writing data usually mean that pycdf is unable to interpret the data as a regular array of a single type matching the type and shape of the variable being written. A 5x4 array is supported; an irregular array where one row has five columns and a different row has six columns is not. Error messages of this type include: - ``Data must be well-formed, regular array of number, string, or datetime`` - ``setting an array element with a sequence.`` - ``shape mismatch: objects cannot be broadcast to a single shape`` For these examples, assume Flux has 100 records and dimensions [2, 3]. Rewrite the first record without changing the rest: >>> Flux[0] = [[1, 2, 3], [4, 5, 6]] Writes a new first record and delete all the rest: >>> Flux[...] = [[1, 2, 3], [4, 5, 6]] Write a new record in the last position and add a new record after: >>> Flux[99:] = [[[1, 2, 3], [4, 5, 6]], ... [[11, 12, 13], [14, 15, 16]]] Insert two new records between the current number 5 and 6: >>> Flux[5:6] = [[[1, 2, 3], [4, 5, 6]], [[11, 12, 13], ... [14, 15, 16]]] This operation can be quite slow, as it requires reading and rewriting the entire variable. (CDF does not directly support record insertion.) Change the first element of the first two records but leave other elements alone: >>> Flux[0:2, 0, 0] = [1, 2] Remove the first record: >>> del Flux[0] Removes record 5 (the sixth): >>> del Flux[5] Due to the need to work around a bug in the CDF library, this operation can be quite slow. Delete *all data* from ``Flux``, but leave the variable definition intact: >>> del Flux[...] .. note:: Although this interface only directly supports zVariables, zMode is set on opening the CDF so rVars appear as zVars. See p.24 of the CDF user's guide; pyCDF uses zMode 2. .. autosummary:: ~Var.attrs ~Var.compress ~Var.copy ~Var.dtype ~Var.dv ~Var.insert ~Var.name ~Var.rename ~Var.rv ~Var.shape ~Var.type .. attribute:: Var.attrs zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. .. automethod:: compress .. automethod:: copy .. autoattribute:: dtype .. automethod:: dv .. automethod:: insert .. automethod:: name .. automethod:: rename .. automethod:: rv .. autoattribute:: shape .. 
automethod:: type """ def __init__(self, cdf_file, var_name, *args): """Create or locate a variable Parameters ========== cdf_file : :py:class:`pycdf.CDF` CDF file containing this variable var_name : string name of this variable Other Parameters ================ args additional arguments passed to :py:meth:`_create`. If none, opens an existing variable. If provided, creates a new one. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning """ self.cdf_file = cdf_file #This is the definitive "identify" of variable self._name = None self._type = None #CDF type (long) self._raw = False #Raw access (skip all conversions) if len(args) == 0: self._get(var_name) else: self._create(var_name, *args) #Weak reference to attribute list (use attrs instead) #This avoids a reference loop self._attrlistref = weakref.ref(zAttrList(self)) def __getitem__(self, key): """Returns a slice from the data array. Details under :py:class:`pycdf.Var`. @return: The data from this variable @rtype: list-of-lists of appropriate type. @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) #Hyperslice mostly catches this sort of thing, but #an empty variable is a special case, since we might want to #WRITE to 0th record (which Hyperslice also supports) but #can't READ from it, and iterating over tries to read from it. if hslice.rv: if hslice.dimsizes[0] == 0 and hslice.degen[0] and \ hslice.starts[0] == 0: raise IndexError('record index out of range') #For NRV, again hslice will assume 0th record exists since we might #want to write. So ANY degenerate dim other than the glued-on 0th #suggests an explicit index that should fail. None degenerate suggests #make an empty array. #Note this is pulling a lot of hyperslice stuff into getitem! elif hslice.dimsizes[0] == 0: if len(hslice.degen) > 1 and max(hslice.degen[1:]): raise IndexError('record index out of range') else: #The zero-length dimension is degenerate so it gets chopped, #and you can't have a zero-length numpy array that still #maintains the size of all other dimensions. So just force #a zero-dim array and the rest will follow hslice.counts[...] = 0 #If this is a scalar, need to make a single non-degenerate #dimension so it can be empty. if len(hslice.counts) == 1: hslice.degen[0] = False result = hslice.create_array() if hslice.counts[0] != 0: hslice.select() lib.call(const.GET_, const.zVAR_HYPERDATA_, result.ctypes.data_as(ctypes.c_void_p)) return hslice.convert_input_array(result) def __delitem__(self, key): """Removes a record (or set of records) from the CDF Only whole records can be deleted, so the del call must either specify only one dimension or it must specify all elements of the non-record dimensions. This is *not* a way to resize a variable! Deleting records from the middle of a variable may be very slow in some circumstances. To work around a bug in CDF library versions 3.4.0 and before, all the data must be read in, the requested deletions done, and then all written back out. 
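For example (illustrative; ``Flux`` is assumed to be a record-varying zVariable in an open, writable CDF):

>>> del Flux[0]       # remove the first record
>>> del Flux[5:10]    # remove records 5 through 9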
@param key: index or slice to delete @type key: int or slice @raise TypeError: if an attempt is made to delete from a non record-varying variable, or to delete below the record level """ if not self.rv(): raise TypeError('Cannot delete records from non-record-varying ' 'variable.') hslice = _Hyperslice(self, key) if hslice.dims > 1 and (hslice.counts[1:] != hslice.dimsizes[1:]).any(): raise TypeError('Can only delete entire records.') if hslice.counts[0] == 0: return start = hslice.starts[0] count = hslice.counts[0] interval = hslice.intervals[0] dimsize = hslice.dimsizes[0] self._call() dangerous_delete = False if lib._del_middle_rec_bug and \ (interval != 1 or (start != 0 and start + count < dimsize)): #delete from middle is dangerous if only have one index entry entries = ctypes.c_long(0) lib.call(const.GET_, const.zVAR_nINDEXENTRIES_, ctypes.byref(entries)) dangerous_delete = (entries.value == 1) if dangerous_delete: data = self[...] data = numpy.delete( data, numpy.arange(start, start + count * interval, interval), 0) self[0:dimsize - count] = data first_rec = dimsize - count last_rec = dimsize - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif interval == 1: first_rec = ctypes.c_long(start) last_rec = ctypes.c_long(start + count - 1) lib.call(const.DELETE_, const.zVAR_RECORDS_, first_rec, last_rec) else: self._call() #delete from end to avoid renumbering of records for recno in range(start + (count - 1) * interval, start - 1, -1 * interval): lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(recno), ctypes.c_long(recno)) def __setitem__(self, key, data): """Puts a slice into the data array. Details under :py:class:`pycdf.Var`. @param key: index or slice to store @type key: int or slice @param data: data to store @type data: numpy.array @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. 
IndexError will @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) n_recs = hslice.counts[0] hslice.expand(data) cdf_type = self.type() if cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) else: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=self._np_type()) if cdf_type == const.CDF_EPOCH16.value: datashape = data.shape[:-1] else: datashape = data.shape #Check data sizes if datashape != tuple(hslice.expected_dims()): raise ValueError('attempt to assign data of dimensions ' + str(datashape) + ' to slice of dimensions ' + str(tuple(hslice.expected_dims()))) #Flip majority and reversed dimensions, see convert_input_array data = hslice.convert_output_array(data) #Handle insertions and similar weirdness if hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Specified slice ends before last record, so insert in middle saved_data = self[hslice.starts[0] + n_recs:] if hslice.counts[0] > 0: hslice.select() lib.call(const.PUT_, const.zVAR_HYPERDATA_, data.ctypes.data_as(ctypes.c_void_p)) if hslice.counts[0] < n_recs: first_rec = hslice.starts[0] + hslice.counts[0] last_rec = hslice.dimsizes[0] - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Put saved data in after inserted data self[hslice.starts[0] + hslice.counts[0]:] = saved_data def extend(self, data): """ Append multiple values to the end of this variable This is an efficiency function which overrides the base implementation in MutableSequence. Parameters ---------- data : the data to append """ self[len(self):] = data def insert(self, index, data): """ Inserts a *single* record before an index Parameters ---------- index : int index before which to insert the new record data : the record to insert """ self[index:index] = [data] def _create(self, var_name, datatype, n_elements = 1, dims = (), recVary = const.VARY, dimVarys = None): """Creates a new zVariable @param var_name: name of this variable @type var_name: string @param datatype: CDF data type @type datatype: ctypes.c_long @param n_elements: number of elements (should be 1 except for CDF_CHAR variables). @type n_elements: long @param dims: size of each dimension for multi-dimensional variable, or empty for a zero-dimensional @type dims: sequence of long @param recVary: record variance for this variable (VARY/NOVARY) @type recVary: long @param dimVarys: array of VARY or NOVARY, variance for each dimension @type dimVarys: sequence of long @return: new variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.new}. 
""" dim_array = (ctypes.c_long * len(dims))(*dims) enc_name = var_name.encode('ascii') if dimVarys is None: dim_vary_array = (ctypes.c_long * (len(dims) if len(dims) > 0 else 1))(const.VARY) else: dim_vary_array = (ctypes.c_long * len(dims))(*dimVarys) varNum = ctypes.c_long(0) self.cdf_file._call(const.CREATE_, const.zVAR_, enc_name, datatype, ctypes.c_long(n_elements), ctypes.c_long(len(dims)), dim_array, recVary, dim_vary_array, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) def _delete(self): """Removes this zVariable from the CDF @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ self._call(const.DELETE_, const.zVAR_) self.cdf_file.clear_from_cache(self._name) self._name = None def _get(self, var_name): """Gets an existing zVariable @param var_name: name of this variable @type var_name: string @return: variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.__getitem__}. """ if isinstance(var_name, str_classes): try: enc_name = var_name.encode('ascii').rstrip() except AttributeError: enc_name = var_name.rstrip() #already in ASCII #'touch' CDF to cause an error if the name isn't there; get number varNum = ctypes.c_long(0) self.cdf_file._call(const.GET_, const.zVAR_NUMBER_, enc_name, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) else: #Looking up by number name = ctypes.create_string_buffer(const.CDF_VAR_NAME_LEN256+1) self.cdf_file._call(const.SELECT_, const.zVAR_, ctypes.c_long(var_name), const.GET_, const.zVAR_NAME_, name) self._name = name.value.rstrip() self.cdf_file.add_to_cache(self._name, var_name) def _num(self): """Returns the zVar number for this variable @return: number of this zVar @rtype: int """ return self.cdf_file.var_num(self._name) def __len__(self): """Get number of records for this variable in this file @return: Number of records @rtype: long @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ count = ctypes.c_long(0) self._call(const.GET_, const.zVAR_MAXREC_, ctypes.byref(count)) return (count.value + 1) def __repr__(self): """Returns representation of the variable Cannot return anything that can be eval'd to create a copy, so just wrap the informal representation in angle brackets. @return: info on this zVar @rtype: str """ return '<Var:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the variable This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.Var`. 
@return: info on this zVar, CDFTYPE [dimensions] NRV (if not record-varying) @rtype: str """ if self.cdf_file._opened: cdftype = self.type() chartypes = (const.CDF_CHAR.value, const.CDF_UCHAR.value) rv = self.rv() typestr = lib.cdftypenames[cdftype] + \ ('*' + str(self._nelems()) if cdftype in chartypes else '' ) if rv: sizestr = str([len(self)] + self._dim_sizes()) else: sizestr = str(self._dim_sizes()) return typestr + ' ' + sizestr + ('' if rv else ' NRV') else: if isinstance(self._name, str): return 'zVar "{0}" in closed CDF {1}'.format( self._name, self.cdf_file.pathname) else: return 'zVar "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self.cdf_file.pathname.decode('ascii')) def _n_dims(self): """Get number of dimensions for this variable @return: the number of dimensions @rtype: long """ n_dims = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMDIMS_, ctypes.byref(n_dims)) return n_dims.value def _dim_sizes(self): """Get the dimension sizes for this variable @return: sequence of sizes @rtype: sequence of long @note: This will always be in Python order (i.e. row major, last index iterates most quickly), *regardless* of the majority of the CDF. """ sizes = (ctypes.c_long * const.CDF_MAX_DIMS)(0) self._call(const.GET_, const.zVAR_DIMSIZES_, sizes) sizes = sizes[0:self._n_dims()] return sizes def rv(self, new_rv=None): """ Gets or sets whether this variable has record variance If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Other Parameters ================ new_rv : boolean True to change to record variance, False to change to NRV, unspecified to simply check variance. Returns ======= out : Boolean True if record varying, False if NRV """ if new_rv != None: self._call(const.PUT_, const.zVAR_RECVARY_, const.VARY if new_rv else const.NOVARY) vary = ctypes.c_long(0) self._call(const.GET_, const.zVAR_RECVARY_, ctypes.byref(vary)) return vary.value != const.NOVARY.value # MASKED: dv function (lines 3203-3236) def _call(self, *args, **kwargs): """Select this CDF and variable and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. @param args: Passed directly to the CDF library interface. Useful constants are defined in the :py:mod:`pycdf.const` module of this package. @type args: various, see :py:mod:`ctypes`. @return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self.cdf_file._call( const.SELECT_, const.zVAR_, ctypes.c_long(self.cdf_file.var_num(self._name)), *args, **kwargs) def _np_type(self): """Returns the numpy type of this variable This is the numpy type that will come directly out of the CDF; see :meth:`dtype` for the representation post-conversion. 
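For example (illustrative), a CDF_CHAR variable with 10 elements maps to the numpy dtype ``S10``, while a CDF_DOUBLE variable maps to ``float64``.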
Raises ====== CDFError : for library-reported error or failure to find numpy type Returns ======= out : dtype numpy dtype that will hold value from this variable """ cdftype = self.type() if cdftype == const.CDF_CHAR.value or cdftype == const.CDF_UCHAR.value: return numpy.dtype('S' + str(self._nelems())) try: return lib.numpytypedict[cdftype] except KeyError: raise CDFError(const.BAD_DATA_TYPE) def type(self, new_type=None): """ Returns or sets the CDF type of this variable Parameters ========== new_type : ctypes.c_long the new type from :mod:`~pycdf.const` Returns ======= out : int CDF type """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) n_elements = ctypes.c_long(self._nelems()) self._call(const.PUT_, const.zVAR_DATASPEC_, new_type, n_elements) self._type = None if self._type is None: cdftype = ctypes.c_long(0) self._call(const.GET_, const.zVAR_DATATYPE_, ctypes.byref(cdftype)) self._type = cdftype.value return self._type def _nelems(self): """Number of elements for each value in this variable This is the length of strings for CHAR and UCHAR, should be 1 otherwise. @return: length of strings @rtype: int """ nelems = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMELEMS_, ctypes.byref(nelems)) return nelems.value def name(self): """ Returns the name of this variable Returns ======= out : str variable's name """ if isinstance(self._name, str): return self._name elif isinstance(self._name, bytes): return self._name.decode() def compress(self, comptype=None, param=None): """ Set or check the compression of this variable Compression may not be changeable on variables with data already written; even deleting the data may not permit the change. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) Returns ======= out : tuple the (comptype, param) currently in effect """ return _compress(self, comptype, param) def copy(self): """ Copies all data and attributes from this variable Returns ======= out : :class:`VarCopy` list of all data in record order """ return VarCopy(self) def rename(self, new_name): """ Renames this variable Parameters ========== new_name : str the new name for this variable """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_VAR_NAME_LEN256: raise CDFError(const.BAD_VAR_NAME) self._call(const.PUT_, const.zVAR_NAME_, enc_name) self.cdf_file.add_to_cache( enc_name, self.cdf_file.var_num(self._name)) #Still in cache del self.cdf_file._var_nums[self._name] self._name = enc_name @property def shape(self): """ Provides the numpy array-like shape of this variable. Returns a tuple; first element is number of records (RV variable only) And the rest provide the dimensionality of the variable. .. note:: Assigning to this attribute will not change the shape. """ if self.rv(): return tuple([len(self)] + self._dim_sizes()) else: return tuple(self._dim_sizes()) @property def dtype(self): """ Provide the numpy dtype equivalent to the CDF type of this variable. 
Data from this variable will be returned in numpy arrays of this type. See Also -------- type """ cdftype = self.type() if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str is not bytes and not self._raw: return numpy.dtype('U' + str(self._nelems())) if cdftype in (const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value) and not self._raw: return numpy.dtype('O') return self._np_type() def _get_attrs(self): """Get attribute list Provide access to the zVar's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = zAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. """) class _Hyperslice(object): """Represents a CDF 'slice' used for the hyper CDF functions For internal module use only. @ivar dims: number of dimensions to this slice, usually number of dimensions to the variable plus one for the record, which represents the 0th (least rapidly varying) dimension. @type dims: int @ivar dimsizes: size of each dimension (0th is number of records) @type dimsizes: list of int @ivar starts: index of the start value for each dimension ('dimension indices' in CDF speak) @type starts: list of int @ivar counts: number of values to get from each dimension. Final result will be the product of everything in counts. ('dimension counts' in CDF speak) @type counts: numpy.array @ivar intervals: interval between successive indices to use for each dimension. ('dimension invervals' in CDF speak) @type intervals: list of int @ivar degen: is this dimension degenerate, i.e. should be removed in the returned dataset. A 3D array with one dimension degenerate will be returned as a 2D array (i.e. list-of-lists.) @type degen: numpy.array @ivar rev: should this dimension be returned in reverse order? @type rev: numpy.array @ivar column: is this slice in column-major mode (if false, row-major) @type column: boolean @ivar zvar: what CDF variable this object slices on @type zvar: :py:class:`pycdf.Var` @ivar expanded_key: fully-expanded version of the key passed to the constructor (all dimensions filled in) @type expanded_key: tuple @note: All dimension-related variables are stored row-major (Python order) """ def __init__(self, zvar, key): """Create a Hyperslice @param zvar: zVariable that this slices @type zvar: :py:class:`pycdf.Var` @param key: Python multi-dimensional slice as passed to __getitem__ @type key: tuple of slice and/or int @raise IndexError: if slice is out of range, mismatches dimensions, or otherwise unparsable. @raise ValueError: if slice has invalid values """ self.zvar = zvar self.rv = self.zvar.rv() #dim of records, + 1 record dim (NRV always is record 0) self.dims = zvar._n_dims() + 1 self.dimsizes = [len(zvar)] + \ zvar._dim_sizes() self.starts = [0] * self.dims self.counts = numpy.empty((self.dims,), dtype=numpy.int32) self.counts.fill(1) self.intervals = [1] * self.dims self.degen = numpy.zeros(self.dims, dtype=numpy.bool) self.rev = numpy.zeros(self.dims, dtype=numpy.bool) #key is: #1. 
a single value (integer or slice object) if called 1D #2. a tuple (of integers and/or slice objects) if called nD #3. Each item is either a single value (degenerate dim) # or a slice object. if not hasattr(key, '__len__'): #Not a container object, pack in tuple key = (key, ) if not self.rv: key = (0, ) + key #NRV, so always get 0th record (degenerate) key = self.expand_ellipsis(key, self.dims) if self.rv: #special-cases for RV variables if len(key) == 1: #get all data for this record(s) key = self.expand_ellipsis(key + (Ellipsis, ), self.dims) elif len(key) == self.dims - 1: #get same slice from each record key = (slice(None, None, None), ) + key if len(key) == self.dims: self.expanded_key = key for i in range(self.dims): idx = key[i] if hasattr(idx, 'start'): #slice (self.starts[i], self.counts[i], self.intervals[i], self.rev[i]) = \ self.convert_range(idx.start, idx.stop, idx.step, self.dimsizes[i]) else: #Single degenerate value if idx < 0: idx += self.dimsizes[i] if idx != 0 and (idx >= self.dimsizes[i] or idx < 0): raise IndexError('list index out of range') self.starts[i] = idx self.degen[i] = True else: raise IndexError('Slice does not match dimensions for zVar ' + str(zvar._name)) self.column = zvar.cdf_file.col_major() def expected_dims(self, data=None): """Calculate size of non-degenerate dimensions Figures out size, in each dimension, of expected input data @return: size of each dimension for this slice, excluding degenerate @rtype: list of int """ return [self.counts[i] for i in range(self.dims) if not self.degen[i]] def expand(self, data): """Expands the record dimension of this slice to hold a set of data If the length of data (outermost dimension) is larger than the record count (counts[0]) for this slice, expand the slice to hold all the data. This requires that the record dimension of the slice not be degenerate, and also that it not have been completely specified when the hyperslice was created (i.e. record dimension either ellipsis or no specified stop.) Does *not* expand any other dimension, since that's Very Hard in CDF. @param data: the data which are intended to be stored in this slice @type data: list """ rec_slice = self.expanded_key[0] if not self.rv or isinstance(data, str_classes) or self.degen[0] or \ not hasattr(rec_slice, 'stop'): return if len(data) < self.counts[0]: #Truncate to fit data if rec_slice.stop is None and rec_slice.step in (None, 1): self.counts[0] = len(data) elif len(data) > self.counts[0]: #Expand to fit data if rec_slice.step in (None, 1): self.counts[0] = len(data) def create_array(self): """Creates a numpy array to hold the data from this slice Returns ======= out : numpy.array array sized, typed, and dimensioned to hold data from this slice """ counts = self.counts degen = self.degen if self.column: counts = self.reorder(counts) degen = self.reorder(degen) #TODO: Forcing C order for now, revert to using self.column later array = numpy.empty( [counts[i] for i in range(len(counts)) if not degen[i]], self.zvar._np_type(), order='C') return numpy.require(array, requirements=('C', 'A', 'W')) def convert_input_array(self, buffer): """Converts a buffer of raw data from this slice EPOCH(16) variables always need to be converted. 
CHAR need converted to Unicode if py3k Parameters ========== buffer : numpy.array data as read from the CDF file Returns ======= out : numpy.array converted data """ result = self._flip_array(buffer) #Convert to derived types cdftype = self.zvar.type() if not self.zvar._raw: if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str != bytes: dt = numpy.dtype('U{0}'.format(result.dtype.itemsize)) result = numpy.require(numpy.char.array(result).decode(), dtype=dt) elif cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(result) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(result) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(result) return result def convert_output_array(self, buffer): """Convert a buffer of data that will go into this slice Parameters ========== buffer : numpy.array data to go into the CDF file Returns ======= out : numpy.array input with majority flipped and dimensions reversed to be suitable to pass directly to CDF library. """ buffer = self._flip_array(buffer) return numpy.require(buffer, requirements=('C', 'A', 'W')) def _flip_array(self, data): """ Operations for majority, etc. common between convert_input and _output """ cdftype = self.zvar.type() #Flip majority if any non-degenerate dimensions exist if self.column and not min(self.degen): #Record-number dim degen, swap whole thing if self.degen[0]: if cdftype == const.CDF_EPOCH16.value: #Maintain last dimension data = data.transpose( list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose() #Record-number dimension is not degenerate, so keep it first else: if cdftype == const.CDF_EPOCH16.value: data = data.transpose( [0] + list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose( [0] + list(range(len(data.shape) - 1, 0, -1))) #Reverse non-degenerate dimensions in rev #Remember that the degenerate indices are already gone! if self.rev.any(): sliced = [(slice(None, None, -1) if self.rev[i] else slice(None)) for i in range(self.dims) if not self.degen[i]] if cdftype == const.CDF_EPOCH16.value: #don't reverse last dim sliced.extend(slice(None)) data = operator.getitem(data, tuple(sliced)) return data def select(self): """Selects this hyperslice in the CDF Calls the CDF library to select the CDF, variable, records, and array elements corresponding to this slice. 
""" args = (const.SELECT_, const.zVAR_RECNUMBER_, ctypes.c_long(self.starts[0]), const.SELECT_, const.zVAR_RECCOUNT_, ctypes.c_long(self.counts[0]), const.SELECT_, const.zVAR_RECINTERVAL_, ctypes.c_long(self.intervals[0])) if self.dims > 1: dims = self.dims - 1 args += (const.SELECT_, const.zVAR_DIMINDICES_, (ctypes.c_long * dims)(*self.starts[1:]), const.SELECT_, const.zVAR_DIMCOUNTS_, (ctypes.c_long * dims)(*self.counts[1:]), const.SELECT_, const.zVAR_DIMINTERVALS_, (ctypes.c_long * dims)(*self.intervals[1:])) self.zvar._call(*args) @staticmethod def expand_ellipsis(slices, n_dims): """Expands any ellipses into correct number of full-size slices @param slices: tuple of slices, integers, or ellipse objects @type slices: tuple @param n_dims: number of dimensions this slice is over @type n_dims: int @return: L{slices} with ellipses replaced by appropriate number of full-dimension slices @rtype: tuple @raise IndexError: if ellipses specified when already have enough dimensions """ if slices is Ellipsis: return tuple([slice(None, None, None) for i in range(n_dims)]) #Elements might be numpy arrays, so can't use in/index idx = [i for i, v in enumerate(slices) if v is Ellipsis] if not idx: #no ellipsis return slices if len(idx) > 1: #multiples! raise IndexError('Ellipses can only be used once per slice.') idx = idx[0] #how many dims to expand ellipsis to #remember the ellipsis is in len(slices) and must be replaced! extra = n_dims - len(slices) + 1 if extra < 0: raise IndexError('too many indices') result = slices[0:idx] + (slice(None), ) * extra + slices[idx+1:] return result @staticmethod def check_well_formed(data): """Checks if input data is well-formed, regular array""" d = numpy.asanyarray(data) if d.dtype == numpy.object: #this is probably going to be bad try: len(d.flat[0]) except TypeError: #at least it's not a list pass else: raise ValueError( 'Data must be well-formed, regular array of number, ' 'string, or datetime') @staticmethod def dimensions(data): """Finds the dimensions of a nested list-of-lists @param data: data of which dimensions are desired @type data: list (of lists) @return: dimensions of L{data}, in order outside-in @rtype: list of int @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) _Hyperslice.check_well_formed(d) return d.shape @staticmethod def types(data, backward=False): """Find dimensions and valid types of a nested list-of-lists Any given data may be representable by a range of CDF types; infer the CDF types which can represent this data. This breaks down to: 1. Proper kind (numerical, string, time) 2. Proper range (stores highest and lowest number) 3. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: 1. Type that matches precision of data first, then 2. integer type before float type, then 3. Smallest type first, then 4. signed type first, then 5. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if L{data} specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: 1. absolute values between 0 and 3e-39 2. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. 
@param data: data for which dimensions and CDF types are desired @type data: list (of lists) @param backward: limit to pre-CDF3 types @type backward: bool @return: dimensions of L{data}, in order outside-in; CDF types which can represent this data; number of elements required (i.e. length of longest string) @rtype: 3-tuple of lists ([int], [ctypes.c_long], [int]) @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) dims = d.shape elements = 1 types = [] _Hyperslice.check_well_formed(d) if d.dtype.kind in ('S', 'U'): #it's a string types = [const.CDF_CHAR, const.CDF_UCHAR] elements = d.dtype.itemsize if d.dtype.kind == 'U': #UTF-8 uses 4 bytes per elements //= 4 elif d.size and hasattr(numpy.ma.getdata(d).flat[0], 'microsecond'): if max((dt.microsecond % 1000 for dt in d.flat)) > 0: types = [const.CDF_EPOCH16, const.CDF_EPOCH, const.CDF_TIME_TT2000] else: types = [const.CDF_EPOCH, const.CDF_EPOCH16, const.CDF_TIME_TT2000] if backward: del types[types.index(const.CDF_EPOCH16)] del types[-1] elif not lib.supports_int8: del types[-1] elif d is data or isinstance(data, numpy.generic): #numpy array came in, use its type (or byte-swapped) types = [k for k in lib.numpytypedict if (lib.numpytypedict[k] == d.dtype or lib.numpytypedict[k] == d.dtype.newbyteorder()) and not k in lib.timetypes] if (not lib.supports_int8 or backward) \ and const.CDF_INT8.value in types: del types[types.index(const.CDF_INT8.value)] #Maintain priority to match the ordered lists below: #float/double (44, 45) before real (21/22), and #byte (41) before int (1) before char (51). So hack. #Consider making typedict an ordered dict once 2.6 is dead. types.sort(key=lambda x: x % 50, reverse=True) if not types: #not a numpy array, or can't parse its type if d.dtype.kind == 'O': #Object. 
Try to make it numeric #Can't do safe casting from Object, so try and compare #Basically try most restrictive to least restrictive trytypes = (numpy.uint64, numpy.int64, numpy.float64) for t in trytypes: try: newd = d.astype(dtype=t) except: #Failure to cast, try next type continue if (newd == d).all(): #Values preserved, use this type d = newd #Continue with normal guessing, as if a list break else: #fell through without a match raise ValueError( 'Cannot convert generic objects to CDF type.') if d.dtype.kind in ('i', 'u'): #integer minval = numpy.min(d) maxval = numpy.max(d) if minval < 0: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_INT2, const.CDF_INT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 15, 2 ** 31, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] else: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_UINT1, const.CDF_INT2, const.CDF_UINT2, const.CDF_INT4, const.CDF_UINT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 8, 2 ** 15, 2 ** 16, 2 ** 31, 2 ** 32, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] types = [t for (t, c) in zip(types, cutoffs) if c > maxval and (minval >= 0 or minval >= -c)] if (not lib.supports_int8 or backward) \ and const.CDF_INT8 in types: del types[types.index(const.CDF_INT8)] else: #float if dims is (): if d != 0 and (abs(d) > 1.7e38 or abs(d) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] else: absolutes = numpy.abs(d[d != 0]) if len(absolutes) > 0 and \ (numpy.max(absolutes) > 1.7e38 or numpy.min(absolutes) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] types = [t.value if hasattr(t, 'value') else t for t in types] return (dims, types, elements) @staticmethod def reorder(seq): """Reorders seq to switch array majority Used to take an array of subscripts between row and column majority. First element is not touched, being the record number. @param seq: a sequence of *subscripts* @type seq: sequence of integers @return: seq with all but element 0 reversed in order @rtype: sequence of integers """ return numpy.concatenate((seq[0:1], numpy.flipud(seq)[:-1])) @staticmethod def convert_range(start, stop, step, size): """Converts a start/stop/step range to start/count/interval (i.e. changes from Python-style slice to CDF-style) @param start: index to start a slice at, may be none or negative @type start: int @param stop: index at end of slice (one-past, standard Python), may be none or negative @type stop: int @param step: interval for stepping through stlice @type step: int @param size: size of list to slice @type size: int @return: (start, count, interval, rev) where: 1. start is the start index, normalized to be within the size of the list and negatives handled 2. count is the number of records in the slice, guaranteed to stop before the end 3. interval is the skip between records 4. rev indicates whether the sequence should be reversed @rtype: (int, int, int, boolean) """ (start, stop, step) = slice(start, stop, step).indices(size) if step < 0: step *= -1 count = int((start - stop + step - 1) / step) start = start - (count - 1) * step rev = True else: count = int((stop - start + step - 1) / step) rev = False if count < 0: count = 0 start = 0 return (start, count, step, rev) class Attr(MutableSequence): """An attribute, g or z, for a CDF .. 
warning:: This class should not be used directly, but only in its subclasses, :class:`gAttr` and :class:`zAttr`. The methods listed here are safe to use in the subclasses. Represents a CDF attribute, providing access to the Entries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. An introduction to CDF attributes can be found in section 2.4 of the CDF user's guide. Each element of the list is a single Entry of the appropriate type. The index to the elements is the Entry number. Multi-dimensional slicing is *not* supported; an Entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry .. autosummary:: ~Attr.append ~Attr.has_entry ~Attr.insert ~Attr.max_idx ~Attr.new ~Attr.number ~Attr.rename ~Attr.type .. automethod:: append .. automethod:: has_entry .. automethod:: insert .. automethod:: max_idx .. automethod:: new .. automethod:: number .. automethod:: rename .. automethod:: type """ def __init__(self, cdf_file, attr_name, create=False): """Initialize this attribute @param cdf_file: CDF file containing this attribute @type cdf_file: :py:class:`pycdf.CDF` @param attr_name: Name of this attribute @type attr_name: str @param create: True to create attribute, False to look up existing. @type create: bool """ self._cdf_file = cdf_file self._raw = False if isinstance(attr_name, str_classes): try: self._name = attr_name.encode('ascii') except AttributeError: self._name = attr_name attrno = ctypes.c_long() if create: self._cdf_file._call(const.CREATE_, const.ATTR_, self._name, self.SCOPE, ctypes.byref(attrno)) self._cdf_file.add_attr_to_cache( self._name, attrno.value, self.SCOPE == const.GLOBAL_SCOPE) else: #Ensure exists, and populate cache. See scope note below attrno, scope = self._cdf_file.attr_num(self._name) else: name = ctypes.create_string_buffer(const.CDF_ATTR_NAME_LEN256 + 1) scope = ctypes.c_long(0) self._cdf_file._call(const.SELECT_, const.ATTR_, ctypes.c_long(attr_name)) #Because it's possible to create a gAttr Python objecting #referencing an Attribute with variable scope, and vice-versa, #do NOT assume the scope matches #(Higher level code checks for that being a bad thing.) self._cdf_file._call( const.GET_, const.ATTR_NAME_, name, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) self._name = name.value.rstrip() if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) self._cdf_file.add_attr_to_cache(self._name, attr_name, scope) def __getitem__(self, key): """Return a slice of Entries. Because Attributes may be sparse, a multi-element slice will return None for those elements which do not have associated Entries. @param key: index or range of Entry number to return @type key: slice or int @return: a list of entries, appropriate type. @raise IndexError: if L{key} is an int and that Entry number does not exist. 
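Example (illustrative values; assumes ``attr`` has Entries 0 and 2 defined but not 1):

    >>> attr[0:3]
    [1, None, 3]
    >>> attr[1]
    Traceback (most recent call last):
        ...
    IndexError: list index 1 out of range.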
""" if key is Ellipsis: key = slice(None, None, None) if hasattr(key, 'indices'): idx = range(*key.indices(self.max_idx() + 1)) return [self._get_entry(i) if self.has_entry(i) else None for i in idx] else: if self.has_entry(key): return self._get_entry(key) else: raise IndexError('list index ' + str(key) + ' out of range.') def _check_other_entries(self, types): """Try to get the type of this entry from others in the Attribute For zAttrs, checks if all other Entries are the same type, and at least one doesn't match its zVar, i.e. Entry type dominates (otherwise assumption is the Var type dominates). For gAttrs, checks all other Entries, and gives priority to the one that's earliest in the possible type list and exists in other Entries. This is only one component of Entry type guessing! :param list types: CDF types that are candidates (match the data) :return: The type discerned from other Entries, or None """ if self.ENTRY_ == const.zENTRY_: #If everything else is the same entry type, #and one is not the same as its var, probably #all entries should be of that type cand_et = None #The Entry type that might work one_var_diff = False #One Var has a type different from Entry for num in range(self.max_idx() + 1): if not self.has_entry(num): continue vartype = self._cdf_file[num].type() entrytype = self.type(num) if vartype != entrytype: one_var_diff = True if cand_et is None: if not entrytype in types: return None #One var has Entry with "impossible" type cand_et = entrytype elif cand_et != entrytype: return None #Two vars have Entries with different types if one_var_diff and cand_et is not None: return cand_et else: # Of those types which exist in other entries, # find the one which is earliest # in types, i.e. the preferred type entrytypes = [self.type(num) for num in range(self.max_idx() + 1) if self.has_entry(num)] entrytypes = [et for et in entrytypes if et in types] if entrytypes: return types[ min([types.index(et) for et in entrytypes])] return None def __setitem__(self, key, data): """Set a slice of Entries. @param key: index or range of Entry numbers to set @type key: slice or int @param data: the data to set these entries to. Normally each entry should be a sequence; if a scalar is provided, it is treated as a single-element list. @type data: scalar or list @raise ValueError: if size of {data} does not match size of L{key} @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. 
""" if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): #Single value, promote everything a dimension idx = (key, key + 1, 1) data = [data] else: idx = key.indices(self.max_idx() + 1) if key.step is None or key.step > 0: #Iterating forward, extend slice to match data if len(data) > len(range(*idx)): idx = (idx[0], idx[0] + idx[2] * len(data), idx[2]) #get, and check, types and sizes for all data #checks first so don't have error after changing half the Entries data_idx = -1 typelist = [] for i in range(*idx): data_idx += 1 if data_idx >= len(data): continue datum = data[data_idx] if datum is None: typelist[i] = (None, None, None) continue (dims, types, elements) = _Hyperslice.types( datum, backward=self._cdf_file.backward) if len(types) <= 0: raise ValueError('Cannot find a matching CDF type.') if len(dims) > 1: raise ValueError('Entries must be scalar or 1D.') elif len(dims) == 1 and isinstance(datum[0], str_classes): raise ValueError('Entry strings must be scalar.') entry_type = None if self.has_entry(i): #If the entry already exists, match its type entry_type = self.type(i) if not entry_type in types: entry_type = None if entry_type is None: #Check other entries for this attribute entry_type = self._check_other_entries(types) if entry_type is None and self.ENTRY_ == const.zENTRY_: #Fall back to zVar type vartype = self._cdf_file[i].type() if vartype in types: entry_type = vartype else: entry_type = types[0] elif entry_type is None: entry_type = types[0] if not entry_type in lib.numpytypedict: raise ValueError('Cannot find a matching numpy type.') typelist.append((dims, entry_type, elements)) data_idx = -1 for i in range(*idx): data_idx += 1 if data_idx >= len(data) or data[data_idx] is None: if self.has_entry(i): del self[i] continue datum = data[data_idx] (dims, entry_type, elements) = typelist[data_idx] self._write_entry(i, datum, entry_type, dims, elements) def __delitem__(self, key): """Delete a slice of Entries. @param key: index or range of Entry numbers to delete @type key: slice or int @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. """ if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): idx = (key, key + 1, 1) else: idx = key.indices(self.max_idx() + 1) for i in range(*idx): self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(i), const.DELETE_, self.ENTRY_) def __iter__(self, current=0): """Iterates over all entries in this Attribute Returns data from one entry at a time until reaches the end. @note: Returned in entry-number order. """ while current <= self.max_idx(): if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current += 1 def __reversed__(self, current=None): """Iterates over all entries in this Attribute Returns data from one entry at a time, starting at end and going to beginning. @note: Returned in entry-number order. """ if current is None: current = self.max_idx() while current >= 0: if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current -= 1 def __len__(self): """Number of Entries for this Attr. NOT same as max Entry number. 
@return: Number of Entries @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_NUMENTRIES_, ctypes.byref(count)) return count.value def __repr__(self): """Returns representation of an attribute Cannot return anything that can be eval'd to create a copy of the attribtute, so just wrap the informal representation in angle brackets. @return: all the data in this attribute @rtype: str """ return '<\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute This is an 'informal' representation in that it cannot be evaluated directly to create an L{Attr}. @return: all the data in this attribute @rtype: str """ if self._cdf_file._opened: return '\n'.join([str(item) for item in self]) else: if isinstance(self._name, str): return 'Attribute "{0}" in closed CDF {1}'.format( self._name, self._cdf_file.pathname) else: return 'Attribute "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self._cdf_file.pathname.decode('ascii')) def insert(self, index, data): """Insert an entry at a particular number Inserts entry at particular number while moving all subsequent entries to one entry number later. Does not close gaps. Parameters ========== index : int index where to put the new entry data : data for the new entry """ max_entry = self.max_idx() if index > max_entry: #Easy case self[index] = data return for i in range(max_entry, index - 1, -1): if self.has_entry(i+1): self.__delitem__(i+1) if self.has_entry(i): self.new(self.__getitem__(i), type=self.type(i), number=i+1) self[index] = data def append(self, data): """Add an entry to end of attribute Puts entry after last defined entry (does not fill gaps) Parameters ========== data : data for the new entry """ self[self.max_idx() + 1] = data def _call(self, *args, **kwargs): """Select this CDF and Attr and call the CDF internal interface @param args: Passed directly to the CDF library interface. @type args: various, see :py:mod:`ctypes`. @return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self._cdf_file._call( const.SELECT_, const.ATTR_, ctypes.c_long(self._cdf_file.attr_num(self._name)[0]), *args, **kwargs) def _entry_len(self, number): """Number of elements in an Entry @param number: number of Entry @type number: int @return: number of elements @rtype: int """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') count = ctypes.c_long(0) self._call( const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_NUMELEMS_, ctypes.byref(count)) return count.value def type(self, number, new_type=None): """Find or change the CDF type of a particular Entry number Parameters ========== number : int number of Entry to check or change Other Parameters ================ new_type type to change this Entry to, from :mod:`~pycdf.const`. Omit to only check type. Returns ======= out : int CDF variable type, see :mod:`~pycdf.const` Notes ===== If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) size = ctypes.c_long(self._entry_len(number)) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATASPEC_, new_type, size, ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') cdftype = ctypes.c_long(0) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATATYPE_, ctypes.byref(cdftype), ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') return cdftype.value def has_entry(self, number): """Check if this attribute has a particular Entry number Parameters ========== number : int number of Entry to check or change Returns ======= out : bool True if ``number`` is a valid entry number; False if not """ status = self._call(const.CONFIRM_, self.ENTRY_EXISTENCE_, ctypes.c_long(number), ignore=(const.NO_SUCH_ENTRY, )) return not status == const.NO_SUCH_ENTRY def max_idx(self): """Maximum index of Entries for this Attr Returns ======= out : int maximum Entry number """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_MAXENTRY_, ctypes.byref(count)) return count.value def new(self, data, type=None, number=None): """Create a new Entry in this Attribute .. note:: If ``number`` is provided and an Entry with that number already exists, it will be overwritten. Parameters ========== data data to put in the Entry Other Parameters ================ type : int type of the new Entry, from :mod:`~pycdf.const` (otherwise guessed from ``data``) number : int Entry number to write, default is lowest available number. """ if number is None: number = 0 while self.has_entry(number): number += 1 (dims, types, elements) = _Hyperslice.types( data, backward=self._cdf_file.backward) if type is None: #Guess based on other entries type = self._check_other_entries(types) if type is None and self.ENTRY_ == const.zENTRY_: #Try to match variable type vartype = self._cdf_file[number].type() if vartype in types: type = vartype if type is None: type = types[0] elif hasattr(type, 'value'): type = type.value self._write_entry(number, data, type, dims, elements) def number(self): """Find the attribute number for this attribute Returns ======= out : int attribute number """ no = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.ATTR_NUMBER_, self._name, ctypes.byref(no)) return no.value def global_scope(self): """Determine scope of this attribute. Returns ======= out : bool True if global (i.e. gAttr), False if zAttr """ return self._cdf_file.attr_num(self._name)[1] def rename(self, new_name): """Rename this attribute Renaming a zAttribute renames it for *all* zVariables in this CDF! 
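For example (names are illustrative), ``cdffile.attrs['OldName'].rename('NewName')`` renames that attribute everywhere it appears.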
Parameters ========== new_name : str the new name of the attribute """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_ATTR_NAME_LEN256: raise CDFError(const.BAD_ATTR_NAME) self._call(const.PUT_, const.ATTR_NAME_, enc_name) self._cdf_file.add_attr_to_cache( enc_name, *self._cdf_file.attr_num(self._name)) #still in cache del self._cdf_file._attr_info[self._name] self._name = enc_name def _get_entry(self, number): """Read an Entry associated with this L{Attr} @param number: number of Entry to return @type number: int @return: data from entry numbered L{number} @rtype: list or str """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') #Make a big enough buffer length = self._entry_len(number) cdftype = self.type(number) if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): buff = numpy.empty((), 'S{0}'.format(length), order='C') else: if not cdftype in lib.numpytypedict: raise CDFError(const.BAD_DATA_TYPE) buff = numpy.empty((length,), lib.numpytypedict[cdftype], order='C') buff = numpy.require(buff, requirements=('C', 'A', 'W')) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATA_, buff.ctypes.data_as(ctypes.c_void_p)) #decode if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): if str == bytes or self._raw: #Py2k, leave as bytes result = bytes(buff) else: #Py3k, make unicode result = str(numpy.char.array(buff).decode()) else: if not self._raw: if cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(buff) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(buff) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(buff) else: result = buff else: result = buff if length == 1: result = result[0] return result def _write_entry(self, number, data, cdf_type, dims, elements): """Write an Entry to this Attr. @param number: number of Entry to write @type number: int @param data: data to write @param cdf_type: the CDF type to write, from :py:mod:`pycdf.const` @param dims: dimensions of L{data} @type dims: list @param elements: number of elements in L{data}, 1 unless it is a string @type elements: int """ if len(dims) == 0: n_write = 1 else: n_write = dims[0] if cdf_type in (const.CDF_CHAR.value, const.CDF_UCHAR.value): data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.dtype('S' + str(elements))) n_write = elements elif cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data), except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) elif cdf_type in lib.numpytypedict: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=lib.numpytypedict[cdf_type]) else: raise CDFError(const.BAD_DATA_TYPE) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATA_, ctypes.c_long(cdf_type), ctypes.c_long(n_write), data.ctypes.data_as(ctypes.c_void_p)) def _delete(self): """Delete this Attribute Also deletes all Entries associated with it. 
""" self._call(const.DELETE_, const.ATTR_) self._cdf_file.clear_attr_from_cache(self._name) self._name = None class zAttr(Attr): """zAttribute for zVariables within a CDF. .. warning:: Because zAttributes are shared across all variables in a CDF, directly manipulating them may have unexpected consequences. It is safest to operate on zEntries via :class:`zAttrList`. .. note:: When accessing a zAttr, pyCDF exposes only the zEntry corresponding to the associated zVariable. See Also ======== :class:`Attr` """ ENTRY_ = const.zENTRY_ ENTRY_DATA_ = const.zENTRY_DATA_ SCOPE = const.VARIABLE_SCOPE ENTRY_EXISTENCE_ = const.zENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMzENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXzENTRY_ ENTRY_NUMELEMS_ = const.zENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.zENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.zENTRY_DATASPEC_ def insert(self, index, data): """Insert entry at particular index number Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError def append(self, index, data): """Add entry to end of attribute list Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError class gAttr(Attr): """Global Attribute for a CDF Represents a CDF attribute, providing access to the gEntries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. Normally accessed by providing a key to a :class:`gAttrList`: >>> attribute = cdffile.attrs['attribute_name'] >>> first_gentry = attribute[0] Each element of the list is a single gEntry of the appropriate type. The index to the elements is the gEntry number. A gEntry may be either a single string or a 1D array of numerical type. Entries of numerical type (everything but CDF_CHAR and CDF_UCHAR) with a single element are returned as scalars; multiple-element entries are returned as a list. No provision is made for accessing below the entry level; the whole list is returned at once (but Python's slicing syntax can be used to extract individual items from that list.) Multi-dimensional slicing is *not* supported; an entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry gEntries are *not* necessarily contiguous; a gAttribute may have an entry 0 and entry 2 without an entry 1. :meth:`~Attr.len` will return the *number* of gEntries; use :meth:`~Attr.max_idx` to find the highest defined gEntry number and :meth:`~Attr.has_entry` to determine if a particular gEntry number exists. Iterating over all entries is also supported:: >>> entrylist = [entry for entry in attribute] Deleting gEntries will leave a "hole": >>> attribute[0:3] = [1, 2, 3] >>> del attribute[1] >>> attribute.has_entry(1) False >>> attribute.has_entry(2) True >>> print attribute[0:3] [1, None, 3] Multi-element slices over nonexistent gEntries will return ``None`` where no entry exists. Single-element indices for nonexistent gEntries will raise ``IndexError``. Assigning ``None`` to a gEntry will delete it. 
When assigning to a gEntry, the type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing gEntry of the same number in this gAttribute #. other gEntries in this gAttribute #. data-matching constraints described in :meth:`CDF.new`. See Also ======== :class:`Attr` """ ENTRY_ = const.gENTRY_ ENTRY_DATA_ = const.gENTRY_DATA_ SCOPE = const.GLOBAL_SCOPE ENTRY_EXISTENCE_ = const.gENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMgENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXgENTRY_ ENTRY_NUMELEMS_ = const.gENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.gENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.gENTRY_DATASPEC_ class AttrList(MutableMapping): """Object representing a list of attributes. .. warning:: This class should not be used directly, but only via its subclasses, :class:`gAttrList` and :class:`zAttrList`. Methods listed here are safe to use from the subclasses. .. autosummary:: ~AttrList.clone ~AttrList.copy ~AttrList.from_dict ~AttrList.new ~AttrList.rename .. automethod:: clone .. automethod:: copy .. automethod:: from_dict .. automethod:: new .. automethod:: rename """ def __init__(self, cdf_file, special_entry=None): """Initialize the attribute collection @param cdf_file: CDF these attributes are in @type cdf_file: :py:class:`pycdf.CDF` @param special_entry: callable which returns a "special" entry number, used to limit results for zAttrs to those which match the zVar (i.e. the var number) @type special_entry: callable """ self._cdf_file = cdf_file self.special_entry = special_entry def __getitem__(self, name): """Find an Attribute by name @param name: name of the Attribute to return @type name: str @return: attribute named L{name} @rtype: L{Attr} @raise KeyError: if there is no attribute named L{name} @raise CDFError: other errors in CDF library """ try: attrib = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attrib.global_scope() != self.global_scope: raise KeyError(name + ': no ' + self.attr_name + ' by that name.') return attrib def __setitem__(self, name, data): """Create an Attribute or change its entries @param name: name of Attribute to change @type name: str @param data: Entries to populate this Attribute with. Any existing Entries will be deleted! Another C{Attr} may be specified, in which case all its entries are copied. @type data: scalar, list, or L{Attr} """ if isinstance(data, AttrList): if name in self: del self[name] attr = self._get_or_create(name) for entryno in range(data.max_idx()): if data.has_entry(entryno): attr.new(data[entryno], data.type(entryno), entryno) else: attr = self._get_or_create(name) if isinstance(data, str_classes): data = [data] else: try: junk = len(data) except TypeError: data = [data] attr[:] = data del attr[len(data):] def __delitem__(self, name): """Delete an Attribute (and all its entries) @param name: name of Attribute to delete @type name: str """ try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) attr._delete() def __iter__(self, current=0): """Iterates over all Attr in this CDF or variable Returns name of one L{Attr} at a time until reaches the end. @note: Returned in number order. 
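Example (sketch; ``cdffile`` is an open :class:`CDF`):

    >>> for name in cdffile.attrs:
    ...     print(name)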
""" count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) while current < count.value: candidate = self.AttrType(self._cdf_file, current) if candidate.global_scope() == self.global_scope: if self.special_entry is None or \ candidate.has_entry(self.special_entry()): if str == bytes: value = yield(candidate._name) else: value = yield(candidate._name.decode()) if value != None: current = self[value].number() current += 1 def __repr__(self): """Returns representation of attribute list Cannot return anything that can be eval'd to create a copy of the list, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<' + self.__class__.__name__ + ':\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute list This is an 'informal' representation in that it cannot be evaluated directly to create an L{AttrList}. @return: all the data in this list of attributes @rtype: str """ if self._cdf_file._opened: return '\n'.join([key + ': ' + ( ('\n' + ' ' * (len(key) + 2)).join( [str(value[i]) + ' [' + lib.cdftypenames[value.type(i)] + ']' for i in range(value.max_idx() + 1) if value.has_entry(i)]) if isinstance(value, Attr) else str(value) + ' [' + lib.cdftypenames[self.type(key)] + ']' ) for (key, value) in sorted(self.items())]) else: if isinstance(self._cdf_file.pathname, str): return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname) else: return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname.decode('ascii')) def clone(self, master, name=None, new_name=None): """ Clones another attribute list, or one attribute from it, into this list. Parameters ========== master : AttrList the attribute list to copy from. This can be any dict-like object. Other Parameters ================ name : str (optional) name of attribute to clone (default: clone entire list) new_name : str (optional) name of the new attribute, default ``name`` """ if name is None: self._clone_list(master) else: self._clone_attr(master, name, new_name) def copy(self): """ Create a copy of this attribute list Returns ======= out : dict copy of the entries for all attributes in this list """ return dict((key, value[:] if isinstance(value, Attr) else value) for (key, value) in self.items()) def new(self, name, data=None, type=None): """ Create a new Attr in this AttrList Parameters ========== name : str name of the new Attribute Other Parameters ================ data data to put into the first entry in the new Attribute type CDF type of the first entry from :mod:`~pycdf.const`. Only used if data are specified. Raises ====== KeyError : if the name already exists in this list """ if name in self: raise KeyError(name + ' already exists.') #A zAttr without an Entry in this zVar will be a "get" not "create" attr = self._get_or_create(name) if data is not None: if self.special_entry is None: attr.new(data, type) else: attr.new(data, type, self.special_entry()) def rename(self, old_name, new_name): """ Rename an attribute in this list Renaming a zAttribute renames it for *all* zVariables in this CDF! Parameters ========== old_name : str the current name of the attribute new_name : str the new name of the attribute """ AttrList.__getitem__(self, old_name).rename(new_name) def from_dict(self, in_dict): """ Fill this list of attributes from a dictionary .. deprecated:: 0.1.5 Use :meth:`~pycdf.AttrList.clone` instead; it supports cloning from dictionaries. 
Parameters ========== in_dict : dict Attribute list is populated entirely from this dictionary; all existing attributes are deleted. """ warnings.warn("from_dict is deprecated and will be removed. Use clone.", DeprecationWarning) for k in in_dict: self[k] = in_dict[k] for k in list(self): if not k in in_dict: del self[k] def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{AttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name self[new_name] = master[name] def _clone_list(self, master): """Clones this attribute list from another @param master: the attribute list to copy from @type master: L{AttrList} """ for name in master: self._clone_attr(master, name) for name in list(self): #Can't iterate over a list we're changing if not name in master: del self[name] def _get_or_create(self, name): """Retrieve L{Attr} or create it if it doesn't exist @param name: name of the attribute to look up or create @type name: str @return: attribute with this name @rtype: L{Attr} """ attr = None try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status != const.NO_SUCH_ATTR: raise if attr is None: attr = self.AttrType(self._cdf_file, name, True) elif attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) return attr class gAttrList(AttrList): """ Object representing *all* the gAttributes in a CDF. Normally accessed as an attribute of an open :class:`CDF`: >>> global_attribs = cdffile.attrs Appears as a dictionary: keys are attribute names; each value is an attribute represented by a :class:`gAttr` object. To access the global attribute TEXT: >>> text_attr = cdffile.attrs['TEXT'] See Also ======== :class:`AttrList` """ AttrType = gAttr attr_name = 'gAttribute' global_scope = True def __len__(self): """ Number of gAttributes in this CDF Returns ======= out : int number of gAttributes in the CDF """ count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMgATTRS_, ctypes.byref(count)) return count.value class zAttrList(AttrList): """Object representing *all* the zAttributes in a zVariable. Normally accessed as an attribute of a :class:`Var` in an open CDF: >>> epoch_attribs = cdffile['Epoch'].attrs Appears as a dictionary: keys are attribute names, values are the value of the zEntry associated with the appropriate zVariable. Each vAttribute in a CDF may only have a *single* entry associated with each variable. The entry may be a string, a single numerical value, or a series of numerical values. Entries with multiple values are returned as an entire list; direct access to the individual elements is not possible. Example: finding the first dependency of (ISTP-compliant) variable ``Flux``: >>> print cdffile['Flux'].attrs['DEPEND_0'] zAttributes are shared among zVariables, one zEntry allowed per zVariable. (pyCDF hides this detail.) Deleting the last zEntry for a zAttribute will delete the underlying zAttribute. zEntries are created and destroyed by the usual dict methods on the zAttrlist: >>> epoch_attribs['new_entry'] = [1, 2, 4] #assign a list to new zEntry >>> del epoch_attribs['new_entry'] #delete the zEntry The type of the zEntry is guessed from data provided. 
The type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing zEntry corresponding to this zVar #. other zEntries in this zAttribute #. the type of this zVar #. data-matching constraints described in :py:meth:`CDF.new` See Also ======== :class:`AttrList` """ AttrType = zAttr attr_name = 'zAttribute' global_scope = False def __init__(self, zvar): """Initialize the attribute collection @param zvar: zVariable these attributes are in @param zvar: :py:class:`pycdf.Var` """ super(zAttrList, self).__init__(zvar.cdf_file, zvar._num) self._zvar = zvar def __getitem__(self, name): """Find an zEntry by name @param name: name of the zAttribute to return @type name: str @return: attribute named L{name} @rtype: L{zAttr} @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if attrib.has_entry(zvar_num): attrib._raw = self._zvar._raw return attrib[zvar_num] else: raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) def __delitem__(self, name): """Delete an zEntry by name @param name: name of the zEntry to delete @type name: str @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library @note: If this is the only remaining entry, the Attribute will be deleted. """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(str(name) + ': no such attribute for variable ' + str(self._zvar._name)) del attrib[zvar_num] if len(attrib) == 0: attrib._delete() def __setitem__(self, name, data): """Sets a zEntry by name The type of the zEntry is guessed from L{data}. The type is chosen to match the data; subject to that constraint, it will try to match (in order): 1. existing zEntry corresponding to this zVar 2. other zEntries in this zAttribute 3. the type of this zVar 4. data-matching constraints described in L{_Hyperslice.types} @param name: name of zAttribute; zEntry for this zVariable will be set in zAttribute by this name @type name: str @raise CDFError: errors in CDF library @raise ValueError: if unable to find a valid CDF type matching L{data}, or if L{data} is the wrong dimensions. """ try: attr = super(zAttrList, self).__getitem__(name) except KeyError: attr = zAttr(self._cdf_file, name, True) attr._raw = self._zvar._raw attr[self._zvar._num()] = data def __len__(self): """Number of zAttributes in this variable @return: number of zAttributes in the CDF which have entries for this variable. @rtype: int """ length = 0 count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) current = 0 while current < count.value: candidate = zAttr(self._cdf_file, current) if not candidate.global_scope(): if candidate.has_entry(self._zvar._num()): length += 1 current += 1 return length def type(self, name, new_type=None): """Find or change the CDF type of a zEntry in this zVar @param name: name of the zAttr to check or change @type name: str @param new_type: type to change it to, see :py:mod:`pycdf.const` @type new_type: ctypes.c_long @return: CDF variable type, see :py:mod:`pycdf.const` @rtype: int @note: If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) return attrib.type(zvar_num, new_type) def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{zAttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name if new_name in self: del self[new_name] self.new(new_name, master[name], master.type(name) if hasattr(master, 'type') else None)
def dv(self, new_dv=None):
        """
        Gets or sets dimension variance of each dimension of variable.

        If the variance is unknown, True is assumed
        (this replicates the apparent behavior of the CDF library on
        variable creation).

        Parameters
        ==========
        new_dv : list of boolean
            Each element True to change that dimension to dimension
            variance, False to change to not dimension variance.
            (Unspecified to simply check variance.)

        Returns
        =======
        out : list of boolean
            True if that dimension has variance, else false.
        """
        ndims = self._n_dims()
        if new_dv != None:
            if len(new_dv) != ndims:
                raise ValueError('Must specify variance for ' +
                                 str(ndims) + 'dimensions.')
            varies = (ctypes.c_long * ndims)(
                *[const.VARY if dv else const.NOVARY for dv in new_dv])
            self._call(const.PUT_, const.zVAR_DIMVARYS_, varies)
        if ndims == 0:
            return []
        varies = (ctypes.c_long * const.CDF_MAX_DIMS)()
        self._call(const.GET_, const.zVAR_DIMVARYS_, varies)
        return [dv != const.NOVARY.value for dv in varies[0:ndims]]
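# Illustrative usage sketch for dv() above (not part of the original row;
# the file and variable names are assumptions): given an open, writable
# pycdf.CDF containing a 2-dimensional zVariable 'Flux',
#
#     >>> cdf['Flux'].dv()               # query current dimension variances
#     [True, True]
#     >>> cdf['Flux'].dv([True, False])  # set: vary along dim 0 only
#     [True, False]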
3203
3236
#!/usr/bin/env python # -*- coding: utf-8 -*- """ das developers note: This a is modification of the original SpacePy pycdf package. All refereneces to the greater spacepy package have been removed to create a small standalone module. --cwp 2018-10-18 The libcdf.so location code has been changed to find the version installed in anaconda. --cwp 2020-04-06 This package provides a Python interface to the Common Data Format (CDF) library used for many NASA missions, available at http://cdf.gsfc.nasa.gov/. It is targeted at Python 2.6+ and should work without change on either Python 2 or Python 3. The interface is intended to be 'pythonic' rather than reproducing the C interface. To open or close a CDF and access its variables, see the :class:`CDF` class. Accessing data within the variables is via the :class:`Var` class. The :data:`lib` object provides access to some routines that affect the functionality of the library in general. The :mod:`~pycdf.const` module contains constants useful for accessing the underlying library. Authors: Jon Niehof Institution: University of New Hampshire Contact: [email protected] Copyright 2010-2015 Los Alamos National Security, LLC. """ __contact__ = 'Jon Niehof, [email protected]' try: from collections.abc import MutableMapping, MutableSequence except ImportError: from collections import MutableMapping, MutableSequence import ctypes import ctypes.util import datetime import operator import os import os.path import shutil import sys import tempfile import warnings import weakref import numpy import numpy.ma #Import const AFTER library loaded, so failed load doesn't leave half-imported #from . import const try: str_classes = (str, bytes, unicode) except NameError: str_classes = (str, bytes) class Library(object): """ Abstraction of the base CDF C library and its state. Not normally intended for end-user use. An instance of this class is created at package load time as the :data:`~pycdf.lib` variable, providing access to the underlying C library if necessary. The CDF library itself is described in section 2.1 of the CDF user's guide, as well as the CDF C reference manual. Calling the C library directly requires knowledge of :mod:`ctypes`. Instantiating this object loads the C library, see :doc:`/pycdf` docs for details. .. autosummary:: ~Library.call ~Library.check_status ~Library.datetime_to_epoch ~Library.datetime_to_epoch16 ~Library.datetime_to_tt2000 ~Library.epoch_to_datetime ~Library.epoch_to_epoch16 ~Library.epoch_to_num ~Library.epoch_to_tt2000 ~Library.epoch16_to_datetime ~Library.epoch16_to_epoch ~Library.epoch16_to_tt2000 ~Library.set_backward supports_int8 ~Library.tt2000_to_datetime ~Library.tt2000_to_epoch ~Library.tt2000_to_epoch16 v_datetime_to_epoch v_datetime_to_epoch16 v_datetime_to_tt2000 v_epoch_to_datetime v_epoch_to_tt2000 v_epoch16_to_datetime v_epoch16_to_tt2000 v_tt2000_to_datetime v_tt2000_to_epoch v_tt2000_to_epoch16 libpath version .. automethod:: call .. automethod:: check_status .. automethod:: datetime_to_epoch .. automethod:: datetime_to_epoch16 .. automethod:: datetime_to_tt2000 .. automethod:: epoch_to_datetime .. automethod:: epoch_to_epoch16 .. automethod:: epoch_to_num .. automethod:: epoch_to_tt2000 .. automethod:: epoch16_to_datetime .. automethod:: epoch16_to_epoch .. automethod:: epoch16_to_tt2000 .. automethod:: set_backward .. attribute:: supports_int8 True if this library supports INT8 and TIME_TT2000 types; else False. .. automethod:: tt2000_to_datetime .. automethod:: tt2000_to_epoch .. 
automethod:: tt2000_to_epoch16 .. method:: v_datetime_to_epoch(datetime) A vectorized version of :meth:`datetime_to_epoch` which takes a numpy array of datetimes as input and returns an array of epochs. .. method:: v_datetime_to_epoch16(datetime) A vectorized version of :meth:`datetime_to_epoch16` which takes a numpy array of datetimes as input and returns an array of epoch16. .. method:: v_datetime_to_tt2000(datetime) A vectorized version of :meth:`datetime_to_tt2000` which takes a numpy array of datetimes as input and returns an array of TT2000. .. method:: v_epoch_to_datetime(epoch) A vectorized version of :meth:`epoch_to_datetime` which takes a numpy array of epochs as input and returns an array of datetimes. .. method:: v_epoch_to_tt2000(epoch) A vectorized version of :meth:`epoch_to_tt2000` which takes a numpy array of epochs as input and returns an array of tt2000s. .. method:: v_epoch16_to_datetime(epoch0, epoch1) A vectorized version of :meth:`epoch16_to_datetime` which takes a numpy array of epoch16 as input and returns an array of datetimes. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_epoch16_to_tt2000(epoch16) A vectorized version of :meth:`epoch16_to_tt2000` which takes a numpy array of epoch16 as input and returns an array of tt2000s. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_tt2000_to_datetime(tt2000) A vectorized version of :meth:`tt2000_to_datetime` which takes a numpy array of tt2000 as input and returns an array of datetimes. .. method:: v_tt2000_to_epoch(tt2000) A vectorized version of :meth:`tt2000_to_epoch` which takes a numpy array of tt2000 as input and returns an array of epochs. .. method:: v_tt2000_to_epoch16(tt2000) A vectorized version of :meth:`tt2000_to_epoch16` which takes a numpy array of tt2000 as input and returns an array of epoch16. .. attribute:: libpath The path where pycdf found the CDF C library, potentially useful in debugging. If this contains just the name of a file (with no path information), then the system linker found the library for pycdf. On Linux, ``ldconfig -p`` may be useful for displaying the system's library resolution. .. attribute:: version Version of the CDF library, (version, release, increment, subincrement) """ def __init__(self, libpath=None, library=None): """Load the CDF C library. Searches for the library in the order: 1. Appropriately-named file in CDF_LIB 2. Appropriately-named file in CDF_BASE 3. Standard library search path @raise CDFError: BAD_DATA_TYPE if can't map types properly """ if not 'CDF_TMP' in os.environ: os.environ['CDF_TMP'] = tempfile.gettempdir() if not library: if not libpath: self.libpath, self._library = self._find_lib() if self._library is None: raise Exception(( 'Cannot load CDF C library; checked {0}. 
' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(self.libpath))) else: self._library = ctypes.CDLL(libpath) self.libpath = libpath else: self._library = library self.libpath = libpath self._library.CDFlib.restype = ctypes.c_long #commonly used, so set it up here self._library.EPOCHbreakdown.restype = ctypes.c_long self._library.computeEPOCH.restype = ctypes.c_double self._library.computeEPOCH.argtypes = [ctypes.c_long] * 7 self._library.computeEPOCH16.restype = ctypes.c_double self._library.computeEPOCH16.argtypes = [ctypes.c_long] * 10 + \ [ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDFsetFileBackward'): self._library.CDFsetFileBackward.restype = None self._library.CDFsetFileBackward.argtypes = [ctypes.c_long] #Map old name to the 3.7.1+ name if not hasattr(self._library, 'computeTT2000') \ and hasattr(self._library, 'CDF_TT2000_from_UTC_parts'): self._library.computeTT2000 \ = self._library.CDF_TT2000_from_UTC_parts if hasattr(self._library, 'computeTT2000'): self._library.computeTT2000.restype = ctypes.c_longlong self._library.computeTT2000.argtypes = \ [ctypes.c_double] *9 #Map old name to the 3.7.1+ name if not hasattr(self._library, 'breakdownTT2000') \ and hasattr(self._library, 'CDF_TT2000_to_UTC_parts'): self._library.breakdownTT2000 \ = self._library.CDF_TT2000_to_UTC_parts if hasattr(self._library, 'breakdownTT2000'): self._library.breakdownTT2000.restype = None self._library.breakdownTT2000.argtypes = \ [ctypes.c_longlong] + [ctypes.POINTER(ctypes.c_double)] * 9 if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH'): self._library.CDF_TT2000_to_UTC_EPOCH.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH.argtypes = [ctypes.c_longlong] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH'): self._library.CDF_TT2000_from_UTC_EPOCH.restype = ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH.argtypes = [ctypes.c_double] if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH16'): self._library.CDF_TT2000_to_UTC_EPOCH16.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH16.argtypes = \ [ctypes.c_longlong, ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH16'): self._library.CDF_TT2000_from_UTC_EPOCH16.restype = \ ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH16.argtypes = \ [ctypes.POINTER(ctypes.c_double * 2)] #Get CDF version information ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) sub = ctypes.c_char(b' ') self.call(const.GET_, const.LIB_VERSION_, ctypes.byref(ver), const.GET_, const.LIB_RELEASE_, ctypes.byref(rel), const.GET_, const.LIB_INCREMENT_, ctypes.byref(inc), const.GET_, const.LIB_subINCREMENT_, ctypes.byref(sub)) ver = ver.value rel = rel.value inc = inc.value sub = sub.value self.version = (ver, rel, inc, sub) self._del_middle_rec_bug = ver < 3 or (ver == 3 and (rel < 4 or (rel == 4 and inc < 1))) self.supports_int8 = (ver > 3 or (ver == 3 and rel >=4)) self.cdftypenames = {const.CDF_BYTE.value: 'CDF_BYTE', const.CDF_CHAR.value: 'CDF_CHAR', const.CDF_INT1.value: 'CDF_INT1', const.CDF_UCHAR.value: 'CDF_UCHAR', const.CDF_UINT1.value: 'CDF_UINT1', const.CDF_INT2.value: 'CDF_INT2', const.CDF_UINT2.value: 'CDF_UINT2', const.CDF_INT4.value: 'CDF_INT4', const.CDF_UINT4.value: 'CDF_UINT4', const.CDF_INT8.value: 'CDF_INT8', const.CDF_FLOAT.value: 'CDF_FLOAT', const.CDF_REAL4.value: 'CDF_REAL4', const.CDF_DOUBLE.value: 'CDF_DOUBLE', const.CDF_REAL8.value: 'CDF_REAL8', const.CDF_EPOCH.value: 'CDF_EPOCH', 
const.CDF_EPOCH16.value: 'CDF_EPOCH16', const.CDF_TIME_TT2000.value: 'CDF_TIME_TT2000', } self.numpytypedict = {const.CDF_BYTE.value: numpy.int8, const.CDF_CHAR.value: numpy.int8, const.CDF_INT1.value: numpy.int8, const.CDF_UCHAR.value: numpy.uint8, const.CDF_UINT1.value: numpy.uint8, const.CDF_INT2.value: numpy.int16, const.CDF_UINT2.value: numpy.uint16, const.CDF_INT4.value: numpy.int32, const.CDF_UINT4.value: numpy.uint32, const.CDF_INT8.value: numpy.int64, const.CDF_FLOAT.value: numpy.float32, const.CDF_REAL4.value: numpy.float32, const.CDF_DOUBLE.value: numpy.float64, const.CDF_REAL8.value: numpy.float64, const.CDF_EPOCH.value: numpy.float64, const.CDF_EPOCH16.value: numpy.dtype((numpy.float64, 2)), const.CDF_TIME_TT2000.value: numpy.int64, } self.timetypes = [const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value] if not self.supports_int8: del self.cdftypenames[const.CDF_INT8.value] del self.numpytypedict[const.CDF_INT8.value] del self.cdftypenames[const.CDF_TIME_TT2000.value] del self.numpytypedict[const.CDF_TIME_TT2000.value] elif sys.platform.startswith('linux') \ and os.uname()[4].startswith('arm') \ and hasattr(self._library, 'computeTT2000') \ and self._library.computeTT2000( 2010, 1, 1, 0, 0, 0, 0, 0, 0) != 315576066184000000: #TT2000 call failed, so probably need to type-pun #double arguments to variadic functions. #Calling convention for non-variadic functions with floats #is unique, but convention for ints is same as variadic. #So type-pun arguments to integers to force that calling #convention. if ctypes.sizeof(ctypes.c_longlong) != \ ctypes.sizeof(ctypes.c_double): warnings.warn('ARM with unknown type sizes; ' 'TT2000 functions will not work.') else: self._library.computeTT2000.argtypes = \ [ctypes.c_longlong] * 9 c_ll_p = ctypes.POINTER(ctypes.c_longlong) if self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( 2010)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) != 315576066184000000: warnings.warn('ARM with unknown calling convention; ' 'TT2000 functions will not work.') self.datetime_to_tt2000 = self._datetime_to_tt2000_typepunned v_epoch16_to_datetime = numpy.frompyfunc( self.epoch16_to_datetime, 2, 1) self.v_epoch16_to_datetime = \ lambda x: v_epoch16_to_datetime(x[..., 0], x[..., 1]) self.v_epoch_to_datetime = numpy.frompyfunc( self.epoch_to_datetime, 1, 1) self.v_tt2000_to_datetime = numpy.frompyfunc( self.tt2000_to_datetime, 1, 1) self.v_datetime_to_epoch = numpy.vectorize( self.datetime_to_epoch, otypes=[numpy.float64]) v_datetime_to_epoch16 = numpy.frompyfunc( self.datetime_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. 
We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_datetime_to_epoch16(x): retval = numpy.require(v_datetime_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_datetime_to_epoch16 = _v_datetime_to_epoch16 self.v_datetime_to_tt2000 = numpy.vectorize( self.datetime_to_tt2000, otypes=[numpy.int64]) self.v_epoch_to_tt2000 = numpy.vectorize( self.epoch_to_tt2000, otypes=[numpy.int64]) self.v_tt2000_to_epoch = numpy.vectorize( self.tt2000_to_epoch, otypes=[numpy.float64]) v_epoch16_to_tt2000 = numpy.frompyfunc( self.epoch16_to_tt2000, 2, 1) self.v_epoch16_to_tt2000 = \ lambda x: v_epoch16_to_tt2000(x[..., 0], x[..., 1]) v_tt2000_to_epoch16 = numpy.frompyfunc( self.tt2000_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_tt2000_to_epoch16(x): retval = numpy.require(v_tt2000_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_tt2000_to_epoch16 = _v_tt2000_to_epoch16 if not self.supports_int8: self.datetime_to_tt2000 = self._bad_tt2000 self.tt2000_to_datetime = self._bad_tt2000 self.v_datetime_to_tt2000 = self._bad_tt2000 self.v_tt2000_to_datetime = self._bad_tt2000 self.epoch_to_tt2000 = self._bad_tt2000 self.v_epoch_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch = self._bad_tt2000 self.v_tt2000_to_epoch = self._bad_tt2000 self.epoch_16_to_tt2000 = self._bad_tt2000 self.v_epoch16_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch16 = self._bad_tt2000 self.v_tt2000_to_epoch16 = self._bad_tt2000 #Default to V2 CDF self.set_backward(True) @staticmethod def _find_lib(): """ Search for the CDF library Searches in likely locations for CDF libraries and attempts to load them. Stops at first successful load and, if fails, reports all the files that were tried as libraries. Returns ======= out : tuple This is either (path to library, loaded library) or, in the event of failure, (None, list of libraries tried) """ failed = [] for libpath in Library._lib_paths(): try: lib = ctypes.CDLL(libpath) except: failed.append(libpath) else: return libpath, lib return (failed, None) @staticmethod def _lib_paths(): """Find candidate paths for the CDF library Does not check that the library is actually in any particular directory, just returns a list of possible locations, in priority order. Returns ======= out : generator of str paths that look like the CDF library """ #What the library might be named names = { 'win32': ['cdf.dll'], 'darwin': ['libcdf.dylib', 'cdf.dylib', 'libcdf.so'], 'linux2': ['libcdf.so'], 'linux': ['libcdf.so'], } names = names.get(sys.platform, ['libcdf.so']) #All existing CDF-library-like paths within a directory search_dir = lambda x: \ [os.path.join(x, fname) for fname in names if os.path.exists(os.path.join(x, fname))] # Only use anaconda locations... # Defined during builds ... if 'PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['PREFIX'], 'lib')): yield p # defined when conda is activated ... 
if 'CONDA_PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'lib')): yield p # Special subdirectory for anaconda unix packages on windows if 'LIBRARY_BIN' in os.environ: for p in search_dir(os.environ['LIBRARY_BIN']): yield p ctypespath = ctypes.util.find_library( 'cdf.dll' if sys.platform == 'win32' else 'cdf') if ctypespath: yield ctypespath def check_status(self, status, ignore=()): """ Raise exception or warning based on return status of CDF call Parameters ========== status : int status returned by the C library Other Parameters ================ ignore : sequence of ctypes.c_long CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. (Default none). Raises ====== CDFError : if status < CDF_WARN, indicating an error Warns ===== CDFWarning : if CDF_WARN <= status < CDF_OK, indicating a warning. Returns ======= out : int status (unchanged) """ if status == const.CDF_OK or status in ignore: return status if status < const.CDF_WARN: raise CDFError(status) else: warning = CDFWarning(status) warning.warn() return status def call(self, *args, **kwargs): """ Call the CDF internal interface Passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with :meth:`check_status`. Terminal NULL is automatically added to args. Parameters ========== args : various, see :mod:`ctypes` Passed directly to the CDF library interface. Useful constants are defined in the :mod:`~pycdf.const` module. Other Parameters ================ ignore : sequence of CDF statuses sequence of CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. Returns ======= out : int CDF status from the library Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ if 'ignore' in kwargs: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) ), kwargs['ignore']) else: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) )) def set_backward(self, backward=True): """ Set backward compatibility mode for new CDFs Unless backward compatible mode is set, CDF files created by the version 3 library can not be read by V2. Parameters ========== backward : boolean Set backward compatible mode if True; clear it if False. Raises ====== ValueError : if backward=False and underlying CDF library is V2 """ if self.version[0] < 3: if not backward: raise ValueError( 'Cannot disable backward-compatible mode for CDF version 2.') else: return self._library.CDFsetFileBackward(const.BACKWARDFILEon if backward else const.BACKWARDFILEoff) def epoch_to_datetime(self, epoch): """ Converts a CDF epoch value to a datetime Parameters ========== epoch : float epoch value from CDF Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
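Examples
========
Illustrative round trip through the module-level ``lib`` instance (a
sketch only; exact float handling depends on the installed CDF library):

>>> import datetime
>>> lib.epoch_to_datetime(
...     lib.datetime_to_epoch(datetime.datetime(2000, 1, 1)))
datetime.datetime(2000, 1, 1, 0, 0)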
See Also ======== v_epoch_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) self._library.EPOCHbreakdown(ctypes.c_double(epoch), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999000) else: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, msec.value * 1000) def datetime_to_epoch(self, dt): """ Converts a Python datetime to a CDF Epoch value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : float epoch corresponding to dt See Also ======== v_datetime_to_epoch """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) micro = dt.microsecond % 1000 if micro >= 500 and dt.year < 9999: dt += datetime.timedelta(0, 0, 1000) return self._library.computeEPOCH(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000)) def epoch16_to_datetime(self, epoch0, epoch1): """ Converts a CDF epoch16 value to a datetime .. note:: The call signature has changed since SpacePy 0.1.2. Formerly this method took a single argument with two values; now it requires two arguments (one for each value). To convert existing code, replace ``epoch16_to_datetime(epoch)`` with ``epoch16_to_datetime(*epoch)``. Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. See Also ======== v_epoch16_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) usec = ctypes.c_long(0) nsec = ctypes.c_long(0) psec = ctypes.c_long(0) self._library.EPOCH16breakdown((ctypes.c_double * 2)(epoch0, epoch1), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec), ctypes.byref(psec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) micro = int(float(msec.value) * 1000 + float(usec.value) + float(nsec.value) / 1000 + float(psec.value) / 1e6 + 0.5) if micro < 1000000: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_epoch16(self, dt): """ Converts a Python datetime to a CDF Epoch16 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : list of float epoch16 corresponding to dt See Also ======== v_datetime_to_epoch16 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) #Default to "illegal epoch" epoch16 = (ctypes.c_double * 2)(-1., -1.) 
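        # computeEPOCH16 fills the two-element buffer passed as its last
        # argument and returns a nonzero status on failure; in that case the
        # illegal-epoch pair (-1., -1.) is returned to the caller below.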
if self._library.computeEPOCH16(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0, 0, epoch16): return (-1., -1.) #Failure, so illegal epoch return (epoch16[0], epoch16[1]) def epoch_to_epoch16(self, epoch): """ Converts a CDF EPOCH to a CDF EPOCH16 value Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : (double, double) EPOCH16 corresponding to epoch """ e = numpy.require(epoch, numpy.float64) s = numpy.trunc(e / 1000.0) #ugly numpy stuff, probably a better way.... res = numpy.hstack((s, (e - s * 1000.0) * 1e9)) if len(res) <= 2: return res newshape = list(res.shape[0:-2]) newshape.append(res.shape[-1] // 2) newshape.append(2) return numpy.rollaxis(res.reshape(newshape), -1, -2) def epoch_to_num(self, epoch): """ Convert CDF EPOCH to matplotlib number. Same output as :func:`~matplotlib.dates.date2num` and useful for plotting large data sets without converting the times through datetime. Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : double Floating point number representing days since 0001-01-01. """ #date2num day 1 is 1/1/1 00UT #epoch 1/1/1 00UT is 31622400000.0 (millisecond) return (epoch - 31622400000.0) / (24 * 60 * 60 * 1000.0) + 1.0 def epoch16_to_epoch(self, epoch16): """ Converts a CDF EPOCH16 to a CDF EPOCH value Parameters ========== epoch16 : (double, double) EPOCH16 to convert. Lists and numpy arrays are acceptable. LAST dimension should be 2: the two pairs of EPOCH16 Returns ======= out : double EPOCH corresponding to epoch16 """ e = numpy.require(epoch16, numpy.float64) return e[..., 0] * 1000.0 + numpy.round(e[..., 1] / 1e9) def tt2000_to_datetime(self, tt2000): """ Converts a CDF TT2000 value to a datetime .. note:: Although TT2000 values support leapseconds, Python's datetime object does not. Any times after 23:59:59.999999 will be truncated to 23:59:59.999999. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
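Examples
========
Illustrative only (requires a CDF library with TT2000 support); the
constant below is the TT2000 value the library itself reports for
2010-01-01 00:00:00 UTC:

>>> lib.tt2000_to_datetime(315576066184000000)
datetime.datetime(2010, 1, 1, 0, 0)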
See Also ======== v_tt2000_to_datetime """ yyyy = ctypes.c_double(0) mm = ctypes.c_double(0) dd = ctypes.c_double(0) hh = ctypes.c_double(0) min = ctypes.c_double(0) sec = ctypes.c_double(0) msec = ctypes.c_double(0) usec = ctypes.c_double(0) nsec = ctypes.c_double(0) self._library.breakdownTT2000( ctypes.c_longlong(tt2000), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) sec = int(sec.value) if sec >= 60: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), 59, 999999) micro = int(msec.value * 1000 + usec.value + nsec.value / 1000 + 0.5) if micro < 1000000: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_tt2000(self, dt): """ Converts a Python datetime to a CDF TT2000 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0) def _datetime_to_tt2000_typepunned(self, dt): """ Converts a Python datetime to a CDF TT2000 value Typepunned version that passes doubles as longlongs, to get around ARM calling convention oddness. Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ c_ll_p = ctypes.POINTER(ctypes.c_longlong) if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( dt.year)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.month)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.day)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.hour)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.minute)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.second)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond // 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond % 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) def epoch_to_tt2000(self, epoch): """ Converts a CDF EPOCH to a CDF TT2000 value Parameters ========== epoch : double EPOCH to convert Returns ======= out : int tt2000 corresponding to epoch See Also ======== v_epoch_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH(epoch) def tt2000_to_epoch(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH .. note:: Although TT2000 values support leapseconds, CDF EPOCH values do not. 
Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double EPOCH corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch """ return self._library.CDF_TT2000_to_UTC_EPOCH(tt2000) def epoch16_to_tt2000(self, epoch0, epoch1): """ Converts a CDF epoch16 value to TT2000 .. note:: Because TT2000 does not support picoseconds, the picoseconds value in epoch is ignored (i.e., truncated.) Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : long TT2000 corresponding to epoch. See Also ======== v_epoch16_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH16( (ctypes.c_double * 2)(epoch0, epoch1)) def tt2000_to_epoch16(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH16 .. note:: Although TT2000 values support leapseconds, CDF EPOCH16 values do not. Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double, double EPOCH16 corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch16 """ #Default to "illegal epoch" if isn't populated epoch16 = (ctypes.c_double * 2)(-1., -1.) if self._library.CDF_TT2000_to_UTC_EPOCH16(tt2000, epoch16): return (-1., -1.) #Failure; illegal epoch return (epoch16[0], epoch16[1]) def _bad_tt2000(*args, **kwargs): """Convenience function for complaining that TT2000 not supported""" raise NotImplementedError( 'TT2000 functions require CDF library 3.4.0 or later') def download_library(): """Download and install the CDF library""" if sys.platform != 'win32': raise NotImplementedError( 'CDF library install only supported on Windows') try: import html.parser as HTMLParser except ImportError: import HTMLParser #https://stackoverflow.com/questions/1713038/super-fails-with-error-typeerror-argument-1-must-be-type-not-classobj class LinkParser(HTMLParser.HTMLParser, object): def __init__(self, *args, **kwargs): self.links_found = [] super(LinkParser, self).__init__(*args, **kwargs) def handle_starttag(self, tag, attrs): if tag != 'a' or attrs[0][0] != 'href': return self.links_found.append(attrs[0][1]) import re import subprocess try: import urllib.request as u except ImportError: import urllib as u # Removed reference to spacepy #import spacepy #if spacepy.config.get('user_agent', None): # class AppURLopener(u.FancyURLopener): # version = spacepy.config['user_agent'] # u._urlopener = AppURLopener() baseurl = 'https://spdf.sci.gsfc.nasa.gov/pub/software/cdf/dist/' url = u.urlopen(baseurl) listing = url.read() url.close() p = LinkParser() p.feed(listing) cdfdist = [l for l in p.links_found if re.match('^cdf3\d_\d(?:_\d)?/$', l)] if not cdfdist: raise RuntimeError( "Couldn't find CDF distribution directory to download") cdfdist.sort(key=lambda x: x.rstrip('/').split('_')) cdfverbase = cdfdist[-1].rstrip('/') instfname = cdfverbase + ('_0' if cdfverbase.count('_') == 1 else '') + \ '-setup-{0}.exe'.format(len('%x' % sys.maxsize)*4) insturl = baseurl + cdfverbase + '/windows/' + instfname tmpdir = tempfile.mkdtemp() try: fname, status = u.urlretrieve(insturl, os.path.join(tmpdir, instfname)) subprocess.check_call([fname, '/install', '/q1'], shell=False) finally: shutil.rmtree(tmpdir) _libpath, _library = Library._find_lib() 
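# On success _find_lib() returns (path, loaded ctypes library); on failure it
# returns (list of paths tried, None), which the check below reports in an
# import-time error suggesting os.environ["CDF_LIB"] as a workaround.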
if _library is None: raise Exception(('Cannot load CDF C library; checked {0}. ' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(_libpath))) from . import const lib = Library(_libpath, _library) """Module global library object. Initalized at module load time so all classes have ready access to the CDF library and a common state. E.g: >>> import pycdf >>> pycdf.lib.version (3, 3, 0, ' ') """ class CDFException(Exception): """ Base class for errors or warnings in the CDF library. Not normally used directly, but in subclasses :class:`CDFError` and :class:`CDFWarning`. Error messages provided by this class are looked up from the underlying C library. """ def __init__(self, status): """ Create a CDF Exception Uses CDF C library to look up an appropriate error message. Parameters ========== status : ctypes.c_long CDF status """ self.status = status self.string = 'CDF error ' + repr(status) + ', unable to get details.' message = ctypes.create_string_buffer(const.CDF_STATUSTEXT_LEN + 1) try: retval = lib._library.CDFlib(const.SELECT_, const.CDF_STATUS_, ctypes.c_long(status), const.GET_, const.STATUS_TEXT_, message, const.NULL_) if retval == const.CDF_OK: if isinstance(message.value, str): self.string = message.value elif isinstance(message.value, bytes): self.string = message.value.decode() except: pass def __str__(self): """ Error string associated with the library error. Returns ======= out : str Error message from the CDF library. """ return self.string class CDFError(CDFException): """Raised for an error in the CDF library.""" pass class CDFWarning(CDFException, UserWarning): """Used for a warning in the CDF library.""" def warn(self, level=4): """ Issues a warning based on the information stored in my exception Intended for use in check_status or similar wrapper function. Other Parameters ================ level : int optional (default 3), how far up the stack the warning should be reported. Passed directly to :class:`warnings.warn`. """ warnings.warn(self, self.__class__, level) class EpochError(Exception): """Used for errors in epoch routines""" pass def _compress(obj, comptype=None, param=None): """Set or check the compression of a :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param obj: object on which to set or check compression @type obj: :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param comptype: type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :py:mod:`pycdf.const`. If not specified, will not change compression. @type comptype: ctypes.c_long @param param: Compression parameter, see CDF CRM 4.10 and :py:mod:`pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
@type param: ctypes.c_long @return: (comptype, param) currently in effect @rtype: tuple """ if isinstance(obj, CDF): COMPRESSION_ = const.CDF_COMPRESSION_ elif isinstance(obj, Var): COMPRESSION_ = const.zVAR_COMPRESSION_ else: raise ValueError('Must specify a CDF or Var type.') validparams = {const.NO_COMPRESSION.value: [ctypes.c_long(0)], const.RLE_COMPRESSION.value: [const.RLE_OF_ZEROs], const.HUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.AHUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.GZIP_COMPRESSION.value: [ctypes.c_long(5), ctypes.c_long(1), ctypes.c_long(2), ctypes.c_long(3), ctypes.c_long(4), ctypes.c_long(6), ctypes.c_long(7), ctypes.c_long(8), ctypes.c_long(9), ], } comptypes = [const.NO_COMPRESSION, const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION, const.GZIP_COMPRESSION] comptypevalues = [i.value for i in comptypes] if comptype != None: if not hasattr(comptype, 'value'): comptype = ctypes.c_long(comptype) if param is None: if not comptype.value in validparams: raise CDFError(const.BAD_COMPRESSION) param = validparams[comptype.value][0] paramlist = (ctypes.c_long * 1)(param) obj._call(const.PUT_, COMPRESSION_, comptype, paramlist) params = (ctypes.c_long * const.CDF_MAX_PARMS)(*([0] * const.CDF_MAX_PARMS)) comptype = ctypes.c_long(0) percent = ctypes.c_long(0) obj._call(const.GET_, COMPRESSION_, ctypes.byref(comptype), ctypes.byref(params), ctypes.byref(percent)) param = params[0] if not comptype.value in comptypevalues: raise CDFError(const.BAD_COMPRESSION) validparamvalues = [i.value for i in validparams[comptype.value]] if not param in validparamvalues: raise CDFError(const.BAD_COMPRESSION_PARM) comptype = comptypes[comptypevalues.index(comptype.value)] if comptype in (const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION): param = validparams[comptype.value][validparamvalues.index(param)] return (comptype, param) class CDF(MutableMapping): """ Python object representing a CDF file. Open or create a CDF file by creating an object of this class. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. A readonly CDF with many variables may be slow to close. See :meth:`readonly`. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :meth:`close` or :meth:`save` when done. .. note:: Existing CDF files are opened read-only by default, see :meth:`readonly` to change. CDF supports the `with <http://docs.python.org/tutorial/inputoutput.html#methods-of-file-objects>`_ keyword, like other file objects, so: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... #do brilliant things with the CDF will open the CDF, execute the indented statements, and close the CDF when finished or when an error occurs. The `python docs <http://docs.python.org/reference/compound_stmts.html#with>`_ include more detail on this 'context manager' ability. 
CDF objects behave like a python `dictionary <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_, where the keys are names of variables in the CDF, and the values, :class:`Var` objects. As a dictionary, they are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_ and it is easy to loop over all of the variables in a file. Some examples: #. List the names of all variables in the open CDF ``cdffile``: >>> cdffile.keys() >>> for k in cdffile: #Alternate ... print(k) #. Get a :class:`Var` object for the variable named ``Epoch``: >>> epoch = cdffile['Epoch'] #. Determine if a CDF contains a variable named ``B_GSE``: >>> if 'B_GSE' in cdffile: ... print('B_GSE is in the file') ... else: ... print('B_GSE is not in the file') #. Find how many variables are in the file: >>> print(len(cdffile)) #. Delete the variable ``Epoch`` from the open CDF file ``cdffile``: >>> del cdffile['Epoch'] #. Display a summary of variables and types in open CDF file ``cdffile``: >>> print(cdffile) #. Open the CDF named ``cdf_filename.cdf``, read *all* the data from all variables into dictionary ``data``, and close it when done or if an error occurs: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... data = cdffile.copy() This last example can be very inefficient as it reads the entire CDF. Normally it's better to treat the CDF as a dictionary and access only the data needed, which will be pulled transparently from disc. See :class:`Var` for more subtle examples. Potentially useful dictionary methods and related functions: - `in <http://docs.python.org/reference/expressions.html#in>`_ - `keys <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_ - :py:func:`len` - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - :py:func:`sorted` - :py:func:`~spacepy.toolbox.dictree` The CDF user's guide section 2.2 has more background information on CDF files. The :attr:`~CDF.attrs` Python attribute acts as a dictionary referencing CDF attributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`gAttrList` for more on the dictionary of global attributes. Creating a new CDF from a master (skeleton) CDF has similar syntax to opening one: >>> cdffile = pycdf.CDF('cdf_filename.cdf', 'master_cdf_filename.cdf') This creates and opens ``cdf_filename.cdf`` as a copy of ``master_cdf_filename.cdf``. Using a skeleton CDF is recommended over making a CDF entirely from scratch, but this is possible by specifying a blank master: >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') When CDFs are created in this way, they are opened read-write, see :py:meth:`readonly` to change. By default, new CDFs (without a master) are created in version 2 (backward-compatible) format. To create a version 3 CDF, use :meth:`Library.set_backward`: >>> pycdf.lib.set_backward(False) >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') Add variables by direct assignment, which will automatically set type and dimension based on the data provided: >>> cdffile['new_variable_name'] = [1, 2, 3, 4] or, if more control is needed over the type and dimensions, use :py:meth:`new`. Although it is supported to assign Var objects to Python variables for convenience, there are some minor pitfalls that can arise when changing a CDF that will not affect most users. 
This is only a concern when assigning a zVar object to a Python variable, changing the CDF through some other variable, and then trying to use the zVar object via the originally assigned variable. Deleting a variable: >>> var = cdffile['Var1'] >>> del cdffile['Var1'] >>> var[0] #fail, no such variable Renaming a variable: >>> var = cdffile['Var1'] >>> cdffile['Var1'].rename('Var2') >>> var[0] #fail, no such variable Renaming via the same variable works: >>> var = cdffile['Var1'] >>> var.rename('Var2') >>> var[0] #succeeds, aware of new name Deleting a variable and then creating another variable with the same name may lead to some surprises: >>> var = cdffile['Var1'] >>> var[...] = [1, 2, 3, 4] >>> del cdffile['Var1'] >>> cdffile.new('Var1', data=[5, 6, 7, 8] >>> var[...] [5, 6, 7, 8] .. autosummary:: ~CDF.attr_num ~CDF.attrs ~CDF.add_attr_to_cache ~CDF.add_to_cache ~CDF.backward ~CDF.checksum ~CDF.clear_attr_from_cache ~CDF.clear_from_cache ~CDF.clone ~CDF.close ~CDF.col_major ~CDF.compress ~CDF.copy ~CDF.from_data ~CDF.new ~CDF.raw_var ~CDF.readonly ~CDF.save ~CDF.var_num ~CDF.version .. attribute:: CDF.attrs Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. .. attribute:: CDF.backward True if this CDF was created in backward-compatible mode (for opening with CDF library before 3.x) .. automethod:: add_to_cache .. automethod:: add_attr_to_cache .. automethod:: attr_num .. automethod:: checksum .. automethod:: clear_from_cache .. automethod:: clear_attr_from_cache .. automethod:: clone .. automethod:: close .. automethod:: col_major .. automethod:: compress .. automethod:: copy .. automethod:: from_data .. automethod:: new .. automethod:: raw_var .. automethod:: readonly .. automethod:: save .. automethod:: var_num .. automethod:: version """ def __init__(self, pathname, masterpath=None, create=None, readonly=None): """Open or create a CDF file. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. Raises ====== CDFError if CDF library reports an error CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save` when done. 
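Creating a new, empty CDF opened read/write follows the same pattern
(a sketch; the file name here is hypothetical and must not already exist):

>>> newfile = pycdf.CDF('new_filename.cdf', '')
>>> newfile.close()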
""" if masterpath is not None: #Looks like we want to create if create is False: raise ValueError('Cannot specify a master CDF without creating a CDF') if readonly is True: raise ValueError('Cannot create a CDF in readonly mode') if create and readonly: raise ValueError('Cannot create a CDF in readonly mode') try: self.pathname = pathname.encode() except AttributeError: raise ValueError( 'pathname must be string-like: {0}'.format(pathname)) self._handle = ctypes.c_void_p(None) self._opened = False if masterpath is None and not create: self._open(True if readonly is None else readonly) elif masterpath: self._from_master(masterpath.encode()) else: self._create() lib.call(const.SELECT_, const.CDF_zMODE_, ctypes.c_long(2)) self._attrlistref = weakref.ref(gAttrList(self)) self.backward = self.version()[0] < 3 self._var_nums = {} """Cache of name-to-number mappings for variables in this CDF""" self._attr_info = {} """Cache of name-to-(number, global) mappings for attributes in this CDF""" def __del__(self): """Destructor; called when CDF object is destroyed. Close CDF file if there is still a valid handle. .. note:: To avoid data loss, explicitly call :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save`. """ if self._opened: self.close() def __delitem__(self, name): """Delete a zVariable in this CDF, by name or number Parameters ========== name : string or int Name or number of the CDF variable .. note: Variable numbers may change if variables are added or removed. Examples ======== Delete the variable ``Epoch`` from the open CDF file ``cdffile``. >>> del cdffile['Epoch'] """ self[name]._delete() def __enter__(self): """Context manager entrance function.""" return self def __exit__(self, type, value, traceback): """Context manager exit function. Close CDF file. """ self.close() def __getitem__(self, name): """Gets a zVariable in this CDF, by name or number The CDF acts like a dict @param name: Name or number of the CDF variable @type name: string or int @return: CDF variable named or numbered L{name} @rtype: :py:class:`pycdf.Var` @raise KeyError: for pretty much any problem in lookup @note: variable numbers may change if variables are added or removed. """ try: return Var(self, name) except CDFException as e: raise KeyError('{0}: {1}'.format(name, e)) def __setitem__(self, name, data): """Writes data to a zVariable in this CDF If the zVariable does not exist, will create one matching L{data}. If it does exist, will attempt to write L{data} to it without changing the type or dimensions. @param name: name or number of the variable to write @type name: str or int @param data: data to write, or a :py:class:`pycdf.Var` to copy """ if isinstance(data, Var): self.clone(data, name) elif name in self: self[name][...] 
= data if hasattr(data, 'attrs'): self[name].attrs.clone(data.attrs) else: self.new(name, data) def __iter__(self, current = 0): """Iterates over zVars in CDF Iterators for dicts return keys @note: Returned in variable-number order """ while current < self.__len__(): name = self[current].name() value = (yield name) if value is None: current += 1 else: current = self[value]._num() current += 1 def __len__(self): """Implements 'length' of CDF (number of zVars) @return: number of zVars in the CDF @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, const.CDF_NUMzVARS_, ctypes.byref(count)) return count.value def __contains__(self, key): """Determines whether a particular variable name is in the CDF @note: Essentially an efficiency function; L{__iter__} is called if this isn't defined @param key: key/variable name to check @type key: string @return: True if L{key} is the name of a variable in CDF, else False @rtype: Boolean """ try: foo = self[key] return True except KeyError as e: expected = str(key) + \ ": NO_SUCH_VAR: Named variable not found in this CDF." if expected in e.args: return False raise def __repr__(self): """Returns representation of CDF Cannot return anything that can be eval'd to create a copy of the CDF, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<CDF:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the CDF This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.CDF`, just the names, types, and sizes of all variables. (Attributes are not listed.) @return: description of the variables in the CDF @rtype: str """ if self._opened: return '\n'.join([key + ': ' + str(value) for (key, value) in sorted(self.items())]) #can get away with this sort because second value in tuple isn't #compared unless first are different, and variable name is unique. else: if isinstance(self.pathname, str): return 'Closed CDF {0}'.format(self.pathname) else: return 'Closed CDF {0}'.format(self.pathname.decode('ascii')) def _open(self, readonly=True): """Opens the CDF file (called on init) Will open an existing CDF file read/write. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.OPEN_, const.CDF_, self.pathname, ctypes.byref(self._handle)) self._opened = True if readonly: #Default is RW self.readonly(readonly) def _create(self): """Creates (and opens) a new CDF file Created at ``pathname``. Assumes zero-dimension r variables Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.CREATE_, const.CDF_, self.pathname, ctypes.c_long(0), (ctypes.c_long * 1)(0), ctypes.byref(self._handle)) self._opened = True def _from_master(self, master_path): """Creates a new CDF from a master CDF file ``master_path`` is copied to ``pathname`` and opened. Parameters ========== master_path : string location of the master CDF file Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. 
note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ if os.path.exists(self.pathname): raise CDFError(const.CDF_EXISTS) shutil.copy2(master_path, self.pathname) self._open(False) def _call(self, *args, **kwargs): """Select this CDF as current and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. Parameters ========== args : various, see :py:mod:`ctypes`. Passed directly to the CDF library interface. Useful constants are defined in the :doc:`const <pycdf_const>` module of this package. Returns ======= out : ctypes.c_long CDF status from the library .. note: Terminal NULL_ is automatically added to ``args``. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. """ return lib.call(const.SELECT_, const.CDF_, self._handle, *args, **kwargs) def clone(self, zVar, name=None, data=True): """ Clone a zVariable (from another CDF or this) into this CDF Parameters ========== zVar : :py:class:`Var` variable to clone Other Parameters ================ name : str Name of the new variable (default: name of the original) data : boolean (optional) Copy data, or only type, dimensions, variance, attributes? (default: True, copy data as well) Returns ======= out : :py:class:`Var` The newly-created zVar in this CDF """ if name is None: name = zVar.name() if name in self: del self[name] self.new(name, type=zVar.type(), recVary=zVar.rv(), dimVarys=zVar.dv(), dims=zVar._dim_sizes(), n_elements=zVar._nelems()) self[name].compress(*zVar.compress()) self[name].attrs.clone(zVar.attrs) if data: r = zVar._raw zVar._raw = True self.raw_var(name)[...] = zVar[...] zVar._raw = r return zVar def col_major(self, new_col=None): """ Finds the majority of this CDF file Other Parameters ================ new_col : boolean Specify True to change to column-major, False to change to row major, or do not specify to check the majority rather than changing it. (default is check only) Returns ======= out : boolean True if column-major, false if row-major """ if new_col != None: new_maj = const.COLUMN_MAJOR if new_col else const.ROW_MAJOR self._call(const.PUT_, const.CDF_MAJORITY_, new_maj) maj = ctypes.c_long(0) self._call(const.GET_, const.CDF_MAJORITY_, ctypes.byref(maj)) if not maj.value in (const.ROW_MAJOR.value, const.COLUMN_MAJOR.value): raise CDFError(const.BAD_MAJORITY) return maj.value == const.COLUMN_MAJOR.value def readonly(self, ro=None): """ Sets or check the readonly status of this CDF If the CDF has been changed since opening, setting readonly mode will have no effect. .. note:: Closing a CDF that has been opened readonly, or setting readonly False, may take a substantial amount of time if there are many variables in the CDF, as a (potentially large) cache needs to be cleared. Consider specifying ``readonly=False`` when opening the file if this is an issue. However, this may make some reading operations slower. Other Parameters ================ ro : Boolean True to set the CDF readonly, False to set it read/write, or leave out to check only. 
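Examples
========
A minimal sketch, assuming ``cdffile`` is an open, unmodified :class:`CDF`:

>>> cdffile.readonly(False)  # switch to read/write; returns the new state
False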
Returns ======= out : Boolean True if CDF is read-only, else False Raises ====== CDFError : if bad mode is set """ if ro == True: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYon) elif ro == False: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYoff) mode = ctypes.c_long(0) self._call(const.CONFIRM_, const.CDF_READONLY_MODE_, ctypes.byref(mode)) if mode.value == const.READONLYon.value: return True elif mode.value == const.READONLYoff.value: return False else: raise CDFError(const.BAD_READONLY_MODE.value) def checksum(self, new_val=None): """ Set or check the checksum status of this CDF. If checksums are enabled, the checksum will be verified every time the file is opened. Other Parameters ================ new_val : boolean True to enable checksum, False to disable, or leave out to simply check. Returns ======= out : boolean True if the checksum is enabled or False if disabled """ if new_val != None: self._call(const.PUT_, const.CDF_CHECKSUM_, const.MD5_CHECKSUM if new_val else const.NO_CHECKSUM) chk = ctypes.c_long(0) self._call(const.GET_, const.CDF_CHECKSUM_, ctypes.byref(chk)) if not chk.value in (const.MD5_CHECKSUM.value, const.NO_CHECKSUM.value): raise CDFError(const.BAD_CHECKSUM) return chk.value == const.MD5_CHECKSUM.value def close(self): """ Closes the CDF file Although called on object destruction (:meth:`~CDF.__del__`), to ensure all data are saved, the user should explicitly call :meth:`~CDF.close` or :meth:`~CDF.save`. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.CLOSE_, const.CDF_) self._opened = False def compress(self, comptype=None, param=None): """ Set or check the compression of this CDF Sets compression on entire *file*, not per-variable. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) Returns ======= out : tuple (comptype, param) currently in effect See Also ======== :meth:`Var.compress` Examples ======== Set file ``cdffile`` to gzip compression, compression level 9: >>> cdffile.compress(pycdf.const.GZIP_COMPRESSION, 9) """ return _compress(self, comptype, param) def new(self, name, data=None, type=None, recVary=True, dimVarys=None, dims=None, n_elements=None, compress=None, compress_param=None): """ Create a new zVariable in this CDF .. note:: Either ``data`` or ``type`` must be specified. If type is not specified, it is guessed from ``data``. Parameters ========== name : str name of the new variable Other Parameters ================ data data to store in the new variable. If this has a an ``attrs`` attribute (e.g., :class:`~spacepy.datamodel.dmarray`), it will be used to populate attributes of the new variable. type : ctypes.c_long CDF type of the variable, from :mod:`~pycdf.const`. See section 2.5 of the CDF user's guide for more information on CDF data types. recVary : boolean record variance of the variable (default True) dimVarys : list of boolean dimension variance of each dimension, default True for all dimensions. 
dims : list of int size of each dimension of this variable, default zero-dimensional. Note this is the dimensionality as defined by CDF, i.e., for record-varying variables it excludes the leading record dimension. See :py:class:`Var`. n_elements : int number of elements, should be 1 except for CDF_CHAR, for which it's the length of the string. compress : ctypes.c_long Compression to apply to this variable, default None. See :py:meth:`Var.compress`. compress_param : ctypes.c_long Compression parameter if compression used; reasonable default is chosen. See :py:meth:`Var.compress`. Returns ======= out : :py:class:`Var` the newly-created zVariable Raises ====== ValueError : if neither data nor sufficient typing information is provided. Notes ===== Any given data may be representable by a range of CDF types; if the type is not specified, pycdf will guess which the CDF types which can represent this data. This breaks down to: #. If input data is a numpy array, match the type of that array #. Proper kind (numerical, string, time) #. Proper range (stores highest and lowest number provided) #. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: #. Type that matches precision of data first, then #. integer type before float type, then #. Smallest type first, then #. signed type first, then #. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if ``data`` specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: #. absolute values between 0 and 3e-39 #. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. """ if type in (const.CDF_EPOCH16, const.CDF_INT8, const.CDF_TIME_TT2000) \ and self.backward: raise ValueError('Cannot use EPOCH16, INT8, or TIME_TT2000 ' 'in backward-compatible CDF') if not lib.supports_int8 and \ type in (const.CDF_INT8, const.CDF_TIME_TT2000): raise ValueError('INT8 and TIME_TT2000 require CDF library 3.4.0') if data is None: if type is None: raise ValueError('Must provide either data or a CDF type.') if dims is None: dims = [] if n_elements is None: n_elements = 1 else: (guess_dims, guess_types, guess_elements) = _Hyperslice.types(data) if dims is None: if recVary: if guess_dims == (): raise ValueError( 'Record-varying data cannot be scalar. 
' 'Specify NRV with CDF.new() or put data in array.') dims = guess_dims[1:] else: dims = guess_dims if type is None: type = guess_types[0] if type == const.CDF_EPOCH16.value and self.backward: type = const.CDF_EPOCH if n_elements is None: n_elements = guess_elements if dimVarys is None: dimVarys = [True for i in dims] recVary = const.VARY if recVary else const.NOVARY dimVarys = [const.VARY if dimVary else const.NOVARY for dimVary in dimVarys] if not hasattr(type, 'value'): type = ctypes.c_long(type) if type.value == const.CDF_INT8.value and not lib.supports_int8: raise ValueError( '64-bit integer support require CDF library 3.4.0') if type.value in (const.CDF_EPOCH16.value, const.CDF_INT8.value, const.CDF_TIME_TT2000.value) \ and self.backward: raise ValueError('Data requires EPOCH16, INT8, or TIME_TT2000; ' 'incompatible with backward-compatible CDF') new_var = Var(self, name, type, n_elements, dims, recVary, dimVarys) if compress != None: new_var.compress(compress, compress_param) if data is not None: new_var[...] = data if hasattr(data, 'attrs'): new_var.attrs.clone(data.attrs) return new_var def raw_var(self, name): """ Get a "raw" :class:`Var` object. Normally a :class:`Var` will perform translation of values for certain types (to/from Unicode for CHAR variables on Py3k, and to/from datetime for all time types). A "raw" object does not perform this translation, on read or write. This does *not* affect the data on disk, and in fact it is possible to maintain multiple Python objects with access to the same zVariable. Parameters ========== name : str name or number of the zVariable """ v = self[name] v._raw = True return v def save(self): """ Saves the CDF file but leaves it open. If closing the CDF, :meth:`close` is sufficient; there is no need to call :meth:`save` before :meth:`close`. .. note:: Relies on an undocumented call of the CDF C library, which is also used in the Java interface. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.SAVE_, const.CDF_) def copy(self): """ Make a copy of all data and attributes in this CDF Returns ======= out : :py:class:`CDFCopy` :class:`~spacepy.datamodel.SpaceData`-like object of all data """ return CDFCopy(self) def version(self): """ Get version of library that created this CDF Returns ======= out : tuple version of CDF library, in form (version, release, increment) """ ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) self._call(const.GET_, const.CDF_VERSION_, ctypes.byref(ver), const.GET_, const.CDF_RELEASE_, ctypes.byref(rel), const.GET_, const.CDF_INCREMENT_, ctypes.byref(inc)) return (ver.value, rel.value, inc.value) def _get_attrs(self): """Get attribute list Provide access to the CDF's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = gAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. 
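A short sketch of the dict-like access (the attribute name and value here
are hypothetical):

>>> cdffile.attrs['Project'] = 'My mission'
>>> 'Project' in cdffile.attrs
True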
""") def var_num(self, varname): """Get the variable number of a particular variable name This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : int Variable number of this zvariable. """ num = self._var_nums.get(varname, None) if num is None: #Copied from Var._get, which can hopefully be thinned varNum = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMBER_, varname, ctypes.byref(varNum)) num = varNum.value self._var_nums[varname] = num return num def attr_num(self, attrname): """Get the attribute number and scope by attribute name This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : tuple attribute number, scope (True for global) of this attribute """ res = self._attr_info.get(attrname, None) if res is None: #Copied from Var._get, which can hopefully be thinned attrNum = ctypes.c_long(0) self._call(const.GET_, const.ATTR_NUMBER_, attrname, ctypes.byref(attrNum)) scope = ctypes.c_long(0) self._call(const.SELECT_, const.ATTR_, attrNum, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) res = (attrNum.value, scope) self._attr_info[attrname] = res return res def clear_attr_from_cache(self, attrname): """Mark an attribute deleted in the name-to-number cache Will remove an attribute, and all attributes with higher numbers, from the attribute cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the attribute. Not this is NOT a string in Python 3! """ num, scope = self.attr_num(attrname) #All numbers higher than this are renumbered for a, n in list(self._attr_info.items()): if n[0] >= num: del self._attr_info[a] def clear_from_cache(self, varname): """Mark a variable deleted in the name-to-number cache Will remove a variable, and all variables with higher numbers, from the variable cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! """ num = self.var_num(varname) #All numbers higher than this are renumbered for v, n in list(self._var_nums.items()): if n >= num: del self._var_nums[v] def add_attr_to_cache(self, attrname, num, scope): """Add an attribute to the name-to-number cache This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable scope : bool True if global scope; False if variable scope. 
""" self._attr_info[attrname] = (num, scope) def add_to_cache(self, varname, num): """Add a variable to the name-to-number cache This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable """ self._var_nums[varname] = num #Note there is no function for delete, currently handled in Var.rename #and Attr.rename by just deleting from the dict directly. Maybe this #should be differen (maybe should be possible to follow a variable across #a rename...) class Var(MutableSequence): """ A CDF variable. This object does not directly store the data from the CDF; rather, it provides access to the data in a format that much like a Python list or numpy :class:`~numpy.ndarray`. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. The CDF user's guide, section 2.3, provides background on variables. .. note:: Not intended to be created directly; use methods of :class:`CDF` to gain access to a variable. A record-varying variable's data are viewed as a hypercube of dimensions n_dims+1 (the extra dimension is the record number). They are indexed in row-major fashion, i.e. the last index changes most frequently / is contiguous in memory. If the CDF is column-major, the data are transformed to row-major before return. Non record-varying variables are similar, but do not have the extra dimension of record number. Variables can be subscripted by a multidimensional index to return the data. Indices are in row-major order with the first dimension representing the record number. If the CDF is column major, the data are reordered to row major. Each dimension is specified by standard Python `slice <http://docs.python.org/tutorial/introduction.html#strings>`_ notation, with dimensions separated by commas. The ellipsis fills in any missing dimensions with full slices. The returned data are lists; Python represents multidimensional arrays as nested lists. The innermost set of lists represents contiguous data. .. note:: numpy 'fancy indexing' is *not* supported. Degenerate dimensions are 'collapsed', i.e. no list of only one element will be returned if a single subscript is specified instead of a range. (To avoid this, specify a slice like 1:2, which starts with 1 and ends before 2). Two special cases: 1. requesting a single-dimension slice for a record-varying variable will return all data for that record number (or those record numbers) for that variable. 2. Requests for multi-dimensional variables may skip the record-number dimension and simply specify the slice on the array itself. In that case, the slice of the array will be returned for all records. In the event of ambiguity (e.g., single-dimension slice on a one-dimensional variable), case 1 takes priority. Otherwise, mismatch between the number of dimensions specified in the slice and the number of dimensions in the variable will cause an :exc:`~exceptions.IndexError` to be thrown. This all sounds very complicated but it is essentially attempting to do the 'right thing' for a range of slices. An unusual case is scalar (zero-dimensional) non-record-varying variables. Clearly they cannot be subscripted normally. 
In this case, use the ``[...]`` syntax meaning 'access all data.': >>> import pycdf >>> testcdf = pycdf.CDF('test.cdf', '') >>> variable = testcdf.new('variable', recVary=False, ... type=pycdf.const.CDF_INT4) >>> variable[...] = 10 >>> variable <Var: CDF_INT4 [] NRV > >>> variable[...] 10 Reading any empty non-record-varying variable will return an empty with the same *number* of dimensions, but all dimensions will be of zero length. The scalar is, again, a special case: due to the inability to have a numpy array which is both zero-dimensional and empty, reading an NRV scalar variable with no data will return an empty one-dimensional array. This is really not recommended. As a list type, variables are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_; iterating over a variable returns a single complete record at a time. This is all clearer with examples. Consider a variable ``B_GSM``, with three elements per record (x, y, z components) and fifty records in the CDF. Then: 1. ``B_GSM[0, 1]`` is the y component of the first record. 2. ``B_GSM[10, :]`` is a three-element list, containing x, y, and z components of the 11th record. As a shortcut, if only one dimension is specified, it is assumed to be the record number, so this could also be written ``B_GSM[10]``. 3. ``B_GSM[...]`` reads all data for ``B_GSM`` and returns it as a fifty-element list, each element itself being a three-element list of x, y, z components. Multidimensional example: consider fluxes stored as a function of pitch angle and energy. Such a variable may be called Flux and stored as a two-dimensional array, with the first dimension representing (say) ten energy steps and the second, eighteen pitch angle bins (ten degrees wide, centered from 5 to 175 degrees). Assume 100 records stored in the CDF (i.e. 100 different times). 1. ``Flux[4]`` is a list of ten elements, one per energy step, each element being a list of 18 fluxes, one per pitch bin. All are taken from the fifth record in the CDF. 2. ``Flux[4, :, 0:4]`` is the same record, all energies, but only the first four pitch bins (roughly, field-aligned). 3. ``Flux[..., 0:4]`` is a 100-element list (one per record), each element being a ten-element list (one per energy step), each containing fluxes for the first four pitch bins. This slicing notation is very flexible and allows reading specifically the desired data from the CDF. All data are, on read, converted to appropriate Python data types; EPOCH, EPOCH16, and TIME_TT2000 types are converted to :class:`~datetime.datetime`. Data are returned in numpy arrays. .. note:: Although pycdf supports TIME_TT2000 variables, the Python :class:`~datetime.datetime` object does not support leap seconds. Thus, on read, any seconds past 59 are truncated to 59.999999 (59 seconds, 999 milliseconds, 999 microseconds). Potentially useful list methods and related functions: - `count <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `in <http://docs.python.org/reference/expressions.html#in>`_ - `index <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `len <http://docs.python.org/library/functions.html#len>`_ - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - `sorted <http://docs.python.org/library/functions.html#sorted>`_ The topic of array majority can be very confusing; good background material is available at `IDL Array Storage and Indexing <http://www.idlcoyote.com/misc_tips/colrow_major.html>`_. 
In brief, *regardless of the majority stored in the CDF*, pycdf will always present the data in the native Python majority, row-major order, also known as C order. This is the default order in `NumPy <http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html #internal-memory-layout-of-an-ndarray>`_. However, packages that render image data may expect it in column-major order. If the axes seem 'swapped' this is likely the reason. The :attr:`~Var.attrs` Python attribute acts as a dictionary referencing zAttributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`zAttrList` for more on the dictionary of attributes. With writing, as with reading, every attempt has been made to match the behavior of Python lists. You can write one record, many records, or even certain elements of all records. There is one restriction: only the record dimension (i.e. dimension 0) can be resized by write, as all records in a variable must have the same dimensions. Similarly, only whole records can be deleted. .. note:: Unusual error messages on writing data usually mean that pycdf is unable to interpret the data as a regular array of a single type matching the type and shape of the variable being written. A 5x4 array is supported; an irregular array where one row has five columns and a different row has six columns is not. Error messages of this type include: - ``Data must be well-formed, regular array of number, string, or datetime`` - ``setting an array element with a sequence.`` - ``shape mismatch: objects cannot be broadcast to a single shape`` For these examples, assume Flux has 100 records and dimensions [2, 3]. Rewrite the first record without changing the rest: >>> Flux[0] = [[1, 2, 3], [4, 5, 6]] Writes a new first record and delete all the rest: >>> Flux[...] = [[1, 2, 3], [4, 5, 6]] Write a new record in the last position and add a new record after: >>> Flux[99:] = [[[1, 2, 3], [4, 5, 6]], ... [[11, 12, 13], [14, 15, 16]]] Insert two new records between the current number 5 and 6: >>> Flux[5:6] = [[[1, 2, 3], [4, 5, 6]], [[11, 12, 13], ... [14, 15, 16]]] This operation can be quite slow, as it requires reading and rewriting the entire variable. (CDF does not directly support record insertion.) Change the first element of the first two records but leave other elements alone: >>> Flux[0:2, 0, 0] = [1, 2] Remove the first record: >>> del Flux[0] Removes record 5 (the sixth): >>> del Flux[5] Due to the need to work around a bug in the CDF library, this operation can be quite slow. Delete *all data* from ``Flux``, but leave the variable definition intact: >>> del Flux[...] .. note:: Although this interface only directly supports zVariables, zMode is set on opening the CDF so rVars appear as zVars. See p.24 of the CDF user's guide; pyCDF uses zMode 2. .. autosummary:: ~Var.attrs ~Var.compress ~Var.copy ~Var.dtype ~Var.dv ~Var.insert ~Var.name ~Var.rename ~Var.rv ~Var.shape ~Var.type .. attribute:: Var.attrs zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. .. automethod:: compress .. automethod:: copy .. autoattribute:: dtype .. automethod:: dv .. automethod:: insert .. automethod:: name .. automethod:: rename .. automethod:: rv .. autoattribute:: shape .. 
automethod:: type """ def __init__(self, cdf_file, var_name, *args): """Create or locate a variable Parameters ========== cdf_file : :py:class:`pycdf.CDF` CDF file containing this variable var_name : string name of this variable Other Parameters ================ args additional arguments passed to :py:meth:`_create`. If none, opens an existing variable. If provided, creates a new one. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning """ self.cdf_file = cdf_file #This is the definitive "identify" of variable self._name = None self._type = None #CDF type (long) self._raw = False #Raw access (skip all conversions) if len(args) == 0: self._get(var_name) else: self._create(var_name, *args) #Weak reference to attribute list (use attrs instead) #This avoids a reference loop self._attrlistref = weakref.ref(zAttrList(self)) def __getitem__(self, key): """Returns a slice from the data array. Details under :py:class:`pycdf.Var`. @return: The data from this variable @rtype: list-of-lists of appropriate type. @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) #Hyperslice mostly catches this sort of thing, but #an empty variable is a special case, since we might want to #WRITE to 0th record (which Hyperslice also supports) but #can't READ from it, and iterating over tries to read from it. if hslice.rv: if hslice.dimsizes[0] == 0 and hslice.degen[0] and \ hslice.starts[0] == 0: raise IndexError('record index out of range') #For NRV, again hslice will assume 0th record exists since we might #want to write. So ANY degenerate dim other than the glued-on 0th #suggests an explicit index that should fail. None degenerate suggests #make an empty array. #Note this is pulling a lot of hyperslice stuff into getitem! elif hslice.dimsizes[0] == 0: if len(hslice.degen) > 1 and max(hslice.degen[1:]): raise IndexError('record index out of range') else: #The zero-length dimension is degenerate so it gets chopped, #and you can't have a zero-length numpy array that still #maintains the size of all other dimensions. So just force #a zero-dim array and the rest will follow hslice.counts[...] = 0 #If this is a scalar, need to make a single non-degenerate #dimension so it can be empty. if len(hslice.counts) == 1: hslice.degen[0] = False result = hslice.create_array() if hslice.counts[0] != 0: hslice.select() lib.call(const.GET_, const.zVAR_HYPERDATA_, result.ctypes.data_as(ctypes.c_void_p)) return hslice.convert_input_array(result) def __delitem__(self, key): """Removes a record (or set of records) from the CDF Only whole records can be deleted, so the del call must either specify only one dimension or it must specify all elements of the non-record dimensions. This is *not* a way to resize a variable! Deleting records from the middle of a variable may be very slow in some circumstances. To work around a bug in CDF library versions 3.4.0 and before, all the data must be read in, the requested deletions done, and then all written back out. 
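As an illustrative sketch (reusing the hypothetical ``Flux`` variable with
100 records of dimensions [2, 3] from the class docstring), only
whole-record deletions are accepted:

            >>> del Flux[2:5]      # removes records 2, 3 and 4
            >>> del Flux[0, ...]   # also allowed: every element of record 0
            >>> del Flux[0, 1]     # raises TypeError; not a whole record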
@param key: index or slice to delete @type key: int or slice @raise TypeError: if an attempt is made to delete from a non record-varying variable, or to delete below the record level """ if not self.rv(): raise TypeError('Cannot delete records from non-record-varying ' 'variable.') hslice = _Hyperslice(self, key) if hslice.dims > 1 and (hslice.counts[1:] != hslice.dimsizes[1:]).any(): raise TypeError('Can only delete entire records.') if hslice.counts[0] == 0: return start = hslice.starts[0] count = hslice.counts[0] interval = hslice.intervals[0] dimsize = hslice.dimsizes[0] self._call() dangerous_delete = False if lib._del_middle_rec_bug and \ (interval != 1 or (start != 0 and start + count < dimsize)): #delete from middle is dangerous if only have one index entry entries = ctypes.c_long(0) lib.call(const.GET_, const.zVAR_nINDEXENTRIES_, ctypes.byref(entries)) dangerous_delete = (entries.value == 1) if dangerous_delete: data = self[...] data = numpy.delete( data, numpy.arange(start, start + count * interval, interval), 0) self[0:dimsize - count] = data first_rec = dimsize - count last_rec = dimsize - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif interval == 1: first_rec = ctypes.c_long(start) last_rec = ctypes.c_long(start + count - 1) lib.call(const.DELETE_, const.zVAR_RECORDS_, first_rec, last_rec) else: self._call() #delete from end to avoid renumbering of records for recno in range(start + (count - 1) * interval, start - 1, -1 * interval): lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(recno), ctypes.c_long(recno)) def __setitem__(self, key, data): """Puts a slice into the data array. Details under :py:class:`pycdf.Var`. @param key: index or slice to store @type key: int or slice @param data: data to store @type data: numpy.array @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. 
IndexError will @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) n_recs = hslice.counts[0] hslice.expand(data) cdf_type = self.type() if cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) else: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=self._np_type()) if cdf_type == const.CDF_EPOCH16.value: datashape = data.shape[:-1] else: datashape = data.shape #Check data sizes if datashape != tuple(hslice.expected_dims()): raise ValueError('attempt to assign data of dimensions ' + str(datashape) + ' to slice of dimensions ' + str(tuple(hslice.expected_dims()))) #Flip majority and reversed dimensions, see convert_input_array data = hslice.convert_output_array(data) #Handle insertions and similar weirdness if hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Specified slice ends before last record, so insert in middle saved_data = self[hslice.starts[0] + n_recs:] if hslice.counts[0] > 0: hslice.select() lib.call(const.PUT_, const.zVAR_HYPERDATA_, data.ctypes.data_as(ctypes.c_void_p)) if hslice.counts[0] < n_recs: first_rec = hslice.starts[0] + hslice.counts[0] last_rec = hslice.dimsizes[0] - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Put saved data in after inserted data self[hslice.starts[0] + hslice.counts[0]:] = saved_data def extend(self, data): """ Append multiple values to the end of this variable This is an efficiency function which overrides the base implementation in MutableSequence. Parameters ---------- data : the data to append """ self[len(self):] = data def insert(self, index, data): """ Inserts a *single* record before an index Parameters ---------- index : int index before which to insert the new record data : the record to insert """ self[index:index] = [data] def _create(self, var_name, datatype, n_elements = 1, dims = (), recVary = const.VARY, dimVarys = None): """Creates a new zVariable @param var_name: name of this variable @type var_name: string @param datatype: CDF data type @type datatype: ctypes.c_long @param n_elements: number of elements (should be 1 except for CDF_CHAR variables). @type n_elements: long @param dims: size of each dimension for multi-dimensional variable, or empty for a zero-dimensional @type dims: sequence of long @param recVary: record variance for this variable (VARY/NOVARY) @type recVary: long @param dimVarys: array of VARY or NOVARY, variance for each dimension @type dimVarys: sequence of long @return: new variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.new}. 
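A hedged sketch of the roughly equivalent public call (variable name and
keyword values are illustrative only; see L{CDF.new} for the authoritative
signature):

        >>> flux = cdf.new('Flux', type=pycdf.const.CDF_FLOAT,
        ...                dims=[2, 3], recVary=True)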
""" dim_array = (ctypes.c_long * len(dims))(*dims) enc_name = var_name.encode('ascii') if dimVarys is None: dim_vary_array = (ctypes.c_long * (len(dims) if len(dims) > 0 else 1))(const.VARY) else: dim_vary_array = (ctypes.c_long * len(dims))(*dimVarys) varNum = ctypes.c_long(0) self.cdf_file._call(const.CREATE_, const.zVAR_, enc_name, datatype, ctypes.c_long(n_elements), ctypes.c_long(len(dims)), dim_array, recVary, dim_vary_array, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) def _delete(self): """Removes this zVariable from the CDF @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ self._call(const.DELETE_, const.zVAR_) self.cdf_file.clear_from_cache(self._name) self._name = None def _get(self, var_name): """Gets an existing zVariable @param var_name: name of this variable @type var_name: string @return: variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.__getitem__}. """ if isinstance(var_name, str_classes): try: enc_name = var_name.encode('ascii').rstrip() except AttributeError: enc_name = var_name.rstrip() #already in ASCII #'touch' CDF to cause an error if the name isn't there; get number varNum = ctypes.c_long(0) self.cdf_file._call(const.GET_, const.zVAR_NUMBER_, enc_name, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) else: #Looking up by number name = ctypes.create_string_buffer(const.CDF_VAR_NAME_LEN256+1) self.cdf_file._call(const.SELECT_, const.zVAR_, ctypes.c_long(var_name), const.GET_, const.zVAR_NAME_, name) self._name = name.value.rstrip() self.cdf_file.add_to_cache(self._name, var_name) def _num(self): """Returns the zVar number for this variable @return: number of this zVar @rtype: int """ return self.cdf_file.var_num(self._name) def __len__(self): """Get number of records for this variable in this file @return: Number of records @rtype: long @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ count = ctypes.c_long(0) self._call(const.GET_, const.zVAR_MAXREC_, ctypes.byref(count)) return (count.value + 1) def __repr__(self): """Returns representation of the variable Cannot return anything that can be eval'd to create a copy, so just wrap the informal representation in angle brackets. @return: info on this zVar @rtype: str """ return '<Var:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the variable This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.Var`. 
@return: info on this zVar, CDFTYPE [dimensions] NRV (if not record-varying) @rtype: str """ if self.cdf_file._opened: cdftype = self.type() chartypes = (const.CDF_CHAR.value, const.CDF_UCHAR.value) rv = self.rv() typestr = lib.cdftypenames[cdftype] + \ ('*' + str(self._nelems()) if cdftype in chartypes else '' ) if rv: sizestr = str([len(self)] + self._dim_sizes()) else: sizestr = str(self._dim_sizes()) return typestr + ' ' + sizestr + ('' if rv else ' NRV') else: if isinstance(self._name, str): return 'zVar "{0}" in closed CDF {1}'.format( self._name, self.cdf_file.pathname) else: return 'zVar "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self.cdf_file.pathname.decode('ascii')) def _n_dims(self): """Get number of dimensions for this variable @return: the number of dimensions @rtype: long """ n_dims = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMDIMS_, ctypes.byref(n_dims)) return n_dims.value def _dim_sizes(self): """Get the dimension sizes for this variable @return: sequence of sizes @rtype: sequence of long @note: This will always be in Python order (i.e. row major, last index iterates most quickly), *regardless* of the majority of the CDF. """ sizes = (ctypes.c_long * const.CDF_MAX_DIMS)(0) self._call(const.GET_, const.zVAR_DIMSIZES_, sizes) sizes = sizes[0:self._n_dims()] return sizes def rv(self, new_rv=None): """ Gets or sets whether this variable has record variance If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Other Parameters ================ new_rv : boolean True to change to record variance, False to change to NRV, unspecified to simply check variance. Returns ======= out : Boolean True if record varying, False if NRV """ if new_rv != None: self._call(const.PUT_, const.zVAR_RECVARY_, const.VARY if new_rv else const.NOVARY) vary = ctypes.c_long(0) self._call(const.GET_, const.zVAR_RECVARY_, ctypes.byref(vary)) return vary.value != const.NOVARY.value def dv(self, new_dv=None): """ Gets or sets dimension variance of each dimension of variable. If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Parameters ========== new_dv : list of boolean Each element True to change that dimension to dimension variance, False to change to not dimension variance. (Unspecified to simply check variance.) Returns ======= out : list of boolean True if that dimension has variance, else false. """ ndims = self._n_dims() if new_dv != None: if len(new_dv) != ndims: raise ValueError('Must specify variance for ' + str(ndims) + 'dimensions.') varies = (ctypes.c_long * ndims)( *[const.VARY if dv else const.NOVARY for dv in new_dv]) self._call(const.PUT_, const.zVAR_DIMVARYS_, varies) if ndims == 0: return [] varies = (ctypes.c_long * const.CDF_MAX_DIMS)() self._call(const.GET_, const.zVAR_DIMVARYS_, varies) return [dv != const.NOVARY.value for dv in varies[0:ndims]] def _call(self, *args, **kwargs): """Select this CDF and variable and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. @param args: Passed directly to the CDF library interface. Useful constants are defined in the :py:mod:`pycdf.const` module of this package. @type args: various, see :py:mod:`ctypes`. 
@return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self.cdf_file._call( const.SELECT_, const.zVAR_, ctypes.c_long(self.cdf_file.var_num(self._name)), *args, **kwargs) def _np_type(self): """Returns the numpy type of this variable This is the numpy type that will come directly out of the CDF; see :meth:`dtype` for the representation post-conversion. Raises ====== CDFError : for library-reported error or failure to find numpy type Returns ======= out : dtype numpy dtype that will hold value from this variable """ cdftype = self.type() if cdftype == const.CDF_CHAR.value or cdftype == const.CDF_UCHAR.value: return numpy.dtype('S' + str(self._nelems())) try: return lib.numpytypedict[cdftype] except KeyError: raise CDFError(const.BAD_DATA_TYPE) def type(self, new_type=None): """ Returns or sets the CDF type of this variable Parameters ========== new_type : ctypes.c_long the new type from :mod:`~pycdf.const` Returns ======= out : int CDF type """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) n_elements = ctypes.c_long(self._nelems()) self._call(const.PUT_, const.zVAR_DATASPEC_, new_type, n_elements) self._type = None if self._type is None: cdftype = ctypes.c_long(0) self._call(const.GET_, const.zVAR_DATATYPE_, ctypes.byref(cdftype)) self._type = cdftype.value return self._type def _nelems(self): """Number of elements for each value in this variable This is the length of strings for CHAR and UCHAR, should be 1 otherwise. @return: length of strings @rtype: int """ nelems = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMELEMS_, ctypes.byref(nelems)) return nelems.value def name(self): """ Returns the name of this variable Returns ======= out : str variable's name """ if isinstance(self._name, str): return self._name elif isinstance(self._name, bytes): return self._name.decode() def compress(self, comptype=None, param=None): """ Set or check the compression of this variable Compression may not be changeable on variables with data already written; even deleting the data may not permit the change. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
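        A brief usage sketch (variable name hypothetical; the constant is one
        of the compression types in :mod:`~pycdf.const`):

        >>> fluxvar.compress(const.GZIP_COMPRESSION, 7)  # request gzip, level 7
        >>> comptype, param = fluxvar.compress()         # query current setting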
Returns ======= out : tuple the (comptype, param) currently in effect """ return _compress(self, comptype, param) def copy(self): """ Copies all data and attributes from this variable Returns ======= out : :class:`VarCopy` list of all data in record order """ return VarCopy(self) def rename(self, new_name): """ Renames this variable Parameters ========== new_name : str the new name for this variable """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_VAR_NAME_LEN256: raise CDFError(const.BAD_VAR_NAME) self._call(const.PUT_, const.zVAR_NAME_, enc_name) self.cdf_file.add_to_cache( enc_name, self.cdf_file.var_num(self._name)) #Still in cache del self.cdf_file._var_nums[self._name] self._name = enc_name @property def shape(self): """ Provides the numpy array-like shape of this variable. Returns a tuple; first element is number of records (RV variable only) And the rest provide the dimensionality of the variable. .. note:: Assigning to this attribute will not change the shape. """ if self.rv(): return tuple([len(self)] + self._dim_sizes()) else: return tuple(self._dim_sizes()) @property def dtype(self): """ Provide the numpy dtype equivalent to the CDF type of this variable. Data from this variable will be returned in numpy arrays of this type. See Also -------- type """ cdftype = self.type() if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str is not bytes and not self._raw: return numpy.dtype('U' + str(self._nelems())) if cdftype in (const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value) and not self._raw: return numpy.dtype('O') return self._np_type() def _get_attrs(self): """Get attribute list Provide access to the zVar's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = zAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. """) class _Hyperslice(object): """Represents a CDF 'slice' used for the hyper CDF functions For internal module use only. @ivar dims: number of dimensions to this slice, usually number of dimensions to the variable plus one for the record, which represents the 0th (least rapidly varying) dimension. @type dims: int @ivar dimsizes: size of each dimension (0th is number of records) @type dimsizes: list of int @ivar starts: index of the start value for each dimension ('dimension indices' in CDF speak) @type starts: list of int @ivar counts: number of values to get from each dimension. Final result will be the product of everything in counts. ('dimension counts' in CDF speak) @type counts: numpy.array @ivar intervals: interval between successive indices to use for each dimension. ('dimension invervals' in CDF speak) @type intervals: list of int @ivar degen: is this dimension degenerate, i.e. should be removed in the returned dataset. A 3D array with one dimension degenerate will be returned as a 2D array (i.e. list-of-lists.) @type degen: numpy.array @ivar rev: should this dimension be returned in reverse order? 
@type rev: numpy.array @ivar column: is this slice in column-major mode (if false, row-major) @type column: boolean @ivar zvar: what CDF variable this object slices on @type zvar: :py:class:`pycdf.Var` @ivar expanded_key: fully-expanded version of the key passed to the constructor (all dimensions filled in) @type expanded_key: tuple @note: All dimension-related variables are stored row-major (Python order) """ def __init__(self, zvar, key): """Create a Hyperslice @param zvar: zVariable that this slices @type zvar: :py:class:`pycdf.Var` @param key: Python multi-dimensional slice as passed to __getitem__ @type key: tuple of slice and/or int @raise IndexError: if slice is out of range, mismatches dimensions, or otherwise unparsable. @raise ValueError: if slice has invalid values """ self.zvar = zvar self.rv = self.zvar.rv() #dim of records, + 1 record dim (NRV always is record 0) self.dims = zvar._n_dims() + 1 self.dimsizes = [len(zvar)] + \ zvar._dim_sizes() self.starts = [0] * self.dims self.counts = numpy.empty((self.dims,), dtype=numpy.int32) self.counts.fill(1) self.intervals = [1] * self.dims self.degen = numpy.zeros(self.dims, dtype=numpy.bool) self.rev = numpy.zeros(self.dims, dtype=numpy.bool) #key is: #1. a single value (integer or slice object) if called 1D #2. a tuple (of integers and/or slice objects) if called nD #3. Each item is either a single value (degenerate dim) # or a slice object. if not hasattr(key, '__len__'): #Not a container object, pack in tuple key = (key, ) if not self.rv: key = (0, ) + key #NRV, so always get 0th record (degenerate) key = self.expand_ellipsis(key, self.dims) if self.rv: #special-cases for RV variables if len(key) == 1: #get all data for this record(s) key = self.expand_ellipsis(key + (Ellipsis, ), self.dims) elif len(key) == self.dims - 1: #get same slice from each record key = (slice(None, None, None), ) + key if len(key) == self.dims: self.expanded_key = key for i in range(self.dims): idx = key[i] if hasattr(idx, 'start'): #slice (self.starts[i], self.counts[i], self.intervals[i], self.rev[i]) = \ self.convert_range(idx.start, idx.stop, idx.step, self.dimsizes[i]) else: #Single degenerate value if idx < 0: idx += self.dimsizes[i] if idx != 0 and (idx >= self.dimsizes[i] or idx < 0): raise IndexError('list index out of range') self.starts[i] = idx self.degen[i] = True else: raise IndexError('Slice does not match dimensions for zVar ' + str(zvar._name)) self.column = zvar.cdf_file.col_major() def expected_dims(self, data=None): """Calculate size of non-degenerate dimensions Figures out size, in each dimension, of expected input data @return: size of each dimension for this slice, excluding degenerate @rtype: list of int """ return [self.counts[i] for i in range(self.dims) if not self.degen[i]] def expand(self, data): """Expands the record dimension of this slice to hold a set of data If the length of data (outermost dimension) is larger than the record count (counts[0]) for this slice, expand the slice to hold all the data. This requires that the record dimension of the slice not be degenerate, and also that it not have been completely specified when the hyperslice was created (i.e. record dimension either ellipsis or no specified stop.) Does *not* expand any other dimension, since that's Very Hard in CDF. 
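        Illustrative sketch (names hypothetical): writing three records through
        an open-ended slice grows the record count of the slice to fit the data,
        so

        >>> Flux[98:] = [rec_a, rec_b, rec_c]

        writes records 98 through 100 even if the variable previously stopped
        at record 99.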
@param data: the data which are intended to be stored in this slice @type data: list """ rec_slice = self.expanded_key[0] if not self.rv or isinstance(data, str_classes) or self.degen[0] or \ not hasattr(rec_slice, 'stop'): return if len(data) < self.counts[0]: #Truncate to fit data if rec_slice.stop is None and rec_slice.step in (None, 1): self.counts[0] = len(data) elif len(data) > self.counts[0]: #Expand to fit data if rec_slice.step in (None, 1): self.counts[0] = len(data) def create_array(self): """Creates a numpy array to hold the data from this slice Returns ======= out : numpy.array array sized, typed, and dimensioned to hold data from this slice """ counts = self.counts degen = self.degen if self.column: counts = self.reorder(counts) degen = self.reorder(degen) #TODO: Forcing C order for now, revert to using self.column later array = numpy.empty( [counts[i] for i in range(len(counts)) if not degen[i]], self.zvar._np_type(), order='C') return numpy.require(array, requirements=('C', 'A', 'W')) def convert_input_array(self, buffer): """Converts a buffer of raw data from this slice EPOCH(16) variables always need to be converted. CHAR need converted to Unicode if py3k Parameters ========== buffer : numpy.array data as read from the CDF file Returns ======= out : numpy.array converted data """ result = self._flip_array(buffer) #Convert to derived types cdftype = self.zvar.type() if not self.zvar._raw: if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str != bytes: dt = numpy.dtype('U{0}'.format(result.dtype.itemsize)) result = numpy.require(numpy.char.array(result).decode(), dtype=dt) elif cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(result) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(result) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(result) return result def convert_output_array(self, buffer): """Convert a buffer of data that will go into this slice Parameters ========== buffer : numpy.array data to go into the CDF file Returns ======= out : numpy.array input with majority flipped and dimensions reversed to be suitable to pass directly to CDF library. """ buffer = self._flip_array(buffer) return numpy.require(buffer, requirements=('C', 'A', 'W')) def _flip_array(self, data): """ Operations for majority, etc. common between convert_input and _output """ cdftype = self.zvar.type() #Flip majority if any non-degenerate dimensions exist if self.column and not min(self.degen): #Record-number dim degen, swap whole thing if self.degen[0]: if cdftype == const.CDF_EPOCH16.value: #Maintain last dimension data = data.transpose( list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose() #Record-number dimension is not degenerate, so keep it first else: if cdftype == const.CDF_EPOCH16.value: data = data.transpose( [0] + list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose( [0] + list(range(len(data.shape) - 1, 0, -1))) #Reverse non-degenerate dimensions in rev #Remember that the degenerate indices are already gone! 
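        # Illustrative note: for a request like var[..., ::-1] on a variable
        # with five records of shape [2, 3], rev is [False, False, True], and
        # the slice list built below becomes
        # [slice(None), slice(None), slice(None, None, -1)], so only the last
        # axis of the already majority-corrected array gets flipped.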
if self.rev.any(): sliced = [(slice(None, None, -1) if self.rev[i] else slice(None)) for i in range(self.dims) if not self.degen[i]] if cdftype == const.CDF_EPOCH16.value: #don't reverse last dim sliced.extend(slice(None)) data = operator.getitem(data, tuple(sliced)) return data def select(self): """Selects this hyperslice in the CDF Calls the CDF library to select the CDF, variable, records, and array elements corresponding to this slice. """ args = (const.SELECT_, const.zVAR_RECNUMBER_, ctypes.c_long(self.starts[0]), const.SELECT_, const.zVAR_RECCOUNT_, ctypes.c_long(self.counts[0]), const.SELECT_, const.zVAR_RECINTERVAL_, ctypes.c_long(self.intervals[0])) if self.dims > 1: dims = self.dims - 1 args += (const.SELECT_, const.zVAR_DIMINDICES_, (ctypes.c_long * dims)(*self.starts[1:]), const.SELECT_, const.zVAR_DIMCOUNTS_, (ctypes.c_long * dims)(*self.counts[1:]), const.SELECT_, const.zVAR_DIMINTERVALS_, (ctypes.c_long * dims)(*self.intervals[1:])) self.zvar._call(*args) @staticmethod def expand_ellipsis(slices, n_dims): """Expands any ellipses into correct number of full-size slices @param slices: tuple of slices, integers, or ellipse objects @type slices: tuple @param n_dims: number of dimensions this slice is over @type n_dims: int @return: L{slices} with ellipses replaced by appropriate number of full-dimension slices @rtype: tuple @raise IndexError: if ellipses specified when already have enough dimensions """ if slices is Ellipsis: return tuple([slice(None, None, None) for i in range(n_dims)]) #Elements might be numpy arrays, so can't use in/index idx = [i for i, v in enumerate(slices) if v is Ellipsis] if not idx: #no ellipsis return slices if len(idx) > 1: #multiples! raise IndexError('Ellipses can only be used once per slice.') idx = idx[0] #how many dims to expand ellipsis to #remember the ellipsis is in len(slices) and must be replaced! extra = n_dims - len(slices) + 1 if extra < 0: raise IndexError('too many indices') result = slices[0:idx] + (slice(None), ) * extra + slices[idx+1:] return result @staticmethod def check_well_formed(data): """Checks if input data is well-formed, regular array""" d = numpy.asanyarray(data) if d.dtype == numpy.object: #this is probably going to be bad try: len(d.flat[0]) except TypeError: #at least it's not a list pass else: raise ValueError( 'Data must be well-formed, regular array of number, ' 'string, or datetime') @staticmethod def dimensions(data): """Finds the dimensions of a nested list-of-lists @param data: data of which dimensions are desired @type data: list (of lists) @return: dimensions of L{data}, in order outside-in @rtype: list of int @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) _Hyperslice.check_well_formed(d) return d.shape @staticmethod def types(data, backward=False): """Find dimensions and valid types of a nested list-of-lists Any given data may be representable by a range of CDF types; infer the CDF types which can represent this data. This breaks down to: 1. Proper kind (numerical, string, time) 2. Proper range (stores highest and lowest number) 3. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: 1. Type that matches precision of data first, then 2. integer type before float type, then 3. Smallest type first, then 4. signed type first, then 5. specifically-named (CDF_BYTE) vs. 
generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if L{data} specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: 1. absolute values between 0 and 3e-39 2. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. @param data: data for which dimensions and CDF types are desired @type data: list (of lists) @param backward: limit to pre-CDF3 types @type backward: bool @return: dimensions of L{data}, in order outside-in; CDF types which can represent this data; number of elements required (i.e. length of longest string) @rtype: 3-tuple of lists ([int], [ctypes.c_long], [int]) @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) dims = d.shape elements = 1 types = [] _Hyperslice.check_well_formed(d) if d.dtype.kind in ('S', 'U'): #it's a string types = [const.CDF_CHAR, const.CDF_UCHAR] elements = d.dtype.itemsize if d.dtype.kind == 'U': #UTF-8 uses 4 bytes per elements //= 4 elif d.size and hasattr(numpy.ma.getdata(d).flat[0], 'microsecond'): if max((dt.microsecond % 1000 for dt in d.flat)) > 0: types = [const.CDF_EPOCH16, const.CDF_EPOCH, const.CDF_TIME_TT2000] else: types = [const.CDF_EPOCH, const.CDF_EPOCH16, const.CDF_TIME_TT2000] if backward: del types[types.index(const.CDF_EPOCH16)] del types[-1] elif not lib.supports_int8: del types[-1] elif d is data or isinstance(data, numpy.generic): #numpy array came in, use its type (or byte-swapped) types = [k for k in lib.numpytypedict if (lib.numpytypedict[k] == d.dtype or lib.numpytypedict[k] == d.dtype.newbyteorder()) and not k in lib.timetypes] if (not lib.supports_int8 or backward) \ and const.CDF_INT8.value in types: del types[types.index(const.CDF_INT8.value)] #Maintain priority to match the ordered lists below: #float/double (44, 45) before real (21/22), and #byte (41) before int (1) before char (51). So hack. #Consider making typedict an ordered dict once 2.6 is dead. types.sort(key=lambda x: x % 50, reverse=True) if not types: #not a numpy array, or can't parse its type if d.dtype.kind == 'O': #Object. 
Try to make it numeric #Can't do safe casting from Object, so try and compare #Basically try most restrictive to least restrictive trytypes = (numpy.uint64, numpy.int64, numpy.float64) for t in trytypes: try: newd = d.astype(dtype=t) except: #Failure to cast, try next type continue if (newd == d).all(): #Values preserved, use this type d = newd #Continue with normal guessing, as if a list break else: #fell through without a match raise ValueError( 'Cannot convert generic objects to CDF type.') if d.dtype.kind in ('i', 'u'): #integer minval = numpy.min(d) maxval = numpy.max(d) if minval < 0: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_INT2, const.CDF_INT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 15, 2 ** 31, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] else: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_UINT1, const.CDF_INT2, const.CDF_UINT2, const.CDF_INT4, const.CDF_UINT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 8, 2 ** 15, 2 ** 16, 2 ** 31, 2 ** 32, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] types = [t for (t, c) in zip(types, cutoffs) if c > maxval and (minval >= 0 or minval >= -c)] if (not lib.supports_int8 or backward) \ and const.CDF_INT8 in types: del types[types.index(const.CDF_INT8)] else: #float if dims is (): if d != 0 and (abs(d) > 1.7e38 or abs(d) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] else: absolutes = numpy.abs(d[d != 0]) if len(absolutes) > 0 and \ (numpy.max(absolutes) > 1.7e38 or numpy.min(absolutes) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] types = [t.value if hasattr(t, 'value') else t for t in types] return (dims, types, elements) @staticmethod def reorder(seq): """Reorders seq to switch array majority Used to take an array of subscripts between row and column majority. First element is not touched, being the record number. @param seq: a sequence of *subscripts* @type seq: sequence of integers @return: seq with all but element 0 reversed in order @rtype: sequence of integers """ return numpy.concatenate((seq[0:1], numpy.flipud(seq)[:-1])) @staticmethod def convert_range(start, stop, step, size): """Converts a start/stop/step range to start/count/interval (i.e. changes from Python-style slice to CDF-style) @param start: index to start a slice at, may be none or negative @type start: int @param stop: index at end of slice (one-past, standard Python), may be none or negative @type stop: int @param step: interval for stepping through stlice @type step: int @param size: size of list to slice @type size: int @return: (start, count, interval, rev) where: 1. start is the start index, normalized to be within the size of the list and negatives handled 2. count is the number of records in the slice, guaranteed to stop before the end 3. interval is the skip between records 4. rev indicates whether the sequence should be reversed @rtype: (int, int, int, boolean) """ (start, stop, step) = slice(start, stop, step).indices(size) if step < 0: step *= -1 count = int((start - stop + step - 1) / step) start = start - (count - 1) * step rev = True else: count = int((stop - start + step - 1) / step) rev = False if count < 0: count = 0 start = 0 return (start, count, step, rev) class Attr(MutableSequence): """An attribute, g or z, for a CDF .. 
warning:: This class should not be used directly, but only in its subclasses, :class:`gAttr` and :class:`zAttr`. The methods listed here are safe to use in the subclasses. Represents a CDF attribute, providing access to the Entries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. An introduction to CDF attributes can be found in section 2.4 of the CDF user's guide. Each element of the list is a single Entry of the appropriate type. The index to the elements is the Entry number. Multi-dimensional slicing is *not* supported; an Entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry .. autosummary:: ~Attr.append ~Attr.has_entry ~Attr.insert ~Attr.max_idx ~Attr.new ~Attr.number ~Attr.rename ~Attr.type .. automethod:: append .. automethod:: has_entry .. automethod:: insert .. automethod:: max_idx .. automethod:: new .. automethod:: number .. automethod:: rename .. automethod:: type """ def __init__(self, cdf_file, attr_name, create=False): """Initialize this attribute @param cdf_file: CDF file containing this attribute @type cdf_file: :py:class:`pycdf.CDF` @param attr_name: Name of this attribute @type attr_name: str @param create: True to create attribute, False to look up existing. @type create: bool """ self._cdf_file = cdf_file self._raw = False if isinstance(attr_name, str_classes): try: self._name = attr_name.encode('ascii') except AttributeError: self._name = attr_name attrno = ctypes.c_long() if create: self._cdf_file._call(const.CREATE_, const.ATTR_, self._name, self.SCOPE, ctypes.byref(attrno)) self._cdf_file.add_attr_to_cache( self._name, attrno.value, self.SCOPE == const.GLOBAL_SCOPE) else: #Ensure exists, and populate cache. See scope note below attrno, scope = self._cdf_file.attr_num(self._name) else: name = ctypes.create_string_buffer(const.CDF_ATTR_NAME_LEN256 + 1) scope = ctypes.c_long(0) self._cdf_file._call(const.SELECT_, const.ATTR_, ctypes.c_long(attr_name)) #Because it's possible to create a gAttr Python objecting #referencing an Attribute with variable scope, and vice-versa, #do NOT assume the scope matches #(Higher level code checks for that being a bad thing.) self._cdf_file._call( const.GET_, const.ATTR_NAME_, name, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) self._name = name.value.rstrip() if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) self._cdf_file.add_attr_to_cache(self._name, attr_name, scope) def __getitem__(self, key): """Return a slice of Entries. Because Attributes may be sparse, a multi-element slice will return None for those elements which do not have associated Entries. @param key: index or range of Entry number to return @type key: slice or int @return: a list of entries, appropriate type. @raise IndexError: if L{key} is an int and that Entry number does not exist. 
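        A sketch with hypothetical contents: starting from an attribute with no
        Entries and writing only Entry numbers 0 and 5,

        >>> attribute[0] = 'first'
        >>> attribute[5] = 'sixth'
        >>> attribute[0:6]
        ['first', None, None, None, None, 'sixth']
        >>> len(attribute), attribute.max_idx()
        (2, 5)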
""" if key is Ellipsis: key = slice(None, None, None) if hasattr(key, 'indices'): idx = range(*key.indices(self.max_idx() + 1)) return [self._get_entry(i) if self.has_entry(i) else None for i in idx] else: if self.has_entry(key): return self._get_entry(key) else: raise IndexError('list index ' + str(key) + ' out of range.') def _check_other_entries(self, types): """Try to get the type of this entry from others in the Attribute For zAttrs, checks if all other Entries are the same type, and at least one doesn't match its zVar, i.e. Entry type dominates (otherwise assumption is the Var type dominates). For gAttrs, checks all other Entries, and gives priority to the one that's earliest in the possible type list and exists in other Entries. This is only one component of Entry type guessing! :param list types: CDF types that are candidates (match the data) :return: The type discerned from other Entries, or None """ if self.ENTRY_ == const.zENTRY_: #If everything else is the same entry type, #and one is not the same as its var, probably #all entries should be of that type cand_et = None #The Entry type that might work one_var_diff = False #One Var has a type different from Entry for num in range(self.max_idx() + 1): if not self.has_entry(num): continue vartype = self._cdf_file[num].type() entrytype = self.type(num) if vartype != entrytype: one_var_diff = True if cand_et is None: if not entrytype in types: return None #One var has Entry with "impossible" type cand_et = entrytype elif cand_et != entrytype: return None #Two vars have Entries with different types if one_var_diff and cand_et is not None: return cand_et else: # Of those types which exist in other entries, # find the one which is earliest # in types, i.e. the preferred type entrytypes = [self.type(num) for num in range(self.max_idx() + 1) if self.has_entry(num)] entrytypes = [et for et in entrytypes if et in types] if entrytypes: return types[ min([types.index(et) for et in entrytypes])] return None def __setitem__(self, key, data): """Set a slice of Entries. @param key: index or range of Entry numbers to set @type key: slice or int @param data: the data to set these entries to. Normally each entry should be a sequence; if a scalar is provided, it is treated as a single-element list. @type data: scalar or list @raise ValueError: if size of {data} does not match size of L{key} @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. 
""" if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): #Single value, promote everything a dimension idx = (key, key + 1, 1) data = [data] else: idx = key.indices(self.max_idx() + 1) if key.step is None or key.step > 0: #Iterating forward, extend slice to match data if len(data) > len(range(*idx)): idx = (idx[0], idx[0] + idx[2] * len(data), idx[2]) #get, and check, types and sizes for all data #checks first so don't have error after changing half the Entries data_idx = -1 typelist = [] for i in range(*idx): data_idx += 1 if data_idx >= len(data): continue datum = data[data_idx] if datum is None: typelist[i] = (None, None, None) continue (dims, types, elements) = _Hyperslice.types( datum, backward=self._cdf_file.backward) if len(types) <= 0: raise ValueError('Cannot find a matching CDF type.') if len(dims) > 1: raise ValueError('Entries must be scalar or 1D.') elif len(dims) == 1 and isinstance(datum[0], str_classes): raise ValueError('Entry strings must be scalar.') entry_type = None if self.has_entry(i): #If the entry already exists, match its type entry_type = self.type(i) if not entry_type in types: entry_type = None if entry_type is None: #Check other entries for this attribute entry_type = self._check_other_entries(types) if entry_type is None and self.ENTRY_ == const.zENTRY_: #Fall back to zVar type vartype = self._cdf_file[i].type() if vartype in types: entry_type = vartype else: entry_type = types[0] elif entry_type is None: entry_type = types[0] if not entry_type in lib.numpytypedict: raise ValueError('Cannot find a matching numpy type.') typelist.append((dims, entry_type, elements)) data_idx = -1 for i in range(*idx): data_idx += 1 if data_idx >= len(data) or data[data_idx] is None: if self.has_entry(i): del self[i] continue datum = data[data_idx] (dims, entry_type, elements) = typelist[data_idx] self._write_entry(i, datum, entry_type, dims, elements) def __delitem__(self, key): """Delete a slice of Entries. @param key: index or range of Entry numbers to delete @type key: slice or int @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. """ if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): idx = (key, key + 1, 1) else: idx = key.indices(self.max_idx() + 1) for i in range(*idx): self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(i), const.DELETE_, self.ENTRY_) def __iter__(self, current=0): """Iterates over all entries in this Attribute Returns data from one entry at a time until reaches the end. @note: Returned in entry-number order. """ while current <= self.max_idx(): if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current += 1 def __reversed__(self, current=None): """Iterates over all entries in this Attribute Returns data from one entry at a time, starting at end and going to beginning. @note: Returned in entry-number order. """ if current is None: current = self.max_idx() while current >= 0: if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current -= 1 def __len__(self): """Number of Entries for this Attr. NOT same as max Entry number. 
@return: Number of Entries @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_NUMENTRIES_, ctypes.byref(count)) return count.value def __repr__(self): """Returns representation of an attribute Cannot return anything that can be eval'd to create a copy of the attribtute, so just wrap the informal representation in angle brackets. @return: all the data in this attribute @rtype: str """ return '<\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute This is an 'informal' representation in that it cannot be evaluated directly to create an L{Attr}. @return: all the data in this attribute @rtype: str """ if self._cdf_file._opened: return '\n'.join([str(item) for item in self]) else: if isinstance(self._name, str): return 'Attribute "{0}" in closed CDF {1}'.format( self._name, self._cdf_file.pathname) else: return 'Attribute "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self._cdf_file.pathname.decode('ascii')) def insert(self, index, data): """Insert an entry at a particular number Inserts entry at particular number while moving all subsequent entries to one entry number later. Does not close gaps. Parameters ========== index : int index where to put the new entry data : data for the new entry """ max_entry = self.max_idx() if index > max_entry: #Easy case self[index] = data return for i in range(max_entry, index - 1, -1): if self.has_entry(i+1): self.__delitem__(i+1) if self.has_entry(i): self.new(self.__getitem__(i), type=self.type(i), number=i+1) self[index] = data def append(self, data): """Add an entry to end of attribute Puts entry after last defined entry (does not fill gaps) Parameters ========== data : data for the new entry """ self[self.max_idx() + 1] = data def _call(self, *args, **kwargs): """Select this CDF and Attr and call the CDF internal interface @param args: Passed directly to the CDF library interface. @type args: various, see :py:mod:`ctypes`. @return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self._cdf_file._call( const.SELECT_, const.ATTR_, ctypes.c_long(self._cdf_file.attr_num(self._name)[0]), *args, **kwargs) def _entry_len(self, number): """Number of elements in an Entry @param number: number of Entry @type number: int @return: number of elements @rtype: int """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') count = ctypes.c_long(0) self._call( const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_NUMELEMS_, ctypes.byref(count)) return count.value def type(self, number, new_type=None): """Find or change the CDF type of a particular Entry number Parameters ========== number : int number of Entry to check or change Other Parameters ================ new_type type to change this Entry to, from :mod:`~pycdf.const`. Omit to only check type. Returns ======= out : int CDF variable type, see :mod:`~pycdf.const` Notes ===== If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) size = ctypes.c_long(self._entry_len(number)) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATASPEC_, new_type, size, ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') cdftype = ctypes.c_long(0) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATATYPE_, ctypes.byref(cdftype), ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') return cdftype.value def has_entry(self, number): """Check if this attribute has a particular Entry number Parameters ========== number : int number of Entry to check or change Returns ======= out : bool True if ``number`` is a valid entry number; False if not """ status = self._call(const.CONFIRM_, self.ENTRY_EXISTENCE_, ctypes.c_long(number), ignore=(const.NO_SUCH_ENTRY, )) return not status == const.NO_SUCH_ENTRY def max_idx(self): """Maximum index of Entries for this Attr Returns ======= out : int maximum Entry number """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_MAXENTRY_, ctypes.byref(count)) return count.value def new(self, data, type=None, number=None): """Create a new Entry in this Attribute .. note:: If ``number`` is provided and an Entry with that number already exists, it will be overwritten. Parameters ========== data data to put in the Entry Other Parameters ================ type : int type of the new Entry, from :mod:`~pycdf.const` (otherwise guessed from ``data``) number : int Entry number to write, default is lowest available number. """ if number is None: number = 0 while self.has_entry(number): number += 1 (dims, types, elements) = _Hyperslice.types( data, backward=self._cdf_file.backward) if type is None: #Guess based on other entries type = self._check_other_entries(types) if type is None and self.ENTRY_ == const.zENTRY_: #Try to match variable type vartype = self._cdf_file[number].type() if vartype in types: type = vartype if type is None: type = types[0] elif hasattr(type, 'value'): type = type.value self._write_entry(number, data, type, dims, elements) def number(self): """Find the attribute number for this attribute Returns ======= out : int attribute number """ no = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.ATTR_NUMBER_, self._name, ctypes.byref(no)) return no.value def global_scope(self): """Determine scope of this attribute. Returns ======= out : bool True if global (i.e. gAttr), False if zAttr """ return self._cdf_file.attr_num(self._name)[1] def rename(self, new_name): """Rename this attribute Renaming a zAttribute renames it for *all* zVariables in this CDF! 
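        A hedged usage sketch (attribute name hypothetical); a gAttr object can
        be fetched from the CDF's attribute dictionary and renamed in place:

        >>> cdffile.attrs['Project'].rename('Mission')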
Parameters ========== new_name : str the new name of the attribute """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_ATTR_NAME_LEN256: raise CDFError(const.BAD_ATTR_NAME) self._call(const.PUT_, const.ATTR_NAME_, enc_name) self._cdf_file.add_attr_to_cache( enc_name, *self._cdf_file.attr_num(self._name)) #still in cache del self._cdf_file._attr_info[self._name] self._name = enc_name def _get_entry(self, number): """Read an Entry associated with this L{Attr} @param number: number of Entry to return @type number: int @return: data from entry numbered L{number} @rtype: list or str """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') #Make a big enough buffer length = self._entry_len(number) cdftype = self.type(number) if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): buff = numpy.empty((), 'S{0}'.format(length), order='C') else: if not cdftype in lib.numpytypedict: raise CDFError(const.BAD_DATA_TYPE) buff = numpy.empty((length,), lib.numpytypedict[cdftype], order='C') buff = numpy.require(buff, requirements=('C', 'A', 'W')) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATA_, buff.ctypes.data_as(ctypes.c_void_p)) #decode if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): if str == bytes or self._raw: #Py2k, leave as bytes result = bytes(buff) else: #Py3k, make unicode result = str(numpy.char.array(buff).decode()) else: if not self._raw: if cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(buff) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(buff) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(buff) else: result = buff else: result = buff if length == 1: result = result[0] return result def _write_entry(self, number, data, cdf_type, dims, elements): """Write an Entry to this Attr. @param number: number of Entry to write @type number: int @param data: data to write @param cdf_type: the CDF type to write, from :py:mod:`pycdf.const` @param dims: dimensions of L{data} @type dims: list @param elements: number of elements in L{data}, 1 unless it is a string @type elements: int """ if len(dims) == 0: n_write = 1 else: n_write = dims[0] if cdf_type in (const.CDF_CHAR.value, const.CDF_UCHAR.value): data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.dtype('S' + str(elements))) n_write = elements elif cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data), except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) elif cdf_type in lib.numpytypedict: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=lib.numpytypedict[cdf_type]) else: raise CDFError(const.BAD_DATA_TYPE) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATA_, ctypes.c_long(cdf_type), ctypes.c_long(n_write), data.ctypes.data_as(ctypes.c_void_p)) def _delete(self): """Delete this Attribute Also deletes all Entries associated with it. 
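        Not normally called directly; the usual route (attribute name
        hypothetical) is deletion from the attribute dictionary, which ends up
        here:

        >>> del cdffile.attrs['Project']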
""" self._call(const.DELETE_, const.ATTR_) self._cdf_file.clear_attr_from_cache(self._name) self._name = None class zAttr(Attr): """zAttribute for zVariables within a CDF. .. warning:: Because zAttributes are shared across all variables in a CDF, directly manipulating them may have unexpected consequences. It is safest to operate on zEntries via :class:`zAttrList`. .. note:: When accessing a zAttr, pyCDF exposes only the zEntry corresponding to the associated zVariable. See Also ======== :class:`Attr` """ ENTRY_ = const.zENTRY_ ENTRY_DATA_ = const.zENTRY_DATA_ SCOPE = const.VARIABLE_SCOPE ENTRY_EXISTENCE_ = const.zENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMzENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXzENTRY_ ENTRY_NUMELEMS_ = const.zENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.zENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.zENTRY_DATASPEC_ def insert(self, index, data): """Insert entry at particular index number Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError def append(self, index, data): """Add entry to end of attribute list Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError class gAttr(Attr): """Global Attribute for a CDF Represents a CDF attribute, providing access to the gEntries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. Normally accessed by providing a key to a :class:`gAttrList`: >>> attribute = cdffile.attrs['attribute_name'] >>> first_gentry = attribute[0] Each element of the list is a single gEntry of the appropriate type. The index to the elements is the gEntry number. A gEntry may be either a single string or a 1D array of numerical type. Entries of numerical type (everything but CDF_CHAR and CDF_UCHAR) with a single element are returned as scalars; multiple-element entries are returned as a list. No provision is made for accessing below the entry level; the whole list is returned at once (but Python's slicing syntax can be used to extract individual items from that list.) Multi-dimensional slicing is *not* supported; an entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry gEntries are *not* necessarily contiguous; a gAttribute may have an entry 0 and entry 2 without an entry 1. :meth:`~Attr.len` will return the *number* of gEntries; use :meth:`~Attr.max_idx` to find the highest defined gEntry number and :meth:`~Attr.has_entry` to determine if a particular gEntry number exists. Iterating over all entries is also supported:: >>> entrylist = [entry for entry in attribute] Deleting gEntries will leave a "hole": >>> attribute[0:3] = [1, 2, 3] >>> del attribute[1] >>> attribute.has_entry(1) False >>> attribute.has_entry(2) True >>> print attribute[0:3] [1, None, 3] Multi-element slices over nonexistent gEntries will return ``None`` where no entry exists. Single-element indices for nonexistent gEntries will raise ``IndexError``. Assigning ``None`` to a gEntry will delete it. 
When assigning to a gEntry, the type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing gEntry of the same number in this gAttribute #. other gEntries in this gAttribute #. data-matching constraints described in :meth:`CDF.new`. See Also ======== :class:`Attr` """ ENTRY_ = const.gENTRY_ ENTRY_DATA_ = const.gENTRY_DATA_ SCOPE = const.GLOBAL_SCOPE ENTRY_EXISTENCE_ = const.gENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMgENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXgENTRY_ ENTRY_NUMELEMS_ = const.gENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.gENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.gENTRY_DATASPEC_ class AttrList(MutableMapping): """Object representing a list of attributes. .. warning:: This class should not be used directly, but only via its subclasses, :class:`gAttrList` and :class:`zAttrList`. Methods listed here are safe to use from the subclasses. .. autosummary:: ~AttrList.clone ~AttrList.copy ~AttrList.from_dict ~AttrList.new ~AttrList.rename .. automethod:: clone .. automethod:: copy .. automethod:: from_dict .. automethod:: new .. automethod:: rename """ def __init__(self, cdf_file, special_entry=None): """Initialize the attribute collection @param cdf_file: CDF these attributes are in @type cdf_file: :py:class:`pycdf.CDF` @param special_entry: callable which returns a "special" entry number, used to limit results for zAttrs to those which match the zVar (i.e. the var number) @type special_entry: callable """ self._cdf_file = cdf_file self.special_entry = special_entry def __getitem__(self, name): """Find an Attribute by name @param name: name of the Attribute to return @type name: str @return: attribute named L{name} @rtype: L{Attr} @raise KeyError: if there is no attribute named L{name} @raise CDFError: other errors in CDF library """ try: attrib = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attrib.global_scope() != self.global_scope: raise KeyError(name + ': no ' + self.attr_name + ' by that name.') return attrib def __setitem__(self, name, data): """Create an Attribute or change its entries @param name: name of Attribute to change @type name: str @param data: Entries to populate this Attribute with. Any existing Entries will be deleted! Another C{Attr} may be specified, in which case all its entries are copied. @type data: scalar, list, or L{Attr} """ if isinstance(data, AttrList): if name in self: del self[name] attr = self._get_or_create(name) for entryno in range(data.max_idx()): if data.has_entry(entryno): attr.new(data[entryno], data.type(entryno), entryno) else: attr = self._get_or_create(name) if isinstance(data, str_classes): data = [data] else: try: junk = len(data) except TypeError: data = [data] attr[:] = data del attr[len(data):] def __delitem__(self, name): """Delete an Attribute (and all its entries) @param name: name of Attribute to delete @type name: str """ try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) attr._delete() def __iter__(self, current=0): """Iterates over all Attr in this CDF or variable Returns name of one L{Attr} at a time until reaches the end. @note: Returned in number order. 
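Example (hedged; assumes ``cdffile`` is an open CDF):

>>> attr_names = [name for name in cdffile.attrs]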
""" count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) while current < count.value: candidate = self.AttrType(self._cdf_file, current) if candidate.global_scope() == self.global_scope: if self.special_entry is None or \ candidate.has_entry(self.special_entry()): if str == bytes: value = yield(candidate._name) else: value = yield(candidate._name.decode()) if value != None: current = self[value].number() current += 1 def __repr__(self): """Returns representation of attribute list Cannot return anything that can be eval'd to create a copy of the list, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<' + self.__class__.__name__ + ':\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute list This is an 'informal' representation in that it cannot be evaluated directly to create an L{AttrList}. @return: all the data in this list of attributes @rtype: str """ if self._cdf_file._opened: return '\n'.join([key + ': ' + ( ('\n' + ' ' * (len(key) + 2)).join( [str(value[i]) + ' [' + lib.cdftypenames[value.type(i)] + ']' for i in range(value.max_idx() + 1) if value.has_entry(i)]) if isinstance(value, Attr) else str(value) + ' [' + lib.cdftypenames[self.type(key)] + ']' ) for (key, value) in sorted(self.items())]) else: if isinstance(self._cdf_file.pathname, str): return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname) else: return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname.decode('ascii')) def clone(self, master, name=None, new_name=None): """ Clones another attribute list, or one attribute from it, into this list. Parameters ========== master : AttrList the attribute list to copy from. This can be any dict-like object. Other Parameters ================ name : str (optional) name of attribute to clone (default: clone entire list) new_name : str (optional) name of the new attribute, default ``name`` """ if name is None: self._clone_list(master) else: self._clone_attr(master, name, new_name) def copy(self): """ Create a copy of this attribute list Returns ======= out : dict copy of the entries for all attributes in this list """ return dict((key, value[:] if isinstance(value, Attr) else value) for (key, value) in self.items()) def new(self, name, data=None, type=None): """ Create a new Attr in this AttrList Parameters ========== name : str name of the new Attribute Other Parameters ================ data data to put into the first entry in the new Attribute type CDF type of the first entry from :mod:`~pycdf.const`. Only used if data are specified. Raises ====== KeyError : if the name already exists in this list """ if name in self: raise KeyError(name + ' already exists.') #A zAttr without an Entry in this zVar will be a "get" not "create" attr = self._get_or_create(name) if data is not None: if self.special_entry is None: attr.new(data, type) else: attr.new(data, type, self.special_entry()) def rename(self, old_name, new_name): """ Rename an attribute in this list Renaming a zAttribute renames it for *all* zVariables in this CDF! Parameters ========== old_name : str the current name of the attribute new_name : str the new name of the attribute """ AttrList.__getitem__(self, old_name).rename(new_name) def from_dict(self, in_dict): """ Fill this list of attributes from a dictionary .. deprecated:: 0.1.5 Use :meth:`~pycdf.AttrList.clone` instead; it supports cloning from dictionaries. 
Parameters ========== in_dict : dict Attribute list is populated entirely from this dictionary; all existing attributes are deleted. """ warnings.warn("from_dict is deprecated and will be removed. Use clone.", DeprecationWarning) for k in in_dict: self[k] = in_dict[k] for k in list(self): if not k in in_dict: del self[k] def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{AttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name self[new_name] = master[name] def _clone_list(self, master): """Clones this attribute list from another @param master: the attribute list to copy from @type master: L{AttrList} """ for name in master: self._clone_attr(master, name) for name in list(self): #Can't iterate over a list we're changing if not name in master: del self[name] def _get_or_create(self, name): """Retrieve L{Attr} or create it if it doesn't exist @param name: name of the attribute to look up or create @type name: str @return: attribute with this name @rtype: L{Attr} """ attr = None try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status != const.NO_SUCH_ATTR: raise if attr is None: attr = self.AttrType(self._cdf_file, name, True) elif attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) return attr class gAttrList(AttrList): """ Object representing *all* the gAttributes in a CDF. Normally accessed as an attribute of an open :class:`CDF`: >>> global_attribs = cdffile.attrs Appears as a dictionary: keys are attribute names; each value is an attribute represented by a :class:`gAttr` object. To access the global attribute TEXT: >>> text_attr = cdffile.attrs['TEXT'] See Also ======== :class:`AttrList` """ AttrType = gAttr attr_name = 'gAttribute' global_scope = True def __len__(self): """ Number of gAttributes in this CDF Returns ======= out : int number of gAttributes in the CDF """ count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMgATTRS_, ctypes.byref(count)) return count.value class zAttrList(AttrList): """Object representing *all* the zAttributes in a zVariable. Normally accessed as an attribute of a :class:`Var` in an open CDF: >>> epoch_attribs = cdffile['Epoch'].attrs Appears as a dictionary: keys are attribute names, values are the value of the zEntry associated with the appropriate zVariable. Each vAttribute in a CDF may only have a *single* entry associated with each variable. The entry may be a string, a single numerical value, or a series of numerical values. Entries with multiple values are returned as an entire list; direct access to the individual elements is not possible. Example: finding the first dependency of (ISTP-compliant) variable ``Flux``: >>> print cdffile['Flux'].attrs['DEPEND_0'] zAttributes are shared among zVariables, one zEntry allowed per zVariable. (pyCDF hides this detail.) Deleting the last zEntry for a zAttribute will delete the underlying zAttribute. zEntries are created and destroyed by the usual dict methods on the zAttrlist: >>> epoch_attribs['new_entry'] = [1, 2, 4] #assign a list to new zEntry >>> del epoch_attribs['new_entry'] #delete the zEntry The type of the zEntry is guessed from data provided. 
The type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing zEntry corresponding to this zVar #. other zEntries in this zAttribute #. the type of this zVar #. data-matching constraints described in :py:meth:`CDF.new` See Also ======== :class:`AttrList` """ AttrType = zAttr attr_name = 'zAttribute' global_scope = False def __init__(self, zvar): """Initialize the attribute collection @param zvar: zVariable these attributes are in @param zvar: :py:class:`pycdf.Var` """ super(zAttrList, self).__init__(zvar.cdf_file, zvar._num) self._zvar = zvar def __getitem__(self, name): """Find an zEntry by name @param name: name of the zAttribute to return @type name: str @return: attribute named L{name} @rtype: L{zAttr} @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if attrib.has_entry(zvar_num): attrib._raw = self._zvar._raw return attrib[zvar_num] else: raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) def __delitem__(self, name): """Delete an zEntry by name @param name: name of the zEntry to delete @type name: str @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library @note: If this is the only remaining entry, the Attribute will be deleted. """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(str(name) + ': no such attribute for variable ' + str(self._zvar._name)) del attrib[zvar_num] if len(attrib) == 0: attrib._delete() def __setitem__(self, name, data): """Sets a zEntry by name The type of the zEntry is guessed from L{data}. The type is chosen to match the data; subject to that constraint, it will try to match (in order): 1. existing zEntry corresponding to this zVar 2. other zEntries in this zAttribute 3. the type of this zVar 4. data-matching constraints described in L{_Hyperslice.types} @param name: name of zAttribute; zEntry for this zVariable will be set in zAttribute by this name @type name: str @raise CDFError: errors in CDF library @raise ValueError: if unable to find a valid CDF type matching L{data}, or if L{data} is the wrong dimensions. """ try: attr = super(zAttrList, self).__getitem__(name) except KeyError: attr = zAttr(self._cdf_file, name, True) attr._raw = self._zvar._raw attr[self._zvar._num()] = data def __len__(self): """Number of zAttributes in this variable @return: number of zAttributes in the CDF which have entries for this variable. @rtype: int """ length = 0 count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) current = 0 while current < count.value: candidate = zAttr(self._cdf_file, current) if not candidate.global_scope(): if candidate.has_entry(self._zvar._num()): length += 1 current += 1 return length def type(self, name, new_type=None): """Find or change the CDF type of a zEntry in this zVar @param name: name of the zAttr to check or change @type name: str @param new_type: type to change it to, see :py:mod:`pycdf.const` @type new_type: ctypes.c_long @return: CDF variable type, see :py:mod:`pycdf.const` @rtype: int @note: If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) return attrib.type(zvar_num, new_type) def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{zAttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name if new_name in self: del self[new_name] self.new(new_name, master[name], master.type(name) if hasattr(master, 'type') else None)
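
A hedged usage sketch of the cloning machinery above, via the public dict-like
interface (the variable names 'Flux_copy' and 'UNITS' are hypothetical; assumes an
open, writable CDF):

>>> source_attrs = cdffile['Flux'].attrs
>>> cdffile['Flux_copy'].attrs.clone(source_attrs)           # copy every zEntry, keeping CDF types
>>> cdffile['Flux_copy'].attrs.clone(source_attrs, 'UNITS')  # or copy a single zEntry by name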
__init__
Create a Hyperslice @param zvar: zVariable that this Hyperslice slices @type zvar: :py:class:`pycdf.Var` @param key: Python multi-dimensional slice as passed to __getitem__ @type key: tuple of slice and/or int @raise IndexError: if the slice is out of range, mismatches dimensions, or is otherwise unparsable. @raise ValueError: if the slice has invalid values
#!/usr/bin/env python # -*- coding: utf-8 -*- """ das developers note: This a is modification of the original SpacePy pycdf package. All refereneces to the greater spacepy package have been removed to create a small standalone module. --cwp 2018-10-18 The libcdf.so location code has been changed to find the version installed in anaconda. --cwp 2020-04-06 This package provides a Python interface to the Common Data Format (CDF) library used for many NASA missions, available at http://cdf.gsfc.nasa.gov/. It is targeted at Python 2.6+ and should work without change on either Python 2 or Python 3. The interface is intended to be 'pythonic' rather than reproducing the C interface. To open or close a CDF and access its variables, see the :class:`CDF` class. Accessing data within the variables is via the :class:`Var` class. The :data:`lib` object provides access to some routines that affect the functionality of the library in general. The :mod:`~pycdf.const` module contains constants useful for accessing the underlying library. Authors: Jon Niehof Institution: University of New Hampshire Contact: [email protected] Copyright 2010-2015 Los Alamos National Security, LLC. """ __contact__ = 'Jon Niehof, [email protected]' try: from collections.abc import MutableMapping, MutableSequence except ImportError: from collections import MutableMapping, MutableSequence import ctypes import ctypes.util import datetime import operator import os import os.path import shutil import sys import tempfile import warnings import weakref import numpy import numpy.ma #Import const AFTER library loaded, so failed load doesn't leave half-imported #from . import const try: str_classes = (str, bytes, unicode) except NameError: str_classes = (str, bytes) class Library(object): """ Abstraction of the base CDF C library and its state. Not normally intended for end-user use. An instance of this class is created at package load time as the :data:`~pycdf.lib` variable, providing access to the underlying C library if necessary. The CDF library itself is described in section 2.1 of the CDF user's guide, as well as the CDF C reference manual. Calling the C library directly requires knowledge of :mod:`ctypes`. Instantiating this object loads the C library, see :doc:`/pycdf` docs for details. .. autosummary:: ~Library.call ~Library.check_status ~Library.datetime_to_epoch ~Library.datetime_to_epoch16 ~Library.datetime_to_tt2000 ~Library.epoch_to_datetime ~Library.epoch_to_epoch16 ~Library.epoch_to_num ~Library.epoch_to_tt2000 ~Library.epoch16_to_datetime ~Library.epoch16_to_epoch ~Library.epoch16_to_tt2000 ~Library.set_backward supports_int8 ~Library.tt2000_to_datetime ~Library.tt2000_to_epoch ~Library.tt2000_to_epoch16 v_datetime_to_epoch v_datetime_to_epoch16 v_datetime_to_tt2000 v_epoch_to_datetime v_epoch_to_tt2000 v_epoch16_to_datetime v_epoch16_to_tt2000 v_tt2000_to_datetime v_tt2000_to_epoch v_tt2000_to_epoch16 libpath version .. automethod:: call .. automethod:: check_status .. automethod:: datetime_to_epoch .. automethod:: datetime_to_epoch16 .. automethod:: datetime_to_tt2000 .. automethod:: epoch_to_datetime .. automethod:: epoch_to_epoch16 .. automethod:: epoch_to_num .. automethod:: epoch_to_tt2000 .. automethod:: epoch16_to_datetime .. automethod:: epoch16_to_epoch .. automethod:: epoch16_to_tt2000 .. automethod:: set_backward .. attribute:: supports_int8 True if this library supports INT8 and TIME_TT2000 types; else False. .. automethod:: tt2000_to_datetime .. automethod:: tt2000_to_epoch .. 
automethod:: tt2000_to_epoch16 .. method:: v_datetime_to_epoch(datetime) A vectorized version of :meth:`datetime_to_epoch` which takes a numpy array of datetimes as input and returns an array of epochs. .. method:: v_datetime_to_epoch16(datetime) A vectorized version of :meth:`datetime_to_epoch16` which takes a numpy array of datetimes as input and returns an array of epoch16. .. method:: v_datetime_to_tt2000(datetime) A vectorized version of :meth:`datetime_to_tt2000` which takes a numpy array of datetimes as input and returns an array of TT2000. .. method:: v_epoch_to_datetime(epoch) A vectorized version of :meth:`epoch_to_datetime` which takes a numpy array of epochs as input and returns an array of datetimes. .. method:: v_epoch_to_tt2000(epoch) A vectorized version of :meth:`epoch_to_tt2000` which takes a numpy array of epochs as input and returns an array of tt2000s. .. method:: v_epoch16_to_datetime(epoch0, epoch1) A vectorized version of :meth:`epoch16_to_datetime` which takes a numpy array of epoch16 as input and returns an array of datetimes. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_epoch16_to_tt2000(epoch16) A vectorized version of :meth:`epoch16_to_tt2000` which takes a numpy array of epoch16 as input and returns an array of tt2000s. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_tt2000_to_datetime(tt2000) A vectorized version of :meth:`tt2000_to_datetime` which takes a numpy array of tt2000 as input and returns an array of datetimes. .. method:: v_tt2000_to_epoch(tt2000) A vectorized version of :meth:`tt2000_to_epoch` which takes a numpy array of tt2000 as input and returns an array of epochs. .. method:: v_tt2000_to_epoch16(tt2000) A vectorized version of :meth:`tt2000_to_epoch16` which takes a numpy array of tt2000 as input and returns an array of epoch16. .. attribute:: libpath The path where pycdf found the CDF C library, potentially useful in debugging. If this contains just the name of a file (with no path information), then the system linker found the library for pycdf. On Linux, ``ldconfig -p`` may be useful for displaying the system's library resolution. .. attribute:: version Version of the CDF library, (version, release, increment, subincrement) """ def __init__(self, libpath=None, library=None): """Load the CDF C library. Searches for the library in the order: 1. Appropriately-named file in CDF_LIB 2. Appropriately-named file in CDF_BASE 3. Standard library search path @raise CDFError: BAD_DATA_TYPE if can't map types properly """ if not 'CDF_TMP' in os.environ: os.environ['CDF_TMP'] = tempfile.gettempdir() if not library: if not libpath: self.libpath, self._library = self._find_lib() if self._library is None: raise Exception(( 'Cannot load CDF C library; checked {0}. 
' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(self.libpath))) else: self._library = ctypes.CDLL(libpath) self.libpath = libpath else: self._library = library self.libpath = libpath self._library.CDFlib.restype = ctypes.c_long #commonly used, so set it up here self._library.EPOCHbreakdown.restype = ctypes.c_long self._library.computeEPOCH.restype = ctypes.c_double self._library.computeEPOCH.argtypes = [ctypes.c_long] * 7 self._library.computeEPOCH16.restype = ctypes.c_double self._library.computeEPOCH16.argtypes = [ctypes.c_long] * 10 + \ [ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDFsetFileBackward'): self._library.CDFsetFileBackward.restype = None self._library.CDFsetFileBackward.argtypes = [ctypes.c_long] #Map old name to the 3.7.1+ name if not hasattr(self._library, 'computeTT2000') \ and hasattr(self._library, 'CDF_TT2000_from_UTC_parts'): self._library.computeTT2000 \ = self._library.CDF_TT2000_from_UTC_parts if hasattr(self._library, 'computeTT2000'): self._library.computeTT2000.restype = ctypes.c_longlong self._library.computeTT2000.argtypes = \ [ctypes.c_double] *9 #Map old name to the 3.7.1+ name if not hasattr(self._library, 'breakdownTT2000') \ and hasattr(self._library, 'CDF_TT2000_to_UTC_parts'): self._library.breakdownTT2000 \ = self._library.CDF_TT2000_to_UTC_parts if hasattr(self._library, 'breakdownTT2000'): self._library.breakdownTT2000.restype = None self._library.breakdownTT2000.argtypes = \ [ctypes.c_longlong] + [ctypes.POINTER(ctypes.c_double)] * 9 if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH'): self._library.CDF_TT2000_to_UTC_EPOCH.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH.argtypes = [ctypes.c_longlong] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH'): self._library.CDF_TT2000_from_UTC_EPOCH.restype = ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH.argtypes = [ctypes.c_double] if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH16'): self._library.CDF_TT2000_to_UTC_EPOCH16.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH16.argtypes = \ [ctypes.c_longlong, ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH16'): self._library.CDF_TT2000_from_UTC_EPOCH16.restype = \ ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH16.argtypes = \ [ctypes.POINTER(ctypes.c_double * 2)] #Get CDF version information ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) sub = ctypes.c_char(b' ') self.call(const.GET_, const.LIB_VERSION_, ctypes.byref(ver), const.GET_, const.LIB_RELEASE_, ctypes.byref(rel), const.GET_, const.LIB_INCREMENT_, ctypes.byref(inc), const.GET_, const.LIB_subINCREMENT_, ctypes.byref(sub)) ver = ver.value rel = rel.value inc = inc.value sub = sub.value self.version = (ver, rel, inc, sub) self._del_middle_rec_bug = ver < 3 or (ver == 3 and (rel < 4 or (rel == 4 and inc < 1))) self.supports_int8 = (ver > 3 or (ver == 3 and rel >=4)) self.cdftypenames = {const.CDF_BYTE.value: 'CDF_BYTE', const.CDF_CHAR.value: 'CDF_CHAR', const.CDF_INT1.value: 'CDF_INT1', const.CDF_UCHAR.value: 'CDF_UCHAR', const.CDF_UINT1.value: 'CDF_UINT1', const.CDF_INT2.value: 'CDF_INT2', const.CDF_UINT2.value: 'CDF_UINT2', const.CDF_INT4.value: 'CDF_INT4', const.CDF_UINT4.value: 'CDF_UINT4', const.CDF_INT8.value: 'CDF_INT8', const.CDF_FLOAT.value: 'CDF_FLOAT', const.CDF_REAL4.value: 'CDF_REAL4', const.CDF_DOUBLE.value: 'CDF_DOUBLE', const.CDF_REAL8.value: 'CDF_REAL8', const.CDF_EPOCH.value: 'CDF_EPOCH', 
const.CDF_EPOCH16.value: 'CDF_EPOCH16', const.CDF_TIME_TT2000.value: 'CDF_TIME_TT2000', } self.numpytypedict = {const.CDF_BYTE.value: numpy.int8, const.CDF_CHAR.value: numpy.int8, const.CDF_INT1.value: numpy.int8, const.CDF_UCHAR.value: numpy.uint8, const.CDF_UINT1.value: numpy.uint8, const.CDF_INT2.value: numpy.int16, const.CDF_UINT2.value: numpy.uint16, const.CDF_INT4.value: numpy.int32, const.CDF_UINT4.value: numpy.uint32, const.CDF_INT8.value: numpy.int64, const.CDF_FLOAT.value: numpy.float32, const.CDF_REAL4.value: numpy.float32, const.CDF_DOUBLE.value: numpy.float64, const.CDF_REAL8.value: numpy.float64, const.CDF_EPOCH.value: numpy.float64, const.CDF_EPOCH16.value: numpy.dtype((numpy.float64, 2)), const.CDF_TIME_TT2000.value: numpy.int64, } self.timetypes = [const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value] if not self.supports_int8: del self.cdftypenames[const.CDF_INT8.value] del self.numpytypedict[const.CDF_INT8.value] del self.cdftypenames[const.CDF_TIME_TT2000.value] del self.numpytypedict[const.CDF_TIME_TT2000.value] elif sys.platform.startswith('linux') \ and os.uname()[4].startswith('arm') \ and hasattr(self._library, 'computeTT2000') \ and self._library.computeTT2000( 2010, 1, 1, 0, 0, 0, 0, 0, 0) != 315576066184000000: #TT2000 call failed, so probably need to type-pun #double arguments to variadic functions. #Calling convention for non-variadic functions with floats #is unique, but convention for ints is same as variadic. #So type-pun arguments to integers to force that calling #convention. if ctypes.sizeof(ctypes.c_longlong) != \ ctypes.sizeof(ctypes.c_double): warnings.warn('ARM with unknown type sizes; ' 'TT2000 functions will not work.') else: self._library.computeTT2000.argtypes = \ [ctypes.c_longlong] * 9 c_ll_p = ctypes.POINTER(ctypes.c_longlong) if self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( 2010)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) != 315576066184000000: warnings.warn('ARM with unknown calling convention; ' 'TT2000 functions will not work.') self.datetime_to_tt2000 = self._datetime_to_tt2000_typepunned v_epoch16_to_datetime = numpy.frompyfunc( self.epoch16_to_datetime, 2, 1) self.v_epoch16_to_datetime = \ lambda x: v_epoch16_to_datetime(x[..., 0], x[..., 1]) self.v_epoch_to_datetime = numpy.frompyfunc( self.epoch_to_datetime, 1, 1) self.v_tt2000_to_datetime = numpy.frompyfunc( self.tt2000_to_datetime, 1, 1) self.v_datetime_to_epoch = numpy.vectorize( self.datetime_to_epoch, otypes=[numpy.float64]) v_datetime_to_epoch16 = numpy.frompyfunc( self.datetime_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. 
We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_datetime_to_epoch16(x): retval = numpy.require(v_datetime_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_datetime_to_epoch16 = _v_datetime_to_epoch16 self.v_datetime_to_tt2000 = numpy.vectorize( self.datetime_to_tt2000, otypes=[numpy.int64]) self.v_epoch_to_tt2000 = numpy.vectorize( self.epoch_to_tt2000, otypes=[numpy.int64]) self.v_tt2000_to_epoch = numpy.vectorize( self.tt2000_to_epoch, otypes=[numpy.float64]) v_epoch16_to_tt2000 = numpy.frompyfunc( self.epoch16_to_tt2000, 2, 1) self.v_epoch16_to_tt2000 = \ lambda x: v_epoch16_to_tt2000(x[..., 0], x[..., 1]) v_tt2000_to_epoch16 = numpy.frompyfunc( self.tt2000_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_tt2000_to_epoch16(x): retval = numpy.require(v_tt2000_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_tt2000_to_epoch16 = _v_tt2000_to_epoch16 if not self.supports_int8: self.datetime_to_tt2000 = self._bad_tt2000 self.tt2000_to_datetime = self._bad_tt2000 self.v_datetime_to_tt2000 = self._bad_tt2000 self.v_tt2000_to_datetime = self._bad_tt2000 self.epoch_to_tt2000 = self._bad_tt2000 self.v_epoch_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch = self._bad_tt2000 self.v_tt2000_to_epoch = self._bad_tt2000 self.epoch_16_to_tt2000 = self._bad_tt2000 self.v_epoch16_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch16 = self._bad_tt2000 self.v_tt2000_to_epoch16 = self._bad_tt2000 #Default to V2 CDF self.set_backward(True) @staticmethod def _find_lib(): """ Search for the CDF library Searches in likely locations for CDF libraries and attempts to load them. Stops at first successful load and, if fails, reports all the files that were tried as libraries. Returns ======= out : tuple This is either (path to library, loaded library) or, in the event of failure, (None, list of libraries tried) """ failed = [] for libpath in Library._lib_paths(): try: lib = ctypes.CDLL(libpath) except: failed.append(libpath) else: return libpath, lib return (failed, None) @staticmethod def _lib_paths(): """Find candidate paths for the CDF library Does not check that the library is actually in any particular directory, just returns a list of possible locations, in priority order. Returns ======= out : generator of str paths that look like the CDF library """ #What the library might be named names = { 'win32': ['cdf.dll'], 'darwin': ['libcdf.dylib', 'cdf.dylib', 'libcdf.so'], 'linux2': ['libcdf.so'], 'linux': ['libcdf.so'], } names = names.get(sys.platform, ['libcdf.so']) #All existing CDF-library-like paths within a directory search_dir = lambda x: \ [os.path.join(x, fname) for fname in names if os.path.exists(os.path.join(x, fname))] # Only use anaconda locations... # Defined during builds ... if 'PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['PREFIX'], 'lib')): yield p # defined when conda is activated ... 
if 'CONDA_PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'lib')): yield p # Special subdirectory for anaconda unix packages on windows if 'LIBRARY_BIN' in os.environ: for p in search_dir(os.environ['LIBRARY_BIN']): yield p ctypespath = ctypes.util.find_library( 'cdf.dll' if sys.platform == 'win32' else 'cdf') if ctypespath: yield ctypespath def check_status(self, status, ignore=()): """ Raise exception or warning based on return status of CDF call Parameters ========== status : int status returned by the C library Other Parameters ================ ignore : sequence of ctypes.c_long CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. (Default none). Raises ====== CDFError : if status < CDF_WARN, indicating an error Warns ===== CDFWarning : if CDF_WARN <= status < CDF_OK, indicating a warning. Returns ======= out : int status (unchanged) """ if status == const.CDF_OK or status in ignore: return status if status < const.CDF_WARN: raise CDFError(status) else: warning = CDFWarning(status) warning.warn() return status def call(self, *args, **kwargs): """ Call the CDF internal interface Passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with :meth:`check_status`. Terminal NULL is automatically added to args. Parameters ========== args : various, see :mod:`ctypes` Passed directly to the CDF library interface. Useful constants are defined in the :mod:`~pycdf.const` module. Other Parameters ================ ignore : sequence of CDF statuses sequence of CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. Returns ======= out : int CDF status from the library Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ if 'ignore' in kwargs: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) ), kwargs['ignore']) else: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) )) def set_backward(self, backward=True): """ Set backward compatibility mode for new CDFs Unless backward compatible mode is set, CDF files created by the version 3 library can not be read by V2. Parameters ========== backward : boolean Set backward compatible mode if True; clear it if False. Raises ====== ValueError : if backward=False and underlying CDF library is V2 """ if self.version[0] < 3: if not backward: raise ValueError( 'Cannot disable backward-compatible mode for CDF version 2.') else: return self._library.CDFsetFileBackward(const.BACKWARDFILEon if backward else const.BACKWARDFILEoff) def epoch_to_datetime(self, epoch): """ Converts a CDF epoch value to a datetime Parameters ========== epoch : float epoch value from CDF Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
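A hedged example; the epoch value for 0001-01-01 00:00 UT is the one quoted
in the comment inside :meth:`epoch_to_num`:

>>> lib.epoch_to_datetime(31622400000.0)
datetime.datetime(1, 1, 1, 0, 0)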
See Also ======== v_epoch_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) self._library.EPOCHbreakdown(ctypes.c_double(epoch), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999000) else: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, msec.value * 1000) def datetime_to_epoch(self, dt): """ Converts a Python datetime to a CDF Epoch value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : float epoch corresponding to dt See Also ======== v_datetime_to_epoch """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) micro = dt.microsecond % 1000 if micro >= 500 and dt.year < 9999: dt += datetime.timedelta(0, 0, 1000) return self._library.computeEPOCH(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000)) def epoch16_to_datetime(self, epoch0, epoch1): """ Converts a CDF epoch16 value to a datetime .. note:: The call signature has changed since SpacePy 0.1.2. Formerly this method took a single argument with two values; now it requires two arguments (one for each value). To convert existing code, replace ``epoch16_to_datetime(epoch)`` with ``epoch16_to_datetime(*epoch)``. Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. See Also ======== v_epoch16_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) usec = ctypes.c_long(0) nsec = ctypes.c_long(0) psec = ctypes.c_long(0) self._library.EPOCH16breakdown((ctypes.c_double * 2)(epoch0, epoch1), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec), ctypes.byref(psec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) micro = int(float(msec.value) * 1000 + float(usec.value) + float(nsec.value) / 1000 + float(psec.value) / 1e6 + 0.5) if micro < 1000000: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_epoch16(self, dt): """ Converts a Python datetime to a CDF Epoch16 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : list of float epoch16 corresponding to dt See Also ======== v_datetime_to_epoch16 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) #Default to "illegal epoch" epoch16 = (ctypes.c_double * 2)(-1., -1.) 
if self._library.computeEPOCH16(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0, 0, epoch16): return (-1., -1.) #Failure, so illegal epoch return (epoch16[0], epoch16[1]) def epoch_to_epoch16(self, epoch): """ Converts a CDF EPOCH to a CDF EPOCH16 value Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : (double, double) EPOCH16 corresponding to epoch """ e = numpy.require(epoch, numpy.float64) s = numpy.trunc(e / 1000.0) #ugly numpy stuff, probably a better way.... res = numpy.hstack((s, (e - s * 1000.0) * 1e9)) if len(res) <= 2: return res newshape = list(res.shape[0:-2]) newshape.append(res.shape[-1] // 2) newshape.append(2) return numpy.rollaxis(res.reshape(newshape), -1, -2) def epoch_to_num(self, epoch): """ Convert CDF EPOCH to matplotlib number. Same output as :func:`~matplotlib.dates.date2num` and useful for plotting large data sets without converting the times through datetime. Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : double Floating point number representing days since 0001-01-01. """ #date2num day 1 is 1/1/1 00UT #epoch 1/1/1 00UT is 31622400000.0 (millisecond) return (epoch - 31622400000.0) / (24 * 60 * 60 * 1000.0) + 1.0 def epoch16_to_epoch(self, epoch16): """ Converts a CDF EPOCH16 to a CDF EPOCH value Parameters ========== epoch16 : (double, double) EPOCH16 to convert. Lists and numpy arrays are acceptable. LAST dimension should be 2: the two pairs of EPOCH16 Returns ======= out : double EPOCH corresponding to epoch16 """ e = numpy.require(epoch16, numpy.float64) return e[..., 0] * 1000.0 + numpy.round(e[..., 1] / 1e9) def tt2000_to_datetime(self, tt2000): """ Converts a CDF TT2000 value to a datetime .. note:: Although TT2000 values support leapseconds, Python's datetime object does not. Any times after 23:59:59.999999 will be truncated to 23:59:59.999999. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
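A hedged example; the TT2000 value for 2010-01-01 00:00:00 UTC is the one
used by the self-test in ``Library.__init__``:

>>> lib.tt2000_to_datetime(315576066184000000)
datetime.datetime(2010, 1, 1, 0, 0)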
See Also ======== v_tt2000_to_datetime """ yyyy = ctypes.c_double(0) mm = ctypes.c_double(0) dd = ctypes.c_double(0) hh = ctypes.c_double(0) min = ctypes.c_double(0) sec = ctypes.c_double(0) msec = ctypes.c_double(0) usec = ctypes.c_double(0) nsec = ctypes.c_double(0) self._library.breakdownTT2000( ctypes.c_longlong(tt2000), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) sec = int(sec.value) if sec >= 60: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), 59, 999999) micro = int(msec.value * 1000 + usec.value + nsec.value / 1000 + 0.5) if micro < 1000000: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_tt2000(self, dt): """ Converts a Python datetime to a CDF TT2000 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0) def _datetime_to_tt2000_typepunned(self, dt): """ Converts a Python datetime to a CDF TT2000 value Typepunned version that passes doubles as longlongs, to get around ARM calling convention oddness. Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ c_ll_p = ctypes.POINTER(ctypes.c_longlong) if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( dt.year)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.month)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.day)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.hour)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.minute)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.second)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond // 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond % 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) def epoch_to_tt2000(self, epoch): """ Converts a CDF EPOCH to a CDF TT2000 value Parameters ========== epoch : double EPOCH to convert Returns ======= out : int tt2000 corresponding to epoch See Also ======== v_epoch_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH(epoch) def tt2000_to_epoch(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH .. note:: Although TT2000 values support leapseconds, CDF EPOCH values do not. 
Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double EPOCH corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch """ return self._library.CDF_TT2000_to_UTC_EPOCH(tt2000) def epoch16_to_tt2000(self, epoch0, epoch1): """ Converts a CDF epoch16 value to TT2000 .. note:: Because TT2000 does not support picoseconds, the picoseconds value in epoch is ignored (i.e., truncated.) Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : long TT2000 corresponding to epoch. See Also ======== v_epoch16_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH16( (ctypes.c_double * 2)(epoch0, epoch1)) def tt2000_to_epoch16(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH16 .. note:: Although TT2000 values support leapseconds, CDF EPOCH16 values do not. Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double, double EPOCH16 corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch16 """ #Default to "illegal epoch" if isn't populated epoch16 = (ctypes.c_double * 2)(-1., -1.) if self._library.CDF_TT2000_to_UTC_EPOCH16(tt2000, epoch16): return (-1., -1.) #Failure; illegal epoch return (epoch16[0], epoch16[1]) def _bad_tt2000(*args, **kwargs): """Convenience function for complaining that TT2000 not supported""" raise NotImplementedError( 'TT2000 functions require CDF library 3.4.0 or later') def download_library(): """Download and install the CDF library""" if sys.platform != 'win32': raise NotImplementedError( 'CDF library install only supported on Windows') try: import html.parser as HTMLParser except ImportError: import HTMLParser #https://stackoverflow.com/questions/1713038/super-fails-with-error-typeerror-argument-1-must-be-type-not-classobj class LinkParser(HTMLParser.HTMLParser, object): def __init__(self, *args, **kwargs): self.links_found = [] super(LinkParser, self).__init__(*args, **kwargs) def handle_starttag(self, tag, attrs): if tag != 'a' or attrs[0][0] != 'href': return self.links_found.append(attrs[0][1]) import re import subprocess try: import urllib.request as u except ImportError: import urllib as u # Removed reference to spacepy #import spacepy #if spacepy.config.get('user_agent', None): # class AppURLopener(u.FancyURLopener): # version = spacepy.config['user_agent'] # u._urlopener = AppURLopener() baseurl = 'https://spdf.sci.gsfc.nasa.gov/pub/software/cdf/dist/' url = u.urlopen(baseurl) listing = url.read() url.close() p = LinkParser() p.feed(listing) cdfdist = [l for l in p.links_found if re.match('^cdf3\d_\d(?:_\d)?/$', l)] if not cdfdist: raise RuntimeError( "Couldn't find CDF distribution directory to download") cdfdist.sort(key=lambda x: x.rstrip('/').split('_')) cdfverbase = cdfdist[-1].rstrip('/') instfname = cdfverbase + ('_0' if cdfverbase.count('_') == 1 else '') + \ '-setup-{0}.exe'.format(len('%x' % sys.maxsize)*4) insturl = baseurl + cdfverbase + '/windows/' + instfname tmpdir = tempfile.mkdtemp() try: fname, status = u.urlretrieve(insturl, os.path.join(tmpdir, instfname)) subprocess.check_call([fname, '/install', '/q1'], shell=False) finally: shutil.rmtree(tmpdir) _libpath, _library = Library._find_lib() 
if _library is None: raise Exception(('Cannot load CDF C library; checked {0}. ' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(_libpath))) from . import const lib = Library(_libpath, _library) """Module global library object. Initalized at module load time so all classes have ready access to the CDF library and a common state. E.g: >>> import pycdf >>> pycdf.lib.version (3, 3, 0, ' ') """ class CDFException(Exception): """ Base class for errors or warnings in the CDF library. Not normally used directly, but in subclasses :class:`CDFError` and :class:`CDFWarning`. Error messages provided by this class are looked up from the underlying C library. """ def __init__(self, status): """ Create a CDF Exception Uses CDF C library to look up an appropriate error message. Parameters ========== status : ctypes.c_long CDF status """ self.status = status self.string = 'CDF error ' + repr(status) + ', unable to get details.' message = ctypes.create_string_buffer(const.CDF_STATUSTEXT_LEN + 1) try: retval = lib._library.CDFlib(const.SELECT_, const.CDF_STATUS_, ctypes.c_long(status), const.GET_, const.STATUS_TEXT_, message, const.NULL_) if retval == const.CDF_OK: if isinstance(message.value, str): self.string = message.value elif isinstance(message.value, bytes): self.string = message.value.decode() except: pass def __str__(self): """ Error string associated with the library error. Returns ======= out : str Error message from the CDF library. """ return self.string class CDFError(CDFException): """Raised for an error in the CDF library.""" pass class CDFWarning(CDFException, UserWarning): """Used for a warning in the CDF library.""" def warn(self, level=4): """ Issues a warning based on the information stored in my exception Intended for use in check_status or similar wrapper function. Other Parameters ================ level : int optional (default 3), how far up the stack the warning should be reported. Passed directly to :class:`warnings.warn`. """ warnings.warn(self, self.__class__, level) class EpochError(Exception): """Used for errors in epoch routines""" pass def _compress(obj, comptype=None, param=None): """Set or check the compression of a :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param obj: object on which to set or check compression @type obj: :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param comptype: type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :py:mod:`pycdf.const`. If not specified, will not change compression. @type comptype: ctypes.c_long @param param: Compression parameter, see CDF CRM 4.10 and :py:mod:`pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
@type param: ctypes.c_long @return: (comptype, param) currently in effect @rtype: tuple """ if isinstance(obj, CDF): COMPRESSION_ = const.CDF_COMPRESSION_ elif isinstance(obj, Var): COMPRESSION_ = const.zVAR_COMPRESSION_ else: raise ValueError('Must specify a CDF or Var type.') validparams = {const.NO_COMPRESSION.value: [ctypes.c_long(0)], const.RLE_COMPRESSION.value: [const.RLE_OF_ZEROs], const.HUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.AHUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.GZIP_COMPRESSION.value: [ctypes.c_long(5), ctypes.c_long(1), ctypes.c_long(2), ctypes.c_long(3), ctypes.c_long(4), ctypes.c_long(6), ctypes.c_long(7), ctypes.c_long(8), ctypes.c_long(9), ], } comptypes = [const.NO_COMPRESSION, const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION, const.GZIP_COMPRESSION] comptypevalues = [i.value for i in comptypes] if comptype != None: if not hasattr(comptype, 'value'): comptype = ctypes.c_long(comptype) if param is None: if not comptype.value in validparams: raise CDFError(const.BAD_COMPRESSION) param = validparams[comptype.value][0] paramlist = (ctypes.c_long * 1)(param) obj._call(const.PUT_, COMPRESSION_, comptype, paramlist) params = (ctypes.c_long * const.CDF_MAX_PARMS)(*([0] * const.CDF_MAX_PARMS)) comptype = ctypes.c_long(0) percent = ctypes.c_long(0) obj._call(const.GET_, COMPRESSION_, ctypes.byref(comptype), ctypes.byref(params), ctypes.byref(percent)) param = params[0] if not comptype.value in comptypevalues: raise CDFError(const.BAD_COMPRESSION) validparamvalues = [i.value for i in validparams[comptype.value]] if not param in validparamvalues: raise CDFError(const.BAD_COMPRESSION_PARM) comptype = comptypes[comptypevalues.index(comptype.value)] if comptype in (const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION): param = validparams[comptype.value][validparamvalues.index(param)] return (comptype, param) class CDF(MutableMapping): """ Python object representing a CDF file. Open or create a CDF file by creating an object of this class. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. A readonly CDF with many variables may be slow to close. See :meth:`readonly`. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :meth:`close` or :meth:`save` when done. .. note:: Existing CDF files are opened read-only by default, see :meth:`readonly` to change. CDF supports the `with <http://docs.python.org/tutorial/inputoutput.html#methods-of-file-objects>`_ keyword, like other file objects, so: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... #do brilliant things with the CDF will open the CDF, execute the indented statements, and close the CDF when finished or when an error occurs. The `python docs <http://docs.python.org/reference/compound_stmts.html#with>`_ include more detail on this 'context manager' ability. 
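For example, to read one variable's data and have the file closed automatically
(the variable name ``Epoch`` is illustrative):

>>> with pycdf.CDF('cdf_filename.cdf') as cdffile:
...     epoch_data = cdffile['Epoch'][...]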
CDF objects behave like a python `dictionary <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_, where the keys are names of variables in the CDF, and the values, :class:`Var` objects. As a dictionary, they are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_ and it is easy to loop over all of the variables in a file. Some examples: #. List the names of all variables in the open CDF ``cdffile``: >>> cdffile.keys() >>> for k in cdffile: #Alternate ... print(k) #. Get a :class:`Var` object for the variable named ``Epoch``: >>> epoch = cdffile['Epoch'] #. Determine if a CDF contains a variable named ``B_GSE``: >>> if 'B_GSE' in cdffile: ... print('B_GSE is in the file') ... else: ... print('B_GSE is not in the file') #. Find how many variables are in the file: >>> print(len(cdffile)) #. Delete the variable ``Epoch`` from the open CDF file ``cdffile``: >>> del cdffile['Epoch'] #. Display a summary of variables and types in open CDF file ``cdffile``: >>> print(cdffile) #. Open the CDF named ``cdf_filename.cdf``, read *all* the data from all variables into dictionary ``data``, and close it when done or if an error occurs: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... data = cdffile.copy() This last example can be very inefficient as it reads the entire CDF. Normally it's better to treat the CDF as a dictionary and access only the data needed, which will be pulled transparently from disc. See :class:`Var` for more subtle examples. Potentially useful dictionary methods and related functions: - `in <http://docs.python.org/reference/expressions.html#in>`_ - `keys <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_ - :py:func:`len` - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - :py:func:`sorted` - :py:func:`~spacepy.toolbox.dictree` The CDF user's guide section 2.2 has more background information on CDF files. The :attr:`~CDF.attrs` Python attribute acts as a dictionary referencing CDF attributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`gAttrList` for more on the dictionary of global attributes. Creating a new CDF from a master (skeleton) CDF has similar syntax to opening one: >>> cdffile = pycdf.CDF('cdf_filename.cdf', 'master_cdf_filename.cdf') This creates and opens ``cdf_filename.cdf`` as a copy of ``master_cdf_filename.cdf``. Using a skeleton CDF is recommended over making a CDF entirely from scratch, but this is possible by specifying a blank master: >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') When CDFs are created in this way, they are opened read-write, see :py:meth:`readonly` to change. By default, new CDFs (without a master) are created in version 2 (backward-compatible) format. To create a version 3 CDF, use :meth:`Library.set_backward`: >>> pycdf.lib.set_backward(False) >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') Add variables by direct assignment, which will automatically set type and dimension based on the data provided: >>> cdffile['new_variable_name'] = [1, 2, 3, 4] or, if more control is needed over the type and dimensions, use :py:meth:`new`. Although it is supported to assign Var objects to Python variables for convenience, there are some minor pitfalls that can arise when changing a CDF that will not affect most users. 
This is only a concern when assigning a zVar object to a Python variable, changing the CDF through some other variable, and then trying to use the zVar object via the originally assigned variable. Deleting a variable: >>> var = cdffile['Var1'] >>> del cdffile['Var1'] >>> var[0] #fail, no such variable Renaming a variable: >>> var = cdffile['Var1'] >>> cdffile['Var1'].rename('Var2') >>> var[0] #fail, no such variable Renaming via the same variable works: >>> var = cdffile['Var1'] >>> var.rename('Var2') >>> var[0] #succeeds, aware of new name Deleting a variable and then creating another variable with the same name may lead to some surprises: >>> var = cdffile['Var1'] >>> var[...] = [1, 2, 3, 4] >>> del cdffile['Var1'] >>> cdffile.new('Var1', data=[5, 6, 7, 8] >>> var[...] [5, 6, 7, 8] .. autosummary:: ~CDF.attr_num ~CDF.attrs ~CDF.add_attr_to_cache ~CDF.add_to_cache ~CDF.backward ~CDF.checksum ~CDF.clear_attr_from_cache ~CDF.clear_from_cache ~CDF.clone ~CDF.close ~CDF.col_major ~CDF.compress ~CDF.copy ~CDF.from_data ~CDF.new ~CDF.raw_var ~CDF.readonly ~CDF.save ~CDF.var_num ~CDF.version .. attribute:: CDF.attrs Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. .. attribute:: CDF.backward True if this CDF was created in backward-compatible mode (for opening with CDF library before 3.x) .. automethod:: add_to_cache .. automethod:: add_attr_to_cache .. automethod:: attr_num .. automethod:: checksum .. automethod:: clear_from_cache .. automethod:: clear_attr_from_cache .. automethod:: clone .. automethod:: close .. automethod:: col_major .. automethod:: compress .. automethod:: copy .. automethod:: from_data .. automethod:: new .. automethod:: raw_var .. automethod:: readonly .. automethod:: save .. automethod:: var_num .. automethod:: version """ def __init__(self, pathname, masterpath=None, create=None, readonly=None): """Open or create a CDF file. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. Raises ====== CDFError if CDF library reports an error CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save` when done. 
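For illustration only (the file name here is hypothetical), a new, empty CDF can also be created from this constructor by passing an empty master path, mirroring the class-level examples above:

>>> newfile = pycdf.CDF('brand_new_file.cdf', '')  #file name is illustrative
>>> newfile.close()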
""" if masterpath is not None: #Looks like we want to create if create is False: raise ValueError('Cannot specify a master CDF without creating a CDF') if readonly is True: raise ValueError('Cannot create a CDF in readonly mode') if create and readonly: raise ValueError('Cannot create a CDF in readonly mode') try: self.pathname = pathname.encode() except AttributeError: raise ValueError( 'pathname must be string-like: {0}'.format(pathname)) self._handle = ctypes.c_void_p(None) self._opened = False if masterpath is None and not create: self._open(True if readonly is None else readonly) elif masterpath: self._from_master(masterpath.encode()) else: self._create() lib.call(const.SELECT_, const.CDF_zMODE_, ctypes.c_long(2)) self._attrlistref = weakref.ref(gAttrList(self)) self.backward = self.version()[0] < 3 self._var_nums = {} """Cache of name-to-number mappings for variables in this CDF""" self._attr_info = {} """Cache of name-to-(number, global) mappings for attributes in this CDF""" def __del__(self): """Destructor; called when CDF object is destroyed. Close CDF file if there is still a valid handle. .. note:: To avoid data loss, explicitly call :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save`. """ if self._opened: self.close() def __delitem__(self, name): """Delete a zVariable in this CDF, by name or number Parameters ========== name : string or int Name or number of the CDF variable .. note: Variable numbers may change if variables are added or removed. Examples ======== Delete the variable ``Epoch`` from the open CDF file ``cdffile``. >>> del cdffile['Epoch'] """ self[name]._delete() def __enter__(self): """Context manager entrance function.""" return self def __exit__(self, type, value, traceback): """Context manager exit function. Close CDF file. """ self.close() def __getitem__(self, name): """Gets a zVariable in this CDF, by name or number The CDF acts like a dict @param name: Name or number of the CDF variable @type name: string or int @return: CDF variable named or numbered L{name} @rtype: :py:class:`pycdf.Var` @raise KeyError: for pretty much any problem in lookup @note: variable numbers may change if variables are added or removed. """ try: return Var(self, name) except CDFException as e: raise KeyError('{0}: {1}'.format(name, e)) def __setitem__(self, name, data): """Writes data to a zVariable in this CDF If the zVariable does not exist, will create one matching L{data}. If it does exist, will attempt to write L{data} to it without changing the type or dimensions. @param name: name or number of the variable to write @type name: str or int @param data: data to write, or a :py:class:`pycdf.Var` to copy """ if isinstance(data, Var): self.clone(data, name) elif name in self: self[name][...] 
= data if hasattr(data, 'attrs'): self[name].attrs.clone(data.attrs) else: self.new(name, data) def __iter__(self, current = 0): """Iterates over zVars in CDF Iterators for dicts return keys @note: Returned in variable-number order """ while current < self.__len__(): name = self[current].name() value = (yield name) if value is None: current += 1 else: current = self[value]._num() current += 1 def __len__(self): """Implements 'length' of CDF (number of zVars) @return: number of zVars in the CDF @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, const.CDF_NUMzVARS_, ctypes.byref(count)) return count.value def __contains__(self, key): """Determines whether a particular variable name is in the CDF @note: Essentially an efficiency function; L{__iter__} is called if this isn't defined @param key: key/variable name to check @type key: string @return: True if L{key} is the name of a variable in CDF, else False @rtype: Boolean """ try: foo = self[key] return True except KeyError as e: expected = str(key) + \ ": NO_SUCH_VAR: Named variable not found in this CDF." if expected in e.args: return False raise def __repr__(self): """Returns representation of CDF Cannot return anything that can be eval'd to create a copy of the CDF, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<CDF:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the CDF This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.CDF`, just the names, types, and sizes of all variables. (Attributes are not listed.) @return: description of the variables in the CDF @rtype: str """ if self._opened: return '\n'.join([key + ': ' + str(value) for (key, value) in sorted(self.items())]) #can get away with this sort because second value in tuple isn't #compared unless first are different, and variable name is unique. else: if isinstance(self.pathname, str): return 'Closed CDF {0}'.format(self.pathname) else: return 'Closed CDF {0}'.format(self.pathname.decode('ascii')) def _open(self, readonly=True): """Opens the CDF file (called on init) Will open an existing CDF file read/write. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.OPEN_, const.CDF_, self.pathname, ctypes.byref(self._handle)) self._opened = True if readonly: #Default is RW self.readonly(readonly) def _create(self): """Creates (and opens) a new CDF file Created at ``pathname``. Assumes zero-dimension r variables Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.CREATE_, const.CDF_, self.pathname, ctypes.c_long(0), (ctypes.c_long * 1)(0), ctypes.byref(self._handle)) self._opened = True def _from_master(self, master_path): """Creates a new CDF from a master CDF file ``master_path`` is copied to ``pathname`` and opened. Parameters ========== master_path : string location of the master CDF file Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. 
note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ if os.path.exists(self.pathname): raise CDFError(const.CDF_EXISTS) shutil.copy2(master_path, self.pathname) self._open(False) def _call(self, *args, **kwargs): """Select this CDF as current and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. Parameters ========== args : various, see :py:mod:`ctypes`. Passed directly to the CDF library interface. Useful constants are defined in the :doc:`const <pycdf_const>` module of this package. Returns ======= out : ctypes.c_long CDF status from the library .. note: Terminal NULL_ is automatically added to ``args``. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. """ return lib.call(const.SELECT_, const.CDF_, self._handle, *args, **kwargs) def clone(self, zVar, name=None, data=True): """ Clone a zVariable (from another CDF or this) into this CDF Parameters ========== zVar : :py:class:`Var` variable to clone Other Parameters ================ name : str Name of the new variable (default: name of the original) data : boolean (optional) Copy data, or only type, dimensions, variance, attributes? (default: True, copy data as well) Returns ======= out : :py:class:`Var` The newly-created zVar in this CDF """ if name is None: name = zVar.name() if name in self: del self[name] self.new(name, type=zVar.type(), recVary=zVar.rv(), dimVarys=zVar.dv(), dims=zVar._dim_sizes(), n_elements=zVar._nelems()) self[name].compress(*zVar.compress()) self[name].attrs.clone(zVar.attrs) if data: r = zVar._raw zVar._raw = True self.raw_var(name)[...] = zVar[...] zVar._raw = r return zVar def col_major(self, new_col=None): """ Finds the majority of this CDF file Other Parameters ================ new_col : boolean Specify True to change to column-major, False to change to row major, or do not specify to check the majority rather than changing it. (default is check only) Returns ======= out : boolean True if column-major, false if row-major """ if new_col != None: new_maj = const.COLUMN_MAJOR if new_col else const.ROW_MAJOR self._call(const.PUT_, const.CDF_MAJORITY_, new_maj) maj = ctypes.c_long(0) self._call(const.GET_, const.CDF_MAJORITY_, ctypes.byref(maj)) if not maj.value in (const.ROW_MAJOR.value, const.COLUMN_MAJOR.value): raise CDFError(const.BAD_MAJORITY) return maj.value == const.COLUMN_MAJOR.value def readonly(self, ro=None): """ Sets or check the readonly status of this CDF If the CDF has been changed since opening, setting readonly mode will have no effect. .. note:: Closing a CDF that has been opened readonly, or setting readonly False, may take a substantial amount of time if there are many variables in the CDF, as a (potentially large) cache needs to be cleared. Consider specifying ``readonly=False`` when opening the file if this is an issue. However, this may make some reading operations slower. Other Parameters ================ ro : Boolean True to set the CDF readonly, False to set it read/write, or leave out to check only. 
Returns ======= out : Boolean True if CDF is read-only, else False Raises ====== CDFError : if bad mode is set """ if ro == True: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYon) elif ro == False: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYoff) mode = ctypes.c_long(0) self._call(const.CONFIRM_, const.CDF_READONLY_MODE_, ctypes.byref(mode)) if mode.value == const.READONLYon.value: return True elif mode.value == const.READONLYoff.value: return False else: raise CDFError(const.BAD_READONLY_MODE.value) def checksum(self, new_val=None): """ Set or check the checksum status of this CDF. If checksums are enabled, the checksum will be verified every time the file is opened. Other Parameters ================ new_val : boolean True to enable checksum, False to disable, or leave out to simply check. Returns ======= out : boolean True if the checksum is enabled or False if disabled """ if new_val != None: self._call(const.PUT_, const.CDF_CHECKSUM_, const.MD5_CHECKSUM if new_val else const.NO_CHECKSUM) chk = ctypes.c_long(0) self._call(const.GET_, const.CDF_CHECKSUM_, ctypes.byref(chk)) if not chk.value in (const.MD5_CHECKSUM.value, const.NO_CHECKSUM.value): raise CDFError(const.BAD_CHECKSUM) return chk.value == const.MD5_CHECKSUM.value def close(self): """ Closes the CDF file Although called on object destruction (:meth:`~CDF.__del__`), to ensure all data are saved, the user should explicitly call :meth:`~CDF.close` or :meth:`~CDF.save`. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.CLOSE_, const.CDF_) self._opened = False def compress(self, comptype=None, param=None): """ Set or check the compression of this CDF Sets compression on entire *file*, not per-variable. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) Returns ======= out : tuple (comptype, param) currently in effect See Also ======== :meth:`Var.compress` Examples ======== Set file ``cdffile`` to gzip compression, compression level 9: >>> cdffile.compress(pycdf.const.GZIP_COMPRESSION, 9) """ return _compress(self, comptype, param) def new(self, name, data=None, type=None, recVary=True, dimVarys=None, dims=None, n_elements=None, compress=None, compress_param=None): """ Create a new zVariable in this CDF .. note:: Either ``data`` or ``type`` must be specified. If type is not specified, it is guessed from ``data``. Parameters ========== name : str name of the new variable Other Parameters ================ data data to store in the new variable. If this has a an ``attrs`` attribute (e.g., :class:`~spacepy.datamodel.dmarray`), it will be used to populate attributes of the new variable. type : ctypes.c_long CDF type of the variable, from :mod:`~pycdf.const`. See section 2.5 of the CDF user's guide for more information on CDF data types. recVary : boolean record variance of the variable (default True) dimVarys : list of boolean dimension variance of each dimension, default True for all dimensions. 
dims : list of int size of each dimension of this variable, default zero-dimensional. Note this is the dimensionality as defined by CDF, i.e., for record-varying variables it excludes the leading record dimension. See :py:class:`Var`. n_elements : int number of elements, should be 1 except for CDF_CHAR, for which it's the length of the string. compress : ctypes.c_long Compression to apply to this variable, default None. See :py:meth:`Var.compress`. compress_param : ctypes.c_long Compression parameter if compression used; reasonable default is chosen. See :py:meth:`Var.compress`. Returns ======= out : :py:class:`Var` the newly-created zVariable Raises ====== ValueError : if neither data nor sufficient typing information is provided. Notes ===== Any given data may be representable by a range of CDF types; if the type is not specified, pycdf will guess which the CDF types which can represent this data. This breaks down to: #. If input data is a numpy array, match the type of that array #. Proper kind (numerical, string, time) #. Proper range (stores highest and lowest number provided) #. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: #. Type that matches precision of data first, then #. integer type before float type, then #. Smallest type first, then #. signed type first, then #. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if ``data`` specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: #. absolute values between 0 and 3e-39 #. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. """ if type in (const.CDF_EPOCH16, const.CDF_INT8, const.CDF_TIME_TT2000) \ and self.backward: raise ValueError('Cannot use EPOCH16, INT8, or TIME_TT2000 ' 'in backward-compatible CDF') if not lib.supports_int8 and \ type in (const.CDF_INT8, const.CDF_TIME_TT2000): raise ValueError('INT8 and TIME_TT2000 require CDF library 3.4.0') if data is None: if type is None: raise ValueError('Must provide either data or a CDF type.') if dims is None: dims = [] if n_elements is None: n_elements = 1 else: (guess_dims, guess_types, guess_elements) = _Hyperslice.types(data) if dims is None: if recVary: if guess_dims == (): raise ValueError( 'Record-varying data cannot be scalar. 
' 'Specify NRV with CDF.new() or put data in array.') dims = guess_dims[1:] else: dims = guess_dims if type is None: type = guess_types[0] if type == const.CDF_EPOCH16.value and self.backward: type = const.CDF_EPOCH if n_elements is None: n_elements = guess_elements if dimVarys is None: dimVarys = [True for i in dims] recVary = const.VARY if recVary else const.NOVARY dimVarys = [const.VARY if dimVary else const.NOVARY for dimVary in dimVarys] if not hasattr(type, 'value'): type = ctypes.c_long(type) if type.value == const.CDF_INT8.value and not lib.supports_int8: raise ValueError( '64-bit integer support require CDF library 3.4.0') if type.value in (const.CDF_EPOCH16.value, const.CDF_INT8.value, const.CDF_TIME_TT2000.value) \ and self.backward: raise ValueError('Data requires EPOCH16, INT8, or TIME_TT2000; ' 'incompatible with backward-compatible CDF') new_var = Var(self, name, type, n_elements, dims, recVary, dimVarys) if compress != None: new_var.compress(compress, compress_param) if data is not None: new_var[...] = data if hasattr(data, 'attrs'): new_var.attrs.clone(data.attrs) return new_var def raw_var(self, name): """ Get a "raw" :class:`Var` object. Normally a :class:`Var` will perform translation of values for certain types (to/from Unicode for CHAR variables on Py3k, and to/from datetime for all time types). A "raw" object does not perform this translation, on read or write. This does *not* affect the data on disk, and in fact it is possible to maintain multiple Python objects with access to the same zVariable. Parameters ========== name : str name or number of the zVariable """ v = self[name] v._raw = True return v def save(self): """ Saves the CDF file but leaves it open. If closing the CDF, :meth:`close` is sufficient; there is no need to call :meth:`save` before :meth:`close`. .. note:: Relies on an undocumented call of the CDF C library, which is also used in the Java interface. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.SAVE_, const.CDF_) def copy(self): """ Make a copy of all data and attributes in this CDF Returns ======= out : :py:class:`CDFCopy` :class:`~spacepy.datamodel.SpaceData`-like object of all data """ return CDFCopy(self) def version(self): """ Get version of library that created this CDF Returns ======= out : tuple version of CDF library, in form (version, release, increment) """ ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) self._call(const.GET_, const.CDF_VERSION_, ctypes.byref(ver), const.GET_, const.CDF_RELEASE_, ctypes.byref(rel), const.GET_, const.CDF_INCREMENT_, ctypes.byref(inc)) return (ver.value, rel.value, inc.value) def _get_attrs(self): """Get attribute list Provide access to the CDF's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = gAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. 
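For illustration (the attribute name and value below are hypothetical, not ones guaranteed to exist in any given file), global attributes can be read and written through this dict-like interface:

>>> cdffile.attrs['TEXT'] = 'A short description of this file'  #illustrative name/value
>>> print(cdffile.attrs['TEXT'])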
""") def var_num(self, varname): """Get the variable number of a particular variable name This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : int Variable number of this zvariable. """ num = self._var_nums.get(varname, None) if num is None: #Copied from Var._get, which can hopefully be thinned varNum = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMBER_, varname, ctypes.byref(varNum)) num = varNum.value self._var_nums[varname] = num return num def attr_num(self, attrname): """Get the attribute number and scope by attribute name This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : tuple attribute number, scope (True for global) of this attribute """ res = self._attr_info.get(attrname, None) if res is None: #Copied from Var._get, which can hopefully be thinned attrNum = ctypes.c_long(0) self._call(const.GET_, const.ATTR_NUMBER_, attrname, ctypes.byref(attrNum)) scope = ctypes.c_long(0) self._call(const.SELECT_, const.ATTR_, attrNum, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) res = (attrNum.value, scope) self._attr_info[attrname] = res return res def clear_attr_from_cache(self, attrname): """Mark an attribute deleted in the name-to-number cache Will remove an attribute, and all attributes with higher numbers, from the attribute cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the attribute. Not this is NOT a string in Python 3! """ num, scope = self.attr_num(attrname) #All numbers higher than this are renumbered for a, n in list(self._attr_info.items()): if n[0] >= num: del self._attr_info[a] def clear_from_cache(self, varname): """Mark a variable deleted in the name-to-number cache Will remove a variable, and all variables with higher numbers, from the variable cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! """ num = self.var_num(varname) #All numbers higher than this are renumbered for v, n in list(self._var_nums.items()): if n >= num: del self._var_nums[v] def add_attr_to_cache(self, attrname, num, scope): """Add an attribute to the name-to-number cache This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable scope : bool True if global scope; False if variable scope. 
""" self._attr_info[attrname] = (num, scope) def add_to_cache(self, varname, num): """Add a variable to the name-to-number cache This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable """ self._var_nums[varname] = num #Note there is no function for delete, currently handled in Var.rename #and Attr.rename by just deleting from the dict directly. Maybe this #should be differen (maybe should be possible to follow a variable across #a rename...) class Var(MutableSequence): """ A CDF variable. This object does not directly store the data from the CDF; rather, it provides access to the data in a format that much like a Python list or numpy :class:`~numpy.ndarray`. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. The CDF user's guide, section 2.3, provides background on variables. .. note:: Not intended to be created directly; use methods of :class:`CDF` to gain access to a variable. A record-varying variable's data are viewed as a hypercube of dimensions n_dims+1 (the extra dimension is the record number). They are indexed in row-major fashion, i.e. the last index changes most frequently / is contiguous in memory. If the CDF is column-major, the data are transformed to row-major before return. Non record-varying variables are similar, but do not have the extra dimension of record number. Variables can be subscripted by a multidimensional index to return the data. Indices are in row-major order with the first dimension representing the record number. If the CDF is column major, the data are reordered to row major. Each dimension is specified by standard Python `slice <http://docs.python.org/tutorial/introduction.html#strings>`_ notation, with dimensions separated by commas. The ellipsis fills in any missing dimensions with full slices. The returned data are lists; Python represents multidimensional arrays as nested lists. The innermost set of lists represents contiguous data. .. note:: numpy 'fancy indexing' is *not* supported. Degenerate dimensions are 'collapsed', i.e. no list of only one element will be returned if a single subscript is specified instead of a range. (To avoid this, specify a slice like 1:2, which starts with 1 and ends before 2). Two special cases: 1. requesting a single-dimension slice for a record-varying variable will return all data for that record number (or those record numbers) for that variable. 2. Requests for multi-dimensional variables may skip the record-number dimension and simply specify the slice on the array itself. In that case, the slice of the array will be returned for all records. In the event of ambiguity (e.g., single-dimension slice on a one-dimensional variable), case 1 takes priority. Otherwise, mismatch between the number of dimensions specified in the slice and the number of dimensions in the variable will cause an :exc:`~exceptions.IndexError` to be thrown. This all sounds very complicated but it is essentially attempting to do the 'right thing' for a range of slices. An unusual case is scalar (zero-dimensional) non-record-varying variables. Clearly they cannot be subscripted normally. 
In this case, use the ``[...]`` syntax meaning 'access all data.': >>> import pycdf >>> testcdf = pycdf.CDF('test.cdf', '') >>> variable = testcdf.new('variable', recVary=False, ... type=pycdf.const.CDF_INT4) >>> variable[...] = 10 >>> variable <Var: CDF_INT4 [] NRV > >>> variable[...] 10 Reading any empty non-record-varying variable will return an empty with the same *number* of dimensions, but all dimensions will be of zero length. The scalar is, again, a special case: due to the inability to have a numpy array which is both zero-dimensional and empty, reading an NRV scalar variable with no data will return an empty one-dimensional array. This is really not recommended. As a list type, variables are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_; iterating over a variable returns a single complete record at a time. This is all clearer with examples. Consider a variable ``B_GSM``, with three elements per record (x, y, z components) and fifty records in the CDF. Then: 1. ``B_GSM[0, 1]`` is the y component of the first record. 2. ``B_GSM[10, :]`` is a three-element list, containing x, y, and z components of the 11th record. As a shortcut, if only one dimension is specified, it is assumed to be the record number, so this could also be written ``B_GSM[10]``. 3. ``B_GSM[...]`` reads all data for ``B_GSM`` and returns it as a fifty-element list, each element itself being a three-element list of x, y, z components. Multidimensional example: consider fluxes stored as a function of pitch angle and energy. Such a variable may be called Flux and stored as a two-dimensional array, with the first dimension representing (say) ten energy steps and the second, eighteen pitch angle bins (ten degrees wide, centered from 5 to 175 degrees). Assume 100 records stored in the CDF (i.e. 100 different times). 1. ``Flux[4]`` is a list of ten elements, one per energy step, each element being a list of 18 fluxes, one per pitch bin. All are taken from the fifth record in the CDF. 2. ``Flux[4, :, 0:4]`` is the same record, all energies, but only the first four pitch bins (roughly, field-aligned). 3. ``Flux[..., 0:4]`` is a 100-element list (one per record), each element being a ten-element list (one per energy step), each containing fluxes for the first four pitch bins. This slicing notation is very flexible and allows reading specifically the desired data from the CDF. All data are, on read, converted to appropriate Python data types; EPOCH, EPOCH16, and TIME_TT2000 types are converted to :class:`~datetime.datetime`. Data are returned in numpy arrays. .. note:: Although pycdf supports TIME_TT2000 variables, the Python :class:`~datetime.datetime` object does not support leap seconds. Thus, on read, any seconds past 59 are truncated to 59.999999 (59 seconds, 999 milliseconds, 999 microseconds). Potentially useful list methods and related functions: - `count <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `in <http://docs.python.org/reference/expressions.html#in>`_ - `index <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `len <http://docs.python.org/library/functions.html#len>`_ - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - `sorted <http://docs.python.org/library/functions.html#sorted>`_ The topic of array majority can be very confusing; good background material is available at `IDL Array Storage and Indexing <http://www.idlcoyote.com/misc_tips/colrow_major.html>`_. 
In brief, *regardless of the majority stored in the CDF*, pycdf will always present the data in the native Python majority, row-major order, also known as C order. This is the default order in `NumPy <http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html #internal-memory-layout-of-an-ndarray>`_. However, packages that render image data may expect it in column-major order. If the axes seem 'swapped' this is likely the reason. The :attr:`~Var.attrs` Python attribute acts as a dictionary referencing zAttributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`zAttrList` for more on the dictionary of attributes. With writing, as with reading, every attempt has been made to match the behavior of Python lists. You can write one record, many records, or even certain elements of all records. There is one restriction: only the record dimension (i.e. dimension 0) can be resized by write, as all records in a variable must have the same dimensions. Similarly, only whole records can be deleted. .. note:: Unusual error messages on writing data usually mean that pycdf is unable to interpret the data as a regular array of a single type matching the type and shape of the variable being written. A 5x4 array is supported; an irregular array where one row has five columns and a different row has six columns is not. Error messages of this type include: - ``Data must be well-formed, regular array of number, string, or datetime`` - ``setting an array element with a sequence.`` - ``shape mismatch: objects cannot be broadcast to a single shape`` For these examples, assume Flux has 100 records and dimensions [2, 3]. Rewrite the first record without changing the rest: >>> Flux[0] = [[1, 2, 3], [4, 5, 6]] Writes a new first record and delete all the rest: >>> Flux[...] = [[1, 2, 3], [4, 5, 6]] Write a new record in the last position and add a new record after: >>> Flux[99:] = [[[1, 2, 3], [4, 5, 6]], ... [[11, 12, 13], [14, 15, 16]]] Insert two new records between the current number 5 and 6: >>> Flux[5:6] = [[[1, 2, 3], [4, 5, 6]], [[11, 12, 13], ... [14, 15, 16]]] This operation can be quite slow, as it requires reading and rewriting the entire variable. (CDF does not directly support record insertion.) Change the first element of the first two records but leave other elements alone: >>> Flux[0:2, 0, 0] = [1, 2] Remove the first record: >>> del Flux[0] Removes record 5 (the sixth): >>> del Flux[5] Due to the need to work around a bug in the CDF library, this operation can be quite slow. Delete *all data* from ``Flux``, but leave the variable definition intact: >>> del Flux[...] .. note:: Although this interface only directly supports zVariables, zMode is set on opening the CDF so rVars appear as zVars. See p.24 of the CDF user's guide; pyCDF uses zMode 2. .. autosummary:: ~Var.attrs ~Var.compress ~Var.copy ~Var.dtype ~Var.dv ~Var.insert ~Var.name ~Var.rename ~Var.rv ~Var.shape ~Var.type .. attribute:: Var.attrs zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. .. automethod:: compress .. automethod:: copy .. autoattribute:: dtype .. automethod:: dv .. automethod:: insert .. automethod:: name .. automethod:: rename .. automethod:: rv .. autoattribute:: shape .. 
automethod:: type """ def __init__(self, cdf_file, var_name, *args): """Create or locate a variable Parameters ========== cdf_file : :py:class:`pycdf.CDF` CDF file containing this variable var_name : string name of this variable Other Parameters ================ args additional arguments passed to :py:meth:`_create`. If none, opens an existing variable. If provided, creates a new one. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning """ self.cdf_file = cdf_file #This is the definitive "identify" of variable self._name = None self._type = None #CDF type (long) self._raw = False #Raw access (skip all conversions) if len(args) == 0: self._get(var_name) else: self._create(var_name, *args) #Weak reference to attribute list (use attrs instead) #This avoids a reference loop self._attrlistref = weakref.ref(zAttrList(self)) def __getitem__(self, key): """Returns a slice from the data array. Details under :py:class:`pycdf.Var`. @return: The data from this variable @rtype: list-of-lists of appropriate type. @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) #Hyperslice mostly catches this sort of thing, but #an empty variable is a special case, since we might want to #WRITE to 0th record (which Hyperslice also supports) but #can't READ from it, and iterating over tries to read from it. if hslice.rv: if hslice.dimsizes[0] == 0 and hslice.degen[0] and \ hslice.starts[0] == 0: raise IndexError('record index out of range') #For NRV, again hslice will assume 0th record exists since we might #want to write. So ANY degenerate dim other than the glued-on 0th #suggests an explicit index that should fail. None degenerate suggests #make an empty array. #Note this is pulling a lot of hyperslice stuff into getitem! elif hslice.dimsizes[0] == 0: if len(hslice.degen) > 1 and max(hslice.degen[1:]): raise IndexError('record index out of range') else: #The zero-length dimension is degenerate so it gets chopped, #and you can't have a zero-length numpy array that still #maintains the size of all other dimensions. So just force #a zero-dim array and the rest will follow hslice.counts[...] = 0 #If this is a scalar, need to make a single non-degenerate #dimension so it can be empty. if len(hslice.counts) == 1: hslice.degen[0] = False result = hslice.create_array() if hslice.counts[0] != 0: hslice.select() lib.call(const.GET_, const.zVAR_HYPERDATA_, result.ctypes.data_as(ctypes.c_void_p)) return hslice.convert_input_array(result) def __delitem__(self, key): """Removes a record (or set of records) from the CDF Only whole records can be deleted, so the del call must either specify only one dimension or it must specify all elements of the non-record dimensions. This is *not* a way to resize a variable! Deleting records from the middle of a variable may be very slow in some circumstances. To work around a bug in CDF library versions 3.4.0 and before, all the data must be read in, the requested deletions done, and then all written back out. 
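Example (``Flux`` here is an illustrative record-varying variable, as in the class docstring above):

>>> del Flux[5]    #removes record 5 (the sixth)
>>> del Flux[...]  #removes all records but leaves the variable defined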
@param key: index or slice to delete @type key: int or slice @raise TypeError: if an attempt is made to delete from a non record-varying variable, or to delete below the record level """ if not self.rv(): raise TypeError('Cannot delete records from non-record-varying ' 'variable.') hslice = _Hyperslice(self, key) if hslice.dims > 1 and (hslice.counts[1:] != hslice.dimsizes[1:]).any(): raise TypeError('Can only delete entire records.') if hslice.counts[0] == 0: return start = hslice.starts[0] count = hslice.counts[0] interval = hslice.intervals[0] dimsize = hslice.dimsizes[0] self._call() dangerous_delete = False if lib._del_middle_rec_bug and \ (interval != 1 or (start != 0 and start + count < dimsize)): #delete from middle is dangerous if only have one index entry entries = ctypes.c_long(0) lib.call(const.GET_, const.zVAR_nINDEXENTRIES_, ctypes.byref(entries)) dangerous_delete = (entries.value == 1) if dangerous_delete: data = self[...] data = numpy.delete( data, numpy.arange(start, start + count * interval, interval), 0) self[0:dimsize - count] = data first_rec = dimsize - count last_rec = dimsize - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif interval == 1: first_rec = ctypes.c_long(start) last_rec = ctypes.c_long(start + count - 1) lib.call(const.DELETE_, const.zVAR_RECORDS_, first_rec, last_rec) else: self._call() #delete from end to avoid renumbering of records for recno in range(start + (count - 1) * interval, start - 1, -1 * interval): lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(recno), ctypes.c_long(recno)) def __setitem__(self, key, data): """Puts a slice into the data array. Details under :py:class:`pycdf.Var`. @param key: index or slice to store @type key: int or slice @param data: data to store @type data: numpy.array @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. 
IndexError will @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) n_recs = hslice.counts[0] hslice.expand(data) cdf_type = self.type() if cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) else: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=self._np_type()) if cdf_type == const.CDF_EPOCH16.value: datashape = data.shape[:-1] else: datashape = data.shape #Check data sizes if datashape != tuple(hslice.expected_dims()): raise ValueError('attempt to assign data of dimensions ' + str(datashape) + ' to slice of dimensions ' + str(tuple(hslice.expected_dims()))) #Flip majority and reversed dimensions, see convert_input_array data = hslice.convert_output_array(data) #Handle insertions and similar weirdness if hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Specified slice ends before last record, so insert in middle saved_data = self[hslice.starts[0] + n_recs:] if hslice.counts[0] > 0: hslice.select() lib.call(const.PUT_, const.zVAR_HYPERDATA_, data.ctypes.data_as(ctypes.c_void_p)) if hslice.counts[0] < n_recs: first_rec = hslice.starts[0] + hslice.counts[0] last_rec = hslice.dimsizes[0] - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Put saved data in after inserted data self[hslice.starts[0] + hslice.counts[0]:] = saved_data def extend(self, data): """ Append multiple values to the end of this variable This is an efficiency function which overrides the base implementation in MutableSequence. Parameters ---------- data : the data to append """ self[len(self):] = data def insert(self, index, data): """ Inserts a *single* record before an index Parameters ---------- index : int index before which to insert the new record data : the record to insert """ self[index:index] = [data] def _create(self, var_name, datatype, n_elements = 1, dims = (), recVary = const.VARY, dimVarys = None): """Creates a new zVariable @param var_name: name of this variable @type var_name: string @param datatype: CDF data type @type datatype: ctypes.c_long @param n_elements: number of elements (should be 1 except for CDF_CHAR variables). @type n_elements: long @param dims: size of each dimension for multi-dimensional variable, or empty for a zero-dimensional @type dims: sequence of long @param recVary: record variance for this variable (VARY/NOVARY) @type recVary: long @param dimVarys: array of VARY or NOVARY, variance for each dimension @type dimVarys: sequence of long @return: new variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.new}. 
""" dim_array = (ctypes.c_long * len(dims))(*dims) enc_name = var_name.encode('ascii') if dimVarys is None: dim_vary_array = (ctypes.c_long * (len(dims) if len(dims) > 0 else 1))(const.VARY) else: dim_vary_array = (ctypes.c_long * len(dims))(*dimVarys) varNum = ctypes.c_long(0) self.cdf_file._call(const.CREATE_, const.zVAR_, enc_name, datatype, ctypes.c_long(n_elements), ctypes.c_long(len(dims)), dim_array, recVary, dim_vary_array, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) def _delete(self): """Removes this zVariable from the CDF @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ self._call(const.DELETE_, const.zVAR_) self.cdf_file.clear_from_cache(self._name) self._name = None def _get(self, var_name): """Gets an existing zVariable @param var_name: name of this variable @type var_name: string @return: variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.__getitem__}. """ if isinstance(var_name, str_classes): try: enc_name = var_name.encode('ascii').rstrip() except AttributeError: enc_name = var_name.rstrip() #already in ASCII #'touch' CDF to cause an error if the name isn't there; get number varNum = ctypes.c_long(0) self.cdf_file._call(const.GET_, const.zVAR_NUMBER_, enc_name, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) else: #Looking up by number name = ctypes.create_string_buffer(const.CDF_VAR_NAME_LEN256+1) self.cdf_file._call(const.SELECT_, const.zVAR_, ctypes.c_long(var_name), const.GET_, const.zVAR_NAME_, name) self._name = name.value.rstrip() self.cdf_file.add_to_cache(self._name, var_name) def _num(self): """Returns the zVar number for this variable @return: number of this zVar @rtype: int """ return self.cdf_file.var_num(self._name) def __len__(self): """Get number of records for this variable in this file @return: Number of records @rtype: long @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ count = ctypes.c_long(0) self._call(const.GET_, const.zVAR_MAXREC_, ctypes.byref(count)) return (count.value + 1) def __repr__(self): """Returns representation of the variable Cannot return anything that can be eval'd to create a copy, so just wrap the informal representation in angle brackets. @return: info on this zVar @rtype: str """ return '<Var:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the variable This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.Var`. 
@return: info on this zVar, CDFTYPE [dimensions] NRV (if not record-varying) @rtype: str """ if self.cdf_file._opened: cdftype = self.type() chartypes = (const.CDF_CHAR.value, const.CDF_UCHAR.value) rv = self.rv() typestr = lib.cdftypenames[cdftype] + \ ('*' + str(self._nelems()) if cdftype in chartypes else '' ) if rv: sizestr = str([len(self)] + self._dim_sizes()) else: sizestr = str(self._dim_sizes()) return typestr + ' ' + sizestr + ('' if rv else ' NRV') else: if isinstance(self._name, str): return 'zVar "{0}" in closed CDF {1}'.format( self._name, self.cdf_file.pathname) else: return 'zVar "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self.cdf_file.pathname.decode('ascii')) def _n_dims(self): """Get number of dimensions for this variable @return: the number of dimensions @rtype: long """ n_dims = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMDIMS_, ctypes.byref(n_dims)) return n_dims.value def _dim_sizes(self): """Get the dimension sizes for this variable @return: sequence of sizes @rtype: sequence of long @note: This will always be in Python order (i.e. row major, last index iterates most quickly), *regardless* of the majority of the CDF. """ sizes = (ctypes.c_long * const.CDF_MAX_DIMS)(0) self._call(const.GET_, const.zVAR_DIMSIZES_, sizes) sizes = sizes[0:self._n_dims()] return sizes def rv(self, new_rv=None): """ Gets or sets whether this variable has record variance If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Other Parameters ================ new_rv : boolean True to change to record variance, False to change to NRV, unspecified to simply check variance. Returns ======= out : Boolean True if record varying, False if NRV """ if new_rv != None: self._call(const.PUT_, const.zVAR_RECVARY_, const.VARY if new_rv else const.NOVARY) vary = ctypes.c_long(0) self._call(const.GET_, const.zVAR_RECVARY_, ctypes.byref(vary)) return vary.value != const.NOVARY.value def dv(self, new_dv=None): """ Gets or sets dimension variance of each dimension of variable. If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Parameters ========== new_dv : list of boolean Each element True to change that dimension to dimension variance, False to change to not dimension variance. (Unspecified to simply check variance.) Returns ======= out : list of boolean True if that dimension has variance, else false. """ ndims = self._n_dims() if new_dv != None: if len(new_dv) != ndims: raise ValueError('Must specify variance for ' + str(ndims) + 'dimensions.') varies = (ctypes.c_long * ndims)( *[const.VARY if dv else const.NOVARY for dv in new_dv]) self._call(const.PUT_, const.zVAR_DIMVARYS_, varies) if ndims == 0: return [] varies = (ctypes.c_long * const.CDF_MAX_DIMS)() self._call(const.GET_, const.zVAR_DIMVARYS_, varies) return [dv != const.NOVARY.value for dv in varies[0:ndims]] def _call(self, *args, **kwargs): """Select this CDF and variable and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. @param args: Passed directly to the CDF library interface. Useful constants are defined in the :py:mod:`pycdf.const` module of this package. @type args: various, see :py:mod:`ctypes`. 
@return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self.cdf_file._call( const.SELECT_, const.zVAR_, ctypes.c_long(self.cdf_file.var_num(self._name)), *args, **kwargs) def _np_type(self): """Returns the numpy type of this variable This is the numpy type that will come directly out of the CDF; see :meth:`dtype` for the representation post-conversion. Raises ====== CDFError : for library-reported error or failure to find numpy type Returns ======= out : dtype numpy dtype that will hold value from this variable """ cdftype = self.type() if cdftype == const.CDF_CHAR.value or cdftype == const.CDF_UCHAR.value: return numpy.dtype('S' + str(self._nelems())) try: return lib.numpytypedict[cdftype] except KeyError: raise CDFError(const.BAD_DATA_TYPE) def type(self, new_type=None): """ Returns or sets the CDF type of this variable Parameters ========== new_type : ctypes.c_long the new type from :mod:`~pycdf.const` Returns ======= out : int CDF type """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) n_elements = ctypes.c_long(self._nelems()) self._call(const.PUT_, const.zVAR_DATASPEC_, new_type, n_elements) self._type = None if self._type is None: cdftype = ctypes.c_long(0) self._call(const.GET_, const.zVAR_DATATYPE_, ctypes.byref(cdftype)) self._type = cdftype.value return self._type def _nelems(self): """Number of elements for each value in this variable This is the length of strings for CHAR and UCHAR, should be 1 otherwise. @return: length of strings @rtype: int """ nelems = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMELEMS_, ctypes.byref(nelems)) return nelems.value def name(self): """ Returns the name of this variable Returns ======= out : str variable's name """ if isinstance(self._name, str): return self._name elif isinstance(self._name, bytes): return self._name.decode() def compress(self, comptype=None, param=None): """ Set or check the compression of this variable Compression may not be changeable on variables with data already written; even deleting the data may not permit the change. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
Returns ======= out : tuple the (comptype, param) currently in effect """ return _compress(self, comptype, param) def copy(self): """ Copies all data and attributes from this variable Returns ======= out : :class:`VarCopy` list of all data in record order """ return VarCopy(self) def rename(self, new_name): """ Renames this variable Parameters ========== new_name : str the new name for this variable """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_VAR_NAME_LEN256: raise CDFError(const.BAD_VAR_NAME) self._call(const.PUT_, const.zVAR_NAME_, enc_name) self.cdf_file.add_to_cache( enc_name, self.cdf_file.var_num(self._name)) #Still in cache del self.cdf_file._var_nums[self._name] self._name = enc_name @property def shape(self): """ Provides the numpy array-like shape of this variable. Returns a tuple; first element is number of records (RV variable only) And the rest provide the dimensionality of the variable. .. note:: Assigning to this attribute will not change the shape. """ if self.rv(): return tuple([len(self)] + self._dim_sizes()) else: return tuple(self._dim_sizes()) @property def dtype(self): """ Provide the numpy dtype equivalent to the CDF type of this variable. Data from this variable will be returned in numpy arrays of this type. See Also -------- type """ cdftype = self.type() if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str is not bytes and not self._raw: return numpy.dtype('U' + str(self._nelems())) if cdftype in (const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value) and not self._raw: return numpy.dtype('O') return self._np_type() def _get_attrs(self): """Get attribute list Provide access to the zVar's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = zAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. """) class _Hyperslice(object): """Represents a CDF 'slice' used for the hyper CDF functions For internal module use only. @ivar dims: number of dimensions to this slice, usually number of dimensions to the variable plus one for the record, which represents the 0th (least rapidly varying) dimension. @type dims: int @ivar dimsizes: size of each dimension (0th is number of records) @type dimsizes: list of int @ivar starts: index of the start value for each dimension ('dimension indices' in CDF speak) @type starts: list of int @ivar counts: number of values to get from each dimension. Final result will be the product of everything in counts. ('dimension counts' in CDF speak) @type counts: numpy.array @ivar intervals: interval between successive indices to use for each dimension. ('dimension invervals' in CDF speak) @type intervals: list of int @ivar degen: is this dimension degenerate, i.e. should be removed in the returned dataset. A 3D array with one dimension degenerate will be returned as a 2D array (i.e. list-of-lists.) @type degen: numpy.array @ivar rev: should this dimension be returned in reverse order? 
@type rev: numpy.array @ivar column: is this slice in column-major mode (if false, row-major) @type column: boolean @ivar zvar: what CDF variable this object slices on @type zvar: :py:class:`pycdf.Var` @ivar expanded_key: fully-expanded version of the key passed to the constructor (all dimensions filled in) @type expanded_key: tuple @note: All dimension-related variables are stored row-major (Python order) """ # MASKED: __init__ function (lines 3510-3571) def expected_dims(self, data=None): """Calculate size of non-degenerate dimensions Figures out size, in each dimension, of expected input data @return: size of each dimension for this slice, excluding degenerate @rtype: list of int """ return [self.counts[i] for i in range(self.dims) if not self.degen[i]] def expand(self, data): """Expands the record dimension of this slice to hold a set of data If the length of data (outermost dimension) is larger than the record count (counts[0]) for this slice, expand the slice to hold all the data. This requires that the record dimension of the slice not be degenerate, and also that it not have been completely specified when the hyperslice was created (i.e. record dimension either ellipsis or no specified stop.) Does *not* expand any other dimension, since that's Very Hard in CDF. @param data: the data which are intended to be stored in this slice @type data: list """ rec_slice = self.expanded_key[0] if not self.rv or isinstance(data, str_classes) or self.degen[0] or \ not hasattr(rec_slice, 'stop'): return if len(data) < self.counts[0]: #Truncate to fit data if rec_slice.stop is None and rec_slice.step in (None, 1): self.counts[0] = len(data) elif len(data) > self.counts[0]: #Expand to fit data if rec_slice.step in (None, 1): self.counts[0] = len(data) def create_array(self): """Creates a numpy array to hold the data from this slice Returns ======= out : numpy.array array sized, typed, and dimensioned to hold data from this slice """ counts = self.counts degen = self.degen if self.column: counts = self.reorder(counts) degen = self.reorder(degen) #TODO: Forcing C order for now, revert to using self.column later array = numpy.empty( [counts[i] for i in range(len(counts)) if not degen[i]], self.zvar._np_type(), order='C') return numpy.require(array, requirements=('C', 'A', 'W')) def convert_input_array(self, buffer): """Converts a buffer of raw data from this slice EPOCH(16) variables always need to be converted. CHAR need converted to Unicode if py3k Parameters ========== buffer : numpy.array data as read from the CDF file Returns ======= out : numpy.array converted data """ result = self._flip_array(buffer) #Convert to derived types cdftype = self.zvar.type() if not self.zvar._raw: if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str != bytes: dt = numpy.dtype('U{0}'.format(result.dtype.itemsize)) result = numpy.require(numpy.char.array(result).decode(), dtype=dt) elif cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(result) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(result) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(result) return result def convert_output_array(self, buffer): """Convert a buffer of data that will go into this slice Parameters ========== buffer : numpy.array data to go into the CDF file Returns ======= out : numpy.array input with majority flipped and dimensions reversed to be suitable to pass directly to CDF library. 
""" buffer = self._flip_array(buffer) return numpy.require(buffer, requirements=('C', 'A', 'W')) def _flip_array(self, data): """ Operations for majority, etc. common between convert_input and _output """ cdftype = self.zvar.type() #Flip majority if any non-degenerate dimensions exist if self.column and not min(self.degen): #Record-number dim degen, swap whole thing if self.degen[0]: if cdftype == const.CDF_EPOCH16.value: #Maintain last dimension data = data.transpose( list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose() #Record-number dimension is not degenerate, so keep it first else: if cdftype == const.CDF_EPOCH16.value: data = data.transpose( [0] + list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose( [0] + list(range(len(data.shape) - 1, 0, -1))) #Reverse non-degenerate dimensions in rev #Remember that the degenerate indices are already gone! if self.rev.any(): sliced = [(slice(None, None, -1) if self.rev[i] else slice(None)) for i in range(self.dims) if not self.degen[i]] if cdftype == const.CDF_EPOCH16.value: #don't reverse last dim sliced.extend(slice(None)) data = operator.getitem(data, tuple(sliced)) return data def select(self): """Selects this hyperslice in the CDF Calls the CDF library to select the CDF, variable, records, and array elements corresponding to this slice. """ args = (const.SELECT_, const.zVAR_RECNUMBER_, ctypes.c_long(self.starts[0]), const.SELECT_, const.zVAR_RECCOUNT_, ctypes.c_long(self.counts[0]), const.SELECT_, const.zVAR_RECINTERVAL_, ctypes.c_long(self.intervals[0])) if self.dims > 1: dims = self.dims - 1 args += (const.SELECT_, const.zVAR_DIMINDICES_, (ctypes.c_long * dims)(*self.starts[1:]), const.SELECT_, const.zVAR_DIMCOUNTS_, (ctypes.c_long * dims)(*self.counts[1:]), const.SELECT_, const.zVAR_DIMINTERVALS_, (ctypes.c_long * dims)(*self.intervals[1:])) self.zvar._call(*args) @staticmethod def expand_ellipsis(slices, n_dims): """Expands any ellipses into correct number of full-size slices @param slices: tuple of slices, integers, or ellipse objects @type slices: tuple @param n_dims: number of dimensions this slice is over @type n_dims: int @return: L{slices} with ellipses replaced by appropriate number of full-dimension slices @rtype: tuple @raise IndexError: if ellipses specified when already have enough dimensions """ if slices is Ellipsis: return tuple([slice(None, None, None) for i in range(n_dims)]) #Elements might be numpy arrays, so can't use in/index idx = [i for i, v in enumerate(slices) if v is Ellipsis] if not idx: #no ellipsis return slices if len(idx) > 1: #multiples! raise IndexError('Ellipses can only be used once per slice.') idx = idx[0] #how many dims to expand ellipsis to #remember the ellipsis is in len(slices) and must be replaced! 
extra = n_dims - len(slices) + 1 if extra < 0: raise IndexError('too many indices') result = slices[0:idx] + (slice(None), ) * extra + slices[idx+1:] return result @staticmethod def check_well_formed(data): """Checks if input data is well-formed, regular array""" d = numpy.asanyarray(data) if d.dtype == numpy.object: #this is probably going to be bad try: len(d.flat[0]) except TypeError: #at least it's not a list pass else: raise ValueError( 'Data must be well-formed, regular array of number, ' 'string, or datetime') @staticmethod def dimensions(data): """Finds the dimensions of a nested list-of-lists @param data: data of which dimensions are desired @type data: list (of lists) @return: dimensions of L{data}, in order outside-in @rtype: list of int @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) _Hyperslice.check_well_formed(d) return d.shape @staticmethod def types(data, backward=False): """Find dimensions and valid types of a nested list-of-lists Any given data may be representable by a range of CDF types; infer the CDF types which can represent this data. This breaks down to: 1. Proper kind (numerical, string, time) 2. Proper range (stores highest and lowest number) 3. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: 1. Type that matches precision of data first, then 2. integer type before float type, then 3. Smallest type first, then 4. signed type first, then 5. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if L{data} specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: 1. absolute values between 0 and 3e-39 2. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. @param data: data for which dimensions and CDF types are desired @type data: list (of lists) @param backward: limit to pre-CDF3 types @type backward: bool @return: dimensions of L{data}, in order outside-in; CDF types which can represent this data; number of elements required (i.e. 
length of longest string) @rtype: 3-tuple of lists ([int], [ctypes.c_long], [int]) @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) dims = d.shape elements = 1 types = [] _Hyperslice.check_well_formed(d) if d.dtype.kind in ('S', 'U'): #it's a string types = [const.CDF_CHAR, const.CDF_UCHAR] elements = d.dtype.itemsize if d.dtype.kind == 'U': #UTF-8 uses 4 bytes per elements //= 4 elif d.size and hasattr(numpy.ma.getdata(d).flat[0], 'microsecond'): if max((dt.microsecond % 1000 for dt in d.flat)) > 0: types = [const.CDF_EPOCH16, const.CDF_EPOCH, const.CDF_TIME_TT2000] else: types = [const.CDF_EPOCH, const.CDF_EPOCH16, const.CDF_TIME_TT2000] if backward: del types[types.index(const.CDF_EPOCH16)] del types[-1] elif not lib.supports_int8: del types[-1] elif d is data or isinstance(data, numpy.generic): #numpy array came in, use its type (or byte-swapped) types = [k for k in lib.numpytypedict if (lib.numpytypedict[k] == d.dtype or lib.numpytypedict[k] == d.dtype.newbyteorder()) and not k in lib.timetypes] if (not lib.supports_int8 or backward) \ and const.CDF_INT8.value in types: del types[types.index(const.CDF_INT8.value)] #Maintain priority to match the ordered lists below: #float/double (44, 45) before real (21/22), and #byte (41) before int (1) before char (51). So hack. #Consider making typedict an ordered dict once 2.6 is dead. types.sort(key=lambda x: x % 50, reverse=True) if not types: #not a numpy array, or can't parse its type if d.dtype.kind == 'O': #Object. Try to make it numeric #Can't do safe casting from Object, so try and compare #Basically try most restrictive to least restrictive trytypes = (numpy.uint64, numpy.int64, numpy.float64) for t in trytypes: try: newd = d.astype(dtype=t) except: #Failure to cast, try next type continue if (newd == d).all(): #Values preserved, use this type d = newd #Continue with normal guessing, as if a list break else: #fell through without a match raise ValueError( 'Cannot convert generic objects to CDF type.') if d.dtype.kind in ('i', 'u'): #integer minval = numpy.min(d) maxval = numpy.max(d) if minval < 0: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_INT2, const.CDF_INT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 15, 2 ** 31, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] else: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_UINT1, const.CDF_INT2, const.CDF_UINT2, const.CDF_INT4, const.CDF_UINT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 8, 2 ** 15, 2 ** 16, 2 ** 31, 2 ** 32, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] types = [t for (t, c) in zip(types, cutoffs) if c > maxval and (minval >= 0 or minval >= -c)] if (not lib.supports_int8 or backward) \ and const.CDF_INT8 in types: del types[types.index(const.CDF_INT8)] else: #float if dims is (): if d != 0 and (abs(d) > 1.7e38 or abs(d) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] else: absolutes = numpy.abs(d[d != 0]) if len(absolutes) > 0 and \ (numpy.max(absolutes) > 1.7e38 or numpy.min(absolutes) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] types = [t.value if hasattr(t, 'value') else t for t in types] return (dims, types, elements) @staticmethod def reorder(seq): """Reorders seq to switch array majority Used to take an array of 
subscripts between row and column majority. First element is not touched, being the record number. @param seq: a sequence of *subscripts* @type seq: sequence of integers @return: seq with all but element 0 reversed in order @rtype: sequence of integers """ return numpy.concatenate((seq[0:1], numpy.flipud(seq)[:-1])) @staticmethod def convert_range(start, stop, step, size): """Converts a start/stop/step range to start/count/interval (i.e. changes from Python-style slice to CDF-style) @param start: index to start a slice at, may be none or negative @type start: int @param stop: index at end of slice (one-past, standard Python), may be none or negative @type stop: int @param step: interval for stepping through stlice @type step: int @param size: size of list to slice @type size: int @return: (start, count, interval, rev) where: 1. start is the start index, normalized to be within the size of the list and negatives handled 2. count is the number of records in the slice, guaranteed to stop before the end 3. interval is the skip between records 4. rev indicates whether the sequence should be reversed @rtype: (int, int, int, boolean) """ (start, stop, step) = slice(start, stop, step).indices(size) if step < 0: step *= -1 count = int((start - stop + step - 1) / step) start = start - (count - 1) * step rev = True else: count = int((stop - start + step - 1) / step) rev = False if count < 0: count = 0 start = 0 return (start, count, step, rev) class Attr(MutableSequence): """An attribute, g or z, for a CDF .. warning:: This class should not be used directly, but only in its subclasses, :class:`gAttr` and :class:`zAttr`. The methods listed here are safe to use in the subclasses. Represents a CDF attribute, providing access to the Entries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. An introduction to CDF attributes can be found in section 2.4 of the CDF user's guide. Each element of the list is a single Entry of the appropriate type. The index to the elements is the Entry number. Multi-dimensional slicing is *not* supported; an Entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry .. autosummary:: ~Attr.append ~Attr.has_entry ~Attr.insert ~Attr.max_idx ~Attr.new ~Attr.number ~Attr.rename ~Attr.type .. automethod:: append .. automethod:: has_entry .. automethod:: insert .. automethod:: max_idx .. automethod:: new .. automethod:: number .. automethod:: rename .. automethod:: type """ def __init__(self, cdf_file, attr_name, create=False): """Initialize this attribute @param cdf_file: CDF file containing this attribute @type cdf_file: :py:class:`pycdf.CDF` @param attr_name: Name of this attribute @type attr_name: str @param create: True to create attribute, False to look up existing. 
@type create: bool """ self._cdf_file = cdf_file self._raw = False if isinstance(attr_name, str_classes): try: self._name = attr_name.encode('ascii') except AttributeError: self._name = attr_name attrno = ctypes.c_long() if create: self._cdf_file._call(const.CREATE_, const.ATTR_, self._name, self.SCOPE, ctypes.byref(attrno)) self._cdf_file.add_attr_to_cache( self._name, attrno.value, self.SCOPE == const.GLOBAL_SCOPE) else: #Ensure exists, and populate cache. See scope note below attrno, scope = self._cdf_file.attr_num(self._name) else: name = ctypes.create_string_buffer(const.CDF_ATTR_NAME_LEN256 + 1) scope = ctypes.c_long(0) self._cdf_file._call(const.SELECT_, const.ATTR_, ctypes.c_long(attr_name)) #Because it's possible to create a gAttr Python objecting #referencing an Attribute with variable scope, and vice-versa, #do NOT assume the scope matches #(Higher level code checks for that being a bad thing.) self._cdf_file._call( const.GET_, const.ATTR_NAME_, name, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) self._name = name.value.rstrip() if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) self._cdf_file.add_attr_to_cache(self._name, attr_name, scope) def __getitem__(self, key): """Return a slice of Entries. Because Attributes may be sparse, a multi-element slice will return None for those elements which do not have associated Entries. @param key: index or range of Entry number to return @type key: slice or int @return: a list of entries, appropriate type. @raise IndexError: if L{key} is an int and that Entry number does not exist. """ if key is Ellipsis: key = slice(None, None, None) if hasattr(key, 'indices'): idx = range(*key.indices(self.max_idx() + 1)) return [self._get_entry(i) if self.has_entry(i) else None for i in idx] else: if self.has_entry(key): return self._get_entry(key) else: raise IndexError('list index ' + str(key) + ' out of range.') def _check_other_entries(self, types): """Try to get the type of this entry from others in the Attribute For zAttrs, checks if all other Entries are the same type, and at least one doesn't match its zVar, i.e. Entry type dominates (otherwise assumption is the Var type dominates). For gAttrs, checks all other Entries, and gives priority to the one that's earliest in the possible type list and exists in other Entries. This is only one component of Entry type guessing! :param list types: CDF types that are candidates (match the data) :return: The type discerned from other Entries, or None """ if self.ENTRY_ == const.zENTRY_: #If everything else is the same entry type, #and one is not the same as its var, probably #all entries should be of that type cand_et = None #The Entry type that might work one_var_diff = False #One Var has a type different from Entry for num in range(self.max_idx() + 1): if not self.has_entry(num): continue vartype = self._cdf_file[num].type() entrytype = self.type(num) if vartype != entrytype: one_var_diff = True if cand_et is None: if not entrytype in types: return None #One var has Entry with "impossible" type cand_et = entrytype elif cand_et != entrytype: return None #Two vars have Entries with different types if one_var_diff and cand_et is not None: return cand_et else: # Of those types which exist in other entries, # find the one which is earliest # in types, i.e. 
the preferred type entrytypes = [self.type(num) for num in range(self.max_idx() + 1) if self.has_entry(num)] entrytypes = [et for et in entrytypes if et in types] if entrytypes: return types[ min([types.index(et) for et in entrytypes])] return None def __setitem__(self, key, data): """Set a slice of Entries. @param key: index or range of Entry numbers to set @type key: slice or int @param data: the data to set these entries to. Normally each entry should be a sequence; if a scalar is provided, it is treated as a single-element list. @type data: scalar or list @raise ValueError: if size of {data} does not match size of L{key} @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. """ if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): #Single value, promote everything a dimension idx = (key, key + 1, 1) data = [data] else: idx = key.indices(self.max_idx() + 1) if key.step is None or key.step > 0: #Iterating forward, extend slice to match data if len(data) > len(range(*idx)): idx = (idx[0], idx[0] + idx[2] * len(data), idx[2]) #get, and check, types and sizes for all data #checks first so don't have error after changing half the Entries data_idx = -1 typelist = [] for i in range(*idx): data_idx += 1 if data_idx >= len(data): continue datum = data[data_idx] if datum is None: typelist[i] = (None, None, None) continue (dims, types, elements) = _Hyperslice.types( datum, backward=self._cdf_file.backward) if len(types) <= 0: raise ValueError('Cannot find a matching CDF type.') if len(dims) > 1: raise ValueError('Entries must be scalar or 1D.') elif len(dims) == 1 and isinstance(datum[0], str_classes): raise ValueError('Entry strings must be scalar.') entry_type = None if self.has_entry(i): #If the entry already exists, match its type entry_type = self.type(i) if not entry_type in types: entry_type = None if entry_type is None: #Check other entries for this attribute entry_type = self._check_other_entries(types) if entry_type is None and self.ENTRY_ == const.zENTRY_: #Fall back to zVar type vartype = self._cdf_file[i].type() if vartype in types: entry_type = vartype else: entry_type = types[0] elif entry_type is None: entry_type = types[0] if not entry_type in lib.numpytypedict: raise ValueError('Cannot find a matching numpy type.') typelist.append((dims, entry_type, elements)) data_idx = -1 for i in range(*idx): data_idx += 1 if data_idx >= len(data) or data[data_idx] is None: if self.has_entry(i): del self[i] continue datum = data[data_idx] (dims, entry_type, elements) = typelist[data_idx] self._write_entry(i, datum, entry_type, dims, elements) def __delitem__(self, key): """Delete a slice of Entries. @param key: index or range of Entry numbers to delete @type key: slice or int @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. """ if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): idx = (key, key + 1, 1) else: idx = key.indices(self.max_idx() + 1) for i in range(*idx): self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(i), const.DELETE_, self.ENTRY_) def __iter__(self, current=0): """Iterates over all entries in this Attribute Returns data from one entry at a time until reaches the end. @note: Returned in entry-number order. 
""" while current <= self.max_idx(): if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current += 1 def __reversed__(self, current=None): """Iterates over all entries in this Attribute Returns data from one entry at a time, starting at end and going to beginning. @note: Returned in entry-number order. """ if current is None: current = self.max_idx() while current >= 0: if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current -= 1 def __len__(self): """Number of Entries for this Attr. NOT same as max Entry number. @return: Number of Entries @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_NUMENTRIES_, ctypes.byref(count)) return count.value def __repr__(self): """Returns representation of an attribute Cannot return anything that can be eval'd to create a copy of the attribtute, so just wrap the informal representation in angle brackets. @return: all the data in this attribute @rtype: str """ return '<\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute This is an 'informal' representation in that it cannot be evaluated directly to create an L{Attr}. @return: all the data in this attribute @rtype: str """ if self._cdf_file._opened: return '\n'.join([str(item) for item in self]) else: if isinstance(self._name, str): return 'Attribute "{0}" in closed CDF {1}'.format( self._name, self._cdf_file.pathname) else: return 'Attribute "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self._cdf_file.pathname.decode('ascii')) def insert(self, index, data): """Insert an entry at a particular number Inserts entry at particular number while moving all subsequent entries to one entry number later. Does not close gaps. Parameters ========== index : int index where to put the new entry data : data for the new entry """ max_entry = self.max_idx() if index > max_entry: #Easy case self[index] = data return for i in range(max_entry, index - 1, -1): if self.has_entry(i+1): self.__delitem__(i+1) if self.has_entry(i): self.new(self.__getitem__(i), type=self.type(i), number=i+1) self[index] = data def append(self, data): """Add an entry to end of attribute Puts entry after last defined entry (does not fill gaps) Parameters ========== data : data for the new entry """ self[self.max_idx() + 1] = data def _call(self, *args, **kwargs): """Select this CDF and Attr and call the CDF internal interface @param args: Passed directly to the CDF library interface. @type args: various, see :py:mod:`ctypes`. @return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. 
""" return self._cdf_file._call( const.SELECT_, const.ATTR_, ctypes.c_long(self._cdf_file.attr_num(self._name)[0]), *args, **kwargs) def _entry_len(self, number): """Number of elements in an Entry @param number: number of Entry @type number: int @return: number of elements @rtype: int """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') count = ctypes.c_long(0) self._call( const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_NUMELEMS_, ctypes.byref(count)) return count.value def type(self, number, new_type=None): """Find or change the CDF type of a particular Entry number Parameters ========== number : int number of Entry to check or change Other Parameters ================ new_type type to change this Entry to, from :mod:`~pycdf.const`. Omit to only check type. Returns ======= out : int CDF variable type, see :mod:`~pycdf.const` Notes ===== If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 57 """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) size = ctypes.c_long(self._entry_len(number)) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATASPEC_, new_type, size, ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') cdftype = ctypes.c_long(0) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATATYPE_, ctypes.byref(cdftype), ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') return cdftype.value def has_entry(self, number): """Check if this attribute has a particular Entry number Parameters ========== number : int number of Entry to check or change Returns ======= out : bool True if ``number`` is a valid entry number; False if not """ status = self._call(const.CONFIRM_, self.ENTRY_EXISTENCE_, ctypes.c_long(number), ignore=(const.NO_SUCH_ENTRY, )) return not status == const.NO_SUCH_ENTRY def max_idx(self): """Maximum index of Entries for this Attr Returns ======= out : int maximum Entry number """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_MAXENTRY_, ctypes.byref(count)) return count.value def new(self, data, type=None, number=None): """Create a new Entry in this Attribute .. note:: If ``number`` is provided and an Entry with that number already exists, it will be overwritten. Parameters ========== data data to put in the Entry Other Parameters ================ type : int type of the new Entry, from :mod:`~pycdf.const` (otherwise guessed from ``data``) number : int Entry number to write, default is lowest available number. 
""" if number is None: number = 0 while self.has_entry(number): number += 1 (dims, types, elements) = _Hyperslice.types( data, backward=self._cdf_file.backward) if type is None: #Guess based on other entries type = self._check_other_entries(types) if type is None and self.ENTRY_ == const.zENTRY_: #Try to match variable type vartype = self._cdf_file[number].type() if vartype in types: type = vartype if type is None: type = types[0] elif hasattr(type, 'value'): type = type.value self._write_entry(number, data, type, dims, elements) def number(self): """Find the attribute number for this attribute Returns ======= out : int attribute number """ no = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.ATTR_NUMBER_, self._name, ctypes.byref(no)) return no.value def global_scope(self): """Determine scope of this attribute. Returns ======= out : bool True if global (i.e. gAttr), False if zAttr """ return self._cdf_file.attr_num(self._name)[1] def rename(self, new_name): """Rename this attribute Renaming a zAttribute renames it for *all* zVariables in this CDF! Parameters ========== new_name : str the new name of the attribute """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_ATTR_NAME_LEN256: raise CDFError(const.BAD_ATTR_NAME) self._call(const.PUT_, const.ATTR_NAME_, enc_name) self._cdf_file.add_attr_to_cache( enc_name, *self._cdf_file.attr_num(self._name)) #still in cache del self._cdf_file._attr_info[self._name] self._name = enc_name def _get_entry(self, number): """Read an Entry associated with this L{Attr} @param number: number of Entry to return @type number: int @return: data from entry numbered L{number} @rtype: list or str """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') #Make a big enough buffer length = self._entry_len(number) cdftype = self.type(number) if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): buff = numpy.empty((), 'S{0}'.format(length), order='C') else: if not cdftype in lib.numpytypedict: raise CDFError(const.BAD_DATA_TYPE) buff = numpy.empty((length,), lib.numpytypedict[cdftype], order='C') buff = numpy.require(buff, requirements=('C', 'A', 'W')) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATA_, buff.ctypes.data_as(ctypes.c_void_p)) #decode if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): if str == bytes or self._raw: #Py2k, leave as bytes result = bytes(buff) else: #Py3k, make unicode result = str(numpy.char.array(buff).decode()) else: if not self._raw: if cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(buff) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(buff) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(buff) else: result = buff else: result = buff if length == 1: result = result[0] return result def _write_entry(self, number, data, cdf_type, dims, elements): """Write an Entry to this Attr. 
@param number: number of Entry to write @type number: int @param data: data to write @param cdf_type: the CDF type to write, from :py:mod:`pycdf.const` @param dims: dimensions of L{data} @type dims: list @param elements: number of elements in L{data}, 1 unless it is a string @type elements: int """ if len(dims) == 0: n_write = 1 else: n_write = dims[0] if cdf_type in (const.CDF_CHAR.value, const.CDF_UCHAR.value): data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.dtype('S' + str(elements))) n_write = elements elif cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data), except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) elif cdf_type in lib.numpytypedict: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=lib.numpytypedict[cdf_type]) else: raise CDFError(const.BAD_DATA_TYPE) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATA_, ctypes.c_long(cdf_type), ctypes.c_long(n_write), data.ctypes.data_as(ctypes.c_void_p)) def _delete(self): """Delete this Attribute Also deletes all Entries associated with it. """ self._call(const.DELETE_, const.ATTR_) self._cdf_file.clear_attr_from_cache(self._name) self._name = None class zAttr(Attr): """zAttribute for zVariables within a CDF. .. warning:: Because zAttributes are shared across all variables in a CDF, directly manipulating them may have unexpected consequences. It is safest to operate on zEntries via :class:`zAttrList`. .. note:: When accessing a zAttr, pyCDF exposes only the zEntry corresponding to the associated zVariable. See Also ======== :class:`Attr` """ ENTRY_ = const.zENTRY_ ENTRY_DATA_ = const.zENTRY_DATA_ SCOPE = const.VARIABLE_SCOPE ENTRY_EXISTENCE_ = const.zENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMzENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXzENTRY_ ENTRY_NUMELEMS_ = const.zENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.zENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.zENTRY_DATASPEC_ def insert(self, index, data): """Insert entry at particular index number Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError def append(self, index, data): """Add entry to end of attribute list Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError class gAttr(Attr): """Global Attribute for a CDF Represents a CDF attribute, providing access to the gEntries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. Normally accessed by providing a key to a :class:`gAttrList`: >>> attribute = cdffile.attrs['attribute_name'] >>> first_gentry = attribute[0] Each element of the list is a single gEntry of the appropriate type. The index to the elements is the gEntry number. 
A gEntry may be either a single string or a 1D array of numerical type. Entries of numerical type (everything but CDF_CHAR and CDF_UCHAR) with a single element are returned as scalars; multiple-element entries are returned as a list. No provision is made for accessing below the entry level; the whole list is returned at once (but Python's slicing syntax can be used to extract individual items from that list.) Multi-dimensional slicing is *not* supported; an entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry gEntries are *not* necessarily contiguous; a gAttribute may have an entry 0 and entry 2 without an entry 1. :meth:`~Attr.len` will return the *number* of gEntries; use :meth:`~Attr.max_idx` to find the highest defined gEntry number and :meth:`~Attr.has_entry` to determine if a particular gEntry number exists. Iterating over all entries is also supported:: >>> entrylist = [entry for entry in attribute] Deleting gEntries will leave a "hole": >>> attribute[0:3] = [1, 2, 3] >>> del attribute[1] >>> attribute.has_entry(1) False >>> attribute.has_entry(2) True >>> print attribute[0:3] [1, None, 3] Multi-element slices over nonexistent gEntries will return ``None`` where no entry exists. Single-element indices for nonexistent gEntries will raise ``IndexError``. Assigning ``None`` to a gEntry will delete it. When assigning to a gEntry, the type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing gEntry of the same number in this gAttribute #. other gEntries in this gAttribute #. data-matching constraints described in :meth:`CDF.new`. See Also ======== :class:`Attr` """ ENTRY_ = const.gENTRY_ ENTRY_DATA_ = const.gENTRY_DATA_ SCOPE = const.GLOBAL_SCOPE ENTRY_EXISTENCE_ = const.gENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMgENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXgENTRY_ ENTRY_NUMELEMS_ = const.gENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.gENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.gENTRY_DATASPEC_ class AttrList(MutableMapping): """Object representing a list of attributes. .. warning:: This class should not be used directly, but only via its subclasses, :class:`gAttrList` and :class:`zAttrList`. Methods listed here are safe to use from the subclasses. .. autosummary:: ~AttrList.clone ~AttrList.copy ~AttrList.from_dict ~AttrList.new ~AttrList.rename .. automethod:: clone .. automethod:: copy .. automethod:: from_dict .. automethod:: new .. automethod:: rename """ def __init__(self, cdf_file, special_entry=None): """Initialize the attribute collection @param cdf_file: CDF these attributes are in @type cdf_file: :py:class:`pycdf.CDF` @param special_entry: callable which returns a "special" entry number, used to limit results for zAttrs to those which match the zVar (i.e. 
the var number) @type special_entry: callable """ self._cdf_file = cdf_file self.special_entry = special_entry def __getitem__(self, name): """Find an Attribute by name @param name: name of the Attribute to return @type name: str @return: attribute named L{name} @rtype: L{Attr} @raise KeyError: if there is no attribute named L{name} @raise CDFError: other errors in CDF library """ try: attrib = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attrib.global_scope() != self.global_scope: raise KeyError(name + ': no ' + self.attr_name + ' by that name.') return attrib def __setitem__(self, name, data): """Create an Attribute or change its entries @param name: name of Attribute to change @type name: str @param data: Entries to populate this Attribute with. Any existing Entries will be deleted! Another C{Attr} may be specified, in which case all its entries are copied. @type data: scalar, list, or L{Attr} """ if isinstance(data, AttrList): if name in self: del self[name] attr = self._get_or_create(name) for entryno in range(data.max_idx()): if data.has_entry(entryno): attr.new(data[entryno], data.type(entryno), entryno) else: attr = self._get_or_create(name) if isinstance(data, str_classes): data = [data] else: try: junk = len(data) except TypeError: data = [data] attr[:] = data del attr[len(data):] def __delitem__(self, name): """Delete an Attribute (and all its entries) @param name: name of Attribute to delete @type name: str """ try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) attr._delete() def __iter__(self, current=0): """Iterates over all Attr in this CDF or variable Returns name of one L{Attr} at a time until reaches the end. @note: Returned in number order. """ count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) while current < count.value: candidate = self.AttrType(self._cdf_file, current) if candidate.global_scope() == self.global_scope: if self.special_entry is None or \ candidate.has_entry(self.special_entry()): if str == bytes: value = yield(candidate._name) else: value = yield(candidate._name.decode()) if value != None: current = self[value].number() current += 1 def __repr__(self): """Returns representation of attribute list Cannot return anything that can be eval'd to create a copy of the list, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<' + self.__class__.__name__ + ':\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute list This is an 'informal' representation in that it cannot be evaluated directly to create an L{AttrList}. 
@return: all the data in this list of attributes @rtype: str """ if self._cdf_file._opened: return '\n'.join([key + ': ' + ( ('\n' + ' ' * (len(key) + 2)).join( [str(value[i]) + ' [' + lib.cdftypenames[value.type(i)] + ']' for i in range(value.max_idx() + 1) if value.has_entry(i)]) if isinstance(value, Attr) else str(value) + ' [' + lib.cdftypenames[self.type(key)] + ']' ) for (key, value) in sorted(self.items())]) else: if isinstance(self._cdf_file.pathname, str): return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname) else: return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname.decode('ascii')) def clone(self, master, name=None, new_name=None): """ Clones another attribute list, or one attribute from it, into this list. Parameters ========== master : AttrList the attribute list to copy from. This can be any dict-like object. Other Parameters ================ name : str (optional) name of attribute to clone (default: clone entire list) new_name : str (optional) name of the new attribute, default ``name`` """ if name is None: self._clone_list(master) else: self._clone_attr(master, name, new_name) def copy(self): """ Create a copy of this attribute list Returns ======= out : dict copy of the entries for all attributes in this list """ return dict((key, value[:] if isinstance(value, Attr) else value) for (key, value) in self.items()) def new(self, name, data=None, type=None): """ Create a new Attr in this AttrList Parameters ========== name : str name of the new Attribute Other Parameters ================ data data to put into the first entry in the new Attribute type CDF type of the first entry from :mod:`~pycdf.const`. Only used if data are specified. Raises ====== KeyError : if the name already exists in this list """ if name in self: raise KeyError(name + ' already exists.') #A zAttr without an Entry in this zVar will be a "get" not "create" attr = self._get_or_create(name) if data is not None: if self.special_entry is None: attr.new(data, type) else: attr.new(data, type, self.special_entry()) def rename(self, old_name, new_name): """ Rename an attribute in this list Renaming a zAttribute renames it for *all* zVariables in this CDF! Parameters ========== old_name : str the current name of the attribute new_name : str the new name of the attribute """ AttrList.__getitem__(self, old_name).rename(new_name) def from_dict(self, in_dict): """ Fill this list of attributes from a dictionary .. deprecated:: 0.1.5 Use :meth:`~pycdf.AttrList.clone` instead; it supports cloning from dictionaries. Parameters ========== in_dict : dict Attribute list is populated entirely from this dictionary; all existing attributes are deleted. """ warnings.warn("from_dict is deprecated and will be removed. 
Use clone.", DeprecationWarning) for k in in_dict: self[k] = in_dict[k] for k in list(self): if not k in in_dict: del self[k] def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{AttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name self[new_name] = master[name] def _clone_list(self, master): """Clones this attribute list from another @param master: the attribute list to copy from @type master: L{AttrList} """ for name in master: self._clone_attr(master, name) for name in list(self): #Can't iterate over a list we're changing if not name in master: del self[name] def _get_or_create(self, name): """Retrieve L{Attr} or create it if it doesn't exist @param name: name of the attribute to look up or create @type name: str @return: attribute with this name @rtype: L{Attr} """ attr = None try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status != const.NO_SUCH_ATTR: raise if attr is None: attr = self.AttrType(self._cdf_file, name, True) elif attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) return attr class gAttrList(AttrList): """ Object representing *all* the gAttributes in a CDF. Normally accessed as an attribute of an open :class:`CDF`: >>> global_attribs = cdffile.attrs Appears as a dictionary: keys are attribute names; each value is an attribute represented by a :class:`gAttr` object. To access the global attribute TEXT: >>> text_attr = cdffile.attrs['TEXT'] See Also ======== :class:`AttrList` """ AttrType = gAttr attr_name = 'gAttribute' global_scope = True def __len__(self): """ Number of gAttributes in this CDF Returns ======= out : int number of gAttributes in the CDF """ count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMgATTRS_, ctypes.byref(count)) return count.value class zAttrList(AttrList): """Object representing *all* the zAttributes in a zVariable. Normally accessed as an attribute of a :class:`Var` in an open CDF: >>> epoch_attribs = cdffile['Epoch'].attrs Appears as a dictionary: keys are attribute names, values are the value of the zEntry associated with the appropriate zVariable. Each vAttribute in a CDF may only have a *single* entry associated with each variable. The entry may be a string, a single numerical value, or a series of numerical values. Entries with multiple values are returned as an entire list; direct access to the individual elements is not possible. Example: finding the first dependency of (ISTP-compliant) variable ``Flux``: >>> print cdffile['Flux'].attrs['DEPEND_0'] zAttributes are shared among zVariables, one zEntry allowed per zVariable. (pyCDF hides this detail.) Deleting the last zEntry for a zAttribute will delete the underlying zAttribute. zEntries are created and destroyed by the usual dict methods on the zAttrlist: >>> epoch_attribs['new_entry'] = [1, 2, 4] #assign a list to new zEntry >>> del epoch_attribs['new_entry'] #delete the zEntry The type of the zEntry is guessed from data provided. The type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing zEntry corresponding to this zVar #. other zEntries in this zAttribute #. the type of this zVar #. 
data-matching constraints described in :py:meth:`CDF.new` See Also ======== :class:`AttrList` """ AttrType = zAttr attr_name = 'zAttribute' global_scope = False def __init__(self, zvar): """Initialize the attribute collection @param zvar: zVariable these attributes are in @param zvar: :py:class:`pycdf.Var` """ super(zAttrList, self).__init__(zvar.cdf_file, zvar._num) self._zvar = zvar def __getitem__(self, name): """Find an zEntry by name @param name: name of the zAttribute to return @type name: str @return: attribute named L{name} @rtype: L{zAttr} @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if attrib.has_entry(zvar_num): attrib._raw = self._zvar._raw return attrib[zvar_num] else: raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) def __delitem__(self, name): """Delete an zEntry by name @param name: name of the zEntry to delete @type name: str @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library @note: If this is the only remaining entry, the Attribute will be deleted. """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(str(name) + ': no such attribute for variable ' + str(self._zvar._name)) del attrib[zvar_num] if len(attrib) == 0: attrib._delete() def __setitem__(self, name, data): """Sets a zEntry by name The type of the zEntry is guessed from L{data}. The type is chosen to match the data; subject to that constraint, it will try to match (in order): 1. existing zEntry corresponding to this zVar 2. other zEntries in this zAttribute 3. the type of this zVar 4. data-matching constraints described in L{_Hyperslice.types} @param name: name of zAttribute; zEntry for this zVariable will be set in zAttribute by this name @type name: str @raise CDFError: errors in CDF library @raise ValueError: if unable to find a valid CDF type matching L{data}, or if L{data} is the wrong dimensions. """ try: attr = super(zAttrList, self).__getitem__(name) except KeyError: attr = zAttr(self._cdf_file, name, True) attr._raw = self._zvar._raw attr[self._zvar._num()] = data def __len__(self): """Number of zAttributes in this variable @return: number of zAttributes in the CDF which have entries for this variable. @rtype: int """ length = 0 count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) current = 0 while current < count.value: candidate = zAttr(self._cdf_file, current) if not candidate.global_scope(): if candidate.has_entry(self._zvar._num()): length += 1 current += 1 return length def type(self, name, new_type=None): """Find or change the CDF type of a zEntry in this zVar @param name: name of the zAttr to check or change @type name: str @param new_type: type to change it to, see :py:mod:`pycdf.const` @type new_type: ctypes.c_long @return: CDF variable type, see :py:mod:`pycdf.const` @rtype: int @note: If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) return attrib.type(zvar_num, new_type) def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{zAttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name if new_name in self: del self[new_name] self.new(new_name, master[name], master.type(name) if hasattr(master, 'type') else None)
def __init__(self, zvar, key):
        """Create a Hyperslice

        @param zvar: zVariable that this slices
        @type zvar: :py:class:`pycdf.Var`
        @param key: Python multi-dimensional slice as passed to
                    __getitem__
        @type key: tuple of slice and/or int
        @raise IndexError: if slice is out of range, mismatches
                           dimensions, or otherwise unparsable.
        @raise ValueError: if slice has invalid values
        """
        self.zvar = zvar
        self.rv = self.zvar.rv()
        #dim of records, + 1 record dim (NRV always is record 0)
        self.dims = zvar._n_dims() + 1
        self.dimsizes = [len(zvar)] + \
                        zvar._dim_sizes()
        self.starts = [0] * self.dims
        self.counts = numpy.empty((self.dims,), dtype=numpy.int32)
        self.counts.fill(1)
        self.intervals = [1] * self.dims
        self.degen = numpy.zeros(self.dims, dtype=numpy.bool)
        self.rev = numpy.zeros(self.dims, dtype=numpy.bool)
        #key is:
        #1. a single value (integer or slice object) if called 1D
        #2. a tuple (of integers and/or slice objects) if called nD
        #3. Each item is either a single value (degenerate dim)
        #   or a slice object.
        if not hasattr(key, '__len__'): #Not a container object, pack in tuple
            key = (key, )
        if not self.rv:
            key = (0, ) + key #NRV, so always get 0th record (degenerate)
        key = self.expand_ellipsis(key, self.dims)
        if self.rv: #special-cases for RV variables
            if len(key) == 1: #get all data for this record(s)
                key = self.expand_ellipsis(key + (Ellipsis, ), self.dims)
            elif len(key) == self.dims - 1: #get same slice from each record
                key = (slice(None, None, None), ) + key
        if len(key) == self.dims:
            self.expanded_key = key
            for i in range(self.dims):
                idx = key[i]
                if hasattr(idx, 'start'): #slice
                    (self.starts[i], self.counts[i],
                     self.intervals[i], self.rev[i]) = \
                     self.convert_range(idx.start, idx.stop,
                                        idx.step, self.dimsizes[i])
                else: #Single degenerate value
                    if idx < 0:
                        idx += self.dimsizes[i]
                    if idx != 0 and (idx >= self.dimsizes[i] or idx < 0):
                        raise IndexError('list index out of range')
                    self.starts[i] = idx
                    self.degen[i] = True
        else:
            raise IndexError('Slice does not match dimensions for zVar ' +
                             str(zvar._name))
        self.column = zvar.cdf_file.col_major()
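__init__ above does little numeric work itself: it normalizes the key with expand_ellipsis and hands each dimension to convert_range, both defined earlier in _Hyperslice. The sketch below mirrors those two helpers as standalone functions so the mapping from Python slices to CDF-style (start, count, interval, reverse) values can be tried without a CDF file; the logic is copied from the methods above, and the dimension sizes in the examples are assumptions.

# Standalone mirrors of the two _Hyperslice helpers used by __init__ (illustration only).
def expand_ellipsis(slices, n_dims):
    #Replace a single Ellipsis with enough full-dimension slices
    if slices is Ellipsis:
        return tuple(slice(None) for _ in range(n_dims))
    idx = [i for i, v in enumerate(slices) if v is Ellipsis]
    if not idx:
        return slices
    if len(idx) > 1:
        raise IndexError('Ellipses can only be used once per slice.')
    extra = n_dims - len(slices) + 1
    if extra < 0:
        raise IndexError('too many indices')
    i = idx[0]
    return slices[0:i] + (slice(None),) * extra + slices[i + 1:]

def convert_range(start, stop, step, size):
    #Python start/stop/step -> CDF start/count/interval, plus reverse flag
    start, stop, step = slice(start, stop, step).indices(size)
    if step < 0:
        step *= -1
        count = int((start - stop + step - 1) / step)
        start = start - (count - 1) * step
        rev = True
    else:
        count = int((stop - start + step - 1) / step)
        rev = False
    if count < 0:
        count = 0
        start = 0
    return (start, count, step, rev)

# Example key for a 3-dim hyperslice (record dim plus two array dims,
# sizes assumed to be 100, 4, and 8):
print(expand_ellipsis((slice(10, 20), Ellipsis), 3))
# -> (slice(10, 20, None), slice(None, None, None), slice(None, None, None))
print(convert_range(10, 20, None, 100))  # (10, 10, 1, False): records 10..19
print(convert_range(None, None, -1, 4))  # (0, 4, 1, True): all 4 values, reversed
print(convert_range(1, None, 3, 8))      # (1, 3, 3, False): indices 1, 4, 7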
3510
3571
#!/usr/bin/env python # -*- coding: utf-8 -*- """ das developers note: This a is modification of the original SpacePy pycdf package. All refereneces to the greater spacepy package have been removed to create a small standalone module. --cwp 2018-10-18 The libcdf.so location code has been changed to find the version installed in anaconda. --cwp 2020-04-06 This package provides a Python interface to the Common Data Format (CDF) library used for many NASA missions, available at http://cdf.gsfc.nasa.gov/. It is targeted at Python 2.6+ and should work without change on either Python 2 or Python 3. The interface is intended to be 'pythonic' rather than reproducing the C interface. To open or close a CDF and access its variables, see the :class:`CDF` class. Accessing data within the variables is via the :class:`Var` class. The :data:`lib` object provides access to some routines that affect the functionality of the library in general. The :mod:`~pycdf.const` module contains constants useful for accessing the underlying library. Authors: Jon Niehof Institution: University of New Hampshire Contact: [email protected] Copyright 2010-2015 Los Alamos National Security, LLC. """ __contact__ = 'Jon Niehof, [email protected]' try: from collections.abc import MutableMapping, MutableSequence except ImportError: from collections import MutableMapping, MutableSequence import ctypes import ctypes.util import datetime import operator import os import os.path import shutil import sys import tempfile import warnings import weakref import numpy import numpy.ma #Import const AFTER library loaded, so failed load doesn't leave half-imported #from . import const try: str_classes = (str, bytes, unicode) except NameError: str_classes = (str, bytes) class Library(object): """ Abstraction of the base CDF C library and its state. Not normally intended for end-user use. An instance of this class is created at package load time as the :data:`~pycdf.lib` variable, providing access to the underlying C library if necessary. The CDF library itself is described in section 2.1 of the CDF user's guide, as well as the CDF C reference manual. Calling the C library directly requires knowledge of :mod:`ctypes`. Instantiating this object loads the C library, see :doc:`/pycdf` docs for details. .. autosummary:: ~Library.call ~Library.check_status ~Library.datetime_to_epoch ~Library.datetime_to_epoch16 ~Library.datetime_to_tt2000 ~Library.epoch_to_datetime ~Library.epoch_to_epoch16 ~Library.epoch_to_num ~Library.epoch_to_tt2000 ~Library.epoch16_to_datetime ~Library.epoch16_to_epoch ~Library.epoch16_to_tt2000 ~Library.set_backward supports_int8 ~Library.tt2000_to_datetime ~Library.tt2000_to_epoch ~Library.tt2000_to_epoch16 v_datetime_to_epoch v_datetime_to_epoch16 v_datetime_to_tt2000 v_epoch_to_datetime v_epoch_to_tt2000 v_epoch16_to_datetime v_epoch16_to_tt2000 v_tt2000_to_datetime v_tt2000_to_epoch v_tt2000_to_epoch16 libpath version .. automethod:: call .. automethod:: check_status .. automethod:: datetime_to_epoch .. automethod:: datetime_to_epoch16 .. automethod:: datetime_to_tt2000 .. automethod:: epoch_to_datetime .. automethod:: epoch_to_epoch16 .. automethod:: epoch_to_num .. automethod:: epoch_to_tt2000 .. automethod:: epoch16_to_datetime .. automethod:: epoch16_to_epoch .. automethod:: epoch16_to_tt2000 .. automethod:: set_backward .. attribute:: supports_int8 True if this library supports INT8 and TIME_TT2000 types; else False. .. automethod:: tt2000_to_datetime .. automethod:: tt2000_to_epoch .. 
automethod:: tt2000_to_epoch16 .. method:: v_datetime_to_epoch(datetime) A vectorized version of :meth:`datetime_to_epoch` which takes a numpy array of datetimes as input and returns an array of epochs. .. method:: v_datetime_to_epoch16(datetime) A vectorized version of :meth:`datetime_to_epoch16` which takes a numpy array of datetimes as input and returns an array of epoch16. .. method:: v_datetime_to_tt2000(datetime) A vectorized version of :meth:`datetime_to_tt2000` which takes a numpy array of datetimes as input and returns an array of TT2000. .. method:: v_epoch_to_datetime(epoch) A vectorized version of :meth:`epoch_to_datetime` which takes a numpy array of epochs as input and returns an array of datetimes. .. method:: v_epoch_to_tt2000(epoch) A vectorized version of :meth:`epoch_to_tt2000` which takes a numpy array of epochs as input and returns an array of tt2000s. .. method:: v_epoch16_to_datetime(epoch0, epoch1) A vectorized version of :meth:`epoch16_to_datetime` which takes a numpy array of epoch16 as input and returns an array of datetimes. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_epoch16_to_tt2000(epoch16) A vectorized version of :meth:`epoch16_to_tt2000` which takes a numpy array of epoch16 as input and returns an array of tt2000s. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_tt2000_to_datetime(tt2000) A vectorized version of :meth:`tt2000_to_datetime` which takes a numpy array of tt2000 as input and returns an array of datetimes. .. method:: v_tt2000_to_epoch(tt2000) A vectorized version of :meth:`tt2000_to_epoch` which takes a numpy array of tt2000 as input and returns an array of epochs. .. method:: v_tt2000_to_epoch16(tt2000) A vectorized version of :meth:`tt2000_to_epoch16` which takes a numpy array of tt2000 as input and returns an array of epoch16. .. attribute:: libpath The path where pycdf found the CDF C library, potentially useful in debugging. If this contains just the name of a file (with no path information), then the system linker found the library for pycdf. On Linux, ``ldconfig -p`` may be useful for displaying the system's library resolution. .. attribute:: version Version of the CDF library, (version, release, increment, subincrement) """ def __init__(self, libpath=None, library=None): """Load the CDF C library. Searches for the library in the order: 1. Appropriately-named file in CDF_LIB 2. Appropriately-named file in CDF_BASE 3. Standard library search path @raise CDFError: BAD_DATA_TYPE if can't map types properly """ if not 'CDF_TMP' in os.environ: os.environ['CDF_TMP'] = tempfile.gettempdir() if not library: if not libpath: self.libpath, self._library = self._find_lib() if self._library is None: raise Exception(( 'Cannot load CDF C library; checked {0}. 
' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(self.libpath))) else: self._library = ctypes.CDLL(libpath) self.libpath = libpath else: self._library = library self.libpath = libpath self._library.CDFlib.restype = ctypes.c_long #commonly used, so set it up here self._library.EPOCHbreakdown.restype = ctypes.c_long self._library.computeEPOCH.restype = ctypes.c_double self._library.computeEPOCH.argtypes = [ctypes.c_long] * 7 self._library.computeEPOCH16.restype = ctypes.c_double self._library.computeEPOCH16.argtypes = [ctypes.c_long] * 10 + \ [ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDFsetFileBackward'): self._library.CDFsetFileBackward.restype = None self._library.CDFsetFileBackward.argtypes = [ctypes.c_long] #Map old name to the 3.7.1+ name if not hasattr(self._library, 'computeTT2000') \ and hasattr(self._library, 'CDF_TT2000_from_UTC_parts'): self._library.computeTT2000 \ = self._library.CDF_TT2000_from_UTC_parts if hasattr(self._library, 'computeTT2000'): self._library.computeTT2000.restype = ctypes.c_longlong self._library.computeTT2000.argtypes = \ [ctypes.c_double] *9 #Map old name to the 3.7.1+ name if not hasattr(self._library, 'breakdownTT2000') \ and hasattr(self._library, 'CDF_TT2000_to_UTC_parts'): self._library.breakdownTT2000 \ = self._library.CDF_TT2000_to_UTC_parts if hasattr(self._library, 'breakdownTT2000'): self._library.breakdownTT2000.restype = None self._library.breakdownTT2000.argtypes = \ [ctypes.c_longlong] + [ctypes.POINTER(ctypes.c_double)] * 9 if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH'): self._library.CDF_TT2000_to_UTC_EPOCH.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH.argtypes = [ctypes.c_longlong] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH'): self._library.CDF_TT2000_from_UTC_EPOCH.restype = ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH.argtypes = [ctypes.c_double] if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH16'): self._library.CDF_TT2000_to_UTC_EPOCH16.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH16.argtypes = \ [ctypes.c_longlong, ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH16'): self._library.CDF_TT2000_from_UTC_EPOCH16.restype = \ ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH16.argtypes = \ [ctypes.POINTER(ctypes.c_double * 2)] #Get CDF version information ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) sub = ctypes.c_char(b' ') self.call(const.GET_, const.LIB_VERSION_, ctypes.byref(ver), const.GET_, const.LIB_RELEASE_, ctypes.byref(rel), const.GET_, const.LIB_INCREMENT_, ctypes.byref(inc), const.GET_, const.LIB_subINCREMENT_, ctypes.byref(sub)) ver = ver.value rel = rel.value inc = inc.value sub = sub.value self.version = (ver, rel, inc, sub) self._del_middle_rec_bug = ver < 3 or (ver == 3 and (rel < 4 or (rel == 4 and inc < 1))) self.supports_int8 = (ver > 3 or (ver == 3 and rel >=4)) self.cdftypenames = {const.CDF_BYTE.value: 'CDF_BYTE', const.CDF_CHAR.value: 'CDF_CHAR', const.CDF_INT1.value: 'CDF_INT1', const.CDF_UCHAR.value: 'CDF_UCHAR', const.CDF_UINT1.value: 'CDF_UINT1', const.CDF_INT2.value: 'CDF_INT2', const.CDF_UINT2.value: 'CDF_UINT2', const.CDF_INT4.value: 'CDF_INT4', const.CDF_UINT4.value: 'CDF_UINT4', const.CDF_INT8.value: 'CDF_INT8', const.CDF_FLOAT.value: 'CDF_FLOAT', const.CDF_REAL4.value: 'CDF_REAL4', const.CDF_DOUBLE.value: 'CDF_DOUBLE', const.CDF_REAL8.value: 'CDF_REAL8', const.CDF_EPOCH.value: 'CDF_EPOCH', 
const.CDF_EPOCH16.value: 'CDF_EPOCH16', const.CDF_TIME_TT2000.value: 'CDF_TIME_TT2000', } self.numpytypedict = {const.CDF_BYTE.value: numpy.int8, const.CDF_CHAR.value: numpy.int8, const.CDF_INT1.value: numpy.int8, const.CDF_UCHAR.value: numpy.uint8, const.CDF_UINT1.value: numpy.uint8, const.CDF_INT2.value: numpy.int16, const.CDF_UINT2.value: numpy.uint16, const.CDF_INT4.value: numpy.int32, const.CDF_UINT4.value: numpy.uint32, const.CDF_INT8.value: numpy.int64, const.CDF_FLOAT.value: numpy.float32, const.CDF_REAL4.value: numpy.float32, const.CDF_DOUBLE.value: numpy.float64, const.CDF_REAL8.value: numpy.float64, const.CDF_EPOCH.value: numpy.float64, const.CDF_EPOCH16.value: numpy.dtype((numpy.float64, 2)), const.CDF_TIME_TT2000.value: numpy.int64, } self.timetypes = [const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value] if not self.supports_int8: del self.cdftypenames[const.CDF_INT8.value] del self.numpytypedict[const.CDF_INT8.value] del self.cdftypenames[const.CDF_TIME_TT2000.value] del self.numpytypedict[const.CDF_TIME_TT2000.value] elif sys.platform.startswith('linux') \ and os.uname()[4].startswith('arm') \ and hasattr(self._library, 'computeTT2000') \ and self._library.computeTT2000( 2010, 1, 1, 0, 0, 0, 0, 0, 0) != 315576066184000000: #TT2000 call failed, so probably need to type-pun #double arguments to variadic functions. #Calling convention for non-variadic functions with floats #is unique, but convention for ints is same as variadic. #So type-pun arguments to integers to force that calling #convention. if ctypes.sizeof(ctypes.c_longlong) != \ ctypes.sizeof(ctypes.c_double): warnings.warn('ARM with unknown type sizes; ' 'TT2000 functions will not work.') else: self._library.computeTT2000.argtypes = \ [ctypes.c_longlong] * 9 c_ll_p = ctypes.POINTER(ctypes.c_longlong) if self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( 2010)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) != 315576066184000000: warnings.warn('ARM with unknown calling convention; ' 'TT2000 functions will not work.') self.datetime_to_tt2000 = self._datetime_to_tt2000_typepunned v_epoch16_to_datetime = numpy.frompyfunc( self.epoch16_to_datetime, 2, 1) self.v_epoch16_to_datetime = \ lambda x: v_epoch16_to_datetime(x[..., 0], x[..., 1]) self.v_epoch_to_datetime = numpy.frompyfunc( self.epoch_to_datetime, 1, 1) self.v_tt2000_to_datetime = numpy.frompyfunc( self.tt2000_to_datetime, 1, 1) self.v_datetime_to_epoch = numpy.vectorize( self.datetime_to_epoch, otypes=[numpy.float64]) v_datetime_to_epoch16 = numpy.frompyfunc( self.datetime_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. 
We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_datetime_to_epoch16(x): retval = numpy.require(v_datetime_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_datetime_to_epoch16 = _v_datetime_to_epoch16 self.v_datetime_to_tt2000 = numpy.vectorize( self.datetime_to_tt2000, otypes=[numpy.int64]) self.v_epoch_to_tt2000 = numpy.vectorize( self.epoch_to_tt2000, otypes=[numpy.int64]) self.v_tt2000_to_epoch = numpy.vectorize( self.tt2000_to_epoch, otypes=[numpy.float64]) v_epoch16_to_tt2000 = numpy.frompyfunc( self.epoch16_to_tt2000, 2, 1) self.v_epoch16_to_tt2000 = \ lambda x: v_epoch16_to_tt2000(x[..., 0], x[..., 1]) v_tt2000_to_epoch16 = numpy.frompyfunc( self.tt2000_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_tt2000_to_epoch16(x): retval = numpy.require(v_tt2000_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_tt2000_to_epoch16 = _v_tt2000_to_epoch16 if not self.supports_int8: self.datetime_to_tt2000 = self._bad_tt2000 self.tt2000_to_datetime = self._bad_tt2000 self.v_datetime_to_tt2000 = self._bad_tt2000 self.v_tt2000_to_datetime = self._bad_tt2000 self.epoch_to_tt2000 = self._bad_tt2000 self.v_epoch_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch = self._bad_tt2000 self.v_tt2000_to_epoch = self._bad_tt2000 self.epoch_16_to_tt2000 = self._bad_tt2000 self.v_epoch16_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch16 = self._bad_tt2000 self.v_tt2000_to_epoch16 = self._bad_tt2000 #Default to V2 CDF self.set_backward(True) @staticmethod def _find_lib(): """ Search for the CDF library Searches in likely locations for CDF libraries and attempts to load them. Stops at first successful load and, if fails, reports all the files that were tried as libraries. Returns ======= out : tuple This is either (path to library, loaded library) or, in the event of failure, (None, list of libraries tried) """ failed = [] for libpath in Library._lib_paths(): try: lib = ctypes.CDLL(libpath) except: failed.append(libpath) else: return libpath, lib return (failed, None) @staticmethod def _lib_paths(): """Find candidate paths for the CDF library Does not check that the library is actually in any particular directory, just returns a list of possible locations, in priority order. Returns ======= out : generator of str paths that look like the CDF library """ #What the library might be named names = { 'win32': ['cdf.dll'], 'darwin': ['libcdf.dylib', 'cdf.dylib', 'libcdf.so'], 'linux2': ['libcdf.so'], 'linux': ['libcdf.so'], } names = names.get(sys.platform, ['libcdf.so']) #All existing CDF-library-like paths within a directory search_dir = lambda x: \ [os.path.join(x, fname) for fname in names if os.path.exists(os.path.join(x, fname))] # Only use anaconda locations... # Defined during builds ... if 'PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['PREFIX'], 'lib')): yield p # defined when conda is activated ... 
if 'CONDA_PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'lib')): yield p # Special subdirectory for anaconda unix packages on windows if 'LIBRARY_BIN' in os.environ: for p in search_dir(os.environ['LIBRARY_BIN']): yield p ctypespath = ctypes.util.find_library( 'cdf.dll' if sys.platform == 'win32' else 'cdf') if ctypespath: yield ctypespath def check_status(self, status, ignore=()): """ Raise exception or warning based on return status of CDF call Parameters ========== status : int status returned by the C library Other Parameters ================ ignore : sequence of ctypes.c_long CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. (Default none). Raises ====== CDFError : if status < CDF_WARN, indicating an error Warns ===== CDFWarning : if CDF_WARN <= status < CDF_OK, indicating a warning. Returns ======= out : int status (unchanged) """ if status == const.CDF_OK or status in ignore: return status if status < const.CDF_WARN: raise CDFError(status) else: warning = CDFWarning(status) warning.warn() return status def call(self, *args, **kwargs): """ Call the CDF internal interface Passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with :meth:`check_status`. Terminal NULL is automatically added to args. Parameters ========== args : various, see :mod:`ctypes` Passed directly to the CDF library interface. Useful constants are defined in the :mod:`~pycdf.const` module. Other Parameters ================ ignore : sequence of CDF statuses sequence of CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. Returns ======= out : int CDF status from the library Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ if 'ignore' in kwargs: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) ), kwargs['ignore']) else: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) )) def set_backward(self, backward=True): """ Set backward compatibility mode for new CDFs Unless backward compatible mode is set, CDF files created by the version 3 library can not be read by V2. Parameters ========== backward : boolean Set backward compatible mode if True; clear it if False. Raises ====== ValueError : if backward=False and underlying CDF library is V2 """ if self.version[0] < 3: if not backward: raise ValueError( 'Cannot disable backward-compatible mode for CDF version 2.') else: return self._library.CDFsetFileBackward(const.BACKWARDFILEon if backward else const.BACKWARDFILEoff) def epoch_to_datetime(self, epoch): """ Converts a CDF epoch value to a datetime Parameters ========== epoch : float epoch value from CDF Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
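For instance, a round trip through :meth:`datetime_to_epoch` (an illustrative sketch, assuming the library loaded successfully as ``pycdf.lib``):
>>> import datetime, pycdf
>>> pycdf.lib.epoch_to_datetime(pycdf.lib.datetime_to_epoch(datetime.datetime(2010, 1, 1)))
datetime.datetime(2010, 1, 1, 0, 0)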
See Also ======== v_epoch_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) self._library.EPOCHbreakdown(ctypes.c_double(epoch), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999000) else: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, msec.value * 1000) def datetime_to_epoch(self, dt): """ Converts a Python datetime to a CDF Epoch value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : float epoch corresponding to dt See Also ======== v_datetime_to_epoch """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) micro = dt.microsecond % 1000 if micro >= 500 and dt.year < 9999: dt += datetime.timedelta(0, 0, 1000) return self._library.computeEPOCH(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000)) def epoch16_to_datetime(self, epoch0, epoch1): """ Converts a CDF epoch16 value to a datetime .. note:: The call signature has changed since SpacePy 0.1.2. Formerly this method took a single argument with two values; now it requires two arguments (one for each value). To convert existing code, replace ``epoch16_to_datetime(epoch)`` with ``epoch16_to_datetime(*epoch)``. Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. See Also ======== v_epoch16_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) usec = ctypes.c_long(0) nsec = ctypes.c_long(0) psec = ctypes.c_long(0) self._library.EPOCH16breakdown((ctypes.c_double * 2)(epoch0, epoch1), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec), ctypes.byref(psec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) micro = int(float(msec.value) * 1000 + float(usec.value) + float(nsec.value) / 1000 + float(psec.value) / 1e6 + 0.5) if micro < 1000000: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_epoch16(self, dt): """ Converts a Python datetime to a CDF Epoch16 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : list of float epoch16 corresponding to dt See Also ======== v_datetime_to_epoch16 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) #Default to "illegal epoch" epoch16 = (ctypes.c_double * 2)(-1., -1.) 
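#computeEPOCH16 fills the epoch16 buffer in place and returns a nonzero
#status on failure; the failure branch below keeps the illegal epoch (-1., -1.)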
if self._library.computeEPOCH16(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0, 0, epoch16): return (-1., -1.) #Failure, so illegal epoch return (epoch16[0], epoch16[1]) def epoch_to_epoch16(self, epoch): """ Converts a CDF EPOCH to a CDF EPOCH16 value Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : (double, double) EPOCH16 corresponding to epoch """ e = numpy.require(epoch, numpy.float64) s = numpy.trunc(e / 1000.0) #ugly numpy stuff, probably a better way.... res = numpy.hstack((s, (e - s * 1000.0) * 1e9)) if len(res) <= 2: return res newshape = list(res.shape[0:-2]) newshape.append(res.shape[-1] // 2) newshape.append(2) return numpy.rollaxis(res.reshape(newshape), -1, -2) def epoch_to_num(self, epoch): """ Convert CDF EPOCH to matplotlib number. Same output as :func:`~matplotlib.dates.date2num` and useful for plotting large data sets without converting the times through datetime. Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : double Floating point number representing days since 0001-01-01. """ #date2num day 1 is 1/1/1 00UT #epoch 1/1/1 00UT is 31622400000.0 (millisecond) return (epoch - 31622400000.0) / (24 * 60 * 60 * 1000.0) + 1.0 def epoch16_to_epoch(self, epoch16): """ Converts a CDF EPOCH16 to a CDF EPOCH value Parameters ========== epoch16 : (double, double) EPOCH16 to convert. Lists and numpy arrays are acceptable. LAST dimension should be 2: the two pairs of EPOCH16 Returns ======= out : double EPOCH corresponding to epoch16 """ e = numpy.require(epoch16, numpy.float64) return e[..., 0] * 1000.0 + numpy.round(e[..., 1] / 1e9) def tt2000_to_datetime(self, tt2000): """ Converts a CDF TT2000 value to a datetime .. note:: Although TT2000 values support leapseconds, Python's datetime object does not. Any times after 23:59:59.999999 will be truncated to 23:59:59.999999. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
See Also ======== v_tt2000_to_datetime """ yyyy = ctypes.c_double(0) mm = ctypes.c_double(0) dd = ctypes.c_double(0) hh = ctypes.c_double(0) min = ctypes.c_double(0) sec = ctypes.c_double(0) msec = ctypes.c_double(0) usec = ctypes.c_double(0) nsec = ctypes.c_double(0) self._library.breakdownTT2000( ctypes.c_longlong(tt2000), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) sec = int(sec.value) if sec >= 60: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), 59, 999999) micro = int(msec.value * 1000 + usec.value + nsec.value / 1000 + 0.5) if micro < 1000000: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_tt2000(self, dt): """ Converts a Python datetime to a CDF TT2000 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0) def _datetime_to_tt2000_typepunned(self, dt): """ Converts a Python datetime to a CDF TT2000 value Typepunned version that passes doubles as longlongs, to get around ARM calling convention oddness. Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ c_ll_p = ctypes.POINTER(ctypes.c_longlong) if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( dt.year)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.month)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.day)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.hour)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.minute)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.second)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond // 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond % 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) def epoch_to_tt2000(self, epoch): """ Converts a CDF EPOCH to a CDF TT2000 value Parameters ========== epoch : double EPOCH to convert Returns ======= out : int tt2000 corresponding to epoch See Also ======== v_epoch_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH(epoch) def tt2000_to_epoch(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH .. note:: Although TT2000 values support leapseconds, CDF EPOCH values do not. 
Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double EPOCH corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch """ return self._library.CDF_TT2000_to_UTC_EPOCH(tt2000) def epoch16_to_tt2000(self, epoch0, epoch1): """ Converts a CDF epoch16 value to TT2000 .. note:: Because TT2000 does not support picoseconds, the picoseconds value in epoch is ignored (i.e., truncated.) Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : long TT2000 corresponding to epoch. See Also ======== v_epoch16_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH16( (ctypes.c_double * 2)(epoch0, epoch1)) def tt2000_to_epoch16(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH16 .. note:: Although TT2000 values support leapseconds, CDF EPOCH16 values do not. Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double, double EPOCH16 corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch16 """ #Default to "illegal epoch" if isn't populated epoch16 = (ctypes.c_double * 2)(-1., -1.) if self._library.CDF_TT2000_to_UTC_EPOCH16(tt2000, epoch16): return (-1., -1.) #Failure; illegal epoch return (epoch16[0], epoch16[1]) def _bad_tt2000(*args, **kwargs): """Convenience function for complaining that TT2000 not supported""" raise NotImplementedError( 'TT2000 functions require CDF library 3.4.0 or later') def download_library(): """Download and install the CDF library""" if sys.platform != 'win32': raise NotImplementedError( 'CDF library install only supported on Windows') try: import html.parser as HTMLParser except ImportError: import HTMLParser #https://stackoverflow.com/questions/1713038/super-fails-with-error-typeerror-argument-1-must-be-type-not-classobj class LinkParser(HTMLParser.HTMLParser, object): def __init__(self, *args, **kwargs): self.links_found = [] super(LinkParser, self).__init__(*args, **kwargs) def handle_starttag(self, tag, attrs): if tag != 'a' or attrs[0][0] != 'href': return self.links_found.append(attrs[0][1]) import re import subprocess try: import urllib.request as u except ImportError: import urllib as u # Removed reference to spacepy #import spacepy #if spacepy.config.get('user_agent', None): # class AppURLopener(u.FancyURLopener): # version = spacepy.config['user_agent'] # u._urlopener = AppURLopener() baseurl = 'https://spdf.sci.gsfc.nasa.gov/pub/software/cdf/dist/' url = u.urlopen(baseurl) listing = url.read() url.close() p = LinkParser() p.feed(listing) cdfdist = [l for l in p.links_found if re.match('^cdf3\d_\d(?:_\d)?/$', l)] if not cdfdist: raise RuntimeError( "Couldn't find CDF distribution directory to download") cdfdist.sort(key=lambda x: x.rstrip('/').split('_')) cdfverbase = cdfdist[-1].rstrip('/') instfname = cdfverbase + ('_0' if cdfverbase.count('_') == 1 else '') + \ '-setup-{0}.exe'.format(len('%x' % sys.maxsize)*4) insturl = baseurl + cdfverbase + '/windows/' + instfname tmpdir = tempfile.mkdtemp() try: fname, status = u.urlretrieve(insturl, os.path.join(tmpdir, instfname)) subprocess.check_call([fname, '/install', '/q1'], shell=False) finally: shutil.rmtree(tmpdir) _libpath, _library = Library._find_lib() 
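#Load the CDF C library once at import time. If loading failed, _library
#is None and _libpath holds the candidate paths that were tried.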
if _library is None: raise Exception(('Cannot load CDF C library; checked {0}. ' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(_libpath))) from . import const lib = Library(_libpath, _library) """Module global library object. Initialized at module load time so all classes have ready access to the CDF library and a common state. E.g.: >>> import pycdf >>> pycdf.lib.version (3, 3, 0, ' ') """ class CDFException(Exception): """ Base class for errors or warnings in the CDF library. Not normally used directly, but in subclasses :class:`CDFError` and :class:`CDFWarning`. Error messages provided by this class are looked up from the underlying C library. """ def __init__(self, status): """ Create a CDF Exception Uses CDF C library to look up an appropriate error message. Parameters ========== status : ctypes.c_long CDF status """ self.status = status self.string = 'CDF error ' + repr(status) + ', unable to get details.' message = ctypes.create_string_buffer(const.CDF_STATUSTEXT_LEN + 1) try: retval = lib._library.CDFlib(const.SELECT_, const.CDF_STATUS_, ctypes.c_long(status), const.GET_, const.STATUS_TEXT_, message, const.NULL_) if retval == const.CDF_OK: if isinstance(message.value, str): self.string = message.value elif isinstance(message.value, bytes): self.string = message.value.decode() except: pass def __str__(self): """ Error string associated with the library error. Returns ======= out : str Error message from the CDF library. """ return self.string class CDFError(CDFException): """Raised for an error in the CDF library.""" pass class CDFWarning(CDFException, UserWarning): """Used for a warning in the CDF library.""" def warn(self, level=4): """ Issues a warning based on the information stored in this exception Intended for use in check_status or similar wrapper function. Other Parameters ================ level : int optional (default 4), how far up the stack the warning should be reported. Passed directly to :func:`warnings.warn`. """ warnings.warn(self, self.__class__, level) class EpochError(Exception): """Used for errors in epoch routines""" pass def _compress(obj, comptype=None, param=None): """Set or check the compression of a :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param obj: object on which to set or check compression @type obj: :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param comptype: type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :py:mod:`pycdf.const`. If not specified, will not change compression. @type comptype: ctypes.c_long @param param: Compression parameter, see CDF CRM 4.10 and :py:mod:`pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
@type param: ctypes.c_long @return: (comptype, param) currently in effect @rtype: tuple """ if isinstance(obj, CDF): COMPRESSION_ = const.CDF_COMPRESSION_ elif isinstance(obj, Var): COMPRESSION_ = const.zVAR_COMPRESSION_ else: raise ValueError('Must specify a CDF or Var type.') validparams = {const.NO_COMPRESSION.value: [ctypes.c_long(0)], const.RLE_COMPRESSION.value: [const.RLE_OF_ZEROs], const.HUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.AHUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.GZIP_COMPRESSION.value: [ctypes.c_long(5), ctypes.c_long(1), ctypes.c_long(2), ctypes.c_long(3), ctypes.c_long(4), ctypes.c_long(6), ctypes.c_long(7), ctypes.c_long(8), ctypes.c_long(9), ], } comptypes = [const.NO_COMPRESSION, const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION, const.GZIP_COMPRESSION] comptypevalues = [i.value for i in comptypes] if comptype != None: if not hasattr(comptype, 'value'): comptype = ctypes.c_long(comptype) if param is None: if not comptype.value in validparams: raise CDFError(const.BAD_COMPRESSION) param = validparams[comptype.value][0] paramlist = (ctypes.c_long * 1)(param) obj._call(const.PUT_, COMPRESSION_, comptype, paramlist) params = (ctypes.c_long * const.CDF_MAX_PARMS)(*([0] * const.CDF_MAX_PARMS)) comptype = ctypes.c_long(0) percent = ctypes.c_long(0) obj._call(const.GET_, COMPRESSION_, ctypes.byref(comptype), ctypes.byref(params), ctypes.byref(percent)) param = params[0] if not comptype.value in comptypevalues: raise CDFError(const.BAD_COMPRESSION) validparamvalues = [i.value for i in validparams[comptype.value]] if not param in validparamvalues: raise CDFError(const.BAD_COMPRESSION_PARM) comptype = comptypes[comptypevalues.index(comptype.value)] if comptype in (const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION): param = validparams[comptype.value][validparamvalues.index(param)] return (comptype, param) class CDF(MutableMapping): """ Python object representing a CDF file. Open or create a CDF file by creating an object of this class. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. A readonly CDF with many variables may be slow to close. See :meth:`readonly`. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :meth:`close` or :meth:`save` when done. .. note:: Existing CDF files are opened read-only by default, see :meth:`readonly` to change. CDF supports the `with <http://docs.python.org/tutorial/inputoutput.html#methods-of-file-objects>`_ keyword, like other file objects, so: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... #do brilliant things with the CDF will open the CDF, execute the indented statements, and close the CDF when finished or when an error occurs. The `python docs <http://docs.python.org/reference/compound_stmts.html#with>`_ include more detail on this 'context manager' ability. 
CDF objects behave like a python `dictionary <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_, where the keys are names of variables in the CDF, and the values, :class:`Var` objects. As a dictionary, they are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_ and it is easy to loop over all of the variables in a file. Some examples: #. List the names of all variables in the open CDF ``cdffile``: >>> cdffile.keys() >>> for k in cdffile: #Alternate ... print(k) #. Get a :class:`Var` object for the variable named ``Epoch``: >>> epoch = cdffile['Epoch'] #. Determine if a CDF contains a variable named ``B_GSE``: >>> if 'B_GSE' in cdffile: ... print('B_GSE is in the file') ... else: ... print('B_GSE is not in the file') #. Find how many variables are in the file: >>> print(len(cdffile)) #. Delete the variable ``Epoch`` from the open CDF file ``cdffile``: >>> del cdffile['Epoch'] #. Display a summary of variables and types in open CDF file ``cdffile``: >>> print(cdffile) #. Open the CDF named ``cdf_filename.cdf``, read *all* the data from all variables into dictionary ``data``, and close it when done or if an error occurs: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... data = cdffile.copy() This last example can be very inefficient as it reads the entire CDF. Normally it's better to treat the CDF as a dictionary and access only the data needed, which will be pulled transparently from disc. See :class:`Var` for more subtle examples. Potentially useful dictionary methods and related functions: - `in <http://docs.python.org/reference/expressions.html#in>`_ - `keys <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_ - :py:func:`len` - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - :py:func:`sorted` - :py:func:`~spacepy.toolbox.dictree` The CDF user's guide section 2.2 has more background information on CDF files. The :attr:`~CDF.attrs` Python attribute acts as a dictionary referencing CDF attributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`gAttrList` for more on the dictionary of global attributes. Creating a new CDF from a master (skeleton) CDF has similar syntax to opening one: >>> cdffile = pycdf.CDF('cdf_filename.cdf', 'master_cdf_filename.cdf') This creates and opens ``cdf_filename.cdf`` as a copy of ``master_cdf_filename.cdf``. Using a skeleton CDF is recommended over making a CDF entirely from scratch, but this is possible by specifying a blank master: >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') When CDFs are created in this way, they are opened read-write, see :py:meth:`readonly` to change. By default, new CDFs (without a master) are created in version 2 (backward-compatible) format. To create a version 3 CDF, use :meth:`Library.set_backward`: >>> pycdf.lib.set_backward(False) >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') Add variables by direct assignment, which will automatically set type and dimension based on the data provided: >>> cdffile['new_variable_name'] = [1, 2, 3, 4] or, if more control is needed over the type and dimensions, use :py:meth:`new`. Although it is supported to assign Var objects to Python variables for convenience, there are some minor pitfalls that can arise when changing a CDF that will not affect most users. 
This is only a concern when assigning a zVar object to a Python variable, changing the CDF through some other variable, and then trying to use the zVar object via the originally assigned variable. Deleting a variable: >>> var = cdffile['Var1'] >>> del cdffile['Var1'] >>> var[0] #fail, no such variable Renaming a variable: >>> var = cdffile['Var1'] >>> cdffile['Var1'].rename('Var2') >>> var[0] #fail, no such variable Renaming via the same variable works: >>> var = cdffile['Var1'] >>> var.rename('Var2') >>> var[0] #succeeds, aware of new name Deleting a variable and then creating another variable with the same name may lead to some surprises: >>> var = cdffile['Var1'] >>> var[...] = [1, 2, 3, 4] >>> del cdffile['Var1'] >>> cdffile.new('Var1', data=[5, 6, 7, 8]) >>> var[...] [5, 6, 7, 8] .. autosummary:: ~CDF.attr_num ~CDF.attrs ~CDF.add_attr_to_cache ~CDF.add_to_cache ~CDF.backward ~CDF.checksum ~CDF.clear_attr_from_cache ~CDF.clear_from_cache ~CDF.clone ~CDF.close ~CDF.col_major ~CDF.compress ~CDF.copy ~CDF.from_data ~CDF.new ~CDF.raw_var ~CDF.readonly ~CDF.save ~CDF.var_num ~CDF.version .. attribute:: CDF.attrs Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. .. attribute:: CDF.backward True if this CDF was created in backward-compatible mode (for opening with CDF library before 3.x) .. automethod:: add_to_cache .. automethod:: add_attr_to_cache .. automethod:: attr_num .. automethod:: checksum .. automethod:: clear_from_cache .. automethod:: clear_attr_from_cache .. automethod:: clone .. automethod:: close .. automethod:: col_major .. automethod:: compress .. automethod:: copy .. automethod:: from_data .. automethod:: new .. automethod:: raw_var .. automethod:: readonly .. automethod:: save .. automethod:: var_num .. automethod:: version """ def __init__(self, pathname, masterpath=None, create=None, readonly=None): """Open or create a CDF file. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. Raises ====== CDFError if CDF library reports an error CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save` when done. 
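Creating a new CDF from a master file follows the same pattern (an illustrative sketch; ``master.cdf`` stands in for an existing skeleton file):
>>> newcdf = pycdf.CDF('new_filename.cdf', 'master.cdf')
>>> newcdf.close()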
""" if masterpath is not None: #Looks like we want to create if create is False: raise ValueError('Cannot specify a master CDF without creating a CDF') if readonly is True: raise ValueError('Cannot create a CDF in readonly mode') if create and readonly: raise ValueError('Cannot create a CDF in readonly mode') try: self.pathname = pathname.encode() except AttributeError: raise ValueError( 'pathname must be string-like: {0}'.format(pathname)) self._handle = ctypes.c_void_p(None) self._opened = False if masterpath is None and not create: self._open(True if readonly is None else readonly) elif masterpath: self._from_master(masterpath.encode()) else: self._create() lib.call(const.SELECT_, const.CDF_zMODE_, ctypes.c_long(2)) self._attrlistref = weakref.ref(gAttrList(self)) self.backward = self.version()[0] < 3 self._var_nums = {} """Cache of name-to-number mappings for variables in this CDF""" self._attr_info = {} """Cache of name-to-(number, global) mappings for attributes in this CDF""" def __del__(self): """Destructor; called when CDF object is destroyed. Close CDF file if there is still a valid handle. .. note:: To avoid data loss, explicitly call :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save`. """ if self._opened: self.close() def __delitem__(self, name): """Delete a zVariable in this CDF, by name or number Parameters ========== name : string or int Name or number of the CDF variable .. note: Variable numbers may change if variables are added or removed. Examples ======== Delete the variable ``Epoch`` from the open CDF file ``cdffile``. >>> del cdffile['Epoch'] """ self[name]._delete() def __enter__(self): """Context manager entrance function.""" return self def __exit__(self, type, value, traceback): """Context manager exit function. Close CDF file. """ self.close() def __getitem__(self, name): """Gets a zVariable in this CDF, by name or number The CDF acts like a dict @param name: Name or number of the CDF variable @type name: string or int @return: CDF variable named or numbered L{name} @rtype: :py:class:`pycdf.Var` @raise KeyError: for pretty much any problem in lookup @note: variable numbers may change if variables are added or removed. """ try: return Var(self, name) except CDFException as e: raise KeyError('{0}: {1}'.format(name, e)) def __setitem__(self, name, data): """Writes data to a zVariable in this CDF If the zVariable does not exist, will create one matching L{data}. If it does exist, will attempt to write L{data} to it without changing the type or dimensions. @param name: name or number of the variable to write @type name: str or int @param data: data to write, or a :py:class:`pycdf.Var` to copy """ if isinstance(data, Var): self.clone(data, name) elif name in self: self[name][...] 
= data if hasattr(data, 'attrs'): self[name].attrs.clone(data.attrs) else: self.new(name, data) def __iter__(self, current = 0): """Iterates over zVars in CDF Iterators for dicts return keys @note: Returned in variable-number order """ while current < self.__len__(): name = self[current].name() value = (yield name) if value is None: current += 1 else: current = self[value]._num() current += 1 def __len__(self): """Implements 'length' of CDF (number of zVars) @return: number of zVars in the CDF @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, const.CDF_NUMzVARS_, ctypes.byref(count)) return count.value def __contains__(self, key): """Determines whether a particular variable name is in the CDF @note: Essentially an efficiency function; L{__iter__} is called if this isn't defined @param key: key/variable name to check @type key: string @return: True if L{key} is the name of a variable in CDF, else False @rtype: Boolean """ try: foo = self[key] return True except KeyError as e: expected = str(key) + \ ": NO_SUCH_VAR: Named variable not found in this CDF." if expected in e.args: return False raise def __repr__(self): """Returns representation of CDF Cannot return anything that can be eval'd to create a copy of the CDF, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<CDF:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the CDF This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.CDF`, just the names, types, and sizes of all variables. (Attributes are not listed.) @return: description of the variables in the CDF @rtype: str """ if self._opened: return '\n'.join([key + ': ' + str(value) for (key, value) in sorted(self.items())]) #can get away with this sort because second value in tuple isn't #compared unless first are different, and variable name is unique. else: if isinstance(self.pathname, str): return 'Closed CDF {0}'.format(self.pathname) else: return 'Closed CDF {0}'.format(self.pathname.decode('ascii')) def _open(self, readonly=True): """Opens the CDF file (called on init) Will open an existing CDF file read/write. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.OPEN_, const.CDF_, self.pathname, ctypes.byref(self._handle)) self._opened = True if readonly: #Default is RW self.readonly(readonly) def _create(self): """Creates (and opens) a new CDF file Created at ``pathname``. Assumes zero-dimension r variables Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.CREATE_, const.CDF_, self.pathname, ctypes.c_long(0), (ctypes.c_long * 1)(0), ctypes.byref(self._handle)) self._opened = True def _from_master(self, master_path): """Creates a new CDF from a master CDF file ``master_path`` is copied to ``pathname`` and opened. Parameters ========== master_path : string location of the master CDF file Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. 
note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ if os.path.exists(self.pathname): raise CDFError(const.CDF_EXISTS) shutil.copy2(master_path, self.pathname) self._open(False) def _call(self, *args, **kwargs): """Select this CDF as current and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. Parameters ========== args : various, see :py:mod:`ctypes`. Passed directly to the CDF library interface. Useful constants are defined in the :doc:`const <pycdf_const>` module of this package. Returns ======= out : ctypes.c_long CDF status from the library .. note: Terminal NULL_ is automatically added to ``args``. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. """ return lib.call(const.SELECT_, const.CDF_, self._handle, *args, **kwargs) def clone(self, zVar, name=None, data=True): """ Clone a zVariable (from another CDF or this) into this CDF Parameters ========== zVar : :py:class:`Var` variable to clone Other Parameters ================ name : str Name of the new variable (default: name of the original) data : boolean (optional) Copy data, or only type, dimensions, variance, attributes? (default: True, copy data as well) Returns ======= out : :py:class:`Var` The newly-created zVar in this CDF """ if name is None: name = zVar.name() if name in self: del self[name] self.new(name, type=zVar.type(), recVary=zVar.rv(), dimVarys=zVar.dv(), dims=zVar._dim_sizes(), n_elements=zVar._nelems()) self[name].compress(*zVar.compress()) self[name].attrs.clone(zVar.attrs) if data: r = zVar._raw zVar._raw = True self.raw_var(name)[...] = zVar[...] zVar._raw = r return zVar def col_major(self, new_col=None): """ Finds the majority of this CDF file Other Parameters ================ new_col : boolean Specify True to change to column-major, False to change to row major, or do not specify to check the majority rather than changing it. (default is check only) Returns ======= out : boolean True if column-major, false if row-major """ if new_col != None: new_maj = const.COLUMN_MAJOR if new_col else const.ROW_MAJOR self._call(const.PUT_, const.CDF_MAJORITY_, new_maj) maj = ctypes.c_long(0) self._call(const.GET_, const.CDF_MAJORITY_, ctypes.byref(maj)) if not maj.value in (const.ROW_MAJOR.value, const.COLUMN_MAJOR.value): raise CDFError(const.BAD_MAJORITY) return maj.value == const.COLUMN_MAJOR.value def readonly(self, ro=None): """ Sets or check the readonly status of this CDF If the CDF has been changed since opening, setting readonly mode will have no effect. .. note:: Closing a CDF that has been opened readonly, or setting readonly False, may take a substantial amount of time if there are many variables in the CDF, as a (potentially large) cache needs to be cleared. Consider specifying ``readonly=False`` when opening the file if this is an issue. However, this may make some reading operations slower. Other Parameters ================ ro : Boolean True to set the CDF readonly, False to set it read/write, or leave out to check only. 
Returns ======= out : Boolean True if CDF is read-only, else False Raises ====== CDFError : if bad mode is set """ if ro == True: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYon) elif ro == False: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYoff) mode = ctypes.c_long(0) self._call(const.CONFIRM_, const.CDF_READONLY_MODE_, ctypes.byref(mode)) if mode.value == const.READONLYon.value: return True elif mode.value == const.READONLYoff.value: return False else: raise CDFError(const.BAD_READONLY_MODE.value) def checksum(self, new_val=None): """ Set or check the checksum status of this CDF. If checksums are enabled, the checksum will be verified every time the file is opened. Other Parameters ================ new_val : boolean True to enable checksum, False to disable, or leave out to simply check. Returns ======= out : boolean True if the checksum is enabled or False if disabled """ if new_val != None: self._call(const.PUT_, const.CDF_CHECKSUM_, const.MD5_CHECKSUM if new_val else const.NO_CHECKSUM) chk = ctypes.c_long(0) self._call(const.GET_, const.CDF_CHECKSUM_, ctypes.byref(chk)) if not chk.value in (const.MD5_CHECKSUM.value, const.NO_CHECKSUM.value): raise CDFError(const.BAD_CHECKSUM) return chk.value == const.MD5_CHECKSUM.value def close(self): """ Closes the CDF file Although called on object destruction (:meth:`~CDF.__del__`), to ensure all data are saved, the user should explicitly call :meth:`~CDF.close` or :meth:`~CDF.save`. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.CLOSE_, const.CDF_) self._opened = False def compress(self, comptype=None, param=None): """ Set or check the compression of this CDF Sets compression on entire *file*, not per-variable. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) Returns ======= out : tuple (comptype, param) currently in effect See Also ======== :meth:`Var.compress` Examples ======== Set file ``cdffile`` to gzip compression, compression level 9: >>> cdffile.compress(pycdf.const.GZIP_COMPRESSION, 9) """ return _compress(self, comptype, param) def new(self, name, data=None, type=None, recVary=True, dimVarys=None, dims=None, n_elements=None, compress=None, compress_param=None): """ Create a new zVariable in this CDF .. note:: Either ``data`` or ``type`` must be specified. If type is not specified, it is guessed from ``data``. Parameters ========== name : str name of the new variable Other Parameters ================ data data to store in the new variable. If this has a an ``attrs`` attribute (e.g., :class:`~spacepy.datamodel.dmarray`), it will be used to populate attributes of the new variable. type : ctypes.c_long CDF type of the variable, from :mod:`~pycdf.const`. See section 2.5 of the CDF user's guide for more information on CDF data types. recVary : boolean record variance of the variable (default True) dimVarys : list of boolean dimension variance of each dimension, default True for all dimensions. 
dims : list of int size of each dimension of this variable, default zero-dimensional. Note this is the dimensionality as defined by CDF, i.e., for record-varying variables it excludes the leading record dimension. See :py:class:`Var`. n_elements : int number of elements, should be 1 except for CDF_CHAR, for which it's the length of the string. compress : ctypes.c_long Compression to apply to this variable, default None. See :py:meth:`Var.compress`. compress_param : ctypes.c_long Compression parameter if compression used; reasonable default is chosen. See :py:meth:`Var.compress`. Returns ======= out : :py:class:`Var` the newly-created zVariable Raises ====== ValueError : if neither data nor sufficient typing information is provided. Notes ===== Any given data may be representable by a range of CDF types; if the type is not specified, pycdf will guess which the CDF types which can represent this data. This breaks down to: #. If input data is a numpy array, match the type of that array #. Proper kind (numerical, string, time) #. Proper range (stores highest and lowest number provided) #. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: #. Type that matches precision of data first, then #. integer type before float type, then #. Smallest type first, then #. signed type first, then #. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if ``data`` specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: #. absolute values between 0 and 3e-39 #. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. """ if type in (const.CDF_EPOCH16, const.CDF_INT8, const.CDF_TIME_TT2000) \ and self.backward: raise ValueError('Cannot use EPOCH16, INT8, or TIME_TT2000 ' 'in backward-compatible CDF') if not lib.supports_int8 and \ type in (const.CDF_INT8, const.CDF_TIME_TT2000): raise ValueError('INT8 and TIME_TT2000 require CDF library 3.4.0') if data is None: if type is None: raise ValueError('Must provide either data or a CDF type.') if dims is None: dims = [] if n_elements is None: n_elements = 1 else: (guess_dims, guess_types, guess_elements) = _Hyperslice.types(data) if dims is None: if recVary: if guess_dims == (): raise ValueError( 'Record-varying data cannot be scalar. 
' 'Specify NRV with CDF.new() or put data in array.') dims = guess_dims[1:] else: dims = guess_dims if type is None: type = guess_types[0] if type == const.CDF_EPOCH16.value and self.backward: type = const.CDF_EPOCH if n_elements is None: n_elements = guess_elements if dimVarys is None: dimVarys = [True for i in dims] recVary = const.VARY if recVary else const.NOVARY dimVarys = [const.VARY if dimVary else const.NOVARY for dimVary in dimVarys] if not hasattr(type, 'value'): type = ctypes.c_long(type) if type.value == const.CDF_INT8.value and not lib.supports_int8: raise ValueError( '64-bit integer support require CDF library 3.4.0') if type.value in (const.CDF_EPOCH16.value, const.CDF_INT8.value, const.CDF_TIME_TT2000.value) \ and self.backward: raise ValueError('Data requires EPOCH16, INT8, or TIME_TT2000; ' 'incompatible with backward-compatible CDF') new_var = Var(self, name, type, n_elements, dims, recVary, dimVarys) if compress != None: new_var.compress(compress, compress_param) if data is not None: new_var[...] = data if hasattr(data, 'attrs'): new_var.attrs.clone(data.attrs) return new_var def raw_var(self, name): """ Get a "raw" :class:`Var` object. Normally a :class:`Var` will perform translation of values for certain types (to/from Unicode for CHAR variables on Py3k, and to/from datetime for all time types). A "raw" object does not perform this translation, on read or write. This does *not* affect the data on disk, and in fact it is possible to maintain multiple Python objects with access to the same zVariable. Parameters ========== name : str name or number of the zVariable """ v = self[name] v._raw = True return v def save(self): """ Saves the CDF file but leaves it open. If closing the CDF, :meth:`close` is sufficient; there is no need to call :meth:`save` before :meth:`close`. .. note:: Relies on an undocumented call of the CDF C library, which is also used in the Java interface. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.SAVE_, const.CDF_) def copy(self): """ Make a copy of all data and attributes in this CDF Returns ======= out : :py:class:`CDFCopy` :class:`~spacepy.datamodel.SpaceData`-like object of all data """ return CDFCopy(self) def version(self): """ Get version of library that created this CDF Returns ======= out : tuple version of CDF library, in form (version, release, increment) """ ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) self._call(const.GET_, const.CDF_VERSION_, ctypes.byref(ver), const.GET_, const.CDF_RELEASE_, ctypes.byref(rel), const.GET_, const.CDF_INCREMENT_, ctypes.byref(inc)) return (ver.value, rel.value, inc.value) def _get_attrs(self): """Get attribute list Provide access to the CDF's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = gAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. 
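An illustrative sketch, assuming the open CDF ``cdffile`` contains a (hypothetical) global attribute named ``Project``:
>>> cdffile.attrs['Project']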
""") def var_num(self, varname): """Get the variable number of a particular variable name This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : int Variable number of this zvariable. """ num = self._var_nums.get(varname, None) if num is None: #Copied from Var._get, which can hopefully be thinned varNum = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMBER_, varname, ctypes.byref(varNum)) num = varNum.value self._var_nums[varname] = num return num def attr_num(self, attrname): """Get the attribute number and scope by attribute name This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : tuple attribute number, scope (True for global) of this attribute """ res = self._attr_info.get(attrname, None) if res is None: #Copied from Var._get, which can hopefully be thinned attrNum = ctypes.c_long(0) self._call(const.GET_, const.ATTR_NUMBER_, attrname, ctypes.byref(attrNum)) scope = ctypes.c_long(0) self._call(const.SELECT_, const.ATTR_, attrNum, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) res = (attrNum.value, scope) self._attr_info[attrname] = res return res def clear_attr_from_cache(self, attrname): """Mark an attribute deleted in the name-to-number cache Will remove an attribute, and all attributes with higher numbers, from the attribute cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the attribute. Not this is NOT a string in Python 3! """ num, scope = self.attr_num(attrname) #All numbers higher than this are renumbered for a, n in list(self._attr_info.items()): if n[0] >= num: del self._attr_info[a] def clear_from_cache(self, varname): """Mark a variable deleted in the name-to-number cache Will remove a variable, and all variables with higher numbers, from the variable cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! """ num = self.var_num(varname) #All numbers higher than this are renumbered for v, n in list(self._var_nums.items()): if n >= num: del self._var_nums[v] def add_attr_to_cache(self, attrname, num, scope): """Add an attribute to the name-to-number cache This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable scope : bool True if global scope; False if variable scope. 
""" self._attr_info[attrname] = (num, scope) def add_to_cache(self, varname, num): """Add a variable to the name-to-number cache This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable """ self._var_nums[varname] = num #Note there is no function for delete, currently handled in Var.rename #and Attr.rename by just deleting from the dict directly. Maybe this #should be differen (maybe should be possible to follow a variable across #a rename...) class Var(MutableSequence): """ A CDF variable. This object does not directly store the data from the CDF; rather, it provides access to the data in a format that much like a Python list or numpy :class:`~numpy.ndarray`. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. The CDF user's guide, section 2.3, provides background on variables. .. note:: Not intended to be created directly; use methods of :class:`CDF` to gain access to a variable. A record-varying variable's data are viewed as a hypercube of dimensions n_dims+1 (the extra dimension is the record number). They are indexed in row-major fashion, i.e. the last index changes most frequently / is contiguous in memory. If the CDF is column-major, the data are transformed to row-major before return. Non record-varying variables are similar, but do not have the extra dimension of record number. Variables can be subscripted by a multidimensional index to return the data. Indices are in row-major order with the first dimension representing the record number. If the CDF is column major, the data are reordered to row major. Each dimension is specified by standard Python `slice <http://docs.python.org/tutorial/introduction.html#strings>`_ notation, with dimensions separated by commas. The ellipsis fills in any missing dimensions with full slices. The returned data are lists; Python represents multidimensional arrays as nested lists. The innermost set of lists represents contiguous data. .. note:: numpy 'fancy indexing' is *not* supported. Degenerate dimensions are 'collapsed', i.e. no list of only one element will be returned if a single subscript is specified instead of a range. (To avoid this, specify a slice like 1:2, which starts with 1 and ends before 2). Two special cases: 1. requesting a single-dimension slice for a record-varying variable will return all data for that record number (or those record numbers) for that variable. 2. Requests for multi-dimensional variables may skip the record-number dimension and simply specify the slice on the array itself. In that case, the slice of the array will be returned for all records. In the event of ambiguity (e.g., single-dimension slice on a one-dimensional variable), case 1 takes priority. Otherwise, mismatch between the number of dimensions specified in the slice and the number of dimensions in the variable will cause an :exc:`~exceptions.IndexError` to be thrown. This all sounds very complicated but it is essentially attempting to do the 'right thing' for a range of slices. An unusual case is scalar (zero-dimensional) non-record-varying variables. Clearly they cannot be subscripted normally. 
In this case, use the ``[...]`` syntax meaning 'access all data.': >>> import pycdf >>> testcdf = pycdf.CDF('test.cdf', '') >>> variable = testcdf.new('variable', recVary=False, ... type=pycdf.const.CDF_INT4) >>> variable[...] = 10 >>> variable <Var: CDF_INT4 [] NRV > >>> variable[...] 10 Reading any empty non-record-varying variable will return an empty with the same *number* of dimensions, but all dimensions will be of zero length. The scalar is, again, a special case: due to the inability to have a numpy array which is both zero-dimensional and empty, reading an NRV scalar variable with no data will return an empty one-dimensional array. This is really not recommended. As a list type, variables are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_; iterating over a variable returns a single complete record at a time. This is all clearer with examples. Consider a variable ``B_GSM``, with three elements per record (x, y, z components) and fifty records in the CDF. Then: 1. ``B_GSM[0, 1]`` is the y component of the first record. 2. ``B_GSM[10, :]`` is a three-element list, containing x, y, and z components of the 11th record. As a shortcut, if only one dimension is specified, it is assumed to be the record number, so this could also be written ``B_GSM[10]``. 3. ``B_GSM[...]`` reads all data for ``B_GSM`` and returns it as a fifty-element list, each element itself being a three-element list of x, y, z components. Multidimensional example: consider fluxes stored as a function of pitch angle and energy. Such a variable may be called Flux and stored as a two-dimensional array, with the first dimension representing (say) ten energy steps and the second, eighteen pitch angle bins (ten degrees wide, centered from 5 to 175 degrees). Assume 100 records stored in the CDF (i.e. 100 different times). 1. ``Flux[4]`` is a list of ten elements, one per energy step, each element being a list of 18 fluxes, one per pitch bin. All are taken from the fifth record in the CDF. 2. ``Flux[4, :, 0:4]`` is the same record, all energies, but only the first four pitch bins (roughly, field-aligned). 3. ``Flux[..., 0:4]`` is a 100-element list (one per record), each element being a ten-element list (one per energy step), each containing fluxes for the first four pitch bins. This slicing notation is very flexible and allows reading specifically the desired data from the CDF. All data are, on read, converted to appropriate Python data types; EPOCH, EPOCH16, and TIME_TT2000 types are converted to :class:`~datetime.datetime`. Data are returned in numpy arrays. .. note:: Although pycdf supports TIME_TT2000 variables, the Python :class:`~datetime.datetime` object does not support leap seconds. Thus, on read, any seconds past 59 are truncated to 59.999999 (59 seconds, 999 milliseconds, 999 microseconds). Potentially useful list methods and related functions: - `count <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `in <http://docs.python.org/reference/expressions.html#in>`_ - `index <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `len <http://docs.python.org/library/functions.html#len>`_ - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - `sorted <http://docs.python.org/library/functions.html#sorted>`_ The topic of array majority can be very confusing; good background material is available at `IDL Array Storage and Indexing <http://www.idlcoyote.com/misc_tips/colrow_major.html>`_. 
In brief, *regardless of the majority stored in the CDF*, pycdf will always present the data in the native Python majority, row-major order, also known as C order. This is the default order in `NumPy <http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html #internal-memory-layout-of-an-ndarray>`_. However, packages that render image data may expect it in column-major order. If the axes seem 'swapped' this is likely the reason. The :attr:`~Var.attrs` Python attribute acts as a dictionary referencing zAttributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`zAttrList` for more on the dictionary of attributes. With writing, as with reading, every attempt has been made to match the behavior of Python lists. You can write one record, many records, or even certain elements of all records. There is one restriction: only the record dimension (i.e. dimension 0) can be resized by write, as all records in a variable must have the same dimensions. Similarly, only whole records can be deleted. .. note:: Unusual error messages on writing data usually mean that pycdf is unable to interpret the data as a regular array of a single type matching the type and shape of the variable being written. A 5x4 array is supported; an irregular array where one row has five columns and a different row has six columns is not. Error messages of this type include: - ``Data must be well-formed, regular array of number, string, or datetime`` - ``setting an array element with a sequence.`` - ``shape mismatch: objects cannot be broadcast to a single shape`` For these examples, assume Flux has 100 records and dimensions [2, 3]. Rewrite the first record without changing the rest: >>> Flux[0] = [[1, 2, 3], [4, 5, 6]] Writes a new first record and delete all the rest: >>> Flux[...] = [[1, 2, 3], [4, 5, 6]] Write a new record in the last position and add a new record after: >>> Flux[99:] = [[[1, 2, 3], [4, 5, 6]], ... [[11, 12, 13], [14, 15, 16]]] Insert two new records between the current number 5 and 6: >>> Flux[5:6] = [[[1, 2, 3], [4, 5, 6]], [[11, 12, 13], ... [14, 15, 16]]] This operation can be quite slow, as it requires reading and rewriting the entire variable. (CDF does not directly support record insertion.) Change the first element of the first two records but leave other elements alone: >>> Flux[0:2, 0, 0] = [1, 2] Remove the first record: >>> del Flux[0] Removes record 5 (the sixth): >>> del Flux[5] Due to the need to work around a bug in the CDF library, this operation can be quite slow. Delete *all data* from ``Flux``, but leave the variable definition intact: >>> del Flux[...] .. note:: Although this interface only directly supports zVariables, zMode is set on opening the CDF so rVars appear as zVars. See p.24 of the CDF user's guide; pyCDF uses zMode 2. .. autosummary:: ~Var.attrs ~Var.compress ~Var.copy ~Var.dtype ~Var.dv ~Var.insert ~Var.name ~Var.rename ~Var.rv ~Var.shape ~Var.type .. attribute:: Var.attrs zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. .. automethod:: compress .. automethod:: copy .. autoattribute:: dtype .. automethod:: dv .. automethod:: insert .. automethod:: name .. automethod:: rename .. automethod:: rv .. autoattribute:: shape .. 
automethod:: type """ def __init__(self, cdf_file, var_name, *args): """Create or locate a variable Parameters ========== cdf_file : :py:class:`pycdf.CDF` CDF file containing this variable var_name : string name of this variable Other Parameters ================ args additional arguments passed to :py:meth:`_create`. If none, opens an existing variable. If provided, creates a new one. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning """ self.cdf_file = cdf_file #This is the definitive "identify" of variable self._name = None self._type = None #CDF type (long) self._raw = False #Raw access (skip all conversions) if len(args) == 0: self._get(var_name) else: self._create(var_name, *args) #Weak reference to attribute list (use attrs instead) #This avoids a reference loop self._attrlistref = weakref.ref(zAttrList(self)) def __getitem__(self, key): """Returns a slice from the data array. Details under :py:class:`pycdf.Var`. @return: The data from this variable @rtype: list-of-lists of appropriate type. @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) #Hyperslice mostly catches this sort of thing, but #an empty variable is a special case, since we might want to #WRITE to 0th record (which Hyperslice also supports) but #can't READ from it, and iterating over tries to read from it. if hslice.rv: if hslice.dimsizes[0] == 0 and hslice.degen[0] and \ hslice.starts[0] == 0: raise IndexError('record index out of range') #For NRV, again hslice will assume 0th record exists since we might #want to write. So ANY degenerate dim other than the glued-on 0th #suggests an explicit index that should fail. None degenerate suggests #make an empty array. #Note this is pulling a lot of hyperslice stuff into getitem! elif hslice.dimsizes[0] == 0: if len(hslice.degen) > 1 and max(hslice.degen[1:]): raise IndexError('record index out of range') else: #The zero-length dimension is degenerate so it gets chopped, #and you can't have a zero-length numpy array that still #maintains the size of all other dimensions. So just force #a zero-dim array and the rest will follow hslice.counts[...] = 0 #If this is a scalar, need to make a single non-degenerate #dimension so it can be empty. if len(hslice.counts) == 1: hslice.degen[0] = False result = hslice.create_array() if hslice.counts[0] != 0: hslice.select() lib.call(const.GET_, const.zVAR_HYPERDATA_, result.ctypes.data_as(ctypes.c_void_p)) return hslice.convert_input_array(result) def __delitem__(self, key): """Removes a record (or set of records) from the CDF Only whole records can be deleted, so the del call must either specify only one dimension or it must specify all elements of the non-record dimensions. This is *not* a way to resize a variable! Deleting records from the middle of a variable may be very slow in some circumstances. To work around a bug in CDF library versions 3.4.0 and before, all the data must be read in, the requested deletions done, and then all written back out. 
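As an illustrative sketch (``Flux`` is a hypothetical record-varying variable with dimensions [2, 3]): ``del Flux[0:5]`` and ``del Flux[0:5, ...]`` both remove the first five whole records, whereas ``del Flux[0:5, 0]`` raises ``TypeError`` because it names only part of each record.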
@param key: index or slice to delete @type key: int or slice @raise TypeError: if an attempt is made to delete from a non record-varying variable, or to delete below the record level """ if not self.rv(): raise TypeError('Cannot delete records from non-record-varying ' 'variable.') hslice = _Hyperslice(self, key) if hslice.dims > 1 and (hslice.counts[1:] != hslice.dimsizes[1:]).any(): raise TypeError('Can only delete entire records.') if hslice.counts[0] == 0: return start = hslice.starts[0] count = hslice.counts[0] interval = hslice.intervals[0] dimsize = hslice.dimsizes[0] self._call() dangerous_delete = False if lib._del_middle_rec_bug and \ (interval != 1 or (start != 0 and start + count < dimsize)): #delete from middle is dangerous if only have one index entry entries = ctypes.c_long(0) lib.call(const.GET_, const.zVAR_nINDEXENTRIES_, ctypes.byref(entries)) dangerous_delete = (entries.value == 1) if dangerous_delete: data = self[...] data = numpy.delete( data, numpy.arange(start, start + count * interval, interval), 0) self[0:dimsize - count] = data first_rec = dimsize - count last_rec = dimsize - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif interval == 1: first_rec = ctypes.c_long(start) last_rec = ctypes.c_long(start + count - 1) lib.call(const.DELETE_, const.zVAR_RECORDS_, first_rec, last_rec) else: self._call() #delete from end to avoid renumbering of records for recno in range(start + (count - 1) * interval, start - 1, -1 * interval): lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(recno), ctypes.c_long(recno)) def __setitem__(self, key, data): """Puts a slice into the data array. Details under :py:class:`pycdf.Var`. @param key: index or slice to store @type key: int or slice @param data: data to store @type data: numpy.array @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. 
IndexError will @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) n_recs = hslice.counts[0] hslice.expand(data) cdf_type = self.type() if cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) else: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=self._np_type()) if cdf_type == const.CDF_EPOCH16.value: datashape = data.shape[:-1] else: datashape = data.shape #Check data sizes if datashape != tuple(hslice.expected_dims()): raise ValueError('attempt to assign data of dimensions ' + str(datashape) + ' to slice of dimensions ' + str(tuple(hslice.expected_dims()))) #Flip majority and reversed dimensions, see convert_input_array data = hslice.convert_output_array(data) #Handle insertions and similar weirdness if hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Specified slice ends before last record, so insert in middle saved_data = self[hslice.starts[0] + n_recs:] if hslice.counts[0] > 0: hslice.select() lib.call(const.PUT_, const.zVAR_HYPERDATA_, data.ctypes.data_as(ctypes.c_void_p)) if hslice.counts[0] < n_recs: first_rec = hslice.starts[0] + hslice.counts[0] last_rec = hslice.dimsizes[0] - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Put saved data in after inserted data self[hslice.starts[0] + hslice.counts[0]:] = saved_data def extend(self, data): """ Append multiple values to the end of this variable This is an efficiency function which overrides the base implementation in MutableSequence. Parameters ---------- data : the data to append """ self[len(self):] = data def insert(self, index, data): """ Inserts a *single* record before an index Parameters ---------- index : int index before which to insert the new record data : the record to insert """ self[index:index] = [data] def _create(self, var_name, datatype, n_elements = 1, dims = (), recVary = const.VARY, dimVarys = None): """Creates a new zVariable @param var_name: name of this variable @type var_name: string @param datatype: CDF data type @type datatype: ctypes.c_long @param n_elements: number of elements (should be 1 except for CDF_CHAR variables). @type n_elements: long @param dims: size of each dimension for multi-dimensional variable, or empty for a zero-dimensional @type dims: sequence of long @param recVary: record variance for this variable (VARY/NOVARY) @type recVary: long @param dimVarys: array of VARY or NOVARY, variance for each dimension @type dimVarys: sequence of long @return: new variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.new}. 
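As a rough sketch of how this is normally reached (``cdf`` is a hypothetical open, writable CDF; all values illustrative), a call such as ``cdf.new('Flux', type=const.CDF_FLOAT, dims=[2, 3])`` arrives here approximately as ``Var(cdf, 'Flux', const.CDF_FLOAT, 1, [2, 3], const.VARY, [const.VARY, const.VARY])``.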
""" dim_array = (ctypes.c_long * len(dims))(*dims) enc_name = var_name.encode('ascii') if dimVarys is None: dim_vary_array = (ctypes.c_long * (len(dims) if len(dims) > 0 else 1))(const.VARY) else: dim_vary_array = (ctypes.c_long * len(dims))(*dimVarys) varNum = ctypes.c_long(0) self.cdf_file._call(const.CREATE_, const.zVAR_, enc_name, datatype, ctypes.c_long(n_elements), ctypes.c_long(len(dims)), dim_array, recVary, dim_vary_array, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) def _delete(self): """Removes this zVariable from the CDF @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ self._call(const.DELETE_, const.zVAR_) self.cdf_file.clear_from_cache(self._name) self._name = None def _get(self, var_name): """Gets an existing zVariable @param var_name: name of this variable @type var_name: string @return: variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.__getitem__}. """ if isinstance(var_name, str_classes): try: enc_name = var_name.encode('ascii').rstrip() except AttributeError: enc_name = var_name.rstrip() #already in ASCII #'touch' CDF to cause an error if the name isn't there; get number varNum = ctypes.c_long(0) self.cdf_file._call(const.GET_, const.zVAR_NUMBER_, enc_name, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) else: #Looking up by number name = ctypes.create_string_buffer(const.CDF_VAR_NAME_LEN256+1) self.cdf_file._call(const.SELECT_, const.zVAR_, ctypes.c_long(var_name), const.GET_, const.zVAR_NAME_, name) self._name = name.value.rstrip() self.cdf_file.add_to_cache(self._name, var_name) def _num(self): """Returns the zVar number for this variable @return: number of this zVar @rtype: int """ return self.cdf_file.var_num(self._name) def __len__(self): """Get number of records for this variable in this file @return: Number of records @rtype: long @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ count = ctypes.c_long(0) self._call(const.GET_, const.zVAR_MAXREC_, ctypes.byref(count)) return (count.value + 1) def __repr__(self): """Returns representation of the variable Cannot return anything that can be eval'd to create a copy, so just wrap the informal representation in angle brackets. @return: info on this zVar @rtype: str """ return '<Var:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the variable This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.Var`. 
@return: info on this zVar, CDFTYPE [dimensions] NRV (if not record-varying) @rtype: str """ if self.cdf_file._opened: cdftype = self.type() chartypes = (const.CDF_CHAR.value, const.CDF_UCHAR.value) rv = self.rv() typestr = lib.cdftypenames[cdftype] + \ ('*' + str(self._nelems()) if cdftype in chartypes else '' ) if rv: sizestr = str([len(self)] + self._dim_sizes()) else: sizestr = str(self._dim_sizes()) return typestr + ' ' + sizestr + ('' if rv else ' NRV') else: if isinstance(self._name, str): return 'zVar "{0}" in closed CDF {1}'.format( self._name, self.cdf_file.pathname) else: return 'zVar "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self.cdf_file.pathname.decode('ascii')) def _n_dims(self): """Get number of dimensions for this variable @return: the number of dimensions @rtype: long """ n_dims = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMDIMS_, ctypes.byref(n_dims)) return n_dims.value def _dim_sizes(self): """Get the dimension sizes for this variable @return: sequence of sizes @rtype: sequence of long @note: This will always be in Python order (i.e. row major, last index iterates most quickly), *regardless* of the majority of the CDF. """ sizes = (ctypes.c_long * const.CDF_MAX_DIMS)(0) self._call(const.GET_, const.zVAR_DIMSIZES_, sizes) sizes = sizes[0:self._n_dims()] return sizes def rv(self, new_rv=None): """ Gets or sets whether this variable has record variance If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Other Parameters ================ new_rv : boolean True to change to record variance, False to change to NRV, unspecified to simply check variance. Returns ======= out : Boolean True if record varying, False if NRV """ if new_rv != None: self._call(const.PUT_, const.zVAR_RECVARY_, const.VARY if new_rv else const.NOVARY) vary = ctypes.c_long(0) self._call(const.GET_, const.zVAR_RECVARY_, ctypes.byref(vary)) return vary.value != const.NOVARY.value def dv(self, new_dv=None): """ Gets or sets dimension variance of each dimension of variable. If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Parameters ========== new_dv : list of boolean Each element True to change that dimension to dimension variance, False to change to not dimension variance. (Unspecified to simply check variance.) Returns ======= out : list of boolean True if that dimension has variance, else false. """ ndims = self._n_dims() if new_dv != None: if len(new_dv) != ndims: raise ValueError('Must specify variance for ' + str(ndims) + 'dimensions.') varies = (ctypes.c_long * ndims)( *[const.VARY if dv else const.NOVARY for dv in new_dv]) self._call(const.PUT_, const.zVAR_DIMVARYS_, varies) if ndims == 0: return [] varies = (ctypes.c_long * const.CDF_MAX_DIMS)() self._call(const.GET_, const.zVAR_DIMVARYS_, varies) return [dv != const.NOVARY.value for dv in varies[0:ndims]] def _call(self, *args, **kwargs): """Select this CDF and variable and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. @param args: Passed directly to the CDF library interface. Useful constants are defined in the :py:mod:`pycdf.const` module of this package. @type args: various, see :py:mod:`ctypes`. 
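For instance (internal use; taken from ``__len__`` above as an illustration): ``self._call(const.GET_, const.zVAR_MAXREC_, ctypes.byref(count))`` selects this variable and then fetches its maximum record number into ``count``.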
@return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self.cdf_file._call( const.SELECT_, const.zVAR_, ctypes.c_long(self.cdf_file.var_num(self._name)), *args, **kwargs) def _np_type(self): """Returns the numpy type of this variable This is the numpy type that will come directly out of the CDF; see :meth:`dtype` for the representation post-conversion. Raises ====== CDFError : for library-reported error or failure to find numpy type Returns ======= out : dtype numpy dtype that will hold value from this variable """ cdftype = self.type() if cdftype == const.CDF_CHAR.value or cdftype == const.CDF_UCHAR.value: return numpy.dtype('S' + str(self._nelems())) try: return lib.numpytypedict[cdftype] except KeyError: raise CDFError(const.BAD_DATA_TYPE) def type(self, new_type=None): """ Returns or sets the CDF type of this variable Parameters ========== new_type : ctypes.c_long the new type from :mod:`~pycdf.const` Returns ======= out : int CDF type """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) n_elements = ctypes.c_long(self._nelems()) self._call(const.PUT_, const.zVAR_DATASPEC_, new_type, n_elements) self._type = None if self._type is None: cdftype = ctypes.c_long(0) self._call(const.GET_, const.zVAR_DATATYPE_, ctypes.byref(cdftype)) self._type = cdftype.value return self._type def _nelems(self): """Number of elements for each value in this variable This is the length of strings for CHAR and UCHAR, should be 1 otherwise. @return: length of strings @rtype: int """ nelems = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMELEMS_, ctypes.byref(nelems)) return nelems.value def name(self): """ Returns the name of this variable Returns ======= out : str variable's name """ if isinstance(self._name, str): return self._name elif isinstance(self._name, bytes): return self._name.decode() def compress(self, comptype=None, param=None): """ Set or check the compression of this variable Compression may not be changeable on variables with data already written; even deleting the data may not permit the change. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
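A brief sketch (``var`` is a hypothetical zVariable with no data yet written; the constant is from :mod:`~pycdf.const`): ``var.compress(const.GZIP_COMPRESSION, 5)`` requests gzip at level 5, while ``var.compress()`` simply reports the current setting.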
Returns ======= out : tuple the (comptype, param) currently in effect """ return _compress(self, comptype, param) def copy(self): """ Copies all data and attributes from this variable Returns ======= out : :class:`VarCopy` list of all data in record order """ return VarCopy(self) def rename(self, new_name): """ Renames this variable Parameters ========== new_name : str the new name for this variable """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_VAR_NAME_LEN256: raise CDFError(const.BAD_VAR_NAME) self._call(const.PUT_, const.zVAR_NAME_, enc_name) self.cdf_file.add_to_cache( enc_name, self.cdf_file.var_num(self._name)) #Still in cache del self.cdf_file._var_nums[self._name] self._name = enc_name @property def shape(self): """ Provides the numpy array-like shape of this variable. Returns a tuple; first element is number of records (RV variable only) And the rest provide the dimensionality of the variable. .. note:: Assigning to this attribute will not change the shape. """ if self.rv(): return tuple([len(self)] + self._dim_sizes()) else: return tuple(self._dim_sizes()) @property def dtype(self): """ Provide the numpy dtype equivalent to the CDF type of this variable. Data from this variable will be returned in numpy arrays of this type. See Also -------- type """ cdftype = self.type() if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str is not bytes and not self._raw: return numpy.dtype('U' + str(self._nelems())) if cdftype in (const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value) and not self._raw: return numpy.dtype('O') return self._np_type() def _get_attrs(self): """Get attribute list Provide access to the zVar's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = zAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. """) class _Hyperslice(object): """Represents a CDF 'slice' used for the hyper CDF functions For internal module use only. @ivar dims: number of dimensions to this slice, usually number of dimensions to the variable plus one for the record, which represents the 0th (least rapidly varying) dimension. @type dims: int @ivar dimsizes: size of each dimension (0th is number of records) @type dimsizes: list of int @ivar starts: index of the start value for each dimension ('dimension indices' in CDF speak) @type starts: list of int @ivar counts: number of values to get from each dimension. Final result will be the product of everything in counts. ('dimension counts' in CDF speak) @type counts: numpy.array @ivar intervals: interval between successive indices to use for each dimension. ('dimension invervals' in CDF speak) @type intervals: list of int @ivar degen: is this dimension degenerate, i.e. should be removed in the returned dataset. A 3D array with one dimension degenerate will be returned as a 2D array (i.e. list-of-lists.) @type degen: numpy.array @ivar rev: should this dimension be returned in reverse order? 
@type rev: numpy.array @ivar column: is this slice in column-major mode (if false, row-major) @type column: boolean @ivar zvar: what CDF variable this object slices on @type zvar: :py:class:`pycdf.Var` @ivar expanded_key: fully-expanded version of the key passed to the constructor (all dimensions filled in) @type expanded_key: tuple @note: All dimension-related variables are stored row-major (Python order) """ def __init__(self, zvar, key): """Create a Hyperslice @param zvar: zVariable that this slices @type zvar: :py:class:`pycdf.Var` @param key: Python multi-dimensional slice as passed to __getitem__ @type key: tuple of slice and/or int @raise IndexError: if slice is out of range, mismatches dimensions, or otherwise unparsable. @raise ValueError: if slice has invalid values """ self.zvar = zvar self.rv = self.zvar.rv() #dim of records, + 1 record dim (NRV always is record 0) self.dims = zvar._n_dims() + 1 self.dimsizes = [len(zvar)] + \ zvar._dim_sizes() self.starts = [0] * self.dims self.counts = numpy.empty((self.dims,), dtype=numpy.int32) self.counts.fill(1) self.intervals = [1] * self.dims self.degen = numpy.zeros(self.dims, dtype=numpy.bool) self.rev = numpy.zeros(self.dims, dtype=numpy.bool) #key is: #1. a single value (integer or slice object) if called 1D #2. a tuple (of integers and/or slice objects) if called nD #3. Each item is either a single value (degenerate dim) # or a slice object. if not hasattr(key, '__len__'): #Not a container object, pack in tuple key = (key, ) if not self.rv: key = (0, ) + key #NRV, so always get 0th record (degenerate) key = self.expand_ellipsis(key, self.dims) if self.rv: #special-cases for RV variables if len(key) == 1: #get all data for this record(s) key = self.expand_ellipsis(key + (Ellipsis, ), self.dims) elif len(key) == self.dims - 1: #get same slice from each record key = (slice(None, None, None), ) + key if len(key) == self.dims: self.expanded_key = key for i in range(self.dims): idx = key[i] if hasattr(idx, 'start'): #slice (self.starts[i], self.counts[i], self.intervals[i], self.rev[i]) = \ self.convert_range(idx.start, idx.stop, idx.step, self.dimsizes[i]) else: #Single degenerate value if idx < 0: idx += self.dimsizes[i] if idx != 0 and (idx >= self.dimsizes[i] or idx < 0): raise IndexError('list index out of range') self.starts[i] = idx self.degen[i] = True else: raise IndexError('Slice does not match dimensions for zVar ' + str(zvar._name)) self.column = zvar.cdf_file.col_major() def expected_dims(self, data=None): """Calculate size of non-degenerate dimensions Figures out size, in each dimension, of expected input data @return: size of each dimension for this slice, excluding degenerate @rtype: list of int """ return [self.counts[i] for i in range(self.dims) if not self.degen[i]] def expand(self, data): """Expands the record dimension of this slice to hold a set of data If the length of data (outermost dimension) is larger than the record count (counts[0]) for this slice, expand the slice to hold all the data. This requires that the record dimension of the slice not be degenerate, and also that it not have been completely specified when the hyperslice was created (i.e. record dimension either ellipsis or no specified stop.) Does *not* expand any other dimension, since that's Very Hard in CDF. 
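Illustrative example: writing ten records to ``var[5:]`` on a hypothetical variable that currently holds twelve records grows this slice's record count from 7 to 10, so records 5 through 14 are written.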
@param data: the data which are intended to be stored in this slice @type data: list """ rec_slice = self.expanded_key[0] if not self.rv or isinstance(data, str_classes) or self.degen[0] or \ not hasattr(rec_slice, 'stop'): return if len(data) < self.counts[0]: #Truncate to fit data if rec_slice.stop is None and rec_slice.step in (None, 1): self.counts[0] = len(data) elif len(data) > self.counts[0]: #Expand to fit data if rec_slice.step in (None, 1): self.counts[0] = len(data) def create_array(self): """Creates a numpy array to hold the data from this slice Returns ======= out : numpy.array array sized, typed, and dimensioned to hold data from this slice """ counts = self.counts degen = self.degen if self.column: counts = self.reorder(counts) degen = self.reorder(degen) #TODO: Forcing C order for now, revert to using self.column later array = numpy.empty( [counts[i] for i in range(len(counts)) if not degen[i]], self.zvar._np_type(), order='C') return numpy.require(array, requirements=('C', 'A', 'W')) def convert_input_array(self, buffer): """Converts a buffer of raw data from this slice EPOCH(16) variables always need to be converted. CHAR need converted to Unicode if py3k Parameters ========== buffer : numpy.array data as read from the CDF file Returns ======= out : numpy.array converted data """ result = self._flip_array(buffer) #Convert to derived types cdftype = self.zvar.type() if not self.zvar._raw: if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str != bytes: dt = numpy.dtype('U{0}'.format(result.dtype.itemsize)) result = numpy.require(numpy.char.array(result).decode(), dtype=dt) elif cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(result) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(result) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(result) return result def convert_output_array(self, buffer): """Convert a buffer of data that will go into this slice Parameters ========== buffer : numpy.array data to go into the CDF file Returns ======= out : numpy.array input with majority flipped and dimensions reversed to be suitable to pass directly to CDF library. """ buffer = self._flip_array(buffer) return numpy.require(buffer, requirements=('C', 'A', 'W')) def _flip_array(self, data): """ Operations for majority, etc. common between convert_input and _output """ cdftype = self.zvar.type() #Flip majority if any non-degenerate dimensions exist if self.column and not min(self.degen): #Record-number dim degen, swap whole thing if self.degen[0]: if cdftype == const.CDF_EPOCH16.value: #Maintain last dimension data = data.transpose( list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose() #Record-number dimension is not degenerate, so keep it first else: if cdftype == const.CDF_EPOCH16.value: data = data.transpose( [0] + list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose( [0] + list(range(len(data.shape) - 1, 0, -1))) #Reverse non-degenerate dimensions in rev #Remember that the degenerate indices are already gone! 
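# Illustrative note (hypothetical request): a slice such as var[:, ::-1] on a
# record-varying variable with one non-record dimension gives rev = [False, True],
# so the block below builds sliced = [slice(None), slice(None, None, -1)] and
# applies it, i.e. effectively data[:, ::-1]; for EPOCH16 the trailing
# two-element dimension is meant to stay unreversed.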
if self.rev.any(): sliced = [(slice(None, None, -1) if self.rev[i] else slice(None)) for i in range(self.dims) if not self.degen[i]] if cdftype == const.CDF_EPOCH16.value: #don't reverse last dim sliced.extend(slice(None)) data = operator.getitem(data, tuple(sliced)) return data def select(self): """Selects this hyperslice in the CDF Calls the CDF library to select the CDF, variable, records, and array elements corresponding to this slice. """ args = (const.SELECT_, const.zVAR_RECNUMBER_, ctypes.c_long(self.starts[0]), const.SELECT_, const.zVAR_RECCOUNT_, ctypes.c_long(self.counts[0]), const.SELECT_, const.zVAR_RECINTERVAL_, ctypes.c_long(self.intervals[0])) if self.dims > 1: dims = self.dims - 1 args += (const.SELECT_, const.zVAR_DIMINDICES_, (ctypes.c_long * dims)(*self.starts[1:]), const.SELECT_, const.zVAR_DIMCOUNTS_, (ctypes.c_long * dims)(*self.counts[1:]), const.SELECT_, const.zVAR_DIMINTERVALS_, (ctypes.c_long * dims)(*self.intervals[1:])) self.zvar._call(*args) @staticmethod def expand_ellipsis(slices, n_dims): """Expands any ellipses into correct number of full-size slices @param slices: tuple of slices, integers, or ellipse objects @type slices: tuple @param n_dims: number of dimensions this slice is over @type n_dims: int @return: L{slices} with ellipses replaced by appropriate number of full-dimension slices @rtype: tuple @raise IndexError: if ellipses specified when already have enough dimensions """ if slices is Ellipsis: return tuple([slice(None, None, None) for i in range(n_dims)]) #Elements might be numpy arrays, so can't use in/index idx = [i for i, v in enumerate(slices) if v is Ellipsis] if not idx: #no ellipsis return slices if len(idx) > 1: #multiples! raise IndexError('Ellipses can only be used once per slice.') idx = idx[0] #how many dims to expand ellipsis to #remember the ellipsis is in len(slices) and must be replaced! extra = n_dims - len(slices) + 1 if extra < 0: raise IndexError('too many indices') result = slices[0:idx] + (slice(None), ) * extra + slices[idx+1:] return result @staticmethod def check_well_formed(data): """Checks if input data is well-formed, regular array""" d = numpy.asanyarray(data) if d.dtype == numpy.object: #this is probably going to be bad try: len(d.flat[0]) except TypeError: #at least it's not a list pass else: raise ValueError( 'Data must be well-formed, regular array of number, ' 'string, or datetime') @staticmethod def dimensions(data): """Finds the dimensions of a nested list-of-lists @param data: data of which dimensions are desired @type data: list (of lists) @return: dimensions of L{data}, in order outside-in @rtype: list of int @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) _Hyperslice.check_well_formed(d) return d.shape @staticmethod def types(data, backward=False): """Find dimensions and valid types of a nested list-of-lists Any given data may be representable by a range of CDF types; infer the CDF types which can represent this data. This breaks down to: 1. Proper kind (numerical, string, time) 2. Proper range (stores highest and lowest number) 3. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: 1. Type that matches precision of data first, then 2. integer type before float type, then 3. Smallest type first, then 4. signed type first, then 5. specifically-named (CDF_BYTE) vs. 
generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if L{data} specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: 1. absolute values between 0 and 3e-39 2. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. @param data: data for which dimensions and CDF types are desired @type data: list (of lists) @param backward: limit to pre-CDF3 types @type backward: bool @return: dimensions of L{data}, in order outside-in; CDF types which can represent this data; number of elements required (i.e. length of longest string) @rtype: 3-tuple of lists ([int], [ctypes.c_long], [int]) @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) dims = d.shape elements = 1 types = [] _Hyperslice.check_well_formed(d) if d.dtype.kind in ('S', 'U'): #it's a string types = [const.CDF_CHAR, const.CDF_UCHAR] elements = d.dtype.itemsize if d.dtype.kind == 'U': #UTF-8 uses 4 bytes per elements //= 4 elif d.size and hasattr(numpy.ma.getdata(d).flat[0], 'microsecond'): if max((dt.microsecond % 1000 for dt in d.flat)) > 0: types = [const.CDF_EPOCH16, const.CDF_EPOCH, const.CDF_TIME_TT2000] else: types = [const.CDF_EPOCH, const.CDF_EPOCH16, const.CDF_TIME_TT2000] if backward: del types[types.index(const.CDF_EPOCH16)] del types[-1] elif not lib.supports_int8: del types[-1] elif d is data or isinstance(data, numpy.generic): #numpy array came in, use its type (or byte-swapped) types = [k for k in lib.numpytypedict if (lib.numpytypedict[k] == d.dtype or lib.numpytypedict[k] == d.dtype.newbyteorder()) and not k in lib.timetypes] if (not lib.supports_int8 or backward) \ and const.CDF_INT8.value in types: del types[types.index(const.CDF_INT8.value)] #Maintain priority to match the ordered lists below: #float/double (44, 45) before real (21/22), and #byte (41) before int (1) before char (51). So hack. #Consider making typedict an ordered dict once 2.6 is dead. types.sort(key=lambda x: x % 50, reverse=True) if not types: #not a numpy array, or can't parse its type if d.dtype.kind == 'O': #Object. 
Try to make it numeric #Can't do safe casting from Object, so try and compare #Basically try most restrictive to least restrictive trytypes = (numpy.uint64, numpy.int64, numpy.float64) for t in trytypes: try: newd = d.astype(dtype=t) except: #Failure to cast, try next type continue if (newd == d).all(): #Values preserved, use this type d = newd #Continue with normal guessing, as if a list break else: #fell through without a match raise ValueError( 'Cannot convert generic objects to CDF type.') if d.dtype.kind in ('i', 'u'): #integer minval = numpy.min(d) maxval = numpy.max(d) if minval < 0: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_INT2, const.CDF_INT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 15, 2 ** 31, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] else: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_UINT1, const.CDF_INT2, const.CDF_UINT2, const.CDF_INT4, const.CDF_UINT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 8, 2 ** 15, 2 ** 16, 2 ** 31, 2 ** 32, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] types = [t for (t, c) in zip(types, cutoffs) if c > maxval and (minval >= 0 or minval >= -c)] if (not lib.supports_int8 or backward) \ and const.CDF_INT8 in types: del types[types.index(const.CDF_INT8)] else: #float if dims is (): if d != 0 and (abs(d) > 1.7e38 or abs(d) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] else: absolutes = numpy.abs(d[d != 0]) if len(absolutes) > 0 and \ (numpy.max(absolutes) > 1.7e38 or numpy.min(absolutes) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] types = [t.value if hasattr(t, 'value') else t for t in types] return (dims, types, elements) @staticmethod def reorder(seq): """Reorders seq to switch array majority Used to take an array of subscripts between row and column majority. First element is not touched, being the record number. @param seq: a sequence of *subscripts* @type seq: sequence of integers @return: seq with all but element 0 reversed in order @rtype: sequence of integers """ return numpy.concatenate((seq[0:1], numpy.flipud(seq)[:-1])) @staticmethod def convert_range(start, stop, step, size): """Converts a start/stop/step range to start/count/interval (i.e. changes from Python-style slice to CDF-style) @param start: index to start a slice at, may be none or negative @type start: int @param stop: index at end of slice (one-past, standard Python), may be none or negative @type stop: int @param step: interval for stepping through stlice @type step: int @param size: size of list to slice @type size: int @return: (start, count, interval, rev) where: 1. start is the start index, normalized to be within the size of the list and negatives handled 2. count is the number of records in the slice, guaranteed to stop before the end 3. interval is the skip between records 4. rev indicates whether the sequence should be reversed @rtype: (int, int, int, boolean) """ (start, stop, step) = slice(start, stop, step).indices(size) if step < 0: step *= -1 count = int((start - stop + step - 1) / step) start = start - (count - 1) * step rev = True else: count = int((stop - start + step - 1) / step) rev = False if count < 0: count = 0 start = 0 return (start, count, step, rev) class Attr(MutableSequence): """An attribute, g or z, for a CDF .. 
warning:: This class should not be used directly, but only in its subclasses, :class:`gAttr` and :class:`zAttr`. The methods listed here are safe to use in the subclasses. Represents a CDF attribute, providing access to the Entries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. An introduction to CDF attributes can be found in section 2.4 of the CDF user's guide. Each element of the list is a single Entry of the appropriate type. The index to the elements is the Entry number. Multi-dimensional slicing is *not* supported; an Entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry .. autosummary:: ~Attr.append ~Attr.has_entry ~Attr.insert ~Attr.max_idx ~Attr.new ~Attr.number ~Attr.rename ~Attr.type .. automethod:: append .. automethod:: has_entry .. automethod:: insert .. automethod:: max_idx .. automethod:: new .. automethod:: number .. automethod:: rename .. automethod:: type """ def __init__(self, cdf_file, attr_name, create=False): """Initialize this attribute @param cdf_file: CDF file containing this attribute @type cdf_file: :py:class:`pycdf.CDF` @param attr_name: Name of this attribute @type attr_name: str @param create: True to create attribute, False to look up existing. @type create: bool """ self._cdf_file = cdf_file self._raw = False if isinstance(attr_name, str_classes): try: self._name = attr_name.encode('ascii') except AttributeError: self._name = attr_name attrno = ctypes.c_long() if create: self._cdf_file._call(const.CREATE_, const.ATTR_, self._name, self.SCOPE, ctypes.byref(attrno)) self._cdf_file.add_attr_to_cache( self._name, attrno.value, self.SCOPE == const.GLOBAL_SCOPE) else: #Ensure exists, and populate cache. See scope note below attrno, scope = self._cdf_file.attr_num(self._name) else: name = ctypes.create_string_buffer(const.CDF_ATTR_NAME_LEN256 + 1) scope = ctypes.c_long(0) self._cdf_file._call(const.SELECT_, const.ATTR_, ctypes.c_long(attr_name)) #Because it's possible to create a gAttr Python objecting #referencing an Attribute with variable scope, and vice-versa, #do NOT assume the scope matches #(Higher level code checks for that being a bad thing.) self._cdf_file._call( const.GET_, const.ATTR_NAME_, name, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) self._name = name.value.rstrip() if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) self._cdf_file.add_attr_to_cache(self._name, attr_name, scope) def __getitem__(self, key): """Return a slice of Entries. Because Attributes may be sparse, a multi-element slice will return None for those elements which do not have associated Entries. @param key: index or range of Entry number to return @type key: slice or int @return: a list of entries, appropriate type. @raise IndexError: if L{key} is an int and that Entry number does not exist. 
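Illustrative example: if only Entries 0 and 2 exist, ``attribute[0:3]`` returns a three-element list whose middle element is ``None``, while ``attribute[1]`` raises IndexError.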
""" if key is Ellipsis: key = slice(None, None, None) if hasattr(key, 'indices'): idx = range(*key.indices(self.max_idx() + 1)) return [self._get_entry(i) if self.has_entry(i) else None for i in idx] else: if self.has_entry(key): return self._get_entry(key) else: raise IndexError('list index ' + str(key) + ' out of range.') def _check_other_entries(self, types): """Try to get the type of this entry from others in the Attribute For zAttrs, checks if all other Entries are the same type, and at least one doesn't match its zVar, i.e. Entry type dominates (otherwise assumption is the Var type dominates). For gAttrs, checks all other Entries, and gives priority to the one that's earliest in the possible type list and exists in other Entries. This is only one component of Entry type guessing! :param list types: CDF types that are candidates (match the data) :return: The type discerned from other Entries, or None """ if self.ENTRY_ == const.zENTRY_: #If everything else is the same entry type, #and one is not the same as its var, probably #all entries should be of that type cand_et = None #The Entry type that might work one_var_diff = False #One Var has a type different from Entry for num in range(self.max_idx() + 1): if not self.has_entry(num): continue vartype = self._cdf_file[num].type() entrytype = self.type(num) if vartype != entrytype: one_var_diff = True if cand_et is None: if not entrytype in types: return None #One var has Entry with "impossible" type cand_et = entrytype elif cand_et != entrytype: return None #Two vars have Entries with different types if one_var_diff and cand_et is not None: return cand_et else: # Of those types which exist in other entries, # find the one which is earliest # in types, i.e. the preferred type entrytypes = [self.type(num) for num in range(self.max_idx() + 1) if self.has_entry(num)] entrytypes = [et for et in entrytypes if et in types] if entrytypes: return types[ min([types.index(et) for et in entrytypes])] return None def __setitem__(self, key, data): """Set a slice of Entries. @param key: index or range of Entry numbers to set @type key: slice or int @param data: the data to set these entries to. Normally each entry should be a sequence; if a scalar is provided, it is treated as a single-element list. @type data: scalar or list @raise ValueError: if size of {data} does not match size of L{key} @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. 
""" if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): #Single value, promote everything a dimension idx = (key, key + 1, 1) data = [data] else: idx = key.indices(self.max_idx() + 1) if key.step is None or key.step > 0: #Iterating forward, extend slice to match data if len(data) > len(range(*idx)): idx = (idx[0], idx[0] + idx[2] * len(data), idx[2]) #get, and check, types and sizes for all data #checks first so don't have error after changing half the Entries data_idx = -1 typelist = [] for i in range(*idx): data_idx += 1 if data_idx >= len(data): continue datum = data[data_idx] if datum is None: typelist[i] = (None, None, None) continue (dims, types, elements) = _Hyperslice.types( datum, backward=self._cdf_file.backward) if len(types) <= 0: raise ValueError('Cannot find a matching CDF type.') if len(dims) > 1: raise ValueError('Entries must be scalar or 1D.') elif len(dims) == 1 and isinstance(datum[0], str_classes): raise ValueError('Entry strings must be scalar.') entry_type = None if self.has_entry(i): #If the entry already exists, match its type entry_type = self.type(i) if not entry_type in types: entry_type = None if entry_type is None: #Check other entries for this attribute entry_type = self._check_other_entries(types) if entry_type is None and self.ENTRY_ == const.zENTRY_: #Fall back to zVar type vartype = self._cdf_file[i].type() if vartype in types: entry_type = vartype else: entry_type = types[0] elif entry_type is None: entry_type = types[0] if not entry_type in lib.numpytypedict: raise ValueError('Cannot find a matching numpy type.') typelist.append((dims, entry_type, elements)) data_idx = -1 for i in range(*idx): data_idx += 1 if data_idx >= len(data) or data[data_idx] is None: if self.has_entry(i): del self[i] continue datum = data[data_idx] (dims, entry_type, elements) = typelist[data_idx] self._write_entry(i, datum, entry_type, dims, elements) def __delitem__(self, key): """Delete a slice of Entries. @param key: index or range of Entry numbers to delete @type key: slice or int @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. """ if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): idx = (key, key + 1, 1) else: idx = key.indices(self.max_idx() + 1) for i in range(*idx): self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(i), const.DELETE_, self.ENTRY_) def __iter__(self, current=0): """Iterates over all entries in this Attribute Returns data from one entry at a time until reaches the end. @note: Returned in entry-number order. """ while current <= self.max_idx(): if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current += 1 def __reversed__(self, current=None): """Iterates over all entries in this Attribute Returns data from one entry at a time, starting at end and going to beginning. @note: Returned in entry-number order. """ if current is None: current = self.max_idx() while current >= 0: if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current -= 1 def __len__(self): """Number of Entries for this Attr. NOT same as max Entry number. 
@return: Number of Entries @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_NUMENTRIES_, ctypes.byref(count)) return count.value def __repr__(self): """Returns representation of an attribute Cannot return anything that can be eval'd to create a copy of the attribtute, so just wrap the informal representation in angle brackets. @return: all the data in this attribute @rtype: str """ return '<\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute This is an 'informal' representation in that it cannot be evaluated directly to create an L{Attr}. @return: all the data in this attribute @rtype: str """ if self._cdf_file._opened: return '\n'.join([str(item) for item in self]) else: if isinstance(self._name, str): return 'Attribute "{0}" in closed CDF {1}'.format( self._name, self._cdf_file.pathname) else: return 'Attribute "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self._cdf_file.pathname.decode('ascii')) def insert(self, index, data): """Insert an entry at a particular number Inserts entry at particular number while moving all subsequent entries to one entry number later. Does not close gaps. Parameters ========== index : int index where to put the new entry data : data for the new entry """ max_entry = self.max_idx() if index > max_entry: #Easy case self[index] = data return for i in range(max_entry, index - 1, -1): if self.has_entry(i+1): self.__delitem__(i+1) if self.has_entry(i): self.new(self.__getitem__(i), type=self.type(i), number=i+1) self[index] = data def append(self, data): """Add an entry to end of attribute Puts entry after last defined entry (does not fill gaps) Parameters ========== data : data for the new entry """ self[self.max_idx() + 1] = data def _call(self, *args, **kwargs): """Select this CDF and Attr and call the CDF internal interface @param args: Passed directly to the CDF library interface. @type args: various, see :py:mod:`ctypes`. @return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self._cdf_file._call( const.SELECT_, const.ATTR_, ctypes.c_long(self._cdf_file.attr_num(self._name)[0]), *args, **kwargs) def _entry_len(self, number): """Number of elements in an Entry @param number: number of Entry @type number: int @return: number of elements @rtype: int """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') count = ctypes.c_long(0) self._call( const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_NUMELEMS_, ctypes.byref(count)) return count.value def type(self, number, new_type=None): """Find or change the CDF type of a particular Entry number Parameters ========== number : int number of Entry to check or change Other Parameters ================ new_type type to change this Entry to, from :mod:`~pycdf.const`. Omit to only check type. Returns ======= out : int CDF variable type, see :mod:`~pycdf.const` Notes ===== If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) size = ctypes.c_long(self._entry_len(number)) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATASPEC_, new_type, size, ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') cdftype = ctypes.c_long(0) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATATYPE_, ctypes.byref(cdftype), ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') return cdftype.value def has_entry(self, number): """Check if this attribute has a particular Entry number Parameters ========== number : int number of Entry to check or change Returns ======= out : bool True if ``number`` is a valid entry number; False if not """ status = self._call(const.CONFIRM_, self.ENTRY_EXISTENCE_, ctypes.c_long(number), ignore=(const.NO_SUCH_ENTRY, )) return not status == const.NO_SUCH_ENTRY def max_idx(self): """Maximum index of Entries for this Attr Returns ======= out : int maximum Entry number """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_MAXENTRY_, ctypes.byref(count)) return count.value def new(self, data, type=None, number=None): """Create a new Entry in this Attribute .. note:: If ``number`` is provided and an Entry with that number already exists, it will be overwritten. Parameters ========== data data to put in the Entry Other Parameters ================ type : int type of the new Entry, from :mod:`~pycdf.const` (otherwise guessed from ``data``) number : int Entry number to write, default is lowest available number. """ if number is None: number = 0 while self.has_entry(number): number += 1 (dims, types, elements) = _Hyperslice.types( data, backward=self._cdf_file.backward) if type is None: #Guess based on other entries type = self._check_other_entries(types) if type is None and self.ENTRY_ == const.zENTRY_: #Try to match variable type vartype = self._cdf_file[number].type() if vartype in types: type = vartype if type is None: type = types[0] elif hasattr(type, 'value'): type = type.value self._write_entry(number, data, type, dims, elements) def number(self): """Find the attribute number for this attribute Returns ======= out : int attribute number """ no = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.ATTR_NUMBER_, self._name, ctypes.byref(no)) return no.value def global_scope(self): """Determine scope of this attribute. Returns ======= out : bool True if global (i.e. gAttr), False if zAttr """ return self._cdf_file.attr_num(self._name)[1] def rename(self, new_name): """Rename this attribute Renaming a zAttribute renames it for *all* zVariables in this CDF! 
Parameters ========== new_name : str the new name of the attribute """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_ATTR_NAME_LEN256: raise CDFError(const.BAD_ATTR_NAME) self._call(const.PUT_, const.ATTR_NAME_, enc_name) self._cdf_file.add_attr_to_cache( enc_name, *self._cdf_file.attr_num(self._name)) #still in cache del self._cdf_file._attr_info[self._name] self._name = enc_name def _get_entry(self, number): """Read an Entry associated with this L{Attr} @param number: number of Entry to return @type number: int @return: data from entry numbered L{number} @rtype: list or str """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') #Make a big enough buffer length = self._entry_len(number) cdftype = self.type(number) if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): buff = numpy.empty((), 'S{0}'.format(length), order='C') else: if not cdftype in lib.numpytypedict: raise CDFError(const.BAD_DATA_TYPE) buff = numpy.empty((length,), lib.numpytypedict[cdftype], order='C') buff = numpy.require(buff, requirements=('C', 'A', 'W')) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATA_, buff.ctypes.data_as(ctypes.c_void_p)) #decode if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): if str == bytes or self._raw: #Py2k, leave as bytes result = bytes(buff) else: #Py3k, make unicode result = str(numpy.char.array(buff).decode()) else: if not self._raw: if cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(buff) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(buff) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(buff) else: result = buff else: result = buff if length == 1: result = result[0] return result def _write_entry(self, number, data, cdf_type, dims, elements): """Write an Entry to this Attr. @param number: number of Entry to write @type number: int @param data: data to write @param cdf_type: the CDF type to write, from :py:mod:`pycdf.const` @param dims: dimensions of L{data} @type dims: list @param elements: number of elements in L{data}, 1 unless it is a string @type elements: int """ if len(dims) == 0: n_write = 1 else: n_write = dims[0] if cdf_type in (const.CDF_CHAR.value, const.CDF_UCHAR.value): data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.dtype('S' + str(elements))) n_write = elements elif cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data), except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) elif cdf_type in lib.numpytypedict: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=lib.numpytypedict[cdf_type]) else: raise CDFError(const.BAD_DATA_TYPE) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATA_, ctypes.c_long(cdf_type), ctypes.c_long(n_write), data.ctypes.data_as(ctypes.c_void_p)) def _delete(self): """Delete this Attribute Also deletes all Entries associated with it. 
""" self._call(const.DELETE_, const.ATTR_) self._cdf_file.clear_attr_from_cache(self._name) self._name = None class zAttr(Attr): """zAttribute for zVariables within a CDF. .. warning:: Because zAttributes are shared across all variables in a CDF, directly manipulating them may have unexpected consequences. It is safest to operate on zEntries via :class:`zAttrList`. .. note:: When accessing a zAttr, pyCDF exposes only the zEntry corresponding to the associated zVariable. See Also ======== :class:`Attr` """ ENTRY_ = const.zENTRY_ ENTRY_DATA_ = const.zENTRY_DATA_ SCOPE = const.VARIABLE_SCOPE ENTRY_EXISTENCE_ = const.zENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMzENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXzENTRY_ ENTRY_NUMELEMS_ = const.zENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.zENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.zENTRY_DATASPEC_ def insert(self, index, data): """Insert entry at particular index number Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError def append(self, index, data): """Add entry to end of attribute list Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError class gAttr(Attr): """Global Attribute for a CDF Represents a CDF attribute, providing access to the gEntries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. Normally accessed by providing a key to a :class:`gAttrList`: >>> attribute = cdffile.attrs['attribute_name'] >>> first_gentry = attribute[0] Each element of the list is a single gEntry of the appropriate type. The index to the elements is the gEntry number. A gEntry may be either a single string or a 1D array of numerical type. Entries of numerical type (everything but CDF_CHAR and CDF_UCHAR) with a single element are returned as scalars; multiple-element entries are returned as a list. No provision is made for accessing below the entry level; the whole list is returned at once (but Python's slicing syntax can be used to extract individual items from that list.) Multi-dimensional slicing is *not* supported; an entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry gEntries are *not* necessarily contiguous; a gAttribute may have an entry 0 and entry 2 without an entry 1. :meth:`~Attr.len` will return the *number* of gEntries; use :meth:`~Attr.max_idx` to find the highest defined gEntry number and :meth:`~Attr.has_entry` to determine if a particular gEntry number exists. Iterating over all entries is also supported:: >>> entrylist = [entry for entry in attribute] Deleting gEntries will leave a "hole": >>> attribute[0:3] = [1, 2, 3] >>> del attribute[1] >>> attribute.has_entry(1) False >>> attribute.has_entry(2) True >>> print attribute[0:3] [1, None, 3] Multi-element slices over nonexistent gEntries will return ``None`` where no entry exists. Single-element indices for nonexistent gEntries will raise ``IndexError``. Assigning ``None`` to a gEntry will delete it. 
When assigning to a gEntry, the type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing gEntry of the same number in this gAttribute #. other gEntries in this gAttribute #. data-matching constraints described in :meth:`CDF.new`. See Also ======== :class:`Attr` """ ENTRY_ = const.gENTRY_ ENTRY_DATA_ = const.gENTRY_DATA_ SCOPE = const.GLOBAL_SCOPE ENTRY_EXISTENCE_ = const.gENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMgENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXgENTRY_ ENTRY_NUMELEMS_ = const.gENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.gENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.gENTRY_DATASPEC_ class AttrList(MutableMapping): """Object representing a list of attributes. .. warning:: This class should not be used directly, but only via its subclasses, :class:`gAttrList` and :class:`zAttrList`. Methods listed here are safe to use from the subclasses. .. autosummary:: ~AttrList.clone ~AttrList.copy ~AttrList.from_dict ~AttrList.new ~AttrList.rename .. automethod:: clone .. automethod:: copy .. automethod:: from_dict .. automethod:: new .. automethod:: rename """ def __init__(self, cdf_file, special_entry=None): """Initialize the attribute collection @param cdf_file: CDF these attributes are in @type cdf_file: :py:class:`pycdf.CDF` @param special_entry: callable which returns a "special" entry number, used to limit results for zAttrs to those which match the zVar (i.e. the var number) @type special_entry: callable """ self._cdf_file = cdf_file self.special_entry = special_entry def __getitem__(self, name): """Find an Attribute by name @param name: name of the Attribute to return @type name: str @return: attribute named L{name} @rtype: L{Attr} @raise KeyError: if there is no attribute named L{name} @raise CDFError: other errors in CDF library """ try: attrib = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attrib.global_scope() != self.global_scope: raise KeyError(name + ': no ' + self.attr_name + ' by that name.') return attrib def __setitem__(self, name, data): """Create an Attribute or change its entries @param name: name of Attribute to change @type name: str @param data: Entries to populate this Attribute with. Any existing Entries will be deleted! Another C{Attr} may be specified, in which case all its entries are copied. @type data: scalar, list, or L{Attr} """ if isinstance(data, AttrList): if name in self: del self[name] attr = self._get_or_create(name) for entryno in range(data.max_idx()): if data.has_entry(entryno): attr.new(data[entryno], data.type(entryno), entryno) else: attr = self._get_or_create(name) if isinstance(data, str_classes): data = [data] else: try: junk = len(data) except TypeError: data = [data] attr[:] = data del attr[len(data):] def __delitem__(self, name): """Delete an Attribute (and all its entries) @param name: name of Attribute to delete @type name: str """ try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) attr._delete() def __iter__(self, current=0): """Iterates over all Attr in this CDF or variable Returns name of one L{Attr} at a time until reaches the end. @note: Returned in number order. 
""" count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) while current < count.value: candidate = self.AttrType(self._cdf_file, current) if candidate.global_scope() == self.global_scope: if self.special_entry is None or \ candidate.has_entry(self.special_entry()): if str == bytes: value = yield(candidate._name) else: value = yield(candidate._name.decode()) if value != None: current = self[value].number() current += 1 def __repr__(self): """Returns representation of attribute list Cannot return anything that can be eval'd to create a copy of the list, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<' + self.__class__.__name__ + ':\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute list This is an 'informal' representation in that it cannot be evaluated directly to create an L{AttrList}. @return: all the data in this list of attributes @rtype: str """ if self._cdf_file._opened: return '\n'.join([key + ': ' + ( ('\n' + ' ' * (len(key) + 2)).join( [str(value[i]) + ' [' + lib.cdftypenames[value.type(i)] + ']' for i in range(value.max_idx() + 1) if value.has_entry(i)]) if isinstance(value, Attr) else str(value) + ' [' + lib.cdftypenames[self.type(key)] + ']' ) for (key, value) in sorted(self.items())]) else: if isinstance(self._cdf_file.pathname, str): return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname) else: return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname.decode('ascii')) def clone(self, master, name=None, new_name=None): """ Clones another attribute list, or one attribute from it, into this list. Parameters ========== master : AttrList the attribute list to copy from. This can be any dict-like object. Other Parameters ================ name : str (optional) name of attribute to clone (default: clone entire list) new_name : str (optional) name of the new attribute, default ``name`` """ if name is None: self._clone_list(master) else: self._clone_attr(master, name, new_name) def copy(self): """ Create a copy of this attribute list Returns ======= out : dict copy of the entries for all attributes in this list """ return dict((key, value[:] if isinstance(value, Attr) else value) for (key, value) in self.items()) def new(self, name, data=None, type=None): """ Create a new Attr in this AttrList Parameters ========== name : str name of the new Attribute Other Parameters ================ data data to put into the first entry in the new Attribute type CDF type of the first entry from :mod:`~pycdf.const`. Only used if data are specified. Raises ====== KeyError : if the name already exists in this list """ if name in self: raise KeyError(name + ' already exists.') #A zAttr without an Entry in this zVar will be a "get" not "create" attr = self._get_or_create(name) if data is not None: if self.special_entry is None: attr.new(data, type) else: attr.new(data, type, self.special_entry()) def rename(self, old_name, new_name): """ Rename an attribute in this list Renaming a zAttribute renames it for *all* zVariables in this CDF! Parameters ========== old_name : str the current name of the attribute new_name : str the new name of the attribute """ AttrList.__getitem__(self, old_name).rename(new_name) def from_dict(self, in_dict): """ Fill this list of attributes from a dictionary .. deprecated:: 0.1.5 Use :meth:`~pycdf.AttrList.clone` instead; it supports cloning from dictionaries. 
Parameters ========== in_dict : dict Attribute list is populated entirely from this dictionary; all existing attributes are deleted. """ warnings.warn("from_dict is deprecated and will be removed. Use clone.", DeprecationWarning) for k in in_dict: self[k] = in_dict[k] for k in list(self): if not k in in_dict: del self[k] def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{AttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name self[new_name] = master[name] def _clone_list(self, master): """Clones this attribute list from another @param master: the attribute list to copy from @type master: L{AttrList} """ for name in master: self._clone_attr(master, name) for name in list(self): #Can't iterate over a list we're changing if not name in master: del self[name] def _get_or_create(self, name): """Retrieve L{Attr} or create it if it doesn't exist @param name: name of the attribute to look up or create @type name: str @return: attribute with this name @rtype: L{Attr} """ attr = None try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status != const.NO_SUCH_ATTR: raise if attr is None: attr = self.AttrType(self._cdf_file, name, True) elif attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) return attr class gAttrList(AttrList): """ Object representing *all* the gAttributes in a CDF. Normally accessed as an attribute of an open :class:`CDF`: >>> global_attribs = cdffile.attrs Appears as a dictionary: keys are attribute names; each value is an attribute represented by a :class:`gAttr` object. To access the global attribute TEXT: >>> text_attr = cdffile.attrs['TEXT'] See Also ======== :class:`AttrList` """ AttrType = gAttr attr_name = 'gAttribute' global_scope = True def __len__(self): """ Number of gAttributes in this CDF Returns ======= out : int number of gAttributes in the CDF """ count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMgATTRS_, ctypes.byref(count)) return count.value class zAttrList(AttrList): """Object representing *all* the zAttributes in a zVariable. Normally accessed as an attribute of a :class:`Var` in an open CDF: >>> epoch_attribs = cdffile['Epoch'].attrs Appears as a dictionary: keys are attribute names, values are the value of the zEntry associated with the appropriate zVariable. Each vAttribute in a CDF may only have a *single* entry associated with each variable. The entry may be a string, a single numerical value, or a series of numerical values. Entries with multiple values are returned as an entire list; direct access to the individual elements is not possible. Example: finding the first dependency of (ISTP-compliant) variable ``Flux``: >>> print cdffile['Flux'].attrs['DEPEND_0'] zAttributes are shared among zVariables, one zEntry allowed per zVariable. (pyCDF hides this detail.) Deleting the last zEntry for a zAttribute will delete the underlying zAttribute. zEntries are created and destroyed by the usual dict methods on the zAttrlist: >>> epoch_attribs['new_entry'] = [1, 2, 4] #assign a list to new zEntry >>> del epoch_attribs['new_entry'] #delete the zEntry The type of the zEntry is guessed from data provided. 
The type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing zEntry corresponding to this zVar #. other zEntries in this zAttribute #. the type of this zVar #. data-matching constraints described in :py:meth:`CDF.new` See Also ======== :class:`AttrList` """ AttrType = zAttr attr_name = 'zAttribute' global_scope = False def __init__(self, zvar): """Initialize the attribute collection @param zvar: zVariable these attributes are in @param zvar: :py:class:`pycdf.Var` """ super(zAttrList, self).__init__(zvar.cdf_file, zvar._num) self._zvar = zvar def __getitem__(self, name): """Find an zEntry by name @param name: name of the zAttribute to return @type name: str @return: attribute named L{name} @rtype: L{zAttr} @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if attrib.has_entry(zvar_num): attrib._raw = self._zvar._raw return attrib[zvar_num] else: raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) def __delitem__(self, name): """Delete an zEntry by name @param name: name of the zEntry to delete @type name: str @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library @note: If this is the only remaining entry, the Attribute will be deleted. """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(str(name) + ': no such attribute for variable ' + str(self._zvar._name)) del attrib[zvar_num] if len(attrib) == 0: attrib._delete() def __setitem__(self, name, data): """Sets a zEntry by name The type of the zEntry is guessed from L{data}. The type is chosen to match the data; subject to that constraint, it will try to match (in order): 1. existing zEntry corresponding to this zVar 2. other zEntries in this zAttribute 3. the type of this zVar 4. data-matching constraints described in L{_Hyperslice.types} @param name: name of zAttribute; zEntry for this zVariable will be set in zAttribute by this name @type name: str @raise CDFError: errors in CDF library @raise ValueError: if unable to find a valid CDF type matching L{data}, or if L{data} is the wrong dimensions. """ try: attr = super(zAttrList, self).__getitem__(name) except KeyError: attr = zAttr(self._cdf_file, name, True) attr._raw = self._zvar._raw attr[self._zvar._num()] = data def __len__(self): """Number of zAttributes in this variable @return: number of zAttributes in the CDF which have entries for this variable. @rtype: int """ length = 0 count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) current = 0 while current < count.value: candidate = zAttr(self._cdf_file, current) if not candidate.global_scope(): if candidate.has_entry(self._zvar._num()): length += 1 current += 1 return length def type(self, name, new_type=None): """Find or change the CDF type of a zEntry in this zVar @param name: name of the zAttr to check or change @type name: str @param new_type: type to change it to, see :py:mod:`pycdf.const` @type new_type: ctypes.c_long @return: CDF variable type, see :py:mod:`pycdf.const` @rtype: int @note: If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) return attrib.type(zvar_num, new_type) def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{zAttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name if new_name in self: del self[new_name] self.new(new_name, master[name], master.type(name) if hasattr(master, 'type') else None)
expand
Expands the record dimension of this slice to hold a set of data

If the length of data (outermost dimension) is larger than the record
count (counts[0]) for this slice, expand the slice to hold all the data.
This requires that the record dimension of the slice not be degenerate,
and also that it not have been completely specified when the hyperslice
was created (i.e. the record dimension was either an Ellipsis or had no
specified stop).

Does *not* expand any other dimension, since that's Very Hard in CDF.

@param data: the data which are intended to be stored in this slice
@type data: list
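# Illustrative sketch only -- not the actual pycdf implementation of expand().
# The attribute names degen and rec_dim_open are hypothetical stand-ins for
# whatever the real _Hyperslice keeps (counts[0] is named in the docstring);
# the sketch just restates the rule above: grow the record count to fit the
# data when the record dimension is neither degenerate nor fully specified.
def _expand_sketch(hslice, data):
    needed = len(data)                         # outermost dimension of the data
    if hslice.degen[0] or not hslice.rec_dim_open:
        return                                 # cannot (or should not) expand
    if needed > hslice.counts[0]:
        hslice.counts[0] = needed              # expand the record dimension only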
#!/usr/bin/env python # -*- coding: utf-8 -*- """ das developers note: This a is modification of the original SpacePy pycdf package. All refereneces to the greater spacepy package have been removed to create a small standalone module. --cwp 2018-10-18 The libcdf.so location code has been changed to find the version installed in anaconda. --cwp 2020-04-06 This package provides a Python interface to the Common Data Format (CDF) library used for many NASA missions, available at http://cdf.gsfc.nasa.gov/. It is targeted at Python 2.6+ and should work without change on either Python 2 or Python 3. The interface is intended to be 'pythonic' rather than reproducing the C interface. To open or close a CDF and access its variables, see the :class:`CDF` class. Accessing data within the variables is via the :class:`Var` class. The :data:`lib` object provides access to some routines that affect the functionality of the library in general. The :mod:`~pycdf.const` module contains constants useful for accessing the underlying library. Authors: Jon Niehof Institution: University of New Hampshire Contact: [email protected] Copyright 2010-2015 Los Alamos National Security, LLC. """ __contact__ = 'Jon Niehof, [email protected]' try: from collections.abc import MutableMapping, MutableSequence except ImportError: from collections import MutableMapping, MutableSequence import ctypes import ctypes.util import datetime import operator import os import os.path import shutil import sys import tempfile import warnings import weakref import numpy import numpy.ma #Import const AFTER library loaded, so failed load doesn't leave half-imported #from . import const try: str_classes = (str, bytes, unicode) except NameError: str_classes = (str, bytes) class Library(object): """ Abstraction of the base CDF C library and its state. Not normally intended for end-user use. An instance of this class is created at package load time as the :data:`~pycdf.lib` variable, providing access to the underlying C library if necessary. The CDF library itself is described in section 2.1 of the CDF user's guide, as well as the CDF C reference manual. Calling the C library directly requires knowledge of :mod:`ctypes`. Instantiating this object loads the C library, see :doc:`/pycdf` docs for details. .. autosummary:: ~Library.call ~Library.check_status ~Library.datetime_to_epoch ~Library.datetime_to_epoch16 ~Library.datetime_to_tt2000 ~Library.epoch_to_datetime ~Library.epoch_to_epoch16 ~Library.epoch_to_num ~Library.epoch_to_tt2000 ~Library.epoch16_to_datetime ~Library.epoch16_to_epoch ~Library.epoch16_to_tt2000 ~Library.set_backward supports_int8 ~Library.tt2000_to_datetime ~Library.tt2000_to_epoch ~Library.tt2000_to_epoch16 v_datetime_to_epoch v_datetime_to_epoch16 v_datetime_to_tt2000 v_epoch_to_datetime v_epoch_to_tt2000 v_epoch16_to_datetime v_epoch16_to_tt2000 v_tt2000_to_datetime v_tt2000_to_epoch v_tt2000_to_epoch16 libpath version .. automethod:: call .. automethod:: check_status .. automethod:: datetime_to_epoch .. automethod:: datetime_to_epoch16 .. automethod:: datetime_to_tt2000 .. automethod:: epoch_to_datetime .. automethod:: epoch_to_epoch16 .. automethod:: epoch_to_num .. automethod:: epoch_to_tt2000 .. automethod:: epoch16_to_datetime .. automethod:: epoch16_to_epoch .. automethod:: epoch16_to_tt2000 .. automethod:: set_backward .. attribute:: supports_int8 True if this library supports INT8 and TIME_TT2000 types; else False. .. automethod:: tt2000_to_datetime .. automethod:: tt2000_to_epoch .. 
automethod:: tt2000_to_epoch16 .. method:: v_datetime_to_epoch(datetime) A vectorized version of :meth:`datetime_to_epoch` which takes a numpy array of datetimes as input and returns an array of epochs. .. method:: v_datetime_to_epoch16(datetime) A vectorized version of :meth:`datetime_to_epoch16` which takes a numpy array of datetimes as input and returns an array of epoch16. .. method:: v_datetime_to_tt2000(datetime) A vectorized version of :meth:`datetime_to_tt2000` which takes a numpy array of datetimes as input and returns an array of TT2000. .. method:: v_epoch_to_datetime(epoch) A vectorized version of :meth:`epoch_to_datetime` which takes a numpy array of epochs as input and returns an array of datetimes. .. method:: v_epoch_to_tt2000(epoch) A vectorized version of :meth:`epoch_to_tt2000` which takes a numpy array of epochs as input and returns an array of tt2000s. .. method:: v_epoch16_to_datetime(epoch0, epoch1) A vectorized version of :meth:`epoch16_to_datetime` which takes a numpy array of epoch16 as input and returns an array of datetimes. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_epoch16_to_tt2000(epoch16) A vectorized version of :meth:`epoch16_to_tt2000` which takes a numpy array of epoch16 as input and returns an array of tt2000s. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_tt2000_to_datetime(tt2000) A vectorized version of :meth:`tt2000_to_datetime` which takes a numpy array of tt2000 as input and returns an array of datetimes. .. method:: v_tt2000_to_epoch(tt2000) A vectorized version of :meth:`tt2000_to_epoch` which takes a numpy array of tt2000 as input and returns an array of epochs. .. method:: v_tt2000_to_epoch16(tt2000) A vectorized version of :meth:`tt2000_to_epoch16` which takes a numpy array of tt2000 as input and returns an array of epoch16. .. attribute:: libpath The path where pycdf found the CDF C library, potentially useful in debugging. If this contains just the name of a file (with no path information), then the system linker found the library for pycdf. On Linux, ``ldconfig -p`` may be useful for displaying the system's library resolution. .. attribute:: version Version of the CDF library, (version, release, increment, subincrement) """ def __init__(self, libpath=None, library=None): """Load the CDF C library. Searches for the library in the order: 1. Appropriately-named file in CDF_LIB 2. Appropriately-named file in CDF_BASE 3. Standard library search path @raise CDFError: BAD_DATA_TYPE if can't map types properly """ if not 'CDF_TMP' in os.environ: os.environ['CDF_TMP'] = tempfile.gettempdir() if not library: if not libpath: self.libpath, self._library = self._find_lib() if self._library is None: raise Exception(( 'Cannot load CDF C library; checked {0}. 
' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(self.libpath))) else: self._library = ctypes.CDLL(libpath) self.libpath = libpath else: self._library = library self.libpath = libpath self._library.CDFlib.restype = ctypes.c_long #commonly used, so set it up here self._library.EPOCHbreakdown.restype = ctypes.c_long self._library.computeEPOCH.restype = ctypes.c_double self._library.computeEPOCH.argtypes = [ctypes.c_long] * 7 self._library.computeEPOCH16.restype = ctypes.c_double self._library.computeEPOCH16.argtypes = [ctypes.c_long] * 10 + \ [ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDFsetFileBackward'): self._library.CDFsetFileBackward.restype = None self._library.CDFsetFileBackward.argtypes = [ctypes.c_long] #Map old name to the 3.7.1+ name if not hasattr(self._library, 'computeTT2000') \ and hasattr(self._library, 'CDF_TT2000_from_UTC_parts'): self._library.computeTT2000 \ = self._library.CDF_TT2000_from_UTC_parts if hasattr(self._library, 'computeTT2000'): self._library.computeTT2000.restype = ctypes.c_longlong self._library.computeTT2000.argtypes = \ [ctypes.c_double] *9 #Map old name to the 3.7.1+ name if not hasattr(self._library, 'breakdownTT2000') \ and hasattr(self._library, 'CDF_TT2000_to_UTC_parts'): self._library.breakdownTT2000 \ = self._library.CDF_TT2000_to_UTC_parts if hasattr(self._library, 'breakdownTT2000'): self._library.breakdownTT2000.restype = None self._library.breakdownTT2000.argtypes = \ [ctypes.c_longlong] + [ctypes.POINTER(ctypes.c_double)] * 9 if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH'): self._library.CDF_TT2000_to_UTC_EPOCH.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH.argtypes = [ctypes.c_longlong] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH'): self._library.CDF_TT2000_from_UTC_EPOCH.restype = ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH.argtypes = [ctypes.c_double] if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH16'): self._library.CDF_TT2000_to_UTC_EPOCH16.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH16.argtypes = \ [ctypes.c_longlong, ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH16'): self._library.CDF_TT2000_from_UTC_EPOCH16.restype = \ ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH16.argtypes = \ [ctypes.POINTER(ctypes.c_double * 2)] #Get CDF version information ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) sub = ctypes.c_char(b' ') self.call(const.GET_, const.LIB_VERSION_, ctypes.byref(ver), const.GET_, const.LIB_RELEASE_, ctypes.byref(rel), const.GET_, const.LIB_INCREMENT_, ctypes.byref(inc), const.GET_, const.LIB_subINCREMENT_, ctypes.byref(sub)) ver = ver.value rel = rel.value inc = inc.value sub = sub.value self.version = (ver, rel, inc, sub) self._del_middle_rec_bug = ver < 3 or (ver == 3 and (rel < 4 or (rel == 4 and inc < 1))) self.supports_int8 = (ver > 3 or (ver == 3 and rel >=4)) self.cdftypenames = {const.CDF_BYTE.value: 'CDF_BYTE', const.CDF_CHAR.value: 'CDF_CHAR', const.CDF_INT1.value: 'CDF_INT1', const.CDF_UCHAR.value: 'CDF_UCHAR', const.CDF_UINT1.value: 'CDF_UINT1', const.CDF_INT2.value: 'CDF_INT2', const.CDF_UINT2.value: 'CDF_UINT2', const.CDF_INT4.value: 'CDF_INT4', const.CDF_UINT4.value: 'CDF_UINT4', const.CDF_INT8.value: 'CDF_INT8', const.CDF_FLOAT.value: 'CDF_FLOAT', const.CDF_REAL4.value: 'CDF_REAL4', const.CDF_DOUBLE.value: 'CDF_DOUBLE', const.CDF_REAL8.value: 'CDF_REAL8', const.CDF_EPOCH.value: 'CDF_EPOCH', 
const.CDF_EPOCH16.value: 'CDF_EPOCH16', const.CDF_TIME_TT2000.value: 'CDF_TIME_TT2000', } self.numpytypedict = {const.CDF_BYTE.value: numpy.int8, const.CDF_CHAR.value: numpy.int8, const.CDF_INT1.value: numpy.int8, const.CDF_UCHAR.value: numpy.uint8, const.CDF_UINT1.value: numpy.uint8, const.CDF_INT2.value: numpy.int16, const.CDF_UINT2.value: numpy.uint16, const.CDF_INT4.value: numpy.int32, const.CDF_UINT4.value: numpy.uint32, const.CDF_INT8.value: numpy.int64, const.CDF_FLOAT.value: numpy.float32, const.CDF_REAL4.value: numpy.float32, const.CDF_DOUBLE.value: numpy.float64, const.CDF_REAL8.value: numpy.float64, const.CDF_EPOCH.value: numpy.float64, const.CDF_EPOCH16.value: numpy.dtype((numpy.float64, 2)), const.CDF_TIME_TT2000.value: numpy.int64, } self.timetypes = [const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value] if not self.supports_int8: del self.cdftypenames[const.CDF_INT8.value] del self.numpytypedict[const.CDF_INT8.value] del self.cdftypenames[const.CDF_TIME_TT2000.value] del self.numpytypedict[const.CDF_TIME_TT2000.value] elif sys.platform.startswith('linux') \ and os.uname()[4].startswith('arm') \ and hasattr(self._library, 'computeTT2000') \ and self._library.computeTT2000( 2010, 1, 1, 0, 0, 0, 0, 0, 0) != 315576066184000000: #TT2000 call failed, so probably need to type-pun #double arguments to variadic functions. #Calling convention for non-variadic functions with floats #is unique, but convention for ints is same as variadic. #So type-pun arguments to integers to force that calling #convention. if ctypes.sizeof(ctypes.c_longlong) != \ ctypes.sizeof(ctypes.c_double): warnings.warn('ARM with unknown type sizes; ' 'TT2000 functions will not work.') else: self._library.computeTT2000.argtypes = \ [ctypes.c_longlong] * 9 c_ll_p = ctypes.POINTER(ctypes.c_longlong) if self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( 2010)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) != 315576066184000000: warnings.warn('ARM with unknown calling convention; ' 'TT2000 functions will not work.') self.datetime_to_tt2000 = self._datetime_to_tt2000_typepunned v_epoch16_to_datetime = numpy.frompyfunc( self.epoch16_to_datetime, 2, 1) self.v_epoch16_to_datetime = \ lambda x: v_epoch16_to_datetime(x[..., 0], x[..., 1]) self.v_epoch_to_datetime = numpy.frompyfunc( self.epoch_to_datetime, 1, 1) self.v_tt2000_to_datetime = numpy.frompyfunc( self.tt2000_to_datetime, 1, 1) self.v_datetime_to_epoch = numpy.vectorize( self.datetime_to_epoch, otypes=[numpy.float64]) v_datetime_to_epoch16 = numpy.frompyfunc( self.datetime_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. 
We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_datetime_to_epoch16(x): retval = numpy.require(v_datetime_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_datetime_to_epoch16 = _v_datetime_to_epoch16 self.v_datetime_to_tt2000 = numpy.vectorize( self.datetime_to_tt2000, otypes=[numpy.int64]) self.v_epoch_to_tt2000 = numpy.vectorize( self.epoch_to_tt2000, otypes=[numpy.int64]) self.v_tt2000_to_epoch = numpy.vectorize( self.tt2000_to_epoch, otypes=[numpy.float64]) v_epoch16_to_tt2000 = numpy.frompyfunc( self.epoch16_to_tt2000, 2, 1) self.v_epoch16_to_tt2000 = \ lambda x: v_epoch16_to_tt2000(x[..., 0], x[..., 1]) v_tt2000_to_epoch16 = numpy.frompyfunc( self.tt2000_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_tt2000_to_epoch16(x): retval = numpy.require(v_tt2000_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_tt2000_to_epoch16 = _v_tt2000_to_epoch16 if not self.supports_int8: self.datetime_to_tt2000 = self._bad_tt2000 self.tt2000_to_datetime = self._bad_tt2000 self.v_datetime_to_tt2000 = self._bad_tt2000 self.v_tt2000_to_datetime = self._bad_tt2000 self.epoch_to_tt2000 = self._bad_tt2000 self.v_epoch_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch = self._bad_tt2000 self.v_tt2000_to_epoch = self._bad_tt2000 self.epoch_16_to_tt2000 = self._bad_tt2000 self.v_epoch16_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch16 = self._bad_tt2000 self.v_tt2000_to_epoch16 = self._bad_tt2000 #Default to V2 CDF self.set_backward(True) @staticmethod def _find_lib(): """ Search for the CDF library Searches in likely locations for CDF libraries and attempts to load them. Stops at first successful load and, if fails, reports all the files that were tried as libraries. Returns ======= out : tuple This is either (path to library, loaded library) or, in the event of failure, (None, list of libraries tried) """ failed = [] for libpath in Library._lib_paths(): try: lib = ctypes.CDLL(libpath) except: failed.append(libpath) else: return libpath, lib return (failed, None) @staticmethod def _lib_paths(): """Find candidate paths for the CDF library Does not check that the library is actually in any particular directory, just returns a list of possible locations, in priority order. Returns ======= out : generator of str paths that look like the CDF library """ #What the library might be named names = { 'win32': ['cdf.dll'], 'darwin': ['libcdf.dylib', 'cdf.dylib', 'libcdf.so'], 'linux2': ['libcdf.so'], 'linux': ['libcdf.so'], } names = names.get(sys.platform, ['libcdf.so']) #All existing CDF-library-like paths within a directory search_dir = lambda x: \ [os.path.join(x, fname) for fname in names if os.path.exists(os.path.join(x, fname))] # Only use anaconda locations... # Defined during builds ... if 'PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['PREFIX'], 'lib')): yield p # defined when conda is activated ... 
if 'CONDA_PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'lib')): yield p # Special subdirectory for anaconda unix packages on windows if 'LIBRARY_BIN' in os.environ: for p in search_dir(os.environ['LIBRARY_BIN']): yield p ctypespath = ctypes.util.find_library( 'cdf.dll' if sys.platform == 'win32' else 'cdf') if ctypespath: yield ctypespath def check_status(self, status, ignore=()): """ Raise exception or warning based on return status of CDF call Parameters ========== status : int status returned by the C library Other Parameters ================ ignore : sequence of ctypes.c_long CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. (Default none). Raises ====== CDFError : if status < CDF_WARN, indicating an error Warns ===== CDFWarning : if CDF_WARN <= status < CDF_OK, indicating a warning. Returns ======= out : int status (unchanged) """ if status == const.CDF_OK or status in ignore: return status if status < const.CDF_WARN: raise CDFError(status) else: warning = CDFWarning(status) warning.warn() return status def call(self, *args, **kwargs): """ Call the CDF internal interface Passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with :meth:`check_status`. Terminal NULL is automatically added to args. Parameters ========== args : various, see :mod:`ctypes` Passed directly to the CDF library interface. Useful constants are defined in the :mod:`~pycdf.const` module. Other Parameters ================ ignore : sequence of CDF statuses sequence of CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. Returns ======= out : int CDF status from the library Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ if 'ignore' in kwargs: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) ), kwargs['ignore']) else: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) )) def set_backward(self, backward=True): """ Set backward compatibility mode for new CDFs Unless backward compatible mode is set, CDF files created by the version 3 library can not be read by V2. Parameters ========== backward : boolean Set backward compatible mode if True; clear it if False. Raises ====== ValueError : if backward=False and underlying CDF library is V2 """ if self.version[0] < 3: if not backward: raise ValueError( 'Cannot disable backward-compatible mode for CDF version 2.') else: return self._library.CDFsetFileBackward(const.BACKWARDFILEon if backward else const.BACKWARDFILEoff) def epoch_to_datetime(self, epoch): """ Converts a CDF epoch value to a datetime Parameters ========== epoch : float epoch value from CDF Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
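Examples
========
Illustrative; the epoch value is the one quoted in this module's
``epoch_to_num`` comment (0001-01-01 00:00 UT is 31622400000.0 ms):

>>> lib.epoch_to_datetime(31622400000.0)
datetime.datetime(1, 1, 1, 0, 0)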
See Also ======== v_epoch_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) self._library.EPOCHbreakdown(ctypes.c_double(epoch), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999000) else: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, msec.value * 1000) def datetime_to_epoch(self, dt): """ Converts a Python datetime to a CDF Epoch value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : float epoch corresponding to dt See Also ======== v_datetime_to_epoch """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) micro = dt.microsecond % 1000 if micro >= 500 and dt.year < 9999: dt += datetime.timedelta(0, 0, 1000) return self._library.computeEPOCH(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000)) def epoch16_to_datetime(self, epoch0, epoch1): """ Converts a CDF epoch16 value to a datetime .. note:: The call signature has changed since SpacePy 0.1.2. Formerly this method took a single argument with two values; now it requires two arguments (one for each value). To convert existing code, replace ``epoch16_to_datetime(epoch)`` with ``epoch16_to_datetime(*epoch)``. Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. See Also ======== v_epoch16_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) usec = ctypes.c_long(0) nsec = ctypes.c_long(0) psec = ctypes.c_long(0) self._library.EPOCH16breakdown((ctypes.c_double * 2)(epoch0, epoch1), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec), ctypes.byref(psec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) micro = int(float(msec.value) * 1000 + float(usec.value) + float(nsec.value) / 1000 + float(psec.value) / 1e6 + 0.5) if micro < 1000000: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_epoch16(self, dt): """ Converts a Python datetime to a CDF Epoch16 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : list of float epoch16 corresponding to dt See Also ======== v_datetime_to_epoch16 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) #Default to "illegal epoch" epoch16 = (ctypes.c_double * 2)(-1., -1.) 
if self._library.computeEPOCH16(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0, 0, epoch16): return (-1., -1.) #Failure, so illegal epoch return (epoch16[0], epoch16[1]) def epoch_to_epoch16(self, epoch): """ Converts a CDF EPOCH to a CDF EPOCH16 value Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : (double, double) EPOCH16 corresponding to epoch """ e = numpy.require(epoch, numpy.float64) s = numpy.trunc(e / 1000.0) #ugly numpy stuff, probably a better way.... res = numpy.hstack((s, (e - s * 1000.0) * 1e9)) if len(res) <= 2: return res newshape = list(res.shape[0:-2]) newshape.append(res.shape[-1] // 2) newshape.append(2) return numpy.rollaxis(res.reshape(newshape), -1, -2) def epoch_to_num(self, epoch): """ Convert CDF EPOCH to matplotlib number. Same output as :func:`~matplotlib.dates.date2num` and useful for plotting large data sets without converting the times through datetime. Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : double Floating point number representing days since 0001-01-01. """ #date2num day 1 is 1/1/1 00UT #epoch 1/1/1 00UT is 31622400000.0 (millisecond) return (epoch - 31622400000.0) / (24 * 60 * 60 * 1000.0) + 1.0 def epoch16_to_epoch(self, epoch16): """ Converts a CDF EPOCH16 to a CDF EPOCH value Parameters ========== epoch16 : (double, double) EPOCH16 to convert. Lists and numpy arrays are acceptable. LAST dimension should be 2: the two pairs of EPOCH16 Returns ======= out : double EPOCH corresponding to epoch16 """ e = numpy.require(epoch16, numpy.float64) return e[..., 0] * 1000.0 + numpy.round(e[..., 1] / 1e9) def tt2000_to_datetime(self, tt2000): """ Converts a CDF TT2000 value to a datetime .. note:: Although TT2000 values support leapseconds, Python's datetime object does not. Any times after 23:59:59.999999 will be truncated to 23:59:59.999999. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
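Examples
========
Illustrative; the TT2000 value is the one this module uses in its own
library self-check (2010-01-01 00:00:00 UTC):

>>> lib.tt2000_to_datetime(315576066184000000)
datetime.datetime(2010, 1, 1, 0, 0)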
See Also ======== v_tt2000_to_datetime """ yyyy = ctypes.c_double(0) mm = ctypes.c_double(0) dd = ctypes.c_double(0) hh = ctypes.c_double(0) min = ctypes.c_double(0) sec = ctypes.c_double(0) msec = ctypes.c_double(0) usec = ctypes.c_double(0) nsec = ctypes.c_double(0) self._library.breakdownTT2000( ctypes.c_longlong(tt2000), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) sec = int(sec.value) if sec >= 60: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), 59, 999999) micro = int(msec.value * 1000 + usec.value + nsec.value / 1000 + 0.5) if micro < 1000000: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_tt2000(self, dt): """ Converts a Python datetime to a CDF TT2000 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0) def _datetime_to_tt2000_typepunned(self, dt): """ Converts a Python datetime to a CDF TT2000 value Typepunned version that passes doubles as longlongs, to get around ARM calling convention oddness. Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ c_ll_p = ctypes.POINTER(ctypes.c_longlong) if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( dt.year)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.month)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.day)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.hour)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.minute)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.second)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond // 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond % 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) def epoch_to_tt2000(self, epoch): """ Converts a CDF EPOCH to a CDF TT2000 value Parameters ========== epoch : double EPOCH to convert Returns ======= out : int tt2000 corresponding to epoch See Also ======== v_epoch_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH(epoch) def tt2000_to_epoch(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH .. note:: Although TT2000 values support leapseconds, CDF EPOCH values do not. 
Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double EPOCH corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch """ return self._library.CDF_TT2000_to_UTC_EPOCH(tt2000) def epoch16_to_tt2000(self, epoch0, epoch1): """ Converts a CDF epoch16 value to TT2000 .. note:: Because TT2000 does not support picoseconds, the picoseconds value in epoch is ignored (i.e., truncated.) Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : long TT2000 corresponding to epoch. See Also ======== v_epoch16_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH16( (ctypes.c_double * 2)(epoch0, epoch1)) def tt2000_to_epoch16(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH16 .. note:: Although TT2000 values support leapseconds, CDF EPOCH16 values do not. Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double, double EPOCH16 corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch16 """ #Default to "illegal epoch" if isn't populated epoch16 = (ctypes.c_double * 2)(-1., -1.) if self._library.CDF_TT2000_to_UTC_EPOCH16(tt2000, epoch16): return (-1., -1.) #Failure; illegal epoch return (epoch16[0], epoch16[1]) def _bad_tt2000(*args, **kwargs): """Convenience function for complaining that TT2000 not supported""" raise NotImplementedError( 'TT2000 functions require CDF library 3.4.0 or later') def download_library(): """Download and install the CDF library""" if sys.platform != 'win32': raise NotImplementedError( 'CDF library install only supported on Windows') try: import html.parser as HTMLParser except ImportError: import HTMLParser #https://stackoverflow.com/questions/1713038/super-fails-with-error-typeerror-argument-1-must-be-type-not-classobj class LinkParser(HTMLParser.HTMLParser, object): def __init__(self, *args, **kwargs): self.links_found = [] super(LinkParser, self).__init__(*args, **kwargs) def handle_starttag(self, tag, attrs): if tag != 'a' or attrs[0][0] != 'href': return self.links_found.append(attrs[0][1]) import re import subprocess try: import urllib.request as u except ImportError: import urllib as u # Removed reference to spacepy #import spacepy #if spacepy.config.get('user_agent', None): # class AppURLopener(u.FancyURLopener): # version = spacepy.config['user_agent'] # u._urlopener = AppURLopener() baseurl = 'https://spdf.sci.gsfc.nasa.gov/pub/software/cdf/dist/' url = u.urlopen(baseurl) listing = url.read() url.close() p = LinkParser() p.feed(listing) cdfdist = [l for l in p.links_found if re.match('^cdf3\d_\d(?:_\d)?/$', l)] if not cdfdist: raise RuntimeError( "Couldn't find CDF distribution directory to download") cdfdist.sort(key=lambda x: x.rstrip('/').split('_')) cdfverbase = cdfdist[-1].rstrip('/') instfname = cdfverbase + ('_0' if cdfverbase.count('_') == 1 else '') + \ '-setup-{0}.exe'.format(len('%x' % sys.maxsize)*4) insturl = baseurl + cdfverbase + '/windows/' + instfname tmpdir = tempfile.mkdtemp() try: fname, status = u.urlretrieve(insturl, os.path.join(tmpdir, instfname)) subprocess.check_call([fname, '/install', '/q1'], shell=False) finally: shutil.rmtree(tmpdir) _libpath, _library = Library._find_lib() 
if _library is None: raise Exception(('Cannot load CDF C library; checked {0}. ' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(_libpath))) from . import const lib = Library(_libpath, _library) """Module global library object. Initialized at module load time so all classes have ready access to the CDF library and a common state. E.g.: >>> import pycdf >>> pycdf.lib.version (3, 3, 0, ' ') """ class CDFException(Exception): """ Base class for errors or warnings in the CDF library. Not normally used directly, but in subclasses :class:`CDFError` and :class:`CDFWarning`. Error messages provided by this class are looked up from the underlying C library. """ def __init__(self, status): """ Create a CDF Exception Uses CDF C library to look up an appropriate error message. Parameters ========== status : ctypes.c_long CDF status """ self.status = status self.string = 'CDF error ' + repr(status) + ', unable to get details.' message = ctypes.create_string_buffer(const.CDF_STATUSTEXT_LEN + 1) try: retval = lib._library.CDFlib(const.SELECT_, const.CDF_STATUS_, ctypes.c_long(status), const.GET_, const.STATUS_TEXT_, message, const.NULL_) if retval == const.CDF_OK: if isinstance(message.value, str): self.string = message.value elif isinstance(message.value, bytes): self.string = message.value.decode() except: pass def __str__(self): """ Error string associated with the library error. Returns ======= out : str Error message from the CDF library. """ return self.string class CDFError(CDFException): """Raised for an error in the CDF library.""" pass class CDFWarning(CDFException, UserWarning): """Used for a warning in the CDF library.""" def warn(self, level=4): """ Issues a warning based on the information stored in my exception Intended for use in check_status or similar wrapper function. Other Parameters ================ level : int optional (default 4), how far up the stack the warning should be reported. Passed directly to :func:`warnings.warn`. """ warnings.warn(self, self.__class__, level) class EpochError(Exception): """Used for errors in epoch routines""" pass def _compress(obj, comptype=None, param=None): """Set or check the compression of a :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param obj: object on which to set or check compression @type obj: :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param comptype: type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :py:mod:`pycdf.const`. If not specified, will not change compression. @type comptype: ctypes.c_long @param param: Compression parameter, see CDF CRM 4.10 and :py:mod:`pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.)
@type param: ctypes.c_long @return: (comptype, param) currently in effect @rtype: tuple """ if isinstance(obj, CDF): COMPRESSION_ = const.CDF_COMPRESSION_ elif isinstance(obj, Var): COMPRESSION_ = const.zVAR_COMPRESSION_ else: raise ValueError('Must specify a CDF or Var type.') validparams = {const.NO_COMPRESSION.value: [ctypes.c_long(0)], const.RLE_COMPRESSION.value: [const.RLE_OF_ZEROs], const.HUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.AHUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.GZIP_COMPRESSION.value: [ctypes.c_long(5), ctypes.c_long(1), ctypes.c_long(2), ctypes.c_long(3), ctypes.c_long(4), ctypes.c_long(6), ctypes.c_long(7), ctypes.c_long(8), ctypes.c_long(9), ], } comptypes = [const.NO_COMPRESSION, const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION, const.GZIP_COMPRESSION] comptypevalues = [i.value for i in comptypes] if comptype != None: if not hasattr(comptype, 'value'): comptype = ctypes.c_long(comptype) if param is None: if not comptype.value in validparams: raise CDFError(const.BAD_COMPRESSION) param = validparams[comptype.value][0] paramlist = (ctypes.c_long * 1)(param) obj._call(const.PUT_, COMPRESSION_, comptype, paramlist) params = (ctypes.c_long * const.CDF_MAX_PARMS)(*([0] * const.CDF_MAX_PARMS)) comptype = ctypes.c_long(0) percent = ctypes.c_long(0) obj._call(const.GET_, COMPRESSION_, ctypes.byref(comptype), ctypes.byref(params), ctypes.byref(percent)) param = params[0] if not comptype.value in comptypevalues: raise CDFError(const.BAD_COMPRESSION) validparamvalues = [i.value for i in validparams[comptype.value]] if not param in validparamvalues: raise CDFError(const.BAD_COMPRESSION_PARM) comptype = comptypes[comptypevalues.index(comptype.value)] if comptype in (const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION): param = validparams[comptype.value][validparamvalues.index(param)] return (comptype, param) class CDF(MutableMapping): """ Python object representing a CDF file. Open or create a CDF file by creating an object of this class. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. A readonly CDF with many variables may be slow to close. See :meth:`readonly`. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :meth:`close` or :meth:`save` when done. .. note:: Existing CDF files are opened read-only by default, see :meth:`readonly` to change. CDF supports the `with <http://docs.python.org/tutorial/inputoutput.html#methods-of-file-objects>`_ keyword, like other file objects, so: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... #do brilliant things with the CDF will open the CDF, execute the indented statements, and close the CDF when finished or when an error occurs. The `python docs <http://docs.python.org/reference/compound_stmts.html#with>`_ include more detail on this 'context manager' ability. 
CDF objects behave like a python `dictionary <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_, where the keys are names of variables in the CDF, and the values, :class:`Var` objects. As a dictionary, they are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_ and it is easy to loop over all of the variables in a file. Some examples: #. List the names of all variables in the open CDF ``cdffile``: >>> cdffile.keys() >>> for k in cdffile: #Alternate ... print(k) #. Get a :class:`Var` object for the variable named ``Epoch``: >>> epoch = cdffile['Epoch'] #. Determine if a CDF contains a variable named ``B_GSE``: >>> if 'B_GSE' in cdffile: ... print('B_GSE is in the file') ... else: ... print('B_GSE is not in the file') #. Find how many variables are in the file: >>> print(len(cdffile)) #. Delete the variable ``Epoch`` from the open CDF file ``cdffile``: >>> del cdffile['Epoch'] #. Display a summary of variables and types in open CDF file ``cdffile``: >>> print(cdffile) #. Open the CDF named ``cdf_filename.cdf``, read *all* the data from all variables into dictionary ``data``, and close it when done or if an error occurs: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... data = cdffile.copy() This last example can be very inefficient as it reads the entire CDF. Normally it's better to treat the CDF as a dictionary and access only the data needed, which will be pulled transparently from disc. See :class:`Var` for more subtle examples. Potentially useful dictionary methods and related functions: - `in <http://docs.python.org/reference/expressions.html#in>`_ - `keys <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_ - :py:func:`len` - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - :py:func:`sorted` - :py:func:`~spacepy.toolbox.dictree` The CDF user's guide section 2.2 has more background information on CDF files. The :attr:`~CDF.attrs` Python attribute acts as a dictionary referencing CDF attributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`gAttrList` for more on the dictionary of global attributes. Creating a new CDF from a master (skeleton) CDF has similar syntax to opening one: >>> cdffile = pycdf.CDF('cdf_filename.cdf', 'master_cdf_filename.cdf') This creates and opens ``cdf_filename.cdf`` as a copy of ``master_cdf_filename.cdf``. Using a skeleton CDF is recommended over making a CDF entirely from scratch, but this is possible by specifying a blank master: >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') When CDFs are created in this way, they are opened read-write, see :py:meth:`readonly` to change. By default, new CDFs (without a master) are created in version 2 (backward-compatible) format. To create a version 3 CDF, use :meth:`Library.set_backward`: >>> pycdf.lib.set_backward(False) >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') Add variables by direct assignment, which will automatically set type and dimension based on the data provided: >>> cdffile['new_variable_name'] = [1, 2, 3, 4] or, if more control is needed over the type and dimensions, use :py:meth:`new`. Although it is supported to assign Var objects to Python variables for convenience, there are some minor pitfalls that can arise when changing a CDF that will not affect most users. 
This is only a concern when assigning a zVar object to a Python variable, changing the CDF through some other variable, and then trying to use the zVar object via the originally assigned variable. Deleting a variable: >>> var = cdffile['Var1'] >>> del cdffile['Var1'] >>> var[0] #fail, no such variable Renaming a variable: >>> var = cdffile['Var1'] >>> cdffile['Var1'].rename('Var2') >>> var[0] #fail, no such variable Renaming via the same variable works: >>> var = cdffile['Var1'] >>> var.rename('Var2') >>> var[0] #succeeds, aware of new name Deleting a variable and then creating another variable with the same name may lead to some surprises: >>> var = cdffile['Var1'] >>> var[...] = [1, 2, 3, 4] >>> del cdffile['Var1'] >>> cdffile.new('Var1', data=[5, 6, 7, 8]) >>> var[...] [5, 6, 7, 8] .. autosummary:: ~CDF.attr_num ~CDF.attrs ~CDF.add_attr_to_cache ~CDF.add_to_cache ~CDF.backward ~CDF.checksum ~CDF.clear_attr_from_cache ~CDF.clear_from_cache ~CDF.clone ~CDF.close ~CDF.col_major ~CDF.compress ~CDF.copy ~CDF.from_data ~CDF.new ~CDF.raw_var ~CDF.readonly ~CDF.save ~CDF.var_num ~CDF.version .. attribute:: CDF.attrs Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. .. attribute:: CDF.backward True if this CDF was created in backward-compatible mode (for opening with CDF library before 3.x) .. automethod:: add_to_cache .. automethod:: add_attr_to_cache .. automethod:: attr_num .. automethod:: checksum .. automethod:: clear_from_cache .. automethod:: clear_attr_from_cache .. automethod:: clone .. automethod:: close .. automethod:: col_major .. automethod:: compress .. automethod:: copy .. automethod:: from_data .. automethod:: new .. automethod:: raw_var .. automethod:: readonly .. automethod:: save .. automethod:: var_num .. automethod:: version """ def __init__(self, pathname, masterpath=None, create=None, readonly=None): """Open or create a CDF file. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. Raises ====== CDFError if CDF library reports an error CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save` when done. 
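As a further sketch (the filename here is hypothetical), a new CDF can be created from a blank master, populated by assignment, and closed: >>> newfile = pycdf.CDF('new_filename.cdf', '') >>> newfile['variable_name'] = [1, 2, 3, 4] >>> newfile.close()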
""" if masterpath is not None: #Looks like we want to create if create is False: raise ValueError('Cannot specify a master CDF without creating a CDF') if readonly is True: raise ValueError('Cannot create a CDF in readonly mode') if create and readonly: raise ValueError('Cannot create a CDF in readonly mode') try: self.pathname = pathname.encode() except AttributeError: raise ValueError( 'pathname must be string-like: {0}'.format(pathname)) self._handle = ctypes.c_void_p(None) self._opened = False if masterpath is None and not create: self._open(True if readonly is None else readonly) elif masterpath: self._from_master(masterpath.encode()) else: self._create() lib.call(const.SELECT_, const.CDF_zMODE_, ctypes.c_long(2)) self._attrlistref = weakref.ref(gAttrList(self)) self.backward = self.version()[0] < 3 self._var_nums = {} """Cache of name-to-number mappings for variables in this CDF""" self._attr_info = {} """Cache of name-to-(number, global) mappings for attributes in this CDF""" def __del__(self): """Destructor; called when CDF object is destroyed. Close CDF file if there is still a valid handle. .. note:: To avoid data loss, explicitly call :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save`. """ if self._opened: self.close() def __delitem__(self, name): """Delete a zVariable in this CDF, by name or number Parameters ========== name : string or int Name or number of the CDF variable .. note: Variable numbers may change if variables are added or removed. Examples ======== Delete the variable ``Epoch`` from the open CDF file ``cdffile``. >>> del cdffile['Epoch'] """ self[name]._delete() def __enter__(self): """Context manager entrance function.""" return self def __exit__(self, type, value, traceback): """Context manager exit function. Close CDF file. """ self.close() def __getitem__(self, name): """Gets a zVariable in this CDF, by name or number The CDF acts like a dict @param name: Name or number of the CDF variable @type name: string or int @return: CDF variable named or numbered L{name} @rtype: :py:class:`pycdf.Var` @raise KeyError: for pretty much any problem in lookup @note: variable numbers may change if variables are added or removed. """ try: return Var(self, name) except CDFException as e: raise KeyError('{0}: {1}'.format(name, e)) def __setitem__(self, name, data): """Writes data to a zVariable in this CDF If the zVariable does not exist, will create one matching L{data}. If it does exist, will attempt to write L{data} to it without changing the type or dimensions. @param name: name or number of the variable to write @type name: str or int @param data: data to write, or a :py:class:`pycdf.Var` to copy """ if isinstance(data, Var): self.clone(data, name) elif name in self: self[name][...] 
= data if hasattr(data, 'attrs'): self[name].attrs.clone(data.attrs) else: self.new(name, data) def __iter__(self, current = 0): """Iterates over zVars in CDF Iterators for dicts return keys @note: Returned in variable-number order """ while current < self.__len__(): name = self[current].name() value = (yield name) if value is None: current += 1 else: current = self[value]._num() current += 1 def __len__(self): """Implements 'length' of CDF (number of zVars) @return: number of zVars in the CDF @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, const.CDF_NUMzVARS_, ctypes.byref(count)) return count.value def __contains__(self, key): """Determines whether a particular variable name is in the CDF @note: Essentially an efficiency function; L{__iter__} is called if this isn't defined @param key: key/variable name to check @type key: string @return: True if L{key} is the name of a variable in CDF, else False @rtype: Boolean """ try: foo = self[key] return True except KeyError as e: expected = str(key) + \ ": NO_SUCH_VAR: Named variable not found in this CDF." if expected in e.args: return False raise def __repr__(self): """Returns representation of CDF Cannot return anything that can be eval'd to create a copy of the CDF, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<CDF:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the CDF This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.CDF`, just the names, types, and sizes of all variables. (Attributes are not listed.) @return: description of the variables in the CDF @rtype: str """ if self._opened: return '\n'.join([key + ': ' + str(value) for (key, value) in sorted(self.items())]) #can get away with this sort because second value in tuple isn't #compared unless first are different, and variable name is unique. else: if isinstance(self.pathname, str): return 'Closed CDF {0}'.format(self.pathname) else: return 'Closed CDF {0}'.format(self.pathname.decode('ascii')) def _open(self, readonly=True): """Opens the CDF file (called on init) Will open an existing CDF file read/write. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.OPEN_, const.CDF_, self.pathname, ctypes.byref(self._handle)) self._opened = True if readonly: #Default is RW self.readonly(readonly) def _create(self): """Creates (and opens) a new CDF file Created at ``pathname``. Assumes zero-dimension r variables Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.CREATE_, const.CDF_, self.pathname, ctypes.c_long(0), (ctypes.c_long * 1)(0), ctypes.byref(self._handle)) self._opened = True def _from_master(self, master_path): """Creates a new CDF from a master CDF file ``master_path`` is copied to ``pathname`` and opened. Parameters ========== master_path : string location of the master CDF file Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. 
note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ if os.path.exists(self.pathname): raise CDFError(const.CDF_EXISTS) shutil.copy2(master_path, self.pathname) self._open(False) def _call(self, *args, **kwargs): """Select this CDF as current and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. Parameters ========== args : various, see :py:mod:`ctypes`. Passed directly to the CDF library interface. Useful constants are defined in the :doc:`const <pycdf_const>` module of this package. Returns ======= out : ctypes.c_long CDF status from the library .. note: Terminal NULL_ is automatically added to ``args``. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. """ return lib.call(const.SELECT_, const.CDF_, self._handle, *args, **kwargs) def clone(self, zVar, name=None, data=True): """ Clone a zVariable (from another CDF or this) into this CDF Parameters ========== zVar : :py:class:`Var` variable to clone Other Parameters ================ name : str Name of the new variable (default: name of the original) data : boolean (optional) Copy data, or only type, dimensions, variance, attributes? (default: True, copy data as well) Returns ======= out : :py:class:`Var` The newly-created zVar in this CDF """ if name is None: name = zVar.name() if name in self: del self[name] self.new(name, type=zVar.type(), recVary=zVar.rv(), dimVarys=zVar.dv(), dims=zVar._dim_sizes(), n_elements=zVar._nelems()) self[name].compress(*zVar.compress()) self[name].attrs.clone(zVar.attrs) if data: r = zVar._raw zVar._raw = True self.raw_var(name)[...] = zVar[...] zVar._raw = r return zVar def col_major(self, new_col=None): """ Finds the majority of this CDF file Other Parameters ================ new_col : boolean Specify True to change to column-major, False to change to row major, or do not specify to check the majority rather than changing it. (default is check only) Returns ======= out : boolean True if column-major, false if row-major """ if new_col != None: new_maj = const.COLUMN_MAJOR if new_col else const.ROW_MAJOR self._call(const.PUT_, const.CDF_MAJORITY_, new_maj) maj = ctypes.c_long(0) self._call(const.GET_, const.CDF_MAJORITY_, ctypes.byref(maj)) if not maj.value in (const.ROW_MAJOR.value, const.COLUMN_MAJOR.value): raise CDFError(const.BAD_MAJORITY) return maj.value == const.COLUMN_MAJOR.value def readonly(self, ro=None): """ Sets or check the readonly status of this CDF If the CDF has been changed since opening, setting readonly mode will have no effect. .. note:: Closing a CDF that has been opened readonly, or setting readonly False, may take a substantial amount of time if there are many variables in the CDF, as a (potentially large) cache needs to be cleared. Consider specifying ``readonly=False`` when opening the file if this is an issue. However, this may make some reading operations slower. Other Parameters ================ ro : Boolean True to set the CDF readonly, False to set it read/write, or leave out to check only. 
Returns ======= out : Boolean True if CDF is read-only, else False Raises ====== CDFError : if bad mode is set """ if ro == True: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYon) elif ro == False: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYoff) mode = ctypes.c_long(0) self._call(const.CONFIRM_, const.CDF_READONLY_MODE_, ctypes.byref(mode)) if mode.value == const.READONLYon.value: return True elif mode.value == const.READONLYoff.value: return False else: raise CDFError(const.BAD_READONLY_MODE.value) def checksum(self, new_val=None): """ Set or check the checksum status of this CDF. If checksums are enabled, the checksum will be verified every time the file is opened. Other Parameters ================ new_val : boolean True to enable checksum, False to disable, or leave out to simply check. Returns ======= out : boolean True if the checksum is enabled or False if disabled """ if new_val != None: self._call(const.PUT_, const.CDF_CHECKSUM_, const.MD5_CHECKSUM if new_val else const.NO_CHECKSUM) chk = ctypes.c_long(0) self._call(const.GET_, const.CDF_CHECKSUM_, ctypes.byref(chk)) if not chk.value in (const.MD5_CHECKSUM.value, const.NO_CHECKSUM.value): raise CDFError(const.BAD_CHECKSUM) return chk.value == const.MD5_CHECKSUM.value def close(self): """ Closes the CDF file Although called on object destruction (:meth:`~CDF.__del__`), to ensure all data are saved, the user should explicitly call :meth:`~CDF.close` or :meth:`~CDF.save`. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.CLOSE_, const.CDF_) self._opened = False def compress(self, comptype=None, param=None): """ Set or check the compression of this CDF Sets compression on entire *file*, not per-variable. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) Returns ======= out : tuple (comptype, param) currently in effect See Also ======== :meth:`Var.compress` Examples ======== Set file ``cdffile`` to gzip compression, compression level 9: >>> cdffile.compress(pycdf.const.GZIP_COMPRESSION, 9) """ return _compress(self, comptype, param) def new(self, name, data=None, type=None, recVary=True, dimVarys=None, dims=None, n_elements=None, compress=None, compress_param=None): """ Create a new zVariable in this CDF .. note:: Either ``data`` or ``type`` must be specified. If type is not specified, it is guessed from ``data``. Parameters ========== name : str name of the new variable Other Parameters ================ data data to store in the new variable. If this has a an ``attrs`` attribute (e.g., :class:`~spacepy.datamodel.dmarray`), it will be used to populate attributes of the new variable. type : ctypes.c_long CDF type of the variable, from :mod:`~pycdf.const`. See section 2.5 of the CDF user's guide for more information on CDF data types. recVary : boolean record variance of the variable (default True) dimVarys : list of boolean dimension variance of each dimension, default True for all dimensions. 
dims : list of int size of each dimension of this variable, default zero-dimensional. Note this is the dimensionality as defined by CDF, i.e., for record-varying variables it excludes the leading record dimension. See :py:class:`Var`. n_elements : int number of elements, should be 1 except for CDF_CHAR, for which it's the length of the string. compress : ctypes.c_long Compression to apply to this variable, default None. See :py:meth:`Var.compress`. compress_param : ctypes.c_long Compression parameter if compression used; reasonable default is chosen. See :py:meth:`Var.compress`. Returns ======= out : :py:class:`Var` the newly-created zVariable Raises ====== ValueError : if neither data nor sufficient typing information is provided. Notes ===== Any given data may be representable by a range of CDF types; if the type is not specified, pycdf will guess which the CDF types which can represent this data. This breaks down to: #. If input data is a numpy array, match the type of that array #. Proper kind (numerical, string, time) #. Proper range (stores highest and lowest number provided) #. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: #. Type that matches precision of data first, then #. integer type before float type, then #. Smallest type first, then #. signed type first, then #. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if ``data`` specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: #. absolute values between 0 and 3e-39 #. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. """ if type in (const.CDF_EPOCH16, const.CDF_INT8, const.CDF_TIME_TT2000) \ and self.backward: raise ValueError('Cannot use EPOCH16, INT8, or TIME_TT2000 ' 'in backward-compatible CDF') if not lib.supports_int8 and \ type in (const.CDF_INT8, const.CDF_TIME_TT2000): raise ValueError('INT8 and TIME_TT2000 require CDF library 3.4.0') if data is None: if type is None: raise ValueError('Must provide either data or a CDF type.') if dims is None: dims = [] if n_elements is None: n_elements = 1 else: (guess_dims, guess_types, guess_elements) = _Hyperslice.types(data) if dims is None: if recVary: if guess_dims == (): raise ValueError( 'Record-varying data cannot be scalar. 
' 'Specify NRV with CDF.new() or put data in array.') dims = guess_dims[1:] else: dims = guess_dims if type is None: type = guess_types[0] if type == const.CDF_EPOCH16.value and self.backward: type = const.CDF_EPOCH if n_elements is None: n_elements = guess_elements if dimVarys is None: dimVarys = [True for i in dims] recVary = const.VARY if recVary else const.NOVARY dimVarys = [const.VARY if dimVary else const.NOVARY for dimVary in dimVarys] if not hasattr(type, 'value'): type = ctypes.c_long(type) if type.value == const.CDF_INT8.value and not lib.supports_int8: raise ValueError( '64-bit integer support require CDF library 3.4.0') if type.value in (const.CDF_EPOCH16.value, const.CDF_INT8.value, const.CDF_TIME_TT2000.value) \ and self.backward: raise ValueError('Data requires EPOCH16, INT8, or TIME_TT2000; ' 'incompatible with backward-compatible CDF') new_var = Var(self, name, type, n_elements, dims, recVary, dimVarys) if compress != None: new_var.compress(compress, compress_param) if data is not None: new_var[...] = data if hasattr(data, 'attrs'): new_var.attrs.clone(data.attrs) return new_var def raw_var(self, name): """ Get a "raw" :class:`Var` object. Normally a :class:`Var` will perform translation of values for certain types (to/from Unicode for CHAR variables on Py3k, and to/from datetime for all time types). A "raw" object does not perform this translation, on read or write. This does *not* affect the data on disk, and in fact it is possible to maintain multiple Python objects with access to the same zVariable. Parameters ========== name : str name or number of the zVariable """ v = self[name] v._raw = True return v def save(self): """ Saves the CDF file but leaves it open. If closing the CDF, :meth:`close` is sufficient; there is no need to call :meth:`save` before :meth:`close`. .. note:: Relies on an undocumented call of the CDF C library, which is also used in the Java interface. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.SAVE_, const.CDF_) def copy(self): """ Make a copy of all data and attributes in this CDF Returns ======= out : :py:class:`CDFCopy` :class:`~spacepy.datamodel.SpaceData`-like object of all data """ return CDFCopy(self) def version(self): """ Get version of library that created this CDF Returns ======= out : tuple version of CDF library, in form (version, release, increment) """ ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) self._call(const.GET_, const.CDF_VERSION_, ctypes.byref(ver), const.GET_, const.CDF_RELEASE_, ctypes.byref(rel), const.GET_, const.CDF_INCREMENT_, ctypes.byref(inc)) return (ver.value, rel.value, inc.value) def _get_attrs(self): """Get attribute list Provide access to the CDF's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = gAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. 
""") def var_num(self, varname): """Get the variable number of a particular variable name This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : int Variable number of this zvariable. """ num = self._var_nums.get(varname, None) if num is None: #Copied from Var._get, which can hopefully be thinned varNum = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMBER_, varname, ctypes.byref(varNum)) num = varNum.value self._var_nums[varname] = num return num def attr_num(self, attrname): """Get the attribute number and scope by attribute name This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : tuple attribute number, scope (True for global) of this attribute """ res = self._attr_info.get(attrname, None) if res is None: #Copied from Var._get, which can hopefully be thinned attrNum = ctypes.c_long(0) self._call(const.GET_, const.ATTR_NUMBER_, attrname, ctypes.byref(attrNum)) scope = ctypes.c_long(0) self._call(const.SELECT_, const.ATTR_, attrNum, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) res = (attrNum.value, scope) self._attr_info[attrname] = res return res def clear_attr_from_cache(self, attrname): """Mark an attribute deleted in the name-to-number cache Will remove an attribute, and all attributes with higher numbers, from the attribute cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the attribute. Not this is NOT a string in Python 3! """ num, scope = self.attr_num(attrname) #All numbers higher than this are renumbered for a, n in list(self._attr_info.items()): if n[0] >= num: del self._attr_info[a] def clear_from_cache(self, varname): """Mark a variable deleted in the name-to-number cache Will remove a variable, and all variables with higher numbers, from the variable cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! """ num = self.var_num(varname) #All numbers higher than this are renumbered for v, n in list(self._var_nums.items()): if n >= num: del self._var_nums[v] def add_attr_to_cache(self, attrname, num, scope): """Add an attribute to the name-to-number cache This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable scope : bool True if global scope; False if variable scope. 
""" self._attr_info[attrname] = (num, scope) def add_to_cache(self, varname, num): """Add a variable to the name-to-number cache This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable """ self._var_nums[varname] = num #Note there is no function for delete, currently handled in Var.rename #and Attr.rename by just deleting from the dict directly. Maybe this #should be differen (maybe should be possible to follow a variable across #a rename...) class Var(MutableSequence): """ A CDF variable. This object does not directly store the data from the CDF; rather, it provides access to the data in a format that much like a Python list or numpy :class:`~numpy.ndarray`. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. The CDF user's guide, section 2.3, provides background on variables. .. note:: Not intended to be created directly; use methods of :class:`CDF` to gain access to a variable. A record-varying variable's data are viewed as a hypercube of dimensions n_dims+1 (the extra dimension is the record number). They are indexed in row-major fashion, i.e. the last index changes most frequently / is contiguous in memory. If the CDF is column-major, the data are transformed to row-major before return. Non record-varying variables are similar, but do not have the extra dimension of record number. Variables can be subscripted by a multidimensional index to return the data. Indices are in row-major order with the first dimension representing the record number. If the CDF is column major, the data are reordered to row major. Each dimension is specified by standard Python `slice <http://docs.python.org/tutorial/introduction.html#strings>`_ notation, with dimensions separated by commas. The ellipsis fills in any missing dimensions with full slices. The returned data are lists; Python represents multidimensional arrays as nested lists. The innermost set of lists represents contiguous data. .. note:: numpy 'fancy indexing' is *not* supported. Degenerate dimensions are 'collapsed', i.e. no list of only one element will be returned if a single subscript is specified instead of a range. (To avoid this, specify a slice like 1:2, which starts with 1 and ends before 2). Two special cases: 1. requesting a single-dimension slice for a record-varying variable will return all data for that record number (or those record numbers) for that variable. 2. Requests for multi-dimensional variables may skip the record-number dimension and simply specify the slice on the array itself. In that case, the slice of the array will be returned for all records. In the event of ambiguity (e.g., single-dimension slice on a one-dimensional variable), case 1 takes priority. Otherwise, mismatch between the number of dimensions specified in the slice and the number of dimensions in the variable will cause an :exc:`~exceptions.IndexError` to be thrown. This all sounds very complicated but it is essentially attempting to do the 'right thing' for a range of slices. An unusual case is scalar (zero-dimensional) non-record-varying variables. Clearly they cannot be subscripted normally. 
In this case, use the ``[...]`` syntax meaning 'access all data.': >>> import pycdf >>> testcdf = pycdf.CDF('test.cdf', '') >>> variable = testcdf.new('variable', recVary=False, ... type=pycdf.const.CDF_INT4) >>> variable[...] = 10 >>> variable <Var: CDF_INT4 [] NRV > >>> variable[...] 10 Reading any empty non-record-varying variable will return an empty with the same *number* of dimensions, but all dimensions will be of zero length. The scalar is, again, a special case: due to the inability to have a numpy array which is both zero-dimensional and empty, reading an NRV scalar variable with no data will return an empty one-dimensional array. This is really not recommended. As a list type, variables are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_; iterating over a variable returns a single complete record at a time. This is all clearer with examples. Consider a variable ``B_GSM``, with three elements per record (x, y, z components) and fifty records in the CDF. Then: 1. ``B_GSM[0, 1]`` is the y component of the first record. 2. ``B_GSM[10, :]`` is a three-element list, containing x, y, and z components of the 11th record. As a shortcut, if only one dimension is specified, it is assumed to be the record number, so this could also be written ``B_GSM[10]``. 3. ``B_GSM[...]`` reads all data for ``B_GSM`` and returns it as a fifty-element list, each element itself being a three-element list of x, y, z components. Multidimensional example: consider fluxes stored as a function of pitch angle and energy. Such a variable may be called Flux and stored as a two-dimensional array, with the first dimension representing (say) ten energy steps and the second, eighteen pitch angle bins (ten degrees wide, centered from 5 to 175 degrees). Assume 100 records stored in the CDF (i.e. 100 different times). 1. ``Flux[4]`` is a list of ten elements, one per energy step, each element being a list of 18 fluxes, one per pitch bin. All are taken from the fifth record in the CDF. 2. ``Flux[4, :, 0:4]`` is the same record, all energies, but only the first four pitch bins (roughly, field-aligned). 3. ``Flux[..., 0:4]`` is a 100-element list (one per record), each element being a ten-element list (one per energy step), each containing fluxes for the first four pitch bins. This slicing notation is very flexible and allows reading specifically the desired data from the CDF. All data are, on read, converted to appropriate Python data types; EPOCH, EPOCH16, and TIME_TT2000 types are converted to :class:`~datetime.datetime`. Data are returned in numpy arrays. .. note:: Although pycdf supports TIME_TT2000 variables, the Python :class:`~datetime.datetime` object does not support leap seconds. Thus, on read, any seconds past 59 are truncated to 59.999999 (59 seconds, 999 milliseconds, 999 microseconds). Potentially useful list methods and related functions: - `count <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `in <http://docs.python.org/reference/expressions.html#in>`_ - `index <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `len <http://docs.python.org/library/functions.html#len>`_ - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - `sorted <http://docs.python.org/library/functions.html#sorted>`_ The topic of array majority can be very confusing; good background material is available at `IDL Array Storage and Indexing <http://www.idlcoyote.com/misc_tips/colrow_major.html>`_. 
In brief, *regardless of the majority stored in the CDF*, pycdf will always present the data in the native Python majority, row-major order, also known as C order. This is the default order in `NumPy <http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html #internal-memory-layout-of-an-ndarray>`_. However, packages that render image data may expect it in column-major order. If the axes seem 'swapped' this is likely the reason. The :attr:`~Var.attrs` Python attribute acts as a dictionary referencing zAttributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`zAttrList` for more on the dictionary of attributes. With writing, as with reading, every attempt has been made to match the behavior of Python lists. You can write one record, many records, or even certain elements of all records. There is one restriction: only the record dimension (i.e. dimension 0) can be resized by write, as all records in a variable must have the same dimensions. Similarly, only whole records can be deleted. .. note:: Unusual error messages on writing data usually mean that pycdf is unable to interpret the data as a regular array of a single type matching the type and shape of the variable being written. A 5x4 array is supported; an irregular array where one row has five columns and a different row has six columns is not. Error messages of this type include: - ``Data must be well-formed, regular array of number, string, or datetime`` - ``setting an array element with a sequence.`` - ``shape mismatch: objects cannot be broadcast to a single shape`` For these examples, assume Flux has 100 records and dimensions [2, 3]. Rewrite the first record without changing the rest: >>> Flux[0] = [[1, 2, 3], [4, 5, 6]] Writes a new first record and delete all the rest: >>> Flux[...] = [[1, 2, 3], [4, 5, 6]] Write a new record in the last position and add a new record after: >>> Flux[99:] = [[[1, 2, 3], [4, 5, 6]], ... [[11, 12, 13], [14, 15, 16]]] Insert two new records between the current number 5 and 6: >>> Flux[5:6] = [[[1, 2, 3], [4, 5, 6]], [[11, 12, 13], ... [14, 15, 16]]] This operation can be quite slow, as it requires reading and rewriting the entire variable. (CDF does not directly support record insertion.) Change the first element of the first two records but leave other elements alone: >>> Flux[0:2, 0, 0] = [1, 2] Remove the first record: >>> del Flux[0] Removes record 5 (the sixth): >>> del Flux[5] Due to the need to work around a bug in the CDF library, this operation can be quite slow. Delete *all data* from ``Flux``, but leave the variable definition intact: >>> del Flux[...] .. note:: Although this interface only directly supports zVariables, zMode is set on opening the CDF so rVars appear as zVars. See p.24 of the CDF user's guide; pyCDF uses zMode 2. .. autosummary:: ~Var.attrs ~Var.compress ~Var.copy ~Var.dtype ~Var.dv ~Var.insert ~Var.name ~Var.rename ~Var.rv ~Var.shape ~Var.type .. attribute:: Var.attrs zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. .. automethod:: compress .. automethod:: copy .. autoattribute:: dtype .. automethod:: dv .. automethod:: insert .. automethod:: name .. automethod:: rename .. automethod:: rv .. autoattribute:: shape .. 
automethod:: type """ def __init__(self, cdf_file, var_name, *args): """Create or locate a variable Parameters ========== cdf_file : :py:class:`pycdf.CDF` CDF file containing this variable var_name : string name of this variable Other Parameters ================ args additional arguments passed to :py:meth:`_create`. If none, opens an existing variable. If provided, creates a new one. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning """ self.cdf_file = cdf_file #This is the definitive "identify" of variable self._name = None self._type = None #CDF type (long) self._raw = False #Raw access (skip all conversions) if len(args) == 0: self._get(var_name) else: self._create(var_name, *args) #Weak reference to attribute list (use attrs instead) #This avoids a reference loop self._attrlistref = weakref.ref(zAttrList(self)) def __getitem__(self, key): """Returns a slice from the data array. Details under :py:class:`pycdf.Var`. @return: The data from this variable @rtype: list-of-lists of appropriate type. @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) #Hyperslice mostly catches this sort of thing, but #an empty variable is a special case, since we might want to #WRITE to 0th record (which Hyperslice also supports) but #can't READ from it, and iterating over tries to read from it. if hslice.rv: if hslice.dimsizes[0] == 0 and hslice.degen[0] and \ hslice.starts[0] == 0: raise IndexError('record index out of range') #For NRV, again hslice will assume 0th record exists since we might #want to write. So ANY degenerate dim other than the glued-on 0th #suggests an explicit index that should fail. None degenerate suggests #make an empty array. #Note this is pulling a lot of hyperslice stuff into getitem! elif hslice.dimsizes[0] == 0: if len(hslice.degen) > 1 and max(hslice.degen[1:]): raise IndexError('record index out of range') else: #The zero-length dimension is degenerate so it gets chopped, #and you can't have a zero-length numpy array that still #maintains the size of all other dimensions. So just force #a zero-dim array and the rest will follow hslice.counts[...] = 0 #If this is a scalar, need to make a single non-degenerate #dimension so it can be empty. if len(hslice.counts) == 1: hslice.degen[0] = False result = hslice.create_array() if hslice.counts[0] != 0: hslice.select() lib.call(const.GET_, const.zVAR_HYPERDATA_, result.ctypes.data_as(ctypes.c_void_p)) return hslice.convert_input_array(result) def __delitem__(self, key): """Removes a record (or set of records) from the CDF Only whole records can be deleted, so the del call must either specify only one dimension or it must specify all elements of the non-record dimensions. This is *not* a way to resize a variable! Deleting records from the middle of a variable may be very slow in some circumstances. To work around a bug in CDF library versions 3.4.0 and before, all the data must be read in, the requested deletions done, and then all written back out. 
@param key: index or slice to delete @type key: int or slice @raise TypeError: if an attempt is made to delete from a non record-varying variable, or to delete below the record level """ if not self.rv(): raise TypeError('Cannot delete records from non-record-varying ' 'variable.') hslice = _Hyperslice(self, key) if hslice.dims > 1 and (hslice.counts[1:] != hslice.dimsizes[1:]).any(): raise TypeError('Can only delete entire records.') if hslice.counts[0] == 0: return start = hslice.starts[0] count = hslice.counts[0] interval = hslice.intervals[0] dimsize = hslice.dimsizes[0] self._call() dangerous_delete = False if lib._del_middle_rec_bug and \ (interval != 1 or (start != 0 and start + count < dimsize)): #delete from middle is dangerous if only have one index entry entries = ctypes.c_long(0) lib.call(const.GET_, const.zVAR_nINDEXENTRIES_, ctypes.byref(entries)) dangerous_delete = (entries.value == 1) if dangerous_delete: data = self[...] data = numpy.delete( data, numpy.arange(start, start + count * interval, interval), 0) self[0:dimsize - count] = data first_rec = dimsize - count last_rec = dimsize - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif interval == 1: first_rec = ctypes.c_long(start) last_rec = ctypes.c_long(start + count - 1) lib.call(const.DELETE_, const.zVAR_RECORDS_, first_rec, last_rec) else: self._call() #delete from end to avoid renumbering of records for recno in range(start + (count - 1) * interval, start - 1, -1 * interval): lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(recno), ctypes.c_long(recno)) def __setitem__(self, key, data): """Puts a slice into the data array. Details under :py:class:`pycdf.Var`. @param key: index or slice to store @type key: int or slice @param data: data to store @type data: numpy.array @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. 
IndexError will @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) n_recs = hslice.counts[0] hslice.expand(data) cdf_type = self.type() if cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) else: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=self._np_type()) if cdf_type == const.CDF_EPOCH16.value: datashape = data.shape[:-1] else: datashape = data.shape #Check data sizes if datashape != tuple(hslice.expected_dims()): raise ValueError('attempt to assign data of dimensions ' + str(datashape) + ' to slice of dimensions ' + str(tuple(hslice.expected_dims()))) #Flip majority and reversed dimensions, see convert_input_array data = hslice.convert_output_array(data) #Handle insertions and similar weirdness if hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Specified slice ends before last record, so insert in middle saved_data = self[hslice.starts[0] + n_recs:] if hslice.counts[0] > 0: hslice.select() lib.call(const.PUT_, const.zVAR_HYPERDATA_, data.ctypes.data_as(ctypes.c_void_p)) if hslice.counts[0] < n_recs: first_rec = hslice.starts[0] + hslice.counts[0] last_rec = hslice.dimsizes[0] - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Put saved data in after inserted data self[hslice.starts[0] + hslice.counts[0]:] = saved_data def extend(self, data): """ Append multiple values to the end of this variable This is an efficiency function which overrides the base implementation in MutableSequence. Parameters ---------- data : the data to append """ self[len(self):] = data def insert(self, index, data): """ Inserts a *single* record before an index Parameters ---------- index : int index before which to insert the new record data : the record to insert """ self[index:index] = [data] def _create(self, var_name, datatype, n_elements = 1, dims = (), recVary = const.VARY, dimVarys = None): """Creates a new zVariable @param var_name: name of this variable @type var_name: string @param datatype: CDF data type @type datatype: ctypes.c_long @param n_elements: number of elements (should be 1 except for CDF_CHAR variables). @type n_elements: long @param dims: size of each dimension for multi-dimensional variable, or empty for a zero-dimensional @type dims: sequence of long @param recVary: record variance for this variable (VARY/NOVARY) @type recVary: long @param dimVarys: array of VARY or NOVARY, variance for each dimension @type dimVarys: sequence of long @return: new variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.new}. 
""" dim_array = (ctypes.c_long * len(dims))(*dims) enc_name = var_name.encode('ascii') if dimVarys is None: dim_vary_array = (ctypes.c_long * (len(dims) if len(dims) > 0 else 1))(const.VARY) else: dim_vary_array = (ctypes.c_long * len(dims))(*dimVarys) varNum = ctypes.c_long(0) self.cdf_file._call(const.CREATE_, const.zVAR_, enc_name, datatype, ctypes.c_long(n_elements), ctypes.c_long(len(dims)), dim_array, recVary, dim_vary_array, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) def _delete(self): """Removes this zVariable from the CDF @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ self._call(const.DELETE_, const.zVAR_) self.cdf_file.clear_from_cache(self._name) self._name = None def _get(self, var_name): """Gets an existing zVariable @param var_name: name of this variable @type var_name: string @return: variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.__getitem__}. """ if isinstance(var_name, str_classes): try: enc_name = var_name.encode('ascii').rstrip() except AttributeError: enc_name = var_name.rstrip() #already in ASCII #'touch' CDF to cause an error if the name isn't there; get number varNum = ctypes.c_long(0) self.cdf_file._call(const.GET_, const.zVAR_NUMBER_, enc_name, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) else: #Looking up by number name = ctypes.create_string_buffer(const.CDF_VAR_NAME_LEN256+1) self.cdf_file._call(const.SELECT_, const.zVAR_, ctypes.c_long(var_name), const.GET_, const.zVAR_NAME_, name) self._name = name.value.rstrip() self.cdf_file.add_to_cache(self._name, var_name) def _num(self): """Returns the zVar number for this variable @return: number of this zVar @rtype: int """ return self.cdf_file.var_num(self._name) def __len__(self): """Get number of records for this variable in this file @return: Number of records @rtype: long @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ count = ctypes.c_long(0) self._call(const.GET_, const.zVAR_MAXREC_, ctypes.byref(count)) return (count.value + 1) def __repr__(self): """Returns representation of the variable Cannot return anything that can be eval'd to create a copy, so just wrap the informal representation in angle brackets. @return: info on this zVar @rtype: str """ return '<Var:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the variable This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.Var`. 
@return: info on this zVar, CDFTYPE [dimensions] NRV (if not record-varying) @rtype: str """ if self.cdf_file._opened: cdftype = self.type() chartypes = (const.CDF_CHAR.value, const.CDF_UCHAR.value) rv = self.rv() typestr = lib.cdftypenames[cdftype] + \ ('*' + str(self._nelems()) if cdftype in chartypes else '' ) if rv: sizestr = str([len(self)] + self._dim_sizes()) else: sizestr = str(self._dim_sizes()) return typestr + ' ' + sizestr + ('' if rv else ' NRV') else: if isinstance(self._name, str): return 'zVar "{0}" in closed CDF {1}'.format( self._name, self.cdf_file.pathname) else: return 'zVar "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self.cdf_file.pathname.decode('ascii')) def _n_dims(self): """Get number of dimensions for this variable @return: the number of dimensions @rtype: long """ n_dims = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMDIMS_, ctypes.byref(n_dims)) return n_dims.value def _dim_sizes(self): """Get the dimension sizes for this variable @return: sequence of sizes @rtype: sequence of long @note: This will always be in Python order (i.e. row major, last index iterates most quickly), *regardless* of the majority of the CDF. """ sizes = (ctypes.c_long * const.CDF_MAX_DIMS)(0) self._call(const.GET_, const.zVAR_DIMSIZES_, sizes) sizes = sizes[0:self._n_dims()] return sizes def rv(self, new_rv=None): """ Gets or sets whether this variable has record variance If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Other Parameters ================ new_rv : boolean True to change to record variance, False to change to NRV, unspecified to simply check variance. Returns ======= out : Boolean True if record varying, False if NRV """ if new_rv != None: self._call(const.PUT_, const.zVAR_RECVARY_, const.VARY if new_rv else const.NOVARY) vary = ctypes.c_long(0) self._call(const.GET_, const.zVAR_RECVARY_, ctypes.byref(vary)) return vary.value != const.NOVARY.value def dv(self, new_dv=None): """ Gets or sets dimension variance of each dimension of variable. If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Parameters ========== new_dv : list of boolean Each element True to change that dimension to dimension variance, False to change to not dimension variance. (Unspecified to simply check variance.) Returns ======= out : list of boolean True if that dimension has variance, else false. """ ndims = self._n_dims() if new_dv != None: if len(new_dv) != ndims: raise ValueError('Must specify variance for ' + str(ndims) + 'dimensions.') varies = (ctypes.c_long * ndims)( *[const.VARY if dv else const.NOVARY for dv in new_dv]) self._call(const.PUT_, const.zVAR_DIMVARYS_, varies) if ndims == 0: return [] varies = (ctypes.c_long * const.CDF_MAX_DIMS)() self._call(const.GET_, const.zVAR_DIMVARYS_, varies) return [dv != const.NOVARY.value for dv in varies[0:ndims]] def _call(self, *args, **kwargs): """Select this CDF and variable and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. @param args: Passed directly to the CDF library interface. Useful constants are defined in the :py:mod:`pycdf.const` module of this package. @type args: various, see :py:mod:`ctypes`. 
@return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self.cdf_file._call( const.SELECT_, const.zVAR_, ctypes.c_long(self.cdf_file.var_num(self._name)), *args, **kwargs) def _np_type(self): """Returns the numpy type of this variable This is the numpy type that will come directly out of the CDF; see :meth:`dtype` for the representation post-conversion. Raises ====== CDFError : for library-reported error or failure to find numpy type Returns ======= out : dtype numpy dtype that will hold value from this variable """ cdftype = self.type() if cdftype == const.CDF_CHAR.value or cdftype == const.CDF_UCHAR.value: return numpy.dtype('S' + str(self._nelems())) try: return lib.numpytypedict[cdftype] except KeyError: raise CDFError(const.BAD_DATA_TYPE) def type(self, new_type=None): """ Returns or sets the CDF type of this variable Parameters ========== new_type : ctypes.c_long the new type from :mod:`~pycdf.const` Returns ======= out : int CDF type """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) n_elements = ctypes.c_long(self._nelems()) self._call(const.PUT_, const.zVAR_DATASPEC_, new_type, n_elements) self._type = None if self._type is None: cdftype = ctypes.c_long(0) self._call(const.GET_, const.zVAR_DATATYPE_, ctypes.byref(cdftype)) self._type = cdftype.value return self._type def _nelems(self): """Number of elements for each value in this variable This is the length of strings for CHAR and UCHAR, should be 1 otherwise. @return: length of strings @rtype: int """ nelems = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMELEMS_, ctypes.byref(nelems)) return nelems.value def name(self): """ Returns the name of this variable Returns ======= out : str variable's name """ if isinstance(self._name, str): return self._name elif isinstance(self._name, bytes): return self._name.decode() def compress(self, comptype=None, param=None): """ Set or check the compression of this variable Compression may not be changeable on variables with data already written; even deleting the data may not permit the change. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
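        Example (illustrative sketch; ``var`` is assumed to be a zVariable
        in a writable CDF, and gzip level 5 is just a sample parameter):

        >>> var.compress(const.GZIP_COMPRESSION, 5)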
Returns ======= out : tuple the (comptype, param) currently in effect """ return _compress(self, comptype, param) def copy(self): """ Copies all data and attributes from this variable Returns ======= out : :class:`VarCopy` list of all data in record order """ return VarCopy(self) def rename(self, new_name): """ Renames this variable Parameters ========== new_name : str the new name for this variable """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_VAR_NAME_LEN256: raise CDFError(const.BAD_VAR_NAME) self._call(const.PUT_, const.zVAR_NAME_, enc_name) self.cdf_file.add_to_cache( enc_name, self.cdf_file.var_num(self._name)) #Still in cache del self.cdf_file._var_nums[self._name] self._name = enc_name @property def shape(self): """ Provides the numpy array-like shape of this variable. Returns a tuple; first element is number of records (RV variable only) And the rest provide the dimensionality of the variable. .. note:: Assigning to this attribute will not change the shape. """ if self.rv(): return tuple([len(self)] + self._dim_sizes()) else: return tuple(self._dim_sizes()) @property def dtype(self): """ Provide the numpy dtype equivalent to the CDF type of this variable. Data from this variable will be returned in numpy arrays of this type. See Also -------- type """ cdftype = self.type() if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str is not bytes and not self._raw: return numpy.dtype('U' + str(self._nelems())) if cdftype in (const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value) and not self._raw: return numpy.dtype('O') return self._np_type() def _get_attrs(self): """Get attribute list Provide access to the zVar's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = zAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. """) class _Hyperslice(object): """Represents a CDF 'slice' used for the hyper CDF functions For internal module use only. @ivar dims: number of dimensions to this slice, usually number of dimensions to the variable plus one for the record, which represents the 0th (least rapidly varying) dimension. @type dims: int @ivar dimsizes: size of each dimension (0th is number of records) @type dimsizes: list of int @ivar starts: index of the start value for each dimension ('dimension indices' in CDF speak) @type starts: list of int @ivar counts: number of values to get from each dimension. Final result will be the product of everything in counts. ('dimension counts' in CDF speak) @type counts: numpy.array @ivar intervals: interval between successive indices to use for each dimension. ('dimension invervals' in CDF speak) @type intervals: list of int @ivar degen: is this dimension degenerate, i.e. should be removed in the returned dataset. A 3D array with one dimension degenerate will be returned as a 2D array (i.e. list-of-lists.) @type degen: numpy.array @ivar rev: should this dimension be returned in reverse order? 
@type rev: numpy.array @ivar column: is this slice in column-major mode (if false, row-major) @type column: boolean @ivar zvar: what CDF variable this object slices on @type zvar: :py:class:`pycdf.Var` @ivar expanded_key: fully-expanded version of the key passed to the constructor (all dimensions filled in) @type expanded_key: tuple @note: All dimension-related variables are stored row-major (Python order) """ def __init__(self, zvar, key): """Create a Hyperslice @param zvar: zVariable that this slices @type zvar: :py:class:`pycdf.Var` @param key: Python multi-dimensional slice as passed to __getitem__ @type key: tuple of slice and/or int @raise IndexError: if slice is out of range, mismatches dimensions, or otherwise unparsable. @raise ValueError: if slice has invalid values """ self.zvar = zvar self.rv = self.zvar.rv() #dim of records, + 1 record dim (NRV always is record 0) self.dims = zvar._n_dims() + 1 self.dimsizes = [len(zvar)] + \ zvar._dim_sizes() self.starts = [0] * self.dims self.counts = numpy.empty((self.dims,), dtype=numpy.int32) self.counts.fill(1) self.intervals = [1] * self.dims self.degen = numpy.zeros(self.dims, dtype=numpy.bool) self.rev = numpy.zeros(self.dims, dtype=numpy.bool) #key is: #1. a single value (integer or slice object) if called 1D #2. a tuple (of integers and/or slice objects) if called nD #3. Each item is either a single value (degenerate dim) # or a slice object. if not hasattr(key, '__len__'): #Not a container object, pack in tuple key = (key, ) if not self.rv: key = (0, ) + key #NRV, so always get 0th record (degenerate) key = self.expand_ellipsis(key, self.dims) if self.rv: #special-cases for RV variables if len(key) == 1: #get all data for this record(s) key = self.expand_ellipsis(key + (Ellipsis, ), self.dims) elif len(key) == self.dims - 1: #get same slice from each record key = (slice(None, None, None), ) + key if len(key) == self.dims: self.expanded_key = key for i in range(self.dims): idx = key[i] if hasattr(idx, 'start'): #slice (self.starts[i], self.counts[i], self.intervals[i], self.rev[i]) = \ self.convert_range(idx.start, idx.stop, idx.step, self.dimsizes[i]) else: #Single degenerate value if idx < 0: idx += self.dimsizes[i] if idx != 0 and (idx >= self.dimsizes[i] or idx < 0): raise IndexError('list index out of range') self.starts[i] = idx self.degen[i] = True else: raise IndexError('Slice does not match dimensions for zVar ' + str(zvar._name)) self.column = zvar.cdf_file.col_major() def expected_dims(self, data=None): """Calculate size of non-degenerate dimensions Figures out size, in each dimension, of expected input data @return: size of each dimension for this slice, excluding degenerate @rtype: list of int """ return [self.counts[i] for i in range(self.dims) if not self.degen[i]] # MASKED: expand function (lines 3583-3607) def create_array(self): """Creates a numpy array to hold the data from this slice Returns ======= out : numpy.array array sized, typed, and dimensioned to hold data from this slice """ counts = self.counts degen = self.degen if self.column: counts = self.reorder(counts) degen = self.reorder(degen) #TODO: Forcing C order for now, revert to using self.column later array = numpy.empty( [counts[i] for i in range(len(counts)) if not degen[i]], self.zvar._np_type(), order='C') return numpy.require(array, requirements=('C', 'A', 'W')) def convert_input_array(self, buffer): """Converts a buffer of raw data from this slice EPOCH(16) variables always need to be converted. 
CHAR need converted to Unicode if py3k Parameters ========== buffer : numpy.array data as read from the CDF file Returns ======= out : numpy.array converted data """ result = self._flip_array(buffer) #Convert to derived types cdftype = self.zvar.type() if not self.zvar._raw: if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str != bytes: dt = numpy.dtype('U{0}'.format(result.dtype.itemsize)) result = numpy.require(numpy.char.array(result).decode(), dtype=dt) elif cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(result) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(result) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(result) return result def convert_output_array(self, buffer): """Convert a buffer of data that will go into this slice Parameters ========== buffer : numpy.array data to go into the CDF file Returns ======= out : numpy.array input with majority flipped and dimensions reversed to be suitable to pass directly to CDF library. """ buffer = self._flip_array(buffer) return numpy.require(buffer, requirements=('C', 'A', 'W')) def _flip_array(self, data): """ Operations for majority, etc. common between convert_input and _output """ cdftype = self.zvar.type() #Flip majority if any non-degenerate dimensions exist if self.column and not min(self.degen): #Record-number dim degen, swap whole thing if self.degen[0]: if cdftype == const.CDF_EPOCH16.value: #Maintain last dimension data = data.transpose( list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose() #Record-number dimension is not degenerate, so keep it first else: if cdftype == const.CDF_EPOCH16.value: data = data.transpose( [0] + list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose( [0] + list(range(len(data.shape) - 1, 0, -1))) #Reverse non-degenerate dimensions in rev #Remember that the degenerate indices are already gone! if self.rev.any(): sliced = [(slice(None, None, -1) if self.rev[i] else slice(None)) for i in range(self.dims) if not self.degen[i]] if cdftype == const.CDF_EPOCH16.value: #don't reverse last dim sliced.extend(slice(None)) data = operator.getitem(data, tuple(sliced)) return data def select(self): """Selects this hyperslice in the CDF Calls the CDF library to select the CDF, variable, records, and array elements corresponding to this slice. 
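        For example (illustrative), a slice such as ``var[2:10:2, 1]`` on a
        record-varying variable with one non-record dimension translates to
        record number 2, record count 4, record interval 2, dimension
        index 1, dimension count 1, and dimension interval 1.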
""" args = (const.SELECT_, const.zVAR_RECNUMBER_, ctypes.c_long(self.starts[0]), const.SELECT_, const.zVAR_RECCOUNT_, ctypes.c_long(self.counts[0]), const.SELECT_, const.zVAR_RECINTERVAL_, ctypes.c_long(self.intervals[0])) if self.dims > 1: dims = self.dims - 1 args += (const.SELECT_, const.zVAR_DIMINDICES_, (ctypes.c_long * dims)(*self.starts[1:]), const.SELECT_, const.zVAR_DIMCOUNTS_, (ctypes.c_long * dims)(*self.counts[1:]), const.SELECT_, const.zVAR_DIMINTERVALS_, (ctypes.c_long * dims)(*self.intervals[1:])) self.zvar._call(*args) @staticmethod def expand_ellipsis(slices, n_dims): """Expands any ellipses into correct number of full-size slices @param slices: tuple of slices, integers, or ellipse objects @type slices: tuple @param n_dims: number of dimensions this slice is over @type n_dims: int @return: L{slices} with ellipses replaced by appropriate number of full-dimension slices @rtype: tuple @raise IndexError: if ellipses specified when already have enough dimensions """ if slices is Ellipsis: return tuple([slice(None, None, None) for i in range(n_dims)]) #Elements might be numpy arrays, so can't use in/index idx = [i for i, v in enumerate(slices) if v is Ellipsis] if not idx: #no ellipsis return slices if len(idx) > 1: #multiples! raise IndexError('Ellipses can only be used once per slice.') idx = idx[0] #how many dims to expand ellipsis to #remember the ellipsis is in len(slices) and must be replaced! extra = n_dims - len(slices) + 1 if extra < 0: raise IndexError('too many indices') result = slices[0:idx] + (slice(None), ) * extra + slices[idx+1:] return result @staticmethod def check_well_formed(data): """Checks if input data is well-formed, regular array""" d = numpy.asanyarray(data) if d.dtype == numpy.object: #this is probably going to be bad try: len(d.flat[0]) except TypeError: #at least it's not a list pass else: raise ValueError( 'Data must be well-formed, regular array of number, ' 'string, or datetime') @staticmethod def dimensions(data): """Finds the dimensions of a nested list-of-lists @param data: data of which dimensions are desired @type data: list (of lists) @return: dimensions of L{data}, in order outside-in @rtype: list of int @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) _Hyperslice.check_well_formed(d) return d.shape @staticmethod def types(data, backward=False): """Find dimensions and valid types of a nested list-of-lists Any given data may be representable by a range of CDF types; infer the CDF types which can represent this data. This breaks down to: 1. Proper kind (numerical, string, time) 2. Proper range (stores highest and lowest number) 3. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: 1. Type that matches precision of data first, then 2. integer type before float type, then 3. Smallest type first, then 4. signed type first, then 5. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if L{data} specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: 1. absolute values between 0 and 3e-39 2. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. 
@param data: data for which dimensions and CDF types are desired @type data: list (of lists) @param backward: limit to pre-CDF3 types @type backward: bool @return: dimensions of L{data}, in order outside-in; CDF types which can represent this data; number of elements required (i.e. length of longest string) @rtype: 3-tuple of lists ([int], [ctypes.c_long], [int]) @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) dims = d.shape elements = 1 types = [] _Hyperslice.check_well_formed(d) if d.dtype.kind in ('S', 'U'): #it's a string types = [const.CDF_CHAR, const.CDF_UCHAR] elements = d.dtype.itemsize if d.dtype.kind == 'U': #UTF-8 uses 4 bytes per elements //= 4 elif d.size and hasattr(numpy.ma.getdata(d).flat[0], 'microsecond'): if max((dt.microsecond % 1000 for dt in d.flat)) > 0: types = [const.CDF_EPOCH16, const.CDF_EPOCH, const.CDF_TIME_TT2000] else: types = [const.CDF_EPOCH, const.CDF_EPOCH16, const.CDF_TIME_TT2000] if backward: del types[types.index(const.CDF_EPOCH16)] del types[-1] elif not lib.supports_int8: del types[-1] elif d is data or isinstance(data, numpy.generic): #numpy array came in, use its type (or byte-swapped) types = [k for k in lib.numpytypedict if (lib.numpytypedict[k] == d.dtype or lib.numpytypedict[k] == d.dtype.newbyteorder()) and not k in lib.timetypes] if (not lib.supports_int8 or backward) \ and const.CDF_INT8.value in types: del types[types.index(const.CDF_INT8.value)] #Maintain priority to match the ordered lists below: #float/double (44, 45) before real (21/22), and #byte (41) before int (1) before char (51). So hack. #Consider making typedict an ordered dict once 2.6 is dead. types.sort(key=lambda x: x % 50, reverse=True) if not types: #not a numpy array, or can't parse its type if d.dtype.kind == 'O': #Object. 
Try to make it numeric #Can't do safe casting from Object, so try and compare #Basically try most restrictive to least restrictive trytypes = (numpy.uint64, numpy.int64, numpy.float64) for t in trytypes: try: newd = d.astype(dtype=t) except: #Failure to cast, try next type continue if (newd == d).all(): #Values preserved, use this type d = newd #Continue with normal guessing, as if a list break else: #fell through without a match raise ValueError( 'Cannot convert generic objects to CDF type.') if d.dtype.kind in ('i', 'u'): #integer minval = numpy.min(d) maxval = numpy.max(d) if minval < 0: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_INT2, const.CDF_INT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 15, 2 ** 31, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] else: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_UINT1, const.CDF_INT2, const.CDF_UINT2, const.CDF_INT4, const.CDF_UINT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 8, 2 ** 15, 2 ** 16, 2 ** 31, 2 ** 32, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] types = [t for (t, c) in zip(types, cutoffs) if c > maxval and (minval >= 0 or minval >= -c)] if (not lib.supports_int8 or backward) \ and const.CDF_INT8 in types: del types[types.index(const.CDF_INT8)] else: #float if dims is (): if d != 0 and (abs(d) > 1.7e38 or abs(d) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] else: absolutes = numpy.abs(d[d != 0]) if len(absolutes) > 0 and \ (numpy.max(absolutes) > 1.7e38 or numpy.min(absolutes) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] types = [t.value if hasattr(t, 'value') else t for t in types] return (dims, types, elements) @staticmethod def reorder(seq): """Reorders seq to switch array majority Used to take an array of subscripts between row and column majority. First element is not touched, being the record number. @param seq: a sequence of *subscripts* @type seq: sequence of integers @return: seq with all but element 0 reversed in order @rtype: sequence of integers """ return numpy.concatenate((seq[0:1], numpy.flipud(seq)[:-1])) @staticmethod def convert_range(start, stop, step, size): """Converts a start/stop/step range to start/count/interval (i.e. changes from Python-style slice to CDF-style) @param start: index to start a slice at, may be none or negative @type start: int @param stop: index at end of slice (one-past, standard Python), may be none or negative @type stop: int @param step: interval for stepping through stlice @type step: int @param size: size of list to slice @type size: int @return: (start, count, interval, rev) where: 1. start is the start index, normalized to be within the size of the list and negatives handled 2. count is the number of records in the slice, guaranteed to stop before the end 3. interval is the skip between records 4. rev indicates whether the sequence should be reversed @rtype: (int, int, int, boolean) """ (start, stop, step) = slice(start, stop, step).indices(size) if step < 0: step *= -1 count = int((start - stop + step - 1) / step) start = start - (count - 1) * step rev = True else: count = int((stop - start + step - 1) / step) rev = False if count < 0: count = 0 start = 0 return (start, count, step, rev) class Attr(MutableSequence): """An attribute, g or z, for a CDF .. 
warning:: This class should not be used directly, but only in its subclasses, :class:`gAttr` and :class:`zAttr`. The methods listed here are safe to use in the subclasses. Represents a CDF attribute, providing access to the Entries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. An introduction to CDF attributes can be found in section 2.4 of the CDF user's guide. Each element of the list is a single Entry of the appropriate type. The index to the elements is the Entry number. Multi-dimensional slicing is *not* supported; an Entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry .. autosummary:: ~Attr.append ~Attr.has_entry ~Attr.insert ~Attr.max_idx ~Attr.new ~Attr.number ~Attr.rename ~Attr.type .. automethod:: append .. automethod:: has_entry .. automethod:: insert .. automethod:: max_idx .. automethod:: new .. automethod:: number .. automethod:: rename .. automethod:: type """ def __init__(self, cdf_file, attr_name, create=False): """Initialize this attribute @param cdf_file: CDF file containing this attribute @type cdf_file: :py:class:`pycdf.CDF` @param attr_name: Name of this attribute @type attr_name: str @param create: True to create attribute, False to look up existing. @type create: bool """ self._cdf_file = cdf_file self._raw = False if isinstance(attr_name, str_classes): try: self._name = attr_name.encode('ascii') except AttributeError: self._name = attr_name attrno = ctypes.c_long() if create: self._cdf_file._call(const.CREATE_, const.ATTR_, self._name, self.SCOPE, ctypes.byref(attrno)) self._cdf_file.add_attr_to_cache( self._name, attrno.value, self.SCOPE == const.GLOBAL_SCOPE) else: #Ensure exists, and populate cache. See scope note below attrno, scope = self._cdf_file.attr_num(self._name) else: name = ctypes.create_string_buffer(const.CDF_ATTR_NAME_LEN256 + 1) scope = ctypes.c_long(0) self._cdf_file._call(const.SELECT_, const.ATTR_, ctypes.c_long(attr_name)) #Because it's possible to create a gAttr Python objecting #referencing an Attribute with variable scope, and vice-versa, #do NOT assume the scope matches #(Higher level code checks for that being a bad thing.) self._cdf_file._call( const.GET_, const.ATTR_NAME_, name, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) self._name = name.value.rstrip() if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) self._cdf_file.add_attr_to_cache(self._name, attr_name, scope) def __getitem__(self, key): """Return a slice of Entries. Because Attributes may be sparse, a multi-element slice will return None for those elements which do not have associated Entries. @param key: index or range of Entry number to return @type key: slice or int @return: a list of entries, appropriate type. @raise IndexError: if L{key} is an int and that Entry number does not exist. 
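        Example (illustrative sketch; assumes ``attribute`` has Entries
        defined only at numbers 0 and 2, holding the strings shown):

        >>> attribute[0:3]
        ['zeroth', None, 'second']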
""" if key is Ellipsis: key = slice(None, None, None) if hasattr(key, 'indices'): idx = range(*key.indices(self.max_idx() + 1)) return [self._get_entry(i) if self.has_entry(i) else None for i in idx] else: if self.has_entry(key): return self._get_entry(key) else: raise IndexError('list index ' + str(key) + ' out of range.') def _check_other_entries(self, types): """Try to get the type of this entry from others in the Attribute For zAttrs, checks if all other Entries are the same type, and at least one doesn't match its zVar, i.e. Entry type dominates (otherwise assumption is the Var type dominates). For gAttrs, checks all other Entries, and gives priority to the one that's earliest in the possible type list and exists in other Entries. This is only one component of Entry type guessing! :param list types: CDF types that are candidates (match the data) :return: The type discerned from other Entries, or None """ if self.ENTRY_ == const.zENTRY_: #If everything else is the same entry type, #and one is not the same as its var, probably #all entries should be of that type cand_et = None #The Entry type that might work one_var_diff = False #One Var has a type different from Entry for num in range(self.max_idx() + 1): if not self.has_entry(num): continue vartype = self._cdf_file[num].type() entrytype = self.type(num) if vartype != entrytype: one_var_diff = True if cand_et is None: if not entrytype in types: return None #One var has Entry with "impossible" type cand_et = entrytype elif cand_et != entrytype: return None #Two vars have Entries with different types if one_var_diff and cand_et is not None: return cand_et else: # Of those types which exist in other entries, # find the one which is earliest # in types, i.e. the preferred type entrytypes = [self.type(num) for num in range(self.max_idx() + 1) if self.has_entry(num)] entrytypes = [et for et in entrytypes if et in types] if entrytypes: return types[ min([types.index(et) for et in entrytypes])] return None def __setitem__(self, key, data): """Set a slice of Entries. @param key: index or range of Entry numbers to set @type key: slice or int @param data: the data to set these entries to. Normally each entry should be a sequence; if a scalar is provided, it is treated as a single-element list. @type data: scalar or list @raise ValueError: if size of {data} does not match size of L{key} @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. 
""" if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): #Single value, promote everything a dimension idx = (key, key + 1, 1) data = [data] else: idx = key.indices(self.max_idx() + 1) if key.step is None or key.step > 0: #Iterating forward, extend slice to match data if len(data) > len(range(*idx)): idx = (idx[0], idx[0] + idx[2] * len(data), idx[2]) #get, and check, types and sizes for all data #checks first so don't have error after changing half the Entries data_idx = -1 typelist = [] for i in range(*idx): data_idx += 1 if data_idx >= len(data): continue datum = data[data_idx] if datum is None: typelist[i] = (None, None, None) continue (dims, types, elements) = _Hyperslice.types( datum, backward=self._cdf_file.backward) if len(types) <= 0: raise ValueError('Cannot find a matching CDF type.') if len(dims) > 1: raise ValueError('Entries must be scalar or 1D.') elif len(dims) == 1 and isinstance(datum[0], str_classes): raise ValueError('Entry strings must be scalar.') entry_type = None if self.has_entry(i): #If the entry already exists, match its type entry_type = self.type(i) if not entry_type in types: entry_type = None if entry_type is None: #Check other entries for this attribute entry_type = self._check_other_entries(types) if entry_type is None and self.ENTRY_ == const.zENTRY_: #Fall back to zVar type vartype = self._cdf_file[i].type() if vartype in types: entry_type = vartype else: entry_type = types[0] elif entry_type is None: entry_type = types[0] if not entry_type in lib.numpytypedict: raise ValueError('Cannot find a matching numpy type.') typelist.append((dims, entry_type, elements)) data_idx = -1 for i in range(*idx): data_idx += 1 if data_idx >= len(data) or data[data_idx] is None: if self.has_entry(i): del self[i] continue datum = data[data_idx] (dims, entry_type, elements) = typelist[data_idx] self._write_entry(i, datum, entry_type, dims, elements) def __delitem__(self, key): """Delete a slice of Entries. @param key: index or range of Entry numbers to delete @type key: slice or int @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. """ if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): idx = (key, key + 1, 1) else: idx = key.indices(self.max_idx() + 1) for i in range(*idx): self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(i), const.DELETE_, self.ENTRY_) def __iter__(self, current=0): """Iterates over all entries in this Attribute Returns data from one entry at a time until reaches the end. @note: Returned in entry-number order. """ while current <= self.max_idx(): if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current += 1 def __reversed__(self, current=None): """Iterates over all entries in this Attribute Returns data from one entry at a time, starting at end and going to beginning. @note: Returned in entry-number order. """ if current is None: current = self.max_idx() while current >= 0: if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current -= 1 def __len__(self): """Number of Entries for this Attr. NOT same as max Entry number. 
@return: Number of Entries @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_NUMENTRIES_, ctypes.byref(count)) return count.value def __repr__(self): """Returns representation of an attribute Cannot return anything that can be eval'd to create a copy of the attribtute, so just wrap the informal representation in angle brackets. @return: all the data in this attribute @rtype: str """ return '<\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute This is an 'informal' representation in that it cannot be evaluated directly to create an L{Attr}. @return: all the data in this attribute @rtype: str """ if self._cdf_file._opened: return '\n'.join([str(item) for item in self]) else: if isinstance(self._name, str): return 'Attribute "{0}" in closed CDF {1}'.format( self._name, self._cdf_file.pathname) else: return 'Attribute "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self._cdf_file.pathname.decode('ascii')) def insert(self, index, data): """Insert an entry at a particular number Inserts entry at particular number while moving all subsequent entries to one entry number later. Does not close gaps. Parameters ========== index : int index where to put the new entry data : data for the new entry """ max_entry = self.max_idx() if index > max_entry: #Easy case self[index] = data return for i in range(max_entry, index - 1, -1): if self.has_entry(i+1): self.__delitem__(i+1) if self.has_entry(i): self.new(self.__getitem__(i), type=self.type(i), number=i+1) self[index] = data def append(self, data): """Add an entry to end of attribute Puts entry after last defined entry (does not fill gaps) Parameters ========== data : data for the new entry """ self[self.max_idx() + 1] = data def _call(self, *args, **kwargs): """Select this CDF and Attr and call the CDF internal interface @param args: Passed directly to the CDF library interface. @type args: various, see :py:mod:`ctypes`. @return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self._cdf_file._call( const.SELECT_, const.ATTR_, ctypes.c_long(self._cdf_file.attr_num(self._name)[0]), *args, **kwargs) def _entry_len(self, number): """Number of elements in an Entry @param number: number of Entry @type number: int @return: number of elements @rtype: int """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') count = ctypes.c_long(0) self._call( const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_NUMELEMS_, ctypes.byref(count)) return count.value def type(self, number, new_type=None): """Find or change the CDF type of a particular Entry number Parameters ========== number : int number of Entry to check or change Other Parameters ================ new_type type to change this Entry to, from :mod:`~pycdf.const`. Omit to only check type. Returns ======= out : int CDF variable type, see :mod:`~pycdf.const` Notes ===== If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) size = ctypes.c_long(self._entry_len(number)) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATASPEC_, new_type, size, ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') cdftype = ctypes.c_long(0) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATATYPE_, ctypes.byref(cdftype), ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') return cdftype.value def has_entry(self, number): """Check if this attribute has a particular Entry number Parameters ========== number : int number of Entry to check or change Returns ======= out : bool True if ``number`` is a valid entry number; False if not """ status = self._call(const.CONFIRM_, self.ENTRY_EXISTENCE_, ctypes.c_long(number), ignore=(const.NO_SUCH_ENTRY, )) return not status == const.NO_SUCH_ENTRY def max_idx(self): """Maximum index of Entries for this Attr Returns ======= out : int maximum Entry number """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_MAXENTRY_, ctypes.byref(count)) return count.value def new(self, data, type=None, number=None): """Create a new Entry in this Attribute .. note:: If ``number`` is provided and an Entry with that number already exists, it will be overwritten. Parameters ========== data data to put in the Entry Other Parameters ================ type : int type of the new Entry, from :mod:`~pycdf.const` (otherwise guessed from ``data``) number : int Entry number to write, default is lowest available number. """ if number is None: number = 0 while self.has_entry(number): number += 1 (dims, types, elements) = _Hyperslice.types( data, backward=self._cdf_file.backward) if type is None: #Guess based on other entries type = self._check_other_entries(types) if type is None and self.ENTRY_ == const.zENTRY_: #Try to match variable type vartype = self._cdf_file[number].type() if vartype in types: type = vartype if type is None: type = types[0] elif hasattr(type, 'value'): type = type.value self._write_entry(number, data, type, dims, elements) def number(self): """Find the attribute number for this attribute Returns ======= out : int attribute number """ no = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.ATTR_NUMBER_, self._name, ctypes.byref(no)) return no.value def global_scope(self): """Determine scope of this attribute. Returns ======= out : bool True if global (i.e. gAttr), False if zAttr """ return self._cdf_file.attr_num(self._name)[1] def rename(self, new_name): """Rename this attribute Renaming a zAttribute renames it for *all* zVariables in this CDF! 
Parameters ========== new_name : str the new name of the attribute """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_ATTR_NAME_LEN256: raise CDFError(const.BAD_ATTR_NAME) self._call(const.PUT_, const.ATTR_NAME_, enc_name) self._cdf_file.add_attr_to_cache( enc_name, *self._cdf_file.attr_num(self._name)) #still in cache del self._cdf_file._attr_info[self._name] self._name = enc_name def _get_entry(self, number): """Read an Entry associated with this L{Attr} @param number: number of Entry to return @type number: int @return: data from entry numbered L{number} @rtype: list or str """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') #Make a big enough buffer length = self._entry_len(number) cdftype = self.type(number) if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): buff = numpy.empty((), 'S{0}'.format(length), order='C') else: if not cdftype in lib.numpytypedict: raise CDFError(const.BAD_DATA_TYPE) buff = numpy.empty((length,), lib.numpytypedict[cdftype], order='C') buff = numpy.require(buff, requirements=('C', 'A', 'W')) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATA_, buff.ctypes.data_as(ctypes.c_void_p)) #decode if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): if str == bytes or self._raw: #Py2k, leave as bytes result = bytes(buff) else: #Py3k, make unicode result = str(numpy.char.array(buff).decode()) else: if not self._raw: if cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(buff) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(buff) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(buff) else: result = buff else: result = buff if length == 1: result = result[0] return result def _write_entry(self, number, data, cdf_type, dims, elements): """Write an Entry to this Attr. @param number: number of Entry to write @type number: int @param data: data to write @param cdf_type: the CDF type to write, from :py:mod:`pycdf.const` @param dims: dimensions of L{data} @type dims: list @param elements: number of elements in L{data}, 1 unless it is a string @type elements: int """ if len(dims) == 0: n_write = 1 else: n_write = dims[0] if cdf_type in (const.CDF_CHAR.value, const.CDF_UCHAR.value): data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.dtype('S' + str(elements))) n_write = elements elif cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data), except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) elif cdf_type in lib.numpytypedict: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=lib.numpytypedict[cdf_type]) else: raise CDFError(const.BAD_DATA_TYPE) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATA_, ctypes.c_long(cdf_type), ctypes.c_long(n_write), data.ctypes.data_as(ctypes.c_void_p)) def _delete(self): """Delete this Attribute Also deletes all Entries associated with it. 
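        This is normally reached indirectly (illustrative sketch; ``cdffile``
        is an assumed open, writable CDF):

        >>> del cdffile.attrs['TEXT'] #removes the gAttribute and its gEntries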
""" self._call(const.DELETE_, const.ATTR_) self._cdf_file.clear_attr_from_cache(self._name) self._name = None class zAttr(Attr): """zAttribute for zVariables within a CDF. .. warning:: Because zAttributes are shared across all variables in a CDF, directly manipulating them may have unexpected consequences. It is safest to operate on zEntries via :class:`zAttrList`. .. note:: When accessing a zAttr, pyCDF exposes only the zEntry corresponding to the associated zVariable. See Also ======== :class:`Attr` """ ENTRY_ = const.zENTRY_ ENTRY_DATA_ = const.zENTRY_DATA_ SCOPE = const.VARIABLE_SCOPE ENTRY_EXISTENCE_ = const.zENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMzENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXzENTRY_ ENTRY_NUMELEMS_ = const.zENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.zENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.zENTRY_DATASPEC_ def insert(self, index, data): """Insert entry at particular index number Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError def append(self, index, data): """Add entry to end of attribute list Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError class gAttr(Attr): """Global Attribute for a CDF Represents a CDF attribute, providing access to the gEntries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. Normally accessed by providing a key to a :class:`gAttrList`: >>> attribute = cdffile.attrs['attribute_name'] >>> first_gentry = attribute[0] Each element of the list is a single gEntry of the appropriate type. The index to the elements is the gEntry number. A gEntry may be either a single string or a 1D array of numerical type. Entries of numerical type (everything but CDF_CHAR and CDF_UCHAR) with a single element are returned as scalars; multiple-element entries are returned as a list. No provision is made for accessing below the entry level; the whole list is returned at once (but Python's slicing syntax can be used to extract individual items from that list.) Multi-dimensional slicing is *not* supported; an entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry gEntries are *not* necessarily contiguous; a gAttribute may have an entry 0 and entry 2 without an entry 1. :meth:`~Attr.len` will return the *number* of gEntries; use :meth:`~Attr.max_idx` to find the highest defined gEntry number and :meth:`~Attr.has_entry` to determine if a particular gEntry number exists. Iterating over all entries is also supported:: >>> entrylist = [entry for entry in attribute] Deleting gEntries will leave a "hole": >>> attribute[0:3] = [1, 2, 3] >>> del attribute[1] >>> attribute.has_entry(1) False >>> attribute.has_entry(2) True >>> print attribute[0:3] [1, None, 3] Multi-element slices over nonexistent gEntries will return ``None`` where no entry exists. Single-element indices for nonexistent gEntries will raise ``IndexError``. Assigning ``None`` to a gEntry will delete it. 
When assigning to a gEntry, the type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing gEntry of the same number in this gAttribute #. other gEntries in this gAttribute #. data-matching constraints described in :meth:`CDF.new`. See Also ======== :class:`Attr` """ ENTRY_ = const.gENTRY_ ENTRY_DATA_ = const.gENTRY_DATA_ SCOPE = const.GLOBAL_SCOPE ENTRY_EXISTENCE_ = const.gENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMgENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXgENTRY_ ENTRY_NUMELEMS_ = const.gENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.gENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.gENTRY_DATASPEC_ class AttrList(MutableMapping): """Object representing a list of attributes. .. warning:: This class should not be used directly, but only via its subclasses, :class:`gAttrList` and :class:`zAttrList`. Methods listed here are safe to use from the subclasses. .. autosummary:: ~AttrList.clone ~AttrList.copy ~AttrList.from_dict ~AttrList.new ~AttrList.rename .. automethod:: clone .. automethod:: copy .. automethod:: from_dict .. automethod:: new .. automethod:: rename """ def __init__(self, cdf_file, special_entry=None): """Initialize the attribute collection @param cdf_file: CDF these attributes are in @type cdf_file: :py:class:`pycdf.CDF` @param special_entry: callable which returns a "special" entry number, used to limit results for zAttrs to those which match the zVar (i.e. the var number) @type special_entry: callable """ self._cdf_file = cdf_file self.special_entry = special_entry def __getitem__(self, name): """Find an Attribute by name @param name: name of the Attribute to return @type name: str @return: attribute named L{name} @rtype: L{Attr} @raise KeyError: if there is no attribute named L{name} @raise CDFError: other errors in CDF library """ try: attrib = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attrib.global_scope() != self.global_scope: raise KeyError(name + ': no ' + self.attr_name + ' by that name.') return attrib def __setitem__(self, name, data): """Create an Attribute or change its entries @param name: name of Attribute to change @type name: str @param data: Entries to populate this Attribute with. Any existing Entries will be deleted! Another C{Attr} may be specified, in which case all its entries are copied. @type data: scalar, list, or L{Attr} """ if isinstance(data, AttrList): if name in self: del self[name] attr = self._get_or_create(name) for entryno in range(data.max_idx()): if data.has_entry(entryno): attr.new(data[entryno], data.type(entryno), entryno) else: attr = self._get_or_create(name) if isinstance(data, str_classes): data = [data] else: try: junk = len(data) except TypeError: data = [data] attr[:] = data del attr[len(data):] def __delitem__(self, name): """Delete an Attribute (and all its entries) @param name: name of Attribute to delete @type name: str """ try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) attr._delete() def __iter__(self, current=0): """Iterates over all Attr in this CDF or variable Returns name of one L{Attr} at a time until reaches the end. @note: Returned in number order. 
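        Example (illustrative sketch; ``cdffile`` is an assumed open CDF):

        >>> names = [name for name in cdffile.attrs] #in attribute-number order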
""" count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) while current < count.value: candidate = self.AttrType(self._cdf_file, current) if candidate.global_scope() == self.global_scope: if self.special_entry is None or \ candidate.has_entry(self.special_entry()): if str == bytes: value = yield(candidate._name) else: value = yield(candidate._name.decode()) if value != None: current = self[value].number() current += 1 def __repr__(self): """Returns representation of attribute list Cannot return anything that can be eval'd to create a copy of the list, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<' + self.__class__.__name__ + ':\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute list This is an 'informal' representation in that it cannot be evaluated directly to create an L{AttrList}. @return: all the data in this list of attributes @rtype: str """ if self._cdf_file._opened: return '\n'.join([key + ': ' + ( ('\n' + ' ' * (len(key) + 2)).join( [str(value[i]) + ' [' + lib.cdftypenames[value.type(i)] + ']' for i in range(value.max_idx() + 1) if value.has_entry(i)]) if isinstance(value, Attr) else str(value) + ' [' + lib.cdftypenames[self.type(key)] + ']' ) for (key, value) in sorted(self.items())]) else: if isinstance(self._cdf_file.pathname, str): return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname) else: return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname.decode('ascii')) def clone(self, master, name=None, new_name=None): """ Clones another attribute list, or one attribute from it, into this list. Parameters ========== master : AttrList the attribute list to copy from. This can be any dict-like object. Other Parameters ================ name : str (optional) name of attribute to clone (default: clone entire list) new_name : str (optional) name of the new attribute, default ``name`` """ if name is None: self._clone_list(master) else: self._clone_attr(master, name, new_name) def copy(self): """ Create a copy of this attribute list Returns ======= out : dict copy of the entries for all attributes in this list """ return dict((key, value[:] if isinstance(value, Attr) else value) for (key, value) in self.items()) def new(self, name, data=None, type=None): """ Create a new Attr in this AttrList Parameters ========== name : str name of the new Attribute Other Parameters ================ data data to put into the first entry in the new Attribute type CDF type of the first entry from :mod:`~pycdf.const`. Only used if data are specified. Raises ====== KeyError : if the name already exists in this list """ if name in self: raise KeyError(name + ' already exists.') #A zAttr without an Entry in this zVar will be a "get" not "create" attr = self._get_or_create(name) if data is not None: if self.special_entry is None: attr.new(data, type) else: attr.new(data, type, self.special_entry()) def rename(self, old_name, new_name): """ Rename an attribute in this list Renaming a zAttribute renames it for *all* zVariables in this CDF! Parameters ========== old_name : str the current name of the attribute new_name : str the new name of the attribute """ AttrList.__getitem__(self, old_name).rename(new_name) def from_dict(self, in_dict): """ Fill this list of attributes from a dictionary .. deprecated:: 0.1.5 Use :meth:`~pycdf.AttrList.clone` instead; it supports cloning from dictionaries. 
Parameters ========== in_dict : dict Attribute list is populated entirely from this dictionary; all existing attributes are deleted. """ warnings.warn("from_dict is deprecated and will be removed. Use clone.", DeprecationWarning) for k in in_dict: self[k] = in_dict[k] for k in list(self): if not k in in_dict: del self[k] def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{AttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name self[new_name] = master[name] def _clone_list(self, master): """Clones this attribute list from another @param master: the attribute list to copy from @type master: L{AttrList} """ for name in master: self._clone_attr(master, name) for name in list(self): #Can't iterate over a list we're changing if not name in master: del self[name] def _get_or_create(self, name): """Retrieve L{Attr} or create it if it doesn't exist @param name: name of the attribute to look up or create @type name: str @return: attribute with this name @rtype: L{Attr} """ attr = None try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status != const.NO_SUCH_ATTR: raise if attr is None: attr = self.AttrType(self._cdf_file, name, True) elif attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) return attr class gAttrList(AttrList): """ Object representing *all* the gAttributes in a CDF. Normally accessed as an attribute of an open :class:`CDF`: >>> global_attribs = cdffile.attrs Appears as a dictionary: keys are attribute names; each value is an attribute represented by a :class:`gAttr` object. To access the global attribute TEXT: >>> text_attr = cdffile.attrs['TEXT'] See Also ======== :class:`AttrList` """ AttrType = gAttr attr_name = 'gAttribute' global_scope = True def __len__(self): """ Number of gAttributes in this CDF Returns ======= out : int number of gAttributes in the CDF """ count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMgATTRS_, ctypes.byref(count)) return count.value class zAttrList(AttrList): """Object representing *all* the zAttributes in a zVariable. Normally accessed as an attribute of a :class:`Var` in an open CDF: >>> epoch_attribs = cdffile['Epoch'].attrs Appears as a dictionary: keys are attribute names, values are the value of the zEntry associated with the appropriate zVariable. Each vAttribute in a CDF may only have a *single* entry associated with each variable. The entry may be a string, a single numerical value, or a series of numerical values. Entries with multiple values are returned as an entire list; direct access to the individual elements is not possible. Example: finding the first dependency of (ISTP-compliant) variable ``Flux``: >>> print cdffile['Flux'].attrs['DEPEND_0'] zAttributes are shared among zVariables, one zEntry allowed per zVariable. (pyCDF hides this detail.) Deleting the last zEntry for a zAttribute will delete the underlying zAttribute. zEntries are created and destroyed by the usual dict methods on the zAttrlist: >>> epoch_attribs['new_entry'] = [1, 2, 4] #assign a list to new zEntry >>> del epoch_attribs['new_entry'] #delete the zEntry The type of the zEntry is guessed from data provided. 
The type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing zEntry corresponding to this zVar #. other zEntries in this zAttribute #. the type of this zVar #. data-matching constraints described in :py:meth:`CDF.new` See Also ======== :class:`AttrList` """ AttrType = zAttr attr_name = 'zAttribute' global_scope = False def __init__(self, zvar): """Initialize the attribute collection @param zvar: zVariable these attributes are in @param zvar: :py:class:`pycdf.Var` """ super(zAttrList, self).__init__(zvar.cdf_file, zvar._num) self._zvar = zvar def __getitem__(self, name): """Find an zEntry by name @param name: name of the zAttribute to return @type name: str @return: attribute named L{name} @rtype: L{zAttr} @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if attrib.has_entry(zvar_num): attrib._raw = self._zvar._raw return attrib[zvar_num] else: raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) def __delitem__(self, name): """Delete an zEntry by name @param name: name of the zEntry to delete @type name: str @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library @note: If this is the only remaining entry, the Attribute will be deleted. """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(str(name) + ': no such attribute for variable ' + str(self._zvar._name)) del attrib[zvar_num] if len(attrib) == 0: attrib._delete() def __setitem__(self, name, data): """Sets a zEntry by name The type of the zEntry is guessed from L{data}. The type is chosen to match the data; subject to that constraint, it will try to match (in order): 1. existing zEntry corresponding to this zVar 2. other zEntries in this zAttribute 3. the type of this zVar 4. data-matching constraints described in L{_Hyperslice.types} @param name: name of zAttribute; zEntry for this zVariable will be set in zAttribute by this name @type name: str @raise CDFError: errors in CDF library @raise ValueError: if unable to find a valid CDF type matching L{data}, or if L{data} is the wrong dimensions. """ try: attr = super(zAttrList, self).__getitem__(name) except KeyError: attr = zAttr(self._cdf_file, name, True) attr._raw = self._zvar._raw attr[self._zvar._num()] = data def __len__(self): """Number of zAttributes in this variable @return: number of zAttributes in the CDF which have entries for this variable. @rtype: int """ length = 0 count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) current = 0 while current < count.value: candidate = zAttr(self._cdf_file, current) if not candidate.global_scope(): if candidate.has_entry(self._zvar._num()): length += 1 current += 1 return length def type(self, name, new_type=None): """Find or change the CDF type of a zEntry in this zVar @param name: name of the zAttr to check or change @type name: str @param new_type: type to change it to, see :py:mod:`pycdf.const` @type new_type: ctypes.c_long @return: CDF variable type, see :py:mod:`pycdf.const` @rtype: int @note: If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) return attrib.type(zvar_num, new_type) def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{zAttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name if new_name in self: del self[new_name] self.new(new_name, master[name], master.type(name) if hasattr(master, 'type') else None)
def expand(self, data): """Expands the record dimension of this slice to hold a set of data If the length of data (outermost dimension) is larger than the record count (counts[0]) for this slice, expand the slice to hold all the data. This requires that the record dimension of the slice not be degenerate, and also that it not have been completely specified when the hyperslice was created (i.e. record dimension either ellipsis or no specified stop.) Does *not* expand any other dimension, since that's Very Hard in CDF. @param data: the data which are intended to be stored in this slice @type data: list """ rec_slice = self.expanded_key[0] if not self.rv or isinstance(data, str_classes) or self.degen[0] or \ not hasattr(rec_slice, 'stop'): return if len(data) < self.counts[0]: #Truncate to fit data if rec_slice.stop is None and rec_slice.step in (None, 1): self.counts[0] = len(data) elif len(data) > self.counts[0]: #Expand to fit data if rec_slice.step in (None, 1): self.counts[0] = len(data)
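# Minimal standalone sketch (invented names, not part of the pycdf source) of
# the record-count rule that expand() applies once its early-return guards
# pass: an open-ended record slice with unit step shrinks to fit shorter data,
# and any unit-step record slice grows to fit longer data.
#
#     def adjusted_record_count(counts0, rec_slice, data_len):
#         if data_len < counts0 and rec_slice.stop is None \
#                 and rec_slice.step in (None, 1):
#             return data_len               # truncate the slice to fit the data
#         if data_len > counts0 and rec_slice.step in (None, 1):
#             return data_len               # expand the slice to fit the data
#         return counts0                    # otherwise leave the count alone
#
#     adjusted_record_count(5, slice(0, None), 3)   # -> 3 (shrinks: open-ended)
#     adjusted_record_count(3, slice(0, None), 5)   # -> 5 (grows: unit step)
#     adjusted_record_count(5, slice(0, 5), 3)      # -> 5 (explicit stop: no shrink)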
3583
3607
#!/usr/bin/env python # -*- coding: utf-8 -*- """ das developers note: This a is modification of the original SpacePy pycdf package. All refereneces to the greater spacepy package have been removed to create a small standalone module. --cwp 2018-10-18 The libcdf.so location code has been changed to find the version installed in anaconda. --cwp 2020-04-06 This package provides a Python interface to the Common Data Format (CDF) library used for many NASA missions, available at http://cdf.gsfc.nasa.gov/. It is targeted at Python 2.6+ and should work without change on either Python 2 or Python 3. The interface is intended to be 'pythonic' rather than reproducing the C interface. To open or close a CDF and access its variables, see the :class:`CDF` class. Accessing data within the variables is via the :class:`Var` class. The :data:`lib` object provides access to some routines that affect the functionality of the library in general. The :mod:`~pycdf.const` module contains constants useful for accessing the underlying library. Authors: Jon Niehof Institution: University of New Hampshire Contact: [email protected] Copyright 2010-2015 Los Alamos National Security, LLC. """ __contact__ = 'Jon Niehof, [email protected]' try: from collections.abc import MutableMapping, MutableSequence except ImportError: from collections import MutableMapping, MutableSequence import ctypes import ctypes.util import datetime import operator import os import os.path import shutil import sys import tempfile import warnings import weakref import numpy import numpy.ma #Import const AFTER library loaded, so failed load doesn't leave half-imported #from . import const try: str_classes = (str, bytes, unicode) except NameError: str_classes = (str, bytes) class Library(object): """ Abstraction of the base CDF C library and its state. Not normally intended for end-user use. An instance of this class is created at package load time as the :data:`~pycdf.lib` variable, providing access to the underlying C library if necessary. The CDF library itself is described in section 2.1 of the CDF user's guide, as well as the CDF C reference manual. Calling the C library directly requires knowledge of :mod:`ctypes`. Instantiating this object loads the C library, see :doc:`/pycdf` docs for details. .. autosummary:: ~Library.call ~Library.check_status ~Library.datetime_to_epoch ~Library.datetime_to_epoch16 ~Library.datetime_to_tt2000 ~Library.epoch_to_datetime ~Library.epoch_to_epoch16 ~Library.epoch_to_num ~Library.epoch_to_tt2000 ~Library.epoch16_to_datetime ~Library.epoch16_to_epoch ~Library.epoch16_to_tt2000 ~Library.set_backward supports_int8 ~Library.tt2000_to_datetime ~Library.tt2000_to_epoch ~Library.tt2000_to_epoch16 v_datetime_to_epoch v_datetime_to_epoch16 v_datetime_to_tt2000 v_epoch_to_datetime v_epoch_to_tt2000 v_epoch16_to_datetime v_epoch16_to_tt2000 v_tt2000_to_datetime v_tt2000_to_epoch v_tt2000_to_epoch16 libpath version .. automethod:: call .. automethod:: check_status .. automethod:: datetime_to_epoch .. automethod:: datetime_to_epoch16 .. automethod:: datetime_to_tt2000 .. automethod:: epoch_to_datetime .. automethod:: epoch_to_epoch16 .. automethod:: epoch_to_num .. automethod:: epoch_to_tt2000 .. automethod:: epoch16_to_datetime .. automethod:: epoch16_to_epoch .. automethod:: epoch16_to_tt2000 .. automethod:: set_backward .. attribute:: supports_int8 True if this library supports INT8 and TIME_TT2000 types; else False. .. automethod:: tt2000_to_datetime .. automethod:: tt2000_to_epoch .. 
automethod:: tt2000_to_epoch16 .. method:: v_datetime_to_epoch(datetime) A vectorized version of :meth:`datetime_to_epoch` which takes a numpy array of datetimes as input and returns an array of epochs. .. method:: v_datetime_to_epoch16(datetime) A vectorized version of :meth:`datetime_to_epoch16` which takes a numpy array of datetimes as input and returns an array of epoch16. .. method:: v_datetime_to_tt2000(datetime) A vectorized version of :meth:`datetime_to_tt2000` which takes a numpy array of datetimes as input and returns an array of TT2000. .. method:: v_epoch_to_datetime(epoch) A vectorized version of :meth:`epoch_to_datetime` which takes a numpy array of epochs as input and returns an array of datetimes. .. method:: v_epoch_to_tt2000(epoch) A vectorized version of :meth:`epoch_to_tt2000` which takes a numpy array of epochs as input and returns an array of tt2000s. .. method:: v_epoch16_to_datetime(epoch0, epoch1) A vectorized version of :meth:`epoch16_to_datetime` which takes a numpy array of epoch16 as input and returns an array of datetimes. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_epoch16_to_tt2000(epoch16) A vectorized version of :meth:`epoch16_to_tt2000` which takes a numpy array of epoch16 as input and returns an array of tt2000s. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_tt2000_to_datetime(tt2000) A vectorized version of :meth:`tt2000_to_datetime` which takes a numpy array of tt2000 as input and returns an array of datetimes. .. method:: v_tt2000_to_epoch(tt2000) A vectorized version of :meth:`tt2000_to_epoch` which takes a numpy array of tt2000 as input and returns an array of epochs. .. method:: v_tt2000_to_epoch16(tt2000) A vectorized version of :meth:`tt2000_to_epoch16` which takes a numpy array of tt2000 as input and returns an array of epoch16. .. attribute:: libpath The path where pycdf found the CDF C library, potentially useful in debugging. If this contains just the name of a file (with no path information), then the system linker found the library for pycdf. On Linux, ``ldconfig -p`` may be useful for displaying the system's library resolution. .. attribute:: version Version of the CDF library, (version, release, increment, subincrement) """ def __init__(self, libpath=None, library=None): """Load the CDF C library. Searches for the library in the order: 1. Appropriately-named file in CDF_LIB 2. Appropriately-named file in CDF_BASE 3. Standard library search path @raise CDFError: BAD_DATA_TYPE if can't map types properly """ if not 'CDF_TMP' in os.environ: os.environ['CDF_TMP'] = tempfile.gettempdir() if not library: if not libpath: self.libpath, self._library = self._find_lib() if self._library is None: raise Exception(( 'Cannot load CDF C library; checked {0}. 
' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(self.libpath))) else: self._library = ctypes.CDLL(libpath) self.libpath = libpath else: self._library = library self.libpath = libpath self._library.CDFlib.restype = ctypes.c_long #commonly used, so set it up here self._library.EPOCHbreakdown.restype = ctypes.c_long self._library.computeEPOCH.restype = ctypes.c_double self._library.computeEPOCH.argtypes = [ctypes.c_long] * 7 self._library.computeEPOCH16.restype = ctypes.c_double self._library.computeEPOCH16.argtypes = [ctypes.c_long] * 10 + \ [ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDFsetFileBackward'): self._library.CDFsetFileBackward.restype = None self._library.CDFsetFileBackward.argtypes = [ctypes.c_long] #Map old name to the 3.7.1+ name if not hasattr(self._library, 'computeTT2000') \ and hasattr(self._library, 'CDF_TT2000_from_UTC_parts'): self._library.computeTT2000 \ = self._library.CDF_TT2000_from_UTC_parts if hasattr(self._library, 'computeTT2000'): self._library.computeTT2000.restype = ctypes.c_longlong self._library.computeTT2000.argtypes = \ [ctypes.c_double] *9 #Map old name to the 3.7.1+ name if not hasattr(self._library, 'breakdownTT2000') \ and hasattr(self._library, 'CDF_TT2000_to_UTC_parts'): self._library.breakdownTT2000 \ = self._library.CDF_TT2000_to_UTC_parts if hasattr(self._library, 'breakdownTT2000'): self._library.breakdownTT2000.restype = None self._library.breakdownTT2000.argtypes = \ [ctypes.c_longlong] + [ctypes.POINTER(ctypes.c_double)] * 9 if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH'): self._library.CDF_TT2000_to_UTC_EPOCH.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH.argtypes = [ctypes.c_longlong] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH'): self._library.CDF_TT2000_from_UTC_EPOCH.restype = ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH.argtypes = [ctypes.c_double] if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH16'): self._library.CDF_TT2000_to_UTC_EPOCH16.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH16.argtypes = \ [ctypes.c_longlong, ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH16'): self._library.CDF_TT2000_from_UTC_EPOCH16.restype = \ ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH16.argtypes = \ [ctypes.POINTER(ctypes.c_double * 2)] #Get CDF version information ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) sub = ctypes.c_char(b' ') self.call(const.GET_, const.LIB_VERSION_, ctypes.byref(ver), const.GET_, const.LIB_RELEASE_, ctypes.byref(rel), const.GET_, const.LIB_INCREMENT_, ctypes.byref(inc), const.GET_, const.LIB_subINCREMENT_, ctypes.byref(sub)) ver = ver.value rel = rel.value inc = inc.value sub = sub.value self.version = (ver, rel, inc, sub) self._del_middle_rec_bug = ver < 3 or (ver == 3 and (rel < 4 or (rel == 4 and inc < 1))) self.supports_int8 = (ver > 3 or (ver == 3 and rel >=4)) self.cdftypenames = {const.CDF_BYTE.value: 'CDF_BYTE', const.CDF_CHAR.value: 'CDF_CHAR', const.CDF_INT1.value: 'CDF_INT1', const.CDF_UCHAR.value: 'CDF_UCHAR', const.CDF_UINT1.value: 'CDF_UINT1', const.CDF_INT2.value: 'CDF_INT2', const.CDF_UINT2.value: 'CDF_UINT2', const.CDF_INT4.value: 'CDF_INT4', const.CDF_UINT4.value: 'CDF_UINT4', const.CDF_INT8.value: 'CDF_INT8', const.CDF_FLOAT.value: 'CDF_FLOAT', const.CDF_REAL4.value: 'CDF_REAL4', const.CDF_DOUBLE.value: 'CDF_DOUBLE', const.CDF_REAL8.value: 'CDF_REAL8', const.CDF_EPOCH.value: 'CDF_EPOCH', 
const.CDF_EPOCH16.value: 'CDF_EPOCH16', const.CDF_TIME_TT2000.value: 'CDF_TIME_TT2000', } self.numpytypedict = {const.CDF_BYTE.value: numpy.int8, const.CDF_CHAR.value: numpy.int8, const.CDF_INT1.value: numpy.int8, const.CDF_UCHAR.value: numpy.uint8, const.CDF_UINT1.value: numpy.uint8, const.CDF_INT2.value: numpy.int16, const.CDF_UINT2.value: numpy.uint16, const.CDF_INT4.value: numpy.int32, const.CDF_UINT4.value: numpy.uint32, const.CDF_INT8.value: numpy.int64, const.CDF_FLOAT.value: numpy.float32, const.CDF_REAL4.value: numpy.float32, const.CDF_DOUBLE.value: numpy.float64, const.CDF_REAL8.value: numpy.float64, const.CDF_EPOCH.value: numpy.float64, const.CDF_EPOCH16.value: numpy.dtype((numpy.float64, 2)), const.CDF_TIME_TT2000.value: numpy.int64, } self.timetypes = [const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value] if not self.supports_int8: del self.cdftypenames[const.CDF_INT8.value] del self.numpytypedict[const.CDF_INT8.value] del self.cdftypenames[const.CDF_TIME_TT2000.value] del self.numpytypedict[const.CDF_TIME_TT2000.value] elif sys.platform.startswith('linux') \ and os.uname()[4].startswith('arm') \ and hasattr(self._library, 'computeTT2000') \ and self._library.computeTT2000( 2010, 1, 1, 0, 0, 0, 0, 0, 0) != 315576066184000000: #TT2000 call failed, so probably need to type-pun #double arguments to variadic functions. #Calling convention for non-variadic functions with floats #is unique, but convention for ints is same as variadic. #So type-pun arguments to integers to force that calling #convention. if ctypes.sizeof(ctypes.c_longlong) != \ ctypes.sizeof(ctypes.c_double): warnings.warn('ARM with unknown type sizes; ' 'TT2000 functions will not work.') else: self._library.computeTT2000.argtypes = \ [ctypes.c_longlong] * 9 c_ll_p = ctypes.POINTER(ctypes.c_longlong) if self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( 2010)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) != 315576066184000000: warnings.warn('ARM with unknown calling convention; ' 'TT2000 functions will not work.') self.datetime_to_tt2000 = self._datetime_to_tt2000_typepunned v_epoch16_to_datetime = numpy.frompyfunc( self.epoch16_to_datetime, 2, 1) self.v_epoch16_to_datetime = \ lambda x: v_epoch16_to_datetime(x[..., 0], x[..., 1]) self.v_epoch_to_datetime = numpy.frompyfunc( self.epoch_to_datetime, 1, 1) self.v_tt2000_to_datetime = numpy.frompyfunc( self.tt2000_to_datetime, 1, 1) self.v_datetime_to_epoch = numpy.vectorize( self.datetime_to_epoch, otypes=[numpy.float64]) v_datetime_to_epoch16 = numpy.frompyfunc( self.datetime_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. 
We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_datetime_to_epoch16(x): retval = numpy.require(v_datetime_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_datetime_to_epoch16 = _v_datetime_to_epoch16 self.v_datetime_to_tt2000 = numpy.vectorize( self.datetime_to_tt2000, otypes=[numpy.int64]) self.v_epoch_to_tt2000 = numpy.vectorize( self.epoch_to_tt2000, otypes=[numpy.int64]) self.v_tt2000_to_epoch = numpy.vectorize( self.tt2000_to_epoch, otypes=[numpy.float64]) v_epoch16_to_tt2000 = numpy.frompyfunc( self.epoch16_to_tt2000, 2, 1) self.v_epoch16_to_tt2000 = \ lambda x: v_epoch16_to_tt2000(x[..., 0], x[..., 1]) v_tt2000_to_epoch16 = numpy.frompyfunc( self.tt2000_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_tt2000_to_epoch16(x): retval = numpy.require(v_tt2000_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_tt2000_to_epoch16 = _v_tt2000_to_epoch16 if not self.supports_int8: self.datetime_to_tt2000 = self._bad_tt2000 self.tt2000_to_datetime = self._bad_tt2000 self.v_datetime_to_tt2000 = self._bad_tt2000 self.v_tt2000_to_datetime = self._bad_tt2000 self.epoch_to_tt2000 = self._bad_tt2000 self.v_epoch_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch = self._bad_tt2000 self.v_tt2000_to_epoch = self._bad_tt2000 self.epoch_16_to_tt2000 = self._bad_tt2000 self.v_epoch16_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch16 = self._bad_tt2000 self.v_tt2000_to_epoch16 = self._bad_tt2000 #Default to V2 CDF self.set_backward(True) @staticmethod def _find_lib(): """ Search for the CDF library Searches in likely locations for CDF libraries and attempts to load them. Stops at first successful load and, if fails, reports all the files that were tried as libraries. Returns ======= out : tuple This is either (path to library, loaded library) or, in the event of failure, (None, list of libraries tried) """ failed = [] for libpath in Library._lib_paths(): try: lib = ctypes.CDLL(libpath) except: failed.append(libpath) else: return libpath, lib return (failed, None) @staticmethod def _lib_paths(): """Find candidate paths for the CDF library Does not check that the library is actually in any particular directory, just returns a list of possible locations, in priority order. Returns ======= out : generator of str paths that look like the CDF library """ #What the library might be named names = { 'win32': ['cdf.dll'], 'darwin': ['libcdf.dylib', 'cdf.dylib', 'libcdf.so'], 'linux2': ['libcdf.so'], 'linux': ['libcdf.so'], } names = names.get(sys.platform, ['libcdf.so']) #All existing CDF-library-like paths within a directory search_dir = lambda x: \ [os.path.join(x, fname) for fname in names if os.path.exists(os.path.join(x, fname))] # Only use anaconda locations... # Defined during builds ... if 'PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['PREFIX'], 'lib')): yield p # defined when conda is activated ... 
if 'CONDA_PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'lib')): yield p # Special subdirectory for anaconda unix packages on windows if 'LIBRARY_BIN' in os.environ: for p in search_dir(os.environ['LIBRARY_BIN']): yield p ctypespath = ctypes.util.find_library( 'cdf.dll' if sys.platform == 'win32' else 'cdf') if ctypespath: yield ctypespath def check_status(self, status, ignore=()): """ Raise exception or warning based on return status of CDF call Parameters ========== status : int status returned by the C library Other Parameters ================ ignore : sequence of ctypes.c_long CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. (Default none). Raises ====== CDFError : if status < CDF_WARN, indicating an error Warns ===== CDFWarning : if CDF_WARN <= status < CDF_OK, indicating a warning. Returns ======= out : int status (unchanged) """ if status == const.CDF_OK or status in ignore: return status if status < const.CDF_WARN: raise CDFError(status) else: warning = CDFWarning(status) warning.warn() return status def call(self, *args, **kwargs): """ Call the CDF internal interface Passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with :meth:`check_status`. Terminal NULL is automatically added to args. Parameters ========== args : various, see :mod:`ctypes` Passed directly to the CDF library interface. Useful constants are defined in the :mod:`~pycdf.const` module. Other Parameters ================ ignore : sequence of CDF statuses sequence of CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. Returns ======= out : int CDF status from the library Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ if 'ignore' in kwargs: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) ), kwargs['ignore']) else: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) )) def set_backward(self, backward=True): """ Set backward compatibility mode for new CDFs Unless backward compatible mode is set, CDF files created by the version 3 library can not be read by V2. Parameters ========== backward : boolean Set backward compatible mode if True; clear it if False. Raises ====== ValueError : if backward=False and underlying CDF library is V2 """ if self.version[0] < 3: if not backward: raise ValueError( 'Cannot disable backward-compatible mode for CDF version 2.') else: return self._library.CDFsetFileBackward(const.BACKWARDFILEon if backward else const.BACKWARDFILEoff) def epoch_to_datetime(self, epoch): """ Converts a CDF epoch value to a datetime Parameters ========== epoch : float epoch value from CDF Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
See Also ======== v_epoch_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) self._library.EPOCHbreakdown(ctypes.c_double(epoch), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999000) else: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, msec.value * 1000) def datetime_to_epoch(self, dt): """ Converts a Python datetime to a CDF Epoch value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : float epoch corresponding to dt See Also ======== v_datetime_to_epoch """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) micro = dt.microsecond % 1000 if micro >= 500 and dt.year < 9999: dt += datetime.timedelta(0, 0, 1000) return self._library.computeEPOCH(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000)) def epoch16_to_datetime(self, epoch0, epoch1): """ Converts a CDF epoch16 value to a datetime .. note:: The call signature has changed since SpacePy 0.1.2. Formerly this method took a single argument with two values; now it requires two arguments (one for each value). To convert existing code, replace ``epoch16_to_datetime(epoch)`` with ``epoch16_to_datetime(*epoch)``. Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. See Also ======== v_epoch16_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) usec = ctypes.c_long(0) nsec = ctypes.c_long(0) psec = ctypes.c_long(0) self._library.EPOCH16breakdown((ctypes.c_double * 2)(epoch0, epoch1), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec), ctypes.byref(psec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) micro = int(float(msec.value) * 1000 + float(usec.value) + float(nsec.value) / 1000 + float(psec.value) / 1e6 + 0.5) if micro < 1000000: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_epoch16(self, dt): """ Converts a Python datetime to a CDF Epoch16 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : list of float epoch16 corresponding to dt See Also ======== v_datetime_to_epoch16 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) #Default to "illegal epoch" epoch16 = (ctypes.c_double * 2)(-1., -1.) 
if self._library.computeEPOCH16(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0, 0, epoch16): return (-1., -1.) #Failure, so illegal epoch return (epoch16[0], epoch16[1]) def epoch_to_epoch16(self, epoch): """ Converts a CDF EPOCH to a CDF EPOCH16 value Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : (double, double) EPOCH16 corresponding to epoch """ e = numpy.require(epoch, numpy.float64) s = numpy.trunc(e / 1000.0) #ugly numpy stuff, probably a better way.... res = numpy.hstack((s, (e - s * 1000.0) * 1e9)) if len(res) <= 2: return res newshape = list(res.shape[0:-2]) newshape.append(res.shape[-1] // 2) newshape.append(2) return numpy.rollaxis(res.reshape(newshape), -1, -2) def epoch_to_num(self, epoch): """ Convert CDF EPOCH to matplotlib number. Same output as :func:`~matplotlib.dates.date2num` and useful for plotting large data sets without converting the times through datetime. Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : double Floating point number representing days since 0001-01-01. """ #date2num day 1 is 1/1/1 00UT #epoch 1/1/1 00UT is 31622400000.0 (millisecond) return (epoch - 31622400000.0) / (24 * 60 * 60 * 1000.0) + 1.0 def epoch16_to_epoch(self, epoch16): """ Converts a CDF EPOCH16 to a CDF EPOCH value Parameters ========== epoch16 : (double, double) EPOCH16 to convert. Lists and numpy arrays are acceptable. LAST dimension should be 2: the two pairs of EPOCH16 Returns ======= out : double EPOCH corresponding to epoch16 """ e = numpy.require(epoch16, numpy.float64) return e[..., 0] * 1000.0 + numpy.round(e[..., 1] / 1e9) def tt2000_to_datetime(self, tt2000): """ Converts a CDF TT2000 value to a datetime .. note:: Although TT2000 values support leapseconds, Python's datetime object does not. Any times after 23:59:59.999999 will be truncated to 23:59:59.999999. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
See Also ======== v_tt2000_to_datetime """ yyyy = ctypes.c_double(0) mm = ctypes.c_double(0) dd = ctypes.c_double(0) hh = ctypes.c_double(0) min = ctypes.c_double(0) sec = ctypes.c_double(0) msec = ctypes.c_double(0) usec = ctypes.c_double(0) nsec = ctypes.c_double(0) self._library.breakdownTT2000( ctypes.c_longlong(tt2000), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) sec = int(sec.value) if sec >= 60: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), 59, 999999) micro = int(msec.value * 1000 + usec.value + nsec.value / 1000 + 0.5) if micro < 1000000: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_tt2000(self, dt): """ Converts a Python datetime to a CDF TT2000 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0) def _datetime_to_tt2000_typepunned(self, dt): """ Converts a Python datetime to a CDF TT2000 value Typepunned version that passes doubles as longlongs, to get around ARM calling convention oddness. Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ c_ll_p = ctypes.POINTER(ctypes.c_longlong) if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( dt.year)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.month)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.day)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.hour)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.minute)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.second)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond // 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond % 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) def epoch_to_tt2000(self, epoch): """ Converts a CDF EPOCH to a CDF TT2000 value Parameters ========== epoch : double EPOCH to convert Returns ======= out : int tt2000 corresponding to epoch See Also ======== v_epoch_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH(epoch) def tt2000_to_epoch(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH .. note:: Although TT2000 values support leapseconds, CDF EPOCH values do not. 
Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double EPOCH corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch """ return self._library.CDF_TT2000_to_UTC_EPOCH(tt2000) def epoch16_to_tt2000(self, epoch0, epoch1): """ Converts a CDF epoch16 value to TT2000 .. note:: Because TT2000 does not support picoseconds, the picoseconds value in epoch is ignored (i.e., truncated.) Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : long TT2000 corresponding to epoch. See Also ======== v_epoch16_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH16( (ctypes.c_double * 2)(epoch0, epoch1)) def tt2000_to_epoch16(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH16 .. note:: Although TT2000 values support leapseconds, CDF EPOCH16 values do not. Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double, double EPOCH16 corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch16 """ #Default to "illegal epoch" if isn't populated epoch16 = (ctypes.c_double * 2)(-1., -1.) if self._library.CDF_TT2000_to_UTC_EPOCH16(tt2000, epoch16): return (-1., -1.) #Failure; illegal epoch return (epoch16[0], epoch16[1]) def _bad_tt2000(*args, **kwargs): """Convenience function for complaining that TT2000 not supported""" raise NotImplementedError( 'TT2000 functions require CDF library 3.4.0 or later') def download_library(): """Download and install the CDF library""" if sys.platform != 'win32': raise NotImplementedError( 'CDF library install only supported on Windows') try: import html.parser as HTMLParser except ImportError: import HTMLParser #https://stackoverflow.com/questions/1713038/super-fails-with-error-typeerror-argument-1-must-be-type-not-classobj class LinkParser(HTMLParser.HTMLParser, object): def __init__(self, *args, **kwargs): self.links_found = [] super(LinkParser, self).__init__(*args, **kwargs) def handle_starttag(self, tag, attrs): if tag != 'a' or attrs[0][0] != 'href': return self.links_found.append(attrs[0][1]) import re import subprocess try: import urllib.request as u except ImportError: import urllib as u # Removed reference to spacepy #import spacepy #if spacepy.config.get('user_agent', None): # class AppURLopener(u.FancyURLopener): # version = spacepy.config['user_agent'] # u._urlopener = AppURLopener() baseurl = 'https://spdf.sci.gsfc.nasa.gov/pub/software/cdf/dist/' url = u.urlopen(baseurl) listing = url.read() url.close() p = LinkParser() p.feed(listing) cdfdist = [l for l in p.links_found if re.match('^cdf3\d_\d(?:_\d)?/$', l)] if not cdfdist: raise RuntimeError( "Couldn't find CDF distribution directory to download") cdfdist.sort(key=lambda x: x.rstrip('/').split('_')) cdfverbase = cdfdist[-1].rstrip('/') instfname = cdfverbase + ('_0' if cdfverbase.count('_') == 1 else '') + \ '-setup-{0}.exe'.format(len('%x' % sys.maxsize)*4) insturl = baseurl + cdfverbase + '/windows/' + instfname tmpdir = tempfile.mkdtemp() try: fname, status = u.urlretrieve(insturl, os.path.join(tmpdir, instfname)) subprocess.check_call([fname, '/install', '/q1'], shell=False) finally: shutil.rmtree(tmpdir) _libpath, _library = Library._find_lib() 
if _library is None: raise Exception(('Cannot load CDF C library; checked {0}. ' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(_libpath))) from . import const lib = Library(_libpath, _library) """Module global library object. Initalized at module load time so all classes have ready access to the CDF library and a common state. E.g: >>> import pycdf >>> pycdf.lib.version (3, 3, 0, ' ') """ class CDFException(Exception): """ Base class for errors or warnings in the CDF library. Not normally used directly, but in subclasses :class:`CDFError` and :class:`CDFWarning`. Error messages provided by this class are looked up from the underlying C library. """ def __init__(self, status): """ Create a CDF Exception Uses CDF C library to look up an appropriate error message. Parameters ========== status : ctypes.c_long CDF status """ self.status = status self.string = 'CDF error ' + repr(status) + ', unable to get details.' message = ctypes.create_string_buffer(const.CDF_STATUSTEXT_LEN + 1) try: retval = lib._library.CDFlib(const.SELECT_, const.CDF_STATUS_, ctypes.c_long(status), const.GET_, const.STATUS_TEXT_, message, const.NULL_) if retval == const.CDF_OK: if isinstance(message.value, str): self.string = message.value elif isinstance(message.value, bytes): self.string = message.value.decode() except: pass def __str__(self): """ Error string associated with the library error. Returns ======= out : str Error message from the CDF library. """ return self.string class CDFError(CDFException): """Raised for an error in the CDF library.""" pass class CDFWarning(CDFException, UserWarning): """Used for a warning in the CDF library.""" def warn(self, level=4): """ Issues a warning based on the information stored in my exception Intended for use in check_status or similar wrapper function. Other Parameters ================ level : int optional (default 3), how far up the stack the warning should be reported. Passed directly to :class:`warnings.warn`. """ warnings.warn(self, self.__class__, level) class EpochError(Exception): """Used for errors in epoch routines""" pass def _compress(obj, comptype=None, param=None): """Set or check the compression of a :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param obj: object on which to set or check compression @type obj: :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param comptype: type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :py:mod:`pycdf.const`. If not specified, will not change compression. @type comptype: ctypes.c_long @param param: Compression parameter, see CDF CRM 4.10 and :py:mod:`pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
@type param: ctypes.c_long @return: (comptype, param) currently in effect @rtype: tuple """ if isinstance(obj, CDF): COMPRESSION_ = const.CDF_COMPRESSION_ elif isinstance(obj, Var): COMPRESSION_ = const.zVAR_COMPRESSION_ else: raise ValueError('Must specify a CDF or Var type.') validparams = {const.NO_COMPRESSION.value: [ctypes.c_long(0)], const.RLE_COMPRESSION.value: [const.RLE_OF_ZEROs], const.HUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.AHUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.GZIP_COMPRESSION.value: [ctypes.c_long(5), ctypes.c_long(1), ctypes.c_long(2), ctypes.c_long(3), ctypes.c_long(4), ctypes.c_long(6), ctypes.c_long(7), ctypes.c_long(8), ctypes.c_long(9), ], } comptypes = [const.NO_COMPRESSION, const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION, const.GZIP_COMPRESSION] comptypevalues = [i.value for i in comptypes] if comptype != None: if not hasattr(comptype, 'value'): comptype = ctypes.c_long(comptype) if param is None: if not comptype.value in validparams: raise CDFError(const.BAD_COMPRESSION) param = validparams[comptype.value][0] paramlist = (ctypes.c_long * 1)(param) obj._call(const.PUT_, COMPRESSION_, comptype, paramlist) params = (ctypes.c_long * const.CDF_MAX_PARMS)(*([0] * const.CDF_MAX_PARMS)) comptype = ctypes.c_long(0) percent = ctypes.c_long(0) obj._call(const.GET_, COMPRESSION_, ctypes.byref(comptype), ctypes.byref(params), ctypes.byref(percent)) param = params[0] if not comptype.value in comptypevalues: raise CDFError(const.BAD_COMPRESSION) validparamvalues = [i.value for i in validparams[comptype.value]] if not param in validparamvalues: raise CDFError(const.BAD_COMPRESSION_PARM) comptype = comptypes[comptypevalues.index(comptype.value)] if comptype in (const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION): param = validparams[comptype.value][validparamvalues.index(param)] return (comptype, param) class CDF(MutableMapping): """ Python object representing a CDF file. Open or create a CDF file by creating an object of this class. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. A readonly CDF with many variables may be slow to close. See :meth:`readonly`. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :meth:`close` or :meth:`save` when done. .. note:: Existing CDF files are opened read-only by default, see :meth:`readonly` to change. CDF supports the `with <http://docs.python.org/tutorial/inputoutput.html#methods-of-file-objects>`_ keyword, like other file objects, so: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... #do brilliant things with the CDF will open the CDF, execute the indented statements, and close the CDF when finished or when an error occurs. The `python docs <http://docs.python.org/reference/compound_stmts.html#with>`_ include more detail on this 'context manager' ability. 
CDF objects behave like a python `dictionary <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_, where the keys are names of variables in the CDF, and the values, :class:`Var` objects. As a dictionary, they are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_ and it is easy to loop over all of the variables in a file. Some examples: #. List the names of all variables in the open CDF ``cdffile``: >>> cdffile.keys() >>> for k in cdffile: #Alternate ... print(k) #. Get a :class:`Var` object for the variable named ``Epoch``: >>> epoch = cdffile['Epoch'] #. Determine if a CDF contains a variable named ``B_GSE``: >>> if 'B_GSE' in cdffile: ... print('B_GSE is in the file') ... else: ... print('B_GSE is not in the file') #. Find how many variables are in the file: >>> print(len(cdffile)) #. Delete the variable ``Epoch`` from the open CDF file ``cdffile``: >>> del cdffile['Epoch'] #. Display a summary of variables and types in open CDF file ``cdffile``: >>> print(cdffile) #. Open the CDF named ``cdf_filename.cdf``, read *all* the data from all variables into dictionary ``data``, and close it when done or if an error occurs: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... data = cdffile.copy() This last example can be very inefficient as it reads the entire CDF. Normally it's better to treat the CDF as a dictionary and access only the data needed, which will be pulled transparently from disc. See :class:`Var` for more subtle examples. Potentially useful dictionary methods and related functions: - `in <http://docs.python.org/reference/expressions.html#in>`_ - `keys <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_ - :py:func:`len` - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - :py:func:`sorted` - :py:func:`~spacepy.toolbox.dictree` The CDF user's guide section 2.2 has more background information on CDF files. The :attr:`~CDF.attrs` Python attribute acts as a dictionary referencing CDF attributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`gAttrList` for more on the dictionary of global attributes. Creating a new CDF from a master (skeleton) CDF has similar syntax to opening one: >>> cdffile = pycdf.CDF('cdf_filename.cdf', 'master_cdf_filename.cdf') This creates and opens ``cdf_filename.cdf`` as a copy of ``master_cdf_filename.cdf``. Using a skeleton CDF is recommended over making a CDF entirely from scratch, but this is possible by specifying a blank master: >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') When CDFs are created in this way, they are opened read-write, see :py:meth:`readonly` to change. By default, new CDFs (without a master) are created in version 2 (backward-compatible) format. To create a version 3 CDF, use :meth:`Library.set_backward`: >>> pycdf.lib.set_backward(False) >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') Add variables by direct assignment, which will automatically set type and dimension based on the data provided: >>> cdffile['new_variable_name'] = [1, 2, 3, 4] or, if more control is needed over the type and dimensions, use :py:meth:`new`. Although it is supported to assign Var objects to Python variables for convenience, there are some minor pitfalls that can arise when changing a CDF that will not affect most users. 
This is only a concern when assigning a zVar object to a Python variable, changing the CDF through some other variable, and then trying to use the zVar object via the originally assigned variable. Deleting a variable: >>> var = cdffile['Var1'] >>> del cdffile['Var1'] >>> var[0] #fail, no such variable Renaming a variable: >>> var = cdffile['Var1'] >>> cdffile['Var1'].rename('Var2') >>> var[0] #fail, no such variable Renaming via the same variable works: >>> var = cdffile['Var1'] >>> var.rename('Var2') >>> var[0] #succeeds, aware of new name Deleting a variable and then creating another variable with the same name may lead to some surprises: >>> var = cdffile['Var1'] >>> var[...] = [1, 2, 3, 4] >>> del cdffile['Var1'] >>> cdffile.new('Var1', data=[5, 6, 7, 8] >>> var[...] [5, 6, 7, 8] .. autosummary:: ~CDF.attr_num ~CDF.attrs ~CDF.add_attr_to_cache ~CDF.add_to_cache ~CDF.backward ~CDF.checksum ~CDF.clear_attr_from_cache ~CDF.clear_from_cache ~CDF.clone ~CDF.close ~CDF.col_major ~CDF.compress ~CDF.copy ~CDF.from_data ~CDF.new ~CDF.raw_var ~CDF.readonly ~CDF.save ~CDF.var_num ~CDF.version .. attribute:: CDF.attrs Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. .. attribute:: CDF.backward True if this CDF was created in backward-compatible mode (for opening with CDF library before 3.x) .. automethod:: add_to_cache .. automethod:: add_attr_to_cache .. automethod:: attr_num .. automethod:: checksum .. automethod:: clear_from_cache .. automethod:: clear_attr_from_cache .. automethod:: clone .. automethod:: close .. automethod:: col_major .. automethod:: compress .. automethod:: copy .. automethod:: from_data .. automethod:: new .. automethod:: raw_var .. automethod:: readonly .. automethod:: save .. automethod:: var_num .. automethod:: version """ def __init__(self, pathname, masterpath=None, create=None, readonly=None): """Open or create a CDF file. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. Raises ====== CDFError if CDF library reports an error CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save` when done. 
""" if masterpath is not None: #Looks like we want to create if create is False: raise ValueError('Cannot specify a master CDF without creating a CDF') if readonly is True: raise ValueError('Cannot create a CDF in readonly mode') if create and readonly: raise ValueError('Cannot create a CDF in readonly mode') try: self.pathname = pathname.encode() except AttributeError: raise ValueError( 'pathname must be string-like: {0}'.format(pathname)) self._handle = ctypes.c_void_p(None) self._opened = False if masterpath is None and not create: self._open(True if readonly is None else readonly) elif masterpath: self._from_master(masterpath.encode()) else: self._create() lib.call(const.SELECT_, const.CDF_zMODE_, ctypes.c_long(2)) self._attrlistref = weakref.ref(gAttrList(self)) self.backward = self.version()[0] < 3 self._var_nums = {} """Cache of name-to-number mappings for variables in this CDF""" self._attr_info = {} """Cache of name-to-(number, global) mappings for attributes in this CDF""" def __del__(self): """Destructor; called when CDF object is destroyed. Close CDF file if there is still a valid handle. .. note:: To avoid data loss, explicitly call :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save`. """ if self._opened: self.close() def __delitem__(self, name): """Delete a zVariable in this CDF, by name or number Parameters ========== name : string or int Name or number of the CDF variable .. note: Variable numbers may change if variables are added or removed. Examples ======== Delete the variable ``Epoch`` from the open CDF file ``cdffile``. >>> del cdffile['Epoch'] """ self[name]._delete() def __enter__(self): """Context manager entrance function.""" return self def __exit__(self, type, value, traceback): """Context manager exit function. Close CDF file. """ self.close() def __getitem__(self, name): """Gets a zVariable in this CDF, by name or number The CDF acts like a dict @param name: Name or number of the CDF variable @type name: string or int @return: CDF variable named or numbered L{name} @rtype: :py:class:`pycdf.Var` @raise KeyError: for pretty much any problem in lookup @note: variable numbers may change if variables are added or removed. """ try: return Var(self, name) except CDFException as e: raise KeyError('{0}: {1}'.format(name, e)) def __setitem__(self, name, data): """Writes data to a zVariable in this CDF If the zVariable does not exist, will create one matching L{data}. If it does exist, will attempt to write L{data} to it without changing the type or dimensions. @param name: name or number of the variable to write @type name: str or int @param data: data to write, or a :py:class:`pycdf.Var` to copy """ if isinstance(data, Var): self.clone(data, name) elif name in self: self[name][...] 
= data if hasattr(data, 'attrs'): self[name].attrs.clone(data.attrs) else: self.new(name, data) def __iter__(self, current = 0): """Iterates over zVars in CDF Iterators for dicts return keys @note: Returned in variable-number order """ while current < self.__len__(): name = self[current].name() value = (yield name) if value is None: current += 1 else: current = self[value]._num() current += 1 def __len__(self): """Implements 'length' of CDF (number of zVars) @return: number of zVars in the CDF @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, const.CDF_NUMzVARS_, ctypes.byref(count)) return count.value def __contains__(self, key): """Determines whether a particular variable name is in the CDF @note: Essentially an efficiency function; L{__iter__} is called if this isn't defined @param key: key/variable name to check @type key: string @return: True if L{key} is the name of a variable in CDF, else False @rtype: Boolean """ try: foo = self[key] return True except KeyError as e: expected = str(key) + \ ": NO_SUCH_VAR: Named variable not found in this CDF." if expected in e.args: return False raise def __repr__(self): """Returns representation of CDF Cannot return anything that can be eval'd to create a copy of the CDF, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<CDF:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the CDF This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.CDF`, just the names, types, and sizes of all variables. (Attributes are not listed.) @return: description of the variables in the CDF @rtype: str """ if self._opened: return '\n'.join([key + ': ' + str(value) for (key, value) in sorted(self.items())]) #can get away with this sort because second value in tuple isn't #compared unless first are different, and variable name is unique. else: if isinstance(self.pathname, str): return 'Closed CDF {0}'.format(self.pathname) else: return 'Closed CDF {0}'.format(self.pathname.decode('ascii')) def _open(self, readonly=True): """Opens the CDF file (called on init) Will open an existing CDF file read/write. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.OPEN_, const.CDF_, self.pathname, ctypes.byref(self._handle)) self._opened = True if readonly: #Default is RW self.readonly(readonly) def _create(self): """Creates (and opens) a new CDF file Created at ``pathname``. Assumes zero-dimension r variables Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.CREATE_, const.CDF_, self.pathname, ctypes.c_long(0), (ctypes.c_long * 1)(0), ctypes.byref(self._handle)) self._opened = True def _from_master(self, master_path): """Creates a new CDF from a master CDF file ``master_path`` is copied to ``pathname`` and opened. Parameters ========== master_path : string location of the master CDF file Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. 
note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ if os.path.exists(self.pathname): raise CDFError(const.CDF_EXISTS) shutil.copy2(master_path, self.pathname) self._open(False) def _call(self, *args, **kwargs): """Select this CDF as current and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. Parameters ========== args : various, see :py:mod:`ctypes`. Passed directly to the CDF library interface. Useful constants are defined in the :doc:`const <pycdf_const>` module of this package. Returns ======= out : ctypes.c_long CDF status from the library .. note: Terminal NULL_ is automatically added to ``args``. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. """ return lib.call(const.SELECT_, const.CDF_, self._handle, *args, **kwargs) def clone(self, zVar, name=None, data=True): """ Clone a zVariable (from another CDF or this) into this CDF Parameters ========== zVar : :py:class:`Var` variable to clone Other Parameters ================ name : str Name of the new variable (default: name of the original) data : boolean (optional) Copy data, or only type, dimensions, variance, attributes? (default: True, copy data as well) Returns ======= out : :py:class:`Var` The newly-created zVar in this CDF """ if name is None: name = zVar.name() if name in self: del self[name] self.new(name, type=zVar.type(), recVary=zVar.rv(), dimVarys=zVar.dv(), dims=zVar._dim_sizes(), n_elements=zVar._nelems()) self[name].compress(*zVar.compress()) self[name].attrs.clone(zVar.attrs) if data: r = zVar._raw zVar._raw = True self.raw_var(name)[...] = zVar[...] zVar._raw = r return zVar def col_major(self, new_col=None): """ Finds the majority of this CDF file Other Parameters ================ new_col : boolean Specify True to change to column-major, False to change to row major, or do not specify to check the majority rather than changing it. (default is check only) Returns ======= out : boolean True if column-major, false if row-major """ if new_col != None: new_maj = const.COLUMN_MAJOR if new_col else const.ROW_MAJOR self._call(const.PUT_, const.CDF_MAJORITY_, new_maj) maj = ctypes.c_long(0) self._call(const.GET_, const.CDF_MAJORITY_, ctypes.byref(maj)) if not maj.value in (const.ROW_MAJOR.value, const.COLUMN_MAJOR.value): raise CDFError(const.BAD_MAJORITY) return maj.value == const.COLUMN_MAJOR.value def readonly(self, ro=None): """ Sets or check the readonly status of this CDF If the CDF has been changed since opening, setting readonly mode will have no effect. .. note:: Closing a CDF that has been opened readonly, or setting readonly False, may take a substantial amount of time if there are many variables in the CDF, as a (potentially large) cache needs to be cleared. Consider specifying ``readonly=False`` when opening the file if this is an issue. However, this may make some reading operations slower. Other Parameters ================ ro : Boolean True to set the CDF readonly, False to set it read/write, or leave out to check only. 
Returns ======= out : Boolean True if CDF is read-only, else False Raises ====== CDFError : if bad mode is set """ if ro == True: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYon) elif ro == False: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYoff) mode = ctypes.c_long(0) self._call(const.CONFIRM_, const.CDF_READONLY_MODE_, ctypes.byref(mode)) if mode.value == const.READONLYon.value: return True elif mode.value == const.READONLYoff.value: return False else: raise CDFError(const.BAD_READONLY_MODE.value) def checksum(self, new_val=None): """ Set or check the checksum status of this CDF. If checksums are enabled, the checksum will be verified every time the file is opened. Other Parameters ================ new_val : boolean True to enable checksum, False to disable, or leave out to simply check. Returns ======= out : boolean True if the checksum is enabled or False if disabled """ if new_val != None: self._call(const.PUT_, const.CDF_CHECKSUM_, const.MD5_CHECKSUM if new_val else const.NO_CHECKSUM) chk = ctypes.c_long(0) self._call(const.GET_, const.CDF_CHECKSUM_, ctypes.byref(chk)) if not chk.value in (const.MD5_CHECKSUM.value, const.NO_CHECKSUM.value): raise CDFError(const.BAD_CHECKSUM) return chk.value == const.MD5_CHECKSUM.value def close(self): """ Closes the CDF file Although called on object destruction (:meth:`~CDF.__del__`), to ensure all data are saved, the user should explicitly call :meth:`~CDF.close` or :meth:`~CDF.save`. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.CLOSE_, const.CDF_) self._opened = False def compress(self, comptype=None, param=None): """ Set or check the compression of this CDF Sets compression on entire *file*, not per-variable. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) Returns ======= out : tuple (comptype, param) currently in effect See Also ======== :meth:`Var.compress` Examples ======== Set file ``cdffile`` to gzip compression, compression level 9: >>> cdffile.compress(pycdf.const.GZIP_COMPRESSION, 9) """ return _compress(self, comptype, param) def new(self, name, data=None, type=None, recVary=True, dimVarys=None, dims=None, n_elements=None, compress=None, compress_param=None): """ Create a new zVariable in this CDF .. note:: Either ``data`` or ``type`` must be specified. If type is not specified, it is guessed from ``data``. Parameters ========== name : str name of the new variable Other Parameters ================ data data to store in the new variable. If this has a an ``attrs`` attribute (e.g., :class:`~spacepy.datamodel.dmarray`), it will be used to populate attributes of the new variable. type : ctypes.c_long CDF type of the variable, from :mod:`~pycdf.const`. See section 2.5 of the CDF user's guide for more information on CDF data types. recVary : boolean record variance of the variable (default True) dimVarys : list of boolean dimension variance of each dimension, default True for all dimensions. 
dims : list of int size of each dimension of this variable, default zero-dimensional. Note this is the dimensionality as defined by CDF, i.e., for record-varying variables it excludes the leading record dimension. See :py:class:`Var`. n_elements : int number of elements, should be 1 except for CDF_CHAR, for which it's the length of the string. compress : ctypes.c_long Compression to apply to this variable, default None. See :py:meth:`Var.compress`. compress_param : ctypes.c_long Compression parameter if compression used; reasonable default is chosen. See :py:meth:`Var.compress`. Returns ======= out : :py:class:`Var` the newly-created zVariable Raises ====== ValueError : if neither data nor sufficient typing information is provided. Notes ===== Any given data may be representable by a range of CDF types; if the type is not specified, pycdf will guess which the CDF types which can represent this data. This breaks down to: #. If input data is a numpy array, match the type of that array #. Proper kind (numerical, string, time) #. Proper range (stores highest and lowest number provided) #. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: #. Type that matches precision of data first, then #. integer type before float type, then #. Smallest type first, then #. signed type first, then #. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if ``data`` specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: #. absolute values between 0 and 3e-39 #. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. """ if type in (const.CDF_EPOCH16, const.CDF_INT8, const.CDF_TIME_TT2000) \ and self.backward: raise ValueError('Cannot use EPOCH16, INT8, or TIME_TT2000 ' 'in backward-compatible CDF') if not lib.supports_int8 and \ type in (const.CDF_INT8, const.CDF_TIME_TT2000): raise ValueError('INT8 and TIME_TT2000 require CDF library 3.4.0') if data is None: if type is None: raise ValueError('Must provide either data or a CDF type.') if dims is None: dims = [] if n_elements is None: n_elements = 1 else: (guess_dims, guess_types, guess_elements) = _Hyperslice.types(data) if dims is None: if recVary: if guess_dims == (): raise ValueError( 'Record-varying data cannot be scalar. 
' 'Specify NRV with CDF.new() or put data in array.') dims = guess_dims[1:] else: dims = guess_dims if type is None: type = guess_types[0] if type == const.CDF_EPOCH16.value and self.backward: type = const.CDF_EPOCH if n_elements is None: n_elements = guess_elements if dimVarys is None: dimVarys = [True for i in dims] recVary = const.VARY if recVary else const.NOVARY dimVarys = [const.VARY if dimVary else const.NOVARY for dimVary in dimVarys] if not hasattr(type, 'value'): type = ctypes.c_long(type) if type.value == const.CDF_INT8.value and not lib.supports_int8: raise ValueError( '64-bit integer support require CDF library 3.4.0') if type.value in (const.CDF_EPOCH16.value, const.CDF_INT8.value, const.CDF_TIME_TT2000.value) \ and self.backward: raise ValueError('Data requires EPOCH16, INT8, or TIME_TT2000; ' 'incompatible with backward-compatible CDF') new_var = Var(self, name, type, n_elements, dims, recVary, dimVarys) if compress != None: new_var.compress(compress, compress_param) if data is not None: new_var[...] = data if hasattr(data, 'attrs'): new_var.attrs.clone(data.attrs) return new_var def raw_var(self, name): """ Get a "raw" :class:`Var` object. Normally a :class:`Var` will perform translation of values for certain types (to/from Unicode for CHAR variables on Py3k, and to/from datetime for all time types). A "raw" object does not perform this translation, on read or write. This does *not* affect the data on disk, and in fact it is possible to maintain multiple Python objects with access to the same zVariable. Parameters ========== name : str name or number of the zVariable """ v = self[name] v._raw = True return v def save(self): """ Saves the CDF file but leaves it open. If closing the CDF, :meth:`close` is sufficient; there is no need to call :meth:`save` before :meth:`close`. .. note:: Relies on an undocumented call of the CDF C library, which is also used in the Java interface. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.SAVE_, const.CDF_) def copy(self): """ Make a copy of all data and attributes in this CDF Returns ======= out : :py:class:`CDFCopy` :class:`~spacepy.datamodel.SpaceData`-like object of all data """ return CDFCopy(self) def version(self): """ Get version of library that created this CDF Returns ======= out : tuple version of CDF library, in form (version, release, increment) """ ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) self._call(const.GET_, const.CDF_VERSION_, ctypes.byref(ver), const.GET_, const.CDF_RELEASE_, ctypes.byref(rel), const.GET_, const.CDF_INCREMENT_, ctypes.byref(inc)) return (ver.value, rel.value, inc.value) def _get_attrs(self): """Get attribute list Provide access to the CDF's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = gAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. 
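A minimal usage sketch (hypothetical attribute names; writing requires the CDF to be writable, e.g. newly created or opened with ``readonly(False)``):

>>> cdf.attrs['Project'] = 'ISTP'                   # gAttr with one Entry
>>> cdf.attrs['TextHistory'] = ['rev 1', 'rev 2']   # gAttr with two Entries
>>> cdf.attrs['Project'][0]
'ISTP'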
""") def var_num(self, varname): """Get the variable number of a particular variable name This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : int Variable number of this zvariable. """ num = self._var_nums.get(varname, None) if num is None: #Copied from Var._get, which can hopefully be thinned varNum = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMBER_, varname, ctypes.byref(varNum)) num = varNum.value self._var_nums[varname] = num return num def attr_num(self, attrname): """Get the attribute number and scope by attribute name This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : tuple attribute number, scope (True for global) of this attribute """ res = self._attr_info.get(attrname, None) if res is None: #Copied from Var._get, which can hopefully be thinned attrNum = ctypes.c_long(0) self._call(const.GET_, const.ATTR_NUMBER_, attrname, ctypes.byref(attrNum)) scope = ctypes.c_long(0) self._call(const.SELECT_, const.ATTR_, attrNum, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) res = (attrNum.value, scope) self._attr_info[attrname] = res return res def clear_attr_from_cache(self, attrname): """Mark an attribute deleted in the name-to-number cache Will remove an attribute, and all attributes with higher numbers, from the attribute cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the attribute. Not this is NOT a string in Python 3! """ num, scope = self.attr_num(attrname) #All numbers higher than this are renumbered for a, n in list(self._attr_info.items()): if n[0] >= num: del self._attr_info[a] def clear_from_cache(self, varname): """Mark a variable deleted in the name-to-number cache Will remove a variable, and all variables with higher numbers, from the variable cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! """ num = self.var_num(varname) #All numbers higher than this are renumbered for v, n in list(self._var_nums.items()): if n >= num: del self._var_nums[v] def add_attr_to_cache(self, attrname, num, scope): """Add an attribute to the name-to-number cache This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable scope : bool True if global scope; False if variable scope. 
""" self._attr_info[attrname] = (num, scope) def add_to_cache(self, varname, num): """Add a variable to the name-to-number cache This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable """ self._var_nums[varname] = num #Note there is no function for delete, currently handled in Var.rename #and Attr.rename by just deleting from the dict directly. Maybe this #should be differen (maybe should be possible to follow a variable across #a rename...) class Var(MutableSequence): """ A CDF variable. This object does not directly store the data from the CDF; rather, it provides access to the data in a format that much like a Python list or numpy :class:`~numpy.ndarray`. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. The CDF user's guide, section 2.3, provides background on variables. .. note:: Not intended to be created directly; use methods of :class:`CDF` to gain access to a variable. A record-varying variable's data are viewed as a hypercube of dimensions n_dims+1 (the extra dimension is the record number). They are indexed in row-major fashion, i.e. the last index changes most frequently / is contiguous in memory. If the CDF is column-major, the data are transformed to row-major before return. Non record-varying variables are similar, but do not have the extra dimension of record number. Variables can be subscripted by a multidimensional index to return the data. Indices are in row-major order with the first dimension representing the record number. If the CDF is column major, the data are reordered to row major. Each dimension is specified by standard Python `slice <http://docs.python.org/tutorial/introduction.html#strings>`_ notation, with dimensions separated by commas. The ellipsis fills in any missing dimensions with full slices. The returned data are lists; Python represents multidimensional arrays as nested lists. The innermost set of lists represents contiguous data. .. note:: numpy 'fancy indexing' is *not* supported. Degenerate dimensions are 'collapsed', i.e. no list of only one element will be returned if a single subscript is specified instead of a range. (To avoid this, specify a slice like 1:2, which starts with 1 and ends before 2). Two special cases: 1. requesting a single-dimension slice for a record-varying variable will return all data for that record number (or those record numbers) for that variable. 2. Requests for multi-dimensional variables may skip the record-number dimension and simply specify the slice on the array itself. In that case, the slice of the array will be returned for all records. In the event of ambiguity (e.g., single-dimension slice on a one-dimensional variable), case 1 takes priority. Otherwise, mismatch between the number of dimensions specified in the slice and the number of dimensions in the variable will cause an :exc:`~exceptions.IndexError` to be thrown. This all sounds very complicated but it is essentially attempting to do the 'right thing' for a range of slices. An unusual case is scalar (zero-dimensional) non-record-varying variables. Clearly they cannot be subscripted normally. 
In this case, use the ``[...]`` syntax meaning 'access all data.': >>> import pycdf >>> testcdf = pycdf.CDF('test.cdf', '') >>> variable = testcdf.new('variable', recVary=False, ... type=pycdf.const.CDF_INT4) >>> variable[...] = 10 >>> variable <Var: CDF_INT4 [] NRV > >>> variable[...] 10 Reading any empty non-record-varying variable will return an empty with the same *number* of dimensions, but all dimensions will be of zero length. The scalar is, again, a special case: due to the inability to have a numpy array which is both zero-dimensional and empty, reading an NRV scalar variable with no data will return an empty one-dimensional array. This is really not recommended. As a list type, variables are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_; iterating over a variable returns a single complete record at a time. This is all clearer with examples. Consider a variable ``B_GSM``, with three elements per record (x, y, z components) and fifty records in the CDF. Then: 1. ``B_GSM[0, 1]`` is the y component of the first record. 2. ``B_GSM[10, :]`` is a three-element list, containing x, y, and z components of the 11th record. As a shortcut, if only one dimension is specified, it is assumed to be the record number, so this could also be written ``B_GSM[10]``. 3. ``B_GSM[...]`` reads all data for ``B_GSM`` and returns it as a fifty-element list, each element itself being a three-element list of x, y, z components. Multidimensional example: consider fluxes stored as a function of pitch angle and energy. Such a variable may be called Flux and stored as a two-dimensional array, with the first dimension representing (say) ten energy steps and the second, eighteen pitch angle bins (ten degrees wide, centered from 5 to 175 degrees). Assume 100 records stored in the CDF (i.e. 100 different times). 1. ``Flux[4]`` is a list of ten elements, one per energy step, each element being a list of 18 fluxes, one per pitch bin. All are taken from the fifth record in the CDF. 2. ``Flux[4, :, 0:4]`` is the same record, all energies, but only the first four pitch bins (roughly, field-aligned). 3. ``Flux[..., 0:4]`` is a 100-element list (one per record), each element being a ten-element list (one per energy step), each containing fluxes for the first four pitch bins. This slicing notation is very flexible and allows reading specifically the desired data from the CDF. All data are, on read, converted to appropriate Python data types; EPOCH, EPOCH16, and TIME_TT2000 types are converted to :class:`~datetime.datetime`. Data are returned in numpy arrays. .. note:: Although pycdf supports TIME_TT2000 variables, the Python :class:`~datetime.datetime` object does not support leap seconds. Thus, on read, any seconds past 59 are truncated to 59.999999 (59 seconds, 999 milliseconds, 999 microseconds). Potentially useful list methods and related functions: - `count <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `in <http://docs.python.org/reference/expressions.html#in>`_ - `index <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `len <http://docs.python.org/library/functions.html#len>`_ - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - `sorted <http://docs.python.org/library/functions.html#sorted>`_ The topic of array majority can be very confusing; good background material is available at `IDL Array Storage and Indexing <http://www.idlcoyote.com/misc_tips/colrow_major.html>`_. 
In brief, *regardless of the majority stored in the CDF*, pycdf will always present the data in the native Python majority, row-major order, also known as C order. This is the default order in `NumPy <http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html #internal-memory-layout-of-an-ndarray>`_. However, packages that render image data may expect it in column-major order. If the axes seem 'swapped' this is likely the reason. The :attr:`~Var.attrs` Python attribute acts as a dictionary referencing zAttributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`zAttrList` for more on the dictionary of attributes. With writing, as with reading, every attempt has been made to match the behavior of Python lists. You can write one record, many records, or even certain elements of all records. There is one restriction: only the record dimension (i.e. dimension 0) can be resized by write, as all records in a variable must have the same dimensions. Similarly, only whole records can be deleted. .. note:: Unusual error messages on writing data usually mean that pycdf is unable to interpret the data as a regular array of a single type matching the type and shape of the variable being written. A 5x4 array is supported; an irregular array where one row has five columns and a different row has six columns is not. Error messages of this type include: - ``Data must be well-formed, regular array of number, string, or datetime`` - ``setting an array element with a sequence.`` - ``shape mismatch: objects cannot be broadcast to a single shape`` For these examples, assume Flux has 100 records and dimensions [2, 3]. Rewrite the first record without changing the rest: >>> Flux[0] = [[1, 2, 3], [4, 5, 6]] Writes a new first record and delete all the rest: >>> Flux[...] = [[1, 2, 3], [4, 5, 6]] Write a new record in the last position and add a new record after: >>> Flux[99:] = [[[1, 2, 3], [4, 5, 6]], ... [[11, 12, 13], [14, 15, 16]]] Insert two new records between the current number 5 and 6: >>> Flux[5:6] = [[[1, 2, 3], [4, 5, 6]], [[11, 12, 13], ... [14, 15, 16]]] This operation can be quite slow, as it requires reading and rewriting the entire variable. (CDF does not directly support record insertion.) Change the first element of the first two records but leave other elements alone: >>> Flux[0:2, 0, 0] = [1, 2] Remove the first record: >>> del Flux[0] Removes record 5 (the sixth): >>> del Flux[5] Due to the need to work around a bug in the CDF library, this operation can be quite slow. Delete *all data* from ``Flux``, but leave the variable definition intact: >>> del Flux[...] .. note:: Although this interface only directly supports zVariables, zMode is set on opening the CDF so rVars appear as zVars. See p.24 of the CDF user's guide; pyCDF uses zMode 2. .. autosummary:: ~Var.attrs ~Var.compress ~Var.copy ~Var.dtype ~Var.dv ~Var.insert ~Var.name ~Var.rename ~Var.rv ~Var.shape ~Var.type .. attribute:: Var.attrs zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. .. automethod:: compress .. automethod:: copy .. autoattribute:: dtype .. automethod:: dv .. automethod:: insert .. automethod:: name .. automethod:: rename .. automethod:: rv .. autoattribute:: shape .. 
automethod:: type """ def __init__(self, cdf_file, var_name, *args): """Create or locate a variable Parameters ========== cdf_file : :py:class:`pycdf.CDF` CDF file containing this variable var_name : string name of this variable Other Parameters ================ args additional arguments passed to :py:meth:`_create`. If none, opens an existing variable. If provided, creates a new one. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning """ self.cdf_file = cdf_file #This is the definitive "identify" of variable self._name = None self._type = None #CDF type (long) self._raw = False #Raw access (skip all conversions) if len(args) == 0: self._get(var_name) else: self._create(var_name, *args) #Weak reference to attribute list (use attrs instead) #This avoids a reference loop self._attrlistref = weakref.ref(zAttrList(self)) def __getitem__(self, key): """Returns a slice from the data array. Details under :py:class:`pycdf.Var`. @return: The data from this variable @rtype: list-of-lists of appropriate type. @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) #Hyperslice mostly catches this sort of thing, but #an empty variable is a special case, since we might want to #WRITE to 0th record (which Hyperslice also supports) but #can't READ from it, and iterating over tries to read from it. if hslice.rv: if hslice.dimsizes[0] == 0 and hslice.degen[0] and \ hslice.starts[0] == 0: raise IndexError('record index out of range') #For NRV, again hslice will assume 0th record exists since we might #want to write. So ANY degenerate dim other than the glued-on 0th #suggests an explicit index that should fail. None degenerate suggests #make an empty array. #Note this is pulling a lot of hyperslice stuff into getitem! elif hslice.dimsizes[0] == 0: if len(hslice.degen) > 1 and max(hslice.degen[1:]): raise IndexError('record index out of range') else: #The zero-length dimension is degenerate so it gets chopped, #and you can't have a zero-length numpy array that still #maintains the size of all other dimensions. So just force #a zero-dim array and the rest will follow hslice.counts[...] = 0 #If this is a scalar, need to make a single non-degenerate #dimension so it can be empty. if len(hslice.counts) == 1: hslice.degen[0] = False result = hslice.create_array() if hslice.counts[0] != 0: hslice.select() lib.call(const.GET_, const.zVAR_HYPERDATA_, result.ctypes.data_as(ctypes.c_void_p)) return hslice.convert_input_array(result) def __delitem__(self, key): """Removes a record (or set of records) from the CDF Only whole records can be deleted, so the del call must either specify only one dimension or it must specify all elements of the non-record dimensions. This is *not* a way to resize a variable! Deleting records from the middle of a variable may be very slow in some circumstances. To work around a bug in CDF library versions 3.4.0 and before, all the data must be read in, the requested deletions done, and then all written back out. 
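For example (illustrative sketch): ``del variable[0]`` removes the first record, ``del variable[5:15:2]`` removes records 5, 7, 9, 11, and 13, and ``del variable[...]`` removes every record while leaving the variable defined.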
@param key: index or slice to delete @type key: int or slice @raise TypeError: if an attempt is made to delete from a non record-varying variable, or to delete below the record level """ if not self.rv(): raise TypeError('Cannot delete records from non-record-varying ' 'variable.') hslice = _Hyperslice(self, key) if hslice.dims > 1 and (hslice.counts[1:] != hslice.dimsizes[1:]).any(): raise TypeError('Can only delete entire records.') if hslice.counts[0] == 0: return start = hslice.starts[0] count = hslice.counts[0] interval = hslice.intervals[0] dimsize = hslice.dimsizes[0] self._call() dangerous_delete = False if lib._del_middle_rec_bug and \ (interval != 1 or (start != 0 and start + count < dimsize)): #delete from middle is dangerous if only have one index entry entries = ctypes.c_long(0) lib.call(const.GET_, const.zVAR_nINDEXENTRIES_, ctypes.byref(entries)) dangerous_delete = (entries.value == 1) if dangerous_delete: data = self[...] data = numpy.delete( data, numpy.arange(start, start + count * interval, interval), 0) self[0:dimsize - count] = data first_rec = dimsize - count last_rec = dimsize - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif interval == 1: first_rec = ctypes.c_long(start) last_rec = ctypes.c_long(start + count - 1) lib.call(const.DELETE_, const.zVAR_RECORDS_, first_rec, last_rec) else: self._call() #delete from end to avoid renumbering of records for recno in range(start + (count - 1) * interval, start - 1, -1 * interval): lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(recno), ctypes.c_long(recno)) def __setitem__(self, key, data): """Puts a slice into the data array. Details under :py:class:`pycdf.Var`. @param key: index or slice to store @type key: int or slice @param data: data to store @type data: numpy.array @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. 
IndexError will @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) n_recs = hslice.counts[0] hslice.expand(data) cdf_type = self.type() if cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) else: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=self._np_type()) if cdf_type == const.CDF_EPOCH16.value: datashape = data.shape[:-1] else: datashape = data.shape #Check data sizes if datashape != tuple(hslice.expected_dims()): raise ValueError('attempt to assign data of dimensions ' + str(datashape) + ' to slice of dimensions ' + str(tuple(hslice.expected_dims()))) #Flip majority and reversed dimensions, see convert_input_array data = hslice.convert_output_array(data) #Handle insertions and similar weirdness if hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Specified slice ends before last record, so insert in middle saved_data = self[hslice.starts[0] + n_recs:] if hslice.counts[0] > 0: hslice.select() lib.call(const.PUT_, const.zVAR_HYPERDATA_, data.ctypes.data_as(ctypes.c_void_p)) if hslice.counts[0] < n_recs: first_rec = hslice.starts[0] + hslice.counts[0] last_rec = hslice.dimsizes[0] - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Put saved data in after inserted data self[hslice.starts[0] + hslice.counts[0]:] = saved_data def extend(self, data): """ Append multiple values to the end of this variable This is an efficiency function which overrides the base implementation in MutableSequence. Parameters ---------- data : the data to append """ self[len(self):] = data def insert(self, index, data): """ Inserts a *single* record before an index Parameters ---------- index : int index before which to insert the new record data : the record to insert """ self[index:index] = [data] def _create(self, var_name, datatype, n_elements = 1, dims = (), recVary = const.VARY, dimVarys = None): """Creates a new zVariable @param var_name: name of this variable @type var_name: string @param datatype: CDF data type @type datatype: ctypes.c_long @param n_elements: number of elements (should be 1 except for CDF_CHAR variables). @type n_elements: long @param dims: size of each dimension for multi-dimensional variable, or empty for a zero-dimensional @type dims: sequence of long @param recVary: record variance for this variable (VARY/NOVARY) @type recVary: long @param dimVarys: array of VARY or NOVARY, variance for each dimension @type dimVarys: sequence of long @return: new variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.new}. 
""" dim_array = (ctypes.c_long * len(dims))(*dims) enc_name = var_name.encode('ascii') if dimVarys is None: dim_vary_array = (ctypes.c_long * (len(dims) if len(dims) > 0 else 1))(const.VARY) else: dim_vary_array = (ctypes.c_long * len(dims))(*dimVarys) varNum = ctypes.c_long(0) self.cdf_file._call(const.CREATE_, const.zVAR_, enc_name, datatype, ctypes.c_long(n_elements), ctypes.c_long(len(dims)), dim_array, recVary, dim_vary_array, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) def _delete(self): """Removes this zVariable from the CDF @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ self._call(const.DELETE_, const.zVAR_) self.cdf_file.clear_from_cache(self._name) self._name = None def _get(self, var_name): """Gets an existing zVariable @param var_name: name of this variable @type var_name: string @return: variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.__getitem__}. """ if isinstance(var_name, str_classes): try: enc_name = var_name.encode('ascii').rstrip() except AttributeError: enc_name = var_name.rstrip() #already in ASCII #'touch' CDF to cause an error if the name isn't there; get number varNum = ctypes.c_long(0) self.cdf_file._call(const.GET_, const.zVAR_NUMBER_, enc_name, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) else: #Looking up by number name = ctypes.create_string_buffer(const.CDF_VAR_NAME_LEN256+1) self.cdf_file._call(const.SELECT_, const.zVAR_, ctypes.c_long(var_name), const.GET_, const.zVAR_NAME_, name) self._name = name.value.rstrip() self.cdf_file.add_to_cache(self._name, var_name) def _num(self): """Returns the zVar number for this variable @return: number of this zVar @rtype: int """ return self.cdf_file.var_num(self._name) def __len__(self): """Get number of records for this variable in this file @return: Number of records @rtype: long @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ count = ctypes.c_long(0) self._call(const.GET_, const.zVAR_MAXREC_, ctypes.byref(count)) return (count.value + 1) def __repr__(self): """Returns representation of the variable Cannot return anything that can be eval'd to create a copy, so just wrap the informal representation in angle brackets. @return: info on this zVar @rtype: str """ return '<Var:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the variable This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.Var`. 
@return: info on this zVar, CDFTYPE [dimensions] NRV (if not record-varying) @rtype: str """ if self.cdf_file._opened: cdftype = self.type() chartypes = (const.CDF_CHAR.value, const.CDF_UCHAR.value) rv = self.rv() typestr = lib.cdftypenames[cdftype] + \ ('*' + str(self._nelems()) if cdftype in chartypes else '' ) if rv: sizestr = str([len(self)] + self._dim_sizes()) else: sizestr = str(self._dim_sizes()) return typestr + ' ' + sizestr + ('' if rv else ' NRV') else: if isinstance(self._name, str): return 'zVar "{0}" in closed CDF {1}'.format( self._name, self.cdf_file.pathname) else: return 'zVar "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self.cdf_file.pathname.decode('ascii')) def _n_dims(self): """Get number of dimensions for this variable @return: the number of dimensions @rtype: long """ n_dims = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMDIMS_, ctypes.byref(n_dims)) return n_dims.value def _dim_sizes(self): """Get the dimension sizes for this variable @return: sequence of sizes @rtype: sequence of long @note: This will always be in Python order (i.e. row major, last index iterates most quickly), *regardless* of the majority of the CDF. """ sizes = (ctypes.c_long * const.CDF_MAX_DIMS)(0) self._call(const.GET_, const.zVAR_DIMSIZES_, sizes) sizes = sizes[0:self._n_dims()] return sizes def rv(self, new_rv=None): """ Gets or sets whether this variable has record variance If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Other Parameters ================ new_rv : boolean True to change to record variance, False to change to NRV, unspecified to simply check variance. Returns ======= out : Boolean True if record varying, False if NRV """ if new_rv != None: self._call(const.PUT_, const.zVAR_RECVARY_, const.VARY if new_rv else const.NOVARY) vary = ctypes.c_long(0) self._call(const.GET_, const.zVAR_RECVARY_, ctypes.byref(vary)) return vary.value != const.NOVARY.value def dv(self, new_dv=None): """ Gets or sets dimension variance of each dimension of variable. If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Parameters ========== new_dv : list of boolean Each element True to change that dimension to dimension variance, False to change to not dimension variance. (Unspecified to simply check variance.) Returns ======= out : list of boolean True if that dimension has variance, else false. """ ndims = self._n_dims() if new_dv != None: if len(new_dv) != ndims: raise ValueError('Must specify variance for ' + str(ndims) + 'dimensions.') varies = (ctypes.c_long * ndims)( *[const.VARY if dv else const.NOVARY for dv in new_dv]) self._call(const.PUT_, const.zVAR_DIMVARYS_, varies) if ndims == 0: return [] varies = (ctypes.c_long * const.CDF_MAX_DIMS)() self._call(const.GET_, const.zVAR_DIMVARYS_, varies) return [dv != const.NOVARY.value for dv in varies[0:ndims]] def _call(self, *args, **kwargs): """Select this CDF and variable and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. @param args: Passed directly to the CDF library interface. Useful constants are defined in the :py:mod:`pycdf.const` module of this package. @type args: various, see :py:mod:`ctypes`. 
@return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self.cdf_file._call( const.SELECT_, const.zVAR_, ctypes.c_long(self.cdf_file.var_num(self._name)), *args, **kwargs) def _np_type(self): """Returns the numpy type of this variable This is the numpy type that will come directly out of the CDF; see :meth:`dtype` for the representation post-conversion. Raises ====== CDFError : for library-reported error or failure to find numpy type Returns ======= out : dtype numpy dtype that will hold value from this variable """ cdftype = self.type() if cdftype == const.CDF_CHAR.value or cdftype == const.CDF_UCHAR.value: return numpy.dtype('S' + str(self._nelems())) try: return lib.numpytypedict[cdftype] except KeyError: raise CDFError(const.BAD_DATA_TYPE) def type(self, new_type=None): """ Returns or sets the CDF type of this variable Parameters ========== new_type : ctypes.c_long the new type from :mod:`~pycdf.const` Returns ======= out : int CDF type """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) n_elements = ctypes.c_long(self._nelems()) self._call(const.PUT_, const.zVAR_DATASPEC_, new_type, n_elements) self._type = None if self._type is None: cdftype = ctypes.c_long(0) self._call(const.GET_, const.zVAR_DATATYPE_, ctypes.byref(cdftype)) self._type = cdftype.value return self._type def _nelems(self): """Number of elements for each value in this variable This is the length of strings for CHAR and UCHAR, should be 1 otherwise. @return: length of strings @rtype: int """ nelems = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMELEMS_, ctypes.byref(nelems)) return nelems.value def name(self): """ Returns the name of this variable Returns ======= out : str variable's name """ if isinstance(self._name, str): return self._name elif isinstance(self._name, bytes): return self._name.decode() def compress(self, comptype=None, param=None): """ Set or check the compression of this variable Compression may not be changeable on variables with data already written; even deleting the data may not permit the change. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
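Example (a sketch mirroring the :meth:`CDF.compress` example): set this variable to gzip compression, compression level 9:

>>> variable.compress(pycdf.const.GZIP_COMPRESSION, 9)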
Returns ======= out : tuple the (comptype, param) currently in effect """ return _compress(self, comptype, param) def copy(self): """ Copies all data and attributes from this variable Returns ======= out : :class:`VarCopy` list of all data in record order """ return VarCopy(self) def rename(self, new_name): """ Renames this variable Parameters ========== new_name : str the new name for this variable """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_VAR_NAME_LEN256: raise CDFError(const.BAD_VAR_NAME) self._call(const.PUT_, const.zVAR_NAME_, enc_name) self.cdf_file.add_to_cache( enc_name, self.cdf_file.var_num(self._name)) #Still in cache del self.cdf_file._var_nums[self._name] self._name = enc_name @property def shape(self): """ Provides the numpy array-like shape of this variable. Returns a tuple; first element is number of records (RV variable only) And the rest provide the dimensionality of the variable. .. note:: Assigning to this attribute will not change the shape. """ if self.rv(): return tuple([len(self)] + self._dim_sizes()) else: return tuple(self._dim_sizes()) @property def dtype(self): """ Provide the numpy dtype equivalent to the CDF type of this variable. Data from this variable will be returned in numpy arrays of this type. See Also -------- type """ cdftype = self.type() if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str is not bytes and not self._raw: return numpy.dtype('U' + str(self._nelems())) if cdftype in (const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value) and not self._raw: return numpy.dtype('O') return self._np_type() def _get_attrs(self): """Get attribute list Provide access to the zVar's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = zAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. """) class _Hyperslice(object): """Represents a CDF 'slice' used for the hyper CDF functions For internal module use only. @ivar dims: number of dimensions to this slice, usually number of dimensions to the variable plus one for the record, which represents the 0th (least rapidly varying) dimension. @type dims: int @ivar dimsizes: size of each dimension (0th is number of records) @type dimsizes: list of int @ivar starts: index of the start value for each dimension ('dimension indices' in CDF speak) @type starts: list of int @ivar counts: number of values to get from each dimension. Final result will be the product of everything in counts. ('dimension counts' in CDF speak) @type counts: numpy.array @ivar intervals: interval between successive indices to use for each dimension. ('dimension invervals' in CDF speak) @type intervals: list of int @ivar degen: is this dimension degenerate, i.e. should be removed in the returned dataset. A 3D array with one dimension degenerate will be returned as a 2D array (i.e. list-of-lists.) @type degen: numpy.array @ivar rev: should this dimension be returned in reverse order? 
@type rev: numpy.array @ivar column: is this slice in column-major mode (if false, row-major) @type column: boolean @ivar zvar: what CDF variable this object slices on @type zvar: :py:class:`pycdf.Var` @ivar expanded_key: fully-expanded version of the key passed to the constructor (all dimensions filled in) @type expanded_key: tuple @note: All dimension-related variables are stored row-major (Python order) """ def __init__(self, zvar, key): """Create a Hyperslice @param zvar: zVariable that this slices @type zvar: :py:class:`pycdf.Var` @param key: Python multi-dimensional slice as passed to __getitem__ @type key: tuple of slice and/or int @raise IndexError: if slice is out of range, mismatches dimensions, or otherwise unparsable. @raise ValueError: if slice has invalid values """ self.zvar = zvar self.rv = self.zvar.rv() #dim of records, + 1 record dim (NRV always is record 0) self.dims = zvar._n_dims() + 1 self.dimsizes = [len(zvar)] + \ zvar._dim_sizes() self.starts = [0] * self.dims self.counts = numpy.empty((self.dims,), dtype=numpy.int32) self.counts.fill(1) self.intervals = [1] * self.dims self.degen = numpy.zeros(self.dims, dtype=numpy.bool) self.rev = numpy.zeros(self.dims, dtype=numpy.bool) #key is: #1. a single value (integer or slice object) if called 1D #2. a tuple (of integers and/or slice objects) if called nD #3. Each item is either a single value (degenerate dim) # or a slice object. if not hasattr(key, '__len__'): #Not a container object, pack in tuple key = (key, ) if not self.rv: key = (0, ) + key #NRV, so always get 0th record (degenerate) key = self.expand_ellipsis(key, self.dims) if self.rv: #special-cases for RV variables if len(key) == 1: #get all data for this record(s) key = self.expand_ellipsis(key + (Ellipsis, ), self.dims) elif len(key) == self.dims - 1: #get same slice from each record key = (slice(None, None, None), ) + key if len(key) == self.dims: self.expanded_key = key for i in range(self.dims): idx = key[i] if hasattr(idx, 'start'): #slice (self.starts[i], self.counts[i], self.intervals[i], self.rev[i]) = \ self.convert_range(idx.start, idx.stop, idx.step, self.dimsizes[i]) else: #Single degenerate value if idx < 0: idx += self.dimsizes[i] if idx != 0 and (idx >= self.dimsizes[i] or idx < 0): raise IndexError('list index out of range') self.starts[i] = idx self.degen[i] = True else: raise IndexError('Slice does not match dimensions for zVar ' + str(zvar._name)) self.column = zvar.cdf_file.col_major() def expected_dims(self, data=None): """Calculate size of non-degenerate dimensions Figures out size, in each dimension, of expected input data @return: size of each dimension for this slice, excluding degenerate @rtype: list of int """ return [self.counts[i] for i in range(self.dims) if not self.degen[i]] def expand(self, data): """Expands the record dimension of this slice to hold a set of data If the length of data (outermost dimension) is larger than the record count (counts[0]) for this slice, expand the slice to hold all the data. This requires that the record dimension of the slice not be degenerate, and also that it not have been completely specified when the hyperslice was created (i.e. record dimension either ellipsis or no specified stop.) Does *not* expand any other dimension, since that's Very Hard in CDF. 
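Example (illustrative, ``zvar`` being the sliced zVariable): if ``zvar`` currently holds 3 records, writing 5 records of data through a slice like ``zvar[0:]`` expands this slice to cover 5 records, while writing only 2 records through ``zvar[0:]`` shrinks it to cover 2.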
@param data: the data which are intended to be stored in this slice @type data: list """ rec_slice = self.expanded_key[0] if not self.rv or isinstance(data, str_classes) or self.degen[0] or \ not hasattr(rec_slice, 'stop'): return if len(data) < self.counts[0]: #Truncate to fit data if rec_slice.stop is None and rec_slice.step in (None, 1): self.counts[0] = len(data) elif len(data) > self.counts[0]: #Expand to fit data if rec_slice.step in (None, 1): self.counts[0] = len(data) def create_array(self): """Creates a numpy array to hold the data from this slice Returns ======= out : numpy.array array sized, typed, and dimensioned to hold data from this slice """ counts = self.counts degen = self.degen if self.column: counts = self.reorder(counts) degen = self.reorder(degen) #TODO: Forcing C order for now, revert to using self.column later array = numpy.empty( [counts[i] for i in range(len(counts)) if not degen[i]], self.zvar._np_type(), order='C') return numpy.require(array, requirements=('C', 'A', 'W')) def convert_input_array(self, buffer): """Converts a buffer of raw data from this slice EPOCH(16) variables always need to be converted. CHAR need converted to Unicode if py3k Parameters ========== buffer : numpy.array data as read from the CDF file Returns ======= out : numpy.array converted data """ result = self._flip_array(buffer) #Convert to derived types cdftype = self.zvar.type() if not self.zvar._raw: if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str != bytes: dt = numpy.dtype('U{0}'.format(result.dtype.itemsize)) result = numpy.require(numpy.char.array(result).decode(), dtype=dt) elif cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(result) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(result) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(result) return result def convert_output_array(self, buffer): """Convert a buffer of data that will go into this slice Parameters ========== buffer : numpy.array data to go into the CDF file Returns ======= out : numpy.array input with majority flipped and dimensions reversed to be suitable to pass directly to CDF library. """ buffer = self._flip_array(buffer) return numpy.require(buffer, requirements=('C', 'A', 'W')) def _flip_array(self, data): """ Operations for majority, etc. common between convert_input and _output """ cdftype = self.zvar.type() #Flip majority if any non-degenerate dimensions exist if self.column and not min(self.degen): #Record-number dim degen, swap whole thing if self.degen[0]: if cdftype == const.CDF_EPOCH16.value: #Maintain last dimension data = data.transpose( list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose() #Record-number dimension is not degenerate, so keep it first else: if cdftype == const.CDF_EPOCH16.value: data = data.transpose( [0] + list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose( [0] + list(range(len(data.shape) - 1, 0, -1))) #Reverse non-degenerate dimensions in rev #Remember that the degenerate indices are already gone! 
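#Illustration (added comment): with rev == [False, True, False] and no
#degenerate dimensions, the list comprehension below yields
#[slice(None), slice(None, None, -1), slice(None)], i.e. data[:, ::-1, :]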
if self.rev.any(): sliced = [(slice(None, None, -1) if self.rev[i] else slice(None)) for i in range(self.dims) if not self.degen[i]] if cdftype == const.CDF_EPOCH16.value: #don't reverse last dim sliced.extend(slice(None)) data = operator.getitem(data, tuple(sliced)) return data def select(self): """Selects this hyperslice in the CDF Calls the CDF library to select the CDF, variable, records, and array elements corresponding to this slice. """ args = (const.SELECT_, const.zVAR_RECNUMBER_, ctypes.c_long(self.starts[0]), const.SELECT_, const.zVAR_RECCOUNT_, ctypes.c_long(self.counts[0]), const.SELECT_, const.zVAR_RECINTERVAL_, ctypes.c_long(self.intervals[0])) if self.dims > 1: dims = self.dims - 1 args += (const.SELECT_, const.zVAR_DIMINDICES_, (ctypes.c_long * dims)(*self.starts[1:]), const.SELECT_, const.zVAR_DIMCOUNTS_, (ctypes.c_long * dims)(*self.counts[1:]), const.SELECT_, const.zVAR_DIMINTERVALS_, (ctypes.c_long * dims)(*self.intervals[1:])) self.zvar._call(*args) @staticmethod def expand_ellipsis(slices, n_dims): """Expands any ellipses into correct number of full-size slices @param slices: tuple of slices, integers, or ellipse objects @type slices: tuple @param n_dims: number of dimensions this slice is over @type n_dims: int @return: L{slices} with ellipses replaced by appropriate number of full-dimension slices @rtype: tuple @raise IndexError: if ellipses specified when already have enough dimensions """ if slices is Ellipsis: return tuple([slice(None, None, None) for i in range(n_dims)]) #Elements might be numpy arrays, so can't use in/index idx = [i for i, v in enumerate(slices) if v is Ellipsis] if not idx: #no ellipsis return slices if len(idx) > 1: #multiples! raise IndexError('Ellipses can only be used once per slice.') idx = idx[0] #how many dims to expand ellipsis to #remember the ellipsis is in len(slices) and must be replaced! extra = n_dims - len(slices) + 1 if extra < 0: raise IndexError('too many indices') result = slices[0:idx] + (slice(None), ) * extra + slices[idx+1:] return result @staticmethod def check_well_formed(data): """Checks if input data is well-formed, regular array""" d = numpy.asanyarray(data) if d.dtype == numpy.object: #this is probably going to be bad try: len(d.flat[0]) except TypeError: #at least it's not a list pass else: raise ValueError( 'Data must be well-formed, regular array of number, ' 'string, or datetime') @staticmethod def dimensions(data): """Finds the dimensions of a nested list-of-lists @param data: data of which dimensions are desired @type data: list (of lists) @return: dimensions of L{data}, in order outside-in @rtype: list of int @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) _Hyperslice.check_well_formed(d) return d.shape @staticmethod def types(data, backward=False): """Find dimensions and valid types of a nested list-of-lists Any given data may be representable by a range of CDF types; infer the CDF types which can represent this data. This breaks down to: 1. Proper kind (numerical, string, time) 2. Proper range (stores highest and lowest number) 3. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: 1. Type that matches precision of data first, then 2. integer type before float type, then 3. Smallest type first, then 4. signed type first, then 5. specifically-named (CDF_BYTE) vs. 
generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if L{data} specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: 1. absolute values between 0 and 3e-39 2. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. @param data: data for which dimensions and CDF types are desired @type data: list (of lists) @param backward: limit to pre-CDF3 types @type backward: bool @return: dimensions of L{data}, in order outside-in; CDF types which can represent this data; number of elements required (i.e. length of longest string) @rtype: 3-tuple of lists ([int], [ctypes.c_long], [int]) @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) dims = d.shape elements = 1 types = [] _Hyperslice.check_well_formed(d) if d.dtype.kind in ('S', 'U'): #it's a string types = [const.CDF_CHAR, const.CDF_UCHAR] elements = d.dtype.itemsize if d.dtype.kind == 'U': #UTF-8 uses 4 bytes per elements //= 4 elif d.size and hasattr(numpy.ma.getdata(d).flat[0], 'microsecond'): if max((dt.microsecond % 1000 for dt in d.flat)) > 0: types = [const.CDF_EPOCH16, const.CDF_EPOCH, const.CDF_TIME_TT2000] else: types = [const.CDF_EPOCH, const.CDF_EPOCH16, const.CDF_TIME_TT2000] if backward: del types[types.index(const.CDF_EPOCH16)] del types[-1] elif not lib.supports_int8: del types[-1] elif d is data or isinstance(data, numpy.generic): #numpy array came in, use its type (or byte-swapped) types = [k for k in lib.numpytypedict if (lib.numpytypedict[k] == d.dtype or lib.numpytypedict[k] == d.dtype.newbyteorder()) and not k in lib.timetypes] if (not lib.supports_int8 or backward) \ and const.CDF_INT8.value in types: del types[types.index(const.CDF_INT8.value)] #Maintain priority to match the ordered lists below: #float/double (44, 45) before real (21/22), and #byte (41) before int (1) before char (51). So hack. #Consider making typedict an ordered dict once 2.6 is dead. types.sort(key=lambda x: x % 50, reverse=True) if not types: #not a numpy array, or can't parse its type if d.dtype.kind == 'O': #Object. 
Try to make it numeric #Can't do safe casting from Object, so try and compare #Basically try most restrictive to least restrictive trytypes = (numpy.uint64, numpy.int64, numpy.float64) for t in trytypes: try: newd = d.astype(dtype=t) except: #Failure to cast, try next type continue if (newd == d).all(): #Values preserved, use this type d = newd #Continue with normal guessing, as if a list break else: #fell through without a match raise ValueError( 'Cannot convert generic objects to CDF type.') if d.dtype.kind in ('i', 'u'): #integer minval = numpy.min(d) maxval = numpy.max(d) if minval < 0: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_INT2, const.CDF_INT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 15, 2 ** 31, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] else: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_UINT1, const.CDF_INT2, const.CDF_UINT2, const.CDF_INT4, const.CDF_UINT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 8, 2 ** 15, 2 ** 16, 2 ** 31, 2 ** 32, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] types = [t for (t, c) in zip(types, cutoffs) if c > maxval and (minval >= 0 or minval >= -c)] if (not lib.supports_int8 or backward) \ and const.CDF_INT8 in types: del types[types.index(const.CDF_INT8)] else: #float if dims is (): if d != 0 and (abs(d) > 1.7e38 or abs(d) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] else: absolutes = numpy.abs(d[d != 0]) if len(absolutes) > 0 and \ (numpy.max(absolutes) > 1.7e38 or numpy.min(absolutes) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] types = [t.value if hasattr(t, 'value') else t for t in types] return (dims, types, elements) @staticmethod def reorder(seq): """Reorders seq to switch array majority Used to take an array of subscripts between row and column majority. First element is not touched, being the record number. @param seq: a sequence of *subscripts* @type seq: sequence of integers @return: seq with all but element 0 reversed in order @rtype: sequence of integers """ return numpy.concatenate((seq[0:1], numpy.flipud(seq)[:-1])) @staticmethod def convert_range(start, stop, step, size): """Converts a start/stop/step range to start/count/interval (i.e. changes from Python-style slice to CDF-style) @param start: index to start a slice at, may be none or negative @type start: int @param stop: index at end of slice (one-past, standard Python), may be none or negative @type stop: int @param step: interval for stepping through stlice @type step: int @param size: size of list to slice @type size: int @return: (start, count, interval, rev) where: 1. start is the start index, normalized to be within the size of the list and negatives handled 2. count is the number of records in the slice, guaranteed to stop before the end 3. interval is the skip between records 4. rev indicates whether the sequence should be reversed @rtype: (int, int, int, boolean) """ (start, stop, step) = slice(start, stop, step).indices(size) if step < 0: step *= -1 count = int((start - stop + step - 1) / step) start = start - (count - 1) * step rev = True else: count = int((stop - start + step - 1) / step) rev = False if count < 0: count = 0 start = 0 return (start, count, step, rev) class Attr(MutableSequence): """An attribute, g or z, for a CDF .. 
warning:: This class should not be used directly, but only in its subclasses, :class:`gAttr` and :class:`zAttr`. The methods listed here are safe to use in the subclasses. Represents a CDF attribute, providing access to the Entries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. An introduction to CDF attributes can be found in section 2.4 of the CDF user's guide. Each element of the list is a single Entry of the appropriate type. The index to the elements is the Entry number. Multi-dimensional slicing is *not* supported; an Entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry .. autosummary:: ~Attr.append ~Attr.has_entry ~Attr.insert ~Attr.max_idx ~Attr.new ~Attr.number ~Attr.rename ~Attr.type .. automethod:: append .. automethod:: has_entry .. automethod:: insert .. automethod:: max_idx .. automethod:: new .. automethod:: number .. automethod:: rename .. automethod:: type """ def __init__(self, cdf_file, attr_name, create=False): """Initialize this attribute @param cdf_file: CDF file containing this attribute @type cdf_file: :py:class:`pycdf.CDF` @param attr_name: Name of this attribute @type attr_name: str @param create: True to create attribute, False to look up existing. @type create: bool """ self._cdf_file = cdf_file self._raw = False if isinstance(attr_name, str_classes): try: self._name = attr_name.encode('ascii') except AttributeError: self._name = attr_name attrno = ctypes.c_long() if create: self._cdf_file._call(const.CREATE_, const.ATTR_, self._name, self.SCOPE, ctypes.byref(attrno)) self._cdf_file.add_attr_to_cache( self._name, attrno.value, self.SCOPE == const.GLOBAL_SCOPE) else: #Ensure exists, and populate cache. See scope note below attrno, scope = self._cdf_file.attr_num(self._name) else: name = ctypes.create_string_buffer(const.CDF_ATTR_NAME_LEN256 + 1) scope = ctypes.c_long(0) self._cdf_file._call(const.SELECT_, const.ATTR_, ctypes.c_long(attr_name)) #Because it's possible to create a gAttr Python objecting #referencing an Attribute with variable scope, and vice-versa, #do NOT assume the scope matches #(Higher level code checks for that being a bad thing.) self._cdf_file._call( const.GET_, const.ATTR_NAME_, name, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) self._name = name.value.rstrip() if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) self._cdf_file.add_attr_to_cache(self._name, attr_name, scope) def __getitem__(self, key): """Return a slice of Entries. Because Attributes may be sparse, a multi-element slice will return None for those elements which do not have associated Entries. @param key: index or range of Entry number to return @type key: slice or int @return: a list of entries, appropriate type. @raise IndexError: if L{key} is an int and that Entry number does not exist. 
""" if key is Ellipsis: key = slice(None, None, None) if hasattr(key, 'indices'): idx = range(*key.indices(self.max_idx() + 1)) return [self._get_entry(i) if self.has_entry(i) else None for i in idx] else: if self.has_entry(key): return self._get_entry(key) else: raise IndexError('list index ' + str(key) + ' out of range.') def _check_other_entries(self, types): """Try to get the type of this entry from others in the Attribute For zAttrs, checks if all other Entries are the same type, and at least one doesn't match its zVar, i.e. Entry type dominates (otherwise assumption is the Var type dominates). For gAttrs, checks all other Entries, and gives priority to the one that's earliest in the possible type list and exists in other Entries. This is only one component of Entry type guessing! :param list types: CDF types that are candidates (match the data) :return: The type discerned from other Entries, or None """ if self.ENTRY_ == const.zENTRY_: #If everything else is the same entry type, #and one is not the same as its var, probably #all entries should be of that type cand_et = None #The Entry type that might work one_var_diff = False #One Var has a type different from Entry for num in range(self.max_idx() + 1): if not self.has_entry(num): continue vartype = self._cdf_file[num].type() entrytype = self.type(num) if vartype != entrytype: one_var_diff = True if cand_et is None: if not entrytype in types: return None #One var has Entry with "impossible" type cand_et = entrytype elif cand_et != entrytype: return None #Two vars have Entries with different types if one_var_diff and cand_et is not None: return cand_et else: # Of those types which exist in other entries, # find the one which is earliest # in types, i.e. the preferred type entrytypes = [self.type(num) for num in range(self.max_idx() + 1) if self.has_entry(num)] entrytypes = [et for et in entrytypes if et in types] if entrytypes: return types[ min([types.index(et) for et in entrytypes])] return None def __setitem__(self, key, data): """Set a slice of Entries. @param key: index or range of Entry numbers to set @type key: slice or int @param data: the data to set these entries to. Normally each entry should be a sequence; if a scalar is provided, it is treated as a single-element list. @type data: scalar or list @raise ValueError: if size of {data} does not match size of L{key} @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. 
""" if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): #Single value, promote everything a dimension idx = (key, key + 1, 1) data = [data] else: idx = key.indices(self.max_idx() + 1) if key.step is None or key.step > 0: #Iterating forward, extend slice to match data if len(data) > len(range(*idx)): idx = (idx[0], idx[0] + idx[2] * len(data), idx[2]) #get, and check, types and sizes for all data #checks first so don't have error after changing half the Entries data_idx = -1 typelist = [] for i in range(*idx): data_idx += 1 if data_idx >= len(data): continue datum = data[data_idx] if datum is None: typelist[i] = (None, None, None) continue (dims, types, elements) = _Hyperslice.types( datum, backward=self._cdf_file.backward) if len(types) <= 0: raise ValueError('Cannot find a matching CDF type.') if len(dims) > 1: raise ValueError('Entries must be scalar or 1D.') elif len(dims) == 1 and isinstance(datum[0], str_classes): raise ValueError('Entry strings must be scalar.') entry_type = None if self.has_entry(i): #If the entry already exists, match its type entry_type = self.type(i) if not entry_type in types: entry_type = None if entry_type is None: #Check other entries for this attribute entry_type = self._check_other_entries(types) if entry_type is None and self.ENTRY_ == const.zENTRY_: #Fall back to zVar type vartype = self._cdf_file[i].type() if vartype in types: entry_type = vartype else: entry_type = types[0] elif entry_type is None: entry_type = types[0] if not entry_type in lib.numpytypedict: raise ValueError('Cannot find a matching numpy type.') typelist.append((dims, entry_type, elements)) data_idx = -1 for i in range(*idx): data_idx += 1 if data_idx >= len(data) or data[data_idx] is None: if self.has_entry(i): del self[i] continue datum = data[data_idx] (dims, entry_type, elements) = typelist[data_idx] self._write_entry(i, datum, entry_type, dims, elements) def __delitem__(self, key): """Delete a slice of Entries. @param key: index or range of Entry numbers to delete @type key: slice or int @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. """ if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): idx = (key, key + 1, 1) else: idx = key.indices(self.max_idx() + 1) for i in range(*idx): self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(i), const.DELETE_, self.ENTRY_) def __iter__(self, current=0): """Iterates over all entries in this Attribute Returns data from one entry at a time until reaches the end. @note: Returned in entry-number order. """ while current <= self.max_idx(): if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current += 1 def __reversed__(self, current=None): """Iterates over all entries in this Attribute Returns data from one entry at a time, starting at end and going to beginning. @note: Returned in entry-number order. """ if current is None: current = self.max_idx() while current >= 0: if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current -= 1 def __len__(self): """Number of Entries for this Attr. NOT same as max Entry number. 
@return: Number of Entries @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_NUMENTRIES_, ctypes.byref(count)) return count.value def __repr__(self): """Returns representation of an attribute Cannot return anything that can be eval'd to create a copy of the attribtute, so just wrap the informal representation in angle brackets. @return: all the data in this attribute @rtype: str """ return '<\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute This is an 'informal' representation in that it cannot be evaluated directly to create an L{Attr}. @return: all the data in this attribute @rtype: str """ if self._cdf_file._opened: return '\n'.join([str(item) for item in self]) else: if isinstance(self._name, str): return 'Attribute "{0}" in closed CDF {1}'.format( self._name, self._cdf_file.pathname) else: return 'Attribute "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self._cdf_file.pathname.decode('ascii')) def insert(self, index, data): """Insert an entry at a particular number Inserts entry at particular number while moving all subsequent entries to one entry number later. Does not close gaps. Parameters ========== index : int index where to put the new entry data : data for the new entry """ max_entry = self.max_idx() if index > max_entry: #Easy case self[index] = data return for i in range(max_entry, index - 1, -1): if self.has_entry(i+1): self.__delitem__(i+1) if self.has_entry(i): self.new(self.__getitem__(i), type=self.type(i), number=i+1) self[index] = data def append(self, data): """Add an entry to end of attribute Puts entry after last defined entry (does not fill gaps) Parameters ========== data : data for the new entry """ self[self.max_idx() + 1] = data def _call(self, *args, **kwargs): """Select this CDF and Attr and call the CDF internal interface @param args: Passed directly to the CDF library interface. @type args: various, see :py:mod:`ctypes`. @return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self._cdf_file._call( const.SELECT_, const.ATTR_, ctypes.c_long(self._cdf_file.attr_num(self._name)[0]), *args, **kwargs) def _entry_len(self, number): """Number of elements in an Entry @param number: number of Entry @type number: int @return: number of elements @rtype: int """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') count = ctypes.c_long(0) self._call( const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_NUMELEMS_, ctypes.byref(count)) return count.value def type(self, number, new_type=None): """Find or change the CDF type of a particular Entry number Parameters ========== number : int number of Entry to check or change Other Parameters ================ new_type type to change this Entry to, from :mod:`~pycdf.const`. Omit to only check type. Returns ======= out : int CDF variable type, see :mod:`~pycdf.const` Notes ===== If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) size = ctypes.c_long(self._entry_len(number)) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATASPEC_, new_type, size, ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') cdftype = ctypes.c_long(0) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATATYPE_, ctypes.byref(cdftype), ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') return cdftype.value def has_entry(self, number): """Check if this attribute has a particular Entry number Parameters ========== number : int number of Entry to check or change Returns ======= out : bool True if ``number`` is a valid entry number; False if not """ status = self._call(const.CONFIRM_, self.ENTRY_EXISTENCE_, ctypes.c_long(number), ignore=(const.NO_SUCH_ENTRY, )) return not status == const.NO_SUCH_ENTRY def max_idx(self): """Maximum index of Entries for this Attr Returns ======= out : int maximum Entry number """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_MAXENTRY_, ctypes.byref(count)) return count.value def new(self, data, type=None, number=None): """Create a new Entry in this Attribute .. note:: If ``number`` is provided and an Entry with that number already exists, it will be overwritten. Parameters ========== data data to put in the Entry Other Parameters ================ type : int type of the new Entry, from :mod:`~pycdf.const` (otherwise guessed from ``data``) number : int Entry number to write, default is lowest available number. """ if number is None: number = 0 while self.has_entry(number): number += 1 (dims, types, elements) = _Hyperslice.types( data, backward=self._cdf_file.backward) if type is None: #Guess based on other entries type = self._check_other_entries(types) if type is None and self.ENTRY_ == const.zENTRY_: #Try to match variable type vartype = self._cdf_file[number].type() if vartype in types: type = vartype if type is None: type = types[0] elif hasattr(type, 'value'): type = type.value self._write_entry(number, data, type, dims, elements) def number(self): """Find the attribute number for this attribute Returns ======= out : int attribute number """ no = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.ATTR_NUMBER_, self._name, ctypes.byref(no)) return no.value def global_scope(self): """Determine scope of this attribute. Returns ======= out : bool True if global (i.e. gAttr), False if zAttr """ return self._cdf_file.attr_num(self._name)[1] def rename(self, new_name): """Rename this attribute Renaming a zAttribute renames it for *all* zVariables in this CDF! 
Parameters ========== new_name : str the new name of the attribute """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_ATTR_NAME_LEN256: raise CDFError(const.BAD_ATTR_NAME) self._call(const.PUT_, const.ATTR_NAME_, enc_name) self._cdf_file.add_attr_to_cache( enc_name, *self._cdf_file.attr_num(self._name)) #still in cache del self._cdf_file._attr_info[self._name] self._name = enc_name def _get_entry(self, number): """Read an Entry associated with this L{Attr} @param number: number of Entry to return @type number: int @return: data from entry numbered L{number} @rtype: list or str """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') #Make a big enough buffer length = self._entry_len(number) cdftype = self.type(number) if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): buff = numpy.empty((), 'S{0}'.format(length), order='C') else: if not cdftype in lib.numpytypedict: raise CDFError(const.BAD_DATA_TYPE) buff = numpy.empty((length,), lib.numpytypedict[cdftype], order='C') buff = numpy.require(buff, requirements=('C', 'A', 'W')) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATA_, buff.ctypes.data_as(ctypes.c_void_p)) #decode if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): if str == bytes or self._raw: #Py2k, leave as bytes result = bytes(buff) else: #Py3k, make unicode result = str(numpy.char.array(buff).decode()) else: if not self._raw: if cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(buff) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(buff) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(buff) else: result = buff else: result = buff if length == 1: result = result[0] return result def _write_entry(self, number, data, cdf_type, dims, elements): """Write an Entry to this Attr. @param number: number of Entry to write @type number: int @param data: data to write @param cdf_type: the CDF type to write, from :py:mod:`pycdf.const` @param dims: dimensions of L{data} @type dims: list @param elements: number of elements in L{data}, 1 unless it is a string @type elements: int """ if len(dims) == 0: n_write = 1 else: n_write = dims[0] if cdf_type in (const.CDF_CHAR.value, const.CDF_UCHAR.value): data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.dtype('S' + str(elements))) n_write = elements elif cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data), except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) elif cdf_type in lib.numpytypedict: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=lib.numpytypedict[cdf_type]) else: raise CDFError(const.BAD_DATA_TYPE) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATA_, ctypes.c_long(cdf_type), ctypes.c_long(n_write), data.ctypes.data_as(ctypes.c_void_p)) def _delete(self): """Delete this Attribute Also deletes all Entries associated with it. 
""" self._call(const.DELETE_, const.ATTR_) self._cdf_file.clear_attr_from_cache(self._name) self._name = None class zAttr(Attr): """zAttribute for zVariables within a CDF. .. warning:: Because zAttributes are shared across all variables in a CDF, directly manipulating them may have unexpected consequences. It is safest to operate on zEntries via :class:`zAttrList`. .. note:: When accessing a zAttr, pyCDF exposes only the zEntry corresponding to the associated zVariable. See Also ======== :class:`Attr` """ ENTRY_ = const.zENTRY_ ENTRY_DATA_ = const.zENTRY_DATA_ SCOPE = const.VARIABLE_SCOPE ENTRY_EXISTENCE_ = const.zENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMzENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXzENTRY_ ENTRY_NUMELEMS_ = const.zENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.zENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.zENTRY_DATASPEC_ def insert(self, index, data): """Insert entry at particular index number Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError def append(self, index, data): """Add entry to end of attribute list Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError class gAttr(Attr): """Global Attribute for a CDF Represents a CDF attribute, providing access to the gEntries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. Normally accessed by providing a key to a :class:`gAttrList`: >>> attribute = cdffile.attrs['attribute_name'] >>> first_gentry = attribute[0] Each element of the list is a single gEntry of the appropriate type. The index to the elements is the gEntry number. A gEntry may be either a single string or a 1D array of numerical type. Entries of numerical type (everything but CDF_CHAR and CDF_UCHAR) with a single element are returned as scalars; multiple-element entries are returned as a list. No provision is made for accessing below the entry level; the whole list is returned at once (but Python's slicing syntax can be used to extract individual items from that list.) Multi-dimensional slicing is *not* supported; an entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry gEntries are *not* necessarily contiguous; a gAttribute may have an entry 0 and entry 2 without an entry 1. :meth:`~Attr.len` will return the *number* of gEntries; use :meth:`~Attr.max_idx` to find the highest defined gEntry number and :meth:`~Attr.has_entry` to determine if a particular gEntry number exists. Iterating over all entries is also supported:: >>> entrylist = [entry for entry in attribute] Deleting gEntries will leave a "hole": >>> attribute[0:3] = [1, 2, 3] >>> del attribute[1] >>> attribute.has_entry(1) False >>> attribute.has_entry(2) True >>> print attribute[0:3] [1, None, 3] Multi-element slices over nonexistent gEntries will return ``None`` where no entry exists. Single-element indices for nonexistent gEntries will raise ``IndexError``. Assigning ``None`` to a gEntry will delete it. 
When assigning to a gEntry, the type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing gEntry of the same number in this gAttribute #. other gEntries in this gAttribute #. data-matching constraints described in :meth:`CDF.new`. See Also ======== :class:`Attr` """ ENTRY_ = const.gENTRY_ ENTRY_DATA_ = const.gENTRY_DATA_ SCOPE = const.GLOBAL_SCOPE ENTRY_EXISTENCE_ = const.gENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMgENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXgENTRY_ ENTRY_NUMELEMS_ = const.gENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.gENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.gENTRY_DATASPEC_ class AttrList(MutableMapping): """Object representing a list of attributes. .. warning:: This class should not be used directly, but only via its subclasses, :class:`gAttrList` and :class:`zAttrList`. Methods listed here are safe to use from the subclasses. .. autosummary:: ~AttrList.clone ~AttrList.copy ~AttrList.from_dict ~AttrList.new ~AttrList.rename .. automethod:: clone .. automethod:: copy .. automethod:: from_dict .. automethod:: new .. automethod:: rename """ def __init__(self, cdf_file, special_entry=None): """Initialize the attribute collection @param cdf_file: CDF these attributes are in @type cdf_file: :py:class:`pycdf.CDF` @param special_entry: callable which returns a "special" entry number, used to limit results for zAttrs to those which match the zVar (i.e. the var number) @type special_entry: callable """ self._cdf_file = cdf_file self.special_entry = special_entry def __getitem__(self, name): """Find an Attribute by name @param name: name of the Attribute to return @type name: str @return: attribute named L{name} @rtype: L{Attr} @raise KeyError: if there is no attribute named L{name} @raise CDFError: other errors in CDF library """ try: attrib = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attrib.global_scope() != self.global_scope: raise KeyError(name + ': no ' + self.attr_name + ' by that name.') return attrib def __setitem__(self, name, data): """Create an Attribute or change its entries @param name: name of Attribute to change @type name: str @param data: Entries to populate this Attribute with. Any existing Entries will be deleted! Another C{Attr} may be specified, in which case all its entries are copied. @type data: scalar, list, or L{Attr} """ if isinstance(data, AttrList): if name in self: del self[name] attr = self._get_or_create(name) for entryno in range(data.max_idx()): if data.has_entry(entryno): attr.new(data[entryno], data.type(entryno), entryno) else: attr = self._get_or_create(name) if isinstance(data, str_classes): data = [data] else: try: junk = len(data) except TypeError: data = [data] attr[:] = data del attr[len(data):] def __delitem__(self, name): """Delete an Attribute (and all its entries) @param name: name of Attribute to delete @type name: str """ try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) attr._delete() def __iter__(self, current=0): """Iterates over all Attr in this CDF or variable Returns name of one L{Attr} at a time until reaches the end. @note: Returned in number order. 
""" count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) while current < count.value: candidate = self.AttrType(self._cdf_file, current) if candidate.global_scope() == self.global_scope: if self.special_entry is None or \ candidate.has_entry(self.special_entry()): if str == bytes: value = yield(candidate._name) else: value = yield(candidate._name.decode()) if value != None: current = self[value].number() current += 1 def __repr__(self): """Returns representation of attribute list Cannot return anything that can be eval'd to create a copy of the list, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<' + self.__class__.__name__ + ':\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute list This is an 'informal' representation in that it cannot be evaluated directly to create an L{AttrList}. @return: all the data in this list of attributes @rtype: str """ if self._cdf_file._opened: return '\n'.join([key + ': ' + ( ('\n' + ' ' * (len(key) + 2)).join( [str(value[i]) + ' [' + lib.cdftypenames[value.type(i)] + ']' for i in range(value.max_idx() + 1) if value.has_entry(i)]) if isinstance(value, Attr) else str(value) + ' [' + lib.cdftypenames[self.type(key)] + ']' ) for (key, value) in sorted(self.items())]) else: if isinstance(self._cdf_file.pathname, str): return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname) else: return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname.decode('ascii')) def clone(self, master, name=None, new_name=None): """ Clones another attribute list, or one attribute from it, into this list. Parameters ========== master : AttrList the attribute list to copy from. This can be any dict-like object. Other Parameters ================ name : str (optional) name of attribute to clone (default: clone entire list) new_name : str (optional) name of the new attribute, default ``name`` """ if name is None: self._clone_list(master) else: self._clone_attr(master, name, new_name) def copy(self): """ Create a copy of this attribute list Returns ======= out : dict copy of the entries for all attributes in this list """ return dict((key, value[:] if isinstance(value, Attr) else value) for (key, value) in self.items()) def new(self, name, data=None, type=None): """ Create a new Attr in this AttrList Parameters ========== name : str name of the new Attribute Other Parameters ================ data data to put into the first entry in the new Attribute type CDF type of the first entry from :mod:`~pycdf.const`. Only used if data are specified. Raises ====== KeyError : if the name already exists in this list """ if name in self: raise KeyError(name + ' already exists.') #A zAttr without an Entry in this zVar will be a "get" not "create" attr = self._get_or_create(name) if data is not None: if self.special_entry is None: attr.new(data, type) else: attr.new(data, type, self.special_entry()) def rename(self, old_name, new_name): """ Rename an attribute in this list Renaming a zAttribute renames it for *all* zVariables in this CDF! Parameters ========== old_name : str the current name of the attribute new_name : str the new name of the attribute """ AttrList.__getitem__(self, old_name).rename(new_name) def from_dict(self, in_dict): """ Fill this list of attributes from a dictionary .. deprecated:: 0.1.5 Use :meth:`~pycdf.AttrList.clone` instead; it supports cloning from dictionaries. 
Parameters ========== in_dict : dict Attribute list is populated entirely from this dictionary; all existing attributes are deleted. """ warnings.warn("from_dict is deprecated and will be removed. Use clone.", DeprecationWarning) for k in in_dict: self[k] = in_dict[k] for k in list(self): if not k in in_dict: del self[k] def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{AttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name self[new_name] = master[name] def _clone_list(self, master): """Clones this attribute list from another @param master: the attribute list to copy from @type master: L{AttrList} """ for name in master: self._clone_attr(master, name) for name in list(self): #Can't iterate over a list we're changing if not name in master: del self[name] def _get_or_create(self, name): """Retrieve L{Attr} or create it if it doesn't exist @param name: name of the attribute to look up or create @type name: str @return: attribute with this name @rtype: L{Attr} """ attr = None try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status != const.NO_SUCH_ATTR: raise if attr is None: attr = self.AttrType(self._cdf_file, name, True) elif attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) return attr class gAttrList(AttrList): """ Object representing *all* the gAttributes in a CDF. Normally accessed as an attribute of an open :class:`CDF`: >>> global_attribs = cdffile.attrs Appears as a dictionary: keys are attribute names; each value is an attribute represented by a :class:`gAttr` object. To access the global attribute TEXT: >>> text_attr = cdffile.attrs['TEXT'] See Also ======== :class:`AttrList` """ AttrType = gAttr attr_name = 'gAttribute' global_scope = True def __len__(self): """ Number of gAttributes in this CDF Returns ======= out : int number of gAttributes in the CDF """ count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMgATTRS_, ctypes.byref(count)) return count.value class zAttrList(AttrList): """Object representing *all* the zAttributes in a zVariable. Normally accessed as an attribute of a :class:`Var` in an open CDF: >>> epoch_attribs = cdffile['Epoch'].attrs Appears as a dictionary: keys are attribute names, values are the value of the zEntry associated with the appropriate zVariable. Each vAttribute in a CDF may only have a *single* entry associated with each variable. The entry may be a string, a single numerical value, or a series of numerical values. Entries with multiple values are returned as an entire list; direct access to the individual elements is not possible. Example: finding the first dependency of (ISTP-compliant) variable ``Flux``: >>> print cdffile['Flux'].attrs['DEPEND_0'] zAttributes are shared among zVariables, one zEntry allowed per zVariable. (pyCDF hides this detail.) Deleting the last zEntry for a zAttribute will delete the underlying zAttribute. zEntries are created and destroyed by the usual dict methods on the zAttrlist: >>> epoch_attribs['new_entry'] = [1, 2, 4] #assign a list to new zEntry >>> del epoch_attribs['new_entry'] #delete the zEntry The type of the zEntry is guessed from data provided. 
The type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing zEntry corresponding to this zVar #. other zEntries in this zAttribute #. the type of this zVar #. data-matching constraints described in :py:meth:`CDF.new` See Also ======== :class:`AttrList` """ AttrType = zAttr attr_name = 'zAttribute' global_scope = False def __init__(self, zvar): """Initialize the attribute collection @param zvar: zVariable these attributes are in @param zvar: :py:class:`pycdf.Var` """ super(zAttrList, self).__init__(zvar.cdf_file, zvar._num) self._zvar = zvar def __getitem__(self, name): """Find an zEntry by name @param name: name of the zAttribute to return @type name: str @return: attribute named L{name} @rtype: L{zAttr} @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if attrib.has_entry(zvar_num): attrib._raw = self._zvar._raw return attrib[zvar_num] else: raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) def __delitem__(self, name): """Delete an zEntry by name @param name: name of the zEntry to delete @type name: str @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library @note: If this is the only remaining entry, the Attribute will be deleted. """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(str(name) + ': no such attribute for variable ' + str(self._zvar._name)) del attrib[zvar_num] if len(attrib) == 0: attrib._delete() def __setitem__(self, name, data): """Sets a zEntry by name The type of the zEntry is guessed from L{data}. The type is chosen to match the data; subject to that constraint, it will try to match (in order): 1. existing zEntry corresponding to this zVar 2. other zEntries in this zAttribute 3. the type of this zVar 4. data-matching constraints described in L{_Hyperslice.types} @param name: name of zAttribute; zEntry for this zVariable will be set in zAttribute by this name @type name: str @raise CDFError: errors in CDF library @raise ValueError: if unable to find a valid CDF type matching L{data}, or if L{data} is the wrong dimensions. """ try: attr = super(zAttrList, self).__getitem__(name) except KeyError: attr = zAttr(self._cdf_file, name, True) attr._raw = self._zvar._raw attr[self._zvar._num()] = data def __len__(self): """Number of zAttributes in this variable @return: number of zAttributes in the CDF which have entries for this variable. @rtype: int """ length = 0 count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) current = 0 while current < count.value: candidate = zAttr(self._cdf_file, current) if not candidate.global_scope(): if candidate.has_entry(self._zvar._num()): length += 1 current += 1 return length def type(self, name, new_type=None): """Find or change the CDF type of a zEntry in this zVar @param name: name of the zAttr to check or change @type name: str @param new_type: type to change it to, see :py:mod:`pycdf.const` @type new_type: ctypes.c_long @return: CDF variable type, see :py:mod:`pycdf.const` @rtype: int @note: If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) return attrib.type(zvar_num, new_type) def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{zAttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name if new_name in self: del self[new_name] self.new(new_name, master[name], master.type(name) if hasattr(master, 'type') else None)
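A short doctest-style sketch of the append/insert behavior described in the Attr docstrings above (illustrative only: ``attribute`` is assumed to be a gAttr obtained from a writable CDF, e.g. ``attribute = cdffile.attrs['Notes']``, and the entry values are made up):

>>> attribute[0:2] = ['a', 'b']   # gEntries 0 and 1 (illustrative values)
>>> attribute.append('c')         # placed after the last defined entry, i.e. gEntry 2
>>> attribute.insert(1, 'x')      # entries 1 and 2 each move one number later
>>> attribute[0:4]
['a', 'x', 'b', 'c']

Note that ``insert`` does not close existing gaps, and both methods raise ``NotImplementedError`` on a zAttr, since each zAttr has at most one zEntry per zVariable.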
__init__
Initialize this attribute

@param cdf_file: CDF file containing this attribute
@type cdf_file: :py:class:`pycdf.CDF`
@param attr_name: Name of this attribute
@type attr_name: str
@param create: True to create attribute, False to look up existing.
@type create: bool
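A minimal sketch of this constructor (normally invoked indirectly through an AttrList rather than by user code); ``cdffile`` is assumed to be an open, writable :class:`pycdf.CDF` and the attribute names are hypothetical:

>>> attr = gAttr(cdffile, 'TEXT')                    # look up an existing gAttribute by name
>>> new_attr = gAttr(cdffile, 'Notes', create=True)  # create a new gAttribute
>>> by_num = gAttr(cdffile, 0)                       # a number looks up by attribute number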
#!/usr/bin/env python # -*- coding: utf-8 -*- """ das developers note: This a is modification of the original SpacePy pycdf package. All refereneces to the greater spacepy package have been removed to create a small standalone module. --cwp 2018-10-18 The libcdf.so location code has been changed to find the version installed in anaconda. --cwp 2020-04-06 This package provides a Python interface to the Common Data Format (CDF) library used for many NASA missions, available at http://cdf.gsfc.nasa.gov/. It is targeted at Python 2.6+ and should work without change on either Python 2 or Python 3. The interface is intended to be 'pythonic' rather than reproducing the C interface. To open or close a CDF and access its variables, see the :class:`CDF` class. Accessing data within the variables is via the :class:`Var` class. The :data:`lib` object provides access to some routines that affect the functionality of the library in general. The :mod:`~pycdf.const` module contains constants useful for accessing the underlying library. Authors: Jon Niehof Institution: University of New Hampshire Contact: [email protected] Copyright 2010-2015 Los Alamos National Security, LLC. """ __contact__ = 'Jon Niehof, [email protected]' try: from collections.abc import MutableMapping, MutableSequence except ImportError: from collections import MutableMapping, MutableSequence import ctypes import ctypes.util import datetime import operator import os import os.path import shutil import sys import tempfile import warnings import weakref import numpy import numpy.ma #Import const AFTER library loaded, so failed load doesn't leave half-imported #from . import const try: str_classes = (str, bytes, unicode) except NameError: str_classes = (str, bytes) class Library(object): """ Abstraction of the base CDF C library and its state. Not normally intended for end-user use. An instance of this class is created at package load time as the :data:`~pycdf.lib` variable, providing access to the underlying C library if necessary. The CDF library itself is described in section 2.1 of the CDF user's guide, as well as the CDF C reference manual. Calling the C library directly requires knowledge of :mod:`ctypes`. Instantiating this object loads the C library, see :doc:`/pycdf` docs for details. .. autosummary:: ~Library.call ~Library.check_status ~Library.datetime_to_epoch ~Library.datetime_to_epoch16 ~Library.datetime_to_tt2000 ~Library.epoch_to_datetime ~Library.epoch_to_epoch16 ~Library.epoch_to_num ~Library.epoch_to_tt2000 ~Library.epoch16_to_datetime ~Library.epoch16_to_epoch ~Library.epoch16_to_tt2000 ~Library.set_backward supports_int8 ~Library.tt2000_to_datetime ~Library.tt2000_to_epoch ~Library.tt2000_to_epoch16 v_datetime_to_epoch v_datetime_to_epoch16 v_datetime_to_tt2000 v_epoch_to_datetime v_epoch_to_tt2000 v_epoch16_to_datetime v_epoch16_to_tt2000 v_tt2000_to_datetime v_tt2000_to_epoch v_tt2000_to_epoch16 libpath version .. automethod:: call .. automethod:: check_status .. automethod:: datetime_to_epoch .. automethod:: datetime_to_epoch16 .. automethod:: datetime_to_tt2000 .. automethod:: epoch_to_datetime .. automethod:: epoch_to_epoch16 .. automethod:: epoch_to_num .. automethod:: epoch_to_tt2000 .. automethod:: epoch16_to_datetime .. automethod:: epoch16_to_epoch .. automethod:: epoch16_to_tt2000 .. automethod:: set_backward .. attribute:: supports_int8 True if this library supports INT8 and TIME_TT2000 types; else False. .. automethod:: tt2000_to_datetime .. automethod:: tt2000_to_epoch .. 
automethod:: tt2000_to_epoch16 .. method:: v_datetime_to_epoch(datetime) A vectorized version of :meth:`datetime_to_epoch` which takes a numpy array of datetimes as input and returns an array of epochs. .. method:: v_datetime_to_epoch16(datetime) A vectorized version of :meth:`datetime_to_epoch16` which takes a numpy array of datetimes as input and returns an array of epoch16. .. method:: v_datetime_to_tt2000(datetime) A vectorized version of :meth:`datetime_to_tt2000` which takes a numpy array of datetimes as input and returns an array of TT2000. .. method:: v_epoch_to_datetime(epoch) A vectorized version of :meth:`epoch_to_datetime` which takes a numpy array of epochs as input and returns an array of datetimes. .. method:: v_epoch_to_tt2000(epoch) A vectorized version of :meth:`epoch_to_tt2000` which takes a numpy array of epochs as input and returns an array of tt2000s. .. method:: v_epoch16_to_datetime(epoch0, epoch1) A vectorized version of :meth:`epoch16_to_datetime` which takes a numpy array of epoch16 as input and returns an array of datetimes. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_epoch16_to_tt2000(epoch16) A vectorized version of :meth:`epoch16_to_tt2000` which takes a numpy array of epoch16 as input and returns an array of tt2000s. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_tt2000_to_datetime(tt2000) A vectorized version of :meth:`tt2000_to_datetime` which takes a numpy array of tt2000 as input and returns an array of datetimes. .. method:: v_tt2000_to_epoch(tt2000) A vectorized version of :meth:`tt2000_to_epoch` which takes a numpy array of tt2000 as input and returns an array of epochs. .. method:: v_tt2000_to_epoch16(tt2000) A vectorized version of :meth:`tt2000_to_epoch16` which takes a numpy array of tt2000 as input and returns an array of epoch16. .. attribute:: libpath The path where pycdf found the CDF C library, potentially useful in debugging. If this contains just the name of a file (with no path information), then the system linker found the library for pycdf. On Linux, ``ldconfig -p`` may be useful for displaying the system's library resolution. .. attribute:: version Version of the CDF library, (version, release, increment, subincrement) """ def __init__(self, libpath=None, library=None): """Load the CDF C library. Searches for the library in the order: 1. Appropriately-named file in CDF_LIB 2. Appropriately-named file in CDF_BASE 3. Standard library search path @raise CDFError: BAD_DATA_TYPE if can't map types properly """ if not 'CDF_TMP' in os.environ: os.environ['CDF_TMP'] = tempfile.gettempdir() if not library: if not libpath: self.libpath, self._library = self._find_lib() if self._library is None: raise Exception(( 'Cannot load CDF C library; checked {0}. 
' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(self.libpath))) else: self._library = ctypes.CDLL(libpath) self.libpath = libpath else: self._library = library self.libpath = libpath self._library.CDFlib.restype = ctypes.c_long #commonly used, so set it up here self._library.EPOCHbreakdown.restype = ctypes.c_long self._library.computeEPOCH.restype = ctypes.c_double self._library.computeEPOCH.argtypes = [ctypes.c_long] * 7 self._library.computeEPOCH16.restype = ctypes.c_double self._library.computeEPOCH16.argtypes = [ctypes.c_long] * 10 + \ [ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDFsetFileBackward'): self._library.CDFsetFileBackward.restype = None self._library.CDFsetFileBackward.argtypes = [ctypes.c_long] #Map old name to the 3.7.1+ name if not hasattr(self._library, 'computeTT2000') \ and hasattr(self._library, 'CDF_TT2000_from_UTC_parts'): self._library.computeTT2000 \ = self._library.CDF_TT2000_from_UTC_parts if hasattr(self._library, 'computeTT2000'): self._library.computeTT2000.restype = ctypes.c_longlong self._library.computeTT2000.argtypes = \ [ctypes.c_double] *9 #Map old name to the 3.7.1+ name if not hasattr(self._library, 'breakdownTT2000') \ and hasattr(self._library, 'CDF_TT2000_to_UTC_parts'): self._library.breakdownTT2000 \ = self._library.CDF_TT2000_to_UTC_parts if hasattr(self._library, 'breakdownTT2000'): self._library.breakdownTT2000.restype = None self._library.breakdownTT2000.argtypes = \ [ctypes.c_longlong] + [ctypes.POINTER(ctypes.c_double)] * 9 if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH'): self._library.CDF_TT2000_to_UTC_EPOCH.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH.argtypes = [ctypes.c_longlong] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH'): self._library.CDF_TT2000_from_UTC_EPOCH.restype = ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH.argtypes = [ctypes.c_double] if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH16'): self._library.CDF_TT2000_to_UTC_EPOCH16.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH16.argtypes = \ [ctypes.c_longlong, ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH16'): self._library.CDF_TT2000_from_UTC_EPOCH16.restype = \ ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH16.argtypes = \ [ctypes.POINTER(ctypes.c_double * 2)] #Get CDF version information ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) sub = ctypes.c_char(b' ') self.call(const.GET_, const.LIB_VERSION_, ctypes.byref(ver), const.GET_, const.LIB_RELEASE_, ctypes.byref(rel), const.GET_, const.LIB_INCREMENT_, ctypes.byref(inc), const.GET_, const.LIB_subINCREMENT_, ctypes.byref(sub)) ver = ver.value rel = rel.value inc = inc.value sub = sub.value self.version = (ver, rel, inc, sub) self._del_middle_rec_bug = ver < 3 or (ver == 3 and (rel < 4 or (rel == 4 and inc < 1))) self.supports_int8 = (ver > 3 or (ver == 3 and rel >=4)) self.cdftypenames = {const.CDF_BYTE.value: 'CDF_BYTE', const.CDF_CHAR.value: 'CDF_CHAR', const.CDF_INT1.value: 'CDF_INT1', const.CDF_UCHAR.value: 'CDF_UCHAR', const.CDF_UINT1.value: 'CDF_UINT1', const.CDF_INT2.value: 'CDF_INT2', const.CDF_UINT2.value: 'CDF_UINT2', const.CDF_INT4.value: 'CDF_INT4', const.CDF_UINT4.value: 'CDF_UINT4', const.CDF_INT8.value: 'CDF_INT8', const.CDF_FLOAT.value: 'CDF_FLOAT', const.CDF_REAL4.value: 'CDF_REAL4', const.CDF_DOUBLE.value: 'CDF_DOUBLE', const.CDF_REAL8.value: 'CDF_REAL8', const.CDF_EPOCH.value: 'CDF_EPOCH', 
const.CDF_EPOCH16.value: 'CDF_EPOCH16', const.CDF_TIME_TT2000.value: 'CDF_TIME_TT2000', } self.numpytypedict = {const.CDF_BYTE.value: numpy.int8, const.CDF_CHAR.value: numpy.int8, const.CDF_INT1.value: numpy.int8, const.CDF_UCHAR.value: numpy.uint8, const.CDF_UINT1.value: numpy.uint8, const.CDF_INT2.value: numpy.int16, const.CDF_UINT2.value: numpy.uint16, const.CDF_INT4.value: numpy.int32, const.CDF_UINT4.value: numpy.uint32, const.CDF_INT8.value: numpy.int64, const.CDF_FLOAT.value: numpy.float32, const.CDF_REAL4.value: numpy.float32, const.CDF_DOUBLE.value: numpy.float64, const.CDF_REAL8.value: numpy.float64, const.CDF_EPOCH.value: numpy.float64, const.CDF_EPOCH16.value: numpy.dtype((numpy.float64, 2)), const.CDF_TIME_TT2000.value: numpy.int64, } self.timetypes = [const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value] if not self.supports_int8: del self.cdftypenames[const.CDF_INT8.value] del self.numpytypedict[const.CDF_INT8.value] del self.cdftypenames[const.CDF_TIME_TT2000.value] del self.numpytypedict[const.CDF_TIME_TT2000.value] elif sys.platform.startswith('linux') \ and os.uname()[4].startswith('arm') \ and hasattr(self._library, 'computeTT2000') \ and self._library.computeTT2000( 2010, 1, 1, 0, 0, 0, 0, 0, 0) != 315576066184000000: #TT2000 call failed, so probably need to type-pun #double arguments to variadic functions. #Calling convention for non-variadic functions with floats #is unique, but convention for ints is same as variadic. #So type-pun arguments to integers to force that calling #convention. if ctypes.sizeof(ctypes.c_longlong) != \ ctypes.sizeof(ctypes.c_double): warnings.warn('ARM with unknown type sizes; ' 'TT2000 functions will not work.') else: self._library.computeTT2000.argtypes = \ [ctypes.c_longlong] * 9 c_ll_p = ctypes.POINTER(ctypes.c_longlong) if self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( 2010)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) != 315576066184000000: warnings.warn('ARM with unknown calling convention; ' 'TT2000 functions will not work.') self.datetime_to_tt2000 = self._datetime_to_tt2000_typepunned v_epoch16_to_datetime = numpy.frompyfunc( self.epoch16_to_datetime, 2, 1) self.v_epoch16_to_datetime = \ lambda x: v_epoch16_to_datetime(x[..., 0], x[..., 1]) self.v_epoch_to_datetime = numpy.frompyfunc( self.epoch_to_datetime, 1, 1) self.v_tt2000_to_datetime = numpy.frompyfunc( self.tt2000_to_datetime, 1, 1) self.v_datetime_to_epoch = numpy.vectorize( self.datetime_to_epoch, otypes=[numpy.float64]) v_datetime_to_epoch16 = numpy.frompyfunc( self.datetime_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. 
We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_datetime_to_epoch16(x): retval = numpy.require(v_datetime_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_datetime_to_epoch16 = _v_datetime_to_epoch16 self.v_datetime_to_tt2000 = numpy.vectorize( self.datetime_to_tt2000, otypes=[numpy.int64]) self.v_epoch_to_tt2000 = numpy.vectorize( self.epoch_to_tt2000, otypes=[numpy.int64]) self.v_tt2000_to_epoch = numpy.vectorize( self.tt2000_to_epoch, otypes=[numpy.float64]) v_epoch16_to_tt2000 = numpy.frompyfunc( self.epoch16_to_tt2000, 2, 1) self.v_epoch16_to_tt2000 = \ lambda x: v_epoch16_to_tt2000(x[..., 0], x[..., 1]) v_tt2000_to_epoch16 = numpy.frompyfunc( self.tt2000_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_tt2000_to_epoch16(x): retval = numpy.require(v_tt2000_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_tt2000_to_epoch16 = _v_tt2000_to_epoch16 if not self.supports_int8: self.datetime_to_tt2000 = self._bad_tt2000 self.tt2000_to_datetime = self._bad_tt2000 self.v_datetime_to_tt2000 = self._bad_tt2000 self.v_tt2000_to_datetime = self._bad_tt2000 self.epoch_to_tt2000 = self._bad_tt2000 self.v_epoch_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch = self._bad_tt2000 self.v_tt2000_to_epoch = self._bad_tt2000 self.epoch_16_to_tt2000 = self._bad_tt2000 self.v_epoch16_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch16 = self._bad_tt2000 self.v_tt2000_to_epoch16 = self._bad_tt2000 #Default to V2 CDF self.set_backward(True) @staticmethod def _find_lib(): """ Search for the CDF library Searches in likely locations for CDF libraries and attempts to load them. Stops at first successful load and, if fails, reports all the files that were tried as libraries. Returns ======= out : tuple This is either (path to library, loaded library) or, in the event of failure, (None, list of libraries tried) """ failed = [] for libpath in Library._lib_paths(): try: lib = ctypes.CDLL(libpath) except: failed.append(libpath) else: return libpath, lib return (failed, None) @staticmethod def _lib_paths(): """Find candidate paths for the CDF library Does not check that the library is actually in any particular directory, just returns a list of possible locations, in priority order. Returns ======= out : generator of str paths that look like the CDF library """ #What the library might be named names = { 'win32': ['cdf.dll'], 'darwin': ['libcdf.dylib', 'cdf.dylib', 'libcdf.so'], 'linux2': ['libcdf.so'], 'linux': ['libcdf.so'], } names = names.get(sys.platform, ['libcdf.so']) #All existing CDF-library-like paths within a directory search_dir = lambda x: \ [os.path.join(x, fname) for fname in names if os.path.exists(os.path.join(x, fname))] # Only use anaconda locations... # Defined during builds ... if 'PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['PREFIX'], 'lib')): yield p # defined when conda is activated ... 
if 'CONDA_PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'lib')): yield p # Special subdirectory for anaconda unix packages on windows if 'LIBRARY_BIN' in os.environ: for p in search_dir(os.environ['LIBRARY_BIN']): yield p ctypespath = ctypes.util.find_library( 'cdf.dll' if sys.platform == 'win32' else 'cdf') if ctypespath: yield ctypespath def check_status(self, status, ignore=()): """ Raise exception or warning based on return status of CDF call Parameters ========== status : int status returned by the C library Other Parameters ================ ignore : sequence of ctypes.c_long CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. (Default none). Raises ====== CDFError : if status < CDF_WARN, indicating an error Warns ===== CDFWarning : if CDF_WARN <= status < CDF_OK, indicating a warning. Returns ======= out : int status (unchanged) """ if status == const.CDF_OK or status in ignore: return status if status < const.CDF_WARN: raise CDFError(status) else: warning = CDFWarning(status) warning.warn() return status def call(self, *args, **kwargs): """ Call the CDF internal interface Passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with :meth:`check_status`. Terminal NULL is automatically added to args. Parameters ========== args : various, see :mod:`ctypes` Passed directly to the CDF library interface. Useful constants are defined in the :mod:`~pycdf.const` module. Other Parameters ================ ignore : sequence of CDF statuses sequence of CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. Returns ======= out : int CDF status from the library Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ if 'ignore' in kwargs: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) ), kwargs['ignore']) else: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) )) def set_backward(self, backward=True): """ Set backward compatibility mode for new CDFs Unless backward compatible mode is set, CDF files created by the version 3 library can not be read by V2. Parameters ========== backward : boolean Set backward compatible mode if True; clear it if False. Raises ====== ValueError : if backward=False and underlying CDF library is V2 """ if self.version[0] < 3: if not backward: raise ValueError( 'Cannot disable backward-compatible mode for CDF version 2.') else: return self._library.CDFsetFileBackward(const.BACKWARDFILEon if backward else const.BACKWARDFILEoff) def epoch_to_datetime(self, epoch): """ Converts a CDF epoch value to a datetime Parameters ========== epoch : float epoch value from CDF Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
See Also ======== v_epoch_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) self._library.EPOCHbreakdown(ctypes.c_double(epoch), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999000) else: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, msec.value * 1000) def datetime_to_epoch(self, dt): """ Converts a Python datetime to a CDF Epoch value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : float epoch corresponding to dt See Also ======== v_datetime_to_epoch """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) micro = dt.microsecond % 1000 if micro >= 500 and dt.year < 9999: dt += datetime.timedelta(0, 0, 1000) return self._library.computeEPOCH(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000)) def epoch16_to_datetime(self, epoch0, epoch1): """ Converts a CDF epoch16 value to a datetime .. note:: The call signature has changed since SpacePy 0.1.2. Formerly this method took a single argument with two values; now it requires two arguments (one for each value). To convert existing code, replace ``epoch16_to_datetime(epoch)`` with ``epoch16_to_datetime(*epoch)``. Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. See Also ======== v_epoch16_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) usec = ctypes.c_long(0) nsec = ctypes.c_long(0) psec = ctypes.c_long(0) self._library.EPOCH16breakdown((ctypes.c_double * 2)(epoch0, epoch1), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec), ctypes.byref(psec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) micro = int(float(msec.value) * 1000 + float(usec.value) + float(nsec.value) / 1000 + float(psec.value) / 1e6 + 0.5) if micro < 1000000: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_epoch16(self, dt): """ Converts a Python datetime to a CDF Epoch16 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : list of float epoch16 corresponding to dt See Also ======== v_datetime_to_epoch16 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) #Default to "illegal epoch" epoch16 = (ctypes.c_double * 2)(-1., -1.) 
if self._library.computeEPOCH16(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0, 0, epoch16): return (-1., -1.) #Failure, so illegal epoch return (epoch16[0], epoch16[1]) def epoch_to_epoch16(self, epoch): """ Converts a CDF EPOCH to a CDF EPOCH16 value Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : (double, double) EPOCH16 corresponding to epoch """ e = numpy.require(epoch, numpy.float64) s = numpy.trunc(e / 1000.0) #ugly numpy stuff, probably a better way.... res = numpy.hstack((s, (e - s * 1000.0) * 1e9)) if len(res) <= 2: return res newshape = list(res.shape[0:-2]) newshape.append(res.shape[-1] // 2) newshape.append(2) return numpy.rollaxis(res.reshape(newshape), -1, -2) def epoch_to_num(self, epoch): """ Convert CDF EPOCH to matplotlib number. Same output as :func:`~matplotlib.dates.date2num` and useful for plotting large data sets without converting the times through datetime. Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : double Floating point number representing days since 0001-01-01. """ #date2num day 1 is 1/1/1 00UT #epoch 1/1/1 00UT is 31622400000.0 (millisecond) return (epoch - 31622400000.0) / (24 * 60 * 60 * 1000.0) + 1.0 def epoch16_to_epoch(self, epoch16): """ Converts a CDF EPOCH16 to a CDF EPOCH value Parameters ========== epoch16 : (double, double) EPOCH16 to convert. Lists and numpy arrays are acceptable. LAST dimension should be 2: the two pairs of EPOCH16 Returns ======= out : double EPOCH corresponding to epoch16 """ e = numpy.require(epoch16, numpy.float64) return e[..., 0] * 1000.0 + numpy.round(e[..., 1] / 1e9) def tt2000_to_datetime(self, tt2000): """ Converts a CDF TT2000 value to a datetime .. note:: Although TT2000 values support leapseconds, Python's datetime object does not. Any times after 23:59:59.999999 will be truncated to 23:59:59.999999. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
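        A round-trip sketch (assumes a CDF library with TT2000 support,
        i.e. version 3.4.0 or later; otherwise the TT2000 methods raise
        :exc:`NotImplementedError`):

        >>> import datetime
        >>> import pycdf
        >>> dt = datetime.datetime(2015, 3, 1, 12, 30)
        >>> pycdf.lib.tt2000_to_datetime(pycdf.lib.datetime_to_tt2000(dt)) == dt
        True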
See Also ======== v_tt2000_to_datetime """ yyyy = ctypes.c_double(0) mm = ctypes.c_double(0) dd = ctypes.c_double(0) hh = ctypes.c_double(0) min = ctypes.c_double(0) sec = ctypes.c_double(0) msec = ctypes.c_double(0) usec = ctypes.c_double(0) nsec = ctypes.c_double(0) self._library.breakdownTT2000( ctypes.c_longlong(tt2000), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) sec = int(sec.value) if sec >= 60: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), 59, 999999) micro = int(msec.value * 1000 + usec.value + nsec.value / 1000 + 0.5) if micro < 1000000: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_tt2000(self, dt): """ Converts a Python datetime to a CDF TT2000 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0) def _datetime_to_tt2000_typepunned(self, dt): """ Converts a Python datetime to a CDF TT2000 value Typepunned version that passes doubles as longlongs, to get around ARM calling convention oddness. Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ c_ll_p = ctypes.POINTER(ctypes.c_longlong) if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( dt.year)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.month)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.day)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.hour)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.minute)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.second)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond // 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond % 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) def epoch_to_tt2000(self, epoch): """ Converts a CDF EPOCH to a CDF TT2000 value Parameters ========== epoch : double EPOCH to convert Returns ======= out : int tt2000 corresponding to epoch See Also ======== v_epoch_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH(epoch) def tt2000_to_epoch(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH .. note:: Although TT2000 values support leapseconds, CDF EPOCH values do not. 
Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double EPOCH corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch """ return self._library.CDF_TT2000_to_UTC_EPOCH(tt2000) def epoch16_to_tt2000(self, epoch0, epoch1): """ Converts a CDF epoch16 value to TT2000 .. note:: Because TT2000 does not support picoseconds, the picoseconds value in epoch is ignored (i.e., truncated.) Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : long TT2000 corresponding to epoch. See Also ======== v_epoch16_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH16( (ctypes.c_double * 2)(epoch0, epoch1)) def tt2000_to_epoch16(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH16 .. note:: Although TT2000 values support leapseconds, CDF EPOCH16 values do not. Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double, double EPOCH16 corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch16 """ #Default to "illegal epoch" if isn't populated epoch16 = (ctypes.c_double * 2)(-1., -1.) if self._library.CDF_TT2000_to_UTC_EPOCH16(tt2000, epoch16): return (-1., -1.) #Failure; illegal epoch return (epoch16[0], epoch16[1]) def _bad_tt2000(*args, **kwargs): """Convenience function for complaining that TT2000 not supported""" raise NotImplementedError( 'TT2000 functions require CDF library 3.4.0 or later') def download_library(): """Download and install the CDF library""" if sys.platform != 'win32': raise NotImplementedError( 'CDF library install only supported on Windows') try: import html.parser as HTMLParser except ImportError: import HTMLParser #https://stackoverflow.com/questions/1713038/super-fails-with-error-typeerror-argument-1-must-be-type-not-classobj class LinkParser(HTMLParser.HTMLParser, object): def __init__(self, *args, **kwargs): self.links_found = [] super(LinkParser, self).__init__(*args, **kwargs) def handle_starttag(self, tag, attrs): if tag != 'a' or attrs[0][0] != 'href': return self.links_found.append(attrs[0][1]) import re import subprocess try: import urllib.request as u except ImportError: import urllib as u # Removed reference to spacepy #import spacepy #if spacepy.config.get('user_agent', None): # class AppURLopener(u.FancyURLopener): # version = spacepy.config['user_agent'] # u._urlopener = AppURLopener() baseurl = 'https://spdf.sci.gsfc.nasa.gov/pub/software/cdf/dist/' url = u.urlopen(baseurl) listing = url.read() url.close() p = LinkParser() p.feed(listing) cdfdist = [l for l in p.links_found if re.match('^cdf3\d_\d(?:_\d)?/$', l)] if not cdfdist: raise RuntimeError( "Couldn't find CDF distribution directory to download") cdfdist.sort(key=lambda x: x.rstrip('/').split('_')) cdfverbase = cdfdist[-1].rstrip('/') instfname = cdfverbase + ('_0' if cdfverbase.count('_') == 1 else '') + \ '-setup-{0}.exe'.format(len('%x' % sys.maxsize)*4) insturl = baseurl + cdfverbase + '/windows/' + instfname tmpdir = tempfile.mkdtemp() try: fname, status = u.urlretrieve(insturl, os.path.join(tmpdir, instfname)) subprocess.check_call([fname, '/install', '/q1'], shell=False) finally: shutil.rmtree(tmpdir) _libpath, _library = Library._find_lib() 
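#If no loadable CDF library was found in any candidate location, fail at
#import time; the error message suggests setting os.environ["CDF_LIB"] to the
#library directory before importing this module.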
if _library is None: raise Exception(('Cannot load CDF C library; checked {0}. ' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(_libpath))) from . import const lib = Library(_libpath, _library) """Module global library object. Initalized at module load time so all classes have ready access to the CDF library and a common state. E.g: >>> import pycdf >>> pycdf.lib.version (3, 3, 0, ' ') """ class CDFException(Exception): """ Base class for errors or warnings in the CDF library. Not normally used directly, but in subclasses :class:`CDFError` and :class:`CDFWarning`. Error messages provided by this class are looked up from the underlying C library. """ def __init__(self, status): """ Create a CDF Exception Uses CDF C library to look up an appropriate error message. Parameters ========== status : ctypes.c_long CDF status """ self.status = status self.string = 'CDF error ' + repr(status) + ', unable to get details.' message = ctypes.create_string_buffer(const.CDF_STATUSTEXT_LEN + 1) try: retval = lib._library.CDFlib(const.SELECT_, const.CDF_STATUS_, ctypes.c_long(status), const.GET_, const.STATUS_TEXT_, message, const.NULL_) if retval == const.CDF_OK: if isinstance(message.value, str): self.string = message.value elif isinstance(message.value, bytes): self.string = message.value.decode() except: pass def __str__(self): """ Error string associated with the library error. Returns ======= out : str Error message from the CDF library. """ return self.string class CDFError(CDFException): """Raised for an error in the CDF library.""" pass class CDFWarning(CDFException, UserWarning): """Used for a warning in the CDF library.""" def warn(self, level=4): """ Issues a warning based on the information stored in my exception Intended for use in check_status or similar wrapper function. Other Parameters ================ level : int optional (default 3), how far up the stack the warning should be reported. Passed directly to :class:`warnings.warn`. """ warnings.warn(self, self.__class__, level) class EpochError(Exception): """Used for errors in epoch routines""" pass def _compress(obj, comptype=None, param=None): """Set or check the compression of a :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param obj: object on which to set or check compression @type obj: :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param comptype: type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :py:mod:`pycdf.const`. If not specified, will not change compression. @type comptype: ctypes.c_long @param param: Compression parameter, see CDF CRM 4.10 and :py:mod:`pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
@type param: ctypes.c_long @return: (comptype, param) currently in effect @rtype: tuple """ if isinstance(obj, CDF): COMPRESSION_ = const.CDF_COMPRESSION_ elif isinstance(obj, Var): COMPRESSION_ = const.zVAR_COMPRESSION_ else: raise ValueError('Must specify a CDF or Var type.') validparams = {const.NO_COMPRESSION.value: [ctypes.c_long(0)], const.RLE_COMPRESSION.value: [const.RLE_OF_ZEROs], const.HUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.AHUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.GZIP_COMPRESSION.value: [ctypes.c_long(5), ctypes.c_long(1), ctypes.c_long(2), ctypes.c_long(3), ctypes.c_long(4), ctypes.c_long(6), ctypes.c_long(7), ctypes.c_long(8), ctypes.c_long(9), ], } comptypes = [const.NO_COMPRESSION, const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION, const.GZIP_COMPRESSION] comptypevalues = [i.value for i in comptypes] if comptype != None: if not hasattr(comptype, 'value'): comptype = ctypes.c_long(comptype) if param is None: if not comptype.value in validparams: raise CDFError(const.BAD_COMPRESSION) param = validparams[comptype.value][0] paramlist = (ctypes.c_long * 1)(param) obj._call(const.PUT_, COMPRESSION_, comptype, paramlist) params = (ctypes.c_long * const.CDF_MAX_PARMS)(*([0] * const.CDF_MAX_PARMS)) comptype = ctypes.c_long(0) percent = ctypes.c_long(0) obj._call(const.GET_, COMPRESSION_, ctypes.byref(comptype), ctypes.byref(params), ctypes.byref(percent)) param = params[0] if not comptype.value in comptypevalues: raise CDFError(const.BAD_COMPRESSION) validparamvalues = [i.value for i in validparams[comptype.value]] if not param in validparamvalues: raise CDFError(const.BAD_COMPRESSION_PARM) comptype = comptypes[comptypevalues.index(comptype.value)] if comptype in (const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION): param = validparams[comptype.value][validparamvalues.index(param)] return (comptype, param) class CDF(MutableMapping): """ Python object representing a CDF file. Open or create a CDF file by creating an object of this class. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. A readonly CDF with many variables may be slow to close. See :meth:`readonly`. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :meth:`close` or :meth:`save` when done. .. note:: Existing CDF files are opened read-only by default, see :meth:`readonly` to change. CDF supports the `with <http://docs.python.org/tutorial/inputoutput.html#methods-of-file-objects>`_ keyword, like other file objects, so: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... #do brilliant things with the CDF will open the CDF, execute the indented statements, and close the CDF when finished or when an error occurs. The `python docs <http://docs.python.org/reference/compound_stmts.html#with>`_ include more detail on this 'context manager' ability. 
CDF objects behave like a python `dictionary <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_, where the keys are names of variables in the CDF, and the values, :class:`Var` objects. As a dictionary, they are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_ and it is easy to loop over all of the variables in a file. Some examples: #. List the names of all variables in the open CDF ``cdffile``: >>> cdffile.keys() >>> for k in cdffile: #Alternate ... print(k) #. Get a :class:`Var` object for the variable named ``Epoch``: >>> epoch = cdffile['Epoch'] #. Determine if a CDF contains a variable named ``B_GSE``: >>> if 'B_GSE' in cdffile: ... print('B_GSE is in the file') ... else: ... print('B_GSE is not in the file') #. Find how many variables are in the file: >>> print(len(cdffile)) #. Delete the variable ``Epoch`` from the open CDF file ``cdffile``: >>> del cdffile['Epoch'] #. Display a summary of variables and types in open CDF file ``cdffile``: >>> print(cdffile) #. Open the CDF named ``cdf_filename.cdf``, read *all* the data from all variables into dictionary ``data``, and close it when done or if an error occurs: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... data = cdffile.copy() This last example can be very inefficient as it reads the entire CDF. Normally it's better to treat the CDF as a dictionary and access only the data needed, which will be pulled transparently from disc. See :class:`Var` for more subtle examples. Potentially useful dictionary methods and related functions: - `in <http://docs.python.org/reference/expressions.html#in>`_ - `keys <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_ - :py:func:`len` - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - :py:func:`sorted` - :py:func:`~spacepy.toolbox.dictree` The CDF user's guide section 2.2 has more background information on CDF files. The :attr:`~CDF.attrs` Python attribute acts as a dictionary referencing CDF attributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`gAttrList` for more on the dictionary of global attributes. Creating a new CDF from a master (skeleton) CDF has similar syntax to opening one: >>> cdffile = pycdf.CDF('cdf_filename.cdf', 'master_cdf_filename.cdf') This creates and opens ``cdf_filename.cdf`` as a copy of ``master_cdf_filename.cdf``. Using a skeleton CDF is recommended over making a CDF entirely from scratch, but this is possible by specifying a blank master: >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') When CDFs are created in this way, they are opened read-write, see :py:meth:`readonly` to change. By default, new CDFs (without a master) are created in version 2 (backward-compatible) format. To create a version 3 CDF, use :meth:`Library.set_backward`: >>> pycdf.lib.set_backward(False) >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') Add variables by direct assignment, which will automatically set type and dimension based on the data provided: >>> cdffile['new_variable_name'] = [1, 2, 3, 4] or, if more control is needed over the type and dimensions, use :py:meth:`new`. Although it is supported to assign Var objects to Python variables for convenience, there are some minor pitfalls that can arise when changing a CDF that will not affect most users. 
This is only a concern when assigning a zVar object to a Python variable, changing the CDF through some other variable, and then trying to use the zVar object via the originally assigned variable. Deleting a variable: >>> var = cdffile['Var1'] >>> del cdffile['Var1'] >>> var[0] #fail, no such variable Renaming a variable: >>> var = cdffile['Var1'] >>> cdffile['Var1'].rename('Var2') >>> var[0] #fail, no such variable Renaming via the same variable works: >>> var = cdffile['Var1'] >>> var.rename('Var2') >>> var[0] #succeeds, aware of new name Deleting a variable and then creating another variable with the same name may lead to some surprises: >>> var = cdffile['Var1'] >>> var[...] = [1, 2, 3, 4] >>> del cdffile['Var1'] >>> cdffile.new('Var1', data=[5, 6, 7, 8] >>> var[...] [5, 6, 7, 8] .. autosummary:: ~CDF.attr_num ~CDF.attrs ~CDF.add_attr_to_cache ~CDF.add_to_cache ~CDF.backward ~CDF.checksum ~CDF.clear_attr_from_cache ~CDF.clear_from_cache ~CDF.clone ~CDF.close ~CDF.col_major ~CDF.compress ~CDF.copy ~CDF.from_data ~CDF.new ~CDF.raw_var ~CDF.readonly ~CDF.save ~CDF.var_num ~CDF.version .. attribute:: CDF.attrs Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. .. attribute:: CDF.backward True if this CDF was created in backward-compatible mode (for opening with CDF library before 3.x) .. automethod:: add_to_cache .. automethod:: add_attr_to_cache .. automethod:: attr_num .. automethod:: checksum .. automethod:: clear_from_cache .. automethod:: clear_attr_from_cache .. automethod:: clone .. automethod:: close .. automethod:: col_major .. automethod:: compress .. automethod:: copy .. automethod:: from_data .. automethod:: new .. automethod:: raw_var .. automethod:: readonly .. automethod:: save .. automethod:: var_num .. automethod:: version """ def __init__(self, pathname, masterpath=None, create=None, readonly=None): """Open or create a CDF file. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. Raises ====== CDFError if CDF library reports an error CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save` when done. 
""" if masterpath is not None: #Looks like we want to create if create is False: raise ValueError('Cannot specify a master CDF without creating a CDF') if readonly is True: raise ValueError('Cannot create a CDF in readonly mode') if create and readonly: raise ValueError('Cannot create a CDF in readonly mode') try: self.pathname = pathname.encode() except AttributeError: raise ValueError( 'pathname must be string-like: {0}'.format(pathname)) self._handle = ctypes.c_void_p(None) self._opened = False if masterpath is None and not create: self._open(True if readonly is None else readonly) elif masterpath: self._from_master(masterpath.encode()) else: self._create() lib.call(const.SELECT_, const.CDF_zMODE_, ctypes.c_long(2)) self._attrlistref = weakref.ref(gAttrList(self)) self.backward = self.version()[0] < 3 self._var_nums = {} """Cache of name-to-number mappings for variables in this CDF""" self._attr_info = {} """Cache of name-to-(number, global) mappings for attributes in this CDF""" def __del__(self): """Destructor; called when CDF object is destroyed. Close CDF file if there is still a valid handle. .. note:: To avoid data loss, explicitly call :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save`. """ if self._opened: self.close() def __delitem__(self, name): """Delete a zVariable in this CDF, by name or number Parameters ========== name : string or int Name or number of the CDF variable .. note: Variable numbers may change if variables are added or removed. Examples ======== Delete the variable ``Epoch`` from the open CDF file ``cdffile``. >>> del cdffile['Epoch'] """ self[name]._delete() def __enter__(self): """Context manager entrance function.""" return self def __exit__(self, type, value, traceback): """Context manager exit function. Close CDF file. """ self.close() def __getitem__(self, name): """Gets a zVariable in this CDF, by name or number The CDF acts like a dict @param name: Name or number of the CDF variable @type name: string or int @return: CDF variable named or numbered L{name} @rtype: :py:class:`pycdf.Var` @raise KeyError: for pretty much any problem in lookup @note: variable numbers may change if variables are added or removed. """ try: return Var(self, name) except CDFException as e: raise KeyError('{0}: {1}'.format(name, e)) def __setitem__(self, name, data): """Writes data to a zVariable in this CDF If the zVariable does not exist, will create one matching L{data}. If it does exist, will attempt to write L{data} to it without changing the type or dimensions. @param name: name or number of the variable to write @type name: str or int @param data: data to write, or a :py:class:`pycdf.Var` to copy """ if isinstance(data, Var): self.clone(data, name) elif name in self: self[name][...] 
= data if hasattr(data, 'attrs'): self[name].attrs.clone(data.attrs) else: self.new(name, data) def __iter__(self, current = 0): """Iterates over zVars in CDF Iterators for dicts return keys @note: Returned in variable-number order """ while current < self.__len__(): name = self[current].name() value = (yield name) if value is None: current += 1 else: current = self[value]._num() current += 1 def __len__(self): """Implements 'length' of CDF (number of zVars) @return: number of zVars in the CDF @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, const.CDF_NUMzVARS_, ctypes.byref(count)) return count.value def __contains__(self, key): """Determines whether a particular variable name is in the CDF @note: Essentially an efficiency function; L{__iter__} is called if this isn't defined @param key: key/variable name to check @type key: string @return: True if L{key} is the name of a variable in CDF, else False @rtype: Boolean """ try: foo = self[key] return True except KeyError as e: expected = str(key) + \ ": NO_SUCH_VAR: Named variable not found in this CDF." if expected in e.args: return False raise def __repr__(self): """Returns representation of CDF Cannot return anything that can be eval'd to create a copy of the CDF, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<CDF:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the CDF This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.CDF`, just the names, types, and sizes of all variables. (Attributes are not listed.) @return: description of the variables in the CDF @rtype: str """ if self._opened: return '\n'.join([key + ': ' + str(value) for (key, value) in sorted(self.items())]) #can get away with this sort because second value in tuple isn't #compared unless first are different, and variable name is unique. else: if isinstance(self.pathname, str): return 'Closed CDF {0}'.format(self.pathname) else: return 'Closed CDF {0}'.format(self.pathname.decode('ascii')) def _open(self, readonly=True): """Opens the CDF file (called on init) Will open an existing CDF file read/write. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.OPEN_, const.CDF_, self.pathname, ctypes.byref(self._handle)) self._opened = True if readonly: #Default is RW self.readonly(readonly) def _create(self): """Creates (and opens) a new CDF file Created at ``pathname``. Assumes zero-dimension r variables Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.CREATE_, const.CDF_, self.pathname, ctypes.c_long(0), (ctypes.c_long * 1)(0), ctypes.byref(self._handle)) self._opened = True def _from_master(self, master_path): """Creates a new CDF from a master CDF file ``master_path`` is copied to ``pathname`` and opened. Parameters ========== master_path : string location of the master CDF file Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. 
note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ if os.path.exists(self.pathname): raise CDFError(const.CDF_EXISTS) shutil.copy2(master_path, self.pathname) self._open(False) def _call(self, *args, **kwargs): """Select this CDF as current and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. Parameters ========== args : various, see :py:mod:`ctypes`. Passed directly to the CDF library interface. Useful constants are defined in the :doc:`const <pycdf_const>` module of this package. Returns ======= out : ctypes.c_long CDF status from the library .. note: Terminal NULL_ is automatically added to ``args``. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. """ return lib.call(const.SELECT_, const.CDF_, self._handle, *args, **kwargs) def clone(self, zVar, name=None, data=True): """ Clone a zVariable (from another CDF or this) into this CDF Parameters ========== zVar : :py:class:`Var` variable to clone Other Parameters ================ name : str Name of the new variable (default: name of the original) data : boolean (optional) Copy data, or only type, dimensions, variance, attributes? (default: True, copy data as well) Returns ======= out : :py:class:`Var` The newly-created zVar in this CDF """ if name is None: name = zVar.name() if name in self: del self[name] self.new(name, type=zVar.type(), recVary=zVar.rv(), dimVarys=zVar.dv(), dims=zVar._dim_sizes(), n_elements=zVar._nelems()) self[name].compress(*zVar.compress()) self[name].attrs.clone(zVar.attrs) if data: r = zVar._raw zVar._raw = True self.raw_var(name)[...] = zVar[...] zVar._raw = r return zVar def col_major(self, new_col=None): """ Finds the majority of this CDF file Other Parameters ================ new_col : boolean Specify True to change to column-major, False to change to row major, or do not specify to check the majority rather than changing it. (default is check only) Returns ======= out : boolean True if column-major, false if row-major """ if new_col != None: new_maj = const.COLUMN_MAJOR if new_col else const.ROW_MAJOR self._call(const.PUT_, const.CDF_MAJORITY_, new_maj) maj = ctypes.c_long(0) self._call(const.GET_, const.CDF_MAJORITY_, ctypes.byref(maj)) if not maj.value in (const.ROW_MAJOR.value, const.COLUMN_MAJOR.value): raise CDFError(const.BAD_MAJORITY) return maj.value == const.COLUMN_MAJOR.value def readonly(self, ro=None): """ Sets or check the readonly status of this CDF If the CDF has been changed since opening, setting readonly mode will have no effect. .. note:: Closing a CDF that has been opened readonly, or setting readonly False, may take a substantial amount of time if there are many variables in the CDF, as a (potentially large) cache needs to be cleared. Consider specifying ``readonly=False`` when opening the file if this is an issue. However, this may make some reading operations slower. Other Parameters ================ ro : Boolean True to set the CDF readonly, False to set it read/write, or leave out to check only. 
Returns ======= out : Boolean True if CDF is read-only, else False Raises ====== CDFError : if bad mode is set """ if ro == True: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYon) elif ro == False: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYoff) mode = ctypes.c_long(0) self._call(const.CONFIRM_, const.CDF_READONLY_MODE_, ctypes.byref(mode)) if mode.value == const.READONLYon.value: return True elif mode.value == const.READONLYoff.value: return False else: raise CDFError(const.BAD_READONLY_MODE.value) def checksum(self, new_val=None): """ Set or check the checksum status of this CDF. If checksums are enabled, the checksum will be verified every time the file is opened. Other Parameters ================ new_val : boolean True to enable checksum, False to disable, or leave out to simply check. Returns ======= out : boolean True if the checksum is enabled or False if disabled """ if new_val != None: self._call(const.PUT_, const.CDF_CHECKSUM_, const.MD5_CHECKSUM if new_val else const.NO_CHECKSUM) chk = ctypes.c_long(0) self._call(const.GET_, const.CDF_CHECKSUM_, ctypes.byref(chk)) if not chk.value in (const.MD5_CHECKSUM.value, const.NO_CHECKSUM.value): raise CDFError(const.BAD_CHECKSUM) return chk.value == const.MD5_CHECKSUM.value def close(self): """ Closes the CDF file Although called on object destruction (:meth:`~CDF.__del__`), to ensure all data are saved, the user should explicitly call :meth:`~CDF.close` or :meth:`~CDF.save`. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.CLOSE_, const.CDF_) self._opened = False def compress(self, comptype=None, param=None): """ Set or check the compression of this CDF Sets compression on entire *file*, not per-variable. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) Returns ======= out : tuple (comptype, param) currently in effect See Also ======== :meth:`Var.compress` Examples ======== Set file ``cdffile`` to gzip compression, compression level 9: >>> cdffile.compress(pycdf.const.GZIP_COMPRESSION, 9) """ return _compress(self, comptype, param) def new(self, name, data=None, type=None, recVary=True, dimVarys=None, dims=None, n_elements=None, compress=None, compress_param=None): """ Create a new zVariable in this CDF .. note:: Either ``data`` or ``type`` must be specified. If type is not specified, it is guessed from ``data``. Parameters ========== name : str name of the new variable Other Parameters ================ data data to store in the new variable. If this has a an ``attrs`` attribute (e.g., :class:`~spacepy.datamodel.dmarray`), it will be used to populate attributes of the new variable. type : ctypes.c_long CDF type of the variable, from :mod:`~pycdf.const`. See section 2.5 of the CDF user's guide for more information on CDF data types. recVary : boolean record variance of the variable (default True) dimVarys : list of boolean dimension variance of each dimension, default True for all dimensions. 
dims : list of int size of each dimension of this variable, default zero-dimensional. Note this is the dimensionality as defined by CDF, i.e., for record-varying variables it excludes the leading record dimension. See :py:class:`Var`. n_elements : int number of elements, should be 1 except for CDF_CHAR, for which it's the length of the string. compress : ctypes.c_long Compression to apply to this variable, default None. See :py:meth:`Var.compress`. compress_param : ctypes.c_long Compression parameter if compression used; reasonable default is chosen. See :py:meth:`Var.compress`. Returns ======= out : :py:class:`Var` the newly-created zVariable Raises ====== ValueError : if neither data nor sufficient typing information is provided. Notes ===== Any given data may be representable by a range of CDF types; if the type is not specified, pycdf will guess which the CDF types which can represent this data. This breaks down to: #. If input data is a numpy array, match the type of that array #. Proper kind (numerical, string, time) #. Proper range (stores highest and lowest number provided) #. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: #. Type that matches precision of data first, then #. integer type before float type, then #. Smallest type first, then #. signed type first, then #. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if ``data`` specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: #. absolute values between 0 and 3e-39 #. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. """ if type in (const.CDF_EPOCH16, const.CDF_INT8, const.CDF_TIME_TT2000) \ and self.backward: raise ValueError('Cannot use EPOCH16, INT8, or TIME_TT2000 ' 'in backward-compatible CDF') if not lib.supports_int8 and \ type in (const.CDF_INT8, const.CDF_TIME_TT2000): raise ValueError('INT8 and TIME_TT2000 require CDF library 3.4.0') if data is None: if type is None: raise ValueError('Must provide either data or a CDF type.') if dims is None: dims = [] if n_elements is None: n_elements = 1 else: (guess_dims, guess_types, guess_elements) = _Hyperslice.types(data) if dims is None: if recVary: if guess_dims == (): raise ValueError( 'Record-varying data cannot be scalar. 
' 'Specify NRV with CDF.new() or put data in array.') dims = guess_dims[1:] else: dims = guess_dims if type is None: type = guess_types[0] if type == const.CDF_EPOCH16.value and self.backward: type = const.CDF_EPOCH if n_elements is None: n_elements = guess_elements if dimVarys is None: dimVarys = [True for i in dims] recVary = const.VARY if recVary else const.NOVARY dimVarys = [const.VARY if dimVary else const.NOVARY for dimVary in dimVarys] if not hasattr(type, 'value'): type = ctypes.c_long(type) if type.value == const.CDF_INT8.value and not lib.supports_int8: raise ValueError( '64-bit integer support require CDF library 3.4.0') if type.value in (const.CDF_EPOCH16.value, const.CDF_INT8.value, const.CDF_TIME_TT2000.value) \ and self.backward: raise ValueError('Data requires EPOCH16, INT8, or TIME_TT2000; ' 'incompatible with backward-compatible CDF') new_var = Var(self, name, type, n_elements, dims, recVary, dimVarys) if compress != None: new_var.compress(compress, compress_param) if data is not None: new_var[...] = data if hasattr(data, 'attrs'): new_var.attrs.clone(data.attrs) return new_var def raw_var(self, name): """ Get a "raw" :class:`Var` object. Normally a :class:`Var` will perform translation of values for certain types (to/from Unicode for CHAR variables on Py3k, and to/from datetime for all time types). A "raw" object does not perform this translation, on read or write. This does *not* affect the data on disk, and in fact it is possible to maintain multiple Python objects with access to the same zVariable. Parameters ========== name : str name or number of the zVariable """ v = self[name] v._raw = True return v def save(self): """ Saves the CDF file but leaves it open. If closing the CDF, :meth:`close` is sufficient; there is no need to call :meth:`save` before :meth:`close`. .. note:: Relies on an undocumented call of the CDF C library, which is also used in the Java interface. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.SAVE_, const.CDF_) def copy(self): """ Make a copy of all data and attributes in this CDF Returns ======= out : :py:class:`CDFCopy` :class:`~spacepy.datamodel.SpaceData`-like object of all data """ return CDFCopy(self) def version(self): """ Get version of library that created this CDF Returns ======= out : tuple version of CDF library, in form (version, release, increment) """ ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) self._call(const.GET_, const.CDF_VERSION_, ctypes.byref(ver), const.GET_, const.CDF_RELEASE_, ctypes.byref(rel), const.GET_, const.CDF_INCREMENT_, ctypes.byref(inc)) return (ver.value, rel.value, inc.value) def _get_attrs(self): """Get attribute list Provide access to the CDF's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = gAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. 
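    A usage sketch (the attribute name ``Project`` is illustrative only):

    >>> cdffile = pycdf.CDF('cdf_filename.cdf', '')  # create a new CDF
    >>> cdffile.attrs['Project'] = 'My project'
    >>> 'Project' in cdffile.attrs
    True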
""") def var_num(self, varname): """Get the variable number of a particular variable name This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : int Variable number of this zvariable. """ num = self._var_nums.get(varname, None) if num is None: #Copied from Var._get, which can hopefully be thinned varNum = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMBER_, varname, ctypes.byref(varNum)) num = varNum.value self._var_nums[varname] = num return num def attr_num(self, attrname): """Get the attribute number and scope by attribute name This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : tuple attribute number, scope (True for global) of this attribute """ res = self._attr_info.get(attrname, None) if res is None: #Copied from Var._get, which can hopefully be thinned attrNum = ctypes.c_long(0) self._call(const.GET_, const.ATTR_NUMBER_, attrname, ctypes.byref(attrNum)) scope = ctypes.c_long(0) self._call(const.SELECT_, const.ATTR_, attrNum, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) res = (attrNum.value, scope) self._attr_info[attrname] = res return res def clear_attr_from_cache(self, attrname): """Mark an attribute deleted in the name-to-number cache Will remove an attribute, and all attributes with higher numbers, from the attribute cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the attribute. Not this is NOT a string in Python 3! """ num, scope = self.attr_num(attrname) #All numbers higher than this are renumbered for a, n in list(self._attr_info.items()): if n[0] >= num: del self._attr_info[a] def clear_from_cache(self, varname): """Mark a variable deleted in the name-to-number cache Will remove a variable, and all variables with higher numbers, from the variable cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! """ num = self.var_num(varname) #All numbers higher than this are renumbered for v, n in list(self._var_nums.items()): if n >= num: del self._var_nums[v] def add_attr_to_cache(self, attrname, num, scope): """Add an attribute to the name-to-number cache This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable scope : bool True if global scope; False if variable scope. 
""" self._attr_info[attrname] = (num, scope) def add_to_cache(self, varname, num): """Add a variable to the name-to-number cache This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable """ self._var_nums[varname] = num #Note there is no function for delete, currently handled in Var.rename #and Attr.rename by just deleting from the dict directly. Maybe this #should be differen (maybe should be possible to follow a variable across #a rename...) class Var(MutableSequence): """ A CDF variable. This object does not directly store the data from the CDF; rather, it provides access to the data in a format that much like a Python list or numpy :class:`~numpy.ndarray`. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. The CDF user's guide, section 2.3, provides background on variables. .. note:: Not intended to be created directly; use methods of :class:`CDF` to gain access to a variable. A record-varying variable's data are viewed as a hypercube of dimensions n_dims+1 (the extra dimension is the record number). They are indexed in row-major fashion, i.e. the last index changes most frequently / is contiguous in memory. If the CDF is column-major, the data are transformed to row-major before return. Non record-varying variables are similar, but do not have the extra dimension of record number. Variables can be subscripted by a multidimensional index to return the data. Indices are in row-major order with the first dimension representing the record number. If the CDF is column major, the data are reordered to row major. Each dimension is specified by standard Python `slice <http://docs.python.org/tutorial/introduction.html#strings>`_ notation, with dimensions separated by commas. The ellipsis fills in any missing dimensions with full slices. The returned data are lists; Python represents multidimensional arrays as nested lists. The innermost set of lists represents contiguous data. .. note:: numpy 'fancy indexing' is *not* supported. Degenerate dimensions are 'collapsed', i.e. no list of only one element will be returned if a single subscript is specified instead of a range. (To avoid this, specify a slice like 1:2, which starts with 1 and ends before 2). Two special cases: 1. requesting a single-dimension slice for a record-varying variable will return all data for that record number (or those record numbers) for that variable. 2. Requests for multi-dimensional variables may skip the record-number dimension and simply specify the slice on the array itself. In that case, the slice of the array will be returned for all records. In the event of ambiguity (e.g., single-dimension slice on a one-dimensional variable), case 1 takes priority. Otherwise, mismatch between the number of dimensions specified in the slice and the number of dimensions in the variable will cause an :exc:`~exceptions.IndexError` to be thrown. This all sounds very complicated but it is essentially attempting to do the 'right thing' for a range of slices. An unusual case is scalar (zero-dimensional) non-record-varying variables. Clearly they cannot be subscripted normally. 
In this case, use the ``[...]`` syntax meaning 'access all data.': >>> import pycdf >>> testcdf = pycdf.CDF('test.cdf', '') >>> variable = testcdf.new('variable', recVary=False, ... type=pycdf.const.CDF_INT4) >>> variable[...] = 10 >>> variable <Var: CDF_INT4 [] NRV > >>> variable[...] 10 Reading any empty non-record-varying variable will return an empty with the same *number* of dimensions, but all dimensions will be of zero length. The scalar is, again, a special case: due to the inability to have a numpy array which is both zero-dimensional and empty, reading an NRV scalar variable with no data will return an empty one-dimensional array. This is really not recommended. As a list type, variables are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_; iterating over a variable returns a single complete record at a time. This is all clearer with examples. Consider a variable ``B_GSM``, with three elements per record (x, y, z components) and fifty records in the CDF. Then: 1. ``B_GSM[0, 1]`` is the y component of the first record. 2. ``B_GSM[10, :]`` is a three-element list, containing x, y, and z components of the 11th record. As a shortcut, if only one dimension is specified, it is assumed to be the record number, so this could also be written ``B_GSM[10]``. 3. ``B_GSM[...]`` reads all data for ``B_GSM`` and returns it as a fifty-element list, each element itself being a three-element list of x, y, z components. Multidimensional example: consider fluxes stored as a function of pitch angle and energy. Such a variable may be called Flux and stored as a two-dimensional array, with the first dimension representing (say) ten energy steps and the second, eighteen pitch angle bins (ten degrees wide, centered from 5 to 175 degrees). Assume 100 records stored in the CDF (i.e. 100 different times). 1. ``Flux[4]`` is a list of ten elements, one per energy step, each element being a list of 18 fluxes, one per pitch bin. All are taken from the fifth record in the CDF. 2. ``Flux[4, :, 0:4]`` is the same record, all energies, but only the first four pitch bins (roughly, field-aligned). 3. ``Flux[..., 0:4]`` is a 100-element list (one per record), each element being a ten-element list (one per energy step), each containing fluxes for the first four pitch bins. This slicing notation is very flexible and allows reading specifically the desired data from the CDF. All data are, on read, converted to appropriate Python data types; EPOCH, EPOCH16, and TIME_TT2000 types are converted to :class:`~datetime.datetime`. Data are returned in numpy arrays. .. note:: Although pycdf supports TIME_TT2000 variables, the Python :class:`~datetime.datetime` object does not support leap seconds. Thus, on read, any seconds past 59 are truncated to 59.999999 (59 seconds, 999 milliseconds, 999 microseconds). Potentially useful list methods and related functions: - `count <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `in <http://docs.python.org/reference/expressions.html#in>`_ - `index <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `len <http://docs.python.org/library/functions.html#len>`_ - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - `sorted <http://docs.python.org/library/functions.html#sorted>`_ The topic of array majority can be very confusing; good background material is available at `IDL Array Storage and Indexing <http://www.idlcoyote.com/misc_tips/colrow_major.html>`_. 
In brief, *regardless of the majority stored in the CDF*, pycdf will always present the data in the native Python majority, row-major order, also known as C order. This is the default order in `NumPy <http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html #internal-memory-layout-of-an-ndarray>`_. However, packages that render image data may expect it in column-major order. If the axes seem 'swapped' this is likely the reason. The :attr:`~Var.attrs` Python attribute acts as a dictionary referencing zAttributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`zAttrList` for more on the dictionary of attributes. With writing, as with reading, every attempt has been made to match the behavior of Python lists. You can write one record, many records, or even certain elements of all records. There is one restriction: only the record dimension (i.e. dimension 0) can be resized by write, as all records in a variable must have the same dimensions. Similarly, only whole records can be deleted. .. note:: Unusual error messages on writing data usually mean that pycdf is unable to interpret the data as a regular array of a single type matching the type and shape of the variable being written. A 5x4 array is supported; an irregular array where one row has five columns and a different row has six columns is not. Error messages of this type include: - ``Data must be well-formed, regular array of number, string, or datetime`` - ``setting an array element with a sequence.`` - ``shape mismatch: objects cannot be broadcast to a single shape`` For these examples, assume Flux has 100 records and dimensions [2, 3]. Rewrite the first record without changing the rest: >>> Flux[0] = [[1, 2, 3], [4, 5, 6]] Writes a new first record and delete all the rest: >>> Flux[...] = [[1, 2, 3], [4, 5, 6]] Write a new record in the last position and add a new record after: >>> Flux[99:] = [[[1, 2, 3], [4, 5, 6]], ... [[11, 12, 13], [14, 15, 16]]] Insert two new records between the current number 5 and 6: >>> Flux[5:6] = [[[1, 2, 3], [4, 5, 6]], [[11, 12, 13], ... [14, 15, 16]]] This operation can be quite slow, as it requires reading and rewriting the entire variable. (CDF does not directly support record insertion.) Change the first element of the first two records but leave other elements alone: >>> Flux[0:2, 0, 0] = [1, 2] Remove the first record: >>> del Flux[0] Removes record 5 (the sixth): >>> del Flux[5] Due to the need to work around a bug in the CDF library, this operation can be quite slow. Delete *all data* from ``Flux``, but leave the variable definition intact: >>> del Flux[...] .. note:: Although this interface only directly supports zVariables, zMode is set on opening the CDF so rVars appear as zVars. See p.24 of the CDF user's guide; pyCDF uses zMode 2. .. autosummary:: ~Var.attrs ~Var.compress ~Var.copy ~Var.dtype ~Var.dv ~Var.insert ~Var.name ~Var.rename ~Var.rv ~Var.shape ~Var.type .. attribute:: Var.attrs zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. .. automethod:: compress .. automethod:: copy .. autoattribute:: dtype .. automethod:: dv .. automethod:: insert .. automethod:: name .. automethod:: rename .. automethod:: rv .. autoattribute:: shape .. 
automethod:: type """ def __init__(self, cdf_file, var_name, *args): """Create or locate a variable Parameters ========== cdf_file : :py:class:`pycdf.CDF` CDF file containing this variable var_name : string name of this variable Other Parameters ================ args additional arguments passed to :py:meth:`_create`. If none, opens an existing variable. If provided, creates a new one. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning """ self.cdf_file = cdf_file #This is the definitive "identify" of variable self._name = None self._type = None #CDF type (long) self._raw = False #Raw access (skip all conversions) if len(args) == 0: self._get(var_name) else: self._create(var_name, *args) #Weak reference to attribute list (use attrs instead) #This avoids a reference loop self._attrlistref = weakref.ref(zAttrList(self)) def __getitem__(self, key): """Returns a slice from the data array. Details under :py:class:`pycdf.Var`. @return: The data from this variable @rtype: list-of-lists of appropriate type. @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) #Hyperslice mostly catches this sort of thing, but #an empty variable is a special case, since we might want to #WRITE to 0th record (which Hyperslice also supports) but #can't READ from it, and iterating over tries to read from it. if hslice.rv: if hslice.dimsizes[0] == 0 and hslice.degen[0] and \ hslice.starts[0] == 0: raise IndexError('record index out of range') #For NRV, again hslice will assume 0th record exists since we might #want to write. So ANY degenerate dim other than the glued-on 0th #suggests an explicit index that should fail. None degenerate suggests #make an empty array. #Note this is pulling a lot of hyperslice stuff into getitem! elif hslice.dimsizes[0] == 0: if len(hslice.degen) > 1 and max(hslice.degen[1:]): raise IndexError('record index out of range') else: #The zero-length dimension is degenerate so it gets chopped, #and you can't have a zero-length numpy array that still #maintains the size of all other dimensions. So just force #a zero-dim array and the rest will follow hslice.counts[...] = 0 #If this is a scalar, need to make a single non-degenerate #dimension so it can be empty. if len(hslice.counts) == 1: hslice.degen[0] = False result = hslice.create_array() if hslice.counts[0] != 0: hslice.select() lib.call(const.GET_, const.zVAR_HYPERDATA_, result.ctypes.data_as(ctypes.c_void_p)) return hslice.convert_input_array(result) def __delitem__(self, key): """Removes a record (or set of records) from the CDF Only whole records can be deleted, so the del call must either specify only one dimension or it must specify all elements of the non-record dimensions. This is *not* a way to resize a variable! Deleting records from the middle of a variable may be very slow in some circumstances. To work around a bug in CDF library versions 3.4.0 and before, all the data must be read in, the requested deletions done, and then all written back out. 
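A brief illustration, reusing the hypothetical ``Flux`` variable from the class docstring (record-varying, dimensions [2, 3]):

>>> del Flux[0:10]   # removes the first ten whole records
>>> del Flux[2, 0]   # raises TypeError: can only delete entire records
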
@param key: index or slice to delete @type key: int or slice @raise TypeError: if an attempt is made to delete from a non record-varying variable, or to delete below the record level """ if not self.rv(): raise TypeError('Cannot delete records from non-record-varying ' 'variable.') hslice = _Hyperslice(self, key) if hslice.dims > 1 and (hslice.counts[1:] != hslice.dimsizes[1:]).any(): raise TypeError('Can only delete entire records.') if hslice.counts[0] == 0: return start = hslice.starts[0] count = hslice.counts[0] interval = hslice.intervals[0] dimsize = hslice.dimsizes[0] self._call() dangerous_delete = False if lib._del_middle_rec_bug and \ (interval != 1 or (start != 0 and start + count < dimsize)): #delete from middle is dangerous if only have one index entry entries = ctypes.c_long(0) lib.call(const.GET_, const.zVAR_nINDEXENTRIES_, ctypes.byref(entries)) dangerous_delete = (entries.value == 1) if dangerous_delete: data = self[...] data = numpy.delete( data, numpy.arange(start, start + count * interval, interval), 0) self[0:dimsize - count] = data first_rec = dimsize - count last_rec = dimsize - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif interval == 1: first_rec = ctypes.c_long(start) last_rec = ctypes.c_long(start + count - 1) lib.call(const.DELETE_, const.zVAR_RECORDS_, first_rec, last_rec) else: self._call() #delete from end to avoid renumbering of records for recno in range(start + (count - 1) * interval, start - 1, -1 * interval): lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(recno), ctypes.c_long(recno)) def __setitem__(self, key, data): """Puts a slice into the data array. Details under :py:class:`pycdf.Var`. @param key: index or slice to store @type key: int or slice @param data: data to store @type data: numpy.array @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. 
IndexError will @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) n_recs = hslice.counts[0] hslice.expand(data) cdf_type = self.type() if cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) else: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=self._np_type()) if cdf_type == const.CDF_EPOCH16.value: datashape = data.shape[:-1] else: datashape = data.shape #Check data sizes if datashape != tuple(hslice.expected_dims()): raise ValueError('attempt to assign data of dimensions ' + str(datashape) + ' to slice of dimensions ' + str(tuple(hslice.expected_dims()))) #Flip majority and reversed dimensions, see convert_input_array data = hslice.convert_output_array(data) #Handle insertions and similar weirdness if hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Specified slice ends before last record, so insert in middle saved_data = self[hslice.starts[0] + n_recs:] if hslice.counts[0] > 0: hslice.select() lib.call(const.PUT_, const.zVAR_HYPERDATA_, data.ctypes.data_as(ctypes.c_void_p)) if hslice.counts[0] < n_recs: first_rec = hslice.starts[0] + hslice.counts[0] last_rec = hslice.dimsizes[0] - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Put saved data in after inserted data self[hslice.starts[0] + hslice.counts[0]:] = saved_data def extend(self, data): """ Append multiple values to the end of this variable This is an efficiency function which overrides the base implementation in MutableSequence. Parameters ---------- data : the data to append """ self[len(self):] = data def insert(self, index, data): """ Inserts a *single* record before an index Parameters ---------- index : int index before which to insert the new record data : the record to insert """ self[index:index] = [data] def _create(self, var_name, datatype, n_elements = 1, dims = (), recVary = const.VARY, dimVarys = None): """Creates a new zVariable @param var_name: name of this variable @type var_name: string @param datatype: CDF data type @type datatype: ctypes.c_long @param n_elements: number of elements (should be 1 except for CDF_CHAR variables). @type n_elements: long @param dims: size of each dimension for multi-dimensional variable, or empty for a zero-dimensional @type dims: sequence of long @param recVary: record variance for this variable (VARY/NOVARY) @type recVary: long @param dimVarys: array of VARY or NOVARY, variance for each dimension @type dimVarys: sequence of long @return: new variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.new}. 
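As a hedged sketch of how these parameters map to a call (the variable name, type, and sizes below are purely illustrative; normal code should go through L{CDF.new}):

>>> v = Var(cdf, 'Flux', const.CDF_FLOAT, 1, (2, 3), const.VARY)

which creates a record-varying CDF_FLOAT variable with dimensions [2, 3] in the open CDF ``cdf``.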
""" dim_array = (ctypes.c_long * len(dims))(*dims) enc_name = var_name.encode('ascii') if dimVarys is None: dim_vary_array = (ctypes.c_long * (len(dims) if len(dims) > 0 else 1))(const.VARY) else: dim_vary_array = (ctypes.c_long * len(dims))(*dimVarys) varNum = ctypes.c_long(0) self.cdf_file._call(const.CREATE_, const.zVAR_, enc_name, datatype, ctypes.c_long(n_elements), ctypes.c_long(len(dims)), dim_array, recVary, dim_vary_array, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) def _delete(self): """Removes this zVariable from the CDF @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ self._call(const.DELETE_, const.zVAR_) self.cdf_file.clear_from_cache(self._name) self._name = None def _get(self, var_name): """Gets an existing zVariable @param var_name: name of this variable @type var_name: string @return: variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.__getitem__}. """ if isinstance(var_name, str_classes): try: enc_name = var_name.encode('ascii').rstrip() except AttributeError: enc_name = var_name.rstrip() #already in ASCII #'touch' CDF to cause an error if the name isn't there; get number varNum = ctypes.c_long(0) self.cdf_file._call(const.GET_, const.zVAR_NUMBER_, enc_name, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) else: #Looking up by number name = ctypes.create_string_buffer(const.CDF_VAR_NAME_LEN256+1) self.cdf_file._call(const.SELECT_, const.zVAR_, ctypes.c_long(var_name), const.GET_, const.zVAR_NAME_, name) self._name = name.value.rstrip() self.cdf_file.add_to_cache(self._name, var_name) def _num(self): """Returns the zVar number for this variable @return: number of this zVar @rtype: int """ return self.cdf_file.var_num(self._name) def __len__(self): """Get number of records for this variable in this file @return: Number of records @rtype: long @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ count = ctypes.c_long(0) self._call(const.GET_, const.zVAR_MAXREC_, ctypes.byref(count)) return (count.value + 1) def __repr__(self): """Returns representation of the variable Cannot return anything that can be eval'd to create a copy, so just wrap the informal representation in angle brackets. @return: info on this zVar @rtype: str """ return '<Var:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the variable This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.Var`. 
@return: info on this zVar, CDFTYPE [dimensions] NRV (if not record-varying) @rtype: str """ if self.cdf_file._opened: cdftype = self.type() chartypes = (const.CDF_CHAR.value, const.CDF_UCHAR.value) rv = self.rv() typestr = lib.cdftypenames[cdftype] + \ ('*' + str(self._nelems()) if cdftype in chartypes else '' ) if rv: sizestr = str([len(self)] + self._dim_sizes()) else: sizestr = str(self._dim_sizes()) return typestr + ' ' + sizestr + ('' if rv else ' NRV') else: if isinstance(self._name, str): return 'zVar "{0}" in closed CDF {1}'.format( self._name, self.cdf_file.pathname) else: return 'zVar "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self.cdf_file.pathname.decode('ascii')) def _n_dims(self): """Get number of dimensions for this variable @return: the number of dimensions @rtype: long """ n_dims = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMDIMS_, ctypes.byref(n_dims)) return n_dims.value def _dim_sizes(self): """Get the dimension sizes for this variable @return: sequence of sizes @rtype: sequence of long @note: This will always be in Python order (i.e. row major, last index iterates most quickly), *regardless* of the majority of the CDF. """ sizes = (ctypes.c_long * const.CDF_MAX_DIMS)(0) self._call(const.GET_, const.zVAR_DIMSIZES_, sizes) sizes = sizes[0:self._n_dims()] return sizes def rv(self, new_rv=None): """ Gets or sets whether this variable has record variance If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Other Parameters ================ new_rv : boolean True to change to record variance, False to change to NRV, unspecified to simply check variance. Returns ======= out : Boolean True if record varying, False if NRV """ if new_rv != None: self._call(const.PUT_, const.zVAR_RECVARY_, const.VARY if new_rv else const.NOVARY) vary = ctypes.c_long(0) self._call(const.GET_, const.zVAR_RECVARY_, ctypes.byref(vary)) return vary.value != const.NOVARY.value def dv(self, new_dv=None): """ Gets or sets dimension variance of each dimension of variable. If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Parameters ========== new_dv : list of boolean Each element True to change that dimension to dimension variance, False to change to not dimension variance. (Unspecified to simply check variance.) Returns ======= out : list of boolean True if that dimension has variance, else false. """ ndims = self._n_dims() if new_dv != None: if len(new_dv) != ndims: raise ValueError('Must specify variance for ' + str(ndims) + 'dimensions.') varies = (ctypes.c_long * ndims)( *[const.VARY if dv else const.NOVARY for dv in new_dv]) self._call(const.PUT_, const.zVAR_DIMVARYS_, varies) if ndims == 0: return [] varies = (ctypes.c_long * const.CDF_MAX_DIMS)() self._call(const.GET_, const.zVAR_DIMVARYS_, varies) return [dv != const.NOVARY.value for dv in varies[0:ndims]] def _call(self, *args, **kwargs): """Select this CDF and variable and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. @param args: Passed directly to the CDF library interface. Useful constants are defined in the :py:mod:`pycdf.const` module of this package. @type args: various, see :py:mod:`ctypes`. 
@return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self.cdf_file._call( const.SELECT_, const.zVAR_, ctypes.c_long(self.cdf_file.var_num(self._name)), *args, **kwargs) def _np_type(self): """Returns the numpy type of this variable This is the numpy type that will come directly out of the CDF; see :meth:`dtype` for the representation post-conversion. Raises ====== CDFError : for library-reported error or failure to find numpy type Returns ======= out : dtype numpy dtype that will hold value from this variable """ cdftype = self.type() if cdftype == const.CDF_CHAR.value or cdftype == const.CDF_UCHAR.value: return numpy.dtype('S' + str(self._nelems())) try: return lib.numpytypedict[cdftype] except KeyError: raise CDFError(const.BAD_DATA_TYPE) def type(self, new_type=None): """ Returns or sets the CDF type of this variable Parameters ========== new_type : ctypes.c_long the new type from :mod:`~pycdf.const` Returns ======= out : int CDF type """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) n_elements = ctypes.c_long(self._nelems()) self._call(const.PUT_, const.zVAR_DATASPEC_, new_type, n_elements) self._type = None if self._type is None: cdftype = ctypes.c_long(0) self._call(const.GET_, const.zVAR_DATATYPE_, ctypes.byref(cdftype)) self._type = cdftype.value return self._type def _nelems(self): """Number of elements for each value in this variable This is the length of strings for CHAR and UCHAR, should be 1 otherwise. @return: length of strings @rtype: int """ nelems = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMELEMS_, ctypes.byref(nelems)) return nelems.value def name(self): """ Returns the name of this variable Returns ======= out : str variable's name """ if isinstance(self._name, str): return self._name elif isinstance(self._name, bytes): return self._name.decode() def compress(self, comptype=None, param=None): """ Set or check the compression of this variable Compression may not be changeable on variables with data already written; even deleting the data may not permit the change. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
Returns ======= out : tuple the (comptype, param) currently in effect """ return _compress(self, comptype, param) def copy(self): """ Copies all data and attributes from this variable Returns ======= out : :class:`VarCopy` list of all data in record order """ return VarCopy(self) def rename(self, new_name): """ Renames this variable Parameters ========== new_name : str the new name for this variable """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_VAR_NAME_LEN256: raise CDFError(const.BAD_VAR_NAME) self._call(const.PUT_, const.zVAR_NAME_, enc_name) self.cdf_file.add_to_cache( enc_name, self.cdf_file.var_num(self._name)) #Still in cache del self.cdf_file._var_nums[self._name] self._name = enc_name @property def shape(self): """ Provides the numpy array-like shape of this variable. Returns a tuple; first element is number of records (RV variable only) And the rest provide the dimensionality of the variable. .. note:: Assigning to this attribute will not change the shape. """ if self.rv(): return tuple([len(self)] + self._dim_sizes()) else: return tuple(self._dim_sizes()) @property def dtype(self): """ Provide the numpy dtype equivalent to the CDF type of this variable. Data from this variable will be returned in numpy arrays of this type. See Also -------- type """ cdftype = self.type() if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str is not bytes and not self._raw: return numpy.dtype('U' + str(self._nelems())) if cdftype in (const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value) and not self._raw: return numpy.dtype('O') return self._np_type() def _get_attrs(self): """Get attribute list Provide access to the zVar's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = zAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. """) class _Hyperslice(object): """Represents a CDF 'slice' used for the hyper CDF functions For internal module use only. @ivar dims: number of dimensions to this slice, usually number of dimensions to the variable plus one for the record, which represents the 0th (least rapidly varying) dimension. @type dims: int @ivar dimsizes: size of each dimension (0th is number of records) @type dimsizes: list of int @ivar starts: index of the start value for each dimension ('dimension indices' in CDF speak) @type starts: list of int @ivar counts: number of values to get from each dimension. Final result will be the product of everything in counts. ('dimension counts' in CDF speak) @type counts: numpy.array @ivar intervals: interval between successive indices to use for each dimension. ('dimension invervals' in CDF speak) @type intervals: list of int @ivar degen: is this dimension degenerate, i.e. should be removed in the returned dataset. A 3D array with one dimension degenerate will be returned as a 2D array (i.e. list-of-lists.) @type degen: numpy.array @ivar rev: should this dimension be returned in reverse order? 
@type rev: numpy.array @ivar column: is this slice in column-major mode (if false, row-major) @type column: boolean @ivar zvar: what CDF variable this object slices on @type zvar: :py:class:`pycdf.Var` @ivar expanded_key: fully-expanded version of the key passed to the constructor (all dimensions filled in) @type expanded_key: tuple @note: All dimension-related variables are stored row-major (Python order) """ def __init__(self, zvar, key): """Create a Hyperslice @param zvar: zVariable that this slices @type zvar: :py:class:`pycdf.Var` @param key: Python multi-dimensional slice as passed to __getitem__ @type key: tuple of slice and/or int @raise IndexError: if slice is out of range, mismatches dimensions, or otherwise unparsable. @raise ValueError: if slice has invalid values """ self.zvar = zvar self.rv = self.zvar.rv() #dim of records, + 1 record dim (NRV always is record 0) self.dims = zvar._n_dims() + 1 self.dimsizes = [len(zvar)] + \ zvar._dim_sizes() self.starts = [0] * self.dims self.counts = numpy.empty((self.dims,), dtype=numpy.int32) self.counts.fill(1) self.intervals = [1] * self.dims self.degen = numpy.zeros(self.dims, dtype=numpy.bool) self.rev = numpy.zeros(self.dims, dtype=numpy.bool) #key is: #1. a single value (integer or slice object) if called 1D #2. a tuple (of integers and/or slice objects) if called nD #3. Each item is either a single value (degenerate dim) # or a slice object. if not hasattr(key, '__len__'): #Not a container object, pack in tuple key = (key, ) if not self.rv: key = (0, ) + key #NRV, so always get 0th record (degenerate) key = self.expand_ellipsis(key, self.dims) if self.rv: #special-cases for RV variables if len(key) == 1: #get all data for this record(s) key = self.expand_ellipsis(key + (Ellipsis, ), self.dims) elif len(key) == self.dims - 1: #get same slice from each record key = (slice(None, None, None), ) + key if len(key) == self.dims: self.expanded_key = key for i in range(self.dims): idx = key[i] if hasattr(idx, 'start'): #slice (self.starts[i], self.counts[i], self.intervals[i], self.rev[i]) = \ self.convert_range(idx.start, idx.stop, idx.step, self.dimsizes[i]) else: #Single degenerate value if idx < 0: idx += self.dimsizes[i] if idx != 0 and (idx >= self.dimsizes[i] or idx < 0): raise IndexError('list index out of range') self.starts[i] = idx self.degen[i] = True else: raise IndexError('Slice does not match dimensions for zVar ' + str(zvar._name)) self.column = zvar.cdf_file.col_major() def expected_dims(self, data=None): """Calculate size of non-degenerate dimensions Figures out size, in each dimension, of expected input data @return: size of each dimension for this slice, excluding degenerate @rtype: list of int """ return [self.counts[i] for i in range(self.dims) if not self.degen[i]] def expand(self, data): """Expands the record dimension of this slice to hold a set of data If the length of data (outermost dimension) is larger than the record count (counts[0]) for this slice, expand the slice to hold all the data. This requires that the record dimension of the slice not be degenerate, and also that it not have been completely specified when the hyperslice was created (i.e. record dimension either ellipsis or no specified stop.) Does *not* expand any other dimension, since that's Very Hard in CDF. 
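Hedged illustration (``zvar`` is hypothetical and currently holds 10 records):

>>> zvar[5:] = data   # if data holds 20 records, this slice grows to cover records 5 through 24
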
@param data: the data which are intended to be stored in this slice @type data: list """ rec_slice = self.expanded_key[0] if not self.rv or isinstance(data, str_classes) or self.degen[0] or \ not hasattr(rec_slice, 'stop'): return if len(data) < self.counts[0]: #Truncate to fit data if rec_slice.stop is None and rec_slice.step in (None, 1): self.counts[0] = len(data) elif len(data) > self.counts[0]: #Expand to fit data if rec_slice.step in (None, 1): self.counts[0] = len(data) def create_array(self): """Creates a numpy array to hold the data from this slice Returns ======= out : numpy.array array sized, typed, and dimensioned to hold data from this slice """ counts = self.counts degen = self.degen if self.column: counts = self.reorder(counts) degen = self.reorder(degen) #TODO: Forcing C order for now, revert to using self.column later array = numpy.empty( [counts[i] for i in range(len(counts)) if not degen[i]], self.zvar._np_type(), order='C') return numpy.require(array, requirements=('C', 'A', 'W')) def convert_input_array(self, buffer): """Converts a buffer of raw data from this slice EPOCH(16) variables always need to be converted. CHAR need converted to Unicode if py3k Parameters ========== buffer : numpy.array data as read from the CDF file Returns ======= out : numpy.array converted data """ result = self._flip_array(buffer) #Convert to derived types cdftype = self.zvar.type() if not self.zvar._raw: if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str != bytes: dt = numpy.dtype('U{0}'.format(result.dtype.itemsize)) result = numpy.require(numpy.char.array(result).decode(), dtype=dt) elif cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(result) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(result) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(result) return result def convert_output_array(self, buffer): """Convert a buffer of data that will go into this slice Parameters ========== buffer : numpy.array data to go into the CDF file Returns ======= out : numpy.array input with majority flipped and dimensions reversed to be suitable to pass directly to CDF library. """ buffer = self._flip_array(buffer) return numpy.require(buffer, requirements=('C', 'A', 'W')) def _flip_array(self, data): """ Operations for majority, etc. common between convert_input and _output """ cdftype = self.zvar.type() #Flip majority if any non-degenerate dimensions exist if self.column and not min(self.degen): #Record-number dim degen, swap whole thing if self.degen[0]: if cdftype == const.CDF_EPOCH16.value: #Maintain last dimension data = data.transpose( list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose() #Record-number dimension is not degenerate, so keep it first else: if cdftype == const.CDF_EPOCH16.value: data = data.transpose( [0] + list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose( [0] + list(range(len(data.shape) - 1, 0, -1))) #Reverse non-degenerate dimensions in rev #Remember that the degenerate indices are already gone! 
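        #For example: a request like var[:, ::-1] flags rev[1]; the list below
        #builds slice(None, None, -1) for each surviving (non-degenerate)
        #reversed dimension and applies them in a single getitem call.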
if self.rev.any(): sliced = [(slice(None, None, -1) if self.rev[i] else slice(None)) for i in range(self.dims) if not self.degen[i]] if cdftype == const.CDF_EPOCH16.value: #don't reverse last dim sliced.extend(slice(None)) data = operator.getitem(data, tuple(sliced)) return data def select(self): """Selects this hyperslice in the CDF Calls the CDF library to select the CDF, variable, records, and array elements corresponding to this slice. """ args = (const.SELECT_, const.zVAR_RECNUMBER_, ctypes.c_long(self.starts[0]), const.SELECT_, const.zVAR_RECCOUNT_, ctypes.c_long(self.counts[0]), const.SELECT_, const.zVAR_RECINTERVAL_, ctypes.c_long(self.intervals[0])) if self.dims > 1: dims = self.dims - 1 args += (const.SELECT_, const.zVAR_DIMINDICES_, (ctypes.c_long * dims)(*self.starts[1:]), const.SELECT_, const.zVAR_DIMCOUNTS_, (ctypes.c_long * dims)(*self.counts[1:]), const.SELECT_, const.zVAR_DIMINTERVALS_, (ctypes.c_long * dims)(*self.intervals[1:])) self.zvar._call(*args) @staticmethod def expand_ellipsis(slices, n_dims): """Expands any ellipses into correct number of full-size slices @param slices: tuple of slices, integers, or ellipse objects @type slices: tuple @param n_dims: number of dimensions this slice is over @type n_dims: int @return: L{slices} with ellipses replaced by appropriate number of full-dimension slices @rtype: tuple @raise IndexError: if ellipses specified when already have enough dimensions """ if slices is Ellipsis: return tuple([slice(None, None, None) for i in range(n_dims)]) #Elements might be numpy arrays, so can't use in/index idx = [i for i, v in enumerate(slices) if v is Ellipsis] if not idx: #no ellipsis return slices if len(idx) > 1: #multiples! raise IndexError('Ellipses can only be used once per slice.') idx = idx[0] #how many dims to expand ellipsis to #remember the ellipsis is in len(slices) and must be replaced! extra = n_dims - len(slices) + 1 if extra < 0: raise IndexError('too many indices') result = slices[0:idx] + (slice(None), ) * extra + slices[idx+1:] return result @staticmethod def check_well_formed(data): """Checks if input data is well-formed, regular array""" d = numpy.asanyarray(data) if d.dtype == numpy.object: #this is probably going to be bad try: len(d.flat[0]) except TypeError: #at least it's not a list pass else: raise ValueError( 'Data must be well-formed, regular array of number, ' 'string, or datetime') @staticmethod def dimensions(data): """Finds the dimensions of a nested list-of-lists @param data: data of which dimensions are desired @type data: list (of lists) @return: dimensions of L{data}, in order outside-in @rtype: list of int @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) _Hyperslice.check_well_formed(d) return d.shape @staticmethod def types(data, backward=False): """Find dimensions and valid types of a nested list-of-lists Any given data may be representable by a range of CDF types; infer the CDF types which can represent this data. This breaks down to: 1. Proper kind (numerical, string, time) 2. Proper range (stores highest and lowest number) 3. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: 1. Type that matches precision of data first, then 2. integer type before float type, then 3. Smallest type first, then 4. signed type first, then 5. specifically-named (CDF_BYTE) vs. 
generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if L{data} specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: 1. absolute values between 0 and 3e-39 2. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. @param data: data for which dimensions and CDF types are desired @type data: list (of lists) @param backward: limit to pre-CDF3 types @type backward: bool @return: dimensions of L{data}, in order outside-in; CDF types which can represent this data; number of elements required (i.e. length of longest string) @rtype: 3-tuple of lists ([int], [ctypes.c_long], [int]) @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) dims = d.shape elements = 1 types = [] _Hyperslice.check_well_formed(d) if d.dtype.kind in ('S', 'U'): #it's a string types = [const.CDF_CHAR, const.CDF_UCHAR] elements = d.dtype.itemsize if d.dtype.kind == 'U': #UTF-8 uses 4 bytes per elements //= 4 elif d.size and hasattr(numpy.ma.getdata(d).flat[0], 'microsecond'): if max((dt.microsecond % 1000 for dt in d.flat)) > 0: types = [const.CDF_EPOCH16, const.CDF_EPOCH, const.CDF_TIME_TT2000] else: types = [const.CDF_EPOCH, const.CDF_EPOCH16, const.CDF_TIME_TT2000] if backward: del types[types.index(const.CDF_EPOCH16)] del types[-1] elif not lib.supports_int8: del types[-1] elif d is data or isinstance(data, numpy.generic): #numpy array came in, use its type (or byte-swapped) types = [k for k in lib.numpytypedict if (lib.numpytypedict[k] == d.dtype or lib.numpytypedict[k] == d.dtype.newbyteorder()) and not k in lib.timetypes] if (not lib.supports_int8 or backward) \ and const.CDF_INT8.value in types: del types[types.index(const.CDF_INT8.value)] #Maintain priority to match the ordered lists below: #float/double (44, 45) before real (21/22), and #byte (41) before int (1) before char (51). So hack. #Consider making typedict an ordered dict once 2.6 is dead. types.sort(key=lambda x: x % 50, reverse=True) if not types: #not a numpy array, or can't parse its type if d.dtype.kind == 'O': #Object. 
Try to make it numeric #Can't do safe casting from Object, so try and compare #Basically try most restrictive to least restrictive trytypes = (numpy.uint64, numpy.int64, numpy.float64) for t in trytypes: try: newd = d.astype(dtype=t) except: #Failure to cast, try next type continue if (newd == d).all(): #Values preserved, use this type d = newd #Continue with normal guessing, as if a list break else: #fell through without a match raise ValueError( 'Cannot convert generic objects to CDF type.') if d.dtype.kind in ('i', 'u'): #integer minval = numpy.min(d) maxval = numpy.max(d) if minval < 0: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_INT2, const.CDF_INT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 15, 2 ** 31, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] else: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_UINT1, const.CDF_INT2, const.CDF_UINT2, const.CDF_INT4, const.CDF_UINT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 8, 2 ** 15, 2 ** 16, 2 ** 31, 2 ** 32, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] types = [t for (t, c) in zip(types, cutoffs) if c > maxval and (minval >= 0 or minval >= -c)] if (not lib.supports_int8 or backward) \ and const.CDF_INT8 in types: del types[types.index(const.CDF_INT8)] else: #float if dims is (): if d != 0 and (abs(d) > 1.7e38 or abs(d) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] else: absolutes = numpy.abs(d[d != 0]) if len(absolutes) > 0 and \ (numpy.max(absolutes) > 1.7e38 or numpy.min(absolutes) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] types = [t.value if hasattr(t, 'value') else t for t in types] return (dims, types, elements) @staticmethod def reorder(seq): """Reorders seq to switch array majority Used to take an array of subscripts between row and column majority. First element is not touched, being the record number. @param seq: a sequence of *subscripts* @type seq: sequence of integers @return: seq with all but element 0 reversed in order @rtype: sequence of integers """ return numpy.concatenate((seq[0:1], numpy.flipud(seq)[:-1])) @staticmethod def convert_range(start, stop, step, size): """Converts a start/stop/step range to start/count/interval (i.e. changes from Python-style slice to CDF-style) @param start: index to start a slice at, may be none or negative @type start: int @param stop: index at end of slice (one-past, standard Python), may be none or negative @type stop: int @param step: interval for stepping through stlice @type step: int @param size: size of list to slice @type size: int @return: (start, count, interval, rev) where: 1. start is the start index, normalized to be within the size of the list and negatives handled 2. count is the number of records in the slice, guaranteed to stop before the end 3. interval is the skip between records 4. rev indicates whether the sequence should be reversed @rtype: (int, int, int, boolean) """ (start, stop, step) = slice(start, stop, step).indices(size) if step < 0: step *= -1 count = int((start - stop + step - 1) / step) start = start - (count - 1) * step rev = True else: count = int((stop - start + step - 1) / step) rev = False if count < 0: count = 0 start = 0 return (start, count, step, rev) class Attr(MutableSequence): """An attribute, g or z, for a CDF .. 
warning:: This class should not be used directly, but only in its subclasses, :class:`gAttr` and :class:`zAttr`. The methods listed here are safe to use in the subclasses. Represents a CDF attribute, providing access to the Entries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. An introduction to CDF attributes can be found in section 2.4 of the CDF user's guide. Each element of the list is a single Entry of the appropriate type. The index to the elements is the Entry number. Multi-dimensional slicing is *not* supported; an Entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry .. autosummary:: ~Attr.append ~Attr.has_entry ~Attr.insert ~Attr.max_idx ~Attr.new ~Attr.number ~Attr.rename ~Attr.type .. automethod:: append .. automethod:: has_entry .. automethod:: insert .. automethod:: max_idx .. automethod:: new .. automethod:: number .. automethod:: rename .. automethod:: type """ # MASKED: __init__ function (lines 4040-4085) def __getitem__(self, key): """Return a slice of Entries. Because Attributes may be sparse, a multi-element slice will return None for those elements which do not have associated Entries. @param key: index or range of Entry number to return @type key: slice or int @return: a list of entries, appropriate type. @raise IndexError: if L{key} is an int and that Entry number does not exist. """ if key is Ellipsis: key = slice(None, None, None) if hasattr(key, 'indices'): idx = range(*key.indices(self.max_idx() + 1)) return [self._get_entry(i) if self.has_entry(i) else None for i in idx] else: if self.has_entry(key): return self._get_entry(key) else: raise IndexError('list index ' + str(key) + ' out of range.') def _check_other_entries(self, types): """Try to get the type of this entry from others in the Attribute For zAttrs, checks if all other Entries are the same type, and at least one doesn't match its zVar, i.e. Entry type dominates (otherwise assumption is the Var type dominates). For gAttrs, checks all other Entries, and gives priority to the one that's earliest in the possible type list and exists in other Entries. This is only one component of Entry type guessing! :param list types: CDF types that are candidates (match the data) :return: The type discerned from other Entries, or None """ if self.ENTRY_ == const.zENTRY_: #If everything else is the same entry type, #and one is not the same as its var, probably #all entries should be of that type cand_et = None #The Entry type that might work one_var_diff = False #One Var has a type different from Entry for num in range(self.max_idx() + 1): if not self.has_entry(num): continue vartype = self._cdf_file[num].type() entrytype = self.type(num) if vartype != entrytype: one_var_diff = True if cand_et is None: if not entrytype in types: return None #One var has Entry with "impossible" type cand_et = entrytype elif cand_et != entrytype: return None #Two vars have Entries with different types if one_var_diff and cand_et is not None: return cand_et else: # Of those types which exist in other entries, # find the one which is earliest # in types, i.e. 
the preferred type entrytypes = [self.type(num) for num in range(self.max_idx() + 1) if self.has_entry(num)] entrytypes = [et for et in entrytypes if et in types] if entrytypes: return types[ min([types.index(et) for et in entrytypes])] return None def __setitem__(self, key, data): """Set a slice of Entries. @param key: index or range of Entry numbers to set @type key: slice or int @param data: the data to set these entries to. Normally each entry should be a sequence; if a scalar is provided, it is treated as a single-element list. @type data: scalar or list @raise ValueError: if size of {data} does not match size of L{key} @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. """ if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): #Single value, promote everything a dimension idx = (key, key + 1, 1) data = [data] else: idx = key.indices(self.max_idx() + 1) if key.step is None or key.step > 0: #Iterating forward, extend slice to match data if len(data) > len(range(*idx)): idx = (idx[0], idx[0] + idx[2] * len(data), idx[2]) #get, and check, types and sizes for all data #checks first so don't have error after changing half the Entries data_idx = -1 typelist = [] for i in range(*idx): data_idx += 1 if data_idx >= len(data): continue datum = data[data_idx] if datum is None: typelist[i] = (None, None, None) continue (dims, types, elements) = _Hyperslice.types( datum, backward=self._cdf_file.backward) if len(types) <= 0: raise ValueError('Cannot find a matching CDF type.') if len(dims) > 1: raise ValueError('Entries must be scalar or 1D.') elif len(dims) == 1 and isinstance(datum[0], str_classes): raise ValueError('Entry strings must be scalar.') entry_type = None if self.has_entry(i): #If the entry already exists, match its type entry_type = self.type(i) if not entry_type in types: entry_type = None if entry_type is None: #Check other entries for this attribute entry_type = self._check_other_entries(types) if entry_type is None and self.ENTRY_ == const.zENTRY_: #Fall back to zVar type vartype = self._cdf_file[i].type() if vartype in types: entry_type = vartype else: entry_type = types[0] elif entry_type is None: entry_type = types[0] if not entry_type in lib.numpytypedict: raise ValueError('Cannot find a matching numpy type.') typelist.append((dims, entry_type, elements)) data_idx = -1 for i in range(*idx): data_idx += 1 if data_idx >= len(data) or data[data_idx] is None: if self.has_entry(i): del self[i] continue datum = data[data_idx] (dims, entry_type, elements) = typelist[data_idx] self._write_entry(i, datum, entry_type, dims, elements) def __delitem__(self, key): """Delete a slice of Entries. @param key: index or range of Entry numbers to delete @type key: slice or int @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. """ if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): idx = (key, key + 1, 1) else: idx = key.indices(self.max_idx() + 1) for i in range(*idx): self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(i), const.DELETE_, self.ENTRY_) def __iter__(self, current=0): """Iterates over all entries in this Attribute Returns data from one entry at a time until reaches the end. @note: Returned in entry-number order. 
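Hedged example (``attribute`` is hypothetical; gaps in the Entry numbering are simply skipped):

>>> values = [entry for entry in attribute]
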
""" while current <= self.max_idx(): if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current += 1 def __reversed__(self, current=None): """Iterates over all entries in this Attribute Returns data from one entry at a time, starting at end and going to beginning. @note: Returned in entry-number order. """ if current is None: current = self.max_idx() while current >= 0: if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current -= 1 def __len__(self): """Number of Entries for this Attr. NOT same as max Entry number. @return: Number of Entries @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_NUMENTRIES_, ctypes.byref(count)) return count.value def __repr__(self): """Returns representation of an attribute Cannot return anything that can be eval'd to create a copy of the attribtute, so just wrap the informal representation in angle brackets. @return: all the data in this attribute @rtype: str """ return '<\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute This is an 'informal' representation in that it cannot be evaluated directly to create an L{Attr}. @return: all the data in this attribute @rtype: str """ if self._cdf_file._opened: return '\n'.join([str(item) for item in self]) else: if isinstance(self._name, str): return 'Attribute "{0}" in closed CDF {1}'.format( self._name, self._cdf_file.pathname) else: return 'Attribute "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self._cdf_file.pathname.decode('ascii')) def insert(self, index, data): """Insert an entry at a particular number Inserts entry at particular number while moving all subsequent entries to one entry number later. Does not close gaps. Parameters ========== index : int index where to put the new entry data : data for the new entry """ max_entry = self.max_idx() if index > max_entry: #Easy case self[index] = data return for i in range(max_entry, index - 1, -1): if self.has_entry(i+1): self.__delitem__(i+1) if self.has_entry(i): self.new(self.__getitem__(i), type=self.type(i), number=i+1) self[index] = data def append(self, data): """Add an entry to end of attribute Puts entry after last defined entry (does not fill gaps) Parameters ========== data : data for the new entry """ self[self.max_idx() + 1] = data def _call(self, *args, **kwargs): """Select this CDF and Attr and call the CDF internal interface @param args: Passed directly to the CDF library interface. @type args: various, see :py:mod:`ctypes`. @return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. 
""" return self._cdf_file._call( const.SELECT_, const.ATTR_, ctypes.c_long(self._cdf_file.attr_num(self._name)[0]), *args, **kwargs) def _entry_len(self, number): """Number of elements in an Entry @param number: number of Entry @type number: int @return: number of elements @rtype: int """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') count = ctypes.c_long(0) self._call( const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_NUMELEMS_, ctypes.byref(count)) return count.value def type(self, number, new_type=None): """Find or change the CDF type of a particular Entry number Parameters ========== number : int number of Entry to check or change Other Parameters ================ new_type type to change this Entry to, from :mod:`~pycdf.const`. Omit to only check type. Returns ======= out : int CDF variable type, see :mod:`~pycdf.const` Notes ===== If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 57 """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) size = ctypes.c_long(self._entry_len(number)) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATASPEC_, new_type, size, ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') cdftype = ctypes.c_long(0) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATATYPE_, ctypes.byref(cdftype), ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') return cdftype.value def has_entry(self, number): """Check if this attribute has a particular Entry number Parameters ========== number : int number of Entry to check or change Returns ======= out : bool True if ``number`` is a valid entry number; False if not """ status = self._call(const.CONFIRM_, self.ENTRY_EXISTENCE_, ctypes.c_long(number), ignore=(const.NO_SUCH_ENTRY, )) return not status == const.NO_SUCH_ENTRY def max_idx(self): """Maximum index of Entries for this Attr Returns ======= out : int maximum Entry number """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_MAXENTRY_, ctypes.byref(count)) return count.value def new(self, data, type=None, number=None): """Create a new Entry in this Attribute .. note:: If ``number`` is provided and an Entry with that number already exists, it will be overwritten. Parameters ========== data data to put in the Entry Other Parameters ================ type : int type of the new Entry, from :mod:`~pycdf.const` (otherwise guessed from ``data``) number : int Entry number to write, default is lowest available number. 
""" if number is None: number = 0 while self.has_entry(number): number += 1 (dims, types, elements) = _Hyperslice.types( data, backward=self._cdf_file.backward) if type is None: #Guess based on other entries type = self._check_other_entries(types) if type is None and self.ENTRY_ == const.zENTRY_: #Try to match variable type vartype = self._cdf_file[number].type() if vartype in types: type = vartype if type is None: type = types[0] elif hasattr(type, 'value'): type = type.value self._write_entry(number, data, type, dims, elements) def number(self): """Find the attribute number for this attribute Returns ======= out : int attribute number """ no = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.ATTR_NUMBER_, self._name, ctypes.byref(no)) return no.value def global_scope(self): """Determine scope of this attribute. Returns ======= out : bool True if global (i.e. gAttr), False if zAttr """ return self._cdf_file.attr_num(self._name)[1] def rename(self, new_name): """Rename this attribute Renaming a zAttribute renames it for *all* zVariables in this CDF! Parameters ========== new_name : str the new name of the attribute """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_ATTR_NAME_LEN256: raise CDFError(const.BAD_ATTR_NAME) self._call(const.PUT_, const.ATTR_NAME_, enc_name) self._cdf_file.add_attr_to_cache( enc_name, *self._cdf_file.attr_num(self._name)) #still in cache del self._cdf_file._attr_info[self._name] self._name = enc_name def _get_entry(self, number): """Read an Entry associated with this L{Attr} @param number: number of Entry to return @type number: int @return: data from entry numbered L{number} @rtype: list or str """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') #Make a big enough buffer length = self._entry_len(number) cdftype = self.type(number) if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): buff = numpy.empty((), 'S{0}'.format(length), order='C') else: if not cdftype in lib.numpytypedict: raise CDFError(const.BAD_DATA_TYPE) buff = numpy.empty((length,), lib.numpytypedict[cdftype], order='C') buff = numpy.require(buff, requirements=('C', 'A', 'W')) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATA_, buff.ctypes.data_as(ctypes.c_void_p)) #decode if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): if str == bytes or self._raw: #Py2k, leave as bytes result = bytes(buff) else: #Py3k, make unicode result = str(numpy.char.array(buff).decode()) else: if not self._raw: if cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(buff) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(buff) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(buff) else: result = buff else: result = buff if length == 1: result = result[0] return result def _write_entry(self, number, data, cdf_type, dims, elements): """Write an Entry to this Attr. 
@param number: number of Entry to write @type number: int @param data: data to write @param cdf_type: the CDF type to write, from :py:mod:`pycdf.const` @param dims: dimensions of L{data} @type dims: list @param elements: number of elements in L{data}, 1 unless it is a string @type elements: int """ if len(dims) == 0: n_write = 1 else: n_write = dims[0] if cdf_type in (const.CDF_CHAR.value, const.CDF_UCHAR.value): data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.dtype('S' + str(elements))) n_write = elements elif cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) elif cdf_type in lib.numpytypedict: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=lib.numpytypedict[cdf_type]) else: raise CDFError(const.BAD_DATA_TYPE) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATA_, ctypes.c_long(cdf_type), ctypes.c_long(n_write), data.ctypes.data_as(ctypes.c_void_p)) def _delete(self): """Delete this Attribute Also deletes all Entries associated with it. """ self._call(const.DELETE_, const.ATTR_) self._cdf_file.clear_attr_from_cache(self._name) self._name = None class zAttr(Attr): """zAttribute for zVariables within a CDF. .. warning:: Because zAttributes are shared across all variables in a CDF, directly manipulating them may have unexpected consequences. It is safest to operate on zEntries via :class:`zAttrList`. .. note:: When accessing a zAttr, pyCDF exposes only the zEntry corresponding to the associated zVariable. See Also ======== :class:`Attr` """ ENTRY_ = const.zENTRY_ ENTRY_DATA_ = const.zENTRY_DATA_ SCOPE = const.VARIABLE_SCOPE ENTRY_EXISTENCE_ = const.zENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMzENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXzENTRY_ ENTRY_NUMELEMS_ = const.zENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.zENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.zENTRY_DATASPEC_ def insert(self, index, data): """Insert entry at particular index number Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError def append(self, data): """Add entry to end of attribute list Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError class gAttr(Attr): """Global Attribute for a CDF Represents a CDF attribute, providing access to the gEntries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. Normally accessed by providing a key to a :class:`gAttrList`: >>> attribute = cdffile.attrs['attribute_name'] >>> first_gentry = attribute[0] Each element of the list is a single gEntry of the appropriate type. The index to the elements is the gEntry number.
A gEntry may be either a single string or a 1D array of numerical type. Entries of numerical type (everything but CDF_CHAR and CDF_UCHAR) with a single element are returned as scalars; multiple-element entries are returned as a list. No provision is made for accessing below the entry level; the whole list is returned at once (but Python's slicing syntax can be used to extract individual items from that list.) Multi-dimensional slicing is *not* supported; an entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry gEntries are *not* necessarily contiguous; a gAttribute may have an entry 0 and entry 2 without an entry 1. :meth:`~Attr.len` will return the *number* of gEntries; use :meth:`~Attr.max_idx` to find the highest defined gEntry number and :meth:`~Attr.has_entry` to determine if a particular gEntry number exists. Iterating over all entries is also supported:: >>> entrylist = [entry for entry in attribute] Deleting gEntries will leave a "hole": >>> attribute[0:3] = [1, 2, 3] >>> del attribute[1] >>> attribute.has_entry(1) False >>> attribute.has_entry(2) True >>> print attribute[0:3] [1, None, 3] Multi-element slices over nonexistent gEntries will return ``None`` where no entry exists. Single-element indices for nonexistent gEntries will raise ``IndexError``. Assigning ``None`` to a gEntry will delete it. When assigning to a gEntry, the type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing gEntry of the same number in this gAttribute #. other gEntries in this gAttribute #. data-matching constraints described in :meth:`CDF.new`. See Also ======== :class:`Attr` """ ENTRY_ = const.gENTRY_ ENTRY_DATA_ = const.gENTRY_DATA_ SCOPE = const.GLOBAL_SCOPE ENTRY_EXISTENCE_ = const.gENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMgENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXgENTRY_ ENTRY_NUMELEMS_ = const.gENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.gENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.gENTRY_DATASPEC_ class AttrList(MutableMapping): """Object representing a list of attributes. .. warning:: This class should not be used directly, but only via its subclasses, :class:`gAttrList` and :class:`zAttrList`. Methods listed here are safe to use from the subclasses. .. autosummary:: ~AttrList.clone ~AttrList.copy ~AttrList.from_dict ~AttrList.new ~AttrList.rename .. automethod:: clone .. automethod:: copy .. automethod:: from_dict .. automethod:: new .. automethod:: rename """ def __init__(self, cdf_file, special_entry=None): """Initialize the attribute collection @param cdf_file: CDF these attributes are in @type cdf_file: :py:class:`pycdf.CDF` @param special_entry: callable which returns a "special" entry number, used to limit results for zAttrs to those which match the zVar (i.e. 
the var number) @type special_entry: callable """ self._cdf_file = cdf_file self.special_entry = special_entry def __getitem__(self, name): """Find an Attribute by name @param name: name of the Attribute to return @type name: str @return: attribute named L{name} @rtype: L{Attr} @raise KeyError: if there is no attribute named L{name} @raise CDFError: other errors in CDF library """ try: attrib = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attrib.global_scope() != self.global_scope: raise KeyError(name + ': no ' + self.attr_name + ' by that name.') return attrib def __setitem__(self, name, data): """Create an Attribute or change its entries @param name: name of Attribute to change @type name: str @param data: Entries to populate this Attribute with. Any existing Entries will be deleted! Another C{Attr} may be specified, in which case all its entries are copied. @type data: scalar, list, or L{Attr} """ if isinstance(data, AttrList): if name in self: del self[name] attr = self._get_or_create(name) for entryno in range(data.max_idx()): if data.has_entry(entryno): attr.new(data[entryno], data.type(entryno), entryno) else: attr = self._get_or_create(name) if isinstance(data, str_classes): data = [data] else: try: junk = len(data) except TypeError: data = [data] attr[:] = data del attr[len(data):] def __delitem__(self, name): """Delete an Attribute (and all its entries) @param name: name of Attribute to delete @type name: str """ try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) attr._delete() def __iter__(self, current=0): """Iterates over all Attr in this CDF or variable Returns name of one L{Attr} at a time until reaches the end. @note: Returned in number order. """ count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) while current < count.value: candidate = self.AttrType(self._cdf_file, current) if candidate.global_scope() == self.global_scope: if self.special_entry is None or \ candidate.has_entry(self.special_entry()): if str == bytes: value = yield(candidate._name) else: value = yield(candidate._name.decode()) if value != None: current = self[value].number() current += 1 def __repr__(self): """Returns representation of attribute list Cannot return anything that can be eval'd to create a copy of the list, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<' + self.__class__.__name__ + ':\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute list This is an 'informal' representation in that it cannot be evaluated directly to create an L{AttrList}. 
@return: all the data in this list of attributes @rtype: str """ if self._cdf_file._opened: return '\n'.join([key + ': ' + ( ('\n' + ' ' * (len(key) + 2)).join( [str(value[i]) + ' [' + lib.cdftypenames[value.type(i)] + ']' for i in range(value.max_idx() + 1) if value.has_entry(i)]) if isinstance(value, Attr) else str(value) + ' [' + lib.cdftypenames[self.type(key)] + ']' ) for (key, value) in sorted(self.items())]) else: if isinstance(self._cdf_file.pathname, str): return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname) else: return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname.decode('ascii')) def clone(self, master, name=None, new_name=None): """ Clones another attribute list, or one attribute from it, into this list. Parameters ========== master : AttrList the attribute list to copy from. This can be any dict-like object. Other Parameters ================ name : str (optional) name of attribute to clone (default: clone entire list) new_name : str (optional) name of the new attribute, default ``name`` """ if name is None: self._clone_list(master) else: self._clone_attr(master, name, new_name) def copy(self): """ Create a copy of this attribute list Returns ======= out : dict copy of the entries for all attributes in this list """ return dict((key, value[:] if isinstance(value, Attr) else value) for (key, value) in self.items()) def new(self, name, data=None, type=None): """ Create a new Attr in this AttrList Parameters ========== name : str name of the new Attribute Other Parameters ================ data data to put into the first entry in the new Attribute type CDF type of the first entry from :mod:`~pycdf.const`. Only used if data are specified. Raises ====== KeyError : if the name already exists in this list """ if name in self: raise KeyError(name + ' already exists.') #A zAttr without an Entry in this zVar will be a "get" not "create" attr = self._get_or_create(name) if data is not None: if self.special_entry is None: attr.new(data, type) else: attr.new(data, type, self.special_entry()) def rename(self, old_name, new_name): """ Rename an attribute in this list Renaming a zAttribute renames it for *all* zVariables in this CDF! Parameters ========== old_name : str the current name of the attribute new_name : str the new name of the attribute """ AttrList.__getitem__(self, old_name).rename(new_name) def from_dict(self, in_dict): """ Fill this list of attributes from a dictionary .. deprecated:: 0.1.5 Use :meth:`~pycdf.AttrList.clone` instead; it supports cloning from dictionaries. Parameters ========== in_dict : dict Attribute list is populated entirely from this dictionary; all existing attributes are deleted. """ warnings.warn("from_dict is deprecated and will be removed. 
Use clone.", DeprecationWarning) for k in in_dict: self[k] = in_dict[k] for k in list(self): if not k in in_dict: del self[k] def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{AttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name self[new_name] = master[name] def _clone_list(self, master): """Clones this attribute list from another @param master: the attribute list to copy from @type master: L{AttrList} """ for name in master: self._clone_attr(master, name) for name in list(self): #Can't iterate over a list we're changing if not name in master: del self[name] def _get_or_create(self, name): """Retrieve L{Attr} or create it if it doesn't exist @param name: name of the attribute to look up or create @type name: str @return: attribute with this name @rtype: L{Attr} """ attr = None try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status != const.NO_SUCH_ATTR: raise if attr is None: attr = self.AttrType(self._cdf_file, name, True) elif attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) return attr class gAttrList(AttrList): """ Object representing *all* the gAttributes in a CDF. Normally accessed as an attribute of an open :class:`CDF`: >>> global_attribs = cdffile.attrs Appears as a dictionary: keys are attribute names; each value is an attribute represented by a :class:`gAttr` object. To access the global attribute TEXT: >>> text_attr = cdffile.attrs['TEXT'] See Also ======== :class:`AttrList` """ AttrType = gAttr attr_name = 'gAttribute' global_scope = True def __len__(self): """ Number of gAttributes in this CDF Returns ======= out : int number of gAttributes in the CDF """ count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMgATTRS_, ctypes.byref(count)) return count.value class zAttrList(AttrList): """Object representing *all* the zAttributes in a zVariable. Normally accessed as an attribute of a :class:`Var` in an open CDF: >>> epoch_attribs = cdffile['Epoch'].attrs Appears as a dictionary: keys are attribute names, values are the value of the zEntry associated with the appropriate zVariable. Each vAttribute in a CDF may only have a *single* entry associated with each variable. The entry may be a string, a single numerical value, or a series of numerical values. Entries with multiple values are returned as an entire list; direct access to the individual elements is not possible. Example: finding the first dependency of (ISTP-compliant) variable ``Flux``: >>> print cdffile['Flux'].attrs['DEPEND_0'] zAttributes are shared among zVariables, one zEntry allowed per zVariable. (pyCDF hides this detail.) Deleting the last zEntry for a zAttribute will delete the underlying zAttribute. zEntries are created and destroyed by the usual dict methods on the zAttrlist: >>> epoch_attribs['new_entry'] = [1, 2, 4] #assign a list to new zEntry >>> del epoch_attribs['new_entry'] #delete the zEntry The type of the zEntry is guessed from data provided. The type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing zEntry corresponding to this zVar #. other zEntries in this zAttribute #. the type of this zVar #. 
data-matching constraints described in :py:meth:`CDF.new` See Also ======== :class:`AttrList` """ AttrType = zAttr attr_name = 'zAttribute' global_scope = False def __init__(self, zvar): """Initialize the attribute collection @param zvar: zVariable these attributes are in @param zvar: :py:class:`pycdf.Var` """ super(zAttrList, self).__init__(zvar.cdf_file, zvar._num) self._zvar = zvar def __getitem__(self, name): """Find an zEntry by name @param name: name of the zAttribute to return @type name: str @return: attribute named L{name} @rtype: L{zAttr} @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if attrib.has_entry(zvar_num): attrib._raw = self._zvar._raw return attrib[zvar_num] else: raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) def __delitem__(self, name): """Delete an zEntry by name @param name: name of the zEntry to delete @type name: str @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library @note: If this is the only remaining entry, the Attribute will be deleted. """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(str(name) + ': no such attribute for variable ' + str(self._zvar._name)) del attrib[zvar_num] if len(attrib) == 0: attrib._delete() def __setitem__(self, name, data): """Sets a zEntry by name The type of the zEntry is guessed from L{data}. The type is chosen to match the data; subject to that constraint, it will try to match (in order): 1. existing zEntry corresponding to this zVar 2. other zEntries in this zAttribute 3. the type of this zVar 4. data-matching constraints described in L{_Hyperslice.types} @param name: name of zAttribute; zEntry for this zVariable will be set in zAttribute by this name @type name: str @raise CDFError: errors in CDF library @raise ValueError: if unable to find a valid CDF type matching L{data}, or if L{data} is the wrong dimensions. """ try: attr = super(zAttrList, self).__getitem__(name) except KeyError: attr = zAttr(self._cdf_file, name, True) attr._raw = self._zvar._raw attr[self._zvar._num()] = data def __len__(self): """Number of zAttributes in this variable @return: number of zAttributes in the CDF which have entries for this variable. @rtype: int """ length = 0 count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) current = 0 while current < count.value: candidate = zAttr(self._cdf_file, current) if not candidate.global_scope(): if candidate.has_entry(self._zvar._num()): length += 1 current += 1 return length def type(self, name, new_type=None): """Find or change the CDF type of a zEntry in this zVar @param name: name of the zAttr to check or change @type name: str @param new_type: type to change it to, see :py:mod:`pycdf.const` @type new_type: ctypes.c_long @return: CDF variable type, see :py:mod:`pycdf.const` @rtype: int @note: If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) return attrib.type(zvar_num, new_type) def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{zAttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name if new_name in self: del self[new_name] self.new(new_name, master[name], master.type(name) if hasattr(master, 'type') else None)
def __init__(self, cdf_file, attr_name, create=False):
    """Initialize this attribute

    @param cdf_file: CDF file containing this attribute
    @type cdf_file: :py:class:`pycdf.CDF`
    @param attr_name: Name of this attribute
    @type attr_name: str
    @param create: True to create attribute, False to look up existing.
    @type create: bool
    """
    self._cdf_file = cdf_file
    self._raw = False
    if isinstance(attr_name, str_classes):
        try:
            self._name = attr_name.encode('ascii')
        except AttributeError:
            self._name = attr_name
        attrno = ctypes.c_long()
        if create:
            self._cdf_file._call(const.CREATE_, const.ATTR_, self._name,
                                 self.SCOPE, ctypes.byref(attrno))
            self._cdf_file.add_attr_to_cache(
                self._name, attrno.value,
                self.SCOPE == const.GLOBAL_SCOPE)
        else:
            #Ensure exists, and populate cache. See scope note below
            attrno, scope = self._cdf_file.attr_num(self._name)
    else:
        name = ctypes.create_string_buffer(const.CDF_ATTR_NAME_LEN256 + 1)
        scope = ctypes.c_long(0)
        self._cdf_file._call(const.SELECT_, const.ATTR_,
                             ctypes.c_long(attr_name))
        #Because it's possible to create a gAttr Python object
        #referencing an Attribute with variable scope, and vice-versa,
        #do NOT assume the scope matches
        #(Higher level code checks for that being a bad thing.)
        self._cdf_file._call(
            const.GET_, const.ATTR_NAME_, name,
            const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope))
        self._name = name.value.rstrip()
        if scope.value == const.GLOBAL_SCOPE.value:
            scope = True
        elif scope.value == const.VARIABLE_SCOPE.value:
            scope = False
        else:
            raise CDFError(const.BAD_SCOPE)
        self._cdf_file.add_attr_to_cache(self._name, attr_name, scope)
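For orientation, user code does not normally call this constructor directly; it is reached through the attribute lists. A rough sketch of the two paths (lookup by name versus explicit creation), assuming ``cdf`` is an open, writable CDF object and the attribute names are placeholders:

>>> text_attr = cdf.attrs['TEXT']            # AttrList.__getitem__ -> gAttr(cdf, 'TEXT')
>>> cdf.attrs.new('Mission', data='Demo')    # AttrList.new -> gAttr(cdf, 'Mission', True)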
4040
4085
#!/usr/bin/env python # -*- coding: utf-8 -*- """ das developers note: This a is modification of the original SpacePy pycdf package. All refereneces to the greater spacepy package have been removed to create a small standalone module. --cwp 2018-10-18 The libcdf.so location code has been changed to find the version installed in anaconda. --cwp 2020-04-06 This package provides a Python interface to the Common Data Format (CDF) library used for many NASA missions, available at http://cdf.gsfc.nasa.gov/. It is targeted at Python 2.6+ and should work without change on either Python 2 or Python 3. The interface is intended to be 'pythonic' rather than reproducing the C interface. To open or close a CDF and access its variables, see the :class:`CDF` class. Accessing data within the variables is via the :class:`Var` class. The :data:`lib` object provides access to some routines that affect the functionality of the library in general. The :mod:`~pycdf.const` module contains constants useful for accessing the underlying library. Authors: Jon Niehof Institution: University of New Hampshire Contact: [email protected] Copyright 2010-2015 Los Alamos National Security, LLC. """ __contact__ = 'Jon Niehof, [email protected]' try: from collections.abc import MutableMapping, MutableSequence except ImportError: from collections import MutableMapping, MutableSequence import ctypes import ctypes.util import datetime import operator import os import os.path import shutil import sys import tempfile import warnings import weakref import numpy import numpy.ma #Import const AFTER library loaded, so failed load doesn't leave half-imported #from . import const try: str_classes = (str, bytes, unicode) except NameError: str_classes = (str, bytes) class Library(object): """ Abstraction of the base CDF C library and its state. Not normally intended for end-user use. An instance of this class is created at package load time as the :data:`~pycdf.lib` variable, providing access to the underlying C library if necessary. The CDF library itself is described in section 2.1 of the CDF user's guide, as well as the CDF C reference manual. Calling the C library directly requires knowledge of :mod:`ctypes`. Instantiating this object loads the C library, see :doc:`/pycdf` docs for details. .. autosummary:: ~Library.call ~Library.check_status ~Library.datetime_to_epoch ~Library.datetime_to_epoch16 ~Library.datetime_to_tt2000 ~Library.epoch_to_datetime ~Library.epoch_to_epoch16 ~Library.epoch_to_num ~Library.epoch_to_tt2000 ~Library.epoch16_to_datetime ~Library.epoch16_to_epoch ~Library.epoch16_to_tt2000 ~Library.set_backward supports_int8 ~Library.tt2000_to_datetime ~Library.tt2000_to_epoch ~Library.tt2000_to_epoch16 v_datetime_to_epoch v_datetime_to_epoch16 v_datetime_to_tt2000 v_epoch_to_datetime v_epoch_to_tt2000 v_epoch16_to_datetime v_epoch16_to_tt2000 v_tt2000_to_datetime v_tt2000_to_epoch v_tt2000_to_epoch16 libpath version .. automethod:: call .. automethod:: check_status .. automethod:: datetime_to_epoch .. automethod:: datetime_to_epoch16 .. automethod:: datetime_to_tt2000 .. automethod:: epoch_to_datetime .. automethod:: epoch_to_epoch16 .. automethod:: epoch_to_num .. automethod:: epoch_to_tt2000 .. automethod:: epoch16_to_datetime .. automethod:: epoch16_to_epoch .. automethod:: epoch16_to_tt2000 .. automethod:: set_backward .. attribute:: supports_int8 True if this library supports INT8 and TIME_TT2000 types; else False. .. automethod:: tt2000_to_datetime .. automethod:: tt2000_to_epoch .. 
automethod:: tt2000_to_epoch16 .. method:: v_datetime_to_epoch(datetime) A vectorized version of :meth:`datetime_to_epoch` which takes a numpy array of datetimes as input and returns an array of epochs. .. method:: v_datetime_to_epoch16(datetime) A vectorized version of :meth:`datetime_to_epoch16` which takes a numpy array of datetimes as input and returns an array of epoch16. .. method:: v_datetime_to_tt2000(datetime) A vectorized version of :meth:`datetime_to_tt2000` which takes a numpy array of datetimes as input and returns an array of TT2000. .. method:: v_epoch_to_datetime(epoch) A vectorized version of :meth:`epoch_to_datetime` which takes a numpy array of epochs as input and returns an array of datetimes. .. method:: v_epoch_to_tt2000(epoch) A vectorized version of :meth:`epoch_to_tt2000` which takes a numpy array of epochs as input and returns an array of tt2000s. .. method:: v_epoch16_to_datetime(epoch0, epoch1) A vectorized version of :meth:`epoch16_to_datetime` which takes a numpy array of epoch16 as input and returns an array of datetimes. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_epoch16_to_tt2000(epoch16) A vectorized version of :meth:`epoch16_to_tt2000` which takes a numpy array of epoch16 as input and returns an array of tt2000s. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_tt2000_to_datetime(tt2000) A vectorized version of :meth:`tt2000_to_datetime` which takes a numpy array of tt2000 as input and returns an array of datetimes. .. method:: v_tt2000_to_epoch(tt2000) A vectorized version of :meth:`tt2000_to_epoch` which takes a numpy array of tt2000 as input and returns an array of epochs. .. method:: v_tt2000_to_epoch16(tt2000) A vectorized version of :meth:`tt2000_to_epoch16` which takes a numpy array of tt2000 as input and returns an array of epoch16. .. attribute:: libpath The path where pycdf found the CDF C library, potentially useful in debugging. If this contains just the name of a file (with no path information), then the system linker found the library for pycdf. On Linux, ``ldconfig -p`` may be useful for displaying the system's library resolution. .. attribute:: version Version of the CDF library, (version, release, increment, subincrement) """ def __init__(self, libpath=None, library=None): """Load the CDF C library. Searches for the library in the order: 1. Appropriately-named file in CDF_LIB 2. Appropriately-named file in CDF_BASE 3. Standard library search path @raise CDFError: BAD_DATA_TYPE if can't map types properly """ if not 'CDF_TMP' in os.environ: os.environ['CDF_TMP'] = tempfile.gettempdir() if not library: if not libpath: self.libpath, self._library = self._find_lib() if self._library is None: raise Exception(( 'Cannot load CDF C library; checked {0}. 
' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(self.libpath))) else: self._library = ctypes.CDLL(libpath) self.libpath = libpath else: self._library = library self.libpath = libpath self._library.CDFlib.restype = ctypes.c_long #commonly used, so set it up here self._library.EPOCHbreakdown.restype = ctypes.c_long self._library.computeEPOCH.restype = ctypes.c_double self._library.computeEPOCH.argtypes = [ctypes.c_long] * 7 self._library.computeEPOCH16.restype = ctypes.c_double self._library.computeEPOCH16.argtypes = [ctypes.c_long] * 10 + \ [ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDFsetFileBackward'): self._library.CDFsetFileBackward.restype = None self._library.CDFsetFileBackward.argtypes = [ctypes.c_long] #Map old name to the 3.7.1+ name if not hasattr(self._library, 'computeTT2000') \ and hasattr(self._library, 'CDF_TT2000_from_UTC_parts'): self._library.computeTT2000 \ = self._library.CDF_TT2000_from_UTC_parts if hasattr(self._library, 'computeTT2000'): self._library.computeTT2000.restype = ctypes.c_longlong self._library.computeTT2000.argtypes = \ [ctypes.c_double] *9 #Map old name to the 3.7.1+ name if not hasattr(self._library, 'breakdownTT2000') \ and hasattr(self._library, 'CDF_TT2000_to_UTC_parts'): self._library.breakdownTT2000 \ = self._library.CDF_TT2000_to_UTC_parts if hasattr(self._library, 'breakdownTT2000'): self._library.breakdownTT2000.restype = None self._library.breakdownTT2000.argtypes = \ [ctypes.c_longlong] + [ctypes.POINTER(ctypes.c_double)] * 9 if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH'): self._library.CDF_TT2000_to_UTC_EPOCH.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH.argtypes = [ctypes.c_longlong] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH'): self._library.CDF_TT2000_from_UTC_EPOCH.restype = ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH.argtypes = [ctypes.c_double] if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH16'): self._library.CDF_TT2000_to_UTC_EPOCH16.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH16.argtypes = \ [ctypes.c_longlong, ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH16'): self._library.CDF_TT2000_from_UTC_EPOCH16.restype = \ ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH16.argtypes = \ [ctypes.POINTER(ctypes.c_double * 2)] #Get CDF version information ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) sub = ctypes.c_char(b' ') self.call(const.GET_, const.LIB_VERSION_, ctypes.byref(ver), const.GET_, const.LIB_RELEASE_, ctypes.byref(rel), const.GET_, const.LIB_INCREMENT_, ctypes.byref(inc), const.GET_, const.LIB_subINCREMENT_, ctypes.byref(sub)) ver = ver.value rel = rel.value inc = inc.value sub = sub.value self.version = (ver, rel, inc, sub) self._del_middle_rec_bug = ver < 3 or (ver == 3 and (rel < 4 or (rel == 4 and inc < 1))) self.supports_int8 = (ver > 3 or (ver == 3 and rel >=4)) self.cdftypenames = {const.CDF_BYTE.value: 'CDF_BYTE', const.CDF_CHAR.value: 'CDF_CHAR', const.CDF_INT1.value: 'CDF_INT1', const.CDF_UCHAR.value: 'CDF_UCHAR', const.CDF_UINT1.value: 'CDF_UINT1', const.CDF_INT2.value: 'CDF_INT2', const.CDF_UINT2.value: 'CDF_UINT2', const.CDF_INT4.value: 'CDF_INT4', const.CDF_UINT4.value: 'CDF_UINT4', const.CDF_INT8.value: 'CDF_INT8', const.CDF_FLOAT.value: 'CDF_FLOAT', const.CDF_REAL4.value: 'CDF_REAL4', const.CDF_DOUBLE.value: 'CDF_DOUBLE', const.CDF_REAL8.value: 'CDF_REAL8', const.CDF_EPOCH.value: 'CDF_EPOCH', 
const.CDF_EPOCH16.value: 'CDF_EPOCH16', const.CDF_TIME_TT2000.value: 'CDF_TIME_TT2000', } self.numpytypedict = {const.CDF_BYTE.value: numpy.int8, const.CDF_CHAR.value: numpy.int8, const.CDF_INT1.value: numpy.int8, const.CDF_UCHAR.value: numpy.uint8, const.CDF_UINT1.value: numpy.uint8, const.CDF_INT2.value: numpy.int16, const.CDF_UINT2.value: numpy.uint16, const.CDF_INT4.value: numpy.int32, const.CDF_UINT4.value: numpy.uint32, const.CDF_INT8.value: numpy.int64, const.CDF_FLOAT.value: numpy.float32, const.CDF_REAL4.value: numpy.float32, const.CDF_DOUBLE.value: numpy.float64, const.CDF_REAL8.value: numpy.float64, const.CDF_EPOCH.value: numpy.float64, const.CDF_EPOCH16.value: numpy.dtype((numpy.float64, 2)), const.CDF_TIME_TT2000.value: numpy.int64, } self.timetypes = [const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value] if not self.supports_int8: del self.cdftypenames[const.CDF_INT8.value] del self.numpytypedict[const.CDF_INT8.value] del self.cdftypenames[const.CDF_TIME_TT2000.value] del self.numpytypedict[const.CDF_TIME_TT2000.value] elif sys.platform.startswith('linux') \ and os.uname()[4].startswith('arm') \ and hasattr(self._library, 'computeTT2000') \ and self._library.computeTT2000( 2010, 1, 1, 0, 0, 0, 0, 0, 0) != 315576066184000000: #TT2000 call failed, so probably need to type-pun #double arguments to variadic functions. #Calling convention for non-variadic functions with floats #is unique, but convention for ints is same as variadic. #So type-pun arguments to integers to force that calling #convention. if ctypes.sizeof(ctypes.c_longlong) != \ ctypes.sizeof(ctypes.c_double): warnings.warn('ARM with unknown type sizes; ' 'TT2000 functions will not work.') else: self._library.computeTT2000.argtypes = \ [ctypes.c_longlong] * 9 c_ll_p = ctypes.POINTER(ctypes.c_longlong) if self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( 2010)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) != 315576066184000000: warnings.warn('ARM with unknown calling convention; ' 'TT2000 functions will not work.') self.datetime_to_tt2000 = self._datetime_to_tt2000_typepunned v_epoch16_to_datetime = numpy.frompyfunc( self.epoch16_to_datetime, 2, 1) self.v_epoch16_to_datetime = \ lambda x: v_epoch16_to_datetime(x[..., 0], x[..., 1]) self.v_epoch_to_datetime = numpy.frompyfunc( self.epoch_to_datetime, 1, 1) self.v_tt2000_to_datetime = numpy.frompyfunc( self.tt2000_to_datetime, 1, 1) self.v_datetime_to_epoch = numpy.vectorize( self.datetime_to_epoch, otypes=[numpy.float64]) v_datetime_to_epoch16 = numpy.frompyfunc( self.datetime_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. 
We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_datetime_to_epoch16(x): retval = numpy.require(v_datetime_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_datetime_to_epoch16 = _v_datetime_to_epoch16 self.v_datetime_to_tt2000 = numpy.vectorize( self.datetime_to_tt2000, otypes=[numpy.int64]) self.v_epoch_to_tt2000 = numpy.vectorize( self.epoch_to_tt2000, otypes=[numpy.int64]) self.v_tt2000_to_epoch = numpy.vectorize( self.tt2000_to_epoch, otypes=[numpy.float64]) v_epoch16_to_tt2000 = numpy.frompyfunc( self.epoch16_to_tt2000, 2, 1) self.v_epoch16_to_tt2000 = \ lambda x: v_epoch16_to_tt2000(x[..., 0], x[..., 1]) v_tt2000_to_epoch16 = numpy.frompyfunc( self.tt2000_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_tt2000_to_epoch16(x): retval = numpy.require(v_tt2000_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_tt2000_to_epoch16 = _v_tt2000_to_epoch16 if not self.supports_int8: self.datetime_to_tt2000 = self._bad_tt2000 self.tt2000_to_datetime = self._bad_tt2000 self.v_datetime_to_tt2000 = self._bad_tt2000 self.v_tt2000_to_datetime = self._bad_tt2000 self.epoch_to_tt2000 = self._bad_tt2000 self.v_epoch_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch = self._bad_tt2000 self.v_tt2000_to_epoch = self._bad_tt2000 self.epoch_16_to_tt2000 = self._bad_tt2000 self.v_epoch16_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch16 = self._bad_tt2000 self.v_tt2000_to_epoch16 = self._bad_tt2000 #Default to V2 CDF self.set_backward(True) @staticmethod def _find_lib(): """ Search for the CDF library Searches in likely locations for CDF libraries and attempts to load them. Stops at first successful load and, if fails, reports all the files that were tried as libraries. Returns ======= out : tuple This is either (path to library, loaded library) or, in the event of failure, (None, list of libraries tried) """ failed = [] for libpath in Library._lib_paths(): try: lib = ctypes.CDLL(libpath) except: failed.append(libpath) else: return libpath, lib return (failed, None) @staticmethod def _lib_paths(): """Find candidate paths for the CDF library Does not check that the library is actually in any particular directory, just returns a list of possible locations, in priority order. Returns ======= out : generator of str paths that look like the CDF library """ #What the library might be named names = { 'win32': ['cdf.dll'], 'darwin': ['libcdf.dylib', 'cdf.dylib', 'libcdf.so'], 'linux2': ['libcdf.so'], 'linux': ['libcdf.so'], } names = names.get(sys.platform, ['libcdf.so']) #All existing CDF-library-like paths within a directory search_dir = lambda x: \ [os.path.join(x, fname) for fname in names if os.path.exists(os.path.join(x, fname))] # Only use anaconda locations... # Defined during builds ... if 'PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['PREFIX'], 'lib')): yield p # defined when conda is activated ... 
if 'CONDA_PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'lib')): yield p # Special subdirectory for anaconda unix packages on windows if 'LIBRARY_BIN' in os.environ: for p in search_dir(os.environ['LIBRARY_BIN']): yield p ctypespath = ctypes.util.find_library( 'cdf.dll' if sys.platform == 'win32' else 'cdf') if ctypespath: yield ctypespath def check_status(self, status, ignore=()): """ Raise exception or warning based on return status of CDF call Parameters ========== status : int status returned by the C library Other Parameters ================ ignore : sequence of ctypes.c_long CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. (Default none). Raises ====== CDFError : if status < CDF_WARN, indicating an error Warns ===== CDFWarning : if CDF_WARN <= status < CDF_OK, indicating a warning. Returns ======= out : int status (unchanged) """ if status == const.CDF_OK or status in ignore: return status if status < const.CDF_WARN: raise CDFError(status) else: warning = CDFWarning(status) warning.warn() return status def call(self, *args, **kwargs): """ Call the CDF internal interface Passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with :meth:`check_status`. Terminal NULL is automatically added to args. Parameters ========== args : various, see :mod:`ctypes` Passed directly to the CDF library interface. Useful constants are defined in the :mod:`~pycdf.const` module. Other Parameters ================ ignore : sequence of CDF statuses sequence of CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. Returns ======= out : int CDF status from the library Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ if 'ignore' in kwargs: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) ), kwargs['ignore']) else: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) )) def set_backward(self, backward=True): """ Set backward compatibility mode for new CDFs Unless backward compatible mode is set, CDF files created by the version 3 library can not be read by V2. Parameters ========== backward : boolean Set backward compatible mode if True; clear it if False. Raises ====== ValueError : if backward=False and underlying CDF library is V2 """ if self.version[0] < 3: if not backward: raise ValueError( 'Cannot disable backward-compatible mode for CDF version 2.') else: return self._library.CDFsetFileBackward(const.BACKWARDFILEon if backward else const.BACKWARDFILEoff) def epoch_to_datetime(self, epoch): """ Converts a CDF epoch value to a datetime Parameters ========== epoch : float epoch value from CDF Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
See Also ======== v_epoch_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) self._library.EPOCHbreakdown(ctypes.c_double(epoch), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999000) else: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, msec.value * 1000) def datetime_to_epoch(self, dt): """ Converts a Python datetime to a CDF Epoch value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : float epoch corresponding to dt See Also ======== v_datetime_to_epoch """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) micro = dt.microsecond % 1000 if micro >= 500 and dt.year < 9999: dt += datetime.timedelta(0, 0, 1000) return self._library.computeEPOCH(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000)) def epoch16_to_datetime(self, epoch0, epoch1): """ Converts a CDF epoch16 value to a datetime .. note:: The call signature has changed since SpacePy 0.1.2. Formerly this method took a single argument with two values; now it requires two arguments (one for each value). To convert existing code, replace ``epoch16_to_datetime(epoch)`` with ``epoch16_to_datetime(*epoch)``. Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. See Also ======== v_epoch16_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) usec = ctypes.c_long(0) nsec = ctypes.c_long(0) psec = ctypes.c_long(0) self._library.EPOCH16breakdown((ctypes.c_double * 2)(epoch0, epoch1), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec), ctypes.byref(psec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) micro = int(float(msec.value) * 1000 + float(usec.value) + float(nsec.value) / 1000 + float(psec.value) / 1e6 + 0.5) if micro < 1000000: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_epoch16(self, dt): """ Converts a Python datetime to a CDF Epoch16 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : list of float epoch16 corresponding to dt See Also ======== v_datetime_to_epoch16 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) #Default to "illegal epoch" epoch16 = (ctypes.c_double * 2)(-1., -1.) 
if self._library.computeEPOCH16(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0, 0, epoch16): return (-1., -1.) #Failure, so illegal epoch return (epoch16[0], epoch16[1]) def epoch_to_epoch16(self, epoch): """ Converts a CDF EPOCH to a CDF EPOCH16 value Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : (double, double) EPOCH16 corresponding to epoch """ e = numpy.require(epoch, numpy.float64) s = numpy.trunc(e / 1000.0) #ugly numpy stuff, probably a better way.... res = numpy.hstack((s, (e - s * 1000.0) * 1e9)) if len(res) <= 2: return res newshape = list(res.shape[0:-2]) newshape.append(res.shape[-1] // 2) newshape.append(2) return numpy.rollaxis(res.reshape(newshape), -1, -2) def epoch_to_num(self, epoch): """ Convert CDF EPOCH to matplotlib number. Same output as :func:`~matplotlib.dates.date2num` and useful for plotting large data sets without converting the times through datetime. Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : double Floating point number representing days since 0001-01-01. """ #date2num day 1 is 1/1/1 00UT #epoch 1/1/1 00UT is 31622400000.0 (millisecond) return (epoch - 31622400000.0) / (24 * 60 * 60 * 1000.0) + 1.0 def epoch16_to_epoch(self, epoch16): """ Converts a CDF EPOCH16 to a CDF EPOCH value Parameters ========== epoch16 : (double, double) EPOCH16 to convert. Lists and numpy arrays are acceptable. LAST dimension should be 2: the two pairs of EPOCH16 Returns ======= out : double EPOCH corresponding to epoch16 """ e = numpy.require(epoch16, numpy.float64) return e[..., 0] * 1000.0 + numpy.round(e[..., 1] / 1e9) def tt2000_to_datetime(self, tt2000): """ Converts a CDF TT2000 value to a datetime .. note:: Although TT2000 values support leapseconds, Python's datetime object does not. Any times after 23:59:59.999999 will be truncated to 23:59:59.999999. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
See Also ======== v_tt2000_to_datetime """ yyyy = ctypes.c_double(0) mm = ctypes.c_double(0) dd = ctypes.c_double(0) hh = ctypes.c_double(0) min = ctypes.c_double(0) sec = ctypes.c_double(0) msec = ctypes.c_double(0) usec = ctypes.c_double(0) nsec = ctypes.c_double(0) self._library.breakdownTT2000( ctypes.c_longlong(tt2000), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) sec = int(sec.value) if sec >= 60: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), 59, 999999) micro = int(msec.value * 1000 + usec.value + nsec.value / 1000 + 0.5) if micro < 1000000: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_tt2000(self, dt): """ Converts a Python datetime to a CDF TT2000 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0) def _datetime_to_tt2000_typepunned(self, dt): """ Converts a Python datetime to a CDF TT2000 value Typepunned version that passes doubles as longlongs, to get around ARM calling convention oddness. Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ c_ll_p = ctypes.POINTER(ctypes.c_longlong) if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( dt.year)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.month)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.day)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.hour)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.minute)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.second)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond // 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond % 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) def epoch_to_tt2000(self, epoch): """ Converts a CDF EPOCH to a CDF TT2000 value Parameters ========== epoch : double EPOCH to convert Returns ======= out : int tt2000 corresponding to epoch See Also ======== v_epoch_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH(epoch) def tt2000_to_epoch(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH .. note:: Although TT2000 values support leapseconds, CDF EPOCH values do not. 
Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double EPOCH corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch """ return self._library.CDF_TT2000_to_UTC_EPOCH(tt2000) def epoch16_to_tt2000(self, epoch0, epoch1): """ Converts a CDF epoch16 value to TT2000 .. note:: Because TT2000 does not support picoseconds, the picoseconds value in epoch is ignored (i.e., truncated.) Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : long TT2000 corresponding to epoch. See Also ======== v_epoch16_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH16( (ctypes.c_double * 2)(epoch0, epoch1)) def tt2000_to_epoch16(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH16 .. note:: Although TT2000 values support leapseconds, CDF EPOCH16 values do not. Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double, double EPOCH16 corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch16 """ #Default to "illegal epoch" if isn't populated epoch16 = (ctypes.c_double * 2)(-1., -1.) if self._library.CDF_TT2000_to_UTC_EPOCH16(tt2000, epoch16): return (-1., -1.) #Failure; illegal epoch return (epoch16[0], epoch16[1]) def _bad_tt2000(*args, **kwargs): """Convenience function for complaining that TT2000 not supported""" raise NotImplementedError( 'TT2000 functions require CDF library 3.4.0 or later') def download_library(): """Download and install the CDF library""" if sys.platform != 'win32': raise NotImplementedError( 'CDF library install only supported on Windows') try: import html.parser as HTMLParser except ImportError: import HTMLParser #https://stackoverflow.com/questions/1713038/super-fails-with-error-typeerror-argument-1-must-be-type-not-classobj class LinkParser(HTMLParser.HTMLParser, object): def __init__(self, *args, **kwargs): self.links_found = [] super(LinkParser, self).__init__(*args, **kwargs) def handle_starttag(self, tag, attrs): if tag != 'a' or attrs[0][0] != 'href': return self.links_found.append(attrs[0][1]) import re import subprocess try: import urllib.request as u except ImportError: import urllib as u # Removed reference to spacepy #import spacepy #if spacepy.config.get('user_agent', None): # class AppURLopener(u.FancyURLopener): # version = spacepy.config['user_agent'] # u._urlopener = AppURLopener() baseurl = 'https://spdf.sci.gsfc.nasa.gov/pub/software/cdf/dist/' url = u.urlopen(baseurl) listing = url.read() url.close() p = LinkParser() p.feed(listing) cdfdist = [l for l in p.links_found if re.match('^cdf3\d_\d(?:_\d)?/$', l)] if not cdfdist: raise RuntimeError( "Couldn't find CDF distribution directory to download") cdfdist.sort(key=lambda x: x.rstrip('/').split('_')) cdfverbase = cdfdist[-1].rstrip('/') instfname = cdfverbase + ('_0' if cdfverbase.count('_') == 1 else '') + \ '-setup-{0}.exe'.format(len('%x' % sys.maxsize)*4) insturl = baseurl + cdfverbase + '/windows/' + instfname tmpdir = tempfile.mkdtemp() try: fname, status = u.urlretrieve(insturl, os.path.join(tmpdir, instfname)) subprocess.check_call([fname, '/install', '/q1'], shell=False) finally: shutil.rmtree(tmpdir) _libpath, _library = Library._find_lib() 
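# Illustrative sketch only (hypothetical helper, never called): shows the
# round-trip time conversions documented above, using the module-level 'lib'
# instance that is created just below.
def _example_time_conversions():
    dt = datetime.datetime(2010, 1, 1)
    epoch = lib.datetime_to_epoch(dt)            # CDF_EPOCH: milliseconds since year 0
    assert lib.epoch_to_datetime(epoch) == dt    # exact round trip at millisecond resolution
    if lib.supports_int8:                        # TT2000 requires CDF library 3.4.0+
        tt2000 = lib.datetime_to_tt2000(dt)      # nanoseconds since J2000, leapsecond-aware
        assert lib.tt2000_to_datetime(tt2000) == dt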
if _library is None: raise Exception(('Cannot load CDF C library; checked {0}. ' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(_libpath))) from . import const lib = Library(_libpath, _library) """Module global library object. Initalized at module load time so all classes have ready access to the CDF library and a common state. E.g: >>> import pycdf >>> pycdf.lib.version (3, 3, 0, ' ') """ class CDFException(Exception): """ Base class for errors or warnings in the CDF library. Not normally used directly, but in subclasses :class:`CDFError` and :class:`CDFWarning`. Error messages provided by this class are looked up from the underlying C library. """ def __init__(self, status): """ Create a CDF Exception Uses CDF C library to look up an appropriate error message. Parameters ========== status : ctypes.c_long CDF status """ self.status = status self.string = 'CDF error ' + repr(status) + ', unable to get details.' message = ctypes.create_string_buffer(const.CDF_STATUSTEXT_LEN + 1) try: retval = lib._library.CDFlib(const.SELECT_, const.CDF_STATUS_, ctypes.c_long(status), const.GET_, const.STATUS_TEXT_, message, const.NULL_) if retval == const.CDF_OK: if isinstance(message.value, str): self.string = message.value elif isinstance(message.value, bytes): self.string = message.value.decode() except: pass def __str__(self): """ Error string associated with the library error. Returns ======= out : str Error message from the CDF library. """ return self.string class CDFError(CDFException): """Raised for an error in the CDF library.""" pass class CDFWarning(CDFException, UserWarning): """Used for a warning in the CDF library.""" def warn(self, level=4): """ Issues a warning based on the information stored in my exception Intended for use in check_status or similar wrapper function. Other Parameters ================ level : int optional (default 3), how far up the stack the warning should be reported. Passed directly to :class:`warnings.warn`. """ warnings.warn(self, self.__class__, level) class EpochError(Exception): """Used for errors in epoch routines""" pass def _compress(obj, comptype=None, param=None): """Set or check the compression of a :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param obj: object on which to set or check compression @type obj: :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param comptype: type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :py:mod:`pycdf.const`. If not specified, will not change compression. @type comptype: ctypes.c_long @param param: Compression parameter, see CDF CRM 4.10 and :py:mod:`pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
@type param: ctypes.c_long @return: (comptype, param) currently in effect @rtype: tuple """ if isinstance(obj, CDF): COMPRESSION_ = const.CDF_COMPRESSION_ elif isinstance(obj, Var): COMPRESSION_ = const.zVAR_COMPRESSION_ else: raise ValueError('Must specify a CDF or Var type.') validparams = {const.NO_COMPRESSION.value: [ctypes.c_long(0)], const.RLE_COMPRESSION.value: [const.RLE_OF_ZEROs], const.HUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.AHUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.GZIP_COMPRESSION.value: [ctypes.c_long(5), ctypes.c_long(1), ctypes.c_long(2), ctypes.c_long(3), ctypes.c_long(4), ctypes.c_long(6), ctypes.c_long(7), ctypes.c_long(8), ctypes.c_long(9), ], } comptypes = [const.NO_COMPRESSION, const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION, const.GZIP_COMPRESSION] comptypevalues = [i.value for i in comptypes] if comptype != None: if not hasattr(comptype, 'value'): comptype = ctypes.c_long(comptype) if param is None: if not comptype.value in validparams: raise CDFError(const.BAD_COMPRESSION) param = validparams[comptype.value][0] paramlist = (ctypes.c_long * 1)(param) obj._call(const.PUT_, COMPRESSION_, comptype, paramlist) params = (ctypes.c_long * const.CDF_MAX_PARMS)(*([0] * const.CDF_MAX_PARMS)) comptype = ctypes.c_long(0) percent = ctypes.c_long(0) obj._call(const.GET_, COMPRESSION_, ctypes.byref(comptype), ctypes.byref(params), ctypes.byref(percent)) param = params[0] if not comptype.value in comptypevalues: raise CDFError(const.BAD_COMPRESSION) validparamvalues = [i.value for i in validparams[comptype.value]] if not param in validparamvalues: raise CDFError(const.BAD_COMPRESSION_PARM) comptype = comptypes[comptypevalues.index(comptype.value)] if comptype in (const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION): param = validparams[comptype.value][validparamvalues.index(param)] return (comptype, param) class CDF(MutableMapping): """ Python object representing a CDF file. Open or create a CDF file by creating an object of this class. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. A readonly CDF with many variables may be slow to close. See :meth:`readonly`. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :meth:`close` or :meth:`save` when done. .. note:: Existing CDF files are opened read-only by default, see :meth:`readonly` to change. CDF supports the `with <http://docs.python.org/tutorial/inputoutput.html#methods-of-file-objects>`_ keyword, like other file objects, so: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... #do brilliant things with the CDF will open the CDF, execute the indented statements, and close the CDF when finished or when an error occurs. The `python docs <http://docs.python.org/reference/compound_stmts.html#with>`_ include more detail on this 'context manager' ability. 
CDF objects behave like a python `dictionary <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_, where the keys are names of variables in the CDF, and the values, :class:`Var` objects. As a dictionary, they are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_ and it is easy to loop over all of the variables in a file. Some examples: #. List the names of all variables in the open CDF ``cdffile``: >>> cdffile.keys() >>> for k in cdffile: #Alternate ... print(k) #. Get a :class:`Var` object for the variable named ``Epoch``: >>> epoch = cdffile['Epoch'] #. Determine if a CDF contains a variable named ``B_GSE``: >>> if 'B_GSE' in cdffile: ... print('B_GSE is in the file') ... else: ... print('B_GSE is not in the file') #. Find how many variables are in the file: >>> print(len(cdffile)) #. Delete the variable ``Epoch`` from the open CDF file ``cdffile``: >>> del cdffile['Epoch'] #. Display a summary of variables and types in open CDF file ``cdffile``: >>> print(cdffile) #. Open the CDF named ``cdf_filename.cdf``, read *all* the data from all variables into dictionary ``data``, and close it when done or if an error occurs: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... data = cdffile.copy() This last example can be very inefficient as it reads the entire CDF. Normally it's better to treat the CDF as a dictionary and access only the data needed, which will be pulled transparently from disc. See :class:`Var` for more subtle examples. Potentially useful dictionary methods and related functions: - `in <http://docs.python.org/reference/expressions.html#in>`_ - `keys <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_ - :py:func:`len` - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - :py:func:`sorted` - :py:func:`~spacepy.toolbox.dictree` The CDF user's guide section 2.2 has more background information on CDF files. The :attr:`~CDF.attrs` Python attribute acts as a dictionary referencing CDF attributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`gAttrList` for more on the dictionary of global attributes. Creating a new CDF from a master (skeleton) CDF has similar syntax to opening one: >>> cdffile = pycdf.CDF('cdf_filename.cdf', 'master_cdf_filename.cdf') This creates and opens ``cdf_filename.cdf`` as a copy of ``master_cdf_filename.cdf``. Using a skeleton CDF is recommended over making a CDF entirely from scratch, but this is possible by specifying a blank master: >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') When CDFs are created in this way, they are opened read-write, see :py:meth:`readonly` to change. By default, new CDFs (without a master) are created in version 2 (backward-compatible) format. To create a version 3 CDF, use :meth:`Library.set_backward`: >>> pycdf.lib.set_backward(False) >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') Add variables by direct assignment, which will automatically set type and dimension based on the data provided: >>> cdffile['new_variable_name'] = [1, 2, 3, 4] or, if more control is needed over the type and dimensions, use :py:meth:`new`. Although it is supported to assign Var objects to Python variables for convenience, there are some minor pitfalls that can arise when changing a CDF that will not affect most users. 
This is only a concern when assigning a zVar object to a Python variable, changing the CDF through some other variable, and then trying to use the zVar object via the originally assigned variable. Deleting a variable: >>> var = cdffile['Var1'] >>> del cdffile['Var1'] >>> var[0] #fail, no such variable Renaming a variable: >>> var = cdffile['Var1'] >>> cdffile['Var1'].rename('Var2') >>> var[0] #fail, no such variable Renaming via the same variable works: >>> var = cdffile['Var1'] >>> var.rename('Var2') >>> var[0] #succeeds, aware of new name Deleting a variable and then creating another variable with the same name may lead to some surprises: >>> var = cdffile['Var1'] >>> var[...] = [1, 2, 3, 4] >>> del cdffile['Var1'] >>> cdffile.new('Var1', data=[5, 6, 7, 8] >>> var[...] [5, 6, 7, 8] .. autosummary:: ~CDF.attr_num ~CDF.attrs ~CDF.add_attr_to_cache ~CDF.add_to_cache ~CDF.backward ~CDF.checksum ~CDF.clear_attr_from_cache ~CDF.clear_from_cache ~CDF.clone ~CDF.close ~CDF.col_major ~CDF.compress ~CDF.copy ~CDF.from_data ~CDF.new ~CDF.raw_var ~CDF.readonly ~CDF.save ~CDF.var_num ~CDF.version .. attribute:: CDF.attrs Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. .. attribute:: CDF.backward True if this CDF was created in backward-compatible mode (for opening with CDF library before 3.x) .. automethod:: add_to_cache .. automethod:: add_attr_to_cache .. automethod:: attr_num .. automethod:: checksum .. automethod:: clear_from_cache .. automethod:: clear_attr_from_cache .. automethod:: clone .. automethod:: close .. automethod:: col_major .. automethod:: compress .. automethod:: copy .. automethod:: from_data .. automethod:: new .. automethod:: raw_var .. automethod:: readonly .. automethod:: save .. automethod:: var_num .. automethod:: version """ def __init__(self, pathname, masterpath=None, create=None, readonly=None): """Open or create a CDF file. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. Raises ====== CDFError if CDF library reports an error CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save` when done. 
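The same constructor also creates new files. As a sketch (assuming
``new_filename.cdf`` does not already exist, since creation fails on an
existing file), make an empty CDF, opened read/write:

>>> cdffile = pycdf.CDF('new_filename.cdf', '')

or copy the structure of a master (skeleton) CDF:

>>> cdffile = pycdf.CDF('new_filename.cdf', 'master_cdf_filename.cdf')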
""" if masterpath is not None: #Looks like we want to create if create is False: raise ValueError('Cannot specify a master CDF without creating a CDF') if readonly is True: raise ValueError('Cannot create a CDF in readonly mode') if create and readonly: raise ValueError('Cannot create a CDF in readonly mode') try: self.pathname = pathname.encode() except AttributeError: raise ValueError( 'pathname must be string-like: {0}'.format(pathname)) self._handle = ctypes.c_void_p(None) self._opened = False if masterpath is None and not create: self._open(True if readonly is None else readonly) elif masterpath: self._from_master(masterpath.encode()) else: self._create() lib.call(const.SELECT_, const.CDF_zMODE_, ctypes.c_long(2)) self._attrlistref = weakref.ref(gAttrList(self)) self.backward = self.version()[0] < 3 self._var_nums = {} """Cache of name-to-number mappings for variables in this CDF""" self._attr_info = {} """Cache of name-to-(number, global) mappings for attributes in this CDF""" def __del__(self): """Destructor; called when CDF object is destroyed. Close CDF file if there is still a valid handle. .. note:: To avoid data loss, explicitly call :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save`. """ if self._opened: self.close() def __delitem__(self, name): """Delete a zVariable in this CDF, by name or number Parameters ========== name : string or int Name or number of the CDF variable .. note: Variable numbers may change if variables are added or removed. Examples ======== Delete the variable ``Epoch`` from the open CDF file ``cdffile``. >>> del cdffile['Epoch'] """ self[name]._delete() def __enter__(self): """Context manager entrance function.""" return self def __exit__(self, type, value, traceback): """Context manager exit function. Close CDF file. """ self.close() def __getitem__(self, name): """Gets a zVariable in this CDF, by name or number The CDF acts like a dict @param name: Name or number of the CDF variable @type name: string or int @return: CDF variable named or numbered L{name} @rtype: :py:class:`pycdf.Var` @raise KeyError: for pretty much any problem in lookup @note: variable numbers may change if variables are added or removed. """ try: return Var(self, name) except CDFException as e: raise KeyError('{0}: {1}'.format(name, e)) def __setitem__(self, name, data): """Writes data to a zVariable in this CDF If the zVariable does not exist, will create one matching L{data}. If it does exist, will attempt to write L{data} to it without changing the type or dimensions. @param name: name or number of the variable to write @type name: str or int @param data: data to write, or a :py:class:`pycdf.Var` to copy """ if isinstance(data, Var): self.clone(data, name) elif name in self: self[name][...] 
= data if hasattr(data, 'attrs'): self[name].attrs.clone(data.attrs) else: self.new(name, data) def __iter__(self, current = 0): """Iterates over zVars in CDF Iterators for dicts return keys @note: Returned in variable-number order """ while current < self.__len__(): name = self[current].name() value = (yield name) if value is None: current += 1 else: current = self[value]._num() current += 1 def __len__(self): """Implements 'length' of CDF (number of zVars) @return: number of zVars in the CDF @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, const.CDF_NUMzVARS_, ctypes.byref(count)) return count.value def __contains__(self, key): """Determines whether a particular variable name is in the CDF @note: Essentially an efficiency function; L{__iter__} is called if this isn't defined @param key: key/variable name to check @type key: string @return: True if L{key} is the name of a variable in CDF, else False @rtype: Boolean """ try: foo = self[key] return True except KeyError as e: expected = str(key) + \ ": NO_SUCH_VAR: Named variable not found in this CDF." if expected in e.args: return False raise def __repr__(self): """Returns representation of CDF Cannot return anything that can be eval'd to create a copy of the CDF, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<CDF:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the CDF This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.CDF`, just the names, types, and sizes of all variables. (Attributes are not listed.) @return: description of the variables in the CDF @rtype: str """ if self._opened: return '\n'.join([key + ': ' + str(value) for (key, value) in sorted(self.items())]) #can get away with this sort because second value in tuple isn't #compared unless first are different, and variable name is unique. else: if isinstance(self.pathname, str): return 'Closed CDF {0}'.format(self.pathname) else: return 'Closed CDF {0}'.format(self.pathname.decode('ascii')) def _open(self, readonly=True): """Opens the CDF file (called on init) Will open an existing CDF file read/write. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.OPEN_, const.CDF_, self.pathname, ctypes.byref(self._handle)) self._opened = True if readonly: #Default is RW self.readonly(readonly) def _create(self): """Creates (and opens) a new CDF file Created at ``pathname``. Assumes zero-dimension r variables Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.CREATE_, const.CDF_, self.pathname, ctypes.c_long(0), (ctypes.c_long * 1)(0), ctypes.byref(self._handle)) self._opened = True def _from_master(self, master_path): """Creates a new CDF from a master CDF file ``master_path`` is copied to ``pathname`` and opened. Parameters ========== master_path : string location of the master CDF file Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. 
note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ if os.path.exists(self.pathname): raise CDFError(const.CDF_EXISTS) shutil.copy2(master_path, self.pathname) self._open(False) def _call(self, *args, **kwargs): """Select this CDF as current and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. Parameters ========== args : various, see :py:mod:`ctypes`. Passed directly to the CDF library interface. Useful constants are defined in the :doc:`const <pycdf_const>` module of this package. Returns ======= out : ctypes.c_long CDF status from the library .. note: Terminal NULL_ is automatically added to ``args``. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. """ return lib.call(const.SELECT_, const.CDF_, self._handle, *args, **kwargs) def clone(self, zVar, name=None, data=True): """ Clone a zVariable (from another CDF or this) into this CDF Parameters ========== zVar : :py:class:`Var` variable to clone Other Parameters ================ name : str Name of the new variable (default: name of the original) data : boolean (optional) Copy data, or only type, dimensions, variance, attributes? (default: True, copy data as well) Returns ======= out : :py:class:`Var` The newly-created zVar in this CDF """ if name is None: name = zVar.name() if name in self: del self[name] self.new(name, type=zVar.type(), recVary=zVar.rv(), dimVarys=zVar.dv(), dims=zVar._dim_sizes(), n_elements=zVar._nelems()) self[name].compress(*zVar.compress()) self[name].attrs.clone(zVar.attrs) if data: r = zVar._raw zVar._raw = True self.raw_var(name)[...] = zVar[...] zVar._raw = r return zVar def col_major(self, new_col=None): """ Finds the majority of this CDF file Other Parameters ================ new_col : boolean Specify True to change to column-major, False to change to row major, or do not specify to check the majority rather than changing it. (default is check only) Returns ======= out : boolean True if column-major, false if row-major """ if new_col != None: new_maj = const.COLUMN_MAJOR if new_col else const.ROW_MAJOR self._call(const.PUT_, const.CDF_MAJORITY_, new_maj) maj = ctypes.c_long(0) self._call(const.GET_, const.CDF_MAJORITY_, ctypes.byref(maj)) if not maj.value in (const.ROW_MAJOR.value, const.COLUMN_MAJOR.value): raise CDFError(const.BAD_MAJORITY) return maj.value == const.COLUMN_MAJOR.value def readonly(self, ro=None): """ Sets or check the readonly status of this CDF If the CDF has been changed since opening, setting readonly mode will have no effect. .. note:: Closing a CDF that has been opened readonly, or setting readonly False, may take a substantial amount of time if there are many variables in the CDF, as a (potentially large) cache needs to be cleared. Consider specifying ``readonly=False`` when opening the file if this is an issue. However, this may make some reading operations slower. Other Parameters ================ ro : Boolean True to set the CDF readonly, False to set it read/write, or leave out to check only. 
Returns ======= out : Boolean True if CDF is read-only, else False Raises ====== CDFError : if bad mode is set """ if ro == True: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYon) elif ro == False: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYoff) mode = ctypes.c_long(0) self._call(const.CONFIRM_, const.CDF_READONLY_MODE_, ctypes.byref(mode)) if mode.value == const.READONLYon.value: return True elif mode.value == const.READONLYoff.value: return False else: raise CDFError(const.BAD_READONLY_MODE.value) def checksum(self, new_val=None): """ Set or check the checksum status of this CDF. If checksums are enabled, the checksum will be verified every time the file is opened. Other Parameters ================ new_val : boolean True to enable checksum, False to disable, or leave out to simply check. Returns ======= out : boolean True if the checksum is enabled or False if disabled """ if new_val != None: self._call(const.PUT_, const.CDF_CHECKSUM_, const.MD5_CHECKSUM if new_val else const.NO_CHECKSUM) chk = ctypes.c_long(0) self._call(const.GET_, const.CDF_CHECKSUM_, ctypes.byref(chk)) if not chk.value in (const.MD5_CHECKSUM.value, const.NO_CHECKSUM.value): raise CDFError(const.BAD_CHECKSUM) return chk.value == const.MD5_CHECKSUM.value def close(self): """ Closes the CDF file Although called on object destruction (:meth:`~CDF.__del__`), to ensure all data are saved, the user should explicitly call :meth:`~CDF.close` or :meth:`~CDF.save`. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.CLOSE_, const.CDF_) self._opened = False def compress(self, comptype=None, param=None): """ Set or check the compression of this CDF Sets compression on entire *file*, not per-variable. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) Returns ======= out : tuple (comptype, param) currently in effect See Also ======== :meth:`Var.compress` Examples ======== Set file ``cdffile`` to gzip compression, compression level 9: >>> cdffile.compress(pycdf.const.GZIP_COMPRESSION, 9) """ return _compress(self, comptype, param) def new(self, name, data=None, type=None, recVary=True, dimVarys=None, dims=None, n_elements=None, compress=None, compress_param=None): """ Create a new zVariable in this CDF .. note:: Either ``data`` or ``type`` must be specified. If type is not specified, it is guessed from ``data``. Parameters ========== name : str name of the new variable Other Parameters ================ data data to store in the new variable. If this has a an ``attrs`` attribute (e.g., :class:`~spacepy.datamodel.dmarray`), it will be used to populate attributes of the new variable. type : ctypes.c_long CDF type of the variable, from :mod:`~pycdf.const`. See section 2.5 of the CDF user's guide for more information on CDF data types. recVary : boolean record variance of the variable (default True) dimVarys : list of boolean dimension variance of each dimension, default True for all dimensions. 
dims : list of int size of each dimension of this variable, default zero-dimensional. Note this is the dimensionality as defined by CDF, i.e., for record-varying variables it excludes the leading record dimension. See :py:class:`Var`. n_elements : int number of elements, should be 1 except for CDF_CHAR, for which it's the length of the string. compress : ctypes.c_long Compression to apply to this variable, default None. See :py:meth:`Var.compress`. compress_param : ctypes.c_long Compression parameter if compression used; reasonable default is chosen. See :py:meth:`Var.compress`. Returns ======= out : :py:class:`Var` the newly-created zVariable Raises ====== ValueError : if neither data nor sufficient typing information is provided. Notes ===== Any given data may be representable by a range of CDF types; if the type is not specified, pycdf will guess which the CDF types which can represent this data. This breaks down to: #. If input data is a numpy array, match the type of that array #. Proper kind (numerical, string, time) #. Proper range (stores highest and lowest number provided) #. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: #. Type that matches precision of data first, then #. integer type before float type, then #. Smallest type first, then #. signed type first, then #. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if ``data`` specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: #. absolute values between 0 and 3e-39 #. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. """ if type in (const.CDF_EPOCH16, const.CDF_INT8, const.CDF_TIME_TT2000) \ and self.backward: raise ValueError('Cannot use EPOCH16, INT8, or TIME_TT2000 ' 'in backward-compatible CDF') if not lib.supports_int8 and \ type in (const.CDF_INT8, const.CDF_TIME_TT2000): raise ValueError('INT8 and TIME_TT2000 require CDF library 3.4.0') if data is None: if type is None: raise ValueError('Must provide either data or a CDF type.') if dims is None: dims = [] if n_elements is None: n_elements = 1 else: (guess_dims, guess_types, guess_elements) = _Hyperslice.types(data) if dims is None: if recVary: if guess_dims == (): raise ValueError( 'Record-varying data cannot be scalar. 
' 'Specify NRV with CDF.new() or put data in array.') dims = guess_dims[1:] else: dims = guess_dims if type is None: type = guess_types[0] if type == const.CDF_EPOCH16.value and self.backward: type = const.CDF_EPOCH if n_elements is None: n_elements = guess_elements if dimVarys is None: dimVarys = [True for i in dims] recVary = const.VARY if recVary else const.NOVARY dimVarys = [const.VARY if dimVary else const.NOVARY for dimVary in dimVarys] if not hasattr(type, 'value'): type = ctypes.c_long(type) if type.value == const.CDF_INT8.value and not lib.supports_int8: raise ValueError( '64-bit integer support require CDF library 3.4.0') if type.value in (const.CDF_EPOCH16.value, const.CDF_INT8.value, const.CDF_TIME_TT2000.value) \ and self.backward: raise ValueError('Data requires EPOCH16, INT8, or TIME_TT2000; ' 'incompatible with backward-compatible CDF') new_var = Var(self, name, type, n_elements, dims, recVary, dimVarys) if compress != None: new_var.compress(compress, compress_param) if data is not None: new_var[...] = data if hasattr(data, 'attrs'): new_var.attrs.clone(data.attrs) return new_var def raw_var(self, name): """ Get a "raw" :class:`Var` object. Normally a :class:`Var` will perform translation of values for certain types (to/from Unicode for CHAR variables on Py3k, and to/from datetime for all time types). A "raw" object does not perform this translation, on read or write. This does *not* affect the data on disk, and in fact it is possible to maintain multiple Python objects with access to the same zVariable. Parameters ========== name : str name or number of the zVariable """ v = self[name] v._raw = True return v def save(self): """ Saves the CDF file but leaves it open. If closing the CDF, :meth:`close` is sufficient; there is no need to call :meth:`save` before :meth:`close`. .. note:: Relies on an undocumented call of the CDF C library, which is also used in the Java interface. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.SAVE_, const.CDF_) def copy(self): """ Make a copy of all data and attributes in this CDF Returns ======= out : :py:class:`CDFCopy` :class:`~spacepy.datamodel.SpaceData`-like object of all data """ return CDFCopy(self) def version(self): """ Get version of library that created this CDF Returns ======= out : tuple version of CDF library, in form (version, release, increment) """ ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) self._call(const.GET_, const.CDF_VERSION_, ctypes.byref(ver), const.GET_, const.CDF_RELEASE_, ctypes.byref(rel), const.GET_, const.CDF_INCREMENT_, ctypes.byref(inc)) return (ver.value, rel.value, inc.value) def _get_attrs(self): """Get attribute list Provide access to the CDF's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = gAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. 
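As an illustrative sketch only (the attribute name ``Project`` is
hypothetical and the CDF must be open read/write):

>>> cdffile.attrs['Project'] = 'My project name'
>>> print(cdffile.attrs['Project'])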
""") def var_num(self, varname): """Get the variable number of a particular variable name This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : int Variable number of this zvariable. """ num = self._var_nums.get(varname, None) if num is None: #Copied from Var._get, which can hopefully be thinned varNum = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMBER_, varname, ctypes.byref(varNum)) num = varNum.value self._var_nums[varname] = num return num def attr_num(self, attrname): """Get the attribute number and scope by attribute name This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : tuple attribute number, scope (True for global) of this attribute """ res = self._attr_info.get(attrname, None) if res is None: #Copied from Var._get, which can hopefully be thinned attrNum = ctypes.c_long(0) self._call(const.GET_, const.ATTR_NUMBER_, attrname, ctypes.byref(attrNum)) scope = ctypes.c_long(0) self._call(const.SELECT_, const.ATTR_, attrNum, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) res = (attrNum.value, scope) self._attr_info[attrname] = res return res def clear_attr_from_cache(self, attrname): """Mark an attribute deleted in the name-to-number cache Will remove an attribute, and all attributes with higher numbers, from the attribute cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the attribute. Not this is NOT a string in Python 3! """ num, scope = self.attr_num(attrname) #All numbers higher than this are renumbered for a, n in list(self._attr_info.items()): if n[0] >= num: del self._attr_info[a] def clear_from_cache(self, varname): """Mark a variable deleted in the name-to-number cache Will remove a variable, and all variables with higher numbers, from the variable cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! """ num = self.var_num(varname) #All numbers higher than this are renumbered for v, n in list(self._var_nums.items()): if n >= num: del self._var_nums[v] def add_attr_to_cache(self, attrname, num, scope): """Add an attribute to the name-to-number cache This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable scope : bool True if global scope; False if variable scope. 
""" self._attr_info[attrname] = (num, scope) def add_to_cache(self, varname, num): """Add a variable to the name-to-number cache This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable """ self._var_nums[varname] = num #Note there is no function for delete, currently handled in Var.rename #and Attr.rename by just deleting from the dict directly. Maybe this #should be differen (maybe should be possible to follow a variable across #a rename...) class Var(MutableSequence): """ A CDF variable. This object does not directly store the data from the CDF; rather, it provides access to the data in a format that much like a Python list or numpy :class:`~numpy.ndarray`. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. The CDF user's guide, section 2.3, provides background on variables. .. note:: Not intended to be created directly; use methods of :class:`CDF` to gain access to a variable. A record-varying variable's data are viewed as a hypercube of dimensions n_dims+1 (the extra dimension is the record number). They are indexed in row-major fashion, i.e. the last index changes most frequently / is contiguous in memory. If the CDF is column-major, the data are transformed to row-major before return. Non record-varying variables are similar, but do not have the extra dimension of record number. Variables can be subscripted by a multidimensional index to return the data. Indices are in row-major order with the first dimension representing the record number. If the CDF is column major, the data are reordered to row major. Each dimension is specified by standard Python `slice <http://docs.python.org/tutorial/introduction.html#strings>`_ notation, with dimensions separated by commas. The ellipsis fills in any missing dimensions with full slices. The returned data are lists; Python represents multidimensional arrays as nested lists. The innermost set of lists represents contiguous data. .. note:: numpy 'fancy indexing' is *not* supported. Degenerate dimensions are 'collapsed', i.e. no list of only one element will be returned if a single subscript is specified instead of a range. (To avoid this, specify a slice like 1:2, which starts with 1 and ends before 2). Two special cases: 1. requesting a single-dimension slice for a record-varying variable will return all data for that record number (or those record numbers) for that variable. 2. Requests for multi-dimensional variables may skip the record-number dimension and simply specify the slice on the array itself. In that case, the slice of the array will be returned for all records. In the event of ambiguity (e.g., single-dimension slice on a one-dimensional variable), case 1 takes priority. Otherwise, mismatch between the number of dimensions specified in the slice and the number of dimensions in the variable will cause an :exc:`~exceptions.IndexError` to be thrown. This all sounds very complicated but it is essentially attempting to do the 'right thing' for a range of slices. An unusual case is scalar (zero-dimensional) non-record-varying variables. Clearly they cannot be subscripted normally. 
In this case, use the ``[...]`` syntax meaning 'access all data.': >>> import pycdf >>> testcdf = pycdf.CDF('test.cdf', '') >>> variable = testcdf.new('variable', recVary=False, ... type=pycdf.const.CDF_INT4) >>> variable[...] = 10 >>> variable <Var: CDF_INT4 [] NRV > >>> variable[...] 10 Reading any empty non-record-varying variable will return an empty with the same *number* of dimensions, but all dimensions will be of zero length. The scalar is, again, a special case: due to the inability to have a numpy array which is both zero-dimensional and empty, reading an NRV scalar variable with no data will return an empty one-dimensional array. This is really not recommended. As a list type, variables are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_; iterating over a variable returns a single complete record at a time. This is all clearer with examples. Consider a variable ``B_GSM``, with three elements per record (x, y, z components) and fifty records in the CDF. Then: 1. ``B_GSM[0, 1]`` is the y component of the first record. 2. ``B_GSM[10, :]`` is a three-element list, containing x, y, and z components of the 11th record. As a shortcut, if only one dimension is specified, it is assumed to be the record number, so this could also be written ``B_GSM[10]``. 3. ``B_GSM[...]`` reads all data for ``B_GSM`` and returns it as a fifty-element list, each element itself being a three-element list of x, y, z components. Multidimensional example: consider fluxes stored as a function of pitch angle and energy. Such a variable may be called Flux and stored as a two-dimensional array, with the first dimension representing (say) ten energy steps and the second, eighteen pitch angle bins (ten degrees wide, centered from 5 to 175 degrees). Assume 100 records stored in the CDF (i.e. 100 different times). 1. ``Flux[4]`` is a list of ten elements, one per energy step, each element being a list of 18 fluxes, one per pitch bin. All are taken from the fifth record in the CDF. 2. ``Flux[4, :, 0:4]`` is the same record, all energies, but only the first four pitch bins (roughly, field-aligned). 3. ``Flux[..., 0:4]`` is a 100-element list (one per record), each element being a ten-element list (one per energy step), each containing fluxes for the first four pitch bins. This slicing notation is very flexible and allows reading specifically the desired data from the CDF. All data are, on read, converted to appropriate Python data types; EPOCH, EPOCH16, and TIME_TT2000 types are converted to :class:`~datetime.datetime`. Data are returned in numpy arrays. .. note:: Although pycdf supports TIME_TT2000 variables, the Python :class:`~datetime.datetime` object does not support leap seconds. Thus, on read, any seconds past 59 are truncated to 59.999999 (59 seconds, 999 milliseconds, 999 microseconds). Potentially useful list methods and related functions: - `count <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `in <http://docs.python.org/reference/expressions.html#in>`_ - `index <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `len <http://docs.python.org/library/functions.html#len>`_ - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - `sorted <http://docs.python.org/library/functions.html#sorted>`_ The topic of array majority can be very confusing; good background material is available at `IDL Array Storage and Indexing <http://www.idlcoyote.com/misc_tips/colrow_major.html>`_. 
In brief, *regardless of the majority stored in the CDF*, pycdf will always present the data in the native Python majority, row-major order, also known as C order. This is the default order in `NumPy <http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html #internal-memory-layout-of-an-ndarray>`_. However, packages that render image data may expect it in column-major order. If the axes seem 'swapped' this is likely the reason. The :attr:`~Var.attrs` Python attribute acts as a dictionary referencing zAttributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`zAttrList` for more on the dictionary of attributes. With writing, as with reading, every attempt has been made to match the behavior of Python lists. You can write one record, many records, or even certain elements of all records. There is one restriction: only the record dimension (i.e. dimension 0) can be resized by write, as all records in a variable must have the same dimensions. Similarly, only whole records can be deleted. .. note:: Unusual error messages on writing data usually mean that pycdf is unable to interpret the data as a regular array of a single type matching the type and shape of the variable being written. A 5x4 array is supported; an irregular array where one row has five columns and a different row has six columns is not. Error messages of this type include: - ``Data must be well-formed, regular array of number, string, or datetime`` - ``setting an array element with a sequence.`` - ``shape mismatch: objects cannot be broadcast to a single shape`` For these examples, assume Flux has 100 records and dimensions [2, 3]. Rewrite the first record without changing the rest: >>> Flux[0] = [[1, 2, 3], [4, 5, 6]] Writes a new first record and delete all the rest: >>> Flux[...] = [[1, 2, 3], [4, 5, 6]] Write a new record in the last position and add a new record after: >>> Flux[99:] = [[[1, 2, 3], [4, 5, 6]], ... [[11, 12, 13], [14, 15, 16]]] Insert two new records between the current number 5 and 6: >>> Flux[5:6] = [[[1, 2, 3], [4, 5, 6]], [[11, 12, 13], ... [14, 15, 16]]] This operation can be quite slow, as it requires reading and rewriting the entire variable. (CDF does not directly support record insertion.) Change the first element of the first two records but leave other elements alone: >>> Flux[0:2, 0, 0] = [1, 2] Remove the first record: >>> del Flux[0] Removes record 5 (the sixth): >>> del Flux[5] Due to the need to work around a bug in the CDF library, this operation can be quite slow. Delete *all data* from ``Flux``, but leave the variable definition intact: >>> del Flux[...] .. note:: Although this interface only directly supports zVariables, zMode is set on opening the CDF so rVars appear as zVars. See p.24 of the CDF user's guide; pyCDF uses zMode 2. .. autosummary:: ~Var.attrs ~Var.compress ~Var.copy ~Var.dtype ~Var.dv ~Var.insert ~Var.name ~Var.rename ~Var.rv ~Var.shape ~Var.type .. attribute:: Var.attrs zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. .. automethod:: compress .. automethod:: copy .. autoattribute:: dtype .. automethod:: dv .. automethod:: insert .. automethod:: name .. automethod:: rename .. automethod:: rv .. autoattribute:: shape .. 
automethod:: type """ def __init__(self, cdf_file, var_name, *args): """Create or locate a variable Parameters ========== cdf_file : :py:class:`pycdf.CDF` CDF file containing this variable var_name : string name of this variable Other Parameters ================ args additional arguments passed to :py:meth:`_create`. If none, opens an existing variable. If provided, creates a new one. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning """ self.cdf_file = cdf_file #This is the definitive "identify" of variable self._name = None self._type = None #CDF type (long) self._raw = False #Raw access (skip all conversions) if len(args) == 0: self._get(var_name) else: self._create(var_name, *args) #Weak reference to attribute list (use attrs instead) #This avoids a reference loop self._attrlistref = weakref.ref(zAttrList(self)) def __getitem__(self, key): """Returns a slice from the data array. Details under :py:class:`pycdf.Var`. @return: The data from this variable @rtype: list-of-lists of appropriate type. @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) #Hyperslice mostly catches this sort of thing, but #an empty variable is a special case, since we might want to #WRITE to 0th record (which Hyperslice also supports) but #can't READ from it, and iterating over tries to read from it. if hslice.rv: if hslice.dimsizes[0] == 0 and hslice.degen[0] and \ hslice.starts[0] == 0: raise IndexError('record index out of range') #For NRV, again hslice will assume 0th record exists since we might #want to write. So ANY degenerate dim other than the glued-on 0th #suggests an explicit index that should fail. None degenerate suggests #make an empty array. #Note this is pulling a lot of hyperslice stuff into getitem! elif hslice.dimsizes[0] == 0: if len(hslice.degen) > 1 and max(hslice.degen[1:]): raise IndexError('record index out of range') else: #The zero-length dimension is degenerate so it gets chopped, #and you can't have a zero-length numpy array that still #maintains the size of all other dimensions. So just force #a zero-dim array and the rest will follow hslice.counts[...] = 0 #If this is a scalar, need to make a single non-degenerate #dimension so it can be empty. if len(hslice.counts) == 1: hslice.degen[0] = False result = hslice.create_array() if hslice.counts[0] != 0: hslice.select() lib.call(const.GET_, const.zVAR_HYPERDATA_, result.ctypes.data_as(ctypes.c_void_p)) return hslice.convert_input_array(result) def __delitem__(self, key): """Removes a record (or set of records) from the CDF Only whole records can be deleted, so the del call must either specify only one dimension or it must specify all elements of the non-record dimensions. This is *not* a way to resize a variable! Deleting records from the middle of a variable may be very slow in some circumstances. To work around a bug in CDF library versions 3.4.0 and before, all the data must be read in, the requested deletions done, and then all written back out. 
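As a brief sketch (the variable ``flux`` is hypothetical), delete the first
ten records of a record-varying variable:

>>> del flux[0:10]

Anything finer-grained than whole records (e.g. a single element of a
multidimensional record) cannot be deleted this way.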
@param key: index or slice to delete @type key: int or slice @raise TypeError: if an attempt is made to delete from a non record-varying variable, or to delete below the record level """ if not self.rv(): raise TypeError('Cannot delete records from non-record-varying ' 'variable.') hslice = _Hyperslice(self, key) if hslice.dims > 1 and (hslice.counts[1:] != hslice.dimsizes[1:]).any(): raise TypeError('Can only delete entire records.') if hslice.counts[0] == 0: return start = hslice.starts[0] count = hslice.counts[0] interval = hslice.intervals[0] dimsize = hslice.dimsizes[0] self._call() dangerous_delete = False if lib._del_middle_rec_bug and \ (interval != 1 or (start != 0 and start + count < dimsize)): #delete from middle is dangerous if only have one index entry entries = ctypes.c_long(0) lib.call(const.GET_, const.zVAR_nINDEXENTRIES_, ctypes.byref(entries)) dangerous_delete = (entries.value == 1) if dangerous_delete: data = self[...] data = numpy.delete( data, numpy.arange(start, start + count * interval, interval), 0) self[0:dimsize - count] = data first_rec = dimsize - count last_rec = dimsize - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif interval == 1: first_rec = ctypes.c_long(start) last_rec = ctypes.c_long(start + count - 1) lib.call(const.DELETE_, const.zVAR_RECORDS_, first_rec, last_rec) else: self._call() #delete from end to avoid renumbering of records for recno in range(start + (count - 1) * interval, start - 1, -1 * interval): lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(recno), ctypes.c_long(recno)) def __setitem__(self, key, data): """Puts a slice into the data array. Details under :py:class:`pycdf.Var`. @param key: index or slice to store @type key: int or slice @param data: data to store @type data: numpy.array @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. 
IndexError will @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) n_recs = hslice.counts[0] hslice.expand(data) cdf_type = self.type() if cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) else: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=self._np_type()) if cdf_type == const.CDF_EPOCH16.value: datashape = data.shape[:-1] else: datashape = data.shape #Check data sizes if datashape != tuple(hslice.expected_dims()): raise ValueError('attempt to assign data of dimensions ' + str(datashape) + ' to slice of dimensions ' + str(tuple(hslice.expected_dims()))) #Flip majority and reversed dimensions, see convert_input_array data = hslice.convert_output_array(data) #Handle insertions and similar weirdness if hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Specified slice ends before last record, so insert in middle saved_data = self[hslice.starts[0] + n_recs:] if hslice.counts[0] > 0: hslice.select() lib.call(const.PUT_, const.zVAR_HYPERDATA_, data.ctypes.data_as(ctypes.c_void_p)) if hslice.counts[0] < n_recs: first_rec = hslice.starts[0] + hslice.counts[0] last_rec = hslice.dimsizes[0] - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Put saved data in after inserted data self[hslice.starts[0] + hslice.counts[0]:] = saved_data def extend(self, data): """ Append multiple values to the end of this variable This is an efficiency function which overrides the base implementation in MutableSequence. Parameters ---------- data : the data to append """ self[len(self):] = data def insert(self, index, data): """ Inserts a *single* record before an index Parameters ---------- index : int index before which to insert the new record data : the record to insert """ self[index:index] = [data] def _create(self, var_name, datatype, n_elements = 1, dims = (), recVary = const.VARY, dimVarys = None): """Creates a new zVariable @param var_name: name of this variable @type var_name: string @param datatype: CDF data type @type datatype: ctypes.c_long @param n_elements: number of elements (should be 1 except for CDF_CHAR variables). @type n_elements: long @param dims: size of each dimension for multi-dimensional variable, or empty for a zero-dimensional @type dims: sequence of long @param recVary: record variance for this variable (VARY/NOVARY) @type recVary: long @param dimVarys: array of VARY or NOVARY, variance for each dimension @type dimVarys: sequence of long @return: new variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.new}. 
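As a sketch of the public route instead (the variable name and type are
illustrative):

>>> newvar = cdffile.new('variable_name', type=pycdf.const.CDF_DOUBLE, dims=[3])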
""" dim_array = (ctypes.c_long * len(dims))(*dims) enc_name = var_name.encode('ascii') if dimVarys is None: dim_vary_array = (ctypes.c_long * (len(dims) if len(dims) > 0 else 1))(const.VARY) else: dim_vary_array = (ctypes.c_long * len(dims))(*dimVarys) varNum = ctypes.c_long(0) self.cdf_file._call(const.CREATE_, const.zVAR_, enc_name, datatype, ctypes.c_long(n_elements), ctypes.c_long(len(dims)), dim_array, recVary, dim_vary_array, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) def _delete(self): """Removes this zVariable from the CDF @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ self._call(const.DELETE_, const.zVAR_) self.cdf_file.clear_from_cache(self._name) self._name = None def _get(self, var_name): """Gets an existing zVariable @param var_name: name of this variable @type var_name: string @return: variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.__getitem__}. """ if isinstance(var_name, str_classes): try: enc_name = var_name.encode('ascii').rstrip() except AttributeError: enc_name = var_name.rstrip() #already in ASCII #'touch' CDF to cause an error if the name isn't there; get number varNum = ctypes.c_long(0) self.cdf_file._call(const.GET_, const.zVAR_NUMBER_, enc_name, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) else: #Looking up by number name = ctypes.create_string_buffer(const.CDF_VAR_NAME_LEN256+1) self.cdf_file._call(const.SELECT_, const.zVAR_, ctypes.c_long(var_name), const.GET_, const.zVAR_NAME_, name) self._name = name.value.rstrip() self.cdf_file.add_to_cache(self._name, var_name) def _num(self): """Returns the zVar number for this variable @return: number of this zVar @rtype: int """ return self.cdf_file.var_num(self._name) def __len__(self): """Get number of records for this variable in this file @return: Number of records @rtype: long @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ count = ctypes.c_long(0) self._call(const.GET_, const.zVAR_MAXREC_, ctypes.byref(count)) return (count.value + 1) def __repr__(self): """Returns representation of the variable Cannot return anything that can be eval'd to create a copy, so just wrap the informal representation in angle brackets. @return: info on this zVar @rtype: str """ return '<Var:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the variable This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.Var`. 
@return: info on this zVar, CDFTYPE [dimensions] NRV (if not record-varying) @rtype: str """ if self.cdf_file._opened: cdftype = self.type() chartypes = (const.CDF_CHAR.value, const.CDF_UCHAR.value) rv = self.rv() typestr = lib.cdftypenames[cdftype] + \ ('*' + str(self._nelems()) if cdftype in chartypes else '' ) if rv: sizestr = str([len(self)] + self._dim_sizes()) else: sizestr = str(self._dim_sizes()) return typestr + ' ' + sizestr + ('' if rv else ' NRV') else: if isinstance(self._name, str): return 'zVar "{0}" in closed CDF {1}'.format( self._name, self.cdf_file.pathname) else: return 'zVar "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self.cdf_file.pathname.decode('ascii')) def _n_dims(self): """Get number of dimensions for this variable @return: the number of dimensions @rtype: long """ n_dims = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMDIMS_, ctypes.byref(n_dims)) return n_dims.value def _dim_sizes(self): """Get the dimension sizes for this variable @return: sequence of sizes @rtype: sequence of long @note: This will always be in Python order (i.e. row major, last index iterates most quickly), *regardless* of the majority of the CDF. """ sizes = (ctypes.c_long * const.CDF_MAX_DIMS)(0) self._call(const.GET_, const.zVAR_DIMSIZES_, sizes) sizes = sizes[0:self._n_dims()] return sizes def rv(self, new_rv=None): """ Gets or sets whether this variable has record variance If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Other Parameters ================ new_rv : boolean True to change to record variance, False to change to NRV, unspecified to simply check variance. Returns ======= out : Boolean True if record varying, False if NRV """ if new_rv != None: self._call(const.PUT_, const.zVAR_RECVARY_, const.VARY if new_rv else const.NOVARY) vary = ctypes.c_long(0) self._call(const.GET_, const.zVAR_RECVARY_, ctypes.byref(vary)) return vary.value != const.NOVARY.value def dv(self, new_dv=None): """ Gets or sets dimension variance of each dimension of variable. If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Parameters ========== new_dv : list of boolean Each element True to change that dimension to dimension variance, False to change to not dimension variance. (Unspecified to simply check variance.) Returns ======= out : list of boolean True if that dimension has variance, else false. """ ndims = self._n_dims() if new_dv != None: if len(new_dv) != ndims: raise ValueError('Must specify variance for ' + str(ndims) + 'dimensions.') varies = (ctypes.c_long * ndims)( *[const.VARY if dv else const.NOVARY for dv in new_dv]) self._call(const.PUT_, const.zVAR_DIMVARYS_, varies) if ndims == 0: return [] varies = (ctypes.c_long * const.CDF_MAX_DIMS)() self._call(const.GET_, const.zVAR_DIMVARYS_, varies) return [dv != const.NOVARY.value for dv in varies[0:ndims]] def _call(self, *args, **kwargs): """Select this CDF and variable and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. @param args: Passed directly to the CDF library interface. Useful constants are defined in the :py:mod:`pycdf.const` module of this package. @type args: various, see :py:mod:`ctypes`. 
@return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self.cdf_file._call( const.SELECT_, const.zVAR_, ctypes.c_long(self.cdf_file.var_num(self._name)), *args, **kwargs) def _np_type(self): """Returns the numpy type of this variable This is the numpy type that will come directly out of the CDF; see :meth:`dtype` for the representation post-conversion. Raises ====== CDFError : for library-reported error or failure to find numpy type Returns ======= out : dtype numpy dtype that will hold value from this variable """ cdftype = self.type() if cdftype == const.CDF_CHAR.value or cdftype == const.CDF_UCHAR.value: return numpy.dtype('S' + str(self._nelems())) try: return lib.numpytypedict[cdftype] except KeyError: raise CDFError(const.BAD_DATA_TYPE) def type(self, new_type=None): """ Returns or sets the CDF type of this variable Parameters ========== new_type : ctypes.c_long the new type from :mod:`~pycdf.const` Returns ======= out : int CDF type """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) n_elements = ctypes.c_long(self._nelems()) self._call(const.PUT_, const.zVAR_DATASPEC_, new_type, n_elements) self._type = None if self._type is None: cdftype = ctypes.c_long(0) self._call(const.GET_, const.zVAR_DATATYPE_, ctypes.byref(cdftype)) self._type = cdftype.value return self._type def _nelems(self): """Number of elements for each value in this variable This is the length of strings for CHAR and UCHAR, should be 1 otherwise. @return: length of strings @rtype: int """ nelems = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMELEMS_, ctypes.byref(nelems)) return nelems.value def name(self): """ Returns the name of this variable Returns ======= out : str variable's name """ if isinstance(self._name, str): return self._name elif isinstance(self._name, bytes): return self._name.decode() def compress(self, comptype=None, param=None): """ Set or check the compression of this variable Compression may not be changeable on variables with data already written; even deleting the data may not permit the change. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
Returns ======= out : tuple the (comptype, param) currently in effect """ return _compress(self, comptype, param) def copy(self): """ Copies all data and attributes from this variable Returns ======= out : :class:`VarCopy` list of all data in record order """ return VarCopy(self) def rename(self, new_name): """ Renames this variable Parameters ========== new_name : str the new name for this variable """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_VAR_NAME_LEN256: raise CDFError(const.BAD_VAR_NAME) self._call(const.PUT_, const.zVAR_NAME_, enc_name) self.cdf_file.add_to_cache( enc_name, self.cdf_file.var_num(self._name)) #Still in cache del self.cdf_file._var_nums[self._name] self._name = enc_name @property def shape(self): """ Provides the numpy array-like shape of this variable. Returns a tuple; first element is number of records (RV variable only) And the rest provide the dimensionality of the variable. .. note:: Assigning to this attribute will not change the shape. """ if self.rv(): return tuple([len(self)] + self._dim_sizes()) else: return tuple(self._dim_sizes()) @property def dtype(self): """ Provide the numpy dtype equivalent to the CDF type of this variable. Data from this variable will be returned in numpy arrays of this type. See Also -------- type """ cdftype = self.type() if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str is not bytes and not self._raw: return numpy.dtype('U' + str(self._nelems())) if cdftype in (const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value) and not self._raw: return numpy.dtype('O') return self._np_type() def _get_attrs(self): """Get attribute list Provide access to the zVar's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = zAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. """) class _Hyperslice(object): """Represents a CDF 'slice' used for the hyper CDF functions For internal module use only. @ivar dims: number of dimensions to this slice, usually number of dimensions to the variable plus one for the record, which represents the 0th (least rapidly varying) dimension. @type dims: int @ivar dimsizes: size of each dimension (0th is number of records) @type dimsizes: list of int @ivar starts: index of the start value for each dimension ('dimension indices' in CDF speak) @type starts: list of int @ivar counts: number of values to get from each dimension. Final result will be the product of everything in counts. ('dimension counts' in CDF speak) @type counts: numpy.array @ivar intervals: interval between successive indices to use for each dimension. ('dimension invervals' in CDF speak) @type intervals: list of int @ivar degen: is this dimension degenerate, i.e. should be removed in the returned dataset. A 3D array with one dimension degenerate will be returned as a 2D array (i.e. list-of-lists.) @type degen: numpy.array @ivar rev: should this dimension be returned in reverse order? 
@type rev: numpy.array @ivar column: is this slice in column-major mode (if false, row-major) @type column: boolean @ivar zvar: what CDF variable this object slices on @type zvar: :py:class:`pycdf.Var` @ivar expanded_key: fully-expanded version of the key passed to the constructor (all dimensions filled in) @type expanded_key: tuple @note: All dimension-related variables are stored row-major (Python order) """ def __init__(self, zvar, key): """Create a Hyperslice @param zvar: zVariable that this slices @type zvar: :py:class:`pycdf.Var` @param key: Python multi-dimensional slice as passed to __getitem__ @type key: tuple of slice and/or int @raise IndexError: if slice is out of range, mismatches dimensions, or otherwise unparsable. @raise ValueError: if slice has invalid values """ self.zvar = zvar self.rv = self.zvar.rv() #dim of records, + 1 record dim (NRV always is record 0) self.dims = zvar._n_dims() + 1 self.dimsizes = [len(zvar)] + \ zvar._dim_sizes() self.starts = [0] * self.dims self.counts = numpy.empty((self.dims,), dtype=numpy.int32) self.counts.fill(1) self.intervals = [1] * self.dims self.degen = numpy.zeros(self.dims, dtype=numpy.bool) self.rev = numpy.zeros(self.dims, dtype=numpy.bool) #key is: #1. a single value (integer or slice object) if called 1D #2. a tuple (of integers and/or slice objects) if called nD #3. Each item is either a single value (degenerate dim) # or a slice object. if not hasattr(key, '__len__'): #Not a container object, pack in tuple key = (key, ) if not self.rv: key = (0, ) + key #NRV, so always get 0th record (degenerate) key = self.expand_ellipsis(key, self.dims) if self.rv: #special-cases for RV variables if len(key) == 1: #get all data for this record(s) key = self.expand_ellipsis(key + (Ellipsis, ), self.dims) elif len(key) == self.dims - 1: #get same slice from each record key = (slice(None, None, None), ) + key if len(key) == self.dims: self.expanded_key = key for i in range(self.dims): idx = key[i] if hasattr(idx, 'start'): #slice (self.starts[i], self.counts[i], self.intervals[i], self.rev[i]) = \ self.convert_range(idx.start, idx.stop, idx.step, self.dimsizes[i]) else: #Single degenerate value if idx < 0: idx += self.dimsizes[i] if idx != 0 and (idx >= self.dimsizes[i] or idx < 0): raise IndexError('list index out of range') self.starts[i] = idx self.degen[i] = True else: raise IndexError('Slice does not match dimensions for zVar ' + str(zvar._name)) self.column = zvar.cdf_file.col_major() def expected_dims(self, data=None): """Calculate size of non-degenerate dimensions Figures out size, in each dimension, of expected input data @return: size of each dimension for this slice, excluding degenerate @rtype: list of int """ return [self.counts[i] for i in range(self.dims) if not self.degen[i]] def expand(self, data): """Expands the record dimension of this slice to hold a set of data If the length of data (outermost dimension) is larger than the record count (counts[0]) for this slice, expand the slice to hold all the data. This requires that the record dimension of the slice not be degenerate, and also that it not have been completely specified when the hyperslice was created (i.e. record dimension either ellipsis or no specified stop.) Does *not* expand any other dimension, since that's Very Hard in CDF. 
@param data: the data which are intended to be stored in this slice @type data: list """ rec_slice = self.expanded_key[0] if not self.rv or isinstance(data, str_classes) or self.degen[0] or \ not hasattr(rec_slice, 'stop'): return if len(data) < self.counts[0]: #Truncate to fit data if rec_slice.stop is None and rec_slice.step in (None, 1): self.counts[0] = len(data) elif len(data) > self.counts[0]: #Expand to fit data if rec_slice.step in (None, 1): self.counts[0] = len(data) def create_array(self): """Creates a numpy array to hold the data from this slice Returns ======= out : numpy.array array sized, typed, and dimensioned to hold data from this slice """ counts = self.counts degen = self.degen if self.column: counts = self.reorder(counts) degen = self.reorder(degen) #TODO: Forcing C order for now, revert to using self.column later array = numpy.empty( [counts[i] for i in range(len(counts)) if not degen[i]], self.zvar._np_type(), order='C') return numpy.require(array, requirements=('C', 'A', 'W')) def convert_input_array(self, buffer): """Converts a buffer of raw data from this slice EPOCH(16) variables always need to be converted. CHAR need converted to Unicode if py3k Parameters ========== buffer : numpy.array data as read from the CDF file Returns ======= out : numpy.array converted data """ result = self._flip_array(buffer) #Convert to derived types cdftype = self.zvar.type() if not self.zvar._raw: if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str != bytes: dt = numpy.dtype('U{0}'.format(result.dtype.itemsize)) result = numpy.require(numpy.char.array(result).decode(), dtype=dt) elif cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(result) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(result) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(result) return result def convert_output_array(self, buffer): """Convert a buffer of data that will go into this slice Parameters ========== buffer : numpy.array data to go into the CDF file Returns ======= out : numpy.array input with majority flipped and dimensions reversed to be suitable to pass directly to CDF library. """ buffer = self._flip_array(buffer) return numpy.require(buffer, requirements=('C', 'A', 'W')) def _flip_array(self, data): """ Operations for majority, etc. common between convert_input and _output """ cdftype = self.zvar.type() #Flip majority if any non-degenerate dimensions exist if self.column and not min(self.degen): #Record-number dim degen, swap whole thing if self.degen[0]: if cdftype == const.CDF_EPOCH16.value: #Maintain last dimension data = data.transpose( list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose() #Record-number dimension is not degenerate, so keep it first else: if cdftype == const.CDF_EPOCH16.value: data = data.transpose( [0] + list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose( [0] + list(range(len(data.shape) - 1, 0, -1))) #Reverse non-degenerate dimensions in rev #Remember that the degenerate indices are already gone! 
if self.rev.any(): sliced = [(slice(None, None, -1) if self.rev[i] else slice(None)) for i in range(self.dims) if not self.degen[i]] if cdftype == const.CDF_EPOCH16.value: #don't reverse last dim sliced.extend(slice(None)) data = operator.getitem(data, tuple(sliced)) return data def select(self): """Selects this hyperslice in the CDF Calls the CDF library to select the CDF, variable, records, and array elements corresponding to this slice. """ args = (const.SELECT_, const.zVAR_RECNUMBER_, ctypes.c_long(self.starts[0]), const.SELECT_, const.zVAR_RECCOUNT_, ctypes.c_long(self.counts[0]), const.SELECT_, const.zVAR_RECINTERVAL_, ctypes.c_long(self.intervals[0])) if self.dims > 1: dims = self.dims - 1 args += (const.SELECT_, const.zVAR_DIMINDICES_, (ctypes.c_long * dims)(*self.starts[1:]), const.SELECT_, const.zVAR_DIMCOUNTS_, (ctypes.c_long * dims)(*self.counts[1:]), const.SELECT_, const.zVAR_DIMINTERVALS_, (ctypes.c_long * dims)(*self.intervals[1:])) self.zvar._call(*args) @staticmethod def expand_ellipsis(slices, n_dims): """Expands any ellipses into correct number of full-size slices @param slices: tuple of slices, integers, or ellipse objects @type slices: tuple @param n_dims: number of dimensions this slice is over @type n_dims: int @return: L{slices} with ellipses replaced by appropriate number of full-dimension slices @rtype: tuple @raise IndexError: if ellipses specified when already have enough dimensions """ if slices is Ellipsis: return tuple([slice(None, None, None) for i in range(n_dims)]) #Elements might be numpy arrays, so can't use in/index idx = [i for i, v in enumerate(slices) if v is Ellipsis] if not idx: #no ellipsis return slices if len(idx) > 1: #multiples! raise IndexError('Ellipses can only be used once per slice.') idx = idx[0] #how many dims to expand ellipsis to #remember the ellipsis is in len(slices) and must be replaced! extra = n_dims - len(slices) + 1 if extra < 0: raise IndexError('too many indices') result = slices[0:idx] + (slice(None), ) * extra + slices[idx+1:] return result @staticmethod def check_well_formed(data): """Checks if input data is well-formed, regular array""" d = numpy.asanyarray(data) if d.dtype == numpy.object: #this is probably going to be bad try: len(d.flat[0]) except TypeError: #at least it's not a list pass else: raise ValueError( 'Data must be well-formed, regular array of number, ' 'string, or datetime') @staticmethod def dimensions(data): """Finds the dimensions of a nested list-of-lists @param data: data of which dimensions are desired @type data: list (of lists) @return: dimensions of L{data}, in order outside-in @rtype: list of int @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) _Hyperslice.check_well_formed(d) return d.shape @staticmethod def types(data, backward=False): """Find dimensions and valid types of a nested list-of-lists Any given data may be representable by a range of CDF types; infer the CDF types which can represent this data. This breaks down to: 1. Proper kind (numerical, string, time) 2. Proper range (stores highest and lowest number) 3. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: 1. Type that matches precision of data first, then 2. integer type before float type, then 3. Smallest type first, then 4. signed type first, then 5. specifically-named (CDF_BYTE) vs. 
generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if L{data} specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: 1. absolute values between 0 and 3e-39 2. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. @param data: data for which dimensions and CDF types are desired @type data: list (of lists) @param backward: limit to pre-CDF3 types @type backward: bool @return: dimensions of L{data}, in order outside-in; CDF types which can represent this data; number of elements required (i.e. length of longest string) @rtype: 3-tuple of lists ([int], [ctypes.c_long], [int]) @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) dims = d.shape elements = 1 types = [] _Hyperslice.check_well_formed(d) if d.dtype.kind in ('S', 'U'): #it's a string types = [const.CDF_CHAR, const.CDF_UCHAR] elements = d.dtype.itemsize if d.dtype.kind == 'U': #UTF-8 uses 4 bytes per elements //= 4 elif d.size and hasattr(numpy.ma.getdata(d).flat[0], 'microsecond'): if max((dt.microsecond % 1000 for dt in d.flat)) > 0: types = [const.CDF_EPOCH16, const.CDF_EPOCH, const.CDF_TIME_TT2000] else: types = [const.CDF_EPOCH, const.CDF_EPOCH16, const.CDF_TIME_TT2000] if backward: del types[types.index(const.CDF_EPOCH16)] del types[-1] elif not lib.supports_int8: del types[-1] elif d is data or isinstance(data, numpy.generic): #numpy array came in, use its type (or byte-swapped) types = [k for k in lib.numpytypedict if (lib.numpytypedict[k] == d.dtype or lib.numpytypedict[k] == d.dtype.newbyteorder()) and not k in lib.timetypes] if (not lib.supports_int8 or backward) \ and const.CDF_INT8.value in types: del types[types.index(const.CDF_INT8.value)] #Maintain priority to match the ordered lists below: #float/double (44, 45) before real (21/22), and #byte (41) before int (1) before char (51). So hack. #Consider making typedict an ordered dict once 2.6 is dead. types.sort(key=lambda x: x % 50, reverse=True) if not types: #not a numpy array, or can't parse its type if d.dtype.kind == 'O': #Object. 
Try to make it numeric #Can't do safe casting from Object, so try and compare #Basically try most restrictive to least restrictive trytypes = (numpy.uint64, numpy.int64, numpy.float64) for t in trytypes: try: newd = d.astype(dtype=t) except: #Failure to cast, try next type continue if (newd == d).all(): #Values preserved, use this type d = newd #Continue with normal guessing, as if a list break else: #fell through without a match raise ValueError( 'Cannot convert generic objects to CDF type.') if d.dtype.kind in ('i', 'u'): #integer minval = numpy.min(d) maxval = numpy.max(d) if minval < 0: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_INT2, const.CDF_INT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 15, 2 ** 31, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] else: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_UINT1, const.CDF_INT2, const.CDF_UINT2, const.CDF_INT4, const.CDF_UINT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 8, 2 ** 15, 2 ** 16, 2 ** 31, 2 ** 32, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] types = [t for (t, c) in zip(types, cutoffs) if c > maxval and (minval >= 0 or minval >= -c)] if (not lib.supports_int8 or backward) \ and const.CDF_INT8 in types: del types[types.index(const.CDF_INT8)] else: #float if dims is (): if d != 0 and (abs(d) > 1.7e38 or abs(d) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] else: absolutes = numpy.abs(d[d != 0]) if len(absolutes) > 0 and \ (numpy.max(absolutes) > 1.7e38 or numpy.min(absolutes) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] types = [t.value if hasattr(t, 'value') else t for t in types] return (dims, types, elements) @staticmethod def reorder(seq): """Reorders seq to switch array majority Used to take an array of subscripts between row and column majority. First element is not touched, being the record number. @param seq: a sequence of *subscripts* @type seq: sequence of integers @return: seq with all but element 0 reversed in order @rtype: sequence of integers """ return numpy.concatenate((seq[0:1], numpy.flipud(seq)[:-1])) @staticmethod def convert_range(start, stop, step, size): """Converts a start/stop/step range to start/count/interval (i.e. changes from Python-style slice to CDF-style) @param start: index to start a slice at, may be none or negative @type start: int @param stop: index at end of slice (one-past, standard Python), may be none or negative @type stop: int @param step: interval for stepping through stlice @type step: int @param size: size of list to slice @type size: int @return: (start, count, interval, rev) where: 1. start is the start index, normalized to be within the size of the list and negatives handled 2. count is the number of records in the slice, guaranteed to stop before the end 3. interval is the skip between records 4. rev indicates whether the sequence should be reversed @rtype: (int, int, int, boolean) """ (start, stop, step) = slice(start, stop, step).indices(size) if step < 0: step *= -1 count = int((start - stop + step - 1) / step) start = start - (count - 1) * step rev = True else: count = int((stop - start + step - 1) / step) rev = False if count < 0: count = 0 start = 0 return (start, count, step, rev) class Attr(MutableSequence): """An attribute, g or z, for a CDF .. 
warning:: This class should not be used directly, but only in its subclasses, :class:`gAttr` and :class:`zAttr`. The methods listed here are safe to use in the subclasses. Represents a CDF attribute, providing access to the Entries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. An introduction to CDF attributes can be found in section 2.4 of the CDF user's guide. Each element of the list is a single Entry of the appropriate type. The index to the elements is the Entry number. Multi-dimensional slicing is *not* supported; an Entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry .. autosummary:: ~Attr.append ~Attr.has_entry ~Attr.insert ~Attr.max_idx ~Attr.new ~Attr.number ~Attr.rename ~Attr.type .. automethod:: append .. automethod:: has_entry .. automethod:: insert .. automethod:: max_idx .. automethod:: new .. automethod:: number .. automethod:: rename .. automethod:: type """ def __init__(self, cdf_file, attr_name, create=False): """Initialize this attribute @param cdf_file: CDF file containing this attribute @type cdf_file: :py:class:`pycdf.CDF` @param attr_name: Name of this attribute @type attr_name: str @param create: True to create attribute, False to look up existing. @type create: bool """ self._cdf_file = cdf_file self._raw = False if isinstance(attr_name, str_classes): try: self._name = attr_name.encode('ascii') except AttributeError: self._name = attr_name attrno = ctypes.c_long() if create: self._cdf_file._call(const.CREATE_, const.ATTR_, self._name, self.SCOPE, ctypes.byref(attrno)) self._cdf_file.add_attr_to_cache( self._name, attrno.value, self.SCOPE == const.GLOBAL_SCOPE) else: #Ensure exists, and populate cache. See scope note below attrno, scope = self._cdf_file.attr_num(self._name) else: name = ctypes.create_string_buffer(const.CDF_ATTR_NAME_LEN256 + 1) scope = ctypes.c_long(0) self._cdf_file._call(const.SELECT_, const.ATTR_, ctypes.c_long(attr_name)) #Because it's possible to create a gAttr Python objecting #referencing an Attribute with variable scope, and vice-versa, #do NOT assume the scope matches #(Higher level code checks for that being a bad thing.) self._cdf_file._call( const.GET_, const.ATTR_NAME_, name, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) self._name = name.value.rstrip() if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) self._cdf_file.add_attr_to_cache(self._name, attr_name, scope) def __getitem__(self, key): """Return a slice of Entries. Because Attributes may be sparse, a multi-element slice will return None for those elements which do not have associated Entries. @param key: index or range of Entry number to return @type key: slice or int @return: a list of entries, appropriate type. @raise IndexError: if L{key} is an int and that Entry number does not exist. 
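Example (illustrative; assumes Entries 0 and 2 exist with values 1 and 3,
and Entry 1 does not exist):

>>> attribute[:]   #multi-element slice; None where no Entry exists
[1, None, 3]
>>> attribute[2]   #single existing Entry
3
>>> attribute[1]   #single index to a missing Entry raises IndexError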
""" if key is Ellipsis: key = slice(None, None, None) if hasattr(key, 'indices'): idx = range(*key.indices(self.max_idx() + 1)) return [self._get_entry(i) if self.has_entry(i) else None for i in idx] else: if self.has_entry(key): return self._get_entry(key) else: raise IndexError('list index ' + str(key) + ' out of range.') def _check_other_entries(self, types): """Try to get the type of this entry from others in the Attribute For zAttrs, checks if all other Entries are the same type, and at least one doesn't match its zVar, i.e. Entry type dominates (otherwise assumption is the Var type dominates). For gAttrs, checks all other Entries, and gives priority to the one that's earliest in the possible type list and exists in other Entries. This is only one component of Entry type guessing! :param list types: CDF types that are candidates (match the data) :return: The type discerned from other Entries, or None """ if self.ENTRY_ == const.zENTRY_: #If everything else is the same entry type, #and one is not the same as its var, probably #all entries should be of that type cand_et = None #The Entry type that might work one_var_diff = False #One Var has a type different from Entry for num in range(self.max_idx() + 1): if not self.has_entry(num): continue vartype = self._cdf_file[num].type() entrytype = self.type(num) if vartype != entrytype: one_var_diff = True if cand_et is None: if not entrytype in types: return None #One var has Entry with "impossible" type cand_et = entrytype elif cand_et != entrytype: return None #Two vars have Entries with different types if one_var_diff and cand_et is not None: return cand_et else: # Of those types which exist in other entries, # find the one which is earliest # in types, i.e. the preferred type entrytypes = [self.type(num) for num in range(self.max_idx() + 1) if self.has_entry(num)] entrytypes = [et for et in entrytypes if et in types] if entrytypes: return types[ min([types.index(et) for et in entrytypes])] return None def __setitem__(self, key, data): """Set a slice of Entries. @param key: index or range of Entry numbers to set @type key: slice or int @param data: the data to set these entries to. Normally each entry should be a sequence; if a scalar is provided, it is treated as a single-element list. @type data: scalar or list @raise ValueError: if size of {data} does not match size of L{key} @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. 
""" if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): #Single value, promote everything a dimension idx = (key, key + 1, 1) data = [data] else: idx = key.indices(self.max_idx() + 1) if key.step is None or key.step > 0: #Iterating forward, extend slice to match data if len(data) > len(range(*idx)): idx = (idx[0], idx[0] + idx[2] * len(data), idx[2]) #get, and check, types and sizes for all data #checks first so don't have error after changing half the Entries data_idx = -1 typelist = [] for i in range(*idx): data_idx += 1 if data_idx >= len(data): continue datum = data[data_idx] if datum is None: typelist[i] = (None, None, None) continue (dims, types, elements) = _Hyperslice.types( datum, backward=self._cdf_file.backward) if len(types) <= 0: raise ValueError('Cannot find a matching CDF type.') if len(dims) > 1: raise ValueError('Entries must be scalar or 1D.') elif len(dims) == 1 and isinstance(datum[0], str_classes): raise ValueError('Entry strings must be scalar.') entry_type = None if self.has_entry(i): #If the entry already exists, match its type entry_type = self.type(i) if not entry_type in types: entry_type = None if entry_type is None: #Check other entries for this attribute entry_type = self._check_other_entries(types) if entry_type is None and self.ENTRY_ == const.zENTRY_: #Fall back to zVar type vartype = self._cdf_file[i].type() if vartype in types: entry_type = vartype else: entry_type = types[0] elif entry_type is None: entry_type = types[0] if not entry_type in lib.numpytypedict: raise ValueError('Cannot find a matching numpy type.') typelist.append((dims, entry_type, elements)) data_idx = -1 for i in range(*idx): data_idx += 1 if data_idx >= len(data) or data[data_idx] is None: if self.has_entry(i): del self[i] continue datum = data[data_idx] (dims, entry_type, elements) = typelist[data_idx] self._write_entry(i, datum, entry_type, dims, elements) def __delitem__(self, key): """Delete a slice of Entries. @param key: index or range of Entry numbers to delete @type key: slice or int @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. """ if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): idx = (key, key + 1, 1) else: idx = key.indices(self.max_idx() + 1) for i in range(*idx): self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(i), const.DELETE_, self.ENTRY_) def __iter__(self, current=0): """Iterates over all entries in this Attribute Returns data from one entry at a time until reaches the end. @note: Returned in entry-number order. """ while current <= self.max_idx(): if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current += 1 def __reversed__(self, current=None): """Iterates over all entries in this Attribute Returns data from one entry at a time, starting at end and going to beginning. @note: Returned in entry-number order. """ if current is None: current = self.max_idx() while current >= 0: if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current -= 1 def __len__(self): """Number of Entries for this Attr. NOT same as max Entry number. 
@return: Number of Entries @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_NUMENTRIES_, ctypes.byref(count)) return count.value def __repr__(self): """Returns representation of an attribute Cannot return anything that can be eval'd to create a copy of the attribtute, so just wrap the informal representation in angle brackets. @return: all the data in this attribute @rtype: str """ return '<\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute This is an 'informal' representation in that it cannot be evaluated directly to create an L{Attr}. @return: all the data in this attribute @rtype: str """ if self._cdf_file._opened: return '\n'.join([str(item) for item in self]) else: if isinstance(self._name, str): return 'Attribute "{0}" in closed CDF {1}'.format( self._name, self._cdf_file.pathname) else: return 'Attribute "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self._cdf_file.pathname.decode('ascii')) def insert(self, index, data): """Insert an entry at a particular number Inserts entry at particular number while moving all subsequent entries to one entry number later. Does not close gaps. Parameters ========== index : int index where to put the new entry data : data for the new entry """ max_entry = self.max_idx() if index > max_entry: #Easy case self[index] = data return for i in range(max_entry, index - 1, -1): if self.has_entry(i+1): self.__delitem__(i+1) if self.has_entry(i): self.new(self.__getitem__(i), type=self.type(i), number=i+1) self[index] = data def append(self, data): """Add an entry to end of attribute Puts entry after last defined entry (does not fill gaps) Parameters ========== data : data for the new entry """ self[self.max_idx() + 1] = data def _call(self, *args, **kwargs): """Select this CDF and Attr and call the CDF internal interface @param args: Passed directly to the CDF library interface. @type args: various, see :py:mod:`ctypes`. @return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self._cdf_file._call( const.SELECT_, const.ATTR_, ctypes.c_long(self._cdf_file.attr_num(self._name)[0]), *args, **kwargs) def _entry_len(self, number): """Number of elements in an Entry @param number: number of Entry @type number: int @return: number of elements @rtype: int """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') count = ctypes.c_long(0) self._call( const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_NUMELEMS_, ctypes.byref(count)) return count.value def type(self, number, new_type=None): """Find or change the CDF type of a particular Entry number Parameters ========== number : int number of Entry to check or change Other Parameters ================ new_type type to change this Entry to, from :mod:`~pycdf.const`. Omit to only check type. Returns ======= out : int CDF variable type, see :mod:`~pycdf.const` Notes ===== If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) size = ctypes.c_long(self._entry_len(number)) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATASPEC_, new_type, size, ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') cdftype = ctypes.c_long(0) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATATYPE_, ctypes.byref(cdftype), ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') return cdftype.value def has_entry(self, number): """Check if this attribute has a particular Entry number Parameters ========== number : int number of Entry to check or change Returns ======= out : bool True if ``number`` is a valid entry number; False if not """ status = self._call(const.CONFIRM_, self.ENTRY_EXISTENCE_, ctypes.c_long(number), ignore=(const.NO_SUCH_ENTRY, )) return not status == const.NO_SUCH_ENTRY def max_idx(self): """Maximum index of Entries for this Attr Returns ======= out : int maximum Entry number """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_MAXENTRY_, ctypes.byref(count)) return count.value def new(self, data, type=None, number=None): """Create a new Entry in this Attribute .. note:: If ``number`` is provided and an Entry with that number already exists, it will be overwritten. Parameters ========== data data to put in the Entry Other Parameters ================ type : int type of the new Entry, from :mod:`~pycdf.const` (otherwise guessed from ``data``) number : int Entry number to write, default is lowest available number. """ if number is None: number = 0 while self.has_entry(number): number += 1 (dims, types, elements) = _Hyperslice.types( data, backward=self._cdf_file.backward) if type is None: #Guess based on other entries type = self._check_other_entries(types) if type is None and self.ENTRY_ == const.zENTRY_: #Try to match variable type vartype = self._cdf_file[number].type() if vartype in types: type = vartype if type is None: type = types[0] elif hasattr(type, 'value'): type = type.value self._write_entry(number, data, type, dims, elements) def number(self): """Find the attribute number for this attribute Returns ======= out : int attribute number """ no = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.ATTR_NUMBER_, self._name, ctypes.byref(no)) return no.value def global_scope(self): """Determine scope of this attribute. Returns ======= out : bool True if global (i.e. gAttr), False if zAttr """ return self._cdf_file.attr_num(self._name)[1] def rename(self, new_name): """Rename this attribute Renaming a zAttribute renames it for *all* zVariables in this CDF! 
Parameters ========== new_name : str the new name of the attribute """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_ATTR_NAME_LEN256: raise CDFError(const.BAD_ATTR_NAME) self._call(const.PUT_, const.ATTR_NAME_, enc_name) self._cdf_file.add_attr_to_cache( enc_name, *self._cdf_file.attr_num(self._name)) #still in cache del self._cdf_file._attr_info[self._name] self._name = enc_name def _get_entry(self, number): """Read an Entry associated with this L{Attr} @param number: number of Entry to return @type number: int @return: data from entry numbered L{number} @rtype: list or str """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') #Make a big enough buffer length = self._entry_len(number) cdftype = self.type(number) if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): buff = numpy.empty((), 'S{0}'.format(length), order='C') else: if not cdftype in lib.numpytypedict: raise CDFError(const.BAD_DATA_TYPE) buff = numpy.empty((length,), lib.numpytypedict[cdftype], order='C') buff = numpy.require(buff, requirements=('C', 'A', 'W')) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATA_, buff.ctypes.data_as(ctypes.c_void_p)) #decode if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): if str == bytes or self._raw: #Py2k, leave as bytes result = bytes(buff) else: #Py3k, make unicode result = str(numpy.char.array(buff).decode()) else: if not self._raw: if cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(buff) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(buff) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(buff) else: result = buff else: result = buff if length == 1: result = result[0] return result def _write_entry(self, number, data, cdf_type, dims, elements): """Write an Entry to this Attr. @param number: number of Entry to write @type number: int @param data: data to write @param cdf_type: the CDF type to write, from :py:mod:`pycdf.const` @param dims: dimensions of L{data} @type dims: list @param elements: number of elements in L{data}, 1 unless it is a string @type elements: int """ if len(dims) == 0: n_write = 1 else: n_write = dims[0] if cdf_type in (const.CDF_CHAR.value, const.CDF_UCHAR.value): data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.dtype('S' + str(elements))) n_write = elements elif cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data), except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) elif cdf_type in lib.numpytypedict: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=lib.numpytypedict[cdf_type]) else: raise CDFError(const.BAD_DATA_TYPE) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATA_, ctypes.c_long(cdf_type), ctypes.c_long(n_write), data.ctypes.data_as(ctypes.c_void_p)) def _delete(self): """Delete this Attribute Also deletes all Entries associated with it. 
""" self._call(const.DELETE_, const.ATTR_) self._cdf_file.clear_attr_from_cache(self._name) self._name = None class zAttr(Attr): """zAttribute for zVariables within a CDF. .. warning:: Because zAttributes are shared across all variables in a CDF, directly manipulating them may have unexpected consequences. It is safest to operate on zEntries via :class:`zAttrList`. .. note:: When accessing a zAttr, pyCDF exposes only the zEntry corresponding to the associated zVariable. See Also ======== :class:`Attr` """ ENTRY_ = const.zENTRY_ ENTRY_DATA_ = const.zENTRY_DATA_ SCOPE = const.VARIABLE_SCOPE ENTRY_EXISTENCE_ = const.zENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMzENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXzENTRY_ ENTRY_NUMELEMS_ = const.zENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.zENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.zENTRY_DATASPEC_ def insert(self, index, data): """Insert entry at particular index number Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError def append(self, index, data): """Add entry to end of attribute list Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError class gAttr(Attr): """Global Attribute for a CDF Represents a CDF attribute, providing access to the gEntries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. Normally accessed by providing a key to a :class:`gAttrList`: >>> attribute = cdffile.attrs['attribute_name'] >>> first_gentry = attribute[0] Each element of the list is a single gEntry of the appropriate type. The index to the elements is the gEntry number. A gEntry may be either a single string or a 1D array of numerical type. Entries of numerical type (everything but CDF_CHAR and CDF_UCHAR) with a single element are returned as scalars; multiple-element entries are returned as a list. No provision is made for accessing below the entry level; the whole list is returned at once (but Python's slicing syntax can be used to extract individual items from that list.) Multi-dimensional slicing is *not* supported; an entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry gEntries are *not* necessarily contiguous; a gAttribute may have an entry 0 and entry 2 without an entry 1. :meth:`~Attr.len` will return the *number* of gEntries; use :meth:`~Attr.max_idx` to find the highest defined gEntry number and :meth:`~Attr.has_entry` to determine if a particular gEntry number exists. Iterating over all entries is also supported:: >>> entrylist = [entry for entry in attribute] Deleting gEntries will leave a "hole": >>> attribute[0:3] = [1, 2, 3] >>> del attribute[1] >>> attribute.has_entry(1) False >>> attribute.has_entry(2) True >>> print attribute[0:3] [1, None, 3] Multi-element slices over nonexistent gEntries will return ``None`` where no entry exists. Single-element indices for nonexistent gEntries will raise ``IndexError``. Assigning ``None`` to a gEntry will delete it. 
When assigning to a gEntry, the type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing gEntry of the same number in this gAttribute #. other gEntries in this gAttribute #. data-matching constraints described in :meth:`CDF.new`. See Also ======== :class:`Attr` """ ENTRY_ = const.gENTRY_ ENTRY_DATA_ = const.gENTRY_DATA_ SCOPE = const.GLOBAL_SCOPE ENTRY_EXISTENCE_ = const.gENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMgENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXgENTRY_ ENTRY_NUMELEMS_ = const.gENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.gENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.gENTRY_DATASPEC_ class AttrList(MutableMapping): """Object representing a list of attributes. .. warning:: This class should not be used directly, but only via its subclasses, :class:`gAttrList` and :class:`zAttrList`. Methods listed here are safe to use from the subclasses. .. autosummary:: ~AttrList.clone ~AttrList.copy ~AttrList.from_dict ~AttrList.new ~AttrList.rename .. automethod:: clone .. automethod:: copy .. automethod:: from_dict .. automethod:: new .. automethod:: rename """ def __init__(self, cdf_file, special_entry=None): """Initialize the attribute collection @param cdf_file: CDF these attributes are in @type cdf_file: :py:class:`pycdf.CDF` @param special_entry: callable which returns a "special" entry number, used to limit results for zAttrs to those which match the zVar (i.e. the var number) @type special_entry: callable """ self._cdf_file = cdf_file self.special_entry = special_entry def __getitem__(self, name): """Find an Attribute by name @param name: name of the Attribute to return @type name: str @return: attribute named L{name} @rtype: L{Attr} @raise KeyError: if there is no attribute named L{name} @raise CDFError: other errors in CDF library """ try: attrib = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attrib.global_scope() != self.global_scope: raise KeyError(name + ': no ' + self.attr_name + ' by that name.') return attrib def __setitem__(self, name, data): """Create an Attribute or change its entries @param name: name of Attribute to change @type name: str @param data: Entries to populate this Attribute with. Any existing Entries will be deleted! Another C{Attr} may be specified, in which case all its entries are copied. @type data: scalar, list, or L{Attr} """ if isinstance(data, AttrList): if name in self: del self[name] attr = self._get_or_create(name) for entryno in range(data.max_idx()): if data.has_entry(entryno): attr.new(data[entryno], data.type(entryno), entryno) else: attr = self._get_or_create(name) if isinstance(data, str_classes): data = [data] else: try: junk = len(data) except TypeError: data = [data] attr[:] = data del attr[len(data):] def __delitem__(self, name): """Delete an Attribute (and all its entries) @param name: name of Attribute to delete @type name: str """ try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) attr._delete() def __iter__(self, current=0): """Iterates over all Attr in this CDF or variable Returns name of one L{Attr} at a time until reaches the end. @note: Returned in number order. 
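Example (illustrative; ``cdffile`` is an open :class:`CDF`, so
``cdffile.attrs`` is its gAttrList):

>>> for name in cdffile.attrs:   #iterate attribute names, in number order
...     print(name)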
""" count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) while current < count.value: candidate = self.AttrType(self._cdf_file, current) if candidate.global_scope() == self.global_scope: if self.special_entry is None or \ candidate.has_entry(self.special_entry()): if str == bytes: value = yield(candidate._name) else: value = yield(candidate._name.decode()) if value != None: current = self[value].number() current += 1 def __repr__(self): """Returns representation of attribute list Cannot return anything that can be eval'd to create a copy of the list, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<' + self.__class__.__name__ + ':\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute list This is an 'informal' representation in that it cannot be evaluated directly to create an L{AttrList}. @return: all the data in this list of attributes @rtype: str """ if self._cdf_file._opened: return '\n'.join([key + ': ' + ( ('\n' + ' ' * (len(key) + 2)).join( [str(value[i]) + ' [' + lib.cdftypenames[value.type(i)] + ']' for i in range(value.max_idx() + 1) if value.has_entry(i)]) if isinstance(value, Attr) else str(value) + ' [' + lib.cdftypenames[self.type(key)] + ']' ) for (key, value) in sorted(self.items())]) else: if isinstance(self._cdf_file.pathname, str): return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname) else: return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname.decode('ascii')) def clone(self, master, name=None, new_name=None): """ Clones another attribute list, or one attribute from it, into this list. Parameters ========== master : AttrList the attribute list to copy from. This can be any dict-like object. Other Parameters ================ name : str (optional) name of attribute to clone (default: clone entire list) new_name : str (optional) name of the new attribute, default ``name`` """ if name is None: self._clone_list(master) else: self._clone_attr(master, name, new_name) def copy(self): """ Create a copy of this attribute list Returns ======= out : dict copy of the entries for all attributes in this list """ return dict((key, value[:] if isinstance(value, Attr) else value) for (key, value) in self.items()) def new(self, name, data=None, type=None): """ Create a new Attr in this AttrList Parameters ========== name : str name of the new Attribute Other Parameters ================ data data to put into the first entry in the new Attribute type CDF type of the first entry from :mod:`~pycdf.const`. Only used if data are specified. Raises ====== KeyError : if the name already exists in this list """ if name in self: raise KeyError(name + ' already exists.') #A zAttr without an Entry in this zVar will be a "get" not "create" attr = self._get_or_create(name) if data is not None: if self.special_entry is None: attr.new(data, type) else: attr.new(data, type, self.special_entry()) def rename(self, old_name, new_name): """ Rename an attribute in this list Renaming a zAttribute renames it for *all* zVariables in this CDF! Parameters ========== old_name : str the current name of the attribute new_name : str the new name of the attribute """ AttrList.__getitem__(self, old_name).rename(new_name) def from_dict(self, in_dict): """ Fill this list of attributes from a dictionary .. deprecated:: 0.1.5 Use :meth:`~pycdf.AttrList.clone` instead; it supports cloning from dictionaries. 
Parameters ========== in_dict : dict Attribute list is populated entirely from this dictionary; all existing attributes are deleted. """ warnings.warn("from_dict is deprecated and will be removed. Use clone.", DeprecationWarning) for k in in_dict: self[k] = in_dict[k] for k in list(self): if not k in in_dict: del self[k] def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{AttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name self[new_name] = master[name] def _clone_list(self, master): """Clones this attribute list from another @param master: the attribute list to copy from @type master: L{AttrList} """ for name in master: self._clone_attr(master, name) for name in list(self): #Can't iterate over a list we're changing if not name in master: del self[name] def _get_or_create(self, name): """Retrieve L{Attr} or create it if it doesn't exist @param name: name of the attribute to look up or create @type name: str @return: attribute with this name @rtype: L{Attr} """ attr = None try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status != const.NO_SUCH_ATTR: raise if attr is None: attr = self.AttrType(self._cdf_file, name, True) elif attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) return attr class gAttrList(AttrList): """ Object representing *all* the gAttributes in a CDF. Normally accessed as an attribute of an open :class:`CDF`: >>> global_attribs = cdffile.attrs Appears as a dictionary: keys are attribute names; each value is an attribute represented by a :class:`gAttr` object. To access the global attribute TEXT: >>> text_attr = cdffile.attrs['TEXT'] See Also ======== :class:`AttrList` """ AttrType = gAttr attr_name = 'gAttribute' global_scope = True def __len__(self): """ Number of gAttributes in this CDF Returns ======= out : int number of gAttributes in the CDF """ count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMgATTRS_, ctypes.byref(count)) return count.value class zAttrList(AttrList): """Object representing *all* the zAttributes in a zVariable. Normally accessed as an attribute of a :class:`Var` in an open CDF: >>> epoch_attribs = cdffile['Epoch'].attrs Appears as a dictionary: keys are attribute names, values are the value of the zEntry associated with the appropriate zVariable. Each vAttribute in a CDF may only have a *single* entry associated with each variable. The entry may be a string, a single numerical value, or a series of numerical values. Entries with multiple values are returned as an entire list; direct access to the individual elements is not possible. Example: finding the first dependency of (ISTP-compliant) variable ``Flux``: >>> print cdffile['Flux'].attrs['DEPEND_0'] zAttributes are shared among zVariables, one zEntry allowed per zVariable. (pyCDF hides this detail.) Deleting the last zEntry for a zAttribute will delete the underlying zAttribute. zEntries are created and destroyed by the usual dict methods on the zAttrlist: >>> epoch_attribs['new_entry'] = [1, 2, 4] #assign a list to new zEntry >>> del epoch_attribs['new_entry'] #delete the zEntry The type of the zEntry is guessed from data provided. 
The type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing zEntry corresponding to this zVar #. other zEntries in this zAttribute #. the type of this zVar #. data-matching constraints described in :py:meth:`CDF.new` See Also ======== :class:`AttrList` """ AttrType = zAttr attr_name = 'zAttribute' global_scope = False def __init__(self, zvar): """Initialize the attribute collection @param zvar: zVariable these attributes are in @param zvar: :py:class:`pycdf.Var` """ super(zAttrList, self).__init__(zvar.cdf_file, zvar._num) self._zvar = zvar def __getitem__(self, name): """Find an zEntry by name @param name: name of the zAttribute to return @type name: str @return: attribute named L{name} @rtype: L{zAttr} @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if attrib.has_entry(zvar_num): attrib._raw = self._zvar._raw return attrib[zvar_num] else: raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) def __delitem__(self, name): """Delete an zEntry by name @param name: name of the zEntry to delete @type name: str @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library @note: If this is the only remaining entry, the Attribute will be deleted. """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(str(name) + ': no such attribute for variable ' + str(self._zvar._name)) del attrib[zvar_num] if len(attrib) == 0: attrib._delete() def __setitem__(self, name, data): """Sets a zEntry by name The type of the zEntry is guessed from L{data}. The type is chosen to match the data; subject to that constraint, it will try to match (in order): 1. existing zEntry corresponding to this zVar 2. other zEntries in this zAttribute 3. the type of this zVar 4. data-matching constraints described in L{_Hyperslice.types} @param name: name of zAttribute; zEntry for this zVariable will be set in zAttribute by this name @type name: str @raise CDFError: errors in CDF library @raise ValueError: if unable to find a valid CDF type matching L{data}, or if L{data} is the wrong dimensions. """ try: attr = super(zAttrList, self).__getitem__(name) except KeyError: attr = zAttr(self._cdf_file, name, True) attr._raw = self._zvar._raw attr[self._zvar._num()] = data def __len__(self): """Number of zAttributes in this variable @return: number of zAttributes in the CDF which have entries for this variable. @rtype: int """ length = 0 count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) current = 0 while current < count.value: candidate = zAttr(self._cdf_file, current) if not candidate.global_scope(): if candidate.has_entry(self._zvar._num()): length += 1 current += 1 return length def type(self, name, new_type=None): """Find or change the CDF type of a zEntry in this zVar @param name: name of the zAttr to check or change @type name: str @param new_type: type to change it to, see :py:mod:`pycdf.const` @type new_type: ctypes.c_long @return: CDF variable type, see :py:mod:`pycdf.const` @rtype: int @note: If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) return attrib.type(zvar_num, new_type) def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{zAttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name if new_name in self: del self[new_name] self.new(new_name, master[name], master.type(name) if hasattr(master, 'type') else None)
__getitem__
Return a slice of Entries.

Because Attributes may be sparse, a multi-element slice will return
None for those elements which do not have associated Entries.

@param key: index or range of Entry number to return
@type key: slice or int
@return: a list of entries, appropriate type.
@raise IndexError: if L{key} is an int and that Entry number does not exist.
#!/usr/bin/env python # -*- coding: utf-8 -*- """ das developers note: This a is modification of the original SpacePy pycdf package. All refereneces to the greater spacepy package have been removed to create a small standalone module. --cwp 2018-10-18 The libcdf.so location code has been changed to find the version installed in anaconda. --cwp 2020-04-06 This package provides a Python interface to the Common Data Format (CDF) library used for many NASA missions, available at http://cdf.gsfc.nasa.gov/. It is targeted at Python 2.6+ and should work without change on either Python 2 or Python 3. The interface is intended to be 'pythonic' rather than reproducing the C interface. To open or close a CDF and access its variables, see the :class:`CDF` class. Accessing data within the variables is via the :class:`Var` class. The :data:`lib` object provides access to some routines that affect the functionality of the library in general. The :mod:`~pycdf.const` module contains constants useful for accessing the underlying library. Authors: Jon Niehof Institution: University of New Hampshire Contact: [email protected] Copyright 2010-2015 Los Alamos National Security, LLC. """ __contact__ = 'Jon Niehof, [email protected]' try: from collections.abc import MutableMapping, MutableSequence except ImportError: from collections import MutableMapping, MutableSequence import ctypes import ctypes.util import datetime import operator import os import os.path import shutil import sys import tempfile import warnings import weakref import numpy import numpy.ma #Import const AFTER library loaded, so failed load doesn't leave half-imported #from . import const try: str_classes = (str, bytes, unicode) except NameError: str_classes = (str, bytes) class Library(object): """ Abstraction of the base CDF C library and its state. Not normally intended for end-user use. An instance of this class is created at package load time as the :data:`~pycdf.lib` variable, providing access to the underlying C library if necessary. The CDF library itself is described in section 2.1 of the CDF user's guide, as well as the CDF C reference manual. Calling the C library directly requires knowledge of :mod:`ctypes`. Instantiating this object loads the C library, see :doc:`/pycdf` docs for details. .. autosummary:: ~Library.call ~Library.check_status ~Library.datetime_to_epoch ~Library.datetime_to_epoch16 ~Library.datetime_to_tt2000 ~Library.epoch_to_datetime ~Library.epoch_to_epoch16 ~Library.epoch_to_num ~Library.epoch_to_tt2000 ~Library.epoch16_to_datetime ~Library.epoch16_to_epoch ~Library.epoch16_to_tt2000 ~Library.set_backward supports_int8 ~Library.tt2000_to_datetime ~Library.tt2000_to_epoch ~Library.tt2000_to_epoch16 v_datetime_to_epoch v_datetime_to_epoch16 v_datetime_to_tt2000 v_epoch_to_datetime v_epoch_to_tt2000 v_epoch16_to_datetime v_epoch16_to_tt2000 v_tt2000_to_datetime v_tt2000_to_epoch v_tt2000_to_epoch16 libpath version .. automethod:: call .. automethod:: check_status .. automethod:: datetime_to_epoch .. automethod:: datetime_to_epoch16 .. automethod:: datetime_to_tt2000 .. automethod:: epoch_to_datetime .. automethod:: epoch_to_epoch16 .. automethod:: epoch_to_num .. automethod:: epoch_to_tt2000 .. automethod:: epoch16_to_datetime .. automethod:: epoch16_to_epoch .. automethod:: epoch16_to_tt2000 .. automethod:: set_backward .. attribute:: supports_int8 True if this library supports INT8 and TIME_TT2000 types; else False. .. automethod:: tt2000_to_datetime .. automethod:: tt2000_to_epoch .. 
automethod:: tt2000_to_epoch16 .. method:: v_datetime_to_epoch(datetime) A vectorized version of :meth:`datetime_to_epoch` which takes a numpy array of datetimes as input and returns an array of epochs. .. method:: v_datetime_to_epoch16(datetime) A vectorized version of :meth:`datetime_to_epoch16` which takes a numpy array of datetimes as input and returns an array of epoch16. .. method:: v_datetime_to_tt2000(datetime) A vectorized version of :meth:`datetime_to_tt2000` which takes a numpy array of datetimes as input and returns an array of TT2000. .. method:: v_epoch_to_datetime(epoch) A vectorized version of :meth:`epoch_to_datetime` which takes a numpy array of epochs as input and returns an array of datetimes. .. method:: v_epoch_to_tt2000(epoch) A vectorized version of :meth:`epoch_to_tt2000` which takes a numpy array of epochs as input and returns an array of tt2000s. .. method:: v_epoch16_to_datetime(epoch0, epoch1) A vectorized version of :meth:`epoch16_to_datetime` which takes a numpy array of epoch16 as input and returns an array of datetimes. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_epoch16_to_tt2000(epoch16) A vectorized version of :meth:`epoch16_to_tt2000` which takes a numpy array of epoch16 as input and returns an array of tt2000s. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_tt2000_to_datetime(tt2000) A vectorized version of :meth:`tt2000_to_datetime` which takes a numpy array of tt2000 as input and returns an array of datetimes. .. method:: v_tt2000_to_epoch(tt2000) A vectorized version of :meth:`tt2000_to_epoch` which takes a numpy array of tt2000 as input and returns an array of epochs. .. method:: v_tt2000_to_epoch16(tt2000) A vectorized version of :meth:`tt2000_to_epoch16` which takes a numpy array of tt2000 as input and returns an array of epoch16. .. attribute:: libpath The path where pycdf found the CDF C library, potentially useful in debugging. If this contains just the name of a file (with no path information), then the system linker found the library for pycdf. On Linux, ``ldconfig -p`` may be useful for displaying the system's library resolution. .. attribute:: version Version of the CDF library, (version, release, increment, subincrement) """ def __init__(self, libpath=None, library=None): """Load the CDF C library. Searches for the library in the order: 1. Appropriately-named file in CDF_LIB 2. Appropriately-named file in CDF_BASE 3. Standard library search path @raise CDFError: BAD_DATA_TYPE if can't map types properly """ if not 'CDF_TMP' in os.environ: os.environ['CDF_TMP'] = tempfile.gettempdir() if not library: if not libpath: self.libpath, self._library = self._find_lib() if self._library is None: raise Exception(( 'Cannot load CDF C library; checked {0}. 
' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(self.libpath))) else: self._library = ctypes.CDLL(libpath) self.libpath = libpath else: self._library = library self.libpath = libpath self._library.CDFlib.restype = ctypes.c_long #commonly used, so set it up here self._library.EPOCHbreakdown.restype = ctypes.c_long self._library.computeEPOCH.restype = ctypes.c_double self._library.computeEPOCH.argtypes = [ctypes.c_long] * 7 self._library.computeEPOCH16.restype = ctypes.c_double self._library.computeEPOCH16.argtypes = [ctypes.c_long] * 10 + \ [ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDFsetFileBackward'): self._library.CDFsetFileBackward.restype = None self._library.CDFsetFileBackward.argtypes = [ctypes.c_long] #Map old name to the 3.7.1+ name if not hasattr(self._library, 'computeTT2000') \ and hasattr(self._library, 'CDF_TT2000_from_UTC_parts'): self._library.computeTT2000 \ = self._library.CDF_TT2000_from_UTC_parts if hasattr(self._library, 'computeTT2000'): self._library.computeTT2000.restype = ctypes.c_longlong self._library.computeTT2000.argtypes = \ [ctypes.c_double] *9 #Map old name to the 3.7.1+ name if not hasattr(self._library, 'breakdownTT2000') \ and hasattr(self._library, 'CDF_TT2000_to_UTC_parts'): self._library.breakdownTT2000 \ = self._library.CDF_TT2000_to_UTC_parts if hasattr(self._library, 'breakdownTT2000'): self._library.breakdownTT2000.restype = None self._library.breakdownTT2000.argtypes = \ [ctypes.c_longlong] + [ctypes.POINTER(ctypes.c_double)] * 9 if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH'): self._library.CDF_TT2000_to_UTC_EPOCH.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH.argtypes = [ctypes.c_longlong] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH'): self._library.CDF_TT2000_from_UTC_EPOCH.restype = ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH.argtypes = [ctypes.c_double] if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH16'): self._library.CDF_TT2000_to_UTC_EPOCH16.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH16.argtypes = \ [ctypes.c_longlong, ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH16'): self._library.CDF_TT2000_from_UTC_EPOCH16.restype = \ ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH16.argtypes = \ [ctypes.POINTER(ctypes.c_double * 2)] #Get CDF version information ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) sub = ctypes.c_char(b' ') self.call(const.GET_, const.LIB_VERSION_, ctypes.byref(ver), const.GET_, const.LIB_RELEASE_, ctypes.byref(rel), const.GET_, const.LIB_INCREMENT_, ctypes.byref(inc), const.GET_, const.LIB_subINCREMENT_, ctypes.byref(sub)) ver = ver.value rel = rel.value inc = inc.value sub = sub.value self.version = (ver, rel, inc, sub) self._del_middle_rec_bug = ver < 3 or (ver == 3 and (rel < 4 or (rel == 4 and inc < 1))) self.supports_int8 = (ver > 3 or (ver == 3 and rel >=4)) self.cdftypenames = {const.CDF_BYTE.value: 'CDF_BYTE', const.CDF_CHAR.value: 'CDF_CHAR', const.CDF_INT1.value: 'CDF_INT1', const.CDF_UCHAR.value: 'CDF_UCHAR', const.CDF_UINT1.value: 'CDF_UINT1', const.CDF_INT2.value: 'CDF_INT2', const.CDF_UINT2.value: 'CDF_UINT2', const.CDF_INT4.value: 'CDF_INT4', const.CDF_UINT4.value: 'CDF_UINT4', const.CDF_INT8.value: 'CDF_INT8', const.CDF_FLOAT.value: 'CDF_FLOAT', const.CDF_REAL4.value: 'CDF_REAL4', const.CDF_DOUBLE.value: 'CDF_DOUBLE', const.CDF_REAL8.value: 'CDF_REAL8', const.CDF_EPOCH.value: 'CDF_EPOCH', 
const.CDF_EPOCH16.value: 'CDF_EPOCH16', const.CDF_TIME_TT2000.value: 'CDF_TIME_TT2000', } self.numpytypedict = {const.CDF_BYTE.value: numpy.int8, const.CDF_CHAR.value: numpy.int8, const.CDF_INT1.value: numpy.int8, const.CDF_UCHAR.value: numpy.uint8, const.CDF_UINT1.value: numpy.uint8, const.CDF_INT2.value: numpy.int16, const.CDF_UINT2.value: numpy.uint16, const.CDF_INT4.value: numpy.int32, const.CDF_UINT4.value: numpy.uint32, const.CDF_INT8.value: numpy.int64, const.CDF_FLOAT.value: numpy.float32, const.CDF_REAL4.value: numpy.float32, const.CDF_DOUBLE.value: numpy.float64, const.CDF_REAL8.value: numpy.float64, const.CDF_EPOCH.value: numpy.float64, const.CDF_EPOCH16.value: numpy.dtype((numpy.float64, 2)), const.CDF_TIME_TT2000.value: numpy.int64, } self.timetypes = [const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value] if not self.supports_int8: del self.cdftypenames[const.CDF_INT8.value] del self.numpytypedict[const.CDF_INT8.value] del self.cdftypenames[const.CDF_TIME_TT2000.value] del self.numpytypedict[const.CDF_TIME_TT2000.value] elif sys.platform.startswith('linux') \ and os.uname()[4].startswith('arm') \ and hasattr(self._library, 'computeTT2000') \ and self._library.computeTT2000( 2010, 1, 1, 0, 0, 0, 0, 0, 0) != 315576066184000000: #TT2000 call failed, so probably need to type-pun #double arguments to variadic functions. #Calling convention for non-variadic functions with floats #is unique, but convention for ints is same as variadic. #So type-pun arguments to integers to force that calling #convention. if ctypes.sizeof(ctypes.c_longlong) != \ ctypes.sizeof(ctypes.c_double): warnings.warn('ARM with unknown type sizes; ' 'TT2000 functions will not work.') else: self._library.computeTT2000.argtypes = \ [ctypes.c_longlong] * 9 c_ll_p = ctypes.POINTER(ctypes.c_longlong) if self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( 2010)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) != 315576066184000000: warnings.warn('ARM with unknown calling convention; ' 'TT2000 functions will not work.') self.datetime_to_tt2000 = self._datetime_to_tt2000_typepunned v_epoch16_to_datetime = numpy.frompyfunc( self.epoch16_to_datetime, 2, 1) self.v_epoch16_to_datetime = \ lambda x: v_epoch16_to_datetime(x[..., 0], x[..., 1]) self.v_epoch_to_datetime = numpy.frompyfunc( self.epoch_to_datetime, 1, 1) self.v_tt2000_to_datetime = numpy.frompyfunc( self.tt2000_to_datetime, 1, 1) self.v_datetime_to_epoch = numpy.vectorize( self.datetime_to_epoch, otypes=[numpy.float64]) v_datetime_to_epoch16 = numpy.frompyfunc( self.datetime_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. 
We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_datetime_to_epoch16(x): retval = numpy.require(v_datetime_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_datetime_to_epoch16 = _v_datetime_to_epoch16 self.v_datetime_to_tt2000 = numpy.vectorize( self.datetime_to_tt2000, otypes=[numpy.int64]) self.v_epoch_to_tt2000 = numpy.vectorize( self.epoch_to_tt2000, otypes=[numpy.int64]) self.v_tt2000_to_epoch = numpy.vectorize( self.tt2000_to_epoch, otypes=[numpy.float64]) v_epoch16_to_tt2000 = numpy.frompyfunc( self.epoch16_to_tt2000, 2, 1) self.v_epoch16_to_tt2000 = \ lambda x: v_epoch16_to_tt2000(x[..., 0], x[..., 1]) v_tt2000_to_epoch16 = numpy.frompyfunc( self.tt2000_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_tt2000_to_epoch16(x): retval = numpy.require(v_tt2000_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_tt2000_to_epoch16 = _v_tt2000_to_epoch16 if not self.supports_int8: self.datetime_to_tt2000 = self._bad_tt2000 self.tt2000_to_datetime = self._bad_tt2000 self.v_datetime_to_tt2000 = self._bad_tt2000 self.v_tt2000_to_datetime = self._bad_tt2000 self.epoch_to_tt2000 = self._bad_tt2000 self.v_epoch_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch = self._bad_tt2000 self.v_tt2000_to_epoch = self._bad_tt2000 self.epoch_16_to_tt2000 = self._bad_tt2000 self.v_epoch16_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch16 = self._bad_tt2000 self.v_tt2000_to_epoch16 = self._bad_tt2000 #Default to V2 CDF self.set_backward(True) @staticmethod def _find_lib(): """ Search for the CDF library Searches in likely locations for CDF libraries and attempts to load them. Stops at first successful load and, if fails, reports all the files that were tried as libraries. Returns ======= out : tuple This is either (path to library, loaded library) or, in the event of failure, (None, list of libraries tried) """ failed = [] for libpath in Library._lib_paths(): try: lib = ctypes.CDLL(libpath) except: failed.append(libpath) else: return libpath, lib return (failed, None) @staticmethod def _lib_paths(): """Find candidate paths for the CDF library Does not check that the library is actually in any particular directory, just returns a list of possible locations, in priority order. Returns ======= out : generator of str paths that look like the CDF library """ #What the library might be named names = { 'win32': ['cdf.dll'], 'darwin': ['libcdf.dylib', 'cdf.dylib', 'libcdf.so'], 'linux2': ['libcdf.so'], 'linux': ['libcdf.so'], } names = names.get(sys.platform, ['libcdf.so']) #All existing CDF-library-like paths within a directory search_dir = lambda x: \ [os.path.join(x, fname) for fname in names if os.path.exists(os.path.join(x, fname))] # Only use anaconda locations... # Defined during builds ... if 'PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['PREFIX'], 'lib')): yield p # defined when conda is activated ... 
if 'CONDA_PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'lib')): yield p # Special subdirectory for anaconda unix packages on windows if 'LIBRARY_BIN' in os.environ: for p in search_dir(os.environ['LIBRARY_BIN']): yield p ctypespath = ctypes.util.find_library( 'cdf.dll' if sys.platform == 'win32' else 'cdf') if ctypespath: yield ctypespath def check_status(self, status, ignore=()): """ Raise exception or warning based on return status of CDF call Parameters ========== status : int status returned by the C library Other Parameters ================ ignore : sequence of ctypes.c_long CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. (Default none). Raises ====== CDFError : if status < CDF_WARN, indicating an error Warns ===== CDFWarning : if CDF_WARN <= status < CDF_OK, indicating a warning. Returns ======= out : int status (unchanged) """ if status == const.CDF_OK or status in ignore: return status if status < const.CDF_WARN: raise CDFError(status) else: warning = CDFWarning(status) warning.warn() return status def call(self, *args, **kwargs): """ Call the CDF internal interface Passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with :meth:`check_status`. Terminal NULL is automatically added to args. Parameters ========== args : various, see :mod:`ctypes` Passed directly to the CDF library interface. Useful constants are defined in the :mod:`~pycdf.const` module. Other Parameters ================ ignore : sequence of CDF statuses sequence of CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. Returns ======= out : int CDF status from the library Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ if 'ignore' in kwargs: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) ), kwargs['ignore']) else: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) )) def set_backward(self, backward=True): """ Set backward compatibility mode for new CDFs Unless backward compatible mode is set, CDF files created by the version 3 library can not be read by V2. Parameters ========== backward : boolean Set backward compatible mode if True; clear it if False. Raises ====== ValueError : if backward=False and underlying CDF library is V2 """ if self.version[0] < 3: if not backward: raise ValueError( 'Cannot disable backward-compatible mode for CDF version 2.') else: return self._library.CDFsetFileBackward(const.BACKWARDFILEon if backward else const.BACKWARDFILEoff) def epoch_to_datetime(self, epoch): """ Converts a CDF epoch value to a datetime Parameters ========== epoch : float epoch value from CDF Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
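Examples
========
A rough usage sketch (not part of the original docstring): ``lib`` is the
module-level :class:`Library` instance, and per the comment in
:meth:`epoch_to_num`, the EPOCH value 31622400000.0 corresponds to
0001-01-01 00:00 UT:

>>> lib.epoch_to_datetime(31622400000.0)
datetime.datetime(1, 1, 1, 0, 0)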
See Also ======== v_epoch_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) self._library.EPOCHbreakdown(ctypes.c_double(epoch), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999000) else: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, msec.value * 1000) def datetime_to_epoch(self, dt): """ Converts a Python datetime to a CDF Epoch value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : float epoch corresponding to dt See Also ======== v_datetime_to_epoch """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) micro = dt.microsecond % 1000 if micro >= 500 and dt.year < 9999: dt += datetime.timedelta(0, 0, 1000) return self._library.computeEPOCH(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000)) def epoch16_to_datetime(self, epoch0, epoch1): """ Converts a CDF epoch16 value to a datetime .. note:: The call signature has changed since SpacePy 0.1.2. Formerly this method took a single argument with two values; now it requires two arguments (one for each value). To convert existing code, replace ``epoch16_to_datetime(epoch)`` with ``epoch16_to_datetime(*epoch)``. Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. See Also ======== v_epoch16_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) usec = ctypes.c_long(0) nsec = ctypes.c_long(0) psec = ctypes.c_long(0) self._library.EPOCH16breakdown((ctypes.c_double * 2)(epoch0, epoch1), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec), ctypes.byref(psec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) micro = int(float(msec.value) * 1000 + float(usec.value) + float(nsec.value) / 1000 + float(psec.value) / 1e6 + 0.5) if micro < 1000000: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_epoch16(self, dt): """ Converts a Python datetime to a CDF Epoch16 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : list of float epoch16 corresponding to dt See Also ======== v_datetime_to_epoch16 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) #Default to "illegal epoch" epoch16 = (ctypes.c_double * 2)(-1., -1.) 
if self._library.computeEPOCH16(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0, 0, epoch16): return (-1., -1.) #Failure, so illegal epoch return (epoch16[0], epoch16[1]) def epoch_to_epoch16(self, epoch): """ Converts a CDF EPOCH to a CDF EPOCH16 value Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : (double, double) EPOCH16 corresponding to epoch """ e = numpy.require(epoch, numpy.float64) s = numpy.trunc(e / 1000.0) #ugly numpy stuff, probably a better way.... res = numpy.hstack((s, (e - s * 1000.0) * 1e9)) if len(res) <= 2: return res newshape = list(res.shape[0:-2]) newshape.append(res.shape[-1] // 2) newshape.append(2) return numpy.rollaxis(res.reshape(newshape), -1, -2) def epoch_to_num(self, epoch): """ Convert CDF EPOCH to matplotlib number. Same output as :func:`~matplotlib.dates.date2num` and useful for plotting large data sets without converting the times through datetime. Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : double Floating point number representing days since 0001-01-01. """ #date2num day 1 is 1/1/1 00UT #epoch 1/1/1 00UT is 31622400000.0 (millisecond) return (epoch - 31622400000.0) / (24 * 60 * 60 * 1000.0) + 1.0 def epoch16_to_epoch(self, epoch16): """ Converts a CDF EPOCH16 to a CDF EPOCH value Parameters ========== epoch16 : (double, double) EPOCH16 to convert. Lists and numpy arrays are acceptable. LAST dimension should be 2: the two pairs of EPOCH16 Returns ======= out : double EPOCH corresponding to epoch16 """ e = numpy.require(epoch16, numpy.float64) return e[..., 0] * 1000.0 + numpy.round(e[..., 1] / 1e9) def tt2000_to_datetime(self, tt2000): """ Converts a CDF TT2000 value to a datetime .. note:: Although TT2000 values support leapseconds, Python's datetime object does not. Any times after 23:59:59.999999 will be truncated to 23:59:59.999999. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
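Examples
========
A rough usage sketch (not part of the original docstring); the TT2000 value
below is the one this module uses in its own self-test for
2010-01-01 00:00:00 UTC:

>>> lib.tt2000_to_datetime(315576066184000000)
datetime.datetime(2010, 1, 1, 0, 0)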
See Also ======== v_tt2000_to_datetime """ yyyy = ctypes.c_double(0) mm = ctypes.c_double(0) dd = ctypes.c_double(0) hh = ctypes.c_double(0) min = ctypes.c_double(0) sec = ctypes.c_double(0) msec = ctypes.c_double(0) usec = ctypes.c_double(0) nsec = ctypes.c_double(0) self._library.breakdownTT2000( ctypes.c_longlong(tt2000), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) sec = int(sec.value) if sec >= 60: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), 59, 999999) micro = int(msec.value * 1000 + usec.value + nsec.value / 1000 + 0.5) if micro < 1000000: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_tt2000(self, dt): """ Converts a Python datetime to a CDF TT2000 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0) def _datetime_to_tt2000_typepunned(self, dt): """ Converts a Python datetime to a CDF TT2000 value Typepunned version that passes doubles as longlongs, to get around ARM calling convention oddness. Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ c_ll_p = ctypes.POINTER(ctypes.c_longlong) if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( dt.year)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.month)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.day)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.hour)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.minute)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.second)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond // 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond % 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) def epoch_to_tt2000(self, epoch): """ Converts a CDF EPOCH to a CDF TT2000 value Parameters ========== epoch : double EPOCH to convert Returns ======= out : int tt2000 corresponding to epoch See Also ======== v_epoch_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH(epoch) def tt2000_to_epoch(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH .. note:: Although TT2000 values support leapseconds, CDF EPOCH values do not. 
Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double EPOCH corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch """ return self._library.CDF_TT2000_to_UTC_EPOCH(tt2000) def epoch16_to_tt2000(self, epoch0, epoch1): """ Converts a CDF epoch16 value to TT2000 .. note:: Because TT2000 does not support picoseconds, the picoseconds value in epoch is ignored (i.e., truncated.) Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : long TT2000 corresponding to epoch. See Also ======== v_epoch16_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH16( (ctypes.c_double * 2)(epoch0, epoch1)) def tt2000_to_epoch16(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH16 .. note:: Although TT2000 values support leapseconds, CDF EPOCH16 values do not. Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double, double EPOCH16 corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch16 """ #Default to "illegal epoch" if isn't populated epoch16 = (ctypes.c_double * 2)(-1., -1.) if self._library.CDF_TT2000_to_UTC_EPOCH16(tt2000, epoch16): return (-1., -1.) #Failure; illegal epoch return (epoch16[0], epoch16[1]) def _bad_tt2000(*args, **kwargs): """Convenience function for complaining that TT2000 not supported""" raise NotImplementedError( 'TT2000 functions require CDF library 3.4.0 or later') def download_library(): """Download and install the CDF library""" if sys.platform != 'win32': raise NotImplementedError( 'CDF library install only supported on Windows') try: import html.parser as HTMLParser except ImportError: import HTMLParser #https://stackoverflow.com/questions/1713038/super-fails-with-error-typeerror-argument-1-must-be-type-not-classobj class LinkParser(HTMLParser.HTMLParser, object): def __init__(self, *args, **kwargs): self.links_found = [] super(LinkParser, self).__init__(*args, **kwargs) def handle_starttag(self, tag, attrs): if tag != 'a' or attrs[0][0] != 'href': return self.links_found.append(attrs[0][1]) import re import subprocess try: import urllib.request as u except ImportError: import urllib as u # Removed reference to spacepy #import spacepy #if spacepy.config.get('user_agent', None): # class AppURLopener(u.FancyURLopener): # version = spacepy.config['user_agent'] # u._urlopener = AppURLopener() baseurl = 'https://spdf.sci.gsfc.nasa.gov/pub/software/cdf/dist/' url = u.urlopen(baseurl) listing = url.read() url.close() p = LinkParser() p.feed(listing) cdfdist = [l for l in p.links_found if re.match('^cdf3\d_\d(?:_\d)?/$', l)] if not cdfdist: raise RuntimeError( "Couldn't find CDF distribution directory to download") cdfdist.sort(key=lambda x: x.rstrip('/').split('_')) cdfverbase = cdfdist[-1].rstrip('/') instfname = cdfverbase + ('_0' if cdfverbase.count('_') == 1 else '') + \ '-setup-{0}.exe'.format(len('%x' % sys.maxsize)*4) insturl = baseurl + cdfverbase + '/windows/' + instfname tmpdir = tempfile.mkdtemp() try: fname, status = u.urlretrieve(insturl, os.path.join(tmpdir, instfname)) subprocess.check_call([fname, '/install', '/q1'], shell=False) finally: shutil.rmtree(tmpdir) _libpath, _library = Library._find_lib() 
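#Module-level load of the CDF C library: _find_lib() returns either
#(path to library, loaded library) or, on failure, (list of paths tried, None),
#in which case the import is aborted below with a hint to set CDF_LIB.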
if _library is None: raise Exception(('Cannot load CDF C library; checked {0}. ' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(_libpath))) from . import const lib = Library(_libpath, _library) """Module global library object. Initalized at module load time so all classes have ready access to the CDF library and a common state. E.g: >>> import pycdf >>> pycdf.lib.version (3, 3, 0, ' ') """ class CDFException(Exception): """ Base class for errors or warnings in the CDF library. Not normally used directly, but in subclasses :class:`CDFError` and :class:`CDFWarning`. Error messages provided by this class are looked up from the underlying C library. """ def __init__(self, status): """ Create a CDF Exception Uses CDF C library to look up an appropriate error message. Parameters ========== status : ctypes.c_long CDF status """ self.status = status self.string = 'CDF error ' + repr(status) + ', unable to get details.' message = ctypes.create_string_buffer(const.CDF_STATUSTEXT_LEN + 1) try: retval = lib._library.CDFlib(const.SELECT_, const.CDF_STATUS_, ctypes.c_long(status), const.GET_, const.STATUS_TEXT_, message, const.NULL_) if retval == const.CDF_OK: if isinstance(message.value, str): self.string = message.value elif isinstance(message.value, bytes): self.string = message.value.decode() except: pass def __str__(self): """ Error string associated with the library error. Returns ======= out : str Error message from the CDF library. """ return self.string class CDFError(CDFException): """Raised for an error in the CDF library.""" pass class CDFWarning(CDFException, UserWarning): """Used for a warning in the CDF library.""" def warn(self, level=4): """ Issues a warning based on the information stored in my exception Intended for use in check_status or similar wrapper function. Other Parameters ================ level : int optional (default 3), how far up the stack the warning should be reported. Passed directly to :class:`warnings.warn`. """ warnings.warn(self, self.__class__, level) class EpochError(Exception): """Used for errors in epoch routines""" pass def _compress(obj, comptype=None, param=None): """Set or check the compression of a :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param obj: object on which to set or check compression @type obj: :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param comptype: type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :py:mod:`pycdf.const`. If not specified, will not change compression. @type comptype: ctypes.c_long @param param: Compression parameter, see CDF CRM 4.10 and :py:mod:`pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
@type param: ctypes.c_long @return: (comptype, param) currently in effect @rtype: tuple """ if isinstance(obj, CDF): COMPRESSION_ = const.CDF_COMPRESSION_ elif isinstance(obj, Var): COMPRESSION_ = const.zVAR_COMPRESSION_ else: raise ValueError('Must specify a CDF or Var type.') validparams = {const.NO_COMPRESSION.value: [ctypes.c_long(0)], const.RLE_COMPRESSION.value: [const.RLE_OF_ZEROs], const.HUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.AHUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.GZIP_COMPRESSION.value: [ctypes.c_long(5), ctypes.c_long(1), ctypes.c_long(2), ctypes.c_long(3), ctypes.c_long(4), ctypes.c_long(6), ctypes.c_long(7), ctypes.c_long(8), ctypes.c_long(9), ], } comptypes = [const.NO_COMPRESSION, const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION, const.GZIP_COMPRESSION] comptypevalues = [i.value for i in comptypes] if comptype != None: if not hasattr(comptype, 'value'): comptype = ctypes.c_long(comptype) if param is None: if not comptype.value in validparams: raise CDFError(const.BAD_COMPRESSION) param = validparams[comptype.value][0] paramlist = (ctypes.c_long * 1)(param) obj._call(const.PUT_, COMPRESSION_, comptype, paramlist) params = (ctypes.c_long * const.CDF_MAX_PARMS)(*([0] * const.CDF_MAX_PARMS)) comptype = ctypes.c_long(0) percent = ctypes.c_long(0) obj._call(const.GET_, COMPRESSION_, ctypes.byref(comptype), ctypes.byref(params), ctypes.byref(percent)) param = params[0] if not comptype.value in comptypevalues: raise CDFError(const.BAD_COMPRESSION) validparamvalues = [i.value for i in validparams[comptype.value]] if not param in validparamvalues: raise CDFError(const.BAD_COMPRESSION_PARM) comptype = comptypes[comptypevalues.index(comptype.value)] if comptype in (const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION): param = validparams[comptype.value][validparamvalues.index(param)] return (comptype, param) class CDF(MutableMapping): """ Python object representing a CDF file. Open or create a CDF file by creating an object of this class. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. A readonly CDF with many variables may be slow to close. See :meth:`readonly`. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :meth:`close` or :meth:`save` when done. .. note:: Existing CDF files are opened read-only by default, see :meth:`readonly` to change. CDF supports the `with <http://docs.python.org/tutorial/inputoutput.html#methods-of-file-objects>`_ keyword, like other file objects, so: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... #do brilliant things with the CDF will open the CDF, execute the indented statements, and close the CDF when finished or when an error occurs. The `python docs <http://docs.python.org/reference/compound_stmts.html#with>`_ include more detail on this 'context manager' ability. 
CDF objects behave like a python `dictionary <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_, where the keys are names of variables in the CDF, and the values, :class:`Var` objects. As a dictionary, they are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_ and it is easy to loop over all of the variables in a file. Some examples: #. List the names of all variables in the open CDF ``cdffile``: >>> cdffile.keys() >>> for k in cdffile: #Alternate ... print(k) #. Get a :class:`Var` object for the variable named ``Epoch``: >>> epoch = cdffile['Epoch'] #. Determine if a CDF contains a variable named ``B_GSE``: >>> if 'B_GSE' in cdffile: ... print('B_GSE is in the file') ... else: ... print('B_GSE is not in the file') #. Find how many variables are in the file: >>> print(len(cdffile)) #. Delete the variable ``Epoch`` from the open CDF file ``cdffile``: >>> del cdffile['Epoch'] #. Display a summary of variables and types in open CDF file ``cdffile``: >>> print(cdffile) #. Open the CDF named ``cdf_filename.cdf``, read *all* the data from all variables into dictionary ``data``, and close it when done or if an error occurs: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... data = cdffile.copy() This last example can be very inefficient as it reads the entire CDF. Normally it's better to treat the CDF as a dictionary and access only the data needed, which will be pulled transparently from disc. See :class:`Var` for more subtle examples. Potentially useful dictionary methods and related functions: - `in <http://docs.python.org/reference/expressions.html#in>`_ - `keys <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_ - :py:func:`len` - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - :py:func:`sorted` - :py:func:`~spacepy.toolbox.dictree` The CDF user's guide section 2.2 has more background information on CDF files. The :attr:`~CDF.attrs` Python attribute acts as a dictionary referencing CDF attributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`gAttrList` for more on the dictionary of global attributes. Creating a new CDF from a master (skeleton) CDF has similar syntax to opening one: >>> cdffile = pycdf.CDF('cdf_filename.cdf', 'master_cdf_filename.cdf') This creates and opens ``cdf_filename.cdf`` as a copy of ``master_cdf_filename.cdf``. Using a skeleton CDF is recommended over making a CDF entirely from scratch, but this is possible by specifying a blank master: >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') When CDFs are created in this way, they are opened read-write, see :py:meth:`readonly` to change. By default, new CDFs (without a master) are created in version 2 (backward-compatible) format. To create a version 3 CDF, use :meth:`Library.set_backward`: >>> pycdf.lib.set_backward(False) >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') Add variables by direct assignment, which will automatically set type and dimension based on the data provided: >>> cdffile['new_variable_name'] = [1, 2, 3, 4] or, if more control is needed over the type and dimensions, use :py:meth:`new`. Although it is supported to assign Var objects to Python variables for convenience, there are some minor pitfalls that can arise when changing a CDF that will not affect most users. 
This is only a concern when assigning a zVar object to a Python variable, changing the CDF through some other variable, and then trying to use the zVar object via the originally assigned variable. Deleting a variable: >>> var = cdffile['Var1'] >>> del cdffile['Var1'] >>> var[0] #fail, no such variable Renaming a variable: >>> var = cdffile['Var1'] >>> cdffile['Var1'].rename('Var2') >>> var[0] #fail, no such variable Renaming via the same variable works: >>> var = cdffile['Var1'] >>> var.rename('Var2') >>> var[0] #succeeds, aware of new name Deleting a variable and then creating another variable with the same name may lead to some surprises: >>> var = cdffile['Var1'] >>> var[...] = [1, 2, 3, 4] >>> del cdffile['Var1'] >>> cdffile.new('Var1', data=[5, 6, 7, 8] >>> var[...] [5, 6, 7, 8] .. autosummary:: ~CDF.attr_num ~CDF.attrs ~CDF.add_attr_to_cache ~CDF.add_to_cache ~CDF.backward ~CDF.checksum ~CDF.clear_attr_from_cache ~CDF.clear_from_cache ~CDF.clone ~CDF.close ~CDF.col_major ~CDF.compress ~CDF.copy ~CDF.from_data ~CDF.new ~CDF.raw_var ~CDF.readonly ~CDF.save ~CDF.var_num ~CDF.version .. attribute:: CDF.attrs Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. .. attribute:: CDF.backward True if this CDF was created in backward-compatible mode (for opening with CDF library before 3.x) .. automethod:: add_to_cache .. automethod:: add_attr_to_cache .. automethod:: attr_num .. automethod:: checksum .. automethod:: clear_from_cache .. automethod:: clear_attr_from_cache .. automethod:: clone .. automethod:: close .. automethod:: col_major .. automethod:: compress .. automethod:: copy .. automethod:: from_data .. automethod:: new .. automethod:: raw_var .. automethod:: readonly .. automethod:: save .. automethod:: var_num .. automethod:: version """ def __init__(self, pathname, masterpath=None, create=None, readonly=None): """Open or create a CDF file. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. Raises ====== CDFError if CDF library reports an error CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save` when done. 
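An empty CDF can also be created by passing a blank master path, as
described in the class docstring:

>>> cdffile = pycdf.CDF('cdf_filename.cdf', '')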
""" if masterpath is not None: #Looks like we want to create if create is False: raise ValueError('Cannot specify a master CDF without creating a CDF') if readonly is True: raise ValueError('Cannot create a CDF in readonly mode') if create and readonly: raise ValueError('Cannot create a CDF in readonly mode') try: self.pathname = pathname.encode() except AttributeError: raise ValueError( 'pathname must be string-like: {0}'.format(pathname)) self._handle = ctypes.c_void_p(None) self._opened = False if masterpath is None and not create: self._open(True if readonly is None else readonly) elif masterpath: self._from_master(masterpath.encode()) else: self._create() lib.call(const.SELECT_, const.CDF_zMODE_, ctypes.c_long(2)) self._attrlistref = weakref.ref(gAttrList(self)) self.backward = self.version()[0] < 3 self._var_nums = {} """Cache of name-to-number mappings for variables in this CDF""" self._attr_info = {} """Cache of name-to-(number, global) mappings for attributes in this CDF""" def __del__(self): """Destructor; called when CDF object is destroyed. Close CDF file if there is still a valid handle. .. note:: To avoid data loss, explicitly call :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save`. """ if self._opened: self.close() def __delitem__(self, name): """Delete a zVariable in this CDF, by name or number Parameters ========== name : string or int Name or number of the CDF variable .. note: Variable numbers may change if variables are added or removed. Examples ======== Delete the variable ``Epoch`` from the open CDF file ``cdffile``. >>> del cdffile['Epoch'] """ self[name]._delete() def __enter__(self): """Context manager entrance function.""" return self def __exit__(self, type, value, traceback): """Context manager exit function. Close CDF file. """ self.close() def __getitem__(self, name): """Gets a zVariable in this CDF, by name or number The CDF acts like a dict @param name: Name or number of the CDF variable @type name: string or int @return: CDF variable named or numbered L{name} @rtype: :py:class:`pycdf.Var` @raise KeyError: for pretty much any problem in lookup @note: variable numbers may change if variables are added or removed. """ try: return Var(self, name) except CDFException as e: raise KeyError('{0}: {1}'.format(name, e)) def __setitem__(self, name, data): """Writes data to a zVariable in this CDF If the zVariable does not exist, will create one matching L{data}. If it does exist, will attempt to write L{data} to it without changing the type or dimensions. @param name: name or number of the variable to write @type name: str or int @param data: data to write, or a :py:class:`pycdf.Var` to copy """ if isinstance(data, Var): self.clone(data, name) elif name in self: self[name][...] 
= data if hasattr(data, 'attrs'): self[name].attrs.clone(data.attrs) else: self.new(name, data) def __iter__(self, current = 0): """Iterates over zVars in CDF Iterators for dicts return keys @note: Returned in variable-number order """ while current < self.__len__(): name = self[current].name() value = (yield name) if value is None: current += 1 else: current = self[value]._num() current += 1 def __len__(self): """Implements 'length' of CDF (number of zVars) @return: number of zVars in the CDF @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, const.CDF_NUMzVARS_, ctypes.byref(count)) return count.value def __contains__(self, key): """Determines whether a particular variable name is in the CDF @note: Essentially an efficiency function; L{__iter__} is called if this isn't defined @param key: key/variable name to check @type key: string @return: True if L{key} is the name of a variable in CDF, else False @rtype: Boolean """ try: foo = self[key] return True except KeyError as e: expected = str(key) + \ ": NO_SUCH_VAR: Named variable not found in this CDF." if expected in e.args: return False raise def __repr__(self): """Returns representation of CDF Cannot return anything that can be eval'd to create a copy of the CDF, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<CDF:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the CDF This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.CDF`, just the names, types, and sizes of all variables. (Attributes are not listed.) @return: description of the variables in the CDF @rtype: str """ if self._opened: return '\n'.join([key + ': ' + str(value) for (key, value) in sorted(self.items())]) #can get away with this sort because second value in tuple isn't #compared unless first are different, and variable name is unique. else: if isinstance(self.pathname, str): return 'Closed CDF {0}'.format(self.pathname) else: return 'Closed CDF {0}'.format(self.pathname.decode('ascii')) def _open(self, readonly=True): """Opens the CDF file (called on init) Will open an existing CDF file read/write. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.OPEN_, const.CDF_, self.pathname, ctypes.byref(self._handle)) self._opened = True if readonly: #Default is RW self.readonly(readonly) def _create(self): """Creates (and opens) a new CDF file Created at ``pathname``. Assumes zero-dimension r variables Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.CREATE_, const.CDF_, self.pathname, ctypes.c_long(0), (ctypes.c_long * 1)(0), ctypes.byref(self._handle)) self._opened = True def _from_master(self, master_path): """Creates a new CDF from a master CDF file ``master_path`` is copied to ``pathname`` and opened. Parameters ========== master_path : string location of the master CDF file Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. 
note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ if os.path.exists(self.pathname): raise CDFError(const.CDF_EXISTS) shutil.copy2(master_path, self.pathname) self._open(False) def _call(self, *args, **kwargs): """Select this CDF as current and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. Parameters ========== args : various, see :py:mod:`ctypes`. Passed directly to the CDF library interface. Useful constants are defined in the :doc:`const <pycdf_const>` module of this package. Returns ======= out : ctypes.c_long CDF status from the library .. note: Terminal NULL_ is automatically added to ``args``. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. """ return lib.call(const.SELECT_, const.CDF_, self._handle, *args, **kwargs) def clone(self, zVar, name=None, data=True): """ Clone a zVariable (from another CDF or this) into this CDF Parameters ========== zVar : :py:class:`Var` variable to clone Other Parameters ================ name : str Name of the new variable (default: name of the original) data : boolean (optional) Copy data, or only type, dimensions, variance, attributes? (default: True, copy data as well) Returns ======= out : :py:class:`Var` The newly-created zVar in this CDF """ if name is None: name = zVar.name() if name in self: del self[name] self.new(name, type=zVar.type(), recVary=zVar.rv(), dimVarys=zVar.dv(), dims=zVar._dim_sizes(), n_elements=zVar._nelems()) self[name].compress(*zVar.compress()) self[name].attrs.clone(zVar.attrs) if data: r = zVar._raw zVar._raw = True self.raw_var(name)[...] = zVar[...] zVar._raw = r return zVar def col_major(self, new_col=None): """ Finds the majority of this CDF file Other Parameters ================ new_col : boolean Specify True to change to column-major, False to change to row major, or do not specify to check the majority rather than changing it. (default is check only) Returns ======= out : boolean True if column-major, false if row-major """ if new_col != None: new_maj = const.COLUMN_MAJOR if new_col else const.ROW_MAJOR self._call(const.PUT_, const.CDF_MAJORITY_, new_maj) maj = ctypes.c_long(0) self._call(const.GET_, const.CDF_MAJORITY_, ctypes.byref(maj)) if not maj.value in (const.ROW_MAJOR.value, const.COLUMN_MAJOR.value): raise CDFError(const.BAD_MAJORITY) return maj.value == const.COLUMN_MAJOR.value def readonly(self, ro=None): """ Sets or check the readonly status of this CDF If the CDF has been changed since opening, setting readonly mode will have no effect. .. note:: Closing a CDF that has been opened readonly, or setting readonly False, may take a substantial amount of time if there are many variables in the CDF, as a (potentially large) cache needs to be cleared. Consider specifying ``readonly=False`` when opening the file if this is an issue. However, this may make some reading operations slower. Other Parameters ================ ro : Boolean True to set the CDF readonly, False to set it read/write, or leave out to check only. 
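Examples
========
A rough usage sketch (not part of the original docstring); ``cdffile`` is a
:class:`CDF` opened from an existing file, which defaults to read-only:

>>> cdffile.readonly()       # check the current mode
True
>>> cdffile.readonly(False)  # switch to read/write
False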
Returns ======= out : Boolean True if CDF is read-only, else False Raises ====== CDFError : if bad mode is set """ if ro == True: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYon) elif ro == False: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYoff) mode = ctypes.c_long(0) self._call(const.CONFIRM_, const.CDF_READONLY_MODE_, ctypes.byref(mode)) if mode.value == const.READONLYon.value: return True elif mode.value == const.READONLYoff.value: return False else: raise CDFError(const.BAD_READONLY_MODE.value) def checksum(self, new_val=None): """ Set or check the checksum status of this CDF. If checksums are enabled, the checksum will be verified every time the file is opened. Other Parameters ================ new_val : boolean True to enable checksum, False to disable, or leave out to simply check. Returns ======= out : boolean True if the checksum is enabled or False if disabled """ if new_val != None: self._call(const.PUT_, const.CDF_CHECKSUM_, const.MD5_CHECKSUM if new_val else const.NO_CHECKSUM) chk = ctypes.c_long(0) self._call(const.GET_, const.CDF_CHECKSUM_, ctypes.byref(chk)) if not chk.value in (const.MD5_CHECKSUM.value, const.NO_CHECKSUM.value): raise CDFError(const.BAD_CHECKSUM) return chk.value == const.MD5_CHECKSUM.value def close(self): """ Closes the CDF file Although called on object destruction (:meth:`~CDF.__del__`), to ensure all data are saved, the user should explicitly call :meth:`~CDF.close` or :meth:`~CDF.save`. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.CLOSE_, const.CDF_) self._opened = False def compress(self, comptype=None, param=None): """ Set or check the compression of this CDF Sets compression on entire *file*, not per-variable. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) Returns ======= out : tuple (comptype, param) currently in effect See Also ======== :meth:`Var.compress` Examples ======== Set file ``cdffile`` to gzip compression, compression level 9: >>> cdffile.compress(pycdf.const.GZIP_COMPRESSION, 9) """ return _compress(self, comptype, param) def new(self, name, data=None, type=None, recVary=True, dimVarys=None, dims=None, n_elements=None, compress=None, compress_param=None): """ Create a new zVariable in this CDF .. note:: Either ``data`` or ``type`` must be specified. If type is not specified, it is guessed from ``data``. Parameters ========== name : str name of the new variable Other Parameters ================ data data to store in the new variable. If this has a an ``attrs`` attribute (e.g., :class:`~spacepy.datamodel.dmarray`), it will be used to populate attributes of the new variable. type : ctypes.c_long CDF type of the variable, from :mod:`~pycdf.const`. See section 2.5 of the CDF user's guide for more information on CDF data types. recVary : boolean record variance of the variable (default True) dimVarys : list of boolean dimension variance of each dimension, default True for all dimensions. 
dims : list of int size of each dimension of this variable, default zero-dimensional. Note this is the dimensionality as defined by CDF, i.e., for record-varying variables it excludes the leading record dimension. See :py:class:`Var`. n_elements : int number of elements, should be 1 except for CDF_CHAR, for which it's the length of the string. compress : ctypes.c_long Compression to apply to this variable, default None. See :py:meth:`Var.compress`. compress_param : ctypes.c_long Compression parameter if compression used; reasonable default is chosen. See :py:meth:`Var.compress`. Returns ======= out : :py:class:`Var` the newly-created zVariable Raises ====== ValueError : if neither data nor sufficient typing information is provided. Notes ===== Any given data may be representable by a range of CDF types; if the type is not specified, pycdf will guess which the CDF types which can represent this data. This breaks down to: #. If input data is a numpy array, match the type of that array #. Proper kind (numerical, string, time) #. Proper range (stores highest and lowest number provided) #. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: #. Type that matches precision of data first, then #. integer type before float type, then #. Smallest type first, then #. signed type first, then #. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if ``data`` specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: #. absolute values between 0 and 3e-39 #. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. """ if type in (const.CDF_EPOCH16, const.CDF_INT8, const.CDF_TIME_TT2000) \ and self.backward: raise ValueError('Cannot use EPOCH16, INT8, or TIME_TT2000 ' 'in backward-compatible CDF') if not lib.supports_int8 and \ type in (const.CDF_INT8, const.CDF_TIME_TT2000): raise ValueError('INT8 and TIME_TT2000 require CDF library 3.4.0') if data is None: if type is None: raise ValueError('Must provide either data or a CDF type.') if dims is None: dims = [] if n_elements is None: n_elements = 1 else: (guess_dims, guess_types, guess_elements) = _Hyperslice.types(data) if dims is None: if recVary: if guess_dims == (): raise ValueError( 'Record-varying data cannot be scalar. 
' 'Specify NRV with CDF.new() or put data in array.') dims = guess_dims[1:] else: dims = guess_dims if type is None: type = guess_types[0] if type == const.CDF_EPOCH16.value and self.backward: type = const.CDF_EPOCH if n_elements is None: n_elements = guess_elements if dimVarys is None: dimVarys = [True for i in dims] recVary = const.VARY if recVary else const.NOVARY dimVarys = [const.VARY if dimVary else const.NOVARY for dimVary in dimVarys] if not hasattr(type, 'value'): type = ctypes.c_long(type) if type.value == const.CDF_INT8.value and not lib.supports_int8: raise ValueError( '64-bit integer support require CDF library 3.4.0') if type.value in (const.CDF_EPOCH16.value, const.CDF_INT8.value, const.CDF_TIME_TT2000.value) \ and self.backward: raise ValueError('Data requires EPOCH16, INT8, or TIME_TT2000; ' 'incompatible with backward-compatible CDF') new_var = Var(self, name, type, n_elements, dims, recVary, dimVarys) if compress != None: new_var.compress(compress, compress_param) if data is not None: new_var[...] = data if hasattr(data, 'attrs'): new_var.attrs.clone(data.attrs) return new_var def raw_var(self, name): """ Get a "raw" :class:`Var` object. Normally a :class:`Var` will perform translation of values for certain types (to/from Unicode for CHAR variables on Py3k, and to/from datetime for all time types). A "raw" object does not perform this translation, on read or write. This does *not* affect the data on disk, and in fact it is possible to maintain multiple Python objects with access to the same zVariable. Parameters ========== name : str name or number of the zVariable """ v = self[name] v._raw = True return v def save(self): """ Saves the CDF file but leaves it open. If closing the CDF, :meth:`close` is sufficient; there is no need to call :meth:`save` before :meth:`close`. .. note:: Relies on an undocumented call of the CDF C library, which is also used in the Java interface. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.SAVE_, const.CDF_) def copy(self): """ Make a copy of all data and attributes in this CDF Returns ======= out : :py:class:`CDFCopy` :class:`~spacepy.datamodel.SpaceData`-like object of all data """ return CDFCopy(self) def version(self): """ Get version of library that created this CDF Returns ======= out : tuple version of CDF library, in form (version, release, increment) """ ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) self._call(const.GET_, const.CDF_VERSION_, ctypes.byref(ver), const.GET_, const.CDF_RELEASE_, ctypes.byref(rel), const.GET_, const.CDF_INCREMENT_, ctypes.byref(inc)) return (ver.value, rel.value, inc.value) def _get_attrs(self): """Get attribute list Provide access to the CDF's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = gAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. 
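For example (a sketch, not part of the original docstring; the attribute
name is illustrative only):

>>> cdffile.attrs['TITLE'] = 'Sample global attribute'
>>> 'TITLE' in cdffile.attrs
True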
""") def var_num(self, varname): """Get the variable number of a particular variable name This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : int Variable number of this zvariable. """ num = self._var_nums.get(varname, None) if num is None: #Copied from Var._get, which can hopefully be thinned varNum = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMBER_, varname, ctypes.byref(varNum)) num = varNum.value self._var_nums[varname] = num return num def attr_num(self, attrname): """Get the attribute number and scope by attribute name This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : tuple attribute number, scope (True for global) of this attribute """ res = self._attr_info.get(attrname, None) if res is None: #Copied from Var._get, which can hopefully be thinned attrNum = ctypes.c_long(0) self._call(const.GET_, const.ATTR_NUMBER_, attrname, ctypes.byref(attrNum)) scope = ctypes.c_long(0) self._call(const.SELECT_, const.ATTR_, attrNum, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) res = (attrNum.value, scope) self._attr_info[attrname] = res return res def clear_attr_from_cache(self, attrname): """Mark an attribute deleted in the name-to-number cache Will remove an attribute, and all attributes with higher numbers, from the attribute cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the attribute. Not this is NOT a string in Python 3! """ num, scope = self.attr_num(attrname) #All numbers higher than this are renumbered for a, n in list(self._attr_info.items()): if n[0] >= num: del self._attr_info[a] def clear_from_cache(self, varname): """Mark a variable deleted in the name-to-number cache Will remove a variable, and all variables with higher numbers, from the variable cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! """ num = self.var_num(varname) #All numbers higher than this are renumbered for v, n in list(self._var_nums.items()): if n >= num: del self._var_nums[v] def add_attr_to_cache(self, attrname, num, scope): """Add an attribute to the name-to-number cache This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable scope : bool True if global scope; False if variable scope. 
""" self._attr_info[attrname] = (num, scope) def add_to_cache(self, varname, num): """Add a variable to the name-to-number cache This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable """ self._var_nums[varname] = num #Note there is no function for delete, currently handled in Var.rename #and Attr.rename by just deleting from the dict directly. Maybe this #should be differen (maybe should be possible to follow a variable across #a rename...) class Var(MutableSequence): """ A CDF variable. This object does not directly store the data from the CDF; rather, it provides access to the data in a format that much like a Python list or numpy :class:`~numpy.ndarray`. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. The CDF user's guide, section 2.3, provides background on variables. .. note:: Not intended to be created directly; use methods of :class:`CDF` to gain access to a variable. A record-varying variable's data are viewed as a hypercube of dimensions n_dims+1 (the extra dimension is the record number). They are indexed in row-major fashion, i.e. the last index changes most frequently / is contiguous in memory. If the CDF is column-major, the data are transformed to row-major before return. Non record-varying variables are similar, but do not have the extra dimension of record number. Variables can be subscripted by a multidimensional index to return the data. Indices are in row-major order with the first dimension representing the record number. If the CDF is column major, the data are reordered to row major. Each dimension is specified by standard Python `slice <http://docs.python.org/tutorial/introduction.html#strings>`_ notation, with dimensions separated by commas. The ellipsis fills in any missing dimensions with full slices. The returned data are lists; Python represents multidimensional arrays as nested lists. The innermost set of lists represents contiguous data. .. note:: numpy 'fancy indexing' is *not* supported. Degenerate dimensions are 'collapsed', i.e. no list of only one element will be returned if a single subscript is specified instead of a range. (To avoid this, specify a slice like 1:2, which starts with 1 and ends before 2). Two special cases: 1. requesting a single-dimension slice for a record-varying variable will return all data for that record number (or those record numbers) for that variable. 2. Requests for multi-dimensional variables may skip the record-number dimension and simply specify the slice on the array itself. In that case, the slice of the array will be returned for all records. In the event of ambiguity (e.g., single-dimension slice on a one-dimensional variable), case 1 takes priority. Otherwise, mismatch between the number of dimensions specified in the slice and the number of dimensions in the variable will cause an :exc:`~exceptions.IndexError` to be thrown. This all sounds very complicated but it is essentially attempting to do the 'right thing' for a range of slices. An unusual case is scalar (zero-dimensional) non-record-varying variables. Clearly they cannot be subscripted normally. 
In this case, use the ``[...]`` syntax meaning 'access all data.': >>> import pycdf >>> testcdf = pycdf.CDF('test.cdf', '') >>> variable = testcdf.new('variable', recVary=False, ... type=pycdf.const.CDF_INT4) >>> variable[...] = 10 >>> variable <Var: CDF_INT4 [] NRV > >>> variable[...] 10 Reading any empty non-record-varying variable will return an empty with the same *number* of dimensions, but all dimensions will be of zero length. The scalar is, again, a special case: due to the inability to have a numpy array which is both zero-dimensional and empty, reading an NRV scalar variable with no data will return an empty one-dimensional array. This is really not recommended. As a list type, variables are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_; iterating over a variable returns a single complete record at a time. This is all clearer with examples. Consider a variable ``B_GSM``, with three elements per record (x, y, z components) and fifty records in the CDF. Then: 1. ``B_GSM[0, 1]`` is the y component of the first record. 2. ``B_GSM[10, :]`` is a three-element list, containing x, y, and z components of the 11th record. As a shortcut, if only one dimension is specified, it is assumed to be the record number, so this could also be written ``B_GSM[10]``. 3. ``B_GSM[...]`` reads all data for ``B_GSM`` and returns it as a fifty-element list, each element itself being a three-element list of x, y, z components. Multidimensional example: consider fluxes stored as a function of pitch angle and energy. Such a variable may be called Flux and stored as a two-dimensional array, with the first dimension representing (say) ten energy steps and the second, eighteen pitch angle bins (ten degrees wide, centered from 5 to 175 degrees). Assume 100 records stored in the CDF (i.e. 100 different times). 1. ``Flux[4]`` is a list of ten elements, one per energy step, each element being a list of 18 fluxes, one per pitch bin. All are taken from the fifth record in the CDF. 2. ``Flux[4, :, 0:4]`` is the same record, all energies, but only the first four pitch bins (roughly, field-aligned). 3. ``Flux[..., 0:4]`` is a 100-element list (one per record), each element being a ten-element list (one per energy step), each containing fluxes for the first four pitch bins. This slicing notation is very flexible and allows reading specifically the desired data from the CDF. All data are, on read, converted to appropriate Python data types; EPOCH, EPOCH16, and TIME_TT2000 types are converted to :class:`~datetime.datetime`. Data are returned in numpy arrays. .. note:: Although pycdf supports TIME_TT2000 variables, the Python :class:`~datetime.datetime` object does not support leap seconds. Thus, on read, any seconds past 59 are truncated to 59.999999 (59 seconds, 999 milliseconds, 999 microseconds). Potentially useful list methods and related functions: - `count <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `in <http://docs.python.org/reference/expressions.html#in>`_ - `index <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `len <http://docs.python.org/library/functions.html#len>`_ - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - `sorted <http://docs.python.org/library/functions.html#sorted>`_ The topic of array majority can be very confusing; good background material is available at `IDL Array Storage and Indexing <http://www.idlcoyote.com/misc_tips/colrow_major.html>`_. 
In brief, *regardless of the majority stored in the CDF*, pycdf will always present the data in the native Python majority, row-major order, also known as C order. This is the default order in `NumPy <http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html #internal-memory-layout-of-an-ndarray>`_. However, packages that render image data may expect it in column-major order. If the axes seem 'swapped' this is likely the reason. The :attr:`~Var.attrs` Python attribute acts as a dictionary referencing zAttributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`zAttrList` for more on the dictionary of attributes. With writing, as with reading, every attempt has been made to match the behavior of Python lists. You can write one record, many records, or even certain elements of all records. There is one restriction: only the record dimension (i.e. dimension 0) can be resized by write, as all records in a variable must have the same dimensions. Similarly, only whole records can be deleted. .. note:: Unusual error messages on writing data usually mean that pycdf is unable to interpret the data as a regular array of a single type matching the type and shape of the variable being written. A 5x4 array is supported; an irregular array where one row has five columns and a different row has six columns is not. Error messages of this type include: - ``Data must be well-formed, regular array of number, string, or datetime`` - ``setting an array element with a sequence.`` - ``shape mismatch: objects cannot be broadcast to a single shape`` For these examples, assume Flux has 100 records and dimensions [2, 3]. Rewrite the first record without changing the rest: >>> Flux[0] = [[1, 2, 3], [4, 5, 6]] Writes a new first record and delete all the rest: >>> Flux[...] = [[1, 2, 3], [4, 5, 6]] Write a new record in the last position and add a new record after: >>> Flux[99:] = [[[1, 2, 3], [4, 5, 6]], ... [[11, 12, 13], [14, 15, 16]]] Insert two new records between the current number 5 and 6: >>> Flux[5:6] = [[[1, 2, 3], [4, 5, 6]], [[11, 12, 13], ... [14, 15, 16]]] This operation can be quite slow, as it requires reading and rewriting the entire variable. (CDF does not directly support record insertion.) Change the first element of the first two records but leave other elements alone: >>> Flux[0:2, 0, 0] = [1, 2] Remove the first record: >>> del Flux[0] Removes record 5 (the sixth): >>> del Flux[5] Due to the need to work around a bug in the CDF library, this operation can be quite slow. Delete *all data* from ``Flux``, but leave the variable definition intact: >>> del Flux[...] .. note:: Although this interface only directly supports zVariables, zMode is set on opening the CDF so rVars appear as zVars. See p.24 of the CDF user's guide; pyCDF uses zMode 2. .. autosummary:: ~Var.attrs ~Var.compress ~Var.copy ~Var.dtype ~Var.dv ~Var.insert ~Var.name ~Var.rename ~Var.rv ~Var.shape ~Var.type .. attribute:: Var.attrs zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. .. automethod:: compress .. automethod:: copy .. autoattribute:: dtype .. automethod:: dv .. automethod:: insert .. automethod:: name .. automethod:: rename .. automethod:: rv .. autoattribute:: shape .. 
automethod:: type """ def __init__(self, cdf_file, var_name, *args): """Create or locate a variable Parameters ========== cdf_file : :py:class:`pycdf.CDF` CDF file containing this variable var_name : string name of this variable Other Parameters ================ args additional arguments passed to :py:meth:`_create`. If none, opens an existing variable. If provided, creates a new one. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning """ self.cdf_file = cdf_file #This is the definitive "identify" of variable self._name = None self._type = None #CDF type (long) self._raw = False #Raw access (skip all conversions) if len(args) == 0: self._get(var_name) else: self._create(var_name, *args) #Weak reference to attribute list (use attrs instead) #This avoids a reference loop self._attrlistref = weakref.ref(zAttrList(self)) def __getitem__(self, key): """Returns a slice from the data array. Details under :py:class:`pycdf.Var`. @return: The data from this variable @rtype: list-of-lists of appropriate type. @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) #Hyperslice mostly catches this sort of thing, but #an empty variable is a special case, since we might want to #WRITE to 0th record (which Hyperslice also supports) but #can't READ from it, and iterating over tries to read from it. if hslice.rv: if hslice.dimsizes[0] == 0 and hslice.degen[0] and \ hslice.starts[0] == 0: raise IndexError('record index out of range') #For NRV, again hslice will assume 0th record exists since we might #want to write. So ANY degenerate dim other than the glued-on 0th #suggests an explicit index that should fail. None degenerate suggests #make an empty array. #Note this is pulling a lot of hyperslice stuff into getitem! elif hslice.dimsizes[0] == 0: if len(hslice.degen) > 1 and max(hslice.degen[1:]): raise IndexError('record index out of range') else: #The zero-length dimension is degenerate so it gets chopped, #and you can't have a zero-length numpy array that still #maintains the size of all other dimensions. So just force #a zero-dim array and the rest will follow hslice.counts[...] = 0 #If this is a scalar, need to make a single non-degenerate #dimension so it can be empty. if len(hslice.counts) == 1: hslice.degen[0] = False result = hslice.create_array() if hslice.counts[0] != 0: hslice.select() lib.call(const.GET_, const.zVAR_HYPERDATA_, result.ctypes.data_as(ctypes.c_void_p)) return hslice.convert_input_array(result) def __delitem__(self, key): """Removes a record (or set of records) from the CDF Only whole records can be deleted, so the del call must either specify only one dimension or it must specify all elements of the non-record dimensions. This is *not* a way to resize a variable! Deleting records from the middle of a variable may be very slow in some circumstances. To work around a bug in CDF library versions 3.4.0 and before, all the data must be read in, the requested deletions done, and then all written back out. 
@param key: index or slice to delete @type key: int or slice @raise TypeError: if an attempt is made to delete from a non record-varying variable, or to delete below the record level """ if not self.rv(): raise TypeError('Cannot delete records from non-record-varying ' 'variable.') hslice = _Hyperslice(self, key) if hslice.dims > 1 and (hslice.counts[1:] != hslice.dimsizes[1:]).any(): raise TypeError('Can only delete entire records.') if hslice.counts[0] == 0: return start = hslice.starts[0] count = hslice.counts[0] interval = hslice.intervals[0] dimsize = hslice.dimsizes[0] self._call() dangerous_delete = False if lib._del_middle_rec_bug and \ (interval != 1 or (start != 0 and start + count < dimsize)): #delete from middle is dangerous if only have one index entry entries = ctypes.c_long(0) lib.call(const.GET_, const.zVAR_nINDEXENTRIES_, ctypes.byref(entries)) dangerous_delete = (entries.value == 1) if dangerous_delete: data = self[...] data = numpy.delete( data, numpy.arange(start, start + count * interval, interval), 0) self[0:dimsize - count] = data first_rec = dimsize - count last_rec = dimsize - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif interval == 1: first_rec = ctypes.c_long(start) last_rec = ctypes.c_long(start + count - 1) lib.call(const.DELETE_, const.zVAR_RECORDS_, first_rec, last_rec) else: self._call() #delete from end to avoid renumbering of records for recno in range(start + (count - 1) * interval, start - 1, -1 * interval): lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(recno), ctypes.c_long(recno)) def __setitem__(self, key, data): """Puts a slice into the data array. Details under :py:class:`pycdf.Var`. @param key: index or slice to store @type key: int or slice @param data: data to store @type data: numpy.array @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. 
IndexError will @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) n_recs = hslice.counts[0] hslice.expand(data) cdf_type = self.type() if cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) else: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=self._np_type()) if cdf_type == const.CDF_EPOCH16.value: datashape = data.shape[:-1] else: datashape = data.shape #Check data sizes if datashape != tuple(hslice.expected_dims()): raise ValueError('attempt to assign data of dimensions ' + str(datashape) + ' to slice of dimensions ' + str(tuple(hslice.expected_dims()))) #Flip majority and reversed dimensions, see convert_input_array data = hslice.convert_output_array(data) #Handle insertions and similar weirdness if hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Specified slice ends before last record, so insert in middle saved_data = self[hslice.starts[0] + n_recs:] if hslice.counts[0] > 0: hslice.select() lib.call(const.PUT_, const.zVAR_HYPERDATA_, data.ctypes.data_as(ctypes.c_void_p)) if hslice.counts[0] < n_recs: first_rec = hslice.starts[0] + hslice.counts[0] last_rec = hslice.dimsizes[0] - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Put saved data in after inserted data self[hslice.starts[0] + hslice.counts[0]:] = saved_data def extend(self, data): """ Append multiple values to the end of this variable This is an efficiency function which overrides the base implementation in MutableSequence. Parameters ---------- data : the data to append """ self[len(self):] = data def insert(self, index, data): """ Inserts a *single* record before an index Parameters ---------- index : int index before which to insert the new record data : the record to insert """ self[index:index] = [data] def _create(self, var_name, datatype, n_elements = 1, dims = (), recVary = const.VARY, dimVarys = None): """Creates a new zVariable @param var_name: name of this variable @type var_name: string @param datatype: CDF data type @type datatype: ctypes.c_long @param n_elements: number of elements (should be 1 except for CDF_CHAR variables). @type n_elements: long @param dims: size of each dimension for multi-dimensional variable, or empty for a zero-dimensional @type dims: sequence of long @param recVary: record variance for this variable (VARY/NOVARY) @type recVary: long @param dimVarys: array of VARY or NOVARY, variance for each dimension @type dimVarys: sequence of long @return: new variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.new}. 
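        For example (variable name, type, and sizes hypothetical), L{CDF.new}
        reaches this method via C{Var(cdf, 'Flux', const.CDF_REAL4, 1, [2, 3],
        const.VARY, [const.VARY, const.VARY])}.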
""" dim_array = (ctypes.c_long * len(dims))(*dims) enc_name = var_name.encode('ascii') if dimVarys is None: dim_vary_array = (ctypes.c_long * (len(dims) if len(dims) > 0 else 1))(const.VARY) else: dim_vary_array = (ctypes.c_long * len(dims))(*dimVarys) varNum = ctypes.c_long(0) self.cdf_file._call(const.CREATE_, const.zVAR_, enc_name, datatype, ctypes.c_long(n_elements), ctypes.c_long(len(dims)), dim_array, recVary, dim_vary_array, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) def _delete(self): """Removes this zVariable from the CDF @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ self._call(const.DELETE_, const.zVAR_) self.cdf_file.clear_from_cache(self._name) self._name = None def _get(self, var_name): """Gets an existing zVariable @param var_name: name of this variable @type var_name: string @return: variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.__getitem__}. """ if isinstance(var_name, str_classes): try: enc_name = var_name.encode('ascii').rstrip() except AttributeError: enc_name = var_name.rstrip() #already in ASCII #'touch' CDF to cause an error if the name isn't there; get number varNum = ctypes.c_long(0) self.cdf_file._call(const.GET_, const.zVAR_NUMBER_, enc_name, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) else: #Looking up by number name = ctypes.create_string_buffer(const.CDF_VAR_NAME_LEN256+1) self.cdf_file._call(const.SELECT_, const.zVAR_, ctypes.c_long(var_name), const.GET_, const.zVAR_NAME_, name) self._name = name.value.rstrip() self.cdf_file.add_to_cache(self._name, var_name) def _num(self): """Returns the zVar number for this variable @return: number of this zVar @rtype: int """ return self.cdf_file.var_num(self._name) def __len__(self): """Get number of records for this variable in this file @return: Number of records @rtype: long @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ count = ctypes.c_long(0) self._call(const.GET_, const.zVAR_MAXREC_, ctypes.byref(count)) return (count.value + 1) def __repr__(self): """Returns representation of the variable Cannot return anything that can be eval'd to create a copy, so just wrap the informal representation in angle brackets. @return: info on this zVar @rtype: str """ return '<Var:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the variable This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.Var`. 
@return: info on this zVar, CDFTYPE [dimensions] NRV (if not record-varying) @rtype: str """ if self.cdf_file._opened: cdftype = self.type() chartypes = (const.CDF_CHAR.value, const.CDF_UCHAR.value) rv = self.rv() typestr = lib.cdftypenames[cdftype] + \ ('*' + str(self._nelems()) if cdftype in chartypes else '' ) if rv: sizestr = str([len(self)] + self._dim_sizes()) else: sizestr = str(self._dim_sizes()) return typestr + ' ' + sizestr + ('' if rv else ' NRV') else: if isinstance(self._name, str): return 'zVar "{0}" in closed CDF {1}'.format( self._name, self.cdf_file.pathname) else: return 'zVar "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self.cdf_file.pathname.decode('ascii')) def _n_dims(self): """Get number of dimensions for this variable @return: the number of dimensions @rtype: long """ n_dims = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMDIMS_, ctypes.byref(n_dims)) return n_dims.value def _dim_sizes(self): """Get the dimension sizes for this variable @return: sequence of sizes @rtype: sequence of long @note: This will always be in Python order (i.e. row major, last index iterates most quickly), *regardless* of the majority of the CDF. """ sizes = (ctypes.c_long * const.CDF_MAX_DIMS)(0) self._call(const.GET_, const.zVAR_DIMSIZES_, sizes) sizes = sizes[0:self._n_dims()] return sizes def rv(self, new_rv=None): """ Gets or sets whether this variable has record variance If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Other Parameters ================ new_rv : boolean True to change to record variance, False to change to NRV, unspecified to simply check variance. Returns ======= out : Boolean True if record varying, False if NRV """ if new_rv != None: self._call(const.PUT_, const.zVAR_RECVARY_, const.VARY if new_rv else const.NOVARY) vary = ctypes.c_long(0) self._call(const.GET_, const.zVAR_RECVARY_, ctypes.byref(vary)) return vary.value != const.NOVARY.value def dv(self, new_dv=None): """ Gets or sets dimension variance of each dimension of variable. If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Parameters ========== new_dv : list of boolean Each element True to change that dimension to dimension variance, False to change to not dimension variance. (Unspecified to simply check variance.) Returns ======= out : list of boolean True if that dimension has variance, else false. """ ndims = self._n_dims() if new_dv != None: if len(new_dv) != ndims: raise ValueError('Must specify variance for ' + str(ndims) + 'dimensions.') varies = (ctypes.c_long * ndims)( *[const.VARY if dv else const.NOVARY for dv in new_dv]) self._call(const.PUT_, const.zVAR_DIMVARYS_, varies) if ndims == 0: return [] varies = (ctypes.c_long * const.CDF_MAX_DIMS)() self._call(const.GET_, const.zVAR_DIMVARYS_, varies) return [dv != const.NOVARY.value for dv in varies[0:ndims]] def _call(self, *args, **kwargs): """Select this CDF and variable and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. @param args: Passed directly to the CDF library interface. Useful constants are defined in the :py:mod:`pycdf.const` module of this package. @type args: various, see :py:mod:`ctypes`. 
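        For example (mirroring the pattern used by C{__len__} above, not a
        new API), C{self._call(const.GET_, const.zVAR_MAXREC_,
        ctypes.byref(count))} selects this variable in the library and then
        queries its maximum record number.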
@return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self.cdf_file._call( const.SELECT_, const.zVAR_, ctypes.c_long(self.cdf_file.var_num(self._name)), *args, **kwargs) def _np_type(self): """Returns the numpy type of this variable This is the numpy type that will come directly out of the CDF; see :meth:`dtype` for the representation post-conversion. Raises ====== CDFError : for library-reported error or failure to find numpy type Returns ======= out : dtype numpy dtype that will hold value from this variable """ cdftype = self.type() if cdftype == const.CDF_CHAR.value or cdftype == const.CDF_UCHAR.value: return numpy.dtype('S' + str(self._nelems())) try: return lib.numpytypedict[cdftype] except KeyError: raise CDFError(const.BAD_DATA_TYPE) def type(self, new_type=None): """ Returns or sets the CDF type of this variable Parameters ========== new_type : ctypes.c_long the new type from :mod:`~pycdf.const` Returns ======= out : int CDF type """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) n_elements = ctypes.c_long(self._nelems()) self._call(const.PUT_, const.zVAR_DATASPEC_, new_type, n_elements) self._type = None if self._type is None: cdftype = ctypes.c_long(0) self._call(const.GET_, const.zVAR_DATATYPE_, ctypes.byref(cdftype)) self._type = cdftype.value return self._type def _nelems(self): """Number of elements for each value in this variable This is the length of strings for CHAR and UCHAR, should be 1 otherwise. @return: length of strings @rtype: int """ nelems = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMELEMS_, ctypes.byref(nelems)) return nelems.value def name(self): """ Returns the name of this variable Returns ======= out : str variable's name """ if isinstance(self._name, str): return self._name elif isinstance(self._name, bytes): return self._name.decode() def compress(self, comptype=None, param=None): """ Set or check the compression of this variable Compression may not be changeable on variables with data already written; even deleting the data may not permit the change. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
Returns ======= out : tuple the (comptype, param) currently in effect """ return _compress(self, comptype, param) def copy(self): """ Copies all data and attributes from this variable Returns ======= out : :class:`VarCopy` list of all data in record order """ return VarCopy(self) def rename(self, new_name): """ Renames this variable Parameters ========== new_name : str the new name for this variable """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_VAR_NAME_LEN256: raise CDFError(const.BAD_VAR_NAME) self._call(const.PUT_, const.zVAR_NAME_, enc_name) self.cdf_file.add_to_cache( enc_name, self.cdf_file.var_num(self._name)) #Still in cache del self.cdf_file._var_nums[self._name] self._name = enc_name @property def shape(self): """ Provides the numpy array-like shape of this variable. Returns a tuple; first element is number of records (RV variable only) And the rest provide the dimensionality of the variable. .. note:: Assigning to this attribute will not change the shape. """ if self.rv(): return tuple([len(self)] + self._dim_sizes()) else: return tuple(self._dim_sizes()) @property def dtype(self): """ Provide the numpy dtype equivalent to the CDF type of this variable. Data from this variable will be returned in numpy arrays of this type. See Also -------- type """ cdftype = self.type() if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str is not bytes and not self._raw: return numpy.dtype('U' + str(self._nelems())) if cdftype in (const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value) and not self._raw: return numpy.dtype('O') return self._np_type() def _get_attrs(self): """Get attribute list Provide access to the zVar's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = zAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. """) class _Hyperslice(object): """Represents a CDF 'slice' used for the hyper CDF functions For internal module use only. @ivar dims: number of dimensions to this slice, usually number of dimensions to the variable plus one for the record, which represents the 0th (least rapidly varying) dimension. @type dims: int @ivar dimsizes: size of each dimension (0th is number of records) @type dimsizes: list of int @ivar starts: index of the start value for each dimension ('dimension indices' in CDF speak) @type starts: list of int @ivar counts: number of values to get from each dimension. Final result will be the product of everything in counts. ('dimension counts' in CDF speak) @type counts: numpy.array @ivar intervals: interval between successive indices to use for each dimension. ('dimension invervals' in CDF speak) @type intervals: list of int @ivar degen: is this dimension degenerate, i.e. should be removed in the returned dataset. A 3D array with one dimension degenerate will be returned as a 2D array (i.e. list-of-lists.) @type degen: numpy.array @ivar rev: should this dimension be returned in reverse order? 
@type rev: numpy.array @ivar column: is this slice in column-major mode (if false, row-major) @type column: boolean @ivar zvar: what CDF variable this object slices on @type zvar: :py:class:`pycdf.Var` @ivar expanded_key: fully-expanded version of the key passed to the constructor (all dimensions filled in) @type expanded_key: tuple @note: All dimension-related variables are stored row-major (Python order) """ def __init__(self, zvar, key): """Create a Hyperslice @param zvar: zVariable that this slices @type zvar: :py:class:`pycdf.Var` @param key: Python multi-dimensional slice as passed to __getitem__ @type key: tuple of slice and/or int @raise IndexError: if slice is out of range, mismatches dimensions, or otherwise unparsable. @raise ValueError: if slice has invalid values """ self.zvar = zvar self.rv = self.zvar.rv() #dim of records, + 1 record dim (NRV always is record 0) self.dims = zvar._n_dims() + 1 self.dimsizes = [len(zvar)] + \ zvar._dim_sizes() self.starts = [0] * self.dims self.counts = numpy.empty((self.dims,), dtype=numpy.int32) self.counts.fill(1) self.intervals = [1] * self.dims self.degen = numpy.zeros(self.dims, dtype=numpy.bool) self.rev = numpy.zeros(self.dims, dtype=numpy.bool) #key is: #1. a single value (integer or slice object) if called 1D #2. a tuple (of integers and/or slice objects) if called nD #3. Each item is either a single value (degenerate dim) # or a slice object. if not hasattr(key, '__len__'): #Not a container object, pack in tuple key = (key, ) if not self.rv: key = (0, ) + key #NRV, so always get 0th record (degenerate) key = self.expand_ellipsis(key, self.dims) if self.rv: #special-cases for RV variables if len(key) == 1: #get all data for this record(s) key = self.expand_ellipsis(key + (Ellipsis, ), self.dims) elif len(key) == self.dims - 1: #get same slice from each record key = (slice(None, None, None), ) + key if len(key) == self.dims: self.expanded_key = key for i in range(self.dims): idx = key[i] if hasattr(idx, 'start'): #slice (self.starts[i], self.counts[i], self.intervals[i], self.rev[i]) = \ self.convert_range(idx.start, idx.stop, idx.step, self.dimsizes[i]) else: #Single degenerate value if idx < 0: idx += self.dimsizes[i] if idx != 0 and (idx >= self.dimsizes[i] or idx < 0): raise IndexError('list index out of range') self.starts[i] = idx self.degen[i] = True else: raise IndexError('Slice does not match dimensions for zVar ' + str(zvar._name)) self.column = zvar.cdf_file.col_major() def expected_dims(self, data=None): """Calculate size of non-degenerate dimensions Figures out size, in each dimension, of expected input data @return: size of each dimension for this slice, excluding degenerate @rtype: list of int """ return [self.counts[i] for i in range(self.dims) if not self.degen[i]] def expand(self, data): """Expands the record dimension of this slice to hold a set of data If the length of data (outermost dimension) is larger than the record count (counts[0]) for this slice, expand the slice to hold all the data. This requires that the record dimension of the slice not be degenerate, and also that it not have been completely specified when the hyperslice was created (i.e. record dimension either ellipsis or no specified stop.) Does *not* expand any other dimension, since that's Very Hard in CDF. 
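        For example (sizes hypothetical): writing ten records through
        C{var[0:]} on a five-record variable grows this slice's record
        count to ten, while writing three records through the same slice
        shrinks it to three.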
@param data: the data which are intended to be stored in this slice @type data: list """ rec_slice = self.expanded_key[0] if not self.rv or isinstance(data, str_classes) or self.degen[0] or \ not hasattr(rec_slice, 'stop'): return if len(data) < self.counts[0]: #Truncate to fit data if rec_slice.stop is None and rec_slice.step in (None, 1): self.counts[0] = len(data) elif len(data) > self.counts[0]: #Expand to fit data if rec_slice.step in (None, 1): self.counts[0] = len(data) def create_array(self): """Creates a numpy array to hold the data from this slice Returns ======= out : numpy.array array sized, typed, and dimensioned to hold data from this slice """ counts = self.counts degen = self.degen if self.column: counts = self.reorder(counts) degen = self.reorder(degen) #TODO: Forcing C order for now, revert to using self.column later array = numpy.empty( [counts[i] for i in range(len(counts)) if not degen[i]], self.zvar._np_type(), order='C') return numpy.require(array, requirements=('C', 'A', 'W')) def convert_input_array(self, buffer): """Converts a buffer of raw data from this slice EPOCH(16) variables always need to be converted. CHAR need converted to Unicode if py3k Parameters ========== buffer : numpy.array data as read from the CDF file Returns ======= out : numpy.array converted data """ result = self._flip_array(buffer) #Convert to derived types cdftype = self.zvar.type() if not self.zvar._raw: if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str != bytes: dt = numpy.dtype('U{0}'.format(result.dtype.itemsize)) result = numpy.require(numpy.char.array(result).decode(), dtype=dt) elif cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(result) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(result) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(result) return result def convert_output_array(self, buffer): """Convert a buffer of data that will go into this slice Parameters ========== buffer : numpy.array data to go into the CDF file Returns ======= out : numpy.array input with majority flipped and dimensions reversed to be suitable to pass directly to CDF library. """ buffer = self._flip_array(buffer) return numpy.require(buffer, requirements=('C', 'A', 'W')) def _flip_array(self, data): """ Operations for majority, etc. common between convert_input and _output """ cdftype = self.zvar.type() #Flip majority if any non-degenerate dimensions exist if self.column and not min(self.degen): #Record-number dim degen, swap whole thing if self.degen[0]: if cdftype == const.CDF_EPOCH16.value: #Maintain last dimension data = data.transpose( list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose() #Record-number dimension is not degenerate, so keep it first else: if cdftype == const.CDF_EPOCH16.value: data = data.transpose( [0] + list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose( [0] + list(range(len(data.shape) - 1, 0, -1))) #Reverse non-degenerate dimensions in rev #Remember that the degenerate indices are already gone! 
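        #Build one slice object per surviving (non-degenerate) dimension:
        #a step of -1 for dimensions flagged in self.rev, so numpy hands
        #that axis back in reverse order, and a full slice otherwise. The
        #EPOCH16 branch is intended to leave the trailing pair-of-doubles
        #dimension in its original order.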
if self.rev.any(): sliced = [(slice(None, None, -1) if self.rev[i] else slice(None)) for i in range(self.dims) if not self.degen[i]] if cdftype == const.CDF_EPOCH16.value: #don't reverse last dim sliced.extend(slice(None)) data = operator.getitem(data, tuple(sliced)) return data def select(self): """Selects this hyperslice in the CDF Calls the CDF library to select the CDF, variable, records, and array elements corresponding to this slice. """ args = (const.SELECT_, const.zVAR_RECNUMBER_, ctypes.c_long(self.starts[0]), const.SELECT_, const.zVAR_RECCOUNT_, ctypes.c_long(self.counts[0]), const.SELECT_, const.zVAR_RECINTERVAL_, ctypes.c_long(self.intervals[0])) if self.dims > 1: dims = self.dims - 1 args += (const.SELECT_, const.zVAR_DIMINDICES_, (ctypes.c_long * dims)(*self.starts[1:]), const.SELECT_, const.zVAR_DIMCOUNTS_, (ctypes.c_long * dims)(*self.counts[1:]), const.SELECT_, const.zVAR_DIMINTERVALS_, (ctypes.c_long * dims)(*self.intervals[1:])) self.zvar._call(*args) @staticmethod def expand_ellipsis(slices, n_dims): """Expands any ellipses into correct number of full-size slices @param slices: tuple of slices, integers, or ellipse objects @type slices: tuple @param n_dims: number of dimensions this slice is over @type n_dims: int @return: L{slices} with ellipses replaced by appropriate number of full-dimension slices @rtype: tuple @raise IndexError: if ellipses specified when already have enough dimensions """ if slices is Ellipsis: return tuple([slice(None, None, None) for i in range(n_dims)]) #Elements might be numpy arrays, so can't use in/index idx = [i for i, v in enumerate(slices) if v is Ellipsis] if not idx: #no ellipsis return slices if len(idx) > 1: #multiples! raise IndexError('Ellipses can only be used once per slice.') idx = idx[0] #how many dims to expand ellipsis to #remember the ellipsis is in len(slices) and must be replaced! extra = n_dims - len(slices) + 1 if extra < 0: raise IndexError('too many indices') result = slices[0:idx] + (slice(None), ) * extra + slices[idx+1:] return result @staticmethod def check_well_formed(data): """Checks if input data is well-formed, regular array""" d = numpy.asanyarray(data) if d.dtype == numpy.object: #this is probably going to be bad try: len(d.flat[0]) except TypeError: #at least it's not a list pass else: raise ValueError( 'Data must be well-formed, regular array of number, ' 'string, or datetime') @staticmethod def dimensions(data): """Finds the dimensions of a nested list-of-lists @param data: data of which dimensions are desired @type data: list (of lists) @return: dimensions of L{data}, in order outside-in @rtype: list of int @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) _Hyperslice.check_well_formed(d) return d.shape @staticmethod def types(data, backward=False): """Find dimensions and valid types of a nested list-of-lists Any given data may be representable by a range of CDF types; infer the CDF types which can represent this data. This breaks down to: 1. Proper kind (numerical, string, time) 2. Proper range (stores highest and lowest number) 3. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: 1. Type that matches precision of data first, then 2. integer type before float type, then 3. Smallest type first, then 4. signed type first, then 5. specifically-named (CDF_BYTE) vs. 
generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if L{data} specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: 1. absolute values between 0 and 3e-39 2. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. @param data: data for which dimensions and CDF types are desired @type data: list (of lists) @param backward: limit to pre-CDF3 types @type backward: bool @return: dimensions of L{data}, in order outside-in; CDF types which can represent this data; number of elements required (i.e. length of longest string) @rtype: 3-tuple of lists ([int], [ctypes.c_long], [int]) @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) dims = d.shape elements = 1 types = [] _Hyperslice.check_well_formed(d) if d.dtype.kind in ('S', 'U'): #it's a string types = [const.CDF_CHAR, const.CDF_UCHAR] elements = d.dtype.itemsize if d.dtype.kind == 'U': #UTF-8 uses 4 bytes per elements //= 4 elif d.size and hasattr(numpy.ma.getdata(d).flat[0], 'microsecond'): if max((dt.microsecond % 1000 for dt in d.flat)) > 0: types = [const.CDF_EPOCH16, const.CDF_EPOCH, const.CDF_TIME_TT2000] else: types = [const.CDF_EPOCH, const.CDF_EPOCH16, const.CDF_TIME_TT2000] if backward: del types[types.index(const.CDF_EPOCH16)] del types[-1] elif not lib.supports_int8: del types[-1] elif d is data or isinstance(data, numpy.generic): #numpy array came in, use its type (or byte-swapped) types = [k for k in lib.numpytypedict if (lib.numpytypedict[k] == d.dtype or lib.numpytypedict[k] == d.dtype.newbyteorder()) and not k in lib.timetypes] if (not lib.supports_int8 or backward) \ and const.CDF_INT8.value in types: del types[types.index(const.CDF_INT8.value)] #Maintain priority to match the ordered lists below: #float/double (44, 45) before real (21/22), and #byte (41) before int (1) before char (51). So hack. #Consider making typedict an ordered dict once 2.6 is dead. types.sort(key=lambda x: x % 50, reverse=True) if not types: #not a numpy array, or can't parse its type if d.dtype.kind == 'O': #Object. 
Try to make it numeric #Can't do safe casting from Object, so try and compare #Basically try most restrictive to least restrictive trytypes = (numpy.uint64, numpy.int64, numpy.float64) for t in trytypes: try: newd = d.astype(dtype=t) except: #Failure to cast, try next type continue if (newd == d).all(): #Values preserved, use this type d = newd #Continue with normal guessing, as if a list break else: #fell through without a match raise ValueError( 'Cannot convert generic objects to CDF type.') if d.dtype.kind in ('i', 'u'): #integer minval = numpy.min(d) maxval = numpy.max(d) if minval < 0: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_INT2, const.CDF_INT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 15, 2 ** 31, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] else: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_UINT1, const.CDF_INT2, const.CDF_UINT2, const.CDF_INT4, const.CDF_UINT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 8, 2 ** 15, 2 ** 16, 2 ** 31, 2 ** 32, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] types = [t for (t, c) in zip(types, cutoffs) if c > maxval and (minval >= 0 or minval >= -c)] if (not lib.supports_int8 or backward) \ and const.CDF_INT8 in types: del types[types.index(const.CDF_INT8)] else: #float if dims is (): if d != 0 and (abs(d) > 1.7e38 or abs(d) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] else: absolutes = numpy.abs(d[d != 0]) if len(absolutes) > 0 and \ (numpy.max(absolutes) > 1.7e38 or numpy.min(absolutes) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] types = [t.value if hasattr(t, 'value') else t for t in types] return (dims, types, elements) @staticmethod def reorder(seq): """Reorders seq to switch array majority Used to take an array of subscripts between row and column majority. First element is not touched, being the record number. @param seq: a sequence of *subscripts* @type seq: sequence of integers @return: seq with all but element 0 reversed in order @rtype: sequence of integers """ return numpy.concatenate((seq[0:1], numpy.flipud(seq)[:-1])) @staticmethod def convert_range(start, stop, step, size): """Converts a start/stop/step range to start/count/interval (i.e. changes from Python-style slice to CDF-style) @param start: index to start a slice at, may be none or negative @type start: int @param stop: index at end of slice (one-past, standard Python), may be none or negative @type stop: int @param step: interval for stepping through stlice @type step: int @param size: size of list to slice @type size: int @return: (start, count, interval, rev) where: 1. start is the start index, normalized to be within the size of the list and negatives handled 2. count is the number of records in the slice, guaranteed to stop before the end 3. interval is the skip between records 4. rev indicates whether the sequence should be reversed @rtype: (int, int, int, boolean) """ (start, stop, step) = slice(start, stop, step).indices(size) if step < 0: step *= -1 count = int((start - stop + step - 1) / step) start = start - (count - 1) * step rev = True else: count = int((stop - start + step - 1) / step) rev = False if count < 0: count = 0 start = 0 return (start, count, step, rev) class Attr(MutableSequence): """An attribute, g or z, for a CDF .. 
warning:: This class should not be used directly, but only in its subclasses, :class:`gAttr` and :class:`zAttr`. The methods listed here are safe to use in the subclasses. Represents a CDF attribute, providing access to the Entries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. An introduction to CDF attributes can be found in section 2.4 of the CDF user's guide. Each element of the list is a single Entry of the appropriate type. The index to the elements is the Entry number. Multi-dimensional slicing is *not* supported; an Entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry .. autosummary:: ~Attr.append ~Attr.has_entry ~Attr.insert ~Attr.max_idx ~Attr.new ~Attr.number ~Attr.rename ~Attr.type .. automethod:: append .. automethod:: has_entry .. automethod:: insert .. automethod:: max_idx .. automethod:: new .. automethod:: number .. automethod:: rename .. automethod:: type """ def __init__(self, cdf_file, attr_name, create=False): """Initialize this attribute @param cdf_file: CDF file containing this attribute @type cdf_file: :py:class:`pycdf.CDF` @param attr_name: Name of this attribute @type attr_name: str @param create: True to create attribute, False to look up existing. @type create: bool """ self._cdf_file = cdf_file self._raw = False if isinstance(attr_name, str_classes): try: self._name = attr_name.encode('ascii') except AttributeError: self._name = attr_name attrno = ctypes.c_long() if create: self._cdf_file._call(const.CREATE_, const.ATTR_, self._name, self.SCOPE, ctypes.byref(attrno)) self._cdf_file.add_attr_to_cache( self._name, attrno.value, self.SCOPE == const.GLOBAL_SCOPE) else: #Ensure exists, and populate cache. See scope note below attrno, scope = self._cdf_file.attr_num(self._name) else: name = ctypes.create_string_buffer(const.CDF_ATTR_NAME_LEN256 + 1) scope = ctypes.c_long(0) self._cdf_file._call(const.SELECT_, const.ATTR_, ctypes.c_long(attr_name)) #Because it's possible to create a gAttr Python objecting #referencing an Attribute with variable scope, and vice-versa, #do NOT assume the scope matches #(Higher level code checks for that being a bad thing.) self._cdf_file._call( const.GET_, const.ATTR_NAME_, name, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) self._name = name.value.rstrip() if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) self._cdf_file.add_attr_to_cache(self._name, attr_name, scope) # MASKED: __getitem__ function (lines 4087-4109) def _check_other_entries(self, types): """Try to get the type of this entry from others in the Attribute For zAttrs, checks if all other Entries are the same type, and at least one doesn't match its zVar, i.e. Entry type dominates (otherwise assumption is the Var type dominates). For gAttrs, checks all other Entries, and gives priority to the one that's earliest in the possible type list and exists in other Entries. This is only one component of Entry type guessing! 
:param list types: CDF types that are candidates (match the data) :return: The type discerned from other Entries, or None """ if self.ENTRY_ == const.zENTRY_: #If everything else is the same entry type, #and one is not the same as its var, probably #all entries should be of that type cand_et = None #The Entry type that might work one_var_diff = False #One Var has a type different from Entry for num in range(self.max_idx() + 1): if not self.has_entry(num): continue vartype = self._cdf_file[num].type() entrytype = self.type(num) if vartype != entrytype: one_var_diff = True if cand_et is None: if not entrytype in types: return None #One var has Entry with "impossible" type cand_et = entrytype elif cand_et != entrytype: return None #Two vars have Entries with different types if one_var_diff and cand_et is not None: return cand_et else: # Of those types which exist in other entries, # find the one which is earliest # in types, i.e. the preferred type entrytypes = [self.type(num) for num in range(self.max_idx() + 1) if self.has_entry(num)] entrytypes = [et for et in entrytypes if et in types] if entrytypes: return types[ min([types.index(et) for et in entrytypes])] return None def __setitem__(self, key, data): """Set a slice of Entries. @param key: index or range of Entry numbers to set @type key: slice or int @param data: the data to set these entries to. Normally each entry should be a sequence; if a scalar is provided, it is treated as a single-element list. @type data: scalar or list @raise ValueError: if size of {data} does not match size of L{key} @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. """ if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): #Single value, promote everything a dimension idx = (key, key + 1, 1) data = [data] else: idx = key.indices(self.max_idx() + 1) if key.step is None or key.step > 0: #Iterating forward, extend slice to match data if len(data) > len(range(*idx)): idx = (idx[0], idx[0] + idx[2] * len(data), idx[2]) #get, and check, types and sizes for all data #checks first so don't have error after changing half the Entries data_idx = -1 typelist = [] for i in range(*idx): data_idx += 1 if data_idx >= len(data): continue datum = data[data_idx] if datum is None: typelist[i] = (None, None, None) continue (dims, types, elements) = _Hyperslice.types( datum, backward=self._cdf_file.backward) if len(types) <= 0: raise ValueError('Cannot find a matching CDF type.') if len(dims) > 1: raise ValueError('Entries must be scalar or 1D.') elif len(dims) == 1 and isinstance(datum[0], str_classes): raise ValueError('Entry strings must be scalar.') entry_type = None if self.has_entry(i): #If the entry already exists, match its type entry_type = self.type(i) if not entry_type in types: entry_type = None if entry_type is None: #Check other entries for this attribute entry_type = self._check_other_entries(types) if entry_type is None and self.ENTRY_ == const.zENTRY_: #Fall back to zVar type vartype = self._cdf_file[i].type() if vartype in types: entry_type = vartype else: entry_type = types[0] elif entry_type is None: entry_type = types[0] if not entry_type in lib.numpytypedict: raise ValueError('Cannot find a matching numpy type.') typelist.append((dims, entry_type, elements)) data_idx = -1 for i in range(*idx): data_idx += 1 if data_idx >= len(data) or data[data_idx] is None: if self.has_entry(i): del self[i] continue datum = data[data_idx] (dims, entry_type, 
elements) = typelist[data_idx] self._write_entry(i, datum, entry_type, dims, elements) def __delitem__(self, key): """Delete a slice of Entries. @param key: index or range of Entry numbers to delete @type key: slice or int @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. """ if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): idx = (key, key + 1, 1) else: idx = key.indices(self.max_idx() + 1) for i in range(*idx): self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(i), const.DELETE_, self.ENTRY_) def __iter__(self, current=0): """Iterates over all entries in this Attribute Returns data from one entry at a time until reaches the end. @note: Returned in entry-number order. """ while current <= self.max_idx(): if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current += 1 def __reversed__(self, current=None): """Iterates over all entries in this Attribute Returns data from one entry at a time, starting at end and going to beginning. @note: Returned in entry-number order. """ if current is None: current = self.max_idx() while current >= 0: if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current -= 1 def __len__(self): """Number of Entries for this Attr. NOT same as max Entry number. @return: Number of Entries @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_NUMENTRIES_, ctypes.byref(count)) return count.value def __repr__(self): """Returns representation of an attribute Cannot return anything that can be eval'd to create a copy of the attribtute, so just wrap the informal representation in angle brackets. @return: all the data in this attribute @rtype: str """ return '<\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute This is an 'informal' representation in that it cannot be evaluated directly to create an L{Attr}. @return: all the data in this attribute @rtype: str """ if self._cdf_file._opened: return '\n'.join([str(item) for item in self]) else: if isinstance(self._name, str): return 'Attribute "{0}" in closed CDF {1}'.format( self._name, self._cdf_file.pathname) else: return 'Attribute "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self._cdf_file.pathname.decode('ascii')) def insert(self, index, data): """Insert an entry at a particular number Inserts entry at particular number while moving all subsequent entries to one entry number later. Does not close gaps. Parameters ========== index : int index where to put the new entry data : data for the new entry """ max_entry = self.max_idx() if index > max_entry: #Easy case self[index] = data return for i in range(max_entry, index - 1, -1): if self.has_entry(i+1): self.__delitem__(i+1) if self.has_entry(i): self.new(self.__getitem__(i), type=self.type(i), number=i+1) self[index] = data def append(self, data): """Add an entry to end of attribute Puts entry after last defined entry (does not fill gaps) Parameters ========== data : data for the new entry """ self[self.max_idx() + 1] = data def _call(self, *args, **kwargs): """Select this CDF and Attr and call the CDF internal interface @param args: Passed directly to the CDF library interface. @type args: various, see :py:mod:`ctypes`. @return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. 
@raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self._cdf_file._call( const.SELECT_, const.ATTR_, ctypes.c_long(self._cdf_file.attr_num(self._name)[0]), *args, **kwargs) def _entry_len(self, number): """Number of elements in an Entry @param number: number of Entry @type number: int @return: number of elements @rtype: int """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') count = ctypes.c_long(0) self._call( const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_NUMELEMS_, ctypes.byref(count)) return count.value def type(self, number, new_type=None): """Find or change the CDF type of a particular Entry number Parameters ========== number : int number of Entry to check or change Other Parameters ================ new_type type to change this Entry to, from :mod:`~pycdf.const`. Omit to only check type. Returns ======= out : int CDF variable type, see :mod:`~pycdf.const` Notes ===== If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 57 """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) size = ctypes.c_long(self._entry_len(number)) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATASPEC_, new_type, size, ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') cdftype = ctypes.c_long(0) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATATYPE_, ctypes.byref(cdftype), ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') return cdftype.value def has_entry(self, number): """Check if this attribute has a particular Entry number Parameters ========== number : int number of Entry to check or change Returns ======= out : bool True if ``number`` is a valid entry number; False if not """ status = self._call(const.CONFIRM_, self.ENTRY_EXISTENCE_, ctypes.c_long(number), ignore=(const.NO_SUCH_ENTRY, )) return not status == const.NO_SUCH_ENTRY def max_idx(self): """Maximum index of Entries for this Attr Returns ======= out : int maximum Entry number """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_MAXENTRY_, ctypes.byref(count)) return count.value def new(self, data, type=None, number=None): """Create a new Entry in this Attribute .. note:: If ``number`` is provided and an Entry with that number already exists, it will be overwritten. Parameters ========== data data to put in the Entry Other Parameters ================ type : int type of the new Entry, from :mod:`~pycdf.const` (otherwise guessed from ``data``) number : int Entry number to write, default is lowest available number. 
""" if number is None: number = 0 while self.has_entry(number): number += 1 (dims, types, elements) = _Hyperslice.types( data, backward=self._cdf_file.backward) if type is None: #Guess based on other entries type = self._check_other_entries(types) if type is None and self.ENTRY_ == const.zENTRY_: #Try to match variable type vartype = self._cdf_file[number].type() if vartype in types: type = vartype if type is None: type = types[0] elif hasattr(type, 'value'): type = type.value self._write_entry(number, data, type, dims, elements) def number(self): """Find the attribute number for this attribute Returns ======= out : int attribute number """ no = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.ATTR_NUMBER_, self._name, ctypes.byref(no)) return no.value def global_scope(self): """Determine scope of this attribute. Returns ======= out : bool True if global (i.e. gAttr), False if zAttr """ return self._cdf_file.attr_num(self._name)[1] def rename(self, new_name): """Rename this attribute Renaming a zAttribute renames it for *all* zVariables in this CDF! Parameters ========== new_name : str the new name of the attribute """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_ATTR_NAME_LEN256: raise CDFError(const.BAD_ATTR_NAME) self._call(const.PUT_, const.ATTR_NAME_, enc_name) self._cdf_file.add_attr_to_cache( enc_name, *self._cdf_file.attr_num(self._name)) #still in cache del self._cdf_file._attr_info[self._name] self._name = enc_name def _get_entry(self, number): """Read an Entry associated with this L{Attr} @param number: number of Entry to return @type number: int @return: data from entry numbered L{number} @rtype: list or str """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') #Make a big enough buffer length = self._entry_len(number) cdftype = self.type(number) if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): buff = numpy.empty((), 'S{0}'.format(length), order='C') else: if not cdftype in lib.numpytypedict: raise CDFError(const.BAD_DATA_TYPE) buff = numpy.empty((length,), lib.numpytypedict[cdftype], order='C') buff = numpy.require(buff, requirements=('C', 'A', 'W')) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATA_, buff.ctypes.data_as(ctypes.c_void_p)) #decode if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): if str == bytes or self._raw: #Py2k, leave as bytes result = bytes(buff) else: #Py3k, make unicode result = str(numpy.char.array(buff).decode()) else: if not self._raw: if cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(buff) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(buff) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(buff) else: result = buff else: result = buff if length == 1: result = result[0] return result def _write_entry(self, number, data, cdf_type, dims, elements): """Write an Entry to this Attr. 
@param number: number of Entry to write @type number: int @param data: data to write @param cdf_type: the CDF type to write, from :py:mod:`pycdf.const` @param dims: dimensions of L{data} @type dims: list @param elements: number of elements in L{data}, 1 unless it is a string @type elements: int """ if len(dims) == 0: n_write = 1 else: n_write = dims[0] if cdf_type in (const.CDF_CHAR.value, const.CDF_UCHAR.value): data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.dtype('S' + str(elements))) n_write = elements elif cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data), except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) elif cdf_type in lib.numpytypedict: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=lib.numpytypedict[cdf_type]) else: raise CDFError(const.BAD_DATA_TYPE) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATA_, ctypes.c_long(cdf_type), ctypes.c_long(n_write), data.ctypes.data_as(ctypes.c_void_p)) def _delete(self): """Delete this Attribute Also deletes all Entries associated with it. """ self._call(const.DELETE_, const.ATTR_) self._cdf_file.clear_attr_from_cache(self._name) self._name = None class zAttr(Attr): """zAttribute for zVariables within a CDF. .. warning:: Because zAttributes are shared across all variables in a CDF, directly manipulating them may have unexpected consequences. It is safest to operate on zEntries via :class:`zAttrList`. .. note:: When accessing a zAttr, pyCDF exposes only the zEntry corresponding to the associated zVariable. See Also ======== :class:`Attr` """ ENTRY_ = const.zENTRY_ ENTRY_DATA_ = const.zENTRY_DATA_ SCOPE = const.VARIABLE_SCOPE ENTRY_EXISTENCE_ = const.zENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMzENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXzENTRY_ ENTRY_NUMELEMS_ = const.zENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.zENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.zENTRY_DATASPEC_ def insert(self, index, data): """Insert entry at particular index number Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError def append(self, index, data): """Add entry to end of attribute list Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError class gAttr(Attr): """Global Attribute for a CDF Represents a CDF attribute, providing access to the gEntries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. Normally accessed by providing a key to a :class:`gAttrList`: >>> attribute = cdffile.attrs['attribute_name'] >>> first_gentry = attribute[0] Each element of the list is a single gEntry of the appropriate type. The index to the elements is the gEntry number. 
A gEntry may be either a single string or a 1D array of numerical type. Entries of numerical type (everything but CDF_CHAR and CDF_UCHAR) with a single element are returned as scalars; multiple-element entries are returned as a list. No provision is made for accessing below the entry level; the whole list is returned at once (but Python's slicing syntax can be used to extract individual items from that list.) Multi-dimensional slicing is *not* supported; an entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry gEntries are *not* necessarily contiguous; a gAttribute may have an entry 0 and entry 2 without an entry 1. :meth:`~Attr.len` will return the *number* of gEntries; use :meth:`~Attr.max_idx` to find the highest defined gEntry number and :meth:`~Attr.has_entry` to determine if a particular gEntry number exists. Iterating over all entries is also supported:: >>> entrylist = [entry for entry in attribute] Deleting gEntries will leave a "hole": >>> attribute[0:3] = [1, 2, 3] >>> del attribute[1] >>> attribute.has_entry(1) False >>> attribute.has_entry(2) True >>> print attribute[0:3] [1, None, 3] Multi-element slices over nonexistent gEntries will return ``None`` where no entry exists. Single-element indices for nonexistent gEntries will raise ``IndexError``. Assigning ``None`` to a gEntry will delete it. When assigning to a gEntry, the type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing gEntry of the same number in this gAttribute #. other gEntries in this gAttribute #. data-matching constraints described in :meth:`CDF.new`. See Also ======== :class:`Attr` """ ENTRY_ = const.gENTRY_ ENTRY_DATA_ = const.gENTRY_DATA_ SCOPE = const.GLOBAL_SCOPE ENTRY_EXISTENCE_ = const.gENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMgENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXgENTRY_ ENTRY_NUMELEMS_ = const.gENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.gENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.gENTRY_DATASPEC_ class AttrList(MutableMapping): """Object representing a list of attributes. .. warning:: This class should not be used directly, but only via its subclasses, :class:`gAttrList` and :class:`zAttrList`. Methods listed here are safe to use from the subclasses. .. autosummary:: ~AttrList.clone ~AttrList.copy ~AttrList.from_dict ~AttrList.new ~AttrList.rename .. automethod:: clone .. automethod:: copy .. automethod:: from_dict .. automethod:: new .. automethod:: rename """ def __init__(self, cdf_file, special_entry=None): """Initialize the attribute collection @param cdf_file: CDF these attributes are in @type cdf_file: :py:class:`pycdf.CDF` @param special_entry: callable which returns a "special" entry number, used to limit results for zAttrs to those which match the zVar (i.e. 
the var number) @type special_entry: callable """ self._cdf_file = cdf_file self.special_entry = special_entry def __getitem__(self, name): """Find an Attribute by name @param name: name of the Attribute to return @type name: str @return: attribute named L{name} @rtype: L{Attr} @raise KeyError: if there is no attribute named L{name} @raise CDFError: other errors in CDF library """ try: attrib = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attrib.global_scope() != self.global_scope: raise KeyError(name + ': no ' + self.attr_name + ' by that name.') return attrib def __setitem__(self, name, data): """Create an Attribute or change its entries @param name: name of Attribute to change @type name: str @param data: Entries to populate this Attribute with. Any existing Entries will be deleted! Another C{Attr} may be specified, in which case all its entries are copied. @type data: scalar, list, or L{Attr} """ if isinstance(data, AttrList): if name in self: del self[name] attr = self._get_or_create(name) for entryno in range(data.max_idx()): if data.has_entry(entryno): attr.new(data[entryno], data.type(entryno), entryno) else: attr = self._get_or_create(name) if isinstance(data, str_classes): data = [data] else: try: junk = len(data) except TypeError: data = [data] attr[:] = data del attr[len(data):] def __delitem__(self, name): """Delete an Attribute (and all its entries) @param name: name of Attribute to delete @type name: str """ try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) attr._delete() def __iter__(self, current=0): """Iterates over all Attr in this CDF or variable Returns name of one L{Attr} at a time until reaches the end. @note: Returned in number order. """ count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) while current < count.value: candidate = self.AttrType(self._cdf_file, current) if candidate.global_scope() == self.global_scope: if self.special_entry is None or \ candidate.has_entry(self.special_entry()): if str == bytes: value = yield(candidate._name) else: value = yield(candidate._name.decode()) if value != None: current = self[value].number() current += 1 def __repr__(self): """Returns representation of attribute list Cannot return anything that can be eval'd to create a copy of the list, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<' + self.__class__.__name__ + ':\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute list This is an 'informal' representation in that it cannot be evaluated directly to create an L{AttrList}. 
@return: all the data in this list of attributes @rtype: str """ if self._cdf_file._opened: return '\n'.join([key + ': ' + ( ('\n' + ' ' * (len(key) + 2)).join( [str(value[i]) + ' [' + lib.cdftypenames[value.type(i)] + ']' for i in range(value.max_idx() + 1) if value.has_entry(i)]) if isinstance(value, Attr) else str(value) + ' [' + lib.cdftypenames[self.type(key)] + ']' ) for (key, value) in sorted(self.items())]) else: if isinstance(self._cdf_file.pathname, str): return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname) else: return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname.decode('ascii')) def clone(self, master, name=None, new_name=None): """ Clones another attribute list, or one attribute from it, into this list. Parameters ========== master : AttrList the attribute list to copy from. This can be any dict-like object. Other Parameters ================ name : str (optional) name of attribute to clone (default: clone entire list) new_name : str (optional) name of the new attribute, default ``name`` """ if name is None: self._clone_list(master) else: self._clone_attr(master, name, new_name) def copy(self): """ Create a copy of this attribute list Returns ======= out : dict copy of the entries for all attributes in this list """ return dict((key, value[:] if isinstance(value, Attr) else value) for (key, value) in self.items()) def new(self, name, data=None, type=None): """ Create a new Attr in this AttrList Parameters ========== name : str name of the new Attribute Other Parameters ================ data data to put into the first entry in the new Attribute type CDF type of the first entry from :mod:`~pycdf.const`. Only used if data are specified. Raises ====== KeyError : if the name already exists in this list """ if name in self: raise KeyError(name + ' already exists.') #A zAttr without an Entry in this zVar will be a "get" not "create" attr = self._get_or_create(name) if data is not None: if self.special_entry is None: attr.new(data, type) else: attr.new(data, type, self.special_entry()) def rename(self, old_name, new_name): """ Rename an attribute in this list Renaming a zAttribute renames it for *all* zVariables in this CDF! Parameters ========== old_name : str the current name of the attribute new_name : str the new name of the attribute """ AttrList.__getitem__(self, old_name).rename(new_name) def from_dict(self, in_dict): """ Fill this list of attributes from a dictionary .. deprecated:: 0.1.5 Use :meth:`~pycdf.AttrList.clone` instead; it supports cloning from dictionaries. Parameters ========== in_dict : dict Attribute list is populated entirely from this dictionary; all existing attributes are deleted. """ warnings.warn("from_dict is deprecated and will be removed. 
Use clone.", DeprecationWarning) for k in in_dict: self[k] = in_dict[k] for k in list(self): if not k in in_dict: del self[k] def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{AttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name self[new_name] = master[name] def _clone_list(self, master): """Clones this attribute list from another @param master: the attribute list to copy from @type master: L{AttrList} """ for name in master: self._clone_attr(master, name) for name in list(self): #Can't iterate over a list we're changing if not name in master: del self[name] def _get_or_create(self, name): """Retrieve L{Attr} or create it if it doesn't exist @param name: name of the attribute to look up or create @type name: str @return: attribute with this name @rtype: L{Attr} """ attr = None try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status != const.NO_SUCH_ATTR: raise if attr is None: attr = self.AttrType(self._cdf_file, name, True) elif attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) return attr class gAttrList(AttrList): """ Object representing *all* the gAttributes in a CDF. Normally accessed as an attribute of an open :class:`CDF`: >>> global_attribs = cdffile.attrs Appears as a dictionary: keys are attribute names; each value is an attribute represented by a :class:`gAttr` object. To access the global attribute TEXT: >>> text_attr = cdffile.attrs['TEXT'] See Also ======== :class:`AttrList` """ AttrType = gAttr attr_name = 'gAttribute' global_scope = True def __len__(self): """ Number of gAttributes in this CDF Returns ======= out : int number of gAttributes in the CDF """ count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMgATTRS_, ctypes.byref(count)) return count.value class zAttrList(AttrList): """Object representing *all* the zAttributes in a zVariable. Normally accessed as an attribute of a :class:`Var` in an open CDF: >>> epoch_attribs = cdffile['Epoch'].attrs Appears as a dictionary: keys are attribute names, values are the value of the zEntry associated with the appropriate zVariable. Each vAttribute in a CDF may only have a *single* entry associated with each variable. The entry may be a string, a single numerical value, or a series of numerical values. Entries with multiple values are returned as an entire list; direct access to the individual elements is not possible. Example: finding the first dependency of (ISTP-compliant) variable ``Flux``: >>> print cdffile['Flux'].attrs['DEPEND_0'] zAttributes are shared among zVariables, one zEntry allowed per zVariable. (pyCDF hides this detail.) Deleting the last zEntry for a zAttribute will delete the underlying zAttribute. zEntries are created and destroyed by the usual dict methods on the zAttrlist: >>> epoch_attribs['new_entry'] = [1, 2, 4] #assign a list to new zEntry >>> del epoch_attribs['new_entry'] #delete the zEntry The type of the zEntry is guessed from data provided. The type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing zEntry corresponding to this zVar #. other zEntries in this zAttribute #. the type of this zVar #. 
data-matching constraints described in :py:meth:`CDF.new` See Also ======== :class:`AttrList` """ AttrType = zAttr attr_name = 'zAttribute' global_scope = False def __init__(self, zvar): """Initialize the attribute collection @param zvar: zVariable these attributes are in @param zvar: :py:class:`pycdf.Var` """ super(zAttrList, self).__init__(zvar.cdf_file, zvar._num) self._zvar = zvar def __getitem__(self, name): """Find an zEntry by name @param name: name of the zAttribute to return @type name: str @return: attribute named L{name} @rtype: L{zAttr} @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if attrib.has_entry(zvar_num): attrib._raw = self._zvar._raw return attrib[zvar_num] else: raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) def __delitem__(self, name): """Delete an zEntry by name @param name: name of the zEntry to delete @type name: str @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library @note: If this is the only remaining entry, the Attribute will be deleted. """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(str(name) + ': no such attribute for variable ' + str(self._zvar._name)) del attrib[zvar_num] if len(attrib) == 0: attrib._delete() def __setitem__(self, name, data): """Sets a zEntry by name The type of the zEntry is guessed from L{data}. The type is chosen to match the data; subject to that constraint, it will try to match (in order): 1. existing zEntry corresponding to this zVar 2. other zEntries in this zAttribute 3. the type of this zVar 4. data-matching constraints described in L{_Hyperslice.types} @param name: name of zAttribute; zEntry for this zVariable will be set in zAttribute by this name @type name: str @raise CDFError: errors in CDF library @raise ValueError: if unable to find a valid CDF type matching L{data}, or if L{data} is the wrong dimensions. """ try: attr = super(zAttrList, self).__getitem__(name) except KeyError: attr = zAttr(self._cdf_file, name, True) attr._raw = self._zvar._raw attr[self._zvar._num()] = data def __len__(self): """Number of zAttributes in this variable @return: number of zAttributes in the CDF which have entries for this variable. @rtype: int """ length = 0 count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) current = 0 while current < count.value: candidate = zAttr(self._cdf_file, current) if not candidate.global_scope(): if candidate.has_entry(self._zvar._num()): length += 1 current += 1 return length def type(self, name, new_type=None): """Find or change the CDF type of a zEntry in this zVar @param name: name of the zAttr to check or change @type name: str @param new_type: type to change it to, see :py:mod:`pycdf.const` @type new_type: ctypes.c_long @return: CDF variable type, see :py:mod:`pycdf.const` @rtype: int @note: If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) return attrib.type(zvar_num, new_type) def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{zAttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name if new_name in self: del self[new_name] self.new(new_name, master[name], master.type(name) if hasattr(master, 'type') else None)
def __getitem__(self, key):
    """Return a slice of Entries.

    Because Attributes may be sparse, a multi-element slice will return
    None for those elements which do not have associated Entries.

    @param key: index or range of Entry number to return
    @type key: slice or int
    @return: a list of entries, appropriate type.
    @raise IndexError: if L{key} is an int and that Entry number does not
                       exist.
    """
    if key is Ellipsis:
        key = slice(None, None, None)
    if hasattr(key, 'indices'):
        idx = range(*key.indices(self.max_idx() + 1))
        return [self._get_entry(i) if self.has_entry(i) else None
                for i in idx]
    else:
        if self.has_entry(key):
            return self._get_entry(key)
        else:
            raise IndexError('list index ' + str(key) + ' out of range.')
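A short, hedged sketch of the sparse-entry slicing behavior this __getitem__ implements (same assumptions as above: pycdf import name, hypothetical file name, CDF C library available).

import pycdf

cdf = pycdf.CDF('demo2.cdf', '')
cdf.attrs['counts'] = [1, 2, 3]     # gEntries 0, 1, 2
counts = cdf.attrs['counts']

del counts[1]                       # leaves a hole; entry numbers never shift
print(counts.has_entry(1))          # False
print(counts[0:3])                  # roughly [1, None, 3]; None marks the missing entry
# counts[1] would raise IndexError: a single index requires an existing Entry
cdf.close()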
4087
4109
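The Attr.insert and append methods shown earlier renumber gEntries rather than filling gaps. A hedged sketch under the same assumptions (pycdf import name, hypothetical file name):

import pycdf

cdf = pycdf.CDF('demo3.cdf', '')
cdf.attrs['notes'] = [10, 20, 30]   # gEntries 0, 1, 2
notes = cdf.attrs['notes']

notes.append(40)                    # goes after the last defined entry -> entry 3
notes.insert(1, 15)                 # entries 1-3 shift up to 2-4; entry 1 becomes 15
print(list(notes))                  # roughly [10, 15, 20, 30, 40]
cdf.close()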
#!/usr/bin/env python # -*- coding: utf-8 -*- """ das developers note: This a is modification of the original SpacePy pycdf package. All refereneces to the greater spacepy package have been removed to create a small standalone module. --cwp 2018-10-18 The libcdf.so location code has been changed to find the version installed in anaconda. --cwp 2020-04-06 This package provides a Python interface to the Common Data Format (CDF) library used for many NASA missions, available at http://cdf.gsfc.nasa.gov/. It is targeted at Python 2.6+ and should work without change on either Python 2 or Python 3. The interface is intended to be 'pythonic' rather than reproducing the C interface. To open or close a CDF and access its variables, see the :class:`CDF` class. Accessing data within the variables is via the :class:`Var` class. The :data:`lib` object provides access to some routines that affect the functionality of the library in general. The :mod:`~pycdf.const` module contains constants useful for accessing the underlying library. Authors: Jon Niehof Institution: University of New Hampshire Contact: [email protected] Copyright 2010-2015 Los Alamos National Security, LLC. """ __contact__ = 'Jon Niehof, [email protected]' try: from collections.abc import MutableMapping, MutableSequence except ImportError: from collections import MutableMapping, MutableSequence import ctypes import ctypes.util import datetime import operator import os import os.path import shutil import sys import tempfile import warnings import weakref import numpy import numpy.ma #Import const AFTER library loaded, so failed load doesn't leave half-imported #from . import const try: str_classes = (str, bytes, unicode) except NameError: str_classes = (str, bytes) class Library(object): """ Abstraction of the base CDF C library and its state. Not normally intended for end-user use. An instance of this class is created at package load time as the :data:`~pycdf.lib` variable, providing access to the underlying C library if necessary. The CDF library itself is described in section 2.1 of the CDF user's guide, as well as the CDF C reference manual. Calling the C library directly requires knowledge of :mod:`ctypes`. Instantiating this object loads the C library, see :doc:`/pycdf` docs for details. .. autosummary:: ~Library.call ~Library.check_status ~Library.datetime_to_epoch ~Library.datetime_to_epoch16 ~Library.datetime_to_tt2000 ~Library.epoch_to_datetime ~Library.epoch_to_epoch16 ~Library.epoch_to_num ~Library.epoch_to_tt2000 ~Library.epoch16_to_datetime ~Library.epoch16_to_epoch ~Library.epoch16_to_tt2000 ~Library.set_backward supports_int8 ~Library.tt2000_to_datetime ~Library.tt2000_to_epoch ~Library.tt2000_to_epoch16 v_datetime_to_epoch v_datetime_to_epoch16 v_datetime_to_tt2000 v_epoch_to_datetime v_epoch_to_tt2000 v_epoch16_to_datetime v_epoch16_to_tt2000 v_tt2000_to_datetime v_tt2000_to_epoch v_tt2000_to_epoch16 libpath version .. automethod:: call .. automethod:: check_status .. automethod:: datetime_to_epoch .. automethod:: datetime_to_epoch16 .. automethod:: datetime_to_tt2000 .. automethod:: epoch_to_datetime .. automethod:: epoch_to_epoch16 .. automethod:: epoch_to_num .. automethod:: epoch_to_tt2000 .. automethod:: epoch16_to_datetime .. automethod:: epoch16_to_epoch .. automethod:: epoch16_to_tt2000 .. automethod:: set_backward .. attribute:: supports_int8 True if this library supports INT8 and TIME_TT2000 types; else False. .. automethod:: tt2000_to_datetime .. automethod:: tt2000_to_epoch .. 
automethod:: tt2000_to_epoch16 .. method:: v_datetime_to_epoch(datetime) A vectorized version of :meth:`datetime_to_epoch` which takes a numpy array of datetimes as input and returns an array of epochs. .. method:: v_datetime_to_epoch16(datetime) A vectorized version of :meth:`datetime_to_epoch16` which takes a numpy array of datetimes as input and returns an array of epoch16. .. method:: v_datetime_to_tt2000(datetime) A vectorized version of :meth:`datetime_to_tt2000` which takes a numpy array of datetimes as input and returns an array of TT2000. .. method:: v_epoch_to_datetime(epoch) A vectorized version of :meth:`epoch_to_datetime` which takes a numpy array of epochs as input and returns an array of datetimes. .. method:: v_epoch_to_tt2000(epoch) A vectorized version of :meth:`epoch_to_tt2000` which takes a numpy array of epochs as input and returns an array of tt2000s. .. method:: v_epoch16_to_datetime(epoch0, epoch1) A vectorized version of :meth:`epoch16_to_datetime` which takes a numpy array of epoch16 as input and returns an array of datetimes. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_epoch16_to_tt2000(epoch16) A vectorized version of :meth:`epoch16_to_tt2000` which takes a numpy array of epoch16 as input and returns an array of tt2000s. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_tt2000_to_datetime(tt2000) A vectorized version of :meth:`tt2000_to_datetime` which takes a numpy array of tt2000 as input and returns an array of datetimes. .. method:: v_tt2000_to_epoch(tt2000) A vectorized version of :meth:`tt2000_to_epoch` which takes a numpy array of tt2000 as input and returns an array of epochs. .. method:: v_tt2000_to_epoch16(tt2000) A vectorized version of :meth:`tt2000_to_epoch16` which takes a numpy array of tt2000 as input and returns an array of epoch16. .. attribute:: libpath The path where pycdf found the CDF C library, potentially useful in debugging. If this contains just the name of a file (with no path information), then the system linker found the library for pycdf. On Linux, ``ldconfig -p`` may be useful for displaying the system's library resolution. .. attribute:: version Version of the CDF library, (version, release, increment, subincrement) """ def __init__(self, libpath=None, library=None): """Load the CDF C library. Searches for the library in the order: 1. Appropriately-named file in CDF_LIB 2. Appropriately-named file in CDF_BASE 3. Standard library search path @raise CDFError: BAD_DATA_TYPE if can't map types properly """ if not 'CDF_TMP' in os.environ: os.environ['CDF_TMP'] = tempfile.gettempdir() if not library: if not libpath: self.libpath, self._library = self._find_lib() if self._library is None: raise Exception(( 'Cannot load CDF C library; checked {0}. 
' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(self.libpath))) else: self._library = ctypes.CDLL(libpath) self.libpath = libpath else: self._library = library self.libpath = libpath self._library.CDFlib.restype = ctypes.c_long #commonly used, so set it up here self._library.EPOCHbreakdown.restype = ctypes.c_long self._library.computeEPOCH.restype = ctypes.c_double self._library.computeEPOCH.argtypes = [ctypes.c_long] * 7 self._library.computeEPOCH16.restype = ctypes.c_double self._library.computeEPOCH16.argtypes = [ctypes.c_long] * 10 + \ [ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDFsetFileBackward'): self._library.CDFsetFileBackward.restype = None self._library.CDFsetFileBackward.argtypes = [ctypes.c_long] #Map old name to the 3.7.1+ name if not hasattr(self._library, 'computeTT2000') \ and hasattr(self._library, 'CDF_TT2000_from_UTC_parts'): self._library.computeTT2000 \ = self._library.CDF_TT2000_from_UTC_parts if hasattr(self._library, 'computeTT2000'): self._library.computeTT2000.restype = ctypes.c_longlong self._library.computeTT2000.argtypes = \ [ctypes.c_double] *9 #Map old name to the 3.7.1+ name if not hasattr(self._library, 'breakdownTT2000') \ and hasattr(self._library, 'CDF_TT2000_to_UTC_parts'): self._library.breakdownTT2000 \ = self._library.CDF_TT2000_to_UTC_parts if hasattr(self._library, 'breakdownTT2000'): self._library.breakdownTT2000.restype = None self._library.breakdownTT2000.argtypes = \ [ctypes.c_longlong] + [ctypes.POINTER(ctypes.c_double)] * 9 if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH'): self._library.CDF_TT2000_to_UTC_EPOCH.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH.argtypes = [ctypes.c_longlong] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH'): self._library.CDF_TT2000_from_UTC_EPOCH.restype = ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH.argtypes = [ctypes.c_double] if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH16'): self._library.CDF_TT2000_to_UTC_EPOCH16.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH16.argtypes = \ [ctypes.c_longlong, ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH16'): self._library.CDF_TT2000_from_UTC_EPOCH16.restype = \ ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH16.argtypes = \ [ctypes.POINTER(ctypes.c_double * 2)] #Get CDF version information ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) sub = ctypes.c_char(b' ') self.call(const.GET_, const.LIB_VERSION_, ctypes.byref(ver), const.GET_, const.LIB_RELEASE_, ctypes.byref(rel), const.GET_, const.LIB_INCREMENT_, ctypes.byref(inc), const.GET_, const.LIB_subINCREMENT_, ctypes.byref(sub)) ver = ver.value rel = rel.value inc = inc.value sub = sub.value self.version = (ver, rel, inc, sub) self._del_middle_rec_bug = ver < 3 or (ver == 3 and (rel < 4 or (rel == 4 and inc < 1))) self.supports_int8 = (ver > 3 or (ver == 3 and rel >=4)) self.cdftypenames = {const.CDF_BYTE.value: 'CDF_BYTE', const.CDF_CHAR.value: 'CDF_CHAR', const.CDF_INT1.value: 'CDF_INT1', const.CDF_UCHAR.value: 'CDF_UCHAR', const.CDF_UINT1.value: 'CDF_UINT1', const.CDF_INT2.value: 'CDF_INT2', const.CDF_UINT2.value: 'CDF_UINT2', const.CDF_INT4.value: 'CDF_INT4', const.CDF_UINT4.value: 'CDF_UINT4', const.CDF_INT8.value: 'CDF_INT8', const.CDF_FLOAT.value: 'CDF_FLOAT', const.CDF_REAL4.value: 'CDF_REAL4', const.CDF_DOUBLE.value: 'CDF_DOUBLE', const.CDF_REAL8.value: 'CDF_REAL8', const.CDF_EPOCH.value: 'CDF_EPOCH', 
const.CDF_EPOCH16.value: 'CDF_EPOCH16', const.CDF_TIME_TT2000.value: 'CDF_TIME_TT2000', } self.numpytypedict = {const.CDF_BYTE.value: numpy.int8, const.CDF_CHAR.value: numpy.int8, const.CDF_INT1.value: numpy.int8, const.CDF_UCHAR.value: numpy.uint8, const.CDF_UINT1.value: numpy.uint8, const.CDF_INT2.value: numpy.int16, const.CDF_UINT2.value: numpy.uint16, const.CDF_INT4.value: numpy.int32, const.CDF_UINT4.value: numpy.uint32, const.CDF_INT8.value: numpy.int64, const.CDF_FLOAT.value: numpy.float32, const.CDF_REAL4.value: numpy.float32, const.CDF_DOUBLE.value: numpy.float64, const.CDF_REAL8.value: numpy.float64, const.CDF_EPOCH.value: numpy.float64, const.CDF_EPOCH16.value: numpy.dtype((numpy.float64, 2)), const.CDF_TIME_TT2000.value: numpy.int64, } self.timetypes = [const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value] if not self.supports_int8: del self.cdftypenames[const.CDF_INT8.value] del self.numpytypedict[const.CDF_INT8.value] del self.cdftypenames[const.CDF_TIME_TT2000.value] del self.numpytypedict[const.CDF_TIME_TT2000.value] elif sys.platform.startswith('linux') \ and os.uname()[4].startswith('arm') \ and hasattr(self._library, 'computeTT2000') \ and self._library.computeTT2000( 2010, 1, 1, 0, 0, 0, 0, 0, 0) != 315576066184000000: #TT2000 call failed, so probably need to type-pun #double arguments to variadic functions. #Calling convention for non-variadic functions with floats #is unique, but convention for ints is same as variadic. #So type-pun arguments to integers to force that calling #convention. if ctypes.sizeof(ctypes.c_longlong) != \ ctypes.sizeof(ctypes.c_double): warnings.warn('ARM with unknown type sizes; ' 'TT2000 functions will not work.') else: self._library.computeTT2000.argtypes = \ [ctypes.c_longlong] * 9 c_ll_p = ctypes.POINTER(ctypes.c_longlong) if self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( 2010)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) != 315576066184000000: warnings.warn('ARM with unknown calling convention; ' 'TT2000 functions will not work.') self.datetime_to_tt2000 = self._datetime_to_tt2000_typepunned v_epoch16_to_datetime = numpy.frompyfunc( self.epoch16_to_datetime, 2, 1) self.v_epoch16_to_datetime = \ lambda x: v_epoch16_to_datetime(x[..., 0], x[..., 1]) self.v_epoch_to_datetime = numpy.frompyfunc( self.epoch_to_datetime, 1, 1) self.v_tt2000_to_datetime = numpy.frompyfunc( self.tt2000_to_datetime, 1, 1) self.v_datetime_to_epoch = numpy.vectorize( self.datetime_to_epoch, otypes=[numpy.float64]) v_datetime_to_epoch16 = numpy.frompyfunc( self.datetime_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. 
We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_datetime_to_epoch16(x): retval = numpy.require(v_datetime_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_datetime_to_epoch16 = _v_datetime_to_epoch16 self.v_datetime_to_tt2000 = numpy.vectorize( self.datetime_to_tt2000, otypes=[numpy.int64]) self.v_epoch_to_tt2000 = numpy.vectorize( self.epoch_to_tt2000, otypes=[numpy.int64]) self.v_tt2000_to_epoch = numpy.vectorize( self.tt2000_to_epoch, otypes=[numpy.float64]) v_epoch16_to_tt2000 = numpy.frompyfunc( self.epoch16_to_tt2000, 2, 1) self.v_epoch16_to_tt2000 = \ lambda x: v_epoch16_to_tt2000(x[..., 0], x[..., 1]) v_tt2000_to_epoch16 = numpy.frompyfunc( self.tt2000_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_tt2000_to_epoch16(x): retval = numpy.require(v_tt2000_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_tt2000_to_epoch16 = _v_tt2000_to_epoch16 if not self.supports_int8: self.datetime_to_tt2000 = self._bad_tt2000 self.tt2000_to_datetime = self._bad_tt2000 self.v_datetime_to_tt2000 = self._bad_tt2000 self.v_tt2000_to_datetime = self._bad_tt2000 self.epoch_to_tt2000 = self._bad_tt2000 self.v_epoch_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch = self._bad_tt2000 self.v_tt2000_to_epoch = self._bad_tt2000 self.epoch_16_to_tt2000 = self._bad_tt2000 self.v_epoch16_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch16 = self._bad_tt2000 self.v_tt2000_to_epoch16 = self._bad_tt2000 #Default to V2 CDF self.set_backward(True) @staticmethod def _find_lib(): """ Search for the CDF library Searches in likely locations for CDF libraries and attempts to load them. Stops at first successful load and, if fails, reports all the files that were tried as libraries. Returns ======= out : tuple This is either (path to library, loaded library) or, in the event of failure, (None, list of libraries tried) """ failed = [] for libpath in Library._lib_paths(): try: lib = ctypes.CDLL(libpath) except: failed.append(libpath) else: return libpath, lib return (failed, None) @staticmethod def _lib_paths(): """Find candidate paths for the CDF library Does not check that the library is actually in any particular directory, just returns a list of possible locations, in priority order. Returns ======= out : generator of str paths that look like the CDF library """ #What the library might be named names = { 'win32': ['cdf.dll'], 'darwin': ['libcdf.dylib', 'cdf.dylib', 'libcdf.so'], 'linux2': ['libcdf.so'], 'linux': ['libcdf.so'], } names = names.get(sys.platform, ['libcdf.so']) #All existing CDF-library-like paths within a directory search_dir = lambda x: \ [os.path.join(x, fname) for fname in names if os.path.exists(os.path.join(x, fname))] # Only use anaconda locations... # Defined during builds ... if 'PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['PREFIX'], 'lib')): yield p # defined when conda is activated ... 
if 'CONDA_PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'lib')): yield p # Special subdirectory for anaconda unix packages on windows if 'LIBRARY_BIN' in os.environ: for p in search_dir(os.environ['LIBRARY_BIN']): yield p ctypespath = ctypes.util.find_library( 'cdf.dll' if sys.platform == 'win32' else 'cdf') if ctypespath: yield ctypespath def check_status(self, status, ignore=()): """ Raise exception or warning based on return status of CDF call Parameters ========== status : int status returned by the C library Other Parameters ================ ignore : sequence of ctypes.c_long CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. (Default none). Raises ====== CDFError : if status < CDF_WARN, indicating an error Warns ===== CDFWarning : if CDF_WARN <= status < CDF_OK, indicating a warning. Returns ======= out : int status (unchanged) """ if status == const.CDF_OK or status in ignore: return status if status < const.CDF_WARN: raise CDFError(status) else: warning = CDFWarning(status) warning.warn() return status def call(self, *args, **kwargs): """ Call the CDF internal interface Passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with :meth:`check_status`. Terminal NULL is automatically added to args. Parameters ========== args : various, see :mod:`ctypes` Passed directly to the CDF library interface. Useful constants are defined in the :mod:`~pycdf.const` module. Other Parameters ================ ignore : sequence of CDF statuses sequence of CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. Returns ======= out : int CDF status from the library Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ if 'ignore' in kwargs: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) ), kwargs['ignore']) else: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) )) def set_backward(self, backward=True): """ Set backward compatibility mode for new CDFs Unless backward compatible mode is set, CDF files created by the version 3 library can not be read by V2. Parameters ========== backward : boolean Set backward compatible mode if True; clear it if False. Raises ====== ValueError : if backward=False and underlying CDF library is V2 """ if self.version[0] < 3: if not backward: raise ValueError( 'Cannot disable backward-compatible mode for CDF version 2.') else: return self._library.CDFsetFileBackward(const.BACKWARDFILEon if backward else const.BACKWARDFILEoff) def epoch_to_datetime(self, epoch): """ Converts a CDF epoch value to a datetime Parameters ========== epoch : float epoch value from CDF Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
See Also ======== v_epoch_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) self._library.EPOCHbreakdown(ctypes.c_double(epoch), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999000) else: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, msec.value * 1000) def datetime_to_epoch(self, dt): """ Converts a Python datetime to a CDF Epoch value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : float epoch corresponding to dt See Also ======== v_datetime_to_epoch """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) micro = dt.microsecond % 1000 if micro >= 500 and dt.year < 9999: dt += datetime.timedelta(0, 0, 1000) return self._library.computeEPOCH(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000)) def epoch16_to_datetime(self, epoch0, epoch1): """ Converts a CDF epoch16 value to a datetime .. note:: The call signature has changed since SpacePy 0.1.2. Formerly this method took a single argument with two values; now it requires two arguments (one for each value). To convert existing code, replace ``epoch16_to_datetime(epoch)`` with ``epoch16_to_datetime(*epoch)``. Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. See Also ======== v_epoch16_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) usec = ctypes.c_long(0) nsec = ctypes.c_long(0) psec = ctypes.c_long(0) self._library.EPOCH16breakdown((ctypes.c_double * 2)(epoch0, epoch1), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec), ctypes.byref(psec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) micro = int(float(msec.value) * 1000 + float(usec.value) + float(nsec.value) / 1000 + float(psec.value) / 1e6 + 0.5) if micro < 1000000: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_epoch16(self, dt): """ Converts a Python datetime to a CDF Epoch16 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : list of float epoch16 corresponding to dt See Also ======== v_datetime_to_epoch16 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) #Default to "illegal epoch" epoch16 = (ctypes.c_double * 2)(-1., -1.) 
if self._library.computeEPOCH16(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0, 0, epoch16): return (-1., -1.) #Failure, so illegal epoch return (epoch16[0], epoch16[1]) def epoch_to_epoch16(self, epoch): """ Converts a CDF EPOCH to a CDF EPOCH16 value Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : (double, double) EPOCH16 corresponding to epoch """ e = numpy.require(epoch, numpy.float64) s = numpy.trunc(e / 1000.0) #ugly numpy stuff, probably a better way.... res = numpy.hstack((s, (e - s * 1000.0) * 1e9)) if len(res) <= 2: return res newshape = list(res.shape[0:-2]) newshape.append(res.shape[-1] // 2) newshape.append(2) return numpy.rollaxis(res.reshape(newshape), -1, -2) def epoch_to_num(self, epoch): """ Convert CDF EPOCH to matplotlib number. Same output as :func:`~matplotlib.dates.date2num` and useful for plotting large data sets without converting the times through datetime. Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : double Floating point number representing days since 0001-01-01. """ #date2num day 1 is 1/1/1 00UT #epoch 1/1/1 00UT is 31622400000.0 (millisecond) return (epoch - 31622400000.0) / (24 * 60 * 60 * 1000.0) + 1.0 def epoch16_to_epoch(self, epoch16): """ Converts a CDF EPOCH16 to a CDF EPOCH value Parameters ========== epoch16 : (double, double) EPOCH16 to convert. Lists and numpy arrays are acceptable. LAST dimension should be 2: the two pairs of EPOCH16 Returns ======= out : double EPOCH corresponding to epoch16 """ e = numpy.require(epoch16, numpy.float64) return e[..., 0] * 1000.0 + numpy.round(e[..., 1] / 1e9) def tt2000_to_datetime(self, tt2000): """ Converts a CDF TT2000 value to a datetime .. note:: Although TT2000 values support leapseconds, Python's datetime object does not. Any times after 23:59:59.999999 will be truncated to 23:59:59.999999. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
See Also ======== v_tt2000_to_datetime """ yyyy = ctypes.c_double(0) mm = ctypes.c_double(0) dd = ctypes.c_double(0) hh = ctypes.c_double(0) min = ctypes.c_double(0) sec = ctypes.c_double(0) msec = ctypes.c_double(0) usec = ctypes.c_double(0) nsec = ctypes.c_double(0) self._library.breakdownTT2000( ctypes.c_longlong(tt2000), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) sec = int(sec.value) if sec >= 60: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), 59, 999999) micro = int(msec.value * 1000 + usec.value + nsec.value / 1000 + 0.5) if micro < 1000000: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_tt2000(self, dt): """ Converts a Python datetime to a CDF TT2000 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0) def _datetime_to_tt2000_typepunned(self, dt): """ Converts a Python datetime to a CDF TT2000 value Typepunned version that passes doubles as longlongs, to get around ARM calling convention oddness. Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ c_ll_p = ctypes.POINTER(ctypes.c_longlong) if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( dt.year)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.month)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.day)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.hour)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.minute)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.second)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond // 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond % 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) def epoch_to_tt2000(self, epoch): """ Converts a CDF EPOCH to a CDF TT2000 value Parameters ========== epoch : double EPOCH to convert Returns ======= out : int tt2000 corresponding to epoch See Also ======== v_epoch_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH(epoch) def tt2000_to_epoch(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH .. note:: Although TT2000 values support leapseconds, CDF EPOCH values do not. 
Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double EPOCH corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch """ return self._library.CDF_TT2000_to_UTC_EPOCH(tt2000) def epoch16_to_tt2000(self, epoch0, epoch1): """ Converts a CDF epoch16 value to TT2000 .. note:: Because TT2000 does not support picoseconds, the picoseconds value in epoch is ignored (i.e., truncated.) Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : long TT2000 corresponding to epoch. See Also ======== v_epoch16_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH16( (ctypes.c_double * 2)(epoch0, epoch1)) def tt2000_to_epoch16(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH16 .. note:: Although TT2000 values support leapseconds, CDF EPOCH16 values do not. Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double, double EPOCH16 corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch16 """ #Default to "illegal epoch" if isn't populated epoch16 = (ctypes.c_double * 2)(-1., -1.) if self._library.CDF_TT2000_to_UTC_EPOCH16(tt2000, epoch16): return (-1., -1.) #Failure; illegal epoch return (epoch16[0], epoch16[1]) def _bad_tt2000(*args, **kwargs): """Convenience function for complaining that TT2000 not supported""" raise NotImplementedError( 'TT2000 functions require CDF library 3.4.0 or later') def download_library(): """Download and install the CDF library""" if sys.platform != 'win32': raise NotImplementedError( 'CDF library install only supported on Windows') try: import html.parser as HTMLParser except ImportError: import HTMLParser #https://stackoverflow.com/questions/1713038/super-fails-with-error-typeerror-argument-1-must-be-type-not-classobj class LinkParser(HTMLParser.HTMLParser, object): def __init__(self, *args, **kwargs): self.links_found = [] super(LinkParser, self).__init__(*args, **kwargs) def handle_starttag(self, tag, attrs): if tag != 'a' or attrs[0][0] != 'href': return self.links_found.append(attrs[0][1]) import re import subprocess try: import urllib.request as u except ImportError: import urllib as u # Removed reference to spacepy #import spacepy #if spacepy.config.get('user_agent', None): # class AppURLopener(u.FancyURLopener): # version = spacepy.config['user_agent'] # u._urlopener = AppURLopener() baseurl = 'https://spdf.sci.gsfc.nasa.gov/pub/software/cdf/dist/' url = u.urlopen(baseurl) listing = url.read() url.close() p = LinkParser() p.feed(listing) cdfdist = [l for l in p.links_found if re.match('^cdf3\d_\d(?:_\d)?/$', l)] if not cdfdist: raise RuntimeError( "Couldn't find CDF distribution directory to download") cdfdist.sort(key=lambda x: x.rstrip('/').split('_')) cdfverbase = cdfdist[-1].rstrip('/') instfname = cdfverbase + ('_0' if cdfverbase.count('_') == 1 else '') + \ '-setup-{0}.exe'.format(len('%x' % sys.maxsize)*4) insturl = baseurl + cdfverbase + '/windows/' + instfname tmpdir = tempfile.mkdtemp() try: fname, status = u.urlretrieve(insturl, os.path.join(tmpdir, instfname)) subprocess.check_call([fname, '/install', '/q1'], shell=False) finally: shutil.rmtree(tmpdir) _libpath, _library = Library._find_lib() 
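#If the lookup above fails, the CDF C library is probably not on the default
#search path. A minimal sketch of the workaround named in the error below,
#assuming a hypothetical install location (adjust the directory for your system):
#
#    >>> import os
#    >>> os.environ['CDF_LIB'] = '/usr/local/cdf/lib'  #set before importing pycdf
#    >>> import pycdf
#
#On Windows, download_library() above can fetch and install the library instead.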
if _library is None: raise Exception(('Cannot load CDF C library; checked {0}. ' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(_libpath))) from . import const lib = Library(_libpath, _library) """Module global library object. Initalized at module load time so all classes have ready access to the CDF library and a common state. E.g: >>> import pycdf >>> pycdf.lib.version (3, 3, 0, ' ') """ class CDFException(Exception): """ Base class for errors or warnings in the CDF library. Not normally used directly, but in subclasses :class:`CDFError` and :class:`CDFWarning`. Error messages provided by this class are looked up from the underlying C library. """ def __init__(self, status): """ Create a CDF Exception Uses CDF C library to look up an appropriate error message. Parameters ========== status : ctypes.c_long CDF status """ self.status = status self.string = 'CDF error ' + repr(status) + ', unable to get details.' message = ctypes.create_string_buffer(const.CDF_STATUSTEXT_LEN + 1) try: retval = lib._library.CDFlib(const.SELECT_, const.CDF_STATUS_, ctypes.c_long(status), const.GET_, const.STATUS_TEXT_, message, const.NULL_) if retval == const.CDF_OK: if isinstance(message.value, str): self.string = message.value elif isinstance(message.value, bytes): self.string = message.value.decode() except: pass def __str__(self): """ Error string associated with the library error. Returns ======= out : str Error message from the CDF library. """ return self.string class CDFError(CDFException): """Raised for an error in the CDF library.""" pass class CDFWarning(CDFException, UserWarning): """Used for a warning in the CDF library.""" def warn(self, level=4): """ Issues a warning based on the information stored in my exception Intended for use in check_status or similar wrapper function. Other Parameters ================ level : int optional (default 3), how far up the stack the warning should be reported. Passed directly to :class:`warnings.warn`. """ warnings.warn(self, self.__class__, level) class EpochError(Exception): """Used for errors in epoch routines""" pass def _compress(obj, comptype=None, param=None): """Set or check the compression of a :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param obj: object on which to set or check compression @type obj: :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param comptype: type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :py:mod:`pycdf.const`. If not specified, will not change compression. @type comptype: ctypes.c_long @param param: Compression parameter, see CDF CRM 4.10 and :py:mod:`pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
@type param: ctypes.c_long @return: (comptype, param) currently in effect @rtype: tuple """ if isinstance(obj, CDF): COMPRESSION_ = const.CDF_COMPRESSION_ elif isinstance(obj, Var): COMPRESSION_ = const.zVAR_COMPRESSION_ else: raise ValueError('Must specify a CDF or Var type.') validparams = {const.NO_COMPRESSION.value: [ctypes.c_long(0)], const.RLE_COMPRESSION.value: [const.RLE_OF_ZEROs], const.HUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.AHUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.GZIP_COMPRESSION.value: [ctypes.c_long(5), ctypes.c_long(1), ctypes.c_long(2), ctypes.c_long(3), ctypes.c_long(4), ctypes.c_long(6), ctypes.c_long(7), ctypes.c_long(8), ctypes.c_long(9), ], } comptypes = [const.NO_COMPRESSION, const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION, const.GZIP_COMPRESSION] comptypevalues = [i.value for i in comptypes] if comptype != None: if not hasattr(comptype, 'value'): comptype = ctypes.c_long(comptype) if param is None: if not comptype.value in validparams: raise CDFError(const.BAD_COMPRESSION) param = validparams[comptype.value][0] paramlist = (ctypes.c_long * 1)(param) obj._call(const.PUT_, COMPRESSION_, comptype, paramlist) params = (ctypes.c_long * const.CDF_MAX_PARMS)(*([0] * const.CDF_MAX_PARMS)) comptype = ctypes.c_long(0) percent = ctypes.c_long(0) obj._call(const.GET_, COMPRESSION_, ctypes.byref(comptype), ctypes.byref(params), ctypes.byref(percent)) param = params[0] if not comptype.value in comptypevalues: raise CDFError(const.BAD_COMPRESSION) validparamvalues = [i.value for i in validparams[comptype.value]] if not param in validparamvalues: raise CDFError(const.BAD_COMPRESSION_PARM) comptype = comptypes[comptypevalues.index(comptype.value)] if comptype in (const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION): param = validparams[comptype.value][validparamvalues.index(param)] return (comptype, param) class CDF(MutableMapping): """ Python object representing a CDF file. Open or create a CDF file by creating an object of this class. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. A readonly CDF with many variables may be slow to close. See :meth:`readonly`. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :meth:`close` or :meth:`save` when done. .. note:: Existing CDF files are opened read-only by default, see :meth:`readonly` to change. CDF supports the `with <http://docs.python.org/tutorial/inputoutput.html#methods-of-file-objects>`_ keyword, like other file objects, so: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... #do brilliant things with the CDF will open the CDF, execute the indented statements, and close the CDF when finished or when an error occurs. The `python docs <http://docs.python.org/reference/compound_stmts.html#with>`_ include more detail on this 'context manager' ability. 
CDF objects behave like a python `dictionary <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_, where the keys are names of variables in the CDF, and the values, :class:`Var` objects. As a dictionary, they are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_ and it is easy to loop over all of the variables in a file. Some examples: #. List the names of all variables in the open CDF ``cdffile``: >>> cdffile.keys() >>> for k in cdffile: #Alternate ... print(k) #. Get a :class:`Var` object for the variable named ``Epoch``: >>> epoch = cdffile['Epoch'] #. Determine if a CDF contains a variable named ``B_GSE``: >>> if 'B_GSE' in cdffile: ... print('B_GSE is in the file') ... else: ... print('B_GSE is not in the file') #. Find how many variables are in the file: >>> print(len(cdffile)) #. Delete the variable ``Epoch`` from the open CDF file ``cdffile``: >>> del cdffile['Epoch'] #. Display a summary of variables and types in open CDF file ``cdffile``: >>> print(cdffile) #. Open the CDF named ``cdf_filename.cdf``, read *all* the data from all variables into dictionary ``data``, and close it when done or if an error occurs: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... data = cdffile.copy() This last example can be very inefficient as it reads the entire CDF. Normally it's better to treat the CDF as a dictionary and access only the data needed, which will be pulled transparently from disc. See :class:`Var` for more subtle examples. Potentially useful dictionary methods and related functions: - `in <http://docs.python.org/reference/expressions.html#in>`_ - `keys <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_ - :py:func:`len` - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - :py:func:`sorted` - :py:func:`~spacepy.toolbox.dictree` The CDF user's guide section 2.2 has more background information on CDF files. The :attr:`~CDF.attrs` Python attribute acts as a dictionary referencing CDF attributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`gAttrList` for more on the dictionary of global attributes. Creating a new CDF from a master (skeleton) CDF has similar syntax to opening one: >>> cdffile = pycdf.CDF('cdf_filename.cdf', 'master_cdf_filename.cdf') This creates and opens ``cdf_filename.cdf`` as a copy of ``master_cdf_filename.cdf``. Using a skeleton CDF is recommended over making a CDF entirely from scratch, but this is possible by specifying a blank master: >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') When CDFs are created in this way, they are opened read-write, see :py:meth:`readonly` to change. By default, new CDFs (without a master) are created in version 2 (backward-compatible) format. To create a version 3 CDF, use :meth:`Library.set_backward`: >>> pycdf.lib.set_backward(False) >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') Add variables by direct assignment, which will automatically set type and dimension based on the data provided: >>> cdffile['new_variable_name'] = [1, 2, 3, 4] or, if more control is needed over the type and dimensions, use :py:meth:`new`. Although it is supported to assign Var objects to Python variables for convenience, there are some minor pitfalls that can arise when changing a CDF that will not affect most users. 
This is only a concern when assigning a zVar object to a Python variable, changing the CDF through some other variable, and then trying to use the zVar object via the originally assigned variable. Deleting a variable: >>> var = cdffile['Var1'] >>> del cdffile['Var1'] >>> var[0] #fail, no such variable Renaming a variable: >>> var = cdffile['Var1'] >>> cdffile['Var1'].rename('Var2') >>> var[0] #fail, no such variable Renaming via the same variable works: >>> var = cdffile['Var1'] >>> var.rename('Var2') >>> var[0] #succeeds, aware of new name Deleting a variable and then creating another variable with the same name may lead to some surprises: >>> var = cdffile['Var1'] >>> var[...] = [1, 2, 3, 4] >>> del cdffile['Var1'] >>> cdffile.new('Var1', data=[5, 6, 7, 8]) >>> var[...] [5, 6, 7, 8] .. autosummary:: ~CDF.attr_num ~CDF.attrs ~CDF.add_attr_to_cache ~CDF.add_to_cache ~CDF.backward ~CDF.checksum ~CDF.clear_attr_from_cache ~CDF.clear_from_cache ~CDF.clone ~CDF.close ~CDF.col_major ~CDF.compress ~CDF.copy ~CDF.from_data ~CDF.new ~CDF.raw_var ~CDF.readonly ~CDF.save ~CDF.var_num ~CDF.version .. attribute:: CDF.attrs Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. .. attribute:: CDF.backward True if this CDF was created in backward-compatible mode (for opening with CDF library before 3.x) .. automethod:: add_to_cache .. automethod:: add_attr_to_cache .. automethod:: attr_num .. automethod:: checksum .. automethod:: clear_from_cache .. automethod:: clear_attr_from_cache .. automethod:: clone .. automethod:: close .. automethod:: col_major .. automethod:: compress .. automethod:: copy .. automethod:: from_data .. automethod:: new .. automethod:: raw_var .. automethod:: readonly .. automethod:: save .. automethod:: var_num .. automethod:: version """ def __init__(self, pathname, masterpath=None, create=None, readonly=None): """Open or create a CDF file. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. Raises ====== CDFError if CDF library reports an error CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save` when done.
""" if masterpath is not None: #Looks like we want to create if create is False: raise ValueError('Cannot specify a master CDF without creating a CDF') if readonly is True: raise ValueError('Cannot create a CDF in readonly mode') if create and readonly: raise ValueError('Cannot create a CDF in readonly mode') try: self.pathname = pathname.encode() except AttributeError: raise ValueError( 'pathname must be string-like: {0}'.format(pathname)) self._handle = ctypes.c_void_p(None) self._opened = False if masterpath is None and not create: self._open(True if readonly is None else readonly) elif masterpath: self._from_master(masterpath.encode()) else: self._create() lib.call(const.SELECT_, const.CDF_zMODE_, ctypes.c_long(2)) self._attrlistref = weakref.ref(gAttrList(self)) self.backward = self.version()[0] < 3 self._var_nums = {} """Cache of name-to-number mappings for variables in this CDF""" self._attr_info = {} """Cache of name-to-(number, global) mappings for attributes in this CDF""" def __del__(self): """Destructor; called when CDF object is destroyed. Close CDF file if there is still a valid handle. .. note:: To avoid data loss, explicitly call :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save`. """ if self._opened: self.close() def __delitem__(self, name): """Delete a zVariable in this CDF, by name or number Parameters ========== name : string or int Name or number of the CDF variable .. note: Variable numbers may change if variables are added or removed. Examples ======== Delete the variable ``Epoch`` from the open CDF file ``cdffile``. >>> del cdffile['Epoch'] """ self[name]._delete() def __enter__(self): """Context manager entrance function.""" return self def __exit__(self, type, value, traceback): """Context manager exit function. Close CDF file. """ self.close() def __getitem__(self, name): """Gets a zVariable in this CDF, by name or number The CDF acts like a dict @param name: Name or number of the CDF variable @type name: string or int @return: CDF variable named or numbered L{name} @rtype: :py:class:`pycdf.Var` @raise KeyError: for pretty much any problem in lookup @note: variable numbers may change if variables are added or removed. """ try: return Var(self, name) except CDFException as e: raise KeyError('{0}: {1}'.format(name, e)) def __setitem__(self, name, data): """Writes data to a zVariable in this CDF If the zVariable does not exist, will create one matching L{data}. If it does exist, will attempt to write L{data} to it without changing the type or dimensions. @param name: name or number of the variable to write @type name: str or int @param data: data to write, or a :py:class:`pycdf.Var` to copy """ if isinstance(data, Var): self.clone(data, name) elif name in self: self[name][...] 
= data if hasattr(data, 'attrs'): self[name].attrs.clone(data.attrs) else: self.new(name, data) def __iter__(self, current = 0): """Iterates over zVars in CDF Iterators for dicts return keys @note: Returned in variable-number order """ while current < self.__len__(): name = self[current].name() value = (yield name) if value is None: current += 1 else: current = self[value]._num() current += 1 def __len__(self): """Implements 'length' of CDF (number of zVars) @return: number of zVars in the CDF @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, const.CDF_NUMzVARS_, ctypes.byref(count)) return count.value def __contains__(self, key): """Determines whether a particular variable name is in the CDF @note: Essentially an efficiency function; L{__iter__} is called if this isn't defined @param key: key/variable name to check @type key: string @return: True if L{key} is the name of a variable in CDF, else False @rtype: Boolean """ try: foo = self[key] return True except KeyError as e: expected = str(key) + \ ": NO_SUCH_VAR: Named variable not found in this CDF." if expected in e.args: return False raise def __repr__(self): """Returns representation of CDF Cannot return anything that can be eval'd to create a copy of the CDF, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<CDF:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the CDF This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.CDF`, just the names, types, and sizes of all variables. (Attributes are not listed.) @return: description of the variables in the CDF @rtype: str """ if self._opened: return '\n'.join([key + ': ' + str(value) for (key, value) in sorted(self.items())]) #can get away with this sort because second value in tuple isn't #compared unless first are different, and variable name is unique. else: if isinstance(self.pathname, str): return 'Closed CDF {0}'.format(self.pathname) else: return 'Closed CDF {0}'.format(self.pathname.decode('ascii')) def _open(self, readonly=True): """Opens the CDF file (called on init) Will open an existing CDF file read/write. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.OPEN_, const.CDF_, self.pathname, ctypes.byref(self._handle)) self._opened = True if readonly: #Default is RW self.readonly(readonly) def _create(self): """Creates (and opens) a new CDF file Created at ``pathname``. Assumes zero-dimension r variables Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.CREATE_, const.CDF_, self.pathname, ctypes.c_long(0), (ctypes.c_long * 1)(0), ctypes.byref(self._handle)) self._opened = True def _from_master(self, master_path): """Creates a new CDF from a master CDF file ``master_path`` is copied to ``pathname`` and opened. Parameters ========== master_path : string location of the master CDF file Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. 
note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ if os.path.exists(self.pathname): raise CDFError(const.CDF_EXISTS) shutil.copy2(master_path, self.pathname) self._open(False) def _call(self, *args, **kwargs): """Select this CDF as current and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. Parameters ========== args : various, see :py:mod:`ctypes`. Passed directly to the CDF library interface. Useful constants are defined in the :doc:`const <pycdf_const>` module of this package. Returns ======= out : ctypes.c_long CDF status from the library .. note: Terminal NULL_ is automatically added to ``args``. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. """ return lib.call(const.SELECT_, const.CDF_, self._handle, *args, **kwargs) def clone(self, zVar, name=None, data=True): """ Clone a zVariable (from another CDF or this) into this CDF Parameters ========== zVar : :py:class:`Var` variable to clone Other Parameters ================ name : str Name of the new variable (default: name of the original) data : boolean (optional) Copy data, or only type, dimensions, variance, attributes? (default: True, copy data as well) Returns ======= out : :py:class:`Var` The newly-created zVar in this CDF """ if name is None: name = zVar.name() if name in self: del self[name] self.new(name, type=zVar.type(), recVary=zVar.rv(), dimVarys=zVar.dv(), dims=zVar._dim_sizes(), n_elements=zVar._nelems()) self[name].compress(*zVar.compress()) self[name].attrs.clone(zVar.attrs) if data: r = zVar._raw zVar._raw = True self.raw_var(name)[...] = zVar[...] zVar._raw = r return zVar def col_major(self, new_col=None): """ Finds the majority of this CDF file Other Parameters ================ new_col : boolean Specify True to change to column-major, False to change to row major, or do not specify to check the majority rather than changing it. (default is check only) Returns ======= out : boolean True if column-major, false if row-major """ if new_col != None: new_maj = const.COLUMN_MAJOR if new_col else const.ROW_MAJOR self._call(const.PUT_, const.CDF_MAJORITY_, new_maj) maj = ctypes.c_long(0) self._call(const.GET_, const.CDF_MAJORITY_, ctypes.byref(maj)) if not maj.value in (const.ROW_MAJOR.value, const.COLUMN_MAJOR.value): raise CDFError(const.BAD_MAJORITY) return maj.value == const.COLUMN_MAJOR.value def readonly(self, ro=None): """ Sets or check the readonly status of this CDF If the CDF has been changed since opening, setting readonly mode will have no effect. .. note:: Closing a CDF that has been opened readonly, or setting readonly False, may take a substantial amount of time if there are many variables in the CDF, as a (potentially large) cache needs to be cleared. Consider specifying ``readonly=False`` when opening the file if this is an issue. However, this may make some reading operations slower. Other Parameters ================ ro : Boolean True to set the CDF readonly, False to set it read/write, or leave out to check only. 
Returns ======= out : Boolean True if CDF is read-only, else False Raises ====== CDFError : if bad mode is set """ if ro == True: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYon) elif ro == False: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYoff) mode = ctypes.c_long(0) self._call(const.CONFIRM_, const.CDF_READONLY_MODE_, ctypes.byref(mode)) if mode.value == const.READONLYon.value: return True elif mode.value == const.READONLYoff.value: return False else: raise CDFError(const.BAD_READONLY_MODE.value) def checksum(self, new_val=None): """ Set or check the checksum status of this CDF. If checksums are enabled, the checksum will be verified every time the file is opened. Other Parameters ================ new_val : boolean True to enable checksum, False to disable, or leave out to simply check. Returns ======= out : boolean True if the checksum is enabled or False if disabled """ if new_val != None: self._call(const.PUT_, const.CDF_CHECKSUM_, const.MD5_CHECKSUM if new_val else const.NO_CHECKSUM) chk = ctypes.c_long(0) self._call(const.GET_, const.CDF_CHECKSUM_, ctypes.byref(chk)) if not chk.value in (const.MD5_CHECKSUM.value, const.NO_CHECKSUM.value): raise CDFError(const.BAD_CHECKSUM) return chk.value == const.MD5_CHECKSUM.value def close(self): """ Closes the CDF file Although called on object destruction (:meth:`~CDF.__del__`), to ensure all data are saved, the user should explicitly call :meth:`~CDF.close` or :meth:`~CDF.save`. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.CLOSE_, const.CDF_) self._opened = False def compress(self, comptype=None, param=None): """ Set or check the compression of this CDF Sets compression on entire *file*, not per-variable. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) Returns ======= out : tuple (comptype, param) currently in effect See Also ======== :meth:`Var.compress` Examples ======== Set file ``cdffile`` to gzip compression, compression level 9: >>> cdffile.compress(pycdf.const.GZIP_COMPRESSION, 9) """ return _compress(self, comptype, param) def new(self, name, data=None, type=None, recVary=True, dimVarys=None, dims=None, n_elements=None, compress=None, compress_param=None): """ Create a new zVariable in this CDF .. note:: Either ``data`` or ``type`` must be specified. If type is not specified, it is guessed from ``data``. Parameters ========== name : str name of the new variable Other Parameters ================ data data to store in the new variable. If this has a an ``attrs`` attribute (e.g., :class:`~spacepy.datamodel.dmarray`), it will be used to populate attributes of the new variable. type : ctypes.c_long CDF type of the variable, from :mod:`~pycdf.const`. See section 2.5 of the CDF user's guide for more information on CDF data types. recVary : boolean record variance of the variable (default True) dimVarys : list of boolean dimension variance of each dimension, default True for all dimensions. 
dims : list of int size of each dimension of this variable, default zero-dimensional. Note this is the dimensionality as defined by CDF, i.e., for record-varying variables it excludes the leading record dimension. See :py:class:`Var`. n_elements : int number of elements, should be 1 except for CDF_CHAR, for which it's the length of the string. compress : ctypes.c_long Compression to apply to this variable, default None. See :py:meth:`Var.compress`. compress_param : ctypes.c_long Compression parameter if compression used; reasonable default is chosen. See :py:meth:`Var.compress`. Returns ======= out : :py:class:`Var` the newly-created zVariable Raises ====== ValueError : if neither data nor sufficient typing information is provided. Notes ===== Any given data may be representable by a range of CDF types; if the type is not specified, pycdf will guess which the CDF types which can represent this data. This breaks down to: #. If input data is a numpy array, match the type of that array #. Proper kind (numerical, string, time) #. Proper range (stores highest and lowest number provided) #. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: #. Type that matches precision of data first, then #. integer type before float type, then #. Smallest type first, then #. signed type first, then #. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if ``data`` specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: #. absolute values between 0 and 3e-39 #. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. """ if type in (const.CDF_EPOCH16, const.CDF_INT8, const.CDF_TIME_TT2000) \ and self.backward: raise ValueError('Cannot use EPOCH16, INT8, or TIME_TT2000 ' 'in backward-compatible CDF') if not lib.supports_int8 and \ type in (const.CDF_INT8, const.CDF_TIME_TT2000): raise ValueError('INT8 and TIME_TT2000 require CDF library 3.4.0') if data is None: if type is None: raise ValueError('Must provide either data or a CDF type.') if dims is None: dims = [] if n_elements is None: n_elements = 1 else: (guess_dims, guess_types, guess_elements) = _Hyperslice.types(data) if dims is None: if recVary: if guess_dims == (): raise ValueError( 'Record-varying data cannot be scalar. 
' 'Specify NRV with CDF.new() or put data in array.') dims = guess_dims[1:] else: dims = guess_dims if type is None: type = guess_types[0] if type == const.CDF_EPOCH16.value and self.backward: type = const.CDF_EPOCH if n_elements is None: n_elements = guess_elements if dimVarys is None: dimVarys = [True for i in dims] recVary = const.VARY if recVary else const.NOVARY dimVarys = [const.VARY if dimVary else const.NOVARY for dimVary in dimVarys] if not hasattr(type, 'value'): type = ctypes.c_long(type) if type.value == const.CDF_INT8.value and not lib.supports_int8: raise ValueError( '64-bit integer support require CDF library 3.4.0') if type.value in (const.CDF_EPOCH16.value, const.CDF_INT8.value, const.CDF_TIME_TT2000.value) \ and self.backward: raise ValueError('Data requires EPOCH16, INT8, or TIME_TT2000; ' 'incompatible with backward-compatible CDF') new_var = Var(self, name, type, n_elements, dims, recVary, dimVarys) if compress != None: new_var.compress(compress, compress_param) if data is not None: new_var[...] = data if hasattr(data, 'attrs'): new_var.attrs.clone(data.attrs) return new_var def raw_var(self, name): """ Get a "raw" :class:`Var` object. Normally a :class:`Var` will perform translation of values for certain types (to/from Unicode for CHAR variables on Py3k, and to/from datetime for all time types). A "raw" object does not perform this translation, on read or write. This does *not* affect the data on disk, and in fact it is possible to maintain multiple Python objects with access to the same zVariable. Parameters ========== name : str name or number of the zVariable """ v = self[name] v._raw = True return v def save(self): """ Saves the CDF file but leaves it open. If closing the CDF, :meth:`close` is sufficient; there is no need to call :meth:`save` before :meth:`close`. .. note:: Relies on an undocumented call of the CDF C library, which is also used in the Java interface. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.SAVE_, const.CDF_) def copy(self): """ Make a copy of all data and attributes in this CDF Returns ======= out : :py:class:`CDFCopy` :class:`~spacepy.datamodel.SpaceData`-like object of all data """ return CDFCopy(self) def version(self): """ Get version of library that created this CDF Returns ======= out : tuple version of CDF library, in form (version, release, increment) """ ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) self._call(const.GET_, const.CDF_VERSION_, ctypes.byref(ver), const.GET_, const.CDF_RELEASE_, ctypes.byref(rel), const.GET_, const.CDF_INCREMENT_, ctypes.byref(inc)) return (ver.value, rel.value, inc.value) def _get_attrs(self): """Get attribute list Provide access to the CDF's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = gAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. 
""") def var_num(self, varname): """Get the variable number of a particular variable name This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : int Variable number of this zvariable. """ num = self._var_nums.get(varname, None) if num is None: #Copied from Var._get, which can hopefully be thinned varNum = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMBER_, varname, ctypes.byref(varNum)) num = varNum.value self._var_nums[varname] = num return num def attr_num(self, attrname): """Get the attribute number and scope by attribute name This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : tuple attribute number, scope (True for global) of this attribute """ res = self._attr_info.get(attrname, None) if res is None: #Copied from Var._get, which can hopefully be thinned attrNum = ctypes.c_long(0) self._call(const.GET_, const.ATTR_NUMBER_, attrname, ctypes.byref(attrNum)) scope = ctypes.c_long(0) self._call(const.SELECT_, const.ATTR_, attrNum, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) res = (attrNum.value, scope) self._attr_info[attrname] = res return res def clear_attr_from_cache(self, attrname): """Mark an attribute deleted in the name-to-number cache Will remove an attribute, and all attributes with higher numbers, from the attribute cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the attribute. Not this is NOT a string in Python 3! """ num, scope = self.attr_num(attrname) #All numbers higher than this are renumbered for a, n in list(self._attr_info.items()): if n[0] >= num: del self._attr_info[a] def clear_from_cache(self, varname): """Mark a variable deleted in the name-to-number cache Will remove a variable, and all variables with higher numbers, from the variable cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! """ num = self.var_num(varname) #All numbers higher than this are renumbered for v, n in list(self._var_nums.items()): if n >= num: del self._var_nums[v] def add_attr_to_cache(self, attrname, num, scope): """Add an attribute to the name-to-number cache This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable scope : bool True if global scope; False if variable scope. 
""" self._attr_info[attrname] = (num, scope) def add_to_cache(self, varname, num): """Add a variable to the name-to-number cache This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable """ self._var_nums[varname] = num #Note there is no function for delete, currently handled in Var.rename #and Attr.rename by just deleting from the dict directly. Maybe this #should be differen (maybe should be possible to follow a variable across #a rename...) class Var(MutableSequence): """ A CDF variable. This object does not directly store the data from the CDF; rather, it provides access to the data in a format that much like a Python list or numpy :class:`~numpy.ndarray`. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. The CDF user's guide, section 2.3, provides background on variables. .. note:: Not intended to be created directly; use methods of :class:`CDF` to gain access to a variable. A record-varying variable's data are viewed as a hypercube of dimensions n_dims+1 (the extra dimension is the record number). They are indexed in row-major fashion, i.e. the last index changes most frequently / is contiguous in memory. If the CDF is column-major, the data are transformed to row-major before return. Non record-varying variables are similar, but do not have the extra dimension of record number. Variables can be subscripted by a multidimensional index to return the data. Indices are in row-major order with the first dimension representing the record number. If the CDF is column major, the data are reordered to row major. Each dimension is specified by standard Python `slice <http://docs.python.org/tutorial/introduction.html#strings>`_ notation, with dimensions separated by commas. The ellipsis fills in any missing dimensions with full slices. The returned data are lists; Python represents multidimensional arrays as nested lists. The innermost set of lists represents contiguous data. .. note:: numpy 'fancy indexing' is *not* supported. Degenerate dimensions are 'collapsed', i.e. no list of only one element will be returned if a single subscript is specified instead of a range. (To avoid this, specify a slice like 1:2, which starts with 1 and ends before 2). Two special cases: 1. requesting a single-dimension slice for a record-varying variable will return all data for that record number (or those record numbers) for that variable. 2. Requests for multi-dimensional variables may skip the record-number dimension and simply specify the slice on the array itself. In that case, the slice of the array will be returned for all records. In the event of ambiguity (e.g., single-dimension slice on a one-dimensional variable), case 1 takes priority. Otherwise, mismatch between the number of dimensions specified in the slice and the number of dimensions in the variable will cause an :exc:`~exceptions.IndexError` to be thrown. This all sounds very complicated but it is essentially attempting to do the 'right thing' for a range of slices. An unusual case is scalar (zero-dimensional) non-record-varying variables. Clearly they cannot be subscripted normally. 
In this case, use the ``[...]`` syntax meaning 'access all data.': >>> import pycdf >>> testcdf = pycdf.CDF('test.cdf', '') >>> variable = testcdf.new('variable', recVary=False, ... type=pycdf.const.CDF_INT4) >>> variable[...] = 10 >>> variable <Var: CDF_INT4 [] NRV > >>> variable[...] 10 Reading any empty non-record-varying variable will return an empty with the same *number* of dimensions, but all dimensions will be of zero length. The scalar is, again, a special case: due to the inability to have a numpy array which is both zero-dimensional and empty, reading an NRV scalar variable with no data will return an empty one-dimensional array. This is really not recommended. As a list type, variables are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_; iterating over a variable returns a single complete record at a time. This is all clearer with examples. Consider a variable ``B_GSM``, with three elements per record (x, y, z components) and fifty records in the CDF. Then: 1. ``B_GSM[0, 1]`` is the y component of the first record. 2. ``B_GSM[10, :]`` is a three-element list, containing x, y, and z components of the 11th record. As a shortcut, if only one dimension is specified, it is assumed to be the record number, so this could also be written ``B_GSM[10]``. 3. ``B_GSM[...]`` reads all data for ``B_GSM`` and returns it as a fifty-element list, each element itself being a three-element list of x, y, z components. Multidimensional example: consider fluxes stored as a function of pitch angle and energy. Such a variable may be called Flux and stored as a two-dimensional array, with the first dimension representing (say) ten energy steps and the second, eighteen pitch angle bins (ten degrees wide, centered from 5 to 175 degrees). Assume 100 records stored in the CDF (i.e. 100 different times). 1. ``Flux[4]`` is a list of ten elements, one per energy step, each element being a list of 18 fluxes, one per pitch bin. All are taken from the fifth record in the CDF. 2. ``Flux[4, :, 0:4]`` is the same record, all energies, but only the first four pitch bins (roughly, field-aligned). 3. ``Flux[..., 0:4]`` is a 100-element list (one per record), each element being a ten-element list (one per energy step), each containing fluxes for the first four pitch bins. This slicing notation is very flexible and allows reading specifically the desired data from the CDF. All data are, on read, converted to appropriate Python data types; EPOCH, EPOCH16, and TIME_TT2000 types are converted to :class:`~datetime.datetime`. Data are returned in numpy arrays. .. note:: Although pycdf supports TIME_TT2000 variables, the Python :class:`~datetime.datetime` object does not support leap seconds. Thus, on read, any seconds past 59 are truncated to 59.999999 (59 seconds, 999 milliseconds, 999 microseconds). Potentially useful list methods and related functions: - `count <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `in <http://docs.python.org/reference/expressions.html#in>`_ - `index <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `len <http://docs.python.org/library/functions.html#len>`_ - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - `sorted <http://docs.python.org/library/functions.html#sorted>`_ The topic of array majority can be very confusing; good background material is available at `IDL Array Storage and Indexing <http://www.idlcoyote.com/misc_tips/colrow_major.html>`_. 
In brief, *regardless of the majority stored in the CDF*, pycdf will always present the data in the native Python majority, row-major order, also known as C order. This is the default order in `NumPy <http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html #internal-memory-layout-of-an-ndarray>`_. However, packages that render image data may expect it in column-major order. If the axes seem 'swapped' this is likely the reason. The :attr:`~Var.attrs` Python attribute acts as a dictionary referencing zAttributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`zAttrList` for more on the dictionary of attributes. With writing, as with reading, every attempt has been made to match the behavior of Python lists. You can write one record, many records, or even certain elements of all records. There is one restriction: only the record dimension (i.e. dimension 0) can be resized by write, as all records in a variable must have the same dimensions. Similarly, only whole records can be deleted. .. note:: Unusual error messages on writing data usually mean that pycdf is unable to interpret the data as a regular array of a single type matching the type and shape of the variable being written. A 5x4 array is supported; an irregular array where one row has five columns and a different row has six columns is not. Error messages of this type include: - ``Data must be well-formed, regular array of number, string, or datetime`` - ``setting an array element with a sequence.`` - ``shape mismatch: objects cannot be broadcast to a single shape`` For these examples, assume Flux has 100 records and dimensions [2, 3]. Rewrite the first record without changing the rest: >>> Flux[0] = [[1, 2, 3], [4, 5, 6]] Writes a new first record and delete all the rest: >>> Flux[...] = [[1, 2, 3], [4, 5, 6]] Write a new record in the last position and add a new record after: >>> Flux[99:] = [[[1, 2, 3], [4, 5, 6]], ... [[11, 12, 13], [14, 15, 16]]] Insert two new records between the current number 5 and 6: >>> Flux[5:6] = [[[1, 2, 3], [4, 5, 6]], [[11, 12, 13], ... [14, 15, 16]]] This operation can be quite slow, as it requires reading and rewriting the entire variable. (CDF does not directly support record insertion.) Change the first element of the first two records but leave other elements alone: >>> Flux[0:2, 0, 0] = [1, 2] Remove the first record: >>> del Flux[0] Removes record 5 (the sixth): >>> del Flux[5] Due to the need to work around a bug in the CDF library, this operation can be quite slow. Delete *all data* from ``Flux``, but leave the variable definition intact: >>> del Flux[...] .. note:: Although this interface only directly supports zVariables, zMode is set on opening the CDF so rVars appear as zVars. See p.24 of the CDF user's guide; pyCDF uses zMode 2. .. autosummary:: ~Var.attrs ~Var.compress ~Var.copy ~Var.dtype ~Var.dv ~Var.insert ~Var.name ~Var.rename ~Var.rv ~Var.shape ~Var.type .. attribute:: Var.attrs zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. .. automethod:: compress .. automethod:: copy .. autoattribute:: dtype .. automethod:: dv .. automethod:: insert .. automethod:: name .. automethod:: rename .. automethod:: rv .. autoattribute:: shape .. 
automethod:: type """ def __init__(self, cdf_file, var_name, *args): """Create or locate a variable Parameters ========== cdf_file : :py:class:`pycdf.CDF` CDF file containing this variable var_name : string name of this variable Other Parameters ================ args additional arguments passed to :py:meth:`_create`. If none, opens an existing variable. If provided, creates a new one. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning """ self.cdf_file = cdf_file #This is the definitive "identify" of variable self._name = None self._type = None #CDF type (long) self._raw = False #Raw access (skip all conversions) if len(args) == 0: self._get(var_name) else: self._create(var_name, *args) #Weak reference to attribute list (use attrs instead) #This avoids a reference loop self._attrlistref = weakref.ref(zAttrList(self)) def __getitem__(self, key): """Returns a slice from the data array. Details under :py:class:`pycdf.Var`. @return: The data from this variable @rtype: list-of-lists of appropriate type. @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) #Hyperslice mostly catches this sort of thing, but #an empty variable is a special case, since we might want to #WRITE to 0th record (which Hyperslice also supports) but #can't READ from it, and iterating over tries to read from it. if hslice.rv: if hslice.dimsizes[0] == 0 and hslice.degen[0] and \ hslice.starts[0] == 0: raise IndexError('record index out of range') #For NRV, again hslice will assume 0th record exists since we might #want to write. So ANY degenerate dim other than the glued-on 0th #suggests an explicit index that should fail. None degenerate suggests #make an empty array. #Note this is pulling a lot of hyperslice stuff into getitem! elif hslice.dimsizes[0] == 0: if len(hslice.degen) > 1 and max(hslice.degen[1:]): raise IndexError('record index out of range') else: #The zero-length dimension is degenerate so it gets chopped, #and you can't have a zero-length numpy array that still #maintains the size of all other dimensions. So just force #a zero-dim array and the rest will follow hslice.counts[...] = 0 #If this is a scalar, need to make a single non-degenerate #dimension so it can be empty. if len(hslice.counts) == 1: hslice.degen[0] = False result = hslice.create_array() if hslice.counts[0] != 0: hslice.select() lib.call(const.GET_, const.zVAR_HYPERDATA_, result.ctypes.data_as(ctypes.c_void_p)) return hslice.convert_input_array(result) def __delitem__(self, key): """Removes a record (or set of records) from the CDF Only whole records can be deleted, so the del call must either specify only one dimension or it must specify all elements of the non-record dimensions. This is *not* a way to resize a variable! Deleting records from the middle of a variable may be very slow in some circumstances. To work around a bug in CDF library versions 3.4.0 and before, all the data must be read in, the requested deletions done, and then all written back out. 
@param key: index or slice to delete @type key: int or slice @raise TypeError: if an attempt is made to delete from a non record-varying variable, or to delete below the record level """ if not self.rv(): raise TypeError('Cannot delete records from non-record-varying ' 'variable.') hslice = _Hyperslice(self, key) if hslice.dims > 1 and (hslice.counts[1:] != hslice.dimsizes[1:]).any(): raise TypeError('Can only delete entire records.') if hslice.counts[0] == 0: return start = hslice.starts[0] count = hslice.counts[0] interval = hslice.intervals[0] dimsize = hslice.dimsizes[0] self._call() dangerous_delete = False if lib._del_middle_rec_bug and \ (interval != 1 or (start != 0 and start + count < dimsize)): #delete from middle is dangerous if only have one index entry entries = ctypes.c_long(0) lib.call(const.GET_, const.zVAR_nINDEXENTRIES_, ctypes.byref(entries)) dangerous_delete = (entries.value == 1) if dangerous_delete: data = self[...] data = numpy.delete( data, numpy.arange(start, start + count * interval, interval), 0) self[0:dimsize - count] = data first_rec = dimsize - count last_rec = dimsize - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif interval == 1: first_rec = ctypes.c_long(start) last_rec = ctypes.c_long(start + count - 1) lib.call(const.DELETE_, const.zVAR_RECORDS_, first_rec, last_rec) else: self._call() #delete from end to avoid renumbering of records for recno in range(start + (count - 1) * interval, start - 1, -1 * interval): lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(recno), ctypes.c_long(recno)) def __setitem__(self, key, data): """Puts a slice into the data array. Details under :py:class:`pycdf.Var`. @param key: index or slice to store @type key: int or slice @param data: data to store @type data: numpy.array @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. 
IndexError will @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) n_recs = hslice.counts[0] hslice.expand(data) cdf_type = self.type() if cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) else: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=self._np_type()) if cdf_type == const.CDF_EPOCH16.value: datashape = data.shape[:-1] else: datashape = data.shape #Check data sizes if datashape != tuple(hslice.expected_dims()): raise ValueError('attempt to assign data of dimensions ' + str(datashape) + ' to slice of dimensions ' + str(tuple(hslice.expected_dims()))) #Flip majority and reversed dimensions, see convert_input_array data = hslice.convert_output_array(data) #Handle insertions and similar weirdness if hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Specified slice ends before last record, so insert in middle saved_data = self[hslice.starts[0] + n_recs:] if hslice.counts[0] > 0: hslice.select() lib.call(const.PUT_, const.zVAR_HYPERDATA_, data.ctypes.data_as(ctypes.c_void_p)) if hslice.counts[0] < n_recs: first_rec = hslice.starts[0] + hslice.counts[0] last_rec = hslice.dimsizes[0] - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Put saved data in after inserted data self[hslice.starts[0] + hslice.counts[0]:] = saved_data def extend(self, data): """ Append multiple values to the end of this variable This is an efficiency function which overrides the base implementation in MutableSequence. Parameters ---------- data : the data to append """ self[len(self):] = data def insert(self, index, data): """ Inserts a *single* record before an index Parameters ---------- index : int index before which to insert the new record data : the record to insert """ self[index:index] = [data] def _create(self, var_name, datatype, n_elements = 1, dims = (), recVary = const.VARY, dimVarys = None): """Creates a new zVariable @param var_name: name of this variable @type var_name: string @param datatype: CDF data type @type datatype: ctypes.c_long @param n_elements: number of elements (should be 1 except for CDF_CHAR variables). @type n_elements: long @param dims: size of each dimension for multi-dimensional variable, or empty for a zero-dimensional @type dims: sequence of long @param recVary: record variance for this variable (VARY/NOVARY) @type recVary: long @param dimVarys: array of VARY or NOVARY, variance for each dimension @type dimVarys: sequence of long @return: new variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.new}. 
""" dim_array = (ctypes.c_long * len(dims))(*dims) enc_name = var_name.encode('ascii') if dimVarys is None: dim_vary_array = (ctypes.c_long * (len(dims) if len(dims) > 0 else 1))(const.VARY) else: dim_vary_array = (ctypes.c_long * len(dims))(*dimVarys) varNum = ctypes.c_long(0) self.cdf_file._call(const.CREATE_, const.zVAR_, enc_name, datatype, ctypes.c_long(n_elements), ctypes.c_long(len(dims)), dim_array, recVary, dim_vary_array, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) def _delete(self): """Removes this zVariable from the CDF @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ self._call(const.DELETE_, const.zVAR_) self.cdf_file.clear_from_cache(self._name) self._name = None def _get(self, var_name): """Gets an existing zVariable @param var_name: name of this variable @type var_name: string @return: variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.__getitem__}. """ if isinstance(var_name, str_classes): try: enc_name = var_name.encode('ascii').rstrip() except AttributeError: enc_name = var_name.rstrip() #already in ASCII #'touch' CDF to cause an error if the name isn't there; get number varNum = ctypes.c_long(0) self.cdf_file._call(const.GET_, const.zVAR_NUMBER_, enc_name, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) else: #Looking up by number name = ctypes.create_string_buffer(const.CDF_VAR_NAME_LEN256+1) self.cdf_file._call(const.SELECT_, const.zVAR_, ctypes.c_long(var_name), const.GET_, const.zVAR_NAME_, name) self._name = name.value.rstrip() self.cdf_file.add_to_cache(self._name, var_name) def _num(self): """Returns the zVar number for this variable @return: number of this zVar @rtype: int """ return self.cdf_file.var_num(self._name) def __len__(self): """Get number of records for this variable in this file @return: Number of records @rtype: long @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ count = ctypes.c_long(0) self._call(const.GET_, const.zVAR_MAXREC_, ctypes.byref(count)) return (count.value + 1) def __repr__(self): """Returns representation of the variable Cannot return anything that can be eval'd to create a copy, so just wrap the informal representation in angle brackets. @return: info on this zVar @rtype: str """ return '<Var:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the variable This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.Var`. 
@return: info on this zVar, CDFTYPE [dimensions] NRV (if not record-varying) @rtype: str """ if self.cdf_file._opened: cdftype = self.type() chartypes = (const.CDF_CHAR.value, const.CDF_UCHAR.value) rv = self.rv() typestr = lib.cdftypenames[cdftype] + \ ('*' + str(self._nelems()) if cdftype in chartypes else '' ) if rv: sizestr = str([len(self)] + self._dim_sizes()) else: sizestr = str(self._dim_sizes()) return typestr + ' ' + sizestr + ('' if rv else ' NRV') else: if isinstance(self._name, str): return 'zVar "{0}" in closed CDF {1}'.format( self._name, self.cdf_file.pathname) else: return 'zVar "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self.cdf_file.pathname.decode('ascii')) def _n_dims(self): """Get number of dimensions for this variable @return: the number of dimensions @rtype: long """ n_dims = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMDIMS_, ctypes.byref(n_dims)) return n_dims.value def _dim_sizes(self): """Get the dimension sizes for this variable @return: sequence of sizes @rtype: sequence of long @note: This will always be in Python order (i.e. row major, last index iterates most quickly), *regardless* of the majority of the CDF. """ sizes = (ctypes.c_long * const.CDF_MAX_DIMS)(0) self._call(const.GET_, const.zVAR_DIMSIZES_, sizes) sizes = sizes[0:self._n_dims()] return sizes def rv(self, new_rv=None): """ Gets or sets whether this variable has record variance If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Other Parameters ================ new_rv : boolean True to change to record variance, False to change to NRV, unspecified to simply check variance. Returns ======= out : Boolean True if record varying, False if NRV """ if new_rv != None: self._call(const.PUT_, const.zVAR_RECVARY_, const.VARY if new_rv else const.NOVARY) vary = ctypes.c_long(0) self._call(const.GET_, const.zVAR_RECVARY_, ctypes.byref(vary)) return vary.value != const.NOVARY.value def dv(self, new_dv=None): """ Gets or sets dimension variance of each dimension of variable. If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Parameters ========== new_dv : list of boolean Each element True to change that dimension to dimension variance, False to change to not dimension variance. (Unspecified to simply check variance.) Returns ======= out : list of boolean True if that dimension has variance, else false. """ ndims = self._n_dims() if new_dv != None: if len(new_dv) != ndims: raise ValueError('Must specify variance for ' + str(ndims) + 'dimensions.') varies = (ctypes.c_long * ndims)( *[const.VARY if dv else const.NOVARY for dv in new_dv]) self._call(const.PUT_, const.zVAR_DIMVARYS_, varies) if ndims == 0: return [] varies = (ctypes.c_long * const.CDF_MAX_DIMS)() self._call(const.GET_, const.zVAR_DIMVARYS_, varies) return [dv != const.NOVARY.value for dv in varies[0:ndims]] def _call(self, *args, **kwargs): """Select this CDF and variable and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. @param args: Passed directly to the CDF library interface. Useful constants are defined in the :py:mod:`pycdf.const` module of this package. @type args: various, see :py:mod:`ctypes`. 
@return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self.cdf_file._call( const.SELECT_, const.zVAR_, ctypes.c_long(self.cdf_file.var_num(self._name)), *args, **kwargs) def _np_type(self): """Returns the numpy type of this variable This is the numpy type that will come directly out of the CDF; see :meth:`dtype` for the representation post-conversion. Raises ====== CDFError : for library-reported error or failure to find numpy type Returns ======= out : dtype numpy dtype that will hold value from this variable """ cdftype = self.type() if cdftype == const.CDF_CHAR.value or cdftype == const.CDF_UCHAR.value: return numpy.dtype('S' + str(self._nelems())) try: return lib.numpytypedict[cdftype] except KeyError: raise CDFError(const.BAD_DATA_TYPE) def type(self, new_type=None): """ Returns or sets the CDF type of this variable Parameters ========== new_type : ctypes.c_long the new type from :mod:`~pycdf.const` Returns ======= out : int CDF type """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) n_elements = ctypes.c_long(self._nelems()) self._call(const.PUT_, const.zVAR_DATASPEC_, new_type, n_elements) self._type = None if self._type is None: cdftype = ctypes.c_long(0) self._call(const.GET_, const.zVAR_DATATYPE_, ctypes.byref(cdftype)) self._type = cdftype.value return self._type def _nelems(self): """Number of elements for each value in this variable This is the length of strings for CHAR and UCHAR, should be 1 otherwise. @return: length of strings @rtype: int """ nelems = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMELEMS_, ctypes.byref(nelems)) return nelems.value def name(self): """ Returns the name of this variable Returns ======= out : str variable's name """ if isinstance(self._name, str): return self._name elif isinstance(self._name, bytes): return self._name.decode() def compress(self, comptype=None, param=None): """ Set or check the compression of this variable Compression may not be changeable on variables with data already written; even deleting the data may not permit the change. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
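An illustrative sketch (assumes gzip is wanted; compression constants live in :mod:`~pycdf.const`):

>>> var.compress(const.GZIP_COMPRESSION, 5)  # set gzip, level 5
>>> var.compress()                           # query the current setting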
Returns ======= out : tuple the (comptype, param) currently in effect """ return _compress(self, comptype, param) def copy(self): """ Copies all data and attributes from this variable Returns ======= out : :class:`VarCopy` list of all data in record order """ return VarCopy(self) def rename(self, new_name): """ Renames this variable Parameters ========== new_name : str the new name for this variable """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_VAR_NAME_LEN256: raise CDFError(const.BAD_VAR_NAME) self._call(const.PUT_, const.zVAR_NAME_, enc_name) self.cdf_file.add_to_cache( enc_name, self.cdf_file.var_num(self._name)) #Still in cache del self.cdf_file._var_nums[self._name] self._name = enc_name @property def shape(self): """ Provides the numpy array-like shape of this variable. Returns a tuple; first element is number of records (RV variable only) And the rest provide the dimensionality of the variable. .. note:: Assigning to this attribute will not change the shape. """ if self.rv(): return tuple([len(self)] + self._dim_sizes()) else: return tuple(self._dim_sizes()) @property def dtype(self): """ Provide the numpy dtype equivalent to the CDF type of this variable. Data from this variable will be returned in numpy arrays of this type. See Also -------- type """ cdftype = self.type() if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str is not bytes and not self._raw: return numpy.dtype('U' + str(self._nelems())) if cdftype in (const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value) and not self._raw: return numpy.dtype('O') return self._np_type() def _get_attrs(self): """Get attribute list Provide access to the zVar's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = zAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. """) class _Hyperslice(object): """Represents a CDF 'slice' used for the hyper CDF functions For internal module use only. @ivar dims: number of dimensions to this slice, usually number of dimensions to the variable plus one for the record, which represents the 0th (least rapidly varying) dimension. @type dims: int @ivar dimsizes: size of each dimension (0th is number of records) @type dimsizes: list of int @ivar starts: index of the start value for each dimension ('dimension indices' in CDF speak) @type starts: list of int @ivar counts: number of values to get from each dimension. Final result will be the product of everything in counts. ('dimension counts' in CDF speak) @type counts: numpy.array @ivar intervals: interval between successive indices to use for each dimension. ('dimension invervals' in CDF speak) @type intervals: list of int @ivar degen: is this dimension degenerate, i.e. should be removed in the returned dataset. A 3D array with one dimension degenerate will be returned as a 2D array (i.e. list-of-lists.) @type degen: numpy.array @ivar rev: should this dimension be returned in reverse order? 
@type rev: numpy.array @ivar column: is this slice in column-major mode (if false, row-major) @type column: boolean @ivar zvar: what CDF variable this object slices on @type zvar: :py:class:`pycdf.Var` @ivar expanded_key: fully-expanded version of the key passed to the constructor (all dimensions filled in) @type expanded_key: tuple @note: All dimension-related variables are stored row-major (Python order) """ def __init__(self, zvar, key): """Create a Hyperslice @param zvar: zVariable that this slices @type zvar: :py:class:`pycdf.Var` @param key: Python multi-dimensional slice as passed to __getitem__ @type key: tuple of slice and/or int @raise IndexError: if slice is out of range, mismatches dimensions, or otherwise unparsable. @raise ValueError: if slice has invalid values """ self.zvar = zvar self.rv = self.zvar.rv() #dim of records, + 1 record dim (NRV always is record 0) self.dims = zvar._n_dims() + 1 self.dimsizes = [len(zvar)] + \ zvar._dim_sizes() self.starts = [0] * self.dims self.counts = numpy.empty((self.dims,), dtype=numpy.int32) self.counts.fill(1) self.intervals = [1] * self.dims self.degen = numpy.zeros(self.dims, dtype=numpy.bool) self.rev = numpy.zeros(self.dims, dtype=numpy.bool) #key is: #1. a single value (integer or slice object) if called 1D #2. a tuple (of integers and/or slice objects) if called nD #3. Each item is either a single value (degenerate dim) # or a slice object. if not hasattr(key, '__len__'): #Not a container object, pack in tuple key = (key, ) if not self.rv: key = (0, ) + key #NRV, so always get 0th record (degenerate) key = self.expand_ellipsis(key, self.dims) if self.rv: #special-cases for RV variables if len(key) == 1: #get all data for this record(s) key = self.expand_ellipsis(key + (Ellipsis, ), self.dims) elif len(key) == self.dims - 1: #get same slice from each record key = (slice(None, None, None), ) + key if len(key) == self.dims: self.expanded_key = key for i in range(self.dims): idx = key[i] if hasattr(idx, 'start'): #slice (self.starts[i], self.counts[i], self.intervals[i], self.rev[i]) = \ self.convert_range(idx.start, idx.stop, idx.step, self.dimsizes[i]) else: #Single degenerate value if idx < 0: idx += self.dimsizes[i] if idx != 0 and (idx >= self.dimsizes[i] or idx < 0): raise IndexError('list index out of range') self.starts[i] = idx self.degen[i] = True else: raise IndexError('Slice does not match dimensions for zVar ' + str(zvar._name)) self.column = zvar.cdf_file.col_major() def expected_dims(self, data=None): """Calculate size of non-degenerate dimensions Figures out size, in each dimension, of expected input data @return: size of each dimension for this slice, excluding degenerate @rtype: list of int """ return [self.counts[i] for i in range(self.dims) if not self.degen[i]] def expand(self, data): """Expands the record dimension of this slice to hold a set of data If the length of data (outermost dimension) is larger than the record count (counts[0]) for this slice, expand the slice to hold all the data. This requires that the record dimension of the slice not be degenerate, and also that it not have been completely specified when the hyperslice was created (i.e. record dimension either ellipsis or no specified stop.) Does *not* expand any other dimension, since that's Very Hard in CDF. 
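For example (illustrative): a slice built from C{var[0:]} over a variable that currently holds 5 records grows to cover 10 records when a 10-record array is assigned to it.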
@param data: the data which are intended to be stored in this slice @type data: list """ rec_slice = self.expanded_key[0] if not self.rv or isinstance(data, str_classes) or self.degen[0] or \ not hasattr(rec_slice, 'stop'): return if len(data) < self.counts[0]: #Truncate to fit data if rec_slice.stop is None and rec_slice.step in (None, 1): self.counts[0] = len(data) elif len(data) > self.counts[0]: #Expand to fit data if rec_slice.step in (None, 1): self.counts[0] = len(data) def create_array(self): """Creates a numpy array to hold the data from this slice Returns ======= out : numpy.array array sized, typed, and dimensioned to hold data from this slice """ counts = self.counts degen = self.degen if self.column: counts = self.reorder(counts) degen = self.reorder(degen) #TODO: Forcing C order for now, revert to using self.column later array = numpy.empty( [counts[i] for i in range(len(counts)) if not degen[i]], self.zvar._np_type(), order='C') return numpy.require(array, requirements=('C', 'A', 'W')) def convert_input_array(self, buffer): """Converts a buffer of raw data from this slice EPOCH(16) variables always need to be converted. CHAR need converted to Unicode if py3k Parameters ========== buffer : numpy.array data as read from the CDF file Returns ======= out : numpy.array converted data """ result = self._flip_array(buffer) #Convert to derived types cdftype = self.zvar.type() if not self.zvar._raw: if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str != bytes: dt = numpy.dtype('U{0}'.format(result.dtype.itemsize)) result = numpy.require(numpy.char.array(result).decode(), dtype=dt) elif cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(result) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(result) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(result) return result def convert_output_array(self, buffer): """Convert a buffer of data that will go into this slice Parameters ========== buffer : numpy.array data to go into the CDF file Returns ======= out : numpy.array input with majority flipped and dimensions reversed to be suitable to pass directly to CDF library. """ buffer = self._flip_array(buffer) return numpy.require(buffer, requirements=('C', 'A', 'W')) def _flip_array(self, data): """ Operations for majority, etc. common between convert_input and _output """ cdftype = self.zvar.type() #Flip majority if any non-degenerate dimensions exist if self.column and not min(self.degen): #Record-number dim degen, swap whole thing if self.degen[0]: if cdftype == const.CDF_EPOCH16.value: #Maintain last dimension data = data.transpose( list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose() #Record-number dimension is not degenerate, so keep it first else: if cdftype == const.CDF_EPOCH16.value: data = data.transpose( [0] + list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose( [0] + list(range(len(data.shape) - 1, 0, -1))) #Reverse non-degenerate dimensions in rev #Remember that the degenerate indices are already gone! 
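#(Illustrative note: a negative step in the original key, e.g. var[..., ::-1], sets rev for that axis; the block below builds one reversing slice per surviving, non-degenerate axis and applies them in a single getitem call.)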
if self.rev.any(): sliced = [(slice(None, None, -1) if self.rev[i] else slice(None)) for i in range(self.dims) if not self.degen[i]] if cdftype == const.CDF_EPOCH16.value: #don't reverse last dim sliced.extend(slice(None)) data = operator.getitem(data, tuple(sliced)) return data def select(self): """Selects this hyperslice in the CDF Calls the CDF library to select the CDF, variable, records, and array elements corresponding to this slice. """ args = (const.SELECT_, const.zVAR_RECNUMBER_, ctypes.c_long(self.starts[0]), const.SELECT_, const.zVAR_RECCOUNT_, ctypes.c_long(self.counts[0]), const.SELECT_, const.zVAR_RECINTERVAL_, ctypes.c_long(self.intervals[0])) if self.dims > 1: dims = self.dims - 1 args += (const.SELECT_, const.zVAR_DIMINDICES_, (ctypes.c_long * dims)(*self.starts[1:]), const.SELECT_, const.zVAR_DIMCOUNTS_, (ctypes.c_long * dims)(*self.counts[1:]), const.SELECT_, const.zVAR_DIMINTERVALS_, (ctypes.c_long * dims)(*self.intervals[1:])) self.zvar._call(*args) @staticmethod def expand_ellipsis(slices, n_dims): """Expands any ellipses into correct number of full-size slices @param slices: tuple of slices, integers, or ellipse objects @type slices: tuple @param n_dims: number of dimensions this slice is over @type n_dims: int @return: L{slices} with ellipses replaced by appropriate number of full-dimension slices @rtype: tuple @raise IndexError: if ellipses specified when already have enough dimensions """ if slices is Ellipsis: return tuple([slice(None, None, None) for i in range(n_dims)]) #Elements might be numpy arrays, so can't use in/index idx = [i for i, v in enumerate(slices) if v is Ellipsis] if not idx: #no ellipsis return slices if len(idx) > 1: #multiples! raise IndexError('Ellipses can only be used once per slice.') idx = idx[0] #how many dims to expand ellipsis to #remember the ellipsis is in len(slices) and must be replaced! extra = n_dims - len(slices) + 1 if extra < 0: raise IndexError('too many indices') result = slices[0:idx] + (slice(None), ) * extra + slices[idx+1:] return result @staticmethod def check_well_formed(data): """Checks if input data is well-formed, regular array""" d = numpy.asanyarray(data) if d.dtype == numpy.object: #this is probably going to be bad try: len(d.flat[0]) except TypeError: #at least it's not a list pass else: raise ValueError( 'Data must be well-formed, regular array of number, ' 'string, or datetime') @staticmethod def dimensions(data): """Finds the dimensions of a nested list-of-lists @param data: data of which dimensions are desired @type data: list (of lists) @return: dimensions of L{data}, in order outside-in @rtype: list of int @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) _Hyperslice.check_well_formed(d) return d.shape @staticmethod def types(data, backward=False): """Find dimensions and valid types of a nested list-of-lists Any given data may be representable by a range of CDF types; infer the CDF types which can represent this data. This breaks down to: 1. Proper kind (numerical, string, time) 2. Proper range (stores highest and lowest number) 3. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: 1. Type that matches precision of data first, then 2. integer type before float type, then 3. Smallest type first, then 4. signed type first, then 5. specifically-named (CDF_BYTE) vs. 
generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if L{data} specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: 1. absolute values between 0 and 3e-39 2. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. @param data: data for which dimensions and CDF types are desired @type data: list (of lists) @param backward: limit to pre-CDF3 types @type backward: bool @return: dimensions of L{data}, in order outside-in; CDF types which can represent this data; number of elements required (i.e. length of longest string) @rtype: 3-tuple of lists ([int], [ctypes.c_long], [int]) @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) dims = d.shape elements = 1 types = [] _Hyperslice.check_well_formed(d) if d.dtype.kind in ('S', 'U'): #it's a string types = [const.CDF_CHAR, const.CDF_UCHAR] elements = d.dtype.itemsize if d.dtype.kind == 'U': #UTF-8 uses 4 bytes per elements //= 4 elif d.size and hasattr(numpy.ma.getdata(d).flat[0], 'microsecond'): if max((dt.microsecond % 1000 for dt in d.flat)) > 0: types = [const.CDF_EPOCH16, const.CDF_EPOCH, const.CDF_TIME_TT2000] else: types = [const.CDF_EPOCH, const.CDF_EPOCH16, const.CDF_TIME_TT2000] if backward: del types[types.index(const.CDF_EPOCH16)] del types[-1] elif not lib.supports_int8: del types[-1] elif d is data or isinstance(data, numpy.generic): #numpy array came in, use its type (or byte-swapped) types = [k for k in lib.numpytypedict if (lib.numpytypedict[k] == d.dtype or lib.numpytypedict[k] == d.dtype.newbyteorder()) and not k in lib.timetypes] if (not lib.supports_int8 or backward) \ and const.CDF_INT8.value in types: del types[types.index(const.CDF_INT8.value)] #Maintain priority to match the ordered lists below: #float/double (44, 45) before real (21/22), and #byte (41) before int (1) before char (51). So hack. #Consider making typedict an ordered dict once 2.6 is dead. types.sort(key=lambda x: x % 50, reverse=True) if not types: #not a numpy array, or can't parse its type if d.dtype.kind == 'O': #Object. 
Try to make it numeric #Can't do safe casting from Object, so try and compare #Basically try most restrictive to least restrictive trytypes = (numpy.uint64, numpy.int64, numpy.float64) for t in trytypes: try: newd = d.astype(dtype=t) except: #Failure to cast, try next type continue if (newd == d).all(): #Values preserved, use this type d = newd #Continue with normal guessing, as if a list break else: #fell through without a match raise ValueError( 'Cannot convert generic objects to CDF type.') if d.dtype.kind in ('i', 'u'): #integer minval = numpy.min(d) maxval = numpy.max(d) if minval < 0: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_INT2, const.CDF_INT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 15, 2 ** 31, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] else: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_UINT1, const.CDF_INT2, const.CDF_UINT2, const.CDF_INT4, const.CDF_UINT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 8, 2 ** 15, 2 ** 16, 2 ** 31, 2 ** 32, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] types = [t for (t, c) in zip(types, cutoffs) if c > maxval and (minval >= 0 or minval >= -c)] if (not lib.supports_int8 or backward) \ and const.CDF_INT8 in types: del types[types.index(const.CDF_INT8)] else: #float if dims is (): if d != 0 and (abs(d) > 1.7e38 or abs(d) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] else: absolutes = numpy.abs(d[d != 0]) if len(absolutes) > 0 and \ (numpy.max(absolutes) > 1.7e38 or numpy.min(absolutes) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] types = [t.value if hasattr(t, 'value') else t for t in types] return (dims, types, elements) @staticmethod def reorder(seq): """Reorders seq to switch array majority Used to take an array of subscripts between row and column majority. First element is not touched, being the record number. @param seq: a sequence of *subscripts* @type seq: sequence of integers @return: seq with all but element 0 reversed in order @rtype: sequence of integers """ return numpy.concatenate((seq[0:1], numpy.flipud(seq)[:-1])) @staticmethod def convert_range(start, stop, step, size): """Converts a start/stop/step range to start/count/interval (i.e. changes from Python-style slice to CDF-style) @param start: index to start a slice at, may be none or negative @type start: int @param stop: index at end of slice (one-past, standard Python), may be none or negative @type stop: int @param step: interval for stepping through stlice @type step: int @param size: size of list to slice @type size: int @return: (start, count, interval, rev) where: 1. start is the start index, normalized to be within the size of the list and negatives handled 2. count is the number of records in the slice, guaranteed to stop before the end 3. interval is the skip between records 4. rev indicates whether the sequence should be reversed @rtype: (int, int, int, boolean) """ (start, stop, step) = slice(start, stop, step).indices(size) if step < 0: step *= -1 count = int((start - stop + step - 1) / step) start = start - (count - 1) * step rev = True else: count = int((stop - start + step - 1) / step) rev = False if count < 0: count = 0 start = 0 return (start, count, step, rev) class Attr(MutableSequence): """An attribute, g or z, for a CDF .. 
warning:: This class should not be used directly, but only in its subclasses, :class:`gAttr` and :class:`zAttr`. The methods listed here are safe to use in the subclasses. Represents a CDF attribute, providing access to the Entries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. An introduction to CDF attributes can be found in section 2.4 of the CDF user's guide. Each element of the list is a single Entry of the appropriate type. The index to the elements is the Entry number. Multi-dimensional slicing is *not* supported; an Entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry .. autosummary:: ~Attr.append ~Attr.has_entry ~Attr.insert ~Attr.max_idx ~Attr.new ~Attr.number ~Attr.rename ~Attr.type .. automethod:: append .. automethod:: has_entry .. automethod:: insert .. automethod:: max_idx .. automethod:: new .. automethod:: number .. automethod:: rename .. automethod:: type """ def __init__(self, cdf_file, attr_name, create=False): """Initialize this attribute @param cdf_file: CDF file containing this attribute @type cdf_file: :py:class:`pycdf.CDF` @param attr_name: Name of this attribute @type attr_name: str @param create: True to create attribute, False to look up existing. @type create: bool """ self._cdf_file = cdf_file self._raw = False if isinstance(attr_name, str_classes): try: self._name = attr_name.encode('ascii') except AttributeError: self._name = attr_name attrno = ctypes.c_long() if create: self._cdf_file._call(const.CREATE_, const.ATTR_, self._name, self.SCOPE, ctypes.byref(attrno)) self._cdf_file.add_attr_to_cache( self._name, attrno.value, self.SCOPE == const.GLOBAL_SCOPE) else: #Ensure exists, and populate cache. See scope note below attrno, scope = self._cdf_file.attr_num(self._name) else: name = ctypes.create_string_buffer(const.CDF_ATTR_NAME_LEN256 + 1) scope = ctypes.c_long(0) self._cdf_file._call(const.SELECT_, const.ATTR_, ctypes.c_long(attr_name)) #Because it's possible to create a gAttr Python objecting #referencing an Attribute with variable scope, and vice-versa, #do NOT assume the scope matches #(Higher level code checks for that being a bad thing.) self._cdf_file._call( const.GET_, const.ATTR_NAME_, name, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) self._name = name.value.rstrip() if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) self._cdf_file.add_attr_to_cache(self._name, attr_name, scope) def __getitem__(self, key): """Return a slice of Entries. Because Attributes may be sparse, a multi-element slice will return None for those elements which do not have associated Entries. @param key: index or range of Entry number to return @type key: slice or int @return: a list of entries, appropriate type. @raise IndexError: if L{key} is an int and that Entry number does not exist. 
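An illustrative sketch (assumes Entries 0 and 2 hold 1 and 3, and Entry 1 is absent):

>>> attribute[0:3]
[1, None, 3]
>>> attribute[1]  #raises IndexError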
""" if key is Ellipsis: key = slice(None, None, None) if hasattr(key, 'indices'): idx = range(*key.indices(self.max_idx() + 1)) return [self._get_entry(i) if self.has_entry(i) else None for i in idx] else: if self.has_entry(key): return self._get_entry(key) else: raise IndexError('list index ' + str(key) + ' out of range.') def _check_other_entries(self, types): """Try to get the type of this entry from others in the Attribute For zAttrs, checks if all other Entries are the same type, and at least one doesn't match its zVar, i.e. Entry type dominates (otherwise assumption is the Var type dominates). For gAttrs, checks all other Entries, and gives priority to the one that's earliest in the possible type list and exists in other Entries. This is only one component of Entry type guessing! :param list types: CDF types that are candidates (match the data) :return: The type discerned from other Entries, or None """ if self.ENTRY_ == const.zENTRY_: #If everything else is the same entry type, #and one is not the same as its var, probably #all entries should be of that type cand_et = None #The Entry type that might work one_var_diff = False #One Var has a type different from Entry for num in range(self.max_idx() + 1): if not self.has_entry(num): continue vartype = self._cdf_file[num].type() entrytype = self.type(num) if vartype != entrytype: one_var_diff = True if cand_et is None: if not entrytype in types: return None #One var has Entry with "impossible" type cand_et = entrytype elif cand_et != entrytype: return None #Two vars have Entries with different types if one_var_diff and cand_et is not None: return cand_et else: # Of those types which exist in other entries, # find the one which is earliest # in types, i.e. the preferred type entrytypes = [self.type(num) for num in range(self.max_idx() + 1) if self.has_entry(num)] entrytypes = [et for et in entrytypes if et in types] if entrytypes: return types[ min([types.index(et) for et in entrytypes])] return None def __setitem__(self, key, data): """Set a slice of Entries. @param key: index or range of Entry numbers to set @type key: slice or int @param data: the data to set these entries to. Normally each entry should be a sequence; if a scalar is provided, it is treated as a single-element list. @type data: scalar or list @raise ValueError: if size of {data} does not match size of L{key} @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. 
""" if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): #Single value, promote everything a dimension idx = (key, key + 1, 1) data = [data] else: idx = key.indices(self.max_idx() + 1) if key.step is None or key.step > 0: #Iterating forward, extend slice to match data if len(data) > len(range(*idx)): idx = (idx[0], idx[0] + idx[2] * len(data), idx[2]) #get, and check, types and sizes for all data #checks first so don't have error after changing half the Entries data_idx = -1 typelist = [] for i in range(*idx): data_idx += 1 if data_idx >= len(data): continue datum = data[data_idx] if datum is None: typelist[i] = (None, None, None) continue (dims, types, elements) = _Hyperslice.types( datum, backward=self._cdf_file.backward) if len(types) <= 0: raise ValueError('Cannot find a matching CDF type.') if len(dims) > 1: raise ValueError('Entries must be scalar or 1D.') elif len(dims) == 1 and isinstance(datum[0], str_classes): raise ValueError('Entry strings must be scalar.') entry_type = None if self.has_entry(i): #If the entry already exists, match its type entry_type = self.type(i) if not entry_type in types: entry_type = None if entry_type is None: #Check other entries for this attribute entry_type = self._check_other_entries(types) if entry_type is None and self.ENTRY_ == const.zENTRY_: #Fall back to zVar type vartype = self._cdf_file[i].type() if vartype in types: entry_type = vartype else: entry_type = types[0] elif entry_type is None: entry_type = types[0] if not entry_type in lib.numpytypedict: raise ValueError('Cannot find a matching numpy type.') typelist.append((dims, entry_type, elements)) data_idx = -1 for i in range(*idx): data_idx += 1 if data_idx >= len(data) or data[data_idx] is None: if self.has_entry(i): del self[i] continue datum = data[data_idx] (dims, entry_type, elements) = typelist[data_idx] self._write_entry(i, datum, entry_type, dims, elements) def __delitem__(self, key): """Delete a slice of Entries. @param key: index or range of Entry numbers to delete @type key: slice or int @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. """ if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): idx = (key, key + 1, 1) else: idx = key.indices(self.max_idx() + 1) for i in range(*idx): self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(i), const.DELETE_, self.ENTRY_) def __iter__(self, current=0): """Iterates over all entries in this Attribute Returns data from one entry at a time until reaches the end. @note: Returned in entry-number order. """ while current <= self.max_idx(): if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current += 1 def __reversed__(self, current=None): """Iterates over all entries in this Attribute Returns data from one entry at a time, starting at end and going to beginning. @note: Returned in entry-number order. """ if current is None: current = self.max_idx() while current >= 0: if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current -= 1 def __len__(self): """Number of Entries for this Attr. NOT same as max Entry number. 
@return: Number of Entries @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_NUMENTRIES_, ctypes.byref(count)) return count.value def __repr__(self): """Returns representation of an attribute Cannot return anything that can be eval'd to create a copy of the attribtute, so just wrap the informal representation in angle brackets. @return: all the data in this attribute @rtype: str """ return '<\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute This is an 'informal' representation in that it cannot be evaluated directly to create an L{Attr}. @return: all the data in this attribute @rtype: str """ if self._cdf_file._opened: return '\n'.join([str(item) for item in self]) else: if isinstance(self._name, str): return 'Attribute "{0}" in closed CDF {1}'.format( self._name, self._cdf_file.pathname) else: return 'Attribute "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self._cdf_file.pathname.decode('ascii')) def insert(self, index, data): """Insert an entry at a particular number Inserts entry at particular number while moving all subsequent entries to one entry number later. Does not close gaps. Parameters ========== index : int index where to put the new entry data : data for the new entry """ max_entry = self.max_idx() if index > max_entry: #Easy case self[index] = data return for i in range(max_entry, index - 1, -1): if self.has_entry(i+1): self.__delitem__(i+1) if self.has_entry(i): self.new(self.__getitem__(i), type=self.type(i), number=i+1) self[index] = data def append(self, data): """Add an entry to end of attribute Puts entry after last defined entry (does not fill gaps) Parameters ========== data : data for the new entry """ self[self.max_idx() + 1] = data def _call(self, *args, **kwargs): """Select this CDF and Attr and call the CDF internal interface @param args: Passed directly to the CDF library interface. @type args: various, see :py:mod:`ctypes`. @return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self._cdf_file._call( const.SELECT_, const.ATTR_, ctypes.c_long(self._cdf_file.attr_num(self._name)[0]), *args, **kwargs) def _entry_len(self, number): """Number of elements in an Entry @param number: number of Entry @type number: int @return: number of elements @rtype: int """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') count = ctypes.c_long(0) self._call( const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_NUMELEMS_, ctypes.byref(count)) return count.value def type(self, number, new_type=None): """Find or change the CDF type of a particular Entry number Parameters ========== number : int number of Entry to check or change Other Parameters ================ new_type type to change this Entry to, from :mod:`~pycdf.const`. Omit to only check type. Returns ======= out : int CDF variable type, see :mod:`~pycdf.const` Notes ===== If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) size = ctypes.c_long(self._entry_len(number)) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATASPEC_, new_type, size, ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') cdftype = ctypes.c_long(0) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATATYPE_, ctypes.byref(cdftype), ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') return cdftype.value def has_entry(self, number): """Check if this attribute has a particular Entry number Parameters ========== number : int number of Entry to check or change Returns ======= out : bool True if ``number`` is a valid entry number; False if not """ status = self._call(const.CONFIRM_, self.ENTRY_EXISTENCE_, ctypes.c_long(number), ignore=(const.NO_SUCH_ENTRY, )) return not status == const.NO_SUCH_ENTRY def max_idx(self): """Maximum index of Entries for this Attr Returns ======= out : int maximum Entry number """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_MAXENTRY_, ctypes.byref(count)) return count.value def new(self, data, type=None, number=None): """Create a new Entry in this Attribute .. note:: If ``number`` is provided and an Entry with that number already exists, it will be overwritten. Parameters ========== data data to put in the Entry Other Parameters ================ type : int type of the new Entry, from :mod:`~pycdf.const` (otherwise guessed from ``data``) number : int Entry number to write, default is lowest available number. """ if number is None: number = 0 while self.has_entry(number): number += 1 (dims, types, elements) = _Hyperslice.types( data, backward=self._cdf_file.backward) if type is None: #Guess based on other entries type = self._check_other_entries(types) if type is None and self.ENTRY_ == const.zENTRY_: #Try to match variable type vartype = self._cdf_file[number].type() if vartype in types: type = vartype if type is None: type = types[0] elif hasattr(type, 'value'): type = type.value self._write_entry(number, data, type, dims, elements) def number(self): """Find the attribute number for this attribute Returns ======= out : int attribute number """ no = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.ATTR_NUMBER_, self._name, ctypes.byref(no)) return no.value def global_scope(self): """Determine scope of this attribute. Returns ======= out : bool True if global (i.e. gAttr), False if zAttr """ return self._cdf_file.attr_num(self._name)[1] def rename(self, new_name): """Rename this attribute Renaming a zAttribute renames it for *all* zVariables in this CDF! 
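An illustrative sketch (assumes the attribute exists in an open, writable CDF):

>>> cdffile.attrs['TEXT'].rename('DESCRIPTION')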
Parameters ========== new_name : str the new name of the attribute """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_ATTR_NAME_LEN256: raise CDFError(const.BAD_ATTR_NAME) self._call(const.PUT_, const.ATTR_NAME_, enc_name) self._cdf_file.add_attr_to_cache( enc_name, *self._cdf_file.attr_num(self._name)) #still in cache del self._cdf_file._attr_info[self._name] self._name = enc_name def _get_entry(self, number): """Read an Entry associated with this L{Attr} @param number: number of Entry to return @type number: int @return: data from entry numbered L{number} @rtype: list or str """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') #Make a big enough buffer length = self._entry_len(number) cdftype = self.type(number) if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): buff = numpy.empty((), 'S{0}'.format(length), order='C') else: if not cdftype in lib.numpytypedict: raise CDFError(const.BAD_DATA_TYPE) buff = numpy.empty((length,), lib.numpytypedict[cdftype], order='C') buff = numpy.require(buff, requirements=('C', 'A', 'W')) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATA_, buff.ctypes.data_as(ctypes.c_void_p)) #decode if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): if str == bytes or self._raw: #Py2k, leave as bytes result = bytes(buff) else: #Py3k, make unicode result = str(numpy.char.array(buff).decode()) else: if not self._raw: if cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(buff) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(buff) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(buff) else: result = buff else: result = buff if length == 1: result = result[0] return result def _write_entry(self, number, data, cdf_type, dims, elements): """Write an Entry to this Attr. @param number: number of Entry to write @type number: int @param data: data to write @param cdf_type: the CDF type to write, from :py:mod:`pycdf.const` @param dims: dimensions of L{data} @type dims: list @param elements: number of elements in L{data}, 1 unless it is a string @type elements: int """ if len(dims) == 0: n_write = 1 else: n_write = dims[0] if cdf_type in (const.CDF_CHAR.value, const.CDF_UCHAR.value): data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.dtype('S' + str(elements))) n_write = elements elif cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data), except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) elif cdf_type in lib.numpytypedict: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=lib.numpytypedict[cdf_type]) else: raise CDFError(const.BAD_DATA_TYPE) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATA_, ctypes.c_long(cdf_type), ctypes.c_long(n_write), data.ctypes.data_as(ctypes.c_void_p)) def _delete(self): """Delete this Attribute Also deletes all Entries associated with it. 
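(Not normally called directly; for example, :class:`zAttrList` uses it to remove a zAttribute once its last remaining zEntry has been deleted.)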
""" self._call(const.DELETE_, const.ATTR_) self._cdf_file.clear_attr_from_cache(self._name) self._name = None class zAttr(Attr): """zAttribute for zVariables within a CDF. .. warning:: Because zAttributes are shared across all variables in a CDF, directly manipulating them may have unexpected consequences. It is safest to operate on zEntries via :class:`zAttrList`. .. note:: When accessing a zAttr, pyCDF exposes only the zEntry corresponding to the associated zVariable. See Also ======== :class:`Attr` """ ENTRY_ = const.zENTRY_ ENTRY_DATA_ = const.zENTRY_DATA_ SCOPE = const.VARIABLE_SCOPE ENTRY_EXISTENCE_ = const.zENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMzENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXzENTRY_ ENTRY_NUMELEMS_ = const.zENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.zENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.zENTRY_DATASPEC_ def insert(self, index, data): """Insert entry at particular index number Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError def append(self, index, data): """Add entry to end of attribute list Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError class gAttr(Attr): """Global Attribute for a CDF Represents a CDF attribute, providing access to the gEntries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. Normally accessed by providing a key to a :class:`gAttrList`: >>> attribute = cdffile.attrs['attribute_name'] >>> first_gentry = attribute[0] Each element of the list is a single gEntry of the appropriate type. The index to the elements is the gEntry number. A gEntry may be either a single string or a 1D array of numerical type. Entries of numerical type (everything but CDF_CHAR and CDF_UCHAR) with a single element are returned as scalars; multiple-element entries are returned as a list. No provision is made for accessing below the entry level; the whole list is returned at once (but Python's slicing syntax can be used to extract individual items from that list.) Multi-dimensional slicing is *not* supported; an entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry gEntries are *not* necessarily contiguous; a gAttribute may have an entry 0 and entry 2 without an entry 1. :meth:`~Attr.len` will return the *number* of gEntries; use :meth:`~Attr.max_idx` to find the highest defined gEntry number and :meth:`~Attr.has_entry` to determine if a particular gEntry number exists. Iterating over all entries is also supported:: >>> entrylist = [entry for entry in attribute] Deleting gEntries will leave a "hole": >>> attribute[0:3] = [1, 2, 3] >>> del attribute[1] >>> attribute.has_entry(1) False >>> attribute.has_entry(2) True >>> print attribute[0:3] [1, None, 3] Multi-element slices over nonexistent gEntries will return ``None`` where no entry exists. Single-element indices for nonexistent gEntries will raise ``IndexError``. Assigning ``None`` to a gEntry will delete it. 
When assigning to a gEntry, the type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing gEntry of the same number in this gAttribute #. other gEntries in this gAttribute #. data-matching constraints described in :meth:`CDF.new`. See Also ======== :class:`Attr` """ ENTRY_ = const.gENTRY_ ENTRY_DATA_ = const.gENTRY_DATA_ SCOPE = const.GLOBAL_SCOPE ENTRY_EXISTENCE_ = const.gENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMgENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXgENTRY_ ENTRY_NUMELEMS_ = const.gENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.gENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.gENTRY_DATASPEC_ class AttrList(MutableMapping): """Object representing a list of attributes. .. warning:: This class should not be used directly, but only via its subclasses, :class:`gAttrList` and :class:`zAttrList`. Methods listed here are safe to use from the subclasses. .. autosummary:: ~AttrList.clone ~AttrList.copy ~AttrList.from_dict ~AttrList.new ~AttrList.rename .. automethod:: clone .. automethod:: copy .. automethod:: from_dict .. automethod:: new .. automethod:: rename """ def __init__(self, cdf_file, special_entry=None): """Initialize the attribute collection @param cdf_file: CDF these attributes are in @type cdf_file: :py:class:`pycdf.CDF` @param special_entry: callable which returns a "special" entry number, used to limit results for zAttrs to those which match the zVar (i.e. the var number) @type special_entry: callable """ self._cdf_file = cdf_file self.special_entry = special_entry def __getitem__(self, name): """Find an Attribute by name @param name: name of the Attribute to return @type name: str @return: attribute named L{name} @rtype: L{Attr} @raise KeyError: if there is no attribute named L{name} @raise CDFError: other errors in CDF library """ try: attrib = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attrib.global_scope() != self.global_scope: raise KeyError(name + ': no ' + self.attr_name + ' by that name.') return attrib def __setitem__(self, name, data): """Create an Attribute or change its entries @param name: name of Attribute to change @type name: str @param data: Entries to populate this Attribute with. Any existing Entries will be deleted! Another C{Attr} may be specified, in which case all its entries are copied. @type data: scalar, list, or L{Attr} """ if isinstance(data, AttrList): if name in self: del self[name] attr = self._get_or_create(name) for entryno in range(data.max_idx()): if data.has_entry(entryno): attr.new(data[entryno], data.type(entryno), entryno) else: attr = self._get_or_create(name) if isinstance(data, str_classes): data = [data] else: try: junk = len(data) except TypeError: data = [data] attr[:] = data del attr[len(data):] def __delitem__(self, name): """Delete an Attribute (and all its entries) @param name: name of Attribute to delete @type name: str """ try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) attr._delete() def __iter__(self, current=0): """Iterates over all Attr in this CDF or variable Returns name of one L{Attr} at a time until reaches the end. @note: Returned in number order. 
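An illustrative sketch:

>>> names = [name for name in cdffile.attrs]  #gAttribute names, in number order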
""" count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) while current < count.value: candidate = self.AttrType(self._cdf_file, current) if candidate.global_scope() == self.global_scope: if self.special_entry is None or \ candidate.has_entry(self.special_entry()): if str == bytes: value = yield(candidate._name) else: value = yield(candidate._name.decode()) if value != None: current = self[value].number() current += 1 def __repr__(self): """Returns representation of attribute list Cannot return anything that can be eval'd to create a copy of the list, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<' + self.__class__.__name__ + ':\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute list This is an 'informal' representation in that it cannot be evaluated directly to create an L{AttrList}. @return: all the data in this list of attributes @rtype: str """ if self._cdf_file._opened: return '\n'.join([key + ': ' + ( ('\n' + ' ' * (len(key) + 2)).join( [str(value[i]) + ' [' + lib.cdftypenames[value.type(i)] + ']' for i in range(value.max_idx() + 1) if value.has_entry(i)]) if isinstance(value, Attr) else str(value) + ' [' + lib.cdftypenames[self.type(key)] + ']' ) for (key, value) in sorted(self.items())]) else: if isinstance(self._cdf_file.pathname, str): return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname) else: return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname.decode('ascii')) def clone(self, master, name=None, new_name=None): """ Clones another attribute list, or one attribute from it, into this list. Parameters ========== master : AttrList the attribute list to copy from. This can be any dict-like object. Other Parameters ================ name : str (optional) name of attribute to clone (default: clone entire list) new_name : str (optional) name of the new attribute, default ``name`` """ if name is None: self._clone_list(master) else: self._clone_attr(master, name, new_name) def copy(self): """ Create a copy of this attribute list Returns ======= out : dict copy of the entries for all attributes in this list """ return dict((key, value[:] if isinstance(value, Attr) else value) for (key, value) in self.items()) def new(self, name, data=None, type=None): """ Create a new Attr in this AttrList Parameters ========== name : str name of the new Attribute Other Parameters ================ data data to put into the first entry in the new Attribute type CDF type of the first entry from :mod:`~pycdf.const`. Only used if data are specified. Raises ====== KeyError : if the name already exists in this list """ if name in self: raise KeyError(name + ' already exists.') #A zAttr without an Entry in this zVar will be a "get" not "create" attr = self._get_or_create(name) if data is not None: if self.special_entry is None: attr.new(data, type) else: attr.new(data, type, self.special_entry()) def rename(self, old_name, new_name): """ Rename an attribute in this list Renaming a zAttribute renames it for *all* zVariables in this CDF! Parameters ========== old_name : str the current name of the attribute new_name : str the new name of the attribute """ AttrList.__getitem__(self, old_name).rename(new_name) def from_dict(self, in_dict): """ Fill this list of attributes from a dictionary .. deprecated:: 0.1.5 Use :meth:`~pycdf.AttrList.clone` instead; it supports cloning from dictionaries. 
Parameters ========== in_dict : dict Attribute list is populated entirely from this dictionary; all existing attributes are deleted. """ warnings.warn("from_dict is deprecated and will be removed. Use clone.", DeprecationWarning) for k in in_dict: self[k] = in_dict[k] for k in list(self): if not k in in_dict: del self[k] def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{AttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name self[new_name] = master[name] def _clone_list(self, master): """Clones this attribute list from another @param master: the attribute list to copy from @type master: L{AttrList} """ for name in master: self._clone_attr(master, name) for name in list(self): #Can't iterate over a list we're changing if not name in master: del self[name] def _get_or_create(self, name): """Retrieve L{Attr} or create it if it doesn't exist @param name: name of the attribute to look up or create @type name: str @return: attribute with this name @rtype: L{Attr} """ attr = None try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status != const.NO_SUCH_ATTR: raise if attr is None: attr = self.AttrType(self._cdf_file, name, True) elif attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) return attr class gAttrList(AttrList): """ Object representing *all* the gAttributes in a CDF. Normally accessed as an attribute of an open :class:`CDF`: >>> global_attribs = cdffile.attrs Appears as a dictionary: keys are attribute names; each value is an attribute represented by a :class:`gAttr` object. To access the global attribute TEXT: >>> text_attr = cdffile.attrs['TEXT'] See Also ======== :class:`AttrList` """ AttrType = gAttr attr_name = 'gAttribute' global_scope = True def __len__(self): """ Number of gAttributes in this CDF Returns ======= out : int number of gAttributes in the CDF """ count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMgATTRS_, ctypes.byref(count)) return count.value class zAttrList(AttrList): """Object representing *all* the zAttributes in a zVariable. Normally accessed as an attribute of a :class:`Var` in an open CDF: >>> epoch_attribs = cdffile['Epoch'].attrs Appears as a dictionary: keys are attribute names, values are the value of the zEntry associated with the appropriate zVariable. Each vAttribute in a CDF may only have a *single* entry associated with each variable. The entry may be a string, a single numerical value, or a series of numerical values. Entries with multiple values are returned as an entire list; direct access to the individual elements is not possible. Example: finding the first dependency of (ISTP-compliant) variable ``Flux``: >>> print cdffile['Flux'].attrs['DEPEND_0'] zAttributes are shared among zVariables, one zEntry allowed per zVariable. (pyCDF hides this detail.) Deleting the last zEntry for a zAttribute will delete the underlying zAttribute. zEntries are created and destroyed by the usual dict methods on the zAttrlist: >>> epoch_attribs['new_entry'] = [1, 2, 4] #assign a list to new zEntry >>> del epoch_attribs['new_entry'] #delete the zEntry The type of the zEntry is guessed from data provided. 
The type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing zEntry corresponding to this zVar #. other zEntries in this zAttribute #. the type of this zVar #. data-matching constraints described in :py:meth:`CDF.new` See Also ======== :class:`AttrList` """ AttrType = zAttr attr_name = 'zAttribute' global_scope = False def __init__(self, zvar): """Initialize the attribute collection @param zvar: zVariable these attributes are in @param zvar: :py:class:`pycdf.Var` """ super(zAttrList, self).__init__(zvar.cdf_file, zvar._num) self._zvar = zvar def __getitem__(self, name): """Find an zEntry by name @param name: name of the zAttribute to return @type name: str @return: attribute named L{name} @rtype: L{zAttr} @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if attrib.has_entry(zvar_num): attrib._raw = self._zvar._raw return attrib[zvar_num] else: raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) def __delitem__(self, name): """Delete an zEntry by name @param name: name of the zEntry to delete @type name: str @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library @note: If this is the only remaining entry, the Attribute will be deleted. """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(str(name) + ': no such attribute for variable ' + str(self._zvar._name)) del attrib[zvar_num] if len(attrib) == 0: attrib._delete() def __setitem__(self, name, data): """Sets a zEntry by name The type of the zEntry is guessed from L{data}. The type is chosen to match the data; subject to that constraint, it will try to match (in order): 1. existing zEntry corresponding to this zVar 2. other zEntries in this zAttribute 3. the type of this zVar 4. data-matching constraints described in L{_Hyperslice.types} @param name: name of zAttribute; zEntry for this zVariable will be set in zAttribute by this name @type name: str @raise CDFError: errors in CDF library @raise ValueError: if unable to find a valid CDF type matching L{data}, or if L{data} is the wrong dimensions. """ try: attr = super(zAttrList, self).__getitem__(name) except KeyError: attr = zAttr(self._cdf_file, name, True) attr._raw = self._zvar._raw attr[self._zvar._num()] = data def __len__(self): """Number of zAttributes in this variable @return: number of zAttributes in the CDF which have entries for this variable. @rtype: int """ length = 0 count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) current = 0 while current < count.value: candidate = zAttr(self._cdf_file, current) if not candidate.global_scope(): if candidate.has_entry(self._zvar._num()): length += 1 current += 1 return length def type(self, name, new_type=None): """Find or change the CDF type of a zEntry in this zVar @param name: name of the zAttr to check or change @type name: str @param new_type: type to change it to, see :py:mod:`pycdf.const` @type new_type: ctypes.c_long @return: CDF variable type, see :py:mod:`pycdf.const` @rtype: int @note: If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) return attrib.type(zvar_num, new_type) def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{zAttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name if new_name in self: del self[new_name] self.new(new_name, master[name], master.type(name) if hasattr(master, 'type') else None)
__len__
Number of Entries for this Attr. NOT same as max Entry number.

@return: Number of Entries
@rtype: int
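# --- Editorial sketch: not necessarily how Attr.__len__ is implemented, only an
# illustration of why the Entry count differs from the maximum Entry number.
# Entry numbers may be sparse, so max_idx() + 1 can over-count; counting only
# the Entries that actually exist (via has_entry) gives the length described above.
def count_entries(attr):
    """Count existing Entries of an Attr, skipping gaps in Entry numbering."""
    return sum(1 for i in range(attr.max_idx() + 1) if attr.has_entry(i))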
#!/usr/bin/env python # -*- coding: utf-8 -*- """ das developers note: This a is modification of the original SpacePy pycdf package. All refereneces to the greater spacepy package have been removed to create a small standalone module. --cwp 2018-10-18 The libcdf.so location code has been changed to find the version installed in anaconda. --cwp 2020-04-06 This package provides a Python interface to the Common Data Format (CDF) library used for many NASA missions, available at http://cdf.gsfc.nasa.gov/. It is targeted at Python 2.6+ and should work without change on either Python 2 or Python 3. The interface is intended to be 'pythonic' rather than reproducing the C interface. To open or close a CDF and access its variables, see the :class:`CDF` class. Accessing data within the variables is via the :class:`Var` class. The :data:`lib` object provides access to some routines that affect the functionality of the library in general. The :mod:`~pycdf.const` module contains constants useful for accessing the underlying library. Authors: Jon Niehof Institution: University of New Hampshire Contact: [email protected] Copyright 2010-2015 Los Alamos National Security, LLC. """ __contact__ = 'Jon Niehof, [email protected]' try: from collections.abc import MutableMapping, MutableSequence except ImportError: from collections import MutableMapping, MutableSequence import ctypes import ctypes.util import datetime import operator import os import os.path import shutil import sys import tempfile import warnings import weakref import numpy import numpy.ma #Import const AFTER library loaded, so failed load doesn't leave half-imported #from . import const try: str_classes = (str, bytes, unicode) except NameError: str_classes = (str, bytes) class Library(object): """ Abstraction of the base CDF C library and its state. Not normally intended for end-user use. An instance of this class is created at package load time as the :data:`~pycdf.lib` variable, providing access to the underlying C library if necessary. The CDF library itself is described in section 2.1 of the CDF user's guide, as well as the CDF C reference manual. Calling the C library directly requires knowledge of :mod:`ctypes`. Instantiating this object loads the C library, see :doc:`/pycdf` docs for details. .. autosummary:: ~Library.call ~Library.check_status ~Library.datetime_to_epoch ~Library.datetime_to_epoch16 ~Library.datetime_to_tt2000 ~Library.epoch_to_datetime ~Library.epoch_to_epoch16 ~Library.epoch_to_num ~Library.epoch_to_tt2000 ~Library.epoch16_to_datetime ~Library.epoch16_to_epoch ~Library.epoch16_to_tt2000 ~Library.set_backward supports_int8 ~Library.tt2000_to_datetime ~Library.tt2000_to_epoch ~Library.tt2000_to_epoch16 v_datetime_to_epoch v_datetime_to_epoch16 v_datetime_to_tt2000 v_epoch_to_datetime v_epoch_to_tt2000 v_epoch16_to_datetime v_epoch16_to_tt2000 v_tt2000_to_datetime v_tt2000_to_epoch v_tt2000_to_epoch16 libpath version .. automethod:: call .. automethod:: check_status .. automethod:: datetime_to_epoch .. automethod:: datetime_to_epoch16 .. automethod:: datetime_to_tt2000 .. automethod:: epoch_to_datetime .. automethod:: epoch_to_epoch16 .. automethod:: epoch_to_num .. automethod:: epoch_to_tt2000 .. automethod:: epoch16_to_datetime .. automethod:: epoch16_to_epoch .. automethod:: epoch16_to_tt2000 .. automethod:: set_backward .. attribute:: supports_int8 True if this library supports INT8 and TIME_TT2000 types; else False. .. automethod:: tt2000_to_datetime .. automethod:: tt2000_to_epoch .. 
automethod:: tt2000_to_epoch16 .. method:: v_datetime_to_epoch(datetime) A vectorized version of :meth:`datetime_to_epoch` which takes a numpy array of datetimes as input and returns an array of epochs. .. method:: v_datetime_to_epoch16(datetime) A vectorized version of :meth:`datetime_to_epoch16` which takes a numpy array of datetimes as input and returns an array of epoch16. .. method:: v_datetime_to_tt2000(datetime) A vectorized version of :meth:`datetime_to_tt2000` which takes a numpy array of datetimes as input and returns an array of TT2000. .. method:: v_epoch_to_datetime(epoch) A vectorized version of :meth:`epoch_to_datetime` which takes a numpy array of epochs as input and returns an array of datetimes. .. method:: v_epoch_to_tt2000(epoch) A vectorized version of :meth:`epoch_to_tt2000` which takes a numpy array of epochs as input and returns an array of tt2000s. .. method:: v_epoch16_to_datetime(epoch0, epoch1) A vectorized version of :meth:`epoch16_to_datetime` which takes a numpy array of epoch16 as input and returns an array of datetimes. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_epoch16_to_tt2000(epoch16) A vectorized version of :meth:`epoch16_to_tt2000` which takes a numpy array of epoch16 as input and returns an array of tt2000s. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_tt2000_to_datetime(tt2000) A vectorized version of :meth:`tt2000_to_datetime` which takes a numpy array of tt2000 as input and returns an array of datetimes. .. method:: v_tt2000_to_epoch(tt2000) A vectorized version of :meth:`tt2000_to_epoch` which takes a numpy array of tt2000 as input and returns an array of epochs. .. method:: v_tt2000_to_epoch16(tt2000) A vectorized version of :meth:`tt2000_to_epoch16` which takes a numpy array of tt2000 as input and returns an array of epoch16. .. attribute:: libpath The path where pycdf found the CDF C library, potentially useful in debugging. If this contains just the name of a file (with no path information), then the system linker found the library for pycdf. On Linux, ``ldconfig -p`` may be useful for displaying the system's library resolution. .. attribute:: version Version of the CDF library, (version, release, increment, subincrement) """ def __init__(self, libpath=None, library=None): """Load the CDF C library. Searches for the library in the order: 1. Appropriately-named file in CDF_LIB 2. Appropriately-named file in CDF_BASE 3. Standard library search path @raise CDFError: BAD_DATA_TYPE if can't map types properly """ if not 'CDF_TMP' in os.environ: os.environ['CDF_TMP'] = tempfile.gettempdir() if not library: if not libpath: self.libpath, self._library = self._find_lib() if self._library is None: raise Exception(( 'Cannot load CDF C library; checked {0}. 
' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(self.libpath))) else: self._library = ctypes.CDLL(libpath) self.libpath = libpath else: self._library = library self.libpath = libpath self._library.CDFlib.restype = ctypes.c_long #commonly used, so set it up here self._library.EPOCHbreakdown.restype = ctypes.c_long self._library.computeEPOCH.restype = ctypes.c_double self._library.computeEPOCH.argtypes = [ctypes.c_long] * 7 self._library.computeEPOCH16.restype = ctypes.c_double self._library.computeEPOCH16.argtypes = [ctypes.c_long] * 10 + \ [ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDFsetFileBackward'): self._library.CDFsetFileBackward.restype = None self._library.CDFsetFileBackward.argtypes = [ctypes.c_long] #Map old name to the 3.7.1+ name if not hasattr(self._library, 'computeTT2000') \ and hasattr(self._library, 'CDF_TT2000_from_UTC_parts'): self._library.computeTT2000 \ = self._library.CDF_TT2000_from_UTC_parts if hasattr(self._library, 'computeTT2000'): self._library.computeTT2000.restype = ctypes.c_longlong self._library.computeTT2000.argtypes = \ [ctypes.c_double] *9 #Map old name to the 3.7.1+ name if not hasattr(self._library, 'breakdownTT2000') \ and hasattr(self._library, 'CDF_TT2000_to_UTC_parts'): self._library.breakdownTT2000 \ = self._library.CDF_TT2000_to_UTC_parts if hasattr(self._library, 'breakdownTT2000'): self._library.breakdownTT2000.restype = None self._library.breakdownTT2000.argtypes = \ [ctypes.c_longlong] + [ctypes.POINTER(ctypes.c_double)] * 9 if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH'): self._library.CDF_TT2000_to_UTC_EPOCH.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH.argtypes = [ctypes.c_longlong] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH'): self._library.CDF_TT2000_from_UTC_EPOCH.restype = ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH.argtypes = [ctypes.c_double] if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH16'): self._library.CDF_TT2000_to_UTC_EPOCH16.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH16.argtypes = \ [ctypes.c_longlong, ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH16'): self._library.CDF_TT2000_from_UTC_EPOCH16.restype = \ ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH16.argtypes = \ [ctypes.POINTER(ctypes.c_double * 2)] #Get CDF version information ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) sub = ctypes.c_char(b' ') self.call(const.GET_, const.LIB_VERSION_, ctypes.byref(ver), const.GET_, const.LIB_RELEASE_, ctypes.byref(rel), const.GET_, const.LIB_INCREMENT_, ctypes.byref(inc), const.GET_, const.LIB_subINCREMENT_, ctypes.byref(sub)) ver = ver.value rel = rel.value inc = inc.value sub = sub.value self.version = (ver, rel, inc, sub) self._del_middle_rec_bug = ver < 3 or (ver == 3 and (rel < 4 or (rel == 4 and inc < 1))) self.supports_int8 = (ver > 3 or (ver == 3 and rel >=4)) self.cdftypenames = {const.CDF_BYTE.value: 'CDF_BYTE', const.CDF_CHAR.value: 'CDF_CHAR', const.CDF_INT1.value: 'CDF_INT1', const.CDF_UCHAR.value: 'CDF_UCHAR', const.CDF_UINT1.value: 'CDF_UINT1', const.CDF_INT2.value: 'CDF_INT2', const.CDF_UINT2.value: 'CDF_UINT2', const.CDF_INT4.value: 'CDF_INT4', const.CDF_UINT4.value: 'CDF_UINT4', const.CDF_INT8.value: 'CDF_INT8', const.CDF_FLOAT.value: 'CDF_FLOAT', const.CDF_REAL4.value: 'CDF_REAL4', const.CDF_DOUBLE.value: 'CDF_DOUBLE', const.CDF_REAL8.value: 'CDF_REAL8', const.CDF_EPOCH.value: 'CDF_EPOCH', 
const.CDF_EPOCH16.value: 'CDF_EPOCH16', const.CDF_TIME_TT2000.value: 'CDF_TIME_TT2000', } self.numpytypedict = {const.CDF_BYTE.value: numpy.int8, const.CDF_CHAR.value: numpy.int8, const.CDF_INT1.value: numpy.int8, const.CDF_UCHAR.value: numpy.uint8, const.CDF_UINT1.value: numpy.uint8, const.CDF_INT2.value: numpy.int16, const.CDF_UINT2.value: numpy.uint16, const.CDF_INT4.value: numpy.int32, const.CDF_UINT4.value: numpy.uint32, const.CDF_INT8.value: numpy.int64, const.CDF_FLOAT.value: numpy.float32, const.CDF_REAL4.value: numpy.float32, const.CDF_DOUBLE.value: numpy.float64, const.CDF_REAL8.value: numpy.float64, const.CDF_EPOCH.value: numpy.float64, const.CDF_EPOCH16.value: numpy.dtype((numpy.float64, 2)), const.CDF_TIME_TT2000.value: numpy.int64, } self.timetypes = [const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value] if not self.supports_int8: del self.cdftypenames[const.CDF_INT8.value] del self.numpytypedict[const.CDF_INT8.value] del self.cdftypenames[const.CDF_TIME_TT2000.value] del self.numpytypedict[const.CDF_TIME_TT2000.value] elif sys.platform.startswith('linux') \ and os.uname()[4].startswith('arm') \ and hasattr(self._library, 'computeTT2000') \ and self._library.computeTT2000( 2010, 1, 1, 0, 0, 0, 0, 0, 0) != 315576066184000000: #TT2000 call failed, so probably need to type-pun #double arguments to variadic functions. #Calling convention for non-variadic functions with floats #is unique, but convention for ints is same as variadic. #So type-pun arguments to integers to force that calling #convention. if ctypes.sizeof(ctypes.c_longlong) != \ ctypes.sizeof(ctypes.c_double): warnings.warn('ARM with unknown type sizes; ' 'TT2000 functions will not work.') else: self._library.computeTT2000.argtypes = \ [ctypes.c_longlong] * 9 c_ll_p = ctypes.POINTER(ctypes.c_longlong) if self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( 2010)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) != 315576066184000000: warnings.warn('ARM with unknown calling convention; ' 'TT2000 functions will not work.') self.datetime_to_tt2000 = self._datetime_to_tt2000_typepunned v_epoch16_to_datetime = numpy.frompyfunc( self.epoch16_to_datetime, 2, 1) self.v_epoch16_to_datetime = \ lambda x: v_epoch16_to_datetime(x[..., 0], x[..., 1]) self.v_epoch_to_datetime = numpy.frompyfunc( self.epoch_to_datetime, 1, 1) self.v_tt2000_to_datetime = numpy.frompyfunc( self.tt2000_to_datetime, 1, 1) self.v_datetime_to_epoch = numpy.vectorize( self.datetime_to_epoch, otypes=[numpy.float64]) v_datetime_to_epoch16 = numpy.frompyfunc( self.datetime_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. 
We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_datetime_to_epoch16(x): retval = numpy.require(v_datetime_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_datetime_to_epoch16 = _v_datetime_to_epoch16 self.v_datetime_to_tt2000 = numpy.vectorize( self.datetime_to_tt2000, otypes=[numpy.int64]) self.v_epoch_to_tt2000 = numpy.vectorize( self.epoch_to_tt2000, otypes=[numpy.int64]) self.v_tt2000_to_epoch = numpy.vectorize( self.tt2000_to_epoch, otypes=[numpy.float64]) v_epoch16_to_tt2000 = numpy.frompyfunc( self.epoch16_to_tt2000, 2, 1) self.v_epoch16_to_tt2000 = \ lambda x: v_epoch16_to_tt2000(x[..., 0], x[..., 1]) v_tt2000_to_epoch16 = numpy.frompyfunc( self.tt2000_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_tt2000_to_epoch16(x): retval = numpy.require(v_tt2000_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_tt2000_to_epoch16 = _v_tt2000_to_epoch16 if not self.supports_int8: self.datetime_to_tt2000 = self._bad_tt2000 self.tt2000_to_datetime = self._bad_tt2000 self.v_datetime_to_tt2000 = self._bad_tt2000 self.v_tt2000_to_datetime = self._bad_tt2000 self.epoch_to_tt2000 = self._bad_tt2000 self.v_epoch_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch = self._bad_tt2000 self.v_tt2000_to_epoch = self._bad_tt2000 self.epoch_16_to_tt2000 = self._bad_tt2000 self.v_epoch16_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch16 = self._bad_tt2000 self.v_tt2000_to_epoch16 = self._bad_tt2000 #Default to V2 CDF self.set_backward(True) @staticmethod def _find_lib(): """ Search for the CDF library Searches in likely locations for CDF libraries and attempts to load them. Stops at first successful load and, if fails, reports all the files that were tried as libraries. Returns ======= out : tuple This is either (path to library, loaded library) or, in the event of failure, (None, list of libraries tried) """ failed = [] for libpath in Library._lib_paths(): try: lib = ctypes.CDLL(libpath) except: failed.append(libpath) else: return libpath, lib return (failed, None) @staticmethod def _lib_paths(): """Find candidate paths for the CDF library Does not check that the library is actually in any particular directory, just returns a list of possible locations, in priority order. Returns ======= out : generator of str paths that look like the CDF library """ #What the library might be named names = { 'win32': ['cdf.dll'], 'darwin': ['libcdf.dylib', 'cdf.dylib', 'libcdf.so'], 'linux2': ['libcdf.so'], 'linux': ['libcdf.so'], } names = names.get(sys.platform, ['libcdf.so']) #All existing CDF-library-like paths within a directory search_dir = lambda x: \ [os.path.join(x, fname) for fname in names if os.path.exists(os.path.join(x, fname))] # Only use anaconda locations... # Defined during builds ... if 'PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['PREFIX'], 'lib')): yield p # defined when conda is activated ... 
if 'CONDA_PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'lib')): yield p # Special subdirectory for anaconda unix packages on windows if 'LIBRARY_BIN' in os.environ: for p in search_dir(os.environ['LIBRARY_BIN']): yield p ctypespath = ctypes.util.find_library( 'cdf.dll' if sys.platform == 'win32' else 'cdf') if ctypespath: yield ctypespath def check_status(self, status, ignore=()): """ Raise exception or warning based on return status of CDF call Parameters ========== status : int status returned by the C library Other Parameters ================ ignore : sequence of ctypes.c_long CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. (Default none). Raises ====== CDFError : if status < CDF_WARN, indicating an error Warns ===== CDFWarning : if CDF_WARN <= status < CDF_OK, indicating a warning. Returns ======= out : int status (unchanged) """ if status == const.CDF_OK or status in ignore: return status if status < const.CDF_WARN: raise CDFError(status) else: warning = CDFWarning(status) warning.warn() return status def call(self, *args, **kwargs): """ Call the CDF internal interface Passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with :meth:`check_status`. Terminal NULL is automatically added to args. Parameters ========== args : various, see :mod:`ctypes` Passed directly to the CDF library interface. Useful constants are defined in the :mod:`~pycdf.const` module. Other Parameters ================ ignore : sequence of CDF statuses sequence of CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. Returns ======= out : int CDF status from the library Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ if 'ignore' in kwargs: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) ), kwargs['ignore']) else: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) )) def set_backward(self, backward=True): """ Set backward compatibility mode for new CDFs Unless backward compatible mode is set, CDF files created by the version 3 library can not be read by V2. Parameters ========== backward : boolean Set backward compatible mode if True; clear it if False. Raises ====== ValueError : if backward=False and underlying CDF library is V2 """ if self.version[0] < 3: if not backward: raise ValueError( 'Cannot disable backward-compatible mode for CDF version 2.') else: return self._library.CDFsetFileBackward(const.BACKWARDFILEon if backward else const.BACKWARDFILEoff) def epoch_to_datetime(self, epoch): """ Converts a CDF epoch value to a datetime Parameters ========== epoch : float epoch value from CDF Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
See Also ======== v_epoch_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) self._library.EPOCHbreakdown(ctypes.c_double(epoch), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999000) else: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, msec.value * 1000) def datetime_to_epoch(self, dt): """ Converts a Python datetime to a CDF Epoch value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : float epoch corresponding to dt See Also ======== v_datetime_to_epoch """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) micro = dt.microsecond % 1000 if micro >= 500 and dt.year < 9999: dt += datetime.timedelta(0, 0, 1000) return self._library.computeEPOCH(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000)) def epoch16_to_datetime(self, epoch0, epoch1): """ Converts a CDF epoch16 value to a datetime .. note:: The call signature has changed since SpacePy 0.1.2. Formerly this method took a single argument with two values; now it requires two arguments (one for each value). To convert existing code, replace ``epoch16_to_datetime(epoch)`` with ``epoch16_to_datetime(*epoch)``. Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. See Also ======== v_epoch16_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) usec = ctypes.c_long(0) nsec = ctypes.c_long(0) psec = ctypes.c_long(0) self._library.EPOCH16breakdown((ctypes.c_double * 2)(epoch0, epoch1), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec), ctypes.byref(psec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) micro = int(float(msec.value) * 1000 + float(usec.value) + float(nsec.value) / 1000 + float(psec.value) / 1e6 + 0.5) if micro < 1000000: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_epoch16(self, dt): """ Converts a Python datetime to a CDF Epoch16 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : list of float epoch16 corresponding to dt See Also ======== v_datetime_to_epoch16 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) #Default to "illegal epoch" epoch16 = (ctypes.c_double * 2)(-1., -1.) 
if self._library.computeEPOCH16(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0, 0, epoch16): return (-1., -1.) #Failure, so illegal epoch return (epoch16[0], epoch16[1]) def epoch_to_epoch16(self, epoch): """ Converts a CDF EPOCH to a CDF EPOCH16 value Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : (double, double) EPOCH16 corresponding to epoch """ e = numpy.require(epoch, numpy.float64) s = numpy.trunc(e / 1000.0) #ugly numpy stuff, probably a better way.... res = numpy.hstack((s, (e - s * 1000.0) * 1e9)) if len(res) <= 2: return res newshape = list(res.shape[0:-2]) newshape.append(res.shape[-1] // 2) newshape.append(2) return numpy.rollaxis(res.reshape(newshape), -1, -2) def epoch_to_num(self, epoch): """ Convert CDF EPOCH to matplotlib number. Same output as :func:`~matplotlib.dates.date2num` and useful for plotting large data sets without converting the times through datetime. Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : double Floating point number representing days since 0001-01-01. """ #date2num day 1 is 1/1/1 00UT #epoch 1/1/1 00UT is 31622400000.0 (millisecond) return (epoch - 31622400000.0) / (24 * 60 * 60 * 1000.0) + 1.0 def epoch16_to_epoch(self, epoch16): """ Converts a CDF EPOCH16 to a CDF EPOCH value Parameters ========== epoch16 : (double, double) EPOCH16 to convert. Lists and numpy arrays are acceptable. LAST dimension should be 2: the two pairs of EPOCH16 Returns ======= out : double EPOCH corresponding to epoch16 """ e = numpy.require(epoch16, numpy.float64) return e[..., 0] * 1000.0 + numpy.round(e[..., 1] / 1e9) def tt2000_to_datetime(self, tt2000): """ Converts a CDF TT2000 value to a datetime .. note:: Although TT2000 values support leapseconds, Python's datetime object does not. Any times after 23:59:59.999999 will be truncated to 23:59:59.999999. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
See Also ======== v_tt2000_to_datetime """ yyyy = ctypes.c_double(0) mm = ctypes.c_double(0) dd = ctypes.c_double(0) hh = ctypes.c_double(0) min = ctypes.c_double(0) sec = ctypes.c_double(0) msec = ctypes.c_double(0) usec = ctypes.c_double(0) nsec = ctypes.c_double(0) self._library.breakdownTT2000( ctypes.c_longlong(tt2000), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) sec = int(sec.value) if sec >= 60: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), 59, 999999) micro = int(msec.value * 1000 + usec.value + nsec.value / 1000 + 0.5) if micro < 1000000: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_tt2000(self, dt): """ Converts a Python datetime to a CDF TT2000 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0) def _datetime_to_tt2000_typepunned(self, dt): """ Converts a Python datetime to a CDF TT2000 value Typepunned version that passes doubles as longlongs, to get around ARM calling convention oddness. Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ c_ll_p = ctypes.POINTER(ctypes.c_longlong) if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( dt.year)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.month)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.day)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.hour)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.minute)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.second)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond // 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond % 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) def epoch_to_tt2000(self, epoch): """ Converts a CDF EPOCH to a CDF TT2000 value Parameters ========== epoch : double EPOCH to convert Returns ======= out : int tt2000 corresponding to epoch See Also ======== v_epoch_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH(epoch) def tt2000_to_epoch(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH .. note:: Although TT2000 values support leapseconds, CDF EPOCH values do not. 
Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double EPOCH corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch """ return self._library.CDF_TT2000_to_UTC_EPOCH(tt2000) def epoch16_to_tt2000(self, epoch0, epoch1): """ Converts a CDF epoch16 value to TT2000 .. note:: Because TT2000 does not support picoseconds, the picoseconds value in epoch is ignored (i.e., truncated.) Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : long TT2000 corresponding to epoch. See Also ======== v_epoch16_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH16( (ctypes.c_double * 2)(epoch0, epoch1)) def tt2000_to_epoch16(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH16 .. note:: Although TT2000 values support leapseconds, CDF EPOCH16 values do not. Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double, double EPOCH16 corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch16 """ #Default to "illegal epoch" if isn't populated epoch16 = (ctypes.c_double * 2)(-1., -1.) if self._library.CDF_TT2000_to_UTC_EPOCH16(tt2000, epoch16): return (-1., -1.) #Failure; illegal epoch return (epoch16[0], epoch16[1]) def _bad_tt2000(*args, **kwargs): """Convenience function for complaining that TT2000 not supported""" raise NotImplementedError( 'TT2000 functions require CDF library 3.4.0 or later') def download_library(): """Download and install the CDF library""" if sys.platform != 'win32': raise NotImplementedError( 'CDF library install only supported on Windows') try: import html.parser as HTMLParser except ImportError: import HTMLParser #https://stackoverflow.com/questions/1713038/super-fails-with-error-typeerror-argument-1-must-be-type-not-classobj class LinkParser(HTMLParser.HTMLParser, object): def __init__(self, *args, **kwargs): self.links_found = [] super(LinkParser, self).__init__(*args, **kwargs) def handle_starttag(self, tag, attrs): if tag != 'a' or attrs[0][0] != 'href': return self.links_found.append(attrs[0][1]) import re import subprocess try: import urllib.request as u except ImportError: import urllib as u # Removed reference to spacepy #import spacepy #if spacepy.config.get('user_agent', None): # class AppURLopener(u.FancyURLopener): # version = spacepy.config['user_agent'] # u._urlopener = AppURLopener() baseurl = 'https://spdf.sci.gsfc.nasa.gov/pub/software/cdf/dist/' url = u.urlopen(baseurl) listing = url.read() url.close() p = LinkParser() p.feed(listing) cdfdist = [l for l in p.links_found if re.match('^cdf3\d_\d(?:_\d)?/$', l)] if not cdfdist: raise RuntimeError( "Couldn't find CDF distribution directory to download") cdfdist.sort(key=lambda x: x.rstrip('/').split('_')) cdfverbase = cdfdist[-1].rstrip('/') instfname = cdfverbase + ('_0' if cdfverbase.count('_') == 1 else '') + \ '-setup-{0}.exe'.format(len('%x' % sys.maxsize)*4) insturl = baseurl + cdfverbase + '/windows/' + instfname tmpdir = tempfile.mkdtemp() try: fname, status = u.urlretrieve(insturl, os.path.join(tmpdir, instfname)) subprocess.check_call([fname, '/install', '/q1'], shell=False) finally: shutil.rmtree(tmpdir) _libpath, _library = Library._find_lib() 
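# --- Editorial note: a hedged usage sketch of the time-conversion helpers defined
# above, through the module-level ``lib`` object created just below. The session
# is illustrative only; the round trip assumes a CDF library new enough for TT2000.
#
# >>> import datetime, pycdf
# >>> tt = pycdf.lib.datetime_to_tt2000(datetime.datetime(2010, 1, 1))
# >>> pycdf.lib.tt2000_to_datetime(tt)
# datetime.datetime(2010, 1, 1, 0, 0)
# >>> ep = pycdf.lib.tt2000_to_epoch(tt)      # corresponding CDF EPOCH (milliseconds)
# >>> pycdf.lib.epoch_to_datetime(ep)
# datetime.datetime(2010, 1, 1, 0, 0)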
if _library is None: raise Exception(('Cannot load CDF C library; checked {0}. ' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(_libpath))) from . import const lib = Library(_libpath, _library) """Module global library object. Initalized at module load time so all classes have ready access to the CDF library and a common state. E.g: >>> import pycdf >>> pycdf.lib.version (3, 3, 0, ' ') """ class CDFException(Exception): """ Base class for errors or warnings in the CDF library. Not normally used directly, but in subclasses :class:`CDFError` and :class:`CDFWarning`. Error messages provided by this class are looked up from the underlying C library. """ def __init__(self, status): """ Create a CDF Exception Uses CDF C library to look up an appropriate error message. Parameters ========== status : ctypes.c_long CDF status """ self.status = status self.string = 'CDF error ' + repr(status) + ', unable to get details.' message = ctypes.create_string_buffer(const.CDF_STATUSTEXT_LEN + 1) try: retval = lib._library.CDFlib(const.SELECT_, const.CDF_STATUS_, ctypes.c_long(status), const.GET_, const.STATUS_TEXT_, message, const.NULL_) if retval == const.CDF_OK: if isinstance(message.value, str): self.string = message.value elif isinstance(message.value, bytes): self.string = message.value.decode() except: pass def __str__(self): """ Error string associated with the library error. Returns ======= out : str Error message from the CDF library. """ return self.string class CDFError(CDFException): """Raised for an error in the CDF library.""" pass class CDFWarning(CDFException, UserWarning): """Used for a warning in the CDF library.""" def warn(self, level=4): """ Issues a warning based on the information stored in my exception Intended for use in check_status or similar wrapper function. Other Parameters ================ level : int optional (default 3), how far up the stack the warning should be reported. Passed directly to :class:`warnings.warn`. """ warnings.warn(self, self.__class__, level) class EpochError(Exception): """Used for errors in epoch routines""" pass def _compress(obj, comptype=None, param=None): """Set or check the compression of a :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param obj: object on which to set or check compression @type obj: :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param comptype: type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :py:mod:`pycdf.const`. If not specified, will not change compression. @type comptype: ctypes.c_long @param param: Compression parameter, see CDF CRM 4.10 and :py:mod:`pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
@type param: ctypes.c_long @return: (comptype, param) currently in effect @rtype: tuple """ if isinstance(obj, CDF): COMPRESSION_ = const.CDF_COMPRESSION_ elif isinstance(obj, Var): COMPRESSION_ = const.zVAR_COMPRESSION_ else: raise ValueError('Must specify a CDF or Var type.') validparams = {const.NO_COMPRESSION.value: [ctypes.c_long(0)], const.RLE_COMPRESSION.value: [const.RLE_OF_ZEROs], const.HUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.AHUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.GZIP_COMPRESSION.value: [ctypes.c_long(5), ctypes.c_long(1), ctypes.c_long(2), ctypes.c_long(3), ctypes.c_long(4), ctypes.c_long(6), ctypes.c_long(7), ctypes.c_long(8), ctypes.c_long(9), ], } comptypes = [const.NO_COMPRESSION, const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION, const.GZIP_COMPRESSION] comptypevalues = [i.value for i in comptypes] if comptype != None: if not hasattr(comptype, 'value'): comptype = ctypes.c_long(comptype) if param is None: if not comptype.value in validparams: raise CDFError(const.BAD_COMPRESSION) param = validparams[comptype.value][0] paramlist = (ctypes.c_long * 1)(param) obj._call(const.PUT_, COMPRESSION_, comptype, paramlist) params = (ctypes.c_long * const.CDF_MAX_PARMS)(*([0] * const.CDF_MAX_PARMS)) comptype = ctypes.c_long(0) percent = ctypes.c_long(0) obj._call(const.GET_, COMPRESSION_, ctypes.byref(comptype), ctypes.byref(params), ctypes.byref(percent)) param = params[0] if not comptype.value in comptypevalues: raise CDFError(const.BAD_COMPRESSION) validparamvalues = [i.value for i in validparams[comptype.value]] if not param in validparamvalues: raise CDFError(const.BAD_COMPRESSION_PARM) comptype = comptypes[comptypevalues.index(comptype.value)] if comptype in (const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION): param = validparams[comptype.value][validparamvalues.index(param)] return (comptype, param) class CDF(MutableMapping): """ Python object representing a CDF file. Open or create a CDF file by creating an object of this class. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. A readonly CDF with many variables may be slow to close. See :meth:`readonly`. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :meth:`close` or :meth:`save` when done. .. note:: Existing CDF files are opened read-only by default, see :meth:`readonly` to change. CDF supports the `with <http://docs.python.org/tutorial/inputoutput.html#methods-of-file-objects>`_ keyword, like other file objects, so: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... #do brilliant things with the CDF will open the CDF, execute the indented statements, and close the CDF when finished or when an error occurs. The `python docs <http://docs.python.org/reference/compound_stmts.html#with>`_ include more detail on this 'context manager' ability. 
CDF objects behave like a python `dictionary <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_, where the keys are names of variables in the CDF, and the values, :class:`Var` objects. As a dictionary, they are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_ and it is easy to loop over all of the variables in a file. Some examples: #. List the names of all variables in the open CDF ``cdffile``: >>> cdffile.keys() >>> for k in cdffile: #Alternate ... print(k) #. Get a :class:`Var` object for the variable named ``Epoch``: >>> epoch = cdffile['Epoch'] #. Determine if a CDF contains a variable named ``B_GSE``: >>> if 'B_GSE' in cdffile: ... print('B_GSE is in the file') ... else: ... print('B_GSE is not in the file') #. Find how many variables are in the file: >>> print(len(cdffile)) #. Delete the variable ``Epoch`` from the open CDF file ``cdffile``: >>> del cdffile['Epoch'] #. Display a summary of variables and types in open CDF file ``cdffile``: >>> print(cdffile) #. Open the CDF named ``cdf_filename.cdf``, read *all* the data from all variables into dictionary ``data``, and close it when done or if an error occurs: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... data = cdffile.copy() This last example can be very inefficient as it reads the entire CDF. Normally it's better to treat the CDF as a dictionary and access only the data needed, which will be pulled transparently from disc. See :class:`Var` for more subtle examples. Potentially useful dictionary methods and related functions: - `in <http://docs.python.org/reference/expressions.html#in>`_ - `keys <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_ - :py:func:`len` - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - :py:func:`sorted` - :py:func:`~spacepy.toolbox.dictree` The CDF user's guide section 2.2 has more background information on CDF files. The :attr:`~CDF.attrs` Python attribute acts as a dictionary referencing CDF attributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`gAttrList` for more on the dictionary of global attributes. Creating a new CDF from a master (skeleton) CDF has similar syntax to opening one: >>> cdffile = pycdf.CDF('cdf_filename.cdf', 'master_cdf_filename.cdf') This creates and opens ``cdf_filename.cdf`` as a copy of ``master_cdf_filename.cdf``. Using a skeleton CDF is recommended over making a CDF entirely from scratch, but this is possible by specifying a blank master: >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') When CDFs are created in this way, they are opened read-write, see :py:meth:`readonly` to change. By default, new CDFs (without a master) are created in version 2 (backward-compatible) format. To create a version 3 CDF, use :meth:`Library.set_backward`: >>> pycdf.lib.set_backward(False) >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') Add variables by direct assignment, which will automatically set type and dimension based on the data provided: >>> cdffile['new_variable_name'] = [1, 2, 3, 4] or, if more control is needed over the type and dimensions, use :py:meth:`new`. Although it is supported to assign Var objects to Python variables for convenience, there are some minor pitfalls that can arise when changing a CDF that will not affect most users. 
This is only a concern when assigning a zVar object to a Python variable, changing the CDF through some other variable, and then trying to use the zVar object via the originally assigned variable. Deleting a variable: >>> var = cdffile['Var1'] >>> del cdffile['Var1'] >>> var[0] #fail, no such variable Renaming a variable: >>> var = cdffile['Var1'] >>> cdffile['Var1'].rename('Var2') >>> var[0] #fail, no such variable Renaming via the same variable works: >>> var = cdffile['Var1'] >>> var.rename('Var2') >>> var[0] #succeeds, aware of new name Deleting a variable and then creating another variable with the same name may lead to some surprises: >>> var = cdffile['Var1'] >>> var[...] = [1, 2, 3, 4] >>> del cdffile['Var1'] >>> cdffile.new('Var1', data=[5, 6, 7, 8] >>> var[...] [5, 6, 7, 8] .. autosummary:: ~CDF.attr_num ~CDF.attrs ~CDF.add_attr_to_cache ~CDF.add_to_cache ~CDF.backward ~CDF.checksum ~CDF.clear_attr_from_cache ~CDF.clear_from_cache ~CDF.clone ~CDF.close ~CDF.col_major ~CDF.compress ~CDF.copy ~CDF.from_data ~CDF.new ~CDF.raw_var ~CDF.readonly ~CDF.save ~CDF.var_num ~CDF.version .. attribute:: CDF.attrs Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. .. attribute:: CDF.backward True if this CDF was created in backward-compatible mode (for opening with CDF library before 3.x) .. automethod:: add_to_cache .. automethod:: add_attr_to_cache .. automethod:: attr_num .. automethod:: checksum .. automethod:: clear_from_cache .. automethod:: clear_attr_from_cache .. automethod:: clone .. automethod:: close .. automethod:: col_major .. automethod:: compress .. automethod:: copy .. automethod:: from_data .. automethod:: new .. automethod:: raw_var .. automethod:: readonly .. automethod:: save .. automethod:: var_num .. automethod:: version """ def __init__(self, pathname, masterpath=None, create=None, readonly=None): """Open or create a CDF file. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. Raises ====== CDFError if CDF library reports an error CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save` when done. 
""" if masterpath is not None: #Looks like we want to create if create is False: raise ValueError('Cannot specify a master CDF without creating a CDF') if readonly is True: raise ValueError('Cannot create a CDF in readonly mode') if create and readonly: raise ValueError('Cannot create a CDF in readonly mode') try: self.pathname = pathname.encode() except AttributeError: raise ValueError( 'pathname must be string-like: {0}'.format(pathname)) self._handle = ctypes.c_void_p(None) self._opened = False if masterpath is None and not create: self._open(True if readonly is None else readonly) elif masterpath: self._from_master(masterpath.encode()) else: self._create() lib.call(const.SELECT_, const.CDF_zMODE_, ctypes.c_long(2)) self._attrlistref = weakref.ref(gAttrList(self)) self.backward = self.version()[0] < 3 self._var_nums = {} """Cache of name-to-number mappings for variables in this CDF""" self._attr_info = {} """Cache of name-to-(number, global) mappings for attributes in this CDF""" def __del__(self): """Destructor; called when CDF object is destroyed. Close CDF file if there is still a valid handle. .. note:: To avoid data loss, explicitly call :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save`. """ if self._opened: self.close() def __delitem__(self, name): """Delete a zVariable in this CDF, by name or number Parameters ========== name : string or int Name or number of the CDF variable .. note: Variable numbers may change if variables are added or removed. Examples ======== Delete the variable ``Epoch`` from the open CDF file ``cdffile``. >>> del cdffile['Epoch'] """ self[name]._delete() def __enter__(self): """Context manager entrance function.""" return self def __exit__(self, type, value, traceback): """Context manager exit function. Close CDF file. """ self.close() def __getitem__(self, name): """Gets a zVariable in this CDF, by name or number The CDF acts like a dict @param name: Name or number of the CDF variable @type name: string or int @return: CDF variable named or numbered L{name} @rtype: :py:class:`pycdf.Var` @raise KeyError: for pretty much any problem in lookup @note: variable numbers may change if variables are added or removed. """ try: return Var(self, name) except CDFException as e: raise KeyError('{0}: {1}'.format(name, e)) def __setitem__(self, name, data): """Writes data to a zVariable in this CDF If the zVariable does not exist, will create one matching L{data}. If it does exist, will attempt to write L{data} to it without changing the type or dimensions. @param name: name or number of the variable to write @type name: str or int @param data: data to write, or a :py:class:`pycdf.Var` to copy """ if isinstance(data, Var): self.clone(data, name) elif name in self: self[name][...] 
= data if hasattr(data, 'attrs'): self[name].attrs.clone(data.attrs) else: self.new(name, data) def __iter__(self, current = 0): """Iterates over zVars in CDF Iterators for dicts return keys @note: Returned in variable-number order """ while current < self.__len__(): name = self[current].name() value = (yield name) if value is None: current += 1 else: current = self[value]._num() current += 1 def __len__(self): """Implements 'length' of CDF (number of zVars) @return: number of zVars in the CDF @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, const.CDF_NUMzVARS_, ctypes.byref(count)) return count.value def __contains__(self, key): """Determines whether a particular variable name is in the CDF @note: Essentially an efficiency function; L{__iter__} is called if this isn't defined @param key: key/variable name to check @type key: string @return: True if L{key} is the name of a variable in CDF, else False @rtype: Boolean """ try: foo = self[key] return True except KeyError as e: expected = str(key) + \ ": NO_SUCH_VAR: Named variable not found in this CDF." if expected in e.args: return False raise def __repr__(self): """Returns representation of CDF Cannot return anything that can be eval'd to create a copy of the CDF, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<CDF:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the CDF This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.CDF`, just the names, types, and sizes of all variables. (Attributes are not listed.) @return: description of the variables in the CDF @rtype: str """ if self._opened: return '\n'.join([key + ': ' + str(value) for (key, value) in sorted(self.items())]) #can get away with this sort because second value in tuple isn't #compared unless first are different, and variable name is unique. else: if isinstance(self.pathname, str): return 'Closed CDF {0}'.format(self.pathname) else: return 'Closed CDF {0}'.format(self.pathname.decode('ascii')) def _open(self, readonly=True): """Opens the CDF file (called on init) Will open an existing CDF file read/write. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.OPEN_, const.CDF_, self.pathname, ctypes.byref(self._handle)) self._opened = True if readonly: #Default is RW self.readonly(readonly) def _create(self): """Creates (and opens) a new CDF file Created at ``pathname``. Assumes zero-dimension r variables Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.CREATE_, const.CDF_, self.pathname, ctypes.c_long(0), (ctypes.c_long * 1)(0), ctypes.byref(self._handle)) self._opened = True def _from_master(self, master_path): """Creates a new CDF from a master CDF file ``master_path`` is copied to ``pathname`` and opened. Parameters ========== master_path : string location of the master CDF file Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. 
note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ if os.path.exists(self.pathname): raise CDFError(const.CDF_EXISTS) shutil.copy2(master_path, self.pathname) self._open(False) def _call(self, *args, **kwargs): """Select this CDF as current and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. Parameters ========== args : various, see :py:mod:`ctypes`. Passed directly to the CDF library interface. Useful constants are defined in the :doc:`const <pycdf_const>` module of this package. Returns ======= out : ctypes.c_long CDF status from the library .. note: Terminal NULL_ is automatically added to ``args``. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. """ return lib.call(const.SELECT_, const.CDF_, self._handle, *args, **kwargs) def clone(self, zVar, name=None, data=True): """ Clone a zVariable (from another CDF or this) into this CDF Parameters ========== zVar : :py:class:`Var` variable to clone Other Parameters ================ name : str Name of the new variable (default: name of the original) data : boolean (optional) Copy data, or only type, dimensions, variance, attributes? (default: True, copy data as well) Returns ======= out : :py:class:`Var` The newly-created zVar in this CDF """ if name is None: name = zVar.name() if name in self: del self[name] self.new(name, type=zVar.type(), recVary=zVar.rv(), dimVarys=zVar.dv(), dims=zVar._dim_sizes(), n_elements=zVar._nelems()) self[name].compress(*zVar.compress()) self[name].attrs.clone(zVar.attrs) if data: r = zVar._raw zVar._raw = True self.raw_var(name)[...] = zVar[...] zVar._raw = r return zVar def col_major(self, new_col=None): """ Finds the majority of this CDF file Other Parameters ================ new_col : boolean Specify True to change to column-major, False to change to row major, or do not specify to check the majority rather than changing it. (default is check only) Returns ======= out : boolean True if column-major, false if row-major """ if new_col != None: new_maj = const.COLUMN_MAJOR if new_col else const.ROW_MAJOR self._call(const.PUT_, const.CDF_MAJORITY_, new_maj) maj = ctypes.c_long(0) self._call(const.GET_, const.CDF_MAJORITY_, ctypes.byref(maj)) if not maj.value in (const.ROW_MAJOR.value, const.COLUMN_MAJOR.value): raise CDFError(const.BAD_MAJORITY) return maj.value == const.COLUMN_MAJOR.value def readonly(self, ro=None): """ Sets or check the readonly status of this CDF If the CDF has been changed since opening, setting readonly mode will have no effect. .. note:: Closing a CDF that has been opened readonly, or setting readonly False, may take a substantial amount of time if there are many variables in the CDF, as a (potentially large) cache needs to be cleared. Consider specifying ``readonly=False`` when opening the file if this is an issue. However, this may make some reading operations slower. Other Parameters ================ ro : Boolean True to set the CDF readonly, False to set it read/write, or leave out to check only. 
Returns ======= out : Boolean True if CDF is read-only, else False Raises ====== CDFError : if bad mode is set """ if ro == True: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYon) elif ro == False: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYoff) mode = ctypes.c_long(0) self._call(const.CONFIRM_, const.CDF_READONLY_MODE_, ctypes.byref(mode)) if mode.value == const.READONLYon.value: return True elif mode.value == const.READONLYoff.value: return False else: raise CDFError(const.BAD_READONLY_MODE.value) def checksum(self, new_val=None): """ Set or check the checksum status of this CDF. If checksums are enabled, the checksum will be verified every time the file is opened. Other Parameters ================ new_val : boolean True to enable checksum, False to disable, or leave out to simply check. Returns ======= out : boolean True if the checksum is enabled or False if disabled """ if new_val != None: self._call(const.PUT_, const.CDF_CHECKSUM_, const.MD5_CHECKSUM if new_val else const.NO_CHECKSUM) chk = ctypes.c_long(0) self._call(const.GET_, const.CDF_CHECKSUM_, ctypes.byref(chk)) if not chk.value in (const.MD5_CHECKSUM.value, const.NO_CHECKSUM.value): raise CDFError(const.BAD_CHECKSUM) return chk.value == const.MD5_CHECKSUM.value def close(self): """ Closes the CDF file Although called on object destruction (:meth:`~CDF.__del__`), to ensure all data are saved, the user should explicitly call :meth:`~CDF.close` or :meth:`~CDF.save`. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.CLOSE_, const.CDF_) self._opened = False def compress(self, comptype=None, param=None): """ Set or check the compression of this CDF Sets compression on entire *file*, not per-variable. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) Returns ======= out : tuple (comptype, param) currently in effect See Also ======== :meth:`Var.compress` Examples ======== Set file ``cdffile`` to gzip compression, compression level 9: >>> cdffile.compress(pycdf.const.GZIP_COMPRESSION, 9) """ return _compress(self, comptype, param) def new(self, name, data=None, type=None, recVary=True, dimVarys=None, dims=None, n_elements=None, compress=None, compress_param=None): """ Create a new zVariable in this CDF .. note:: Either ``data`` or ``type`` must be specified. If type is not specified, it is guessed from ``data``. Parameters ========== name : str name of the new variable Other Parameters ================ data data to store in the new variable. If this has a an ``attrs`` attribute (e.g., :class:`~spacepy.datamodel.dmarray`), it will be used to populate attributes of the new variable. type : ctypes.c_long CDF type of the variable, from :mod:`~pycdf.const`. See section 2.5 of the CDF user's guide for more information on CDF data types. recVary : boolean record variance of the variable (default True) dimVarys : list of boolean dimension variance of each dimension, default True for all dimensions. 
dims : list of int size of each dimension of this variable, default zero-dimensional. Note this is the dimensionality as defined by CDF, i.e., for record-varying variables it excludes the leading record dimension. See :py:class:`Var`. n_elements : int number of elements, should be 1 except for CDF_CHAR, for which it's the length of the string. compress : ctypes.c_long Compression to apply to this variable, default None. See :py:meth:`Var.compress`. compress_param : ctypes.c_long Compression parameter if compression used; reasonable default is chosen. See :py:meth:`Var.compress`. Returns ======= out : :py:class:`Var` the newly-created zVariable Raises ====== ValueError : if neither data nor sufficient typing information is provided. Notes ===== Any given data may be representable by a range of CDF types; if the type is not specified, pycdf will guess which the CDF types which can represent this data. This breaks down to: #. If input data is a numpy array, match the type of that array #. Proper kind (numerical, string, time) #. Proper range (stores highest and lowest number provided) #. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: #. Type that matches precision of data first, then #. integer type before float type, then #. Smallest type first, then #. signed type first, then #. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if ``data`` specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: #. absolute values between 0 and 3e-39 #. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. """ if type in (const.CDF_EPOCH16, const.CDF_INT8, const.CDF_TIME_TT2000) \ and self.backward: raise ValueError('Cannot use EPOCH16, INT8, or TIME_TT2000 ' 'in backward-compatible CDF') if not lib.supports_int8 and \ type in (const.CDF_INT8, const.CDF_TIME_TT2000): raise ValueError('INT8 and TIME_TT2000 require CDF library 3.4.0') if data is None: if type is None: raise ValueError('Must provide either data or a CDF type.') if dims is None: dims = [] if n_elements is None: n_elements = 1 else: (guess_dims, guess_types, guess_elements) = _Hyperslice.types(data) if dims is None: if recVary: if guess_dims == (): raise ValueError( 'Record-varying data cannot be scalar. 
' 'Specify NRV with CDF.new() or put data in array.') dims = guess_dims[1:] else: dims = guess_dims if type is None: type = guess_types[0] if type == const.CDF_EPOCH16.value and self.backward: type = const.CDF_EPOCH if n_elements is None: n_elements = guess_elements if dimVarys is None: dimVarys = [True for i in dims] recVary = const.VARY if recVary else const.NOVARY dimVarys = [const.VARY if dimVary else const.NOVARY for dimVary in dimVarys] if not hasattr(type, 'value'): type = ctypes.c_long(type) if type.value == const.CDF_INT8.value and not lib.supports_int8: raise ValueError( '64-bit integer support require CDF library 3.4.0') if type.value in (const.CDF_EPOCH16.value, const.CDF_INT8.value, const.CDF_TIME_TT2000.value) \ and self.backward: raise ValueError('Data requires EPOCH16, INT8, or TIME_TT2000; ' 'incompatible with backward-compatible CDF') new_var = Var(self, name, type, n_elements, dims, recVary, dimVarys) if compress != None: new_var.compress(compress, compress_param) if data is not None: new_var[...] = data if hasattr(data, 'attrs'): new_var.attrs.clone(data.attrs) return new_var def raw_var(self, name): """ Get a "raw" :class:`Var` object. Normally a :class:`Var` will perform translation of values for certain types (to/from Unicode for CHAR variables on Py3k, and to/from datetime for all time types). A "raw" object does not perform this translation, on read or write. This does *not* affect the data on disk, and in fact it is possible to maintain multiple Python objects with access to the same zVariable. Parameters ========== name : str name or number of the zVariable """ v = self[name] v._raw = True return v def save(self): """ Saves the CDF file but leaves it open. If closing the CDF, :meth:`close` is sufficient; there is no need to call :meth:`save` before :meth:`close`. .. note:: Relies on an undocumented call of the CDF C library, which is also used in the Java interface. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.SAVE_, const.CDF_) def copy(self): """ Make a copy of all data and attributes in this CDF Returns ======= out : :py:class:`CDFCopy` :class:`~spacepy.datamodel.SpaceData`-like object of all data """ return CDFCopy(self) def version(self): """ Get version of library that created this CDF Returns ======= out : tuple version of CDF library, in form (version, release, increment) """ ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) self._call(const.GET_, const.CDF_VERSION_, ctypes.byref(ver), const.GET_, const.CDF_RELEASE_, ctypes.byref(rel), const.GET_, const.CDF_INCREMENT_, ctypes.byref(inc)) return (ver.value, rel.value, inc.value) def _get_attrs(self): """Get attribute list Provide access to the CDF's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = gAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. 
""") def var_num(self, varname): """Get the variable number of a particular variable name This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : int Variable number of this zvariable. """ num = self._var_nums.get(varname, None) if num is None: #Copied from Var._get, which can hopefully be thinned varNum = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMBER_, varname, ctypes.byref(varNum)) num = varNum.value self._var_nums[varname] = num return num def attr_num(self, attrname): """Get the attribute number and scope by attribute name This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : tuple attribute number, scope (True for global) of this attribute """ res = self._attr_info.get(attrname, None) if res is None: #Copied from Var._get, which can hopefully be thinned attrNum = ctypes.c_long(0) self._call(const.GET_, const.ATTR_NUMBER_, attrname, ctypes.byref(attrNum)) scope = ctypes.c_long(0) self._call(const.SELECT_, const.ATTR_, attrNum, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) res = (attrNum.value, scope) self._attr_info[attrname] = res return res def clear_attr_from_cache(self, attrname): """Mark an attribute deleted in the name-to-number cache Will remove an attribute, and all attributes with higher numbers, from the attribute cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the attribute. Not this is NOT a string in Python 3! """ num, scope = self.attr_num(attrname) #All numbers higher than this are renumbered for a, n in list(self._attr_info.items()): if n[0] >= num: del self._attr_info[a] def clear_from_cache(self, varname): """Mark a variable deleted in the name-to-number cache Will remove a variable, and all variables with higher numbers, from the variable cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! """ num = self.var_num(varname) #All numbers higher than this are renumbered for v, n in list(self._var_nums.items()): if n >= num: del self._var_nums[v] def add_attr_to_cache(self, attrname, num, scope): """Add an attribute to the name-to-number cache This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable scope : bool True if global scope; False if variable scope. 
""" self._attr_info[attrname] = (num, scope) def add_to_cache(self, varname, num): """Add a variable to the name-to-number cache This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable """ self._var_nums[varname] = num #Note there is no function for delete, currently handled in Var.rename #and Attr.rename by just deleting from the dict directly. Maybe this #should be differen (maybe should be possible to follow a variable across #a rename...) class Var(MutableSequence): """ A CDF variable. This object does not directly store the data from the CDF; rather, it provides access to the data in a format that much like a Python list or numpy :class:`~numpy.ndarray`. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. The CDF user's guide, section 2.3, provides background on variables. .. note:: Not intended to be created directly; use methods of :class:`CDF` to gain access to a variable. A record-varying variable's data are viewed as a hypercube of dimensions n_dims+1 (the extra dimension is the record number). They are indexed in row-major fashion, i.e. the last index changes most frequently / is contiguous in memory. If the CDF is column-major, the data are transformed to row-major before return. Non record-varying variables are similar, but do not have the extra dimension of record number. Variables can be subscripted by a multidimensional index to return the data. Indices are in row-major order with the first dimension representing the record number. If the CDF is column major, the data are reordered to row major. Each dimension is specified by standard Python `slice <http://docs.python.org/tutorial/introduction.html#strings>`_ notation, with dimensions separated by commas. The ellipsis fills in any missing dimensions with full slices. The returned data are lists; Python represents multidimensional arrays as nested lists. The innermost set of lists represents contiguous data. .. note:: numpy 'fancy indexing' is *not* supported. Degenerate dimensions are 'collapsed', i.e. no list of only one element will be returned if a single subscript is specified instead of a range. (To avoid this, specify a slice like 1:2, which starts with 1 and ends before 2). Two special cases: 1. requesting a single-dimension slice for a record-varying variable will return all data for that record number (or those record numbers) for that variable. 2. Requests for multi-dimensional variables may skip the record-number dimension and simply specify the slice on the array itself. In that case, the slice of the array will be returned for all records. In the event of ambiguity (e.g., single-dimension slice on a one-dimensional variable), case 1 takes priority. Otherwise, mismatch between the number of dimensions specified in the slice and the number of dimensions in the variable will cause an :exc:`~exceptions.IndexError` to be thrown. This all sounds very complicated but it is essentially attempting to do the 'right thing' for a range of slices. An unusual case is scalar (zero-dimensional) non-record-varying variables. Clearly they cannot be subscripted normally. 
In this case, use the ``[...]`` syntax meaning 'access all data.': >>> import pycdf >>> testcdf = pycdf.CDF('test.cdf', '') >>> variable = testcdf.new('variable', recVary=False, ... type=pycdf.const.CDF_INT4) >>> variable[...] = 10 >>> variable <Var: CDF_INT4 [] NRV > >>> variable[...] 10 Reading any empty non-record-varying variable will return an empty with the same *number* of dimensions, but all dimensions will be of zero length. The scalar is, again, a special case: due to the inability to have a numpy array which is both zero-dimensional and empty, reading an NRV scalar variable with no data will return an empty one-dimensional array. This is really not recommended. As a list type, variables are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_; iterating over a variable returns a single complete record at a time. This is all clearer with examples. Consider a variable ``B_GSM``, with three elements per record (x, y, z components) and fifty records in the CDF. Then: 1. ``B_GSM[0, 1]`` is the y component of the first record. 2. ``B_GSM[10, :]`` is a three-element list, containing x, y, and z components of the 11th record. As a shortcut, if only one dimension is specified, it is assumed to be the record number, so this could also be written ``B_GSM[10]``. 3. ``B_GSM[...]`` reads all data for ``B_GSM`` and returns it as a fifty-element list, each element itself being a three-element list of x, y, z components. Multidimensional example: consider fluxes stored as a function of pitch angle and energy. Such a variable may be called Flux and stored as a two-dimensional array, with the first dimension representing (say) ten energy steps and the second, eighteen pitch angle bins (ten degrees wide, centered from 5 to 175 degrees). Assume 100 records stored in the CDF (i.e. 100 different times). 1. ``Flux[4]`` is a list of ten elements, one per energy step, each element being a list of 18 fluxes, one per pitch bin. All are taken from the fifth record in the CDF. 2. ``Flux[4, :, 0:4]`` is the same record, all energies, but only the first four pitch bins (roughly, field-aligned). 3. ``Flux[..., 0:4]`` is a 100-element list (one per record), each element being a ten-element list (one per energy step), each containing fluxes for the first four pitch bins. This slicing notation is very flexible and allows reading specifically the desired data from the CDF. All data are, on read, converted to appropriate Python data types; EPOCH, EPOCH16, and TIME_TT2000 types are converted to :class:`~datetime.datetime`. Data are returned in numpy arrays. .. note:: Although pycdf supports TIME_TT2000 variables, the Python :class:`~datetime.datetime` object does not support leap seconds. Thus, on read, any seconds past 59 are truncated to 59.999999 (59 seconds, 999 milliseconds, 999 microseconds). Potentially useful list methods and related functions: - `count <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `in <http://docs.python.org/reference/expressions.html#in>`_ - `index <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `len <http://docs.python.org/library/functions.html#len>`_ - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - `sorted <http://docs.python.org/library/functions.html#sorted>`_ The topic of array majority can be very confusing; good background material is available at `IDL Array Storage and Indexing <http://www.idlcoyote.com/misc_tips/colrow_major.html>`_. 
In brief, *regardless of the majority stored in the CDF*, pycdf will always present the data in the native Python majority, row-major order, also known as C order. This is the default order in `NumPy <http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html #internal-memory-layout-of-an-ndarray>`_. However, packages that render image data may expect it in column-major order. If the axes seem 'swapped' this is likely the reason. The :attr:`~Var.attrs` Python attribute acts as a dictionary referencing zAttributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`zAttrList` for more on the dictionary of attributes. With writing, as with reading, every attempt has been made to match the behavior of Python lists. You can write one record, many records, or even certain elements of all records. There is one restriction: only the record dimension (i.e. dimension 0) can be resized by write, as all records in a variable must have the same dimensions. Similarly, only whole records can be deleted. .. note:: Unusual error messages on writing data usually mean that pycdf is unable to interpret the data as a regular array of a single type matching the type and shape of the variable being written. A 5x4 array is supported; an irregular array where one row has five columns and a different row has six columns is not. Error messages of this type include: - ``Data must be well-formed, regular array of number, string, or datetime`` - ``setting an array element with a sequence.`` - ``shape mismatch: objects cannot be broadcast to a single shape`` For these examples, assume Flux has 100 records and dimensions [2, 3]. Rewrite the first record without changing the rest: >>> Flux[0] = [[1, 2, 3], [4, 5, 6]] Writes a new first record and delete all the rest: >>> Flux[...] = [[1, 2, 3], [4, 5, 6]] Write a new record in the last position and add a new record after: >>> Flux[99:] = [[[1, 2, 3], [4, 5, 6]], ... [[11, 12, 13], [14, 15, 16]]] Insert two new records between the current number 5 and 6: >>> Flux[5:6] = [[[1, 2, 3], [4, 5, 6]], [[11, 12, 13], ... [14, 15, 16]]] This operation can be quite slow, as it requires reading and rewriting the entire variable. (CDF does not directly support record insertion.) Change the first element of the first two records but leave other elements alone: >>> Flux[0:2, 0, 0] = [1, 2] Remove the first record: >>> del Flux[0] Removes record 5 (the sixth): >>> del Flux[5] Due to the need to work around a bug in the CDF library, this operation can be quite slow. Delete *all data* from ``Flux``, but leave the variable definition intact: >>> del Flux[...] .. note:: Although this interface only directly supports zVariables, zMode is set on opening the CDF so rVars appear as zVars. See p.24 of the CDF user's guide; pyCDF uses zMode 2. .. autosummary:: ~Var.attrs ~Var.compress ~Var.copy ~Var.dtype ~Var.dv ~Var.insert ~Var.name ~Var.rename ~Var.rv ~Var.shape ~Var.type .. attribute:: Var.attrs zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. .. automethod:: compress .. automethod:: copy .. autoattribute:: dtype .. automethod:: dv .. automethod:: insert .. automethod:: name .. automethod:: rename .. automethod:: rv .. autoattribute:: shape .. 
automethod:: type """ def __init__(self, cdf_file, var_name, *args): """Create or locate a variable Parameters ========== cdf_file : :py:class:`pycdf.CDF` CDF file containing this variable var_name : string name of this variable Other Parameters ================ args additional arguments passed to :py:meth:`_create`. If none, opens an existing variable. If provided, creates a new one. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning """ self.cdf_file = cdf_file #This is the definitive "identify" of variable self._name = None self._type = None #CDF type (long) self._raw = False #Raw access (skip all conversions) if len(args) == 0: self._get(var_name) else: self._create(var_name, *args) #Weak reference to attribute list (use attrs instead) #This avoids a reference loop self._attrlistref = weakref.ref(zAttrList(self)) def __getitem__(self, key): """Returns a slice from the data array. Details under :py:class:`pycdf.Var`. @return: The data from this variable @rtype: list-of-lists of appropriate type. @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) #Hyperslice mostly catches this sort of thing, but #an empty variable is a special case, since we might want to #WRITE to 0th record (which Hyperslice also supports) but #can't READ from it, and iterating over tries to read from it. if hslice.rv: if hslice.dimsizes[0] == 0 and hslice.degen[0] and \ hslice.starts[0] == 0: raise IndexError('record index out of range') #For NRV, again hslice will assume 0th record exists since we might #want to write. So ANY degenerate dim other than the glued-on 0th #suggests an explicit index that should fail. None degenerate suggests #make an empty array. #Note this is pulling a lot of hyperslice stuff into getitem! elif hslice.dimsizes[0] == 0: if len(hslice.degen) > 1 and max(hslice.degen[1:]): raise IndexError('record index out of range') else: #The zero-length dimension is degenerate so it gets chopped, #and you can't have a zero-length numpy array that still #maintains the size of all other dimensions. So just force #a zero-dim array and the rest will follow hslice.counts[...] = 0 #If this is a scalar, need to make a single non-degenerate #dimension so it can be empty. if len(hslice.counts) == 1: hslice.degen[0] = False result = hslice.create_array() if hslice.counts[0] != 0: hslice.select() lib.call(const.GET_, const.zVAR_HYPERDATA_, result.ctypes.data_as(ctypes.c_void_p)) return hslice.convert_input_array(result) def __delitem__(self, key): """Removes a record (or set of records) from the CDF Only whole records can be deleted, so the del call must either specify only one dimension or it must specify all elements of the non-record dimensions. This is *not* a way to resize a variable! Deleting records from the middle of a variable may be very slow in some circumstances. To work around a bug in CDF library versions 3.4.0 and before, all the data must be read in, the requested deletions done, and then all written back out. 
@param key: index or slice to delete @type key: int or slice @raise TypeError: if an attempt is made to delete from a non record-varying variable, or to delete below the record level """ if not self.rv(): raise TypeError('Cannot delete records from non-record-varying ' 'variable.') hslice = _Hyperslice(self, key) if hslice.dims > 1 and (hslice.counts[1:] != hslice.dimsizes[1:]).any(): raise TypeError('Can only delete entire records.') if hslice.counts[0] == 0: return start = hslice.starts[0] count = hslice.counts[0] interval = hslice.intervals[0] dimsize = hslice.dimsizes[0] self._call() dangerous_delete = False if lib._del_middle_rec_bug and \ (interval != 1 or (start != 0 and start + count < dimsize)): #delete from middle is dangerous if only have one index entry entries = ctypes.c_long(0) lib.call(const.GET_, const.zVAR_nINDEXENTRIES_, ctypes.byref(entries)) dangerous_delete = (entries.value == 1) if dangerous_delete: data = self[...] data = numpy.delete( data, numpy.arange(start, start + count * interval, interval), 0) self[0:dimsize - count] = data first_rec = dimsize - count last_rec = dimsize - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif interval == 1: first_rec = ctypes.c_long(start) last_rec = ctypes.c_long(start + count - 1) lib.call(const.DELETE_, const.zVAR_RECORDS_, first_rec, last_rec) else: self._call() #delete from end to avoid renumbering of records for recno in range(start + (count - 1) * interval, start - 1, -1 * interval): lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(recno), ctypes.c_long(recno)) def __setitem__(self, key, data): """Puts a slice into the data array. Details under :py:class:`pycdf.Var`. @param key: index or slice to store @type key: int or slice @param data: data to store @type data: numpy.array @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. 
IndexError will @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) n_recs = hslice.counts[0] hslice.expand(data) cdf_type = self.type() if cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) else: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=self._np_type()) if cdf_type == const.CDF_EPOCH16.value: datashape = data.shape[:-1] else: datashape = data.shape #Check data sizes if datashape != tuple(hslice.expected_dims()): raise ValueError('attempt to assign data of dimensions ' + str(datashape) + ' to slice of dimensions ' + str(tuple(hslice.expected_dims()))) #Flip majority and reversed dimensions, see convert_input_array data = hslice.convert_output_array(data) #Handle insertions and similar weirdness if hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Specified slice ends before last record, so insert in middle saved_data = self[hslice.starts[0] + n_recs:] if hslice.counts[0] > 0: hslice.select() lib.call(const.PUT_, const.zVAR_HYPERDATA_, data.ctypes.data_as(ctypes.c_void_p)) if hslice.counts[0] < n_recs: first_rec = hslice.starts[0] + hslice.counts[0] last_rec = hslice.dimsizes[0] - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Put saved data in after inserted data self[hslice.starts[0] + hslice.counts[0]:] = saved_data def extend(self, data): """ Append multiple values to the end of this variable This is an efficiency function which overrides the base implementation in MutableSequence. Parameters ---------- data : the data to append """ self[len(self):] = data def insert(self, index, data): """ Inserts a *single* record before an index Parameters ---------- index : int index before which to insert the new record data : the record to insert """ self[index:index] = [data] def _create(self, var_name, datatype, n_elements = 1, dims = (), recVary = const.VARY, dimVarys = None): """Creates a new zVariable @param var_name: name of this variable @type var_name: string @param datatype: CDF data type @type datatype: ctypes.c_long @param n_elements: number of elements (should be 1 except for CDF_CHAR variables). @type n_elements: long @param dims: size of each dimension for multi-dimensional variable, or empty for a zero-dimensional @type dims: sequence of long @param recVary: record variance for this variable (VARY/NOVARY) @type recVary: long @param dimVarys: array of VARY or NOVARY, variance for each dimension @type dimVarys: sequence of long @return: new variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.new}. 
""" dim_array = (ctypes.c_long * len(dims))(*dims) enc_name = var_name.encode('ascii') if dimVarys is None: dim_vary_array = (ctypes.c_long * (len(dims) if len(dims) > 0 else 1))(const.VARY) else: dim_vary_array = (ctypes.c_long * len(dims))(*dimVarys) varNum = ctypes.c_long(0) self.cdf_file._call(const.CREATE_, const.zVAR_, enc_name, datatype, ctypes.c_long(n_elements), ctypes.c_long(len(dims)), dim_array, recVary, dim_vary_array, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) def _delete(self): """Removes this zVariable from the CDF @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ self._call(const.DELETE_, const.zVAR_) self.cdf_file.clear_from_cache(self._name) self._name = None def _get(self, var_name): """Gets an existing zVariable @param var_name: name of this variable @type var_name: string @return: variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.__getitem__}. """ if isinstance(var_name, str_classes): try: enc_name = var_name.encode('ascii').rstrip() except AttributeError: enc_name = var_name.rstrip() #already in ASCII #'touch' CDF to cause an error if the name isn't there; get number varNum = ctypes.c_long(0) self.cdf_file._call(const.GET_, const.zVAR_NUMBER_, enc_name, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) else: #Looking up by number name = ctypes.create_string_buffer(const.CDF_VAR_NAME_LEN256+1) self.cdf_file._call(const.SELECT_, const.zVAR_, ctypes.c_long(var_name), const.GET_, const.zVAR_NAME_, name) self._name = name.value.rstrip() self.cdf_file.add_to_cache(self._name, var_name) def _num(self): """Returns the zVar number for this variable @return: number of this zVar @rtype: int """ return self.cdf_file.var_num(self._name) def __len__(self): """Get number of records for this variable in this file @return: Number of records @rtype: long @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ count = ctypes.c_long(0) self._call(const.GET_, const.zVAR_MAXREC_, ctypes.byref(count)) return (count.value + 1) def __repr__(self): """Returns representation of the variable Cannot return anything that can be eval'd to create a copy, so just wrap the informal representation in angle brackets. @return: info on this zVar @rtype: str """ return '<Var:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the variable This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.Var`. 
@return: info on this zVar, CDFTYPE [dimensions] NRV (if not record-varying) @rtype: str """ if self.cdf_file._opened: cdftype = self.type() chartypes = (const.CDF_CHAR.value, const.CDF_UCHAR.value) rv = self.rv() typestr = lib.cdftypenames[cdftype] + \ ('*' + str(self._nelems()) if cdftype in chartypes else '' ) if rv: sizestr = str([len(self)] + self._dim_sizes()) else: sizestr = str(self._dim_sizes()) return typestr + ' ' + sizestr + ('' if rv else ' NRV') else: if isinstance(self._name, str): return 'zVar "{0}" in closed CDF {1}'.format( self._name, self.cdf_file.pathname) else: return 'zVar "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self.cdf_file.pathname.decode('ascii')) def _n_dims(self): """Get number of dimensions for this variable @return: the number of dimensions @rtype: long """ n_dims = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMDIMS_, ctypes.byref(n_dims)) return n_dims.value def _dim_sizes(self): """Get the dimension sizes for this variable @return: sequence of sizes @rtype: sequence of long @note: This will always be in Python order (i.e. row major, last index iterates most quickly), *regardless* of the majority of the CDF. """ sizes = (ctypes.c_long * const.CDF_MAX_DIMS)(0) self._call(const.GET_, const.zVAR_DIMSIZES_, sizes) sizes = sizes[0:self._n_dims()] return sizes def rv(self, new_rv=None): """ Gets or sets whether this variable has record variance If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Other Parameters ================ new_rv : boolean True to change to record variance, False to change to NRV, unspecified to simply check variance. Returns ======= out : Boolean True if record varying, False if NRV """ if new_rv != None: self._call(const.PUT_, const.zVAR_RECVARY_, const.VARY if new_rv else const.NOVARY) vary = ctypes.c_long(0) self._call(const.GET_, const.zVAR_RECVARY_, ctypes.byref(vary)) return vary.value != const.NOVARY.value def dv(self, new_dv=None): """ Gets or sets dimension variance of each dimension of variable. If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Parameters ========== new_dv : list of boolean Each element True to change that dimension to dimension variance, False to change to not dimension variance. (Unspecified to simply check variance.) Returns ======= out : list of boolean True if that dimension has variance, else false. """ ndims = self._n_dims() if new_dv != None: if len(new_dv) != ndims: raise ValueError('Must specify variance for ' + str(ndims) + 'dimensions.') varies = (ctypes.c_long * ndims)( *[const.VARY if dv else const.NOVARY for dv in new_dv]) self._call(const.PUT_, const.zVAR_DIMVARYS_, varies) if ndims == 0: return [] varies = (ctypes.c_long * const.CDF_MAX_DIMS)() self._call(const.GET_, const.zVAR_DIMVARYS_, varies) return [dv != const.NOVARY.value for dv in varies[0:ndims]] def _call(self, *args, **kwargs): """Select this CDF and variable and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. @param args: Passed directly to the CDF library interface. Useful constants are defined in the :py:mod:`pycdf.const` module of this package. @type args: various, see :py:mod:`ctypes`. 
@return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self.cdf_file._call( const.SELECT_, const.zVAR_, ctypes.c_long(self.cdf_file.var_num(self._name)), *args, **kwargs) def _np_type(self): """Returns the numpy type of this variable This is the numpy type that will come directly out of the CDF; see :meth:`dtype` for the representation post-conversion. Raises ====== CDFError : for library-reported error or failure to find numpy type Returns ======= out : dtype numpy dtype that will hold value from this variable """ cdftype = self.type() if cdftype == const.CDF_CHAR.value or cdftype == const.CDF_UCHAR.value: return numpy.dtype('S' + str(self._nelems())) try: return lib.numpytypedict[cdftype] except KeyError: raise CDFError(const.BAD_DATA_TYPE) def type(self, new_type=None): """ Returns or sets the CDF type of this variable Parameters ========== new_type : ctypes.c_long the new type from :mod:`~pycdf.const` Returns ======= out : int CDF type """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) n_elements = ctypes.c_long(self._nelems()) self._call(const.PUT_, const.zVAR_DATASPEC_, new_type, n_elements) self._type = None if self._type is None: cdftype = ctypes.c_long(0) self._call(const.GET_, const.zVAR_DATATYPE_, ctypes.byref(cdftype)) self._type = cdftype.value return self._type def _nelems(self): """Number of elements for each value in this variable This is the length of strings for CHAR and UCHAR, should be 1 otherwise. @return: length of strings @rtype: int """ nelems = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMELEMS_, ctypes.byref(nelems)) return nelems.value def name(self): """ Returns the name of this variable Returns ======= out : str variable's name """ if isinstance(self._name, str): return self._name elif isinstance(self._name, bytes): return self._name.decode() def compress(self, comptype=None, param=None): """ Set or check the compression of this variable Compression may not be changeable on variables with data already written; even deleting the data may not permit the change. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
Returns ======= out : tuple the (comptype, param) currently in effect """ return _compress(self, comptype, param) def copy(self): """ Copies all data and attributes from this variable Returns ======= out : :class:`VarCopy` list of all data in record order """ return VarCopy(self) def rename(self, new_name): """ Renames this variable Parameters ========== new_name : str the new name for this variable """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_VAR_NAME_LEN256: raise CDFError(const.BAD_VAR_NAME) self._call(const.PUT_, const.zVAR_NAME_, enc_name) self.cdf_file.add_to_cache( enc_name, self.cdf_file.var_num(self._name)) #Still in cache del self.cdf_file._var_nums[self._name] self._name = enc_name @property def shape(self): """ Provides the numpy array-like shape of this variable. Returns a tuple; first element is number of records (RV variable only) And the rest provide the dimensionality of the variable. .. note:: Assigning to this attribute will not change the shape. """ if self.rv(): return tuple([len(self)] + self._dim_sizes()) else: return tuple(self._dim_sizes()) @property def dtype(self): """ Provide the numpy dtype equivalent to the CDF type of this variable. Data from this variable will be returned in numpy arrays of this type. See Also -------- type """ cdftype = self.type() if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str is not bytes and not self._raw: return numpy.dtype('U' + str(self._nelems())) if cdftype in (const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value) and not self._raw: return numpy.dtype('O') return self._np_type() def _get_attrs(self): """Get attribute list Provide access to the zVar's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = zAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. """) class _Hyperslice(object): """Represents a CDF 'slice' used for the hyper CDF functions For internal module use only. @ivar dims: number of dimensions to this slice, usually number of dimensions to the variable plus one for the record, which represents the 0th (least rapidly varying) dimension. @type dims: int @ivar dimsizes: size of each dimension (0th is number of records) @type dimsizes: list of int @ivar starts: index of the start value for each dimension ('dimension indices' in CDF speak) @type starts: list of int @ivar counts: number of values to get from each dimension. Final result will be the product of everything in counts. ('dimension counts' in CDF speak) @type counts: numpy.array @ivar intervals: interval between successive indices to use for each dimension. ('dimension invervals' in CDF speak) @type intervals: list of int @ivar degen: is this dimension degenerate, i.e. should be removed in the returned dataset. A 3D array with one dimension degenerate will be returned as a 2D array (i.e. list-of-lists.) @type degen: numpy.array @ivar rev: should this dimension be returned in reverse order? 
@type rev: numpy.array @ivar column: is this slice in column-major mode (if false, row-major) @type column: boolean @ivar zvar: what CDF variable this object slices on @type zvar: :py:class:`pycdf.Var` @ivar expanded_key: fully-expanded version of the key passed to the constructor (all dimensions filled in) @type expanded_key: tuple @note: All dimension-related variables are stored row-major (Python order) """ def __init__(self, zvar, key): """Create a Hyperslice @param zvar: zVariable that this slices @type zvar: :py:class:`pycdf.Var` @param key: Python multi-dimensional slice as passed to __getitem__ @type key: tuple of slice and/or int @raise IndexError: if slice is out of range, mismatches dimensions, or otherwise unparsable. @raise ValueError: if slice has invalid values """ self.zvar = zvar self.rv = self.zvar.rv() #dim of records, + 1 record dim (NRV always is record 0) self.dims = zvar._n_dims() + 1 self.dimsizes = [len(zvar)] + \ zvar._dim_sizes() self.starts = [0] * self.dims self.counts = numpy.empty((self.dims,), dtype=numpy.int32) self.counts.fill(1) self.intervals = [1] * self.dims self.degen = numpy.zeros(self.dims, dtype=numpy.bool) self.rev = numpy.zeros(self.dims, dtype=numpy.bool) #key is: #1. a single value (integer or slice object) if called 1D #2. a tuple (of integers and/or slice objects) if called nD #3. Each item is either a single value (degenerate dim) # or a slice object. if not hasattr(key, '__len__'): #Not a container object, pack in tuple key = (key, ) if not self.rv: key = (0, ) + key #NRV, so always get 0th record (degenerate) key = self.expand_ellipsis(key, self.dims) if self.rv: #special-cases for RV variables if len(key) == 1: #get all data for this record(s) key = self.expand_ellipsis(key + (Ellipsis, ), self.dims) elif len(key) == self.dims - 1: #get same slice from each record key = (slice(None, None, None), ) + key if len(key) == self.dims: self.expanded_key = key for i in range(self.dims): idx = key[i] if hasattr(idx, 'start'): #slice (self.starts[i], self.counts[i], self.intervals[i], self.rev[i]) = \ self.convert_range(idx.start, idx.stop, idx.step, self.dimsizes[i]) else: #Single degenerate value if idx < 0: idx += self.dimsizes[i] if idx != 0 and (idx >= self.dimsizes[i] or idx < 0): raise IndexError('list index out of range') self.starts[i] = idx self.degen[i] = True else: raise IndexError('Slice does not match dimensions for zVar ' + str(zvar._name)) self.column = zvar.cdf_file.col_major() def expected_dims(self, data=None): """Calculate size of non-degenerate dimensions Figures out size, in each dimension, of expected input data @return: size of each dimension for this slice, excluding degenerate @rtype: list of int """ return [self.counts[i] for i in range(self.dims) if not self.degen[i]] def expand(self, data): """Expands the record dimension of this slice to hold a set of data If the length of data (outermost dimension) is larger than the record count (counts[0]) for this slice, expand the slice to hold all the data. This requires that the record dimension of the slice not be degenerate, and also that it not have been completely specified when the hyperslice was created (i.e. record dimension either ellipsis or no specified stop.) Does *not* expand any other dimension, since that's Very Hard in CDF. 
@param data: the data which are intended to be stored in this slice @type data: list """ rec_slice = self.expanded_key[0] if not self.rv or isinstance(data, str_classes) or self.degen[0] or \ not hasattr(rec_slice, 'stop'): return if len(data) < self.counts[0]: #Truncate to fit data if rec_slice.stop is None and rec_slice.step in (None, 1): self.counts[0] = len(data) elif len(data) > self.counts[0]: #Expand to fit data if rec_slice.step in (None, 1): self.counts[0] = len(data) def create_array(self): """Creates a numpy array to hold the data from this slice Returns ======= out : numpy.array array sized, typed, and dimensioned to hold data from this slice """ counts = self.counts degen = self.degen if self.column: counts = self.reorder(counts) degen = self.reorder(degen) #TODO: Forcing C order for now, revert to using self.column later array = numpy.empty( [counts[i] for i in range(len(counts)) if not degen[i]], self.zvar._np_type(), order='C') return numpy.require(array, requirements=('C', 'A', 'W')) def convert_input_array(self, buffer): """Converts a buffer of raw data from this slice EPOCH(16) variables always need to be converted. CHAR need converted to Unicode if py3k Parameters ========== buffer : numpy.array data as read from the CDF file Returns ======= out : numpy.array converted data """ result = self._flip_array(buffer) #Convert to derived types cdftype = self.zvar.type() if not self.zvar._raw: if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str != bytes: dt = numpy.dtype('U{0}'.format(result.dtype.itemsize)) result = numpy.require(numpy.char.array(result).decode(), dtype=dt) elif cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(result) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(result) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(result) return result def convert_output_array(self, buffer): """Convert a buffer of data that will go into this slice Parameters ========== buffer : numpy.array data to go into the CDF file Returns ======= out : numpy.array input with majority flipped and dimensions reversed to be suitable to pass directly to CDF library. """ buffer = self._flip_array(buffer) return numpy.require(buffer, requirements=('C', 'A', 'W')) def _flip_array(self, data): """ Operations for majority, etc. common between convert_input and _output """ cdftype = self.zvar.type() #Flip majority if any non-degenerate dimensions exist if self.column and not min(self.degen): #Record-number dim degen, swap whole thing if self.degen[0]: if cdftype == const.CDF_EPOCH16.value: #Maintain last dimension data = data.transpose( list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose() #Record-number dimension is not degenerate, so keep it first else: if cdftype == const.CDF_EPOCH16.value: data = data.transpose( [0] + list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose( [0] + list(range(len(data.shape) - 1, 0, -1))) #Reverse non-degenerate dimensions in rev #Remember that the degenerate indices are already gone! 
if self.rev.any(): sliced = [(slice(None, None, -1) if self.rev[i] else slice(None)) for i in range(self.dims) if not self.degen[i]] if cdftype == const.CDF_EPOCH16.value: #don't reverse last dim sliced.extend(slice(None)) data = operator.getitem(data, tuple(sliced)) return data def select(self): """Selects this hyperslice in the CDF Calls the CDF library to select the CDF, variable, records, and array elements corresponding to this slice. """ args = (const.SELECT_, const.zVAR_RECNUMBER_, ctypes.c_long(self.starts[0]), const.SELECT_, const.zVAR_RECCOUNT_, ctypes.c_long(self.counts[0]), const.SELECT_, const.zVAR_RECINTERVAL_, ctypes.c_long(self.intervals[0])) if self.dims > 1: dims = self.dims - 1 args += (const.SELECT_, const.zVAR_DIMINDICES_, (ctypes.c_long * dims)(*self.starts[1:]), const.SELECT_, const.zVAR_DIMCOUNTS_, (ctypes.c_long * dims)(*self.counts[1:]), const.SELECT_, const.zVAR_DIMINTERVALS_, (ctypes.c_long * dims)(*self.intervals[1:])) self.zvar._call(*args) @staticmethod def expand_ellipsis(slices, n_dims): """Expands any ellipses into correct number of full-size slices @param slices: tuple of slices, integers, or ellipse objects @type slices: tuple @param n_dims: number of dimensions this slice is over @type n_dims: int @return: L{slices} with ellipses replaced by appropriate number of full-dimension slices @rtype: tuple @raise IndexError: if ellipses specified when already have enough dimensions """ if slices is Ellipsis: return tuple([slice(None, None, None) for i in range(n_dims)]) #Elements might be numpy arrays, so can't use in/index idx = [i for i, v in enumerate(slices) if v is Ellipsis] if not idx: #no ellipsis return slices if len(idx) > 1: #multiples! raise IndexError('Ellipses can only be used once per slice.') idx = idx[0] #how many dims to expand ellipsis to #remember the ellipsis is in len(slices) and must be replaced! extra = n_dims - len(slices) + 1 if extra < 0: raise IndexError('too many indices') result = slices[0:idx] + (slice(None), ) * extra + slices[idx+1:] return result @staticmethod def check_well_formed(data): """Checks if input data is well-formed, regular array""" d = numpy.asanyarray(data) if d.dtype == numpy.object: #this is probably going to be bad try: len(d.flat[0]) except TypeError: #at least it's not a list pass else: raise ValueError( 'Data must be well-formed, regular array of number, ' 'string, or datetime') @staticmethod def dimensions(data): """Finds the dimensions of a nested list-of-lists @param data: data of which dimensions are desired @type data: list (of lists) @return: dimensions of L{data}, in order outside-in @rtype: list of int @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) _Hyperslice.check_well_formed(d) return d.shape @staticmethod def types(data, backward=False): """Find dimensions and valid types of a nested list-of-lists Any given data may be representable by a range of CDF types; infer the CDF types which can represent this data. This breaks down to: 1. Proper kind (numerical, string, time) 2. Proper range (stores highest and lowest number) 3. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: 1. Type that matches precision of data first, then 2. integer type before float type, then 3. Smallest type first, then 4. signed type first, then 5. specifically-named (CDF_BYTE) vs. 
generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if L{data} specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: 1. absolute values between 0 and 3e-39 2. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. @param data: data for which dimensions and CDF types are desired @type data: list (of lists) @param backward: limit to pre-CDF3 types @type backward: bool @return: dimensions of L{data}, in order outside-in; CDF types which can represent this data; number of elements required (i.e. length of longest string) @rtype: 3-tuple of lists ([int], [ctypes.c_long], [int]) @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) dims = d.shape elements = 1 types = [] _Hyperslice.check_well_formed(d) if d.dtype.kind in ('S', 'U'): #it's a string types = [const.CDF_CHAR, const.CDF_UCHAR] elements = d.dtype.itemsize if d.dtype.kind == 'U': #UTF-8 uses 4 bytes per elements //= 4 elif d.size and hasattr(numpy.ma.getdata(d).flat[0], 'microsecond'): if max((dt.microsecond % 1000 for dt in d.flat)) > 0: types = [const.CDF_EPOCH16, const.CDF_EPOCH, const.CDF_TIME_TT2000] else: types = [const.CDF_EPOCH, const.CDF_EPOCH16, const.CDF_TIME_TT2000] if backward: del types[types.index(const.CDF_EPOCH16)] del types[-1] elif not lib.supports_int8: del types[-1] elif d is data or isinstance(data, numpy.generic): #numpy array came in, use its type (or byte-swapped) types = [k for k in lib.numpytypedict if (lib.numpytypedict[k] == d.dtype or lib.numpytypedict[k] == d.dtype.newbyteorder()) and not k in lib.timetypes] if (not lib.supports_int8 or backward) \ and const.CDF_INT8.value in types: del types[types.index(const.CDF_INT8.value)] #Maintain priority to match the ordered lists below: #float/double (44, 45) before real (21/22), and #byte (41) before int (1) before char (51). So hack. #Consider making typedict an ordered dict once 2.6 is dead. types.sort(key=lambda x: x % 50, reverse=True) if not types: #not a numpy array, or can't parse its type if d.dtype.kind == 'O': #Object. 
Try to make it numeric #Can't do safe casting from Object, so try and compare #Basically try most restrictive to least restrictive trytypes = (numpy.uint64, numpy.int64, numpy.float64) for t in trytypes: try: newd = d.astype(dtype=t) except: #Failure to cast, try next type continue if (newd == d).all(): #Values preserved, use this type d = newd #Continue with normal guessing, as if a list break else: #fell through without a match raise ValueError( 'Cannot convert generic objects to CDF type.') if d.dtype.kind in ('i', 'u'): #integer minval = numpy.min(d) maxval = numpy.max(d) if minval < 0: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_INT2, const.CDF_INT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 15, 2 ** 31, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] else: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_UINT1, const.CDF_INT2, const.CDF_UINT2, const.CDF_INT4, const.CDF_UINT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 8, 2 ** 15, 2 ** 16, 2 ** 31, 2 ** 32, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] types = [t for (t, c) in zip(types, cutoffs) if c > maxval and (minval >= 0 or minval >= -c)] if (not lib.supports_int8 or backward) \ and const.CDF_INT8 in types: del types[types.index(const.CDF_INT8)] else: #float if dims is (): if d != 0 and (abs(d) > 1.7e38 or abs(d) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] else: absolutes = numpy.abs(d[d != 0]) if len(absolutes) > 0 and \ (numpy.max(absolutes) > 1.7e38 or numpy.min(absolutes) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] types = [t.value if hasattr(t, 'value') else t for t in types] return (dims, types, elements) @staticmethod def reorder(seq): """Reorders seq to switch array majority Used to take an array of subscripts between row and column majority. First element is not touched, being the record number. @param seq: a sequence of *subscripts* @type seq: sequence of integers @return: seq with all but element 0 reversed in order @rtype: sequence of integers """ return numpy.concatenate((seq[0:1], numpy.flipud(seq)[:-1])) @staticmethod def convert_range(start, stop, step, size): """Converts a start/stop/step range to start/count/interval (i.e. changes from Python-style slice to CDF-style) @param start: index to start a slice at, may be none or negative @type start: int @param stop: index at end of slice (one-past, standard Python), may be none or negative @type stop: int @param step: interval for stepping through stlice @type step: int @param size: size of list to slice @type size: int @return: (start, count, interval, rev) where: 1. start is the start index, normalized to be within the size of the list and negatives handled 2. count is the number of records in the slice, guaranteed to stop before the end 3. interval is the skip between records 4. rev indicates whether the sequence should be reversed @rtype: (int, int, int, boolean) """ (start, stop, step) = slice(start, stop, step).indices(size) if step < 0: step *= -1 count = int((start - stop + step - 1) / step) start = start - (count - 1) * step rev = True else: count = int((stop - start + step - 1) / step) rev = False if count < 0: count = 0 start = 0 return (start, count, step, rev) class Attr(MutableSequence): """An attribute, g or z, for a CDF .. 
warning:: This class should not be used directly, but only in its subclasses, :class:`gAttr` and :class:`zAttr`. The methods listed here are safe to use in the subclasses. Represents a CDF attribute, providing access to the Entries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. An introduction to CDF attributes can be found in section 2.4 of the CDF user's guide. Each element of the list is a single Entry of the appropriate type. The index to the elements is the Entry number. Multi-dimensional slicing is *not* supported; an Entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry .. autosummary:: ~Attr.append ~Attr.has_entry ~Attr.insert ~Attr.max_idx ~Attr.new ~Attr.number ~Attr.rename ~Attr.type .. automethod:: append .. automethod:: has_entry .. automethod:: insert .. automethod:: max_idx .. automethod:: new .. automethod:: number .. automethod:: rename .. automethod:: type """ def __init__(self, cdf_file, attr_name, create=False): """Initialize this attribute @param cdf_file: CDF file containing this attribute @type cdf_file: :py:class:`pycdf.CDF` @param attr_name: Name of this attribute @type attr_name: str @param create: True to create attribute, False to look up existing. @type create: bool """ self._cdf_file = cdf_file self._raw = False if isinstance(attr_name, str_classes): try: self._name = attr_name.encode('ascii') except AttributeError: self._name = attr_name attrno = ctypes.c_long() if create: self._cdf_file._call(const.CREATE_, const.ATTR_, self._name, self.SCOPE, ctypes.byref(attrno)) self._cdf_file.add_attr_to_cache( self._name, attrno.value, self.SCOPE == const.GLOBAL_SCOPE) else: #Ensure exists, and populate cache. See scope note below attrno, scope = self._cdf_file.attr_num(self._name) else: name = ctypes.create_string_buffer(const.CDF_ATTR_NAME_LEN256 + 1) scope = ctypes.c_long(0) self._cdf_file._call(const.SELECT_, const.ATTR_, ctypes.c_long(attr_name)) #Because it's possible to create a gAttr Python objecting #referencing an Attribute with variable scope, and vice-versa, #do NOT assume the scope matches #(Higher level code checks for that being a bad thing.) self._cdf_file._call( const.GET_, const.ATTR_NAME_, name, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) self._name = name.value.rstrip() if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) self._cdf_file.add_attr_to_cache(self._name, attr_name, scope) def __getitem__(self, key): """Return a slice of Entries. Because Attributes may be sparse, a multi-element slice will return None for those elements which do not have associated Entries. @param key: index or range of Entry number to return @type key: slice or int @return: a list of entries, appropriate type. @raise IndexError: if L{key} is an int and that Entry number does not exist. 
""" if key is Ellipsis: key = slice(None, None, None) if hasattr(key, 'indices'): idx = range(*key.indices(self.max_idx() + 1)) return [self._get_entry(i) if self.has_entry(i) else None for i in idx] else: if self.has_entry(key): return self._get_entry(key) else: raise IndexError('list index ' + str(key) + ' out of range.') def _check_other_entries(self, types): """Try to get the type of this entry from others in the Attribute For zAttrs, checks if all other Entries are the same type, and at least one doesn't match its zVar, i.e. Entry type dominates (otherwise assumption is the Var type dominates). For gAttrs, checks all other Entries, and gives priority to the one that's earliest in the possible type list and exists in other Entries. This is only one component of Entry type guessing! :param list types: CDF types that are candidates (match the data) :return: The type discerned from other Entries, or None """ if self.ENTRY_ == const.zENTRY_: #If everything else is the same entry type, #and one is not the same as its var, probably #all entries should be of that type cand_et = None #The Entry type that might work one_var_diff = False #One Var has a type different from Entry for num in range(self.max_idx() + 1): if not self.has_entry(num): continue vartype = self._cdf_file[num].type() entrytype = self.type(num) if vartype != entrytype: one_var_diff = True if cand_et is None: if not entrytype in types: return None #One var has Entry with "impossible" type cand_et = entrytype elif cand_et != entrytype: return None #Two vars have Entries with different types if one_var_diff and cand_et is not None: return cand_et else: # Of those types which exist in other entries, # find the one which is earliest # in types, i.e. the preferred type entrytypes = [self.type(num) for num in range(self.max_idx() + 1) if self.has_entry(num)] entrytypes = [et for et in entrytypes if et in types] if entrytypes: return types[ min([types.index(et) for et in entrytypes])] return None def __setitem__(self, key, data): """Set a slice of Entries. @param key: index or range of Entry numbers to set @type key: slice or int @param data: the data to set these entries to. Normally each entry should be a sequence; if a scalar is provided, it is treated as a single-element list. @type data: scalar or list @raise ValueError: if size of {data} does not match size of L{key} @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. 
""" if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): #Single value, promote everything a dimension idx = (key, key + 1, 1) data = [data] else: idx = key.indices(self.max_idx() + 1) if key.step is None or key.step > 0: #Iterating forward, extend slice to match data if len(data) > len(range(*idx)): idx = (idx[0], idx[0] + idx[2] * len(data), idx[2]) #get, and check, types and sizes for all data #checks first so don't have error after changing half the Entries data_idx = -1 typelist = [] for i in range(*idx): data_idx += 1 if data_idx >= len(data): continue datum = data[data_idx] if datum is None: typelist[i] = (None, None, None) continue (dims, types, elements) = _Hyperslice.types( datum, backward=self._cdf_file.backward) if len(types) <= 0: raise ValueError('Cannot find a matching CDF type.') if len(dims) > 1: raise ValueError('Entries must be scalar or 1D.') elif len(dims) == 1 and isinstance(datum[0], str_classes): raise ValueError('Entry strings must be scalar.') entry_type = None if self.has_entry(i): #If the entry already exists, match its type entry_type = self.type(i) if not entry_type in types: entry_type = None if entry_type is None: #Check other entries for this attribute entry_type = self._check_other_entries(types) if entry_type is None and self.ENTRY_ == const.zENTRY_: #Fall back to zVar type vartype = self._cdf_file[i].type() if vartype in types: entry_type = vartype else: entry_type = types[0] elif entry_type is None: entry_type = types[0] if not entry_type in lib.numpytypedict: raise ValueError('Cannot find a matching numpy type.') typelist.append((dims, entry_type, elements)) data_idx = -1 for i in range(*idx): data_idx += 1 if data_idx >= len(data) or data[data_idx] is None: if self.has_entry(i): del self[i] continue datum = data[data_idx] (dims, entry_type, elements) = typelist[data_idx] self._write_entry(i, datum, entry_type, dims, elements) def __delitem__(self, key): """Delete a slice of Entries. @param key: index or range of Entry numbers to delete @type key: slice or int @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. """ if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): idx = (key, key + 1, 1) else: idx = key.indices(self.max_idx() + 1) for i in range(*idx): self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(i), const.DELETE_, self.ENTRY_) def __iter__(self, current=0): """Iterates over all entries in this Attribute Returns data from one entry at a time until reaches the end. @note: Returned in entry-number order. """ while current <= self.max_idx(): if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current += 1 def __reversed__(self, current=None): """Iterates over all entries in this Attribute Returns data from one entry at a time, starting at end and going to beginning. @note: Returned in entry-number order. """ if current is None: current = self.max_idx() while current >= 0: if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current -= 1 # MASKED: __len__ function (lines 4287-4295) def __repr__(self): """Returns representation of an attribute Cannot return anything that can be eval'd to create a copy of the attribtute, so just wrap the informal representation in angle brackets. 
@return: all the data in this attribute @rtype: str """ return '<\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute This is an 'informal' representation in that it cannot be evaluated directly to create an L{Attr}. @return: all the data in this attribute @rtype: str """ if self._cdf_file._opened: return '\n'.join([str(item) for item in self]) else: if isinstance(self._name, str): return 'Attribute "{0}" in closed CDF {1}'.format( self._name, self._cdf_file.pathname) else: return 'Attribute "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self._cdf_file.pathname.decode('ascii')) def insert(self, index, data): """Insert an entry at a particular number Inserts entry at particular number while moving all subsequent entries to one entry number later. Does not close gaps. Parameters ========== index : int index where to put the new entry data : data for the new entry """ max_entry = self.max_idx() if index > max_entry: #Easy case self[index] = data return for i in range(max_entry, index - 1, -1): if self.has_entry(i+1): self.__delitem__(i+1) if self.has_entry(i): self.new(self.__getitem__(i), type=self.type(i), number=i+1) self[index] = data def append(self, data): """Add an entry to end of attribute Puts entry after last defined entry (does not fill gaps) Parameters ========== data : data for the new entry """ self[self.max_idx() + 1] = data def _call(self, *args, **kwargs): """Select this CDF and Attr and call the CDF internal interface @param args: Passed directly to the CDF library interface. @type args: various, see :py:mod:`ctypes`. @return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self._cdf_file._call( const.SELECT_, const.ATTR_, ctypes.c_long(self._cdf_file.attr_num(self._name)[0]), *args, **kwargs) def _entry_len(self, number): """Number of elements in an Entry @param number: number of Entry @type number: int @return: number of elements @rtype: int """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') count = ctypes.c_long(0) self._call( const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_NUMELEMS_, ctypes.byref(count)) return count.value def type(self, number, new_type=None): """Find or change the CDF type of a particular Entry number Parameters ========== number : int number of Entry to check or change Other Parameters ================ new_type type to change this Entry to, from :mod:`~pycdf.const`. Omit to only check type. Returns ======= out : int CDF variable type, see :mod:`~pycdf.const` Notes ===== If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) size = ctypes.c_long(self._entry_len(number)) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATASPEC_, new_type, size, ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') cdftype = ctypes.c_long(0) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATATYPE_, ctypes.byref(cdftype), ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') return cdftype.value def has_entry(self, number): """Check if this attribute has a particular Entry number Parameters ========== number : int number of Entry to check or change Returns ======= out : bool True if ``number`` is a valid entry number; False if not """ status = self._call(const.CONFIRM_, self.ENTRY_EXISTENCE_, ctypes.c_long(number), ignore=(const.NO_SUCH_ENTRY, )) return not status == const.NO_SUCH_ENTRY def max_idx(self): """Maximum index of Entries for this Attr Returns ======= out : int maximum Entry number """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_MAXENTRY_, ctypes.byref(count)) return count.value def new(self, data, type=None, number=None): """Create a new Entry in this Attribute .. note:: If ``number`` is provided and an Entry with that number already exists, it will be overwritten. Parameters ========== data data to put in the Entry Other Parameters ================ type : int type of the new Entry, from :mod:`~pycdf.const` (otherwise guessed from ``data``) number : int Entry number to write, default is lowest available number. """ if number is None: number = 0 while self.has_entry(number): number += 1 (dims, types, elements) = _Hyperslice.types( data, backward=self._cdf_file.backward) if type is None: #Guess based on other entries type = self._check_other_entries(types) if type is None and self.ENTRY_ == const.zENTRY_: #Try to match variable type vartype = self._cdf_file[number].type() if vartype in types: type = vartype if type is None: type = types[0] elif hasattr(type, 'value'): type = type.value self._write_entry(number, data, type, dims, elements) def number(self): """Find the attribute number for this attribute Returns ======= out : int attribute number """ no = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.ATTR_NUMBER_, self._name, ctypes.byref(no)) return no.value def global_scope(self): """Determine scope of this attribute. Returns ======= out : bool True if global (i.e. gAttr), False if zAttr """ return self._cdf_file.attr_num(self._name)[1] def rename(self, new_name): """Rename this attribute Renaming a zAttribute renames it for *all* zVariables in this CDF! 
Parameters ========== new_name : str the new name of the attribute """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_ATTR_NAME_LEN256: raise CDFError(const.BAD_ATTR_NAME) self._call(const.PUT_, const.ATTR_NAME_, enc_name) self._cdf_file.add_attr_to_cache( enc_name, *self._cdf_file.attr_num(self._name)) #still in cache del self._cdf_file._attr_info[self._name] self._name = enc_name def _get_entry(self, number): """Read an Entry associated with this L{Attr} @param number: number of Entry to return @type number: int @return: data from entry numbered L{number} @rtype: list or str """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') #Make a big enough buffer length = self._entry_len(number) cdftype = self.type(number) if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): buff = numpy.empty((), 'S{0}'.format(length), order='C') else: if not cdftype in lib.numpytypedict: raise CDFError(const.BAD_DATA_TYPE) buff = numpy.empty((length,), lib.numpytypedict[cdftype], order='C') buff = numpy.require(buff, requirements=('C', 'A', 'W')) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATA_, buff.ctypes.data_as(ctypes.c_void_p)) #decode if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): if str == bytes or self._raw: #Py2k, leave as bytes result = bytes(buff) else: #Py3k, make unicode result = str(numpy.char.array(buff).decode()) else: if not self._raw: if cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(buff) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(buff) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(buff) else: result = buff else: result = buff if length == 1: result = result[0] return result def _write_entry(self, number, data, cdf_type, dims, elements): """Write an Entry to this Attr. @param number: number of Entry to write @type number: int @param data: data to write @param cdf_type: the CDF type to write, from :py:mod:`pycdf.const` @param dims: dimensions of L{data} @type dims: list @param elements: number of elements in L{data}, 1 unless it is a string @type elements: int """ if len(dims) == 0: n_write = 1 else: n_write = dims[0] if cdf_type in (const.CDF_CHAR.value, const.CDF_UCHAR.value): data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.dtype('S' + str(elements))) n_write = elements elif cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data), except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) elif cdf_type in lib.numpytypedict: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=lib.numpytypedict[cdf_type]) else: raise CDFError(const.BAD_DATA_TYPE) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATA_, ctypes.c_long(cdf_type), ctypes.c_long(n_write), data.ctypes.data_as(ctypes.c_void_p)) def _delete(self): """Delete this Attribute Also deletes all Entries associated with it. 
""" self._call(const.DELETE_, const.ATTR_) self._cdf_file.clear_attr_from_cache(self._name) self._name = None class zAttr(Attr): """zAttribute for zVariables within a CDF. .. warning:: Because zAttributes are shared across all variables in a CDF, directly manipulating them may have unexpected consequences. It is safest to operate on zEntries via :class:`zAttrList`. .. note:: When accessing a zAttr, pyCDF exposes only the zEntry corresponding to the associated zVariable. See Also ======== :class:`Attr` """ ENTRY_ = const.zENTRY_ ENTRY_DATA_ = const.zENTRY_DATA_ SCOPE = const.VARIABLE_SCOPE ENTRY_EXISTENCE_ = const.zENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMzENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXzENTRY_ ENTRY_NUMELEMS_ = const.zENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.zENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.zENTRY_DATASPEC_ def insert(self, index, data): """Insert entry at particular index number Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError def append(self, index, data): """Add entry to end of attribute list Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError class gAttr(Attr): """Global Attribute for a CDF Represents a CDF attribute, providing access to the gEntries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. Normally accessed by providing a key to a :class:`gAttrList`: >>> attribute = cdffile.attrs['attribute_name'] >>> first_gentry = attribute[0] Each element of the list is a single gEntry of the appropriate type. The index to the elements is the gEntry number. A gEntry may be either a single string or a 1D array of numerical type. Entries of numerical type (everything but CDF_CHAR and CDF_UCHAR) with a single element are returned as scalars; multiple-element entries are returned as a list. No provision is made for accessing below the entry level; the whole list is returned at once (but Python's slicing syntax can be used to extract individual items from that list.) Multi-dimensional slicing is *not* supported; an entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry gEntries are *not* necessarily contiguous; a gAttribute may have an entry 0 and entry 2 without an entry 1. :meth:`~Attr.len` will return the *number* of gEntries; use :meth:`~Attr.max_idx` to find the highest defined gEntry number and :meth:`~Attr.has_entry` to determine if a particular gEntry number exists. Iterating over all entries is also supported:: >>> entrylist = [entry for entry in attribute] Deleting gEntries will leave a "hole": >>> attribute[0:3] = [1, 2, 3] >>> del attribute[1] >>> attribute.has_entry(1) False >>> attribute.has_entry(2) True >>> print attribute[0:3] [1, None, 3] Multi-element slices over nonexistent gEntries will return ``None`` where no entry exists. Single-element indices for nonexistent gEntries will raise ``IndexError``. Assigning ``None`` to a gEntry will delete it. 
When assigning to a gEntry, the type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing gEntry of the same number in this gAttribute #. other gEntries in this gAttribute #. data-matching constraints described in :meth:`CDF.new`. See Also ======== :class:`Attr` """ ENTRY_ = const.gENTRY_ ENTRY_DATA_ = const.gENTRY_DATA_ SCOPE = const.GLOBAL_SCOPE ENTRY_EXISTENCE_ = const.gENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMgENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXgENTRY_ ENTRY_NUMELEMS_ = const.gENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.gENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.gENTRY_DATASPEC_ class AttrList(MutableMapping): """Object representing a list of attributes. .. warning:: This class should not be used directly, but only via its subclasses, :class:`gAttrList` and :class:`zAttrList`. Methods listed here are safe to use from the subclasses. .. autosummary:: ~AttrList.clone ~AttrList.copy ~AttrList.from_dict ~AttrList.new ~AttrList.rename .. automethod:: clone .. automethod:: copy .. automethod:: from_dict .. automethod:: new .. automethod:: rename """ def __init__(self, cdf_file, special_entry=None): """Initialize the attribute collection @param cdf_file: CDF these attributes are in @type cdf_file: :py:class:`pycdf.CDF` @param special_entry: callable which returns a "special" entry number, used to limit results for zAttrs to those which match the zVar (i.e. the var number) @type special_entry: callable """ self._cdf_file = cdf_file self.special_entry = special_entry def __getitem__(self, name): """Find an Attribute by name @param name: name of the Attribute to return @type name: str @return: attribute named L{name} @rtype: L{Attr} @raise KeyError: if there is no attribute named L{name} @raise CDFError: other errors in CDF library """ try: attrib = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attrib.global_scope() != self.global_scope: raise KeyError(name + ': no ' + self.attr_name + ' by that name.') return attrib def __setitem__(self, name, data): """Create an Attribute or change its entries @param name: name of Attribute to change @type name: str @param data: Entries to populate this Attribute with. Any existing Entries will be deleted! Another C{Attr} may be specified, in which case all its entries are copied. @type data: scalar, list, or L{Attr} """ if isinstance(data, AttrList): if name in self: del self[name] attr = self._get_or_create(name) for entryno in range(data.max_idx()): if data.has_entry(entryno): attr.new(data[entryno], data.type(entryno), entryno) else: attr = self._get_or_create(name) if isinstance(data, str_classes): data = [data] else: try: junk = len(data) except TypeError: data = [data] attr[:] = data del attr[len(data):] def __delitem__(self, name): """Delete an Attribute (and all its entries) @param name: name of Attribute to delete @type name: str """ try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) attr._delete() def __iter__(self, current=0): """Iterates over all Attr in this CDF or variable Returns name of one L{Attr} at a time until reaches the end. @note: Returned in number order. 
""" count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) while current < count.value: candidate = self.AttrType(self._cdf_file, current) if candidate.global_scope() == self.global_scope: if self.special_entry is None or \ candidate.has_entry(self.special_entry()): if str == bytes: value = yield(candidate._name) else: value = yield(candidate._name.decode()) if value != None: current = self[value].number() current += 1 def __repr__(self): """Returns representation of attribute list Cannot return anything that can be eval'd to create a copy of the list, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<' + self.__class__.__name__ + ':\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute list This is an 'informal' representation in that it cannot be evaluated directly to create an L{AttrList}. @return: all the data in this list of attributes @rtype: str """ if self._cdf_file._opened: return '\n'.join([key + ': ' + ( ('\n' + ' ' * (len(key) + 2)).join( [str(value[i]) + ' [' + lib.cdftypenames[value.type(i)] + ']' for i in range(value.max_idx() + 1) if value.has_entry(i)]) if isinstance(value, Attr) else str(value) + ' [' + lib.cdftypenames[self.type(key)] + ']' ) for (key, value) in sorted(self.items())]) else: if isinstance(self._cdf_file.pathname, str): return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname) else: return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname.decode('ascii')) def clone(self, master, name=None, new_name=None): """ Clones another attribute list, or one attribute from it, into this list. Parameters ========== master : AttrList the attribute list to copy from. This can be any dict-like object. Other Parameters ================ name : str (optional) name of attribute to clone (default: clone entire list) new_name : str (optional) name of the new attribute, default ``name`` """ if name is None: self._clone_list(master) else: self._clone_attr(master, name, new_name) def copy(self): """ Create a copy of this attribute list Returns ======= out : dict copy of the entries for all attributes in this list """ return dict((key, value[:] if isinstance(value, Attr) else value) for (key, value) in self.items()) def new(self, name, data=None, type=None): """ Create a new Attr in this AttrList Parameters ========== name : str name of the new Attribute Other Parameters ================ data data to put into the first entry in the new Attribute type CDF type of the first entry from :mod:`~pycdf.const`. Only used if data are specified. Raises ====== KeyError : if the name already exists in this list """ if name in self: raise KeyError(name + ' already exists.') #A zAttr without an Entry in this zVar will be a "get" not "create" attr = self._get_or_create(name) if data is not None: if self.special_entry is None: attr.new(data, type) else: attr.new(data, type, self.special_entry()) def rename(self, old_name, new_name): """ Rename an attribute in this list Renaming a zAttribute renames it for *all* zVariables in this CDF! Parameters ========== old_name : str the current name of the attribute new_name : str the new name of the attribute """ AttrList.__getitem__(self, old_name).rename(new_name) def from_dict(self, in_dict): """ Fill this list of attributes from a dictionary .. deprecated:: 0.1.5 Use :meth:`~pycdf.AttrList.clone` instead; it supports cloning from dictionaries. 
Parameters ========== in_dict : dict Attribute list is populated entirely from this dictionary; all existing attributes are deleted. """ warnings.warn("from_dict is deprecated and will be removed. Use clone.", DeprecationWarning) for k in in_dict: self[k] = in_dict[k] for k in list(self): if not k in in_dict: del self[k] def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{AttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name self[new_name] = master[name] def _clone_list(self, master): """Clones this attribute list from another @param master: the attribute list to copy from @type master: L{AttrList} """ for name in master: self._clone_attr(master, name) for name in list(self): #Can't iterate over a list we're changing if not name in master: del self[name] def _get_or_create(self, name): """Retrieve L{Attr} or create it if it doesn't exist @param name: name of the attribute to look up or create @type name: str @return: attribute with this name @rtype: L{Attr} """ attr = None try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status != const.NO_SUCH_ATTR: raise if attr is None: attr = self.AttrType(self._cdf_file, name, True) elif attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) return attr class gAttrList(AttrList): """ Object representing *all* the gAttributes in a CDF. Normally accessed as an attribute of an open :class:`CDF`: >>> global_attribs = cdffile.attrs Appears as a dictionary: keys are attribute names; each value is an attribute represented by a :class:`gAttr` object. To access the global attribute TEXT: >>> text_attr = cdffile.attrs['TEXT'] See Also ======== :class:`AttrList` """ AttrType = gAttr attr_name = 'gAttribute' global_scope = True def __len__(self): """ Number of gAttributes in this CDF Returns ======= out : int number of gAttributes in the CDF """ count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMgATTRS_, ctypes.byref(count)) return count.value class zAttrList(AttrList): """Object representing *all* the zAttributes in a zVariable. Normally accessed as an attribute of a :class:`Var` in an open CDF: >>> epoch_attribs = cdffile['Epoch'].attrs Appears as a dictionary: keys are attribute names, values are the value of the zEntry associated with the appropriate zVariable. Each vAttribute in a CDF may only have a *single* entry associated with each variable. The entry may be a string, a single numerical value, or a series of numerical values. Entries with multiple values are returned as an entire list; direct access to the individual elements is not possible. Example: finding the first dependency of (ISTP-compliant) variable ``Flux``: >>> print cdffile['Flux'].attrs['DEPEND_0'] zAttributes are shared among zVariables, one zEntry allowed per zVariable. (pyCDF hides this detail.) Deleting the last zEntry for a zAttribute will delete the underlying zAttribute. zEntries are created and destroyed by the usual dict methods on the zAttrlist: >>> epoch_attribs['new_entry'] = [1, 2, 4] #assign a list to new zEntry >>> del epoch_attribs['new_entry'] #delete the zEntry The type of the zEntry is guessed from data provided. 
The type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing zEntry corresponding to this zVar #. other zEntries in this zAttribute #. the type of this zVar #. data-matching constraints described in :py:meth:`CDF.new` See Also ======== :class:`AttrList` """ AttrType = zAttr attr_name = 'zAttribute' global_scope = False def __init__(self, zvar): """Initialize the attribute collection @param zvar: zVariable these attributes are in @param zvar: :py:class:`pycdf.Var` """ super(zAttrList, self).__init__(zvar.cdf_file, zvar._num) self._zvar = zvar def __getitem__(self, name): """Find an zEntry by name @param name: name of the zAttribute to return @type name: str @return: attribute named L{name} @rtype: L{zAttr} @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if attrib.has_entry(zvar_num): attrib._raw = self._zvar._raw return attrib[zvar_num] else: raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) def __delitem__(self, name): """Delete an zEntry by name @param name: name of the zEntry to delete @type name: str @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library @note: If this is the only remaining entry, the Attribute will be deleted. """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(str(name) + ': no such attribute for variable ' + str(self._zvar._name)) del attrib[zvar_num] if len(attrib) == 0: attrib._delete() def __setitem__(self, name, data): """Sets a zEntry by name The type of the zEntry is guessed from L{data}. The type is chosen to match the data; subject to that constraint, it will try to match (in order): 1. existing zEntry corresponding to this zVar 2. other zEntries in this zAttribute 3. the type of this zVar 4. data-matching constraints described in L{_Hyperslice.types} @param name: name of zAttribute; zEntry for this zVariable will be set in zAttribute by this name @type name: str @raise CDFError: errors in CDF library @raise ValueError: if unable to find a valid CDF type matching L{data}, or if L{data} is the wrong dimensions. """ try: attr = super(zAttrList, self).__getitem__(name) except KeyError: attr = zAttr(self._cdf_file, name, True) attr._raw = self._zvar._raw attr[self._zvar._num()] = data def __len__(self): """Number of zAttributes in this variable @return: number of zAttributes in the CDF which have entries for this variable. @rtype: int """ length = 0 count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) current = 0 while current < count.value: candidate = zAttr(self._cdf_file, current) if not candidate.global_scope(): if candidate.has_entry(self._zvar._num()): length += 1 current += 1 return length def type(self, name, new_type=None): """Find or change the CDF type of a zEntry in this zVar @param name: name of the zAttr to check or change @type name: str @param new_type: type to change it to, see :py:mod:`pycdf.const` @type new_type: ctypes.c_long @return: CDF variable type, see :py:mod:`pycdf.const` @rtype: int @note: If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) return attrib.type(zvar_num, new_type) def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{zAttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name if new_name in self: del self[new_name] self.new(new_name, master[name], master.type(name) if hasattr(master, 'type') else None)
def __len__(self):
        """Number of Entries for this Attr. NOT same as max Entry number.

        @return: Number of Entries
        @rtype: int
        """
        count = ctypes.c_long(0)
        self._call(const.GET_, self.ATTR_NUMENTRIES_, ctypes.byref(count))
        return count.value
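# A minimal usage sketch (hypothetical attribute contents; assumes an open CDF
# file `cdf`): because Entries may be sparse, len() counts only the Entries
# that actually exist, while max_idx() reports the highest defined Entry number.
#
#     attr = cdf.attrs['TEXT']   # suppose only gEntries 0 and 2 are defined
#     len(attr)                  # -> 2 (number of existing Entries)
#     attr.max_idx()             # -> 2 (highest defined Entry number)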
4287
4295
#!/usr/bin/env python # -*- coding: utf-8 -*- """ das developers note: This a is modification of the original SpacePy pycdf package. All refereneces to the greater spacepy package have been removed to create a small standalone module. --cwp 2018-10-18 The libcdf.so location code has been changed to find the version installed in anaconda. --cwp 2020-04-06 This package provides a Python interface to the Common Data Format (CDF) library used for many NASA missions, available at http://cdf.gsfc.nasa.gov/. It is targeted at Python 2.6+ and should work without change on either Python 2 or Python 3. The interface is intended to be 'pythonic' rather than reproducing the C interface. To open or close a CDF and access its variables, see the :class:`CDF` class. Accessing data within the variables is via the :class:`Var` class. The :data:`lib` object provides access to some routines that affect the functionality of the library in general. The :mod:`~pycdf.const` module contains constants useful for accessing the underlying library. Authors: Jon Niehof Institution: University of New Hampshire Contact: [email protected] Copyright 2010-2015 Los Alamos National Security, LLC. """ __contact__ = 'Jon Niehof, [email protected]' try: from collections.abc import MutableMapping, MutableSequence except ImportError: from collections import MutableMapping, MutableSequence import ctypes import ctypes.util import datetime import operator import os import os.path import shutil import sys import tempfile import warnings import weakref import numpy import numpy.ma #Import const AFTER library loaded, so failed load doesn't leave half-imported #from . import const try: str_classes = (str, bytes, unicode) except NameError: str_classes = (str, bytes) class Library(object): """ Abstraction of the base CDF C library and its state. Not normally intended for end-user use. An instance of this class is created at package load time as the :data:`~pycdf.lib` variable, providing access to the underlying C library if necessary. The CDF library itself is described in section 2.1 of the CDF user's guide, as well as the CDF C reference manual. Calling the C library directly requires knowledge of :mod:`ctypes`. Instantiating this object loads the C library, see :doc:`/pycdf` docs for details. .. autosummary:: ~Library.call ~Library.check_status ~Library.datetime_to_epoch ~Library.datetime_to_epoch16 ~Library.datetime_to_tt2000 ~Library.epoch_to_datetime ~Library.epoch_to_epoch16 ~Library.epoch_to_num ~Library.epoch_to_tt2000 ~Library.epoch16_to_datetime ~Library.epoch16_to_epoch ~Library.epoch16_to_tt2000 ~Library.set_backward supports_int8 ~Library.tt2000_to_datetime ~Library.tt2000_to_epoch ~Library.tt2000_to_epoch16 v_datetime_to_epoch v_datetime_to_epoch16 v_datetime_to_tt2000 v_epoch_to_datetime v_epoch_to_tt2000 v_epoch16_to_datetime v_epoch16_to_tt2000 v_tt2000_to_datetime v_tt2000_to_epoch v_tt2000_to_epoch16 libpath version .. automethod:: call .. automethod:: check_status .. automethod:: datetime_to_epoch .. automethod:: datetime_to_epoch16 .. automethod:: datetime_to_tt2000 .. automethod:: epoch_to_datetime .. automethod:: epoch_to_epoch16 .. automethod:: epoch_to_num .. automethod:: epoch_to_tt2000 .. automethod:: epoch16_to_datetime .. automethod:: epoch16_to_epoch .. automethod:: epoch16_to_tt2000 .. automethod:: set_backward .. attribute:: supports_int8 True if this library supports INT8 and TIME_TT2000 types; else False. .. automethod:: tt2000_to_datetime .. automethod:: tt2000_to_epoch .. 
automethod:: tt2000_to_epoch16 .. method:: v_datetime_to_epoch(datetime) A vectorized version of :meth:`datetime_to_epoch` which takes a numpy array of datetimes as input and returns an array of epochs. .. method:: v_datetime_to_epoch16(datetime) A vectorized version of :meth:`datetime_to_epoch16` which takes a numpy array of datetimes as input and returns an array of epoch16. .. method:: v_datetime_to_tt2000(datetime) A vectorized version of :meth:`datetime_to_tt2000` which takes a numpy array of datetimes as input and returns an array of TT2000. .. method:: v_epoch_to_datetime(epoch) A vectorized version of :meth:`epoch_to_datetime` which takes a numpy array of epochs as input and returns an array of datetimes. .. method:: v_epoch_to_tt2000(epoch) A vectorized version of :meth:`epoch_to_tt2000` which takes a numpy array of epochs as input and returns an array of tt2000s. .. method:: v_epoch16_to_datetime(epoch0, epoch1) A vectorized version of :meth:`epoch16_to_datetime` which takes a numpy array of epoch16 as input and returns an array of datetimes. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_epoch16_to_tt2000(epoch16) A vectorized version of :meth:`epoch16_to_tt2000` which takes a numpy array of epoch16 as input and returns an array of tt2000s. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_tt2000_to_datetime(tt2000) A vectorized version of :meth:`tt2000_to_datetime` which takes a numpy array of tt2000 as input and returns an array of datetimes. .. method:: v_tt2000_to_epoch(tt2000) A vectorized version of :meth:`tt2000_to_epoch` which takes a numpy array of tt2000 as input and returns an array of epochs. .. method:: v_tt2000_to_epoch16(tt2000) A vectorized version of :meth:`tt2000_to_epoch16` which takes a numpy array of tt2000 as input and returns an array of epoch16. .. attribute:: libpath The path where pycdf found the CDF C library, potentially useful in debugging. If this contains just the name of a file (with no path information), then the system linker found the library for pycdf. On Linux, ``ldconfig -p`` may be useful for displaying the system's library resolution. .. attribute:: version Version of the CDF library, (version, release, increment, subincrement) """ def __init__(self, libpath=None, library=None): """Load the CDF C library. Searches for the library in the order: 1. Appropriately-named file in CDF_LIB 2. Appropriately-named file in CDF_BASE 3. Standard library search path @raise CDFError: BAD_DATA_TYPE if can't map types properly """ if not 'CDF_TMP' in os.environ: os.environ['CDF_TMP'] = tempfile.gettempdir() if not library: if not libpath: self.libpath, self._library = self._find_lib() if self._library is None: raise Exception(( 'Cannot load CDF C library; checked {0}. 
' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(self.libpath))) else: self._library = ctypes.CDLL(libpath) self.libpath = libpath else: self._library = library self.libpath = libpath self._library.CDFlib.restype = ctypes.c_long #commonly used, so set it up here self._library.EPOCHbreakdown.restype = ctypes.c_long self._library.computeEPOCH.restype = ctypes.c_double self._library.computeEPOCH.argtypes = [ctypes.c_long] * 7 self._library.computeEPOCH16.restype = ctypes.c_double self._library.computeEPOCH16.argtypes = [ctypes.c_long] * 10 + \ [ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDFsetFileBackward'): self._library.CDFsetFileBackward.restype = None self._library.CDFsetFileBackward.argtypes = [ctypes.c_long] #Map old name to the 3.7.1+ name if not hasattr(self._library, 'computeTT2000') \ and hasattr(self._library, 'CDF_TT2000_from_UTC_parts'): self._library.computeTT2000 \ = self._library.CDF_TT2000_from_UTC_parts if hasattr(self._library, 'computeTT2000'): self._library.computeTT2000.restype = ctypes.c_longlong self._library.computeTT2000.argtypes = \ [ctypes.c_double] *9 #Map old name to the 3.7.1+ name if not hasattr(self._library, 'breakdownTT2000') \ and hasattr(self._library, 'CDF_TT2000_to_UTC_parts'): self._library.breakdownTT2000 \ = self._library.CDF_TT2000_to_UTC_parts if hasattr(self._library, 'breakdownTT2000'): self._library.breakdownTT2000.restype = None self._library.breakdownTT2000.argtypes = \ [ctypes.c_longlong] + [ctypes.POINTER(ctypes.c_double)] * 9 if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH'): self._library.CDF_TT2000_to_UTC_EPOCH.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH.argtypes = [ctypes.c_longlong] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH'): self._library.CDF_TT2000_from_UTC_EPOCH.restype = ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH.argtypes = [ctypes.c_double] if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH16'): self._library.CDF_TT2000_to_UTC_EPOCH16.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH16.argtypes = \ [ctypes.c_longlong, ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH16'): self._library.CDF_TT2000_from_UTC_EPOCH16.restype = \ ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH16.argtypes = \ [ctypes.POINTER(ctypes.c_double * 2)] #Get CDF version information ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) sub = ctypes.c_char(b' ') self.call(const.GET_, const.LIB_VERSION_, ctypes.byref(ver), const.GET_, const.LIB_RELEASE_, ctypes.byref(rel), const.GET_, const.LIB_INCREMENT_, ctypes.byref(inc), const.GET_, const.LIB_subINCREMENT_, ctypes.byref(sub)) ver = ver.value rel = rel.value inc = inc.value sub = sub.value self.version = (ver, rel, inc, sub) self._del_middle_rec_bug = ver < 3 or (ver == 3 and (rel < 4 or (rel == 4 and inc < 1))) self.supports_int8 = (ver > 3 or (ver == 3 and rel >=4)) self.cdftypenames = {const.CDF_BYTE.value: 'CDF_BYTE', const.CDF_CHAR.value: 'CDF_CHAR', const.CDF_INT1.value: 'CDF_INT1', const.CDF_UCHAR.value: 'CDF_UCHAR', const.CDF_UINT1.value: 'CDF_UINT1', const.CDF_INT2.value: 'CDF_INT2', const.CDF_UINT2.value: 'CDF_UINT2', const.CDF_INT4.value: 'CDF_INT4', const.CDF_UINT4.value: 'CDF_UINT4', const.CDF_INT8.value: 'CDF_INT8', const.CDF_FLOAT.value: 'CDF_FLOAT', const.CDF_REAL4.value: 'CDF_REAL4', const.CDF_DOUBLE.value: 'CDF_DOUBLE', const.CDF_REAL8.value: 'CDF_REAL8', const.CDF_EPOCH.value: 'CDF_EPOCH', 
const.CDF_EPOCH16.value: 'CDF_EPOCH16', const.CDF_TIME_TT2000.value: 'CDF_TIME_TT2000', } self.numpytypedict = {const.CDF_BYTE.value: numpy.int8, const.CDF_CHAR.value: numpy.int8, const.CDF_INT1.value: numpy.int8, const.CDF_UCHAR.value: numpy.uint8, const.CDF_UINT1.value: numpy.uint8, const.CDF_INT2.value: numpy.int16, const.CDF_UINT2.value: numpy.uint16, const.CDF_INT4.value: numpy.int32, const.CDF_UINT4.value: numpy.uint32, const.CDF_INT8.value: numpy.int64, const.CDF_FLOAT.value: numpy.float32, const.CDF_REAL4.value: numpy.float32, const.CDF_DOUBLE.value: numpy.float64, const.CDF_REAL8.value: numpy.float64, const.CDF_EPOCH.value: numpy.float64, const.CDF_EPOCH16.value: numpy.dtype((numpy.float64, 2)), const.CDF_TIME_TT2000.value: numpy.int64, } self.timetypes = [const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value] if not self.supports_int8: del self.cdftypenames[const.CDF_INT8.value] del self.numpytypedict[const.CDF_INT8.value] del self.cdftypenames[const.CDF_TIME_TT2000.value] del self.numpytypedict[const.CDF_TIME_TT2000.value] elif sys.platform.startswith('linux') \ and os.uname()[4].startswith('arm') \ and hasattr(self._library, 'computeTT2000') \ and self._library.computeTT2000( 2010, 1, 1, 0, 0, 0, 0, 0, 0) != 315576066184000000: #TT2000 call failed, so probably need to type-pun #double arguments to variadic functions. #Calling convention for non-variadic functions with floats #is unique, but convention for ints is same as variadic. #So type-pun arguments to integers to force that calling #convention. if ctypes.sizeof(ctypes.c_longlong) != \ ctypes.sizeof(ctypes.c_double): warnings.warn('ARM with unknown type sizes; ' 'TT2000 functions will not work.') else: self._library.computeTT2000.argtypes = \ [ctypes.c_longlong] * 9 c_ll_p = ctypes.POINTER(ctypes.c_longlong) if self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( 2010)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) != 315576066184000000: warnings.warn('ARM with unknown calling convention; ' 'TT2000 functions will not work.') self.datetime_to_tt2000 = self._datetime_to_tt2000_typepunned v_epoch16_to_datetime = numpy.frompyfunc( self.epoch16_to_datetime, 2, 1) self.v_epoch16_to_datetime = \ lambda x: v_epoch16_to_datetime(x[..., 0], x[..., 1]) self.v_epoch_to_datetime = numpy.frompyfunc( self.epoch_to_datetime, 1, 1) self.v_tt2000_to_datetime = numpy.frompyfunc( self.tt2000_to_datetime, 1, 1) self.v_datetime_to_epoch = numpy.vectorize( self.datetime_to_epoch, otypes=[numpy.float64]) v_datetime_to_epoch16 = numpy.frompyfunc( self.datetime_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. 
We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_datetime_to_epoch16(x): retval = numpy.require(v_datetime_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_datetime_to_epoch16 = _v_datetime_to_epoch16 self.v_datetime_to_tt2000 = numpy.vectorize( self.datetime_to_tt2000, otypes=[numpy.int64]) self.v_epoch_to_tt2000 = numpy.vectorize( self.epoch_to_tt2000, otypes=[numpy.int64]) self.v_tt2000_to_epoch = numpy.vectorize( self.tt2000_to_epoch, otypes=[numpy.float64]) v_epoch16_to_tt2000 = numpy.frompyfunc( self.epoch16_to_tt2000, 2, 1) self.v_epoch16_to_tt2000 = \ lambda x: v_epoch16_to_tt2000(x[..., 0], x[..., 1]) v_tt2000_to_epoch16 = numpy.frompyfunc( self.tt2000_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_tt2000_to_epoch16(x): retval = numpy.require(v_tt2000_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_tt2000_to_epoch16 = _v_tt2000_to_epoch16 if not self.supports_int8: self.datetime_to_tt2000 = self._bad_tt2000 self.tt2000_to_datetime = self._bad_tt2000 self.v_datetime_to_tt2000 = self._bad_tt2000 self.v_tt2000_to_datetime = self._bad_tt2000 self.epoch_to_tt2000 = self._bad_tt2000 self.v_epoch_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch = self._bad_tt2000 self.v_tt2000_to_epoch = self._bad_tt2000 self.epoch_16_to_tt2000 = self._bad_tt2000 self.v_epoch16_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch16 = self._bad_tt2000 self.v_tt2000_to_epoch16 = self._bad_tt2000 #Default to V2 CDF self.set_backward(True) @staticmethod def _find_lib(): """ Search for the CDF library Searches in likely locations for CDF libraries and attempts to load them. Stops at first successful load and, if fails, reports all the files that were tried as libraries. Returns ======= out : tuple This is either (path to library, loaded library) or, in the event of failure, (None, list of libraries tried) """ failed = [] for libpath in Library._lib_paths(): try: lib = ctypes.CDLL(libpath) except: failed.append(libpath) else: return libpath, lib return (failed, None) @staticmethod def _lib_paths(): """Find candidate paths for the CDF library Does not check that the library is actually in any particular directory, just returns a list of possible locations, in priority order. Returns ======= out : generator of str paths that look like the CDF library """ #What the library might be named names = { 'win32': ['cdf.dll'], 'darwin': ['libcdf.dylib', 'cdf.dylib', 'libcdf.so'], 'linux2': ['libcdf.so'], 'linux': ['libcdf.so'], } names = names.get(sys.platform, ['libcdf.so']) #All existing CDF-library-like paths within a directory search_dir = lambda x: \ [os.path.join(x, fname) for fname in names if os.path.exists(os.path.join(x, fname))] # Only use anaconda locations... # Defined during builds ... if 'PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['PREFIX'], 'lib')): yield p # defined when conda is activated ... 
if 'CONDA_PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'lib')): yield p # Special subdirectory for anaconda unix packages on windows if 'LIBRARY_BIN' in os.environ: for p in search_dir(os.environ['LIBRARY_BIN']): yield p ctypespath = ctypes.util.find_library( 'cdf.dll' if sys.platform == 'win32' else 'cdf') if ctypespath: yield ctypespath def check_status(self, status, ignore=()): """ Raise exception or warning based on return status of CDF call Parameters ========== status : int status returned by the C library Other Parameters ================ ignore : sequence of ctypes.c_long CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. (Default none). Raises ====== CDFError : if status < CDF_WARN, indicating an error Warns ===== CDFWarning : if CDF_WARN <= status < CDF_OK, indicating a warning. Returns ======= out : int status (unchanged) """ if status == const.CDF_OK or status in ignore: return status if status < const.CDF_WARN: raise CDFError(status) else: warning = CDFWarning(status) warning.warn() return status def call(self, *args, **kwargs): """ Call the CDF internal interface Passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with :meth:`check_status`. Terminal NULL is automatically added to args. Parameters ========== args : various, see :mod:`ctypes` Passed directly to the CDF library interface. Useful constants are defined in the :mod:`~pycdf.const` module. Other Parameters ================ ignore : sequence of CDF statuses sequence of CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. Returns ======= out : int CDF status from the library Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ if 'ignore' in kwargs: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) ), kwargs['ignore']) else: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) )) def set_backward(self, backward=True): """ Set backward compatibility mode for new CDFs Unless backward compatible mode is set, CDF files created by the version 3 library can not be read by V2. Parameters ========== backward : boolean Set backward compatible mode if True; clear it if False. Raises ====== ValueError : if backward=False and underlying CDF library is V2 """ if self.version[0] < 3: if not backward: raise ValueError( 'Cannot disable backward-compatible mode for CDF version 2.') else: return self._library.CDFsetFileBackward(const.BACKWARDFILEon if backward else const.BACKWARDFILEoff) def epoch_to_datetime(self, epoch): """ Converts a CDF epoch value to a datetime Parameters ========== epoch : float epoch value from CDF Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
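Examples
========
A minimal round-trip sketch using the module-level ``lib`` object; the
date is illustrative only and assumes the CDF C library loaded normally:

>>> import datetime
>>> import pycdf
>>> epoch = pycdf.lib.datetime_to_epoch(datetime.datetime(2010, 1, 1))
>>> pycdf.lib.epoch_to_datetime(epoch)
datetime.datetime(2010, 1, 1, 0, 0)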
See Also ======== v_epoch_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) self._library.EPOCHbreakdown(ctypes.c_double(epoch), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999000) else: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, msec.value * 1000) def datetime_to_epoch(self, dt): """ Converts a Python datetime to a CDF Epoch value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : float epoch corresponding to dt See Also ======== v_datetime_to_epoch """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) micro = dt.microsecond % 1000 if micro >= 500 and dt.year < 9999: dt += datetime.timedelta(0, 0, 1000) return self._library.computeEPOCH(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000)) def epoch16_to_datetime(self, epoch0, epoch1): """ Converts a CDF epoch16 value to a datetime .. note:: The call signature has changed since SpacePy 0.1.2. Formerly this method took a single argument with two values; now it requires two arguments (one for each value). To convert existing code, replace ``epoch16_to_datetime(epoch)`` with ``epoch16_to_datetime(*epoch)``. Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. See Also ======== v_epoch16_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) usec = ctypes.c_long(0) nsec = ctypes.c_long(0) psec = ctypes.c_long(0) self._library.EPOCH16breakdown((ctypes.c_double * 2)(epoch0, epoch1), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec), ctypes.byref(psec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) micro = int(float(msec.value) * 1000 + float(usec.value) + float(nsec.value) / 1000 + float(psec.value) / 1e6 + 0.5) if micro < 1000000: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_epoch16(self, dt): """ Converts a Python datetime to a CDF Epoch16 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : list of float epoch16 corresponding to dt See Also ======== v_datetime_to_epoch16 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) #Default to "illegal epoch" epoch16 = (ctypes.c_double * 2)(-1., -1.) 
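#computeEPOCH16 fills the two-element double array passed as its last
#argument in place and returns a nonzero status on failure, which is why
#the call below is checked and the illegal-epoch pair returned on error.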
if self._library.computeEPOCH16(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0, 0, epoch16): return (-1., -1.) #Failure, so illegal epoch return (epoch16[0], epoch16[1]) def epoch_to_epoch16(self, epoch): """ Converts a CDF EPOCH to a CDF EPOCH16 value Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : (double, double) EPOCH16 corresponding to epoch """ e = numpy.require(epoch, numpy.float64) s = numpy.trunc(e / 1000.0) #ugly numpy stuff, probably a better way.... res = numpy.hstack((s, (e - s * 1000.0) * 1e9)) if len(res) <= 2: return res newshape = list(res.shape[0:-2]) newshape.append(res.shape[-1] // 2) newshape.append(2) return numpy.rollaxis(res.reshape(newshape), -1, -2) def epoch_to_num(self, epoch): """ Convert CDF EPOCH to matplotlib number. Same output as :func:`~matplotlib.dates.date2num` and useful for plotting large data sets without converting the times through datetime. Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : double Floating point number representing days since 0001-01-01. """ #date2num day 1 is 1/1/1 00UT #epoch 1/1/1 00UT is 31622400000.0 (millisecond) return (epoch - 31622400000.0) / (24 * 60 * 60 * 1000.0) + 1.0 def epoch16_to_epoch(self, epoch16): """ Converts a CDF EPOCH16 to a CDF EPOCH value Parameters ========== epoch16 : (double, double) EPOCH16 to convert. Lists and numpy arrays are acceptable. LAST dimension should be 2: the two pairs of EPOCH16 Returns ======= out : double EPOCH corresponding to epoch16 """ e = numpy.require(epoch16, numpy.float64) return e[..., 0] * 1000.0 + numpy.round(e[..., 1] / 1e9) def tt2000_to_datetime(self, tt2000): """ Converts a CDF TT2000 value to a datetime .. note:: Although TT2000 values support leapseconds, Python's datetime object does not. Any times after 23:59:59.999999 will be truncated to 23:59:59.999999. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
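Examples
========
A minimal round-trip sketch (assumes a CDF library recent enough for
TT2000 support; the date is illustrative only):

>>> import datetime
>>> import pycdf
>>> t = pycdf.lib.datetime_to_tt2000(datetime.datetime(2010, 1, 1))
>>> pycdf.lib.tt2000_to_datetime(t)
datetime.datetime(2010, 1, 1, 0, 0)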
See Also ======== v_tt2000_to_datetime """ yyyy = ctypes.c_double(0) mm = ctypes.c_double(0) dd = ctypes.c_double(0) hh = ctypes.c_double(0) min = ctypes.c_double(0) sec = ctypes.c_double(0) msec = ctypes.c_double(0) usec = ctypes.c_double(0) nsec = ctypes.c_double(0) self._library.breakdownTT2000( ctypes.c_longlong(tt2000), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) sec = int(sec.value) if sec >= 60: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), 59, 999999) micro = int(msec.value * 1000 + usec.value + nsec.value / 1000 + 0.5) if micro < 1000000: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_tt2000(self, dt): """ Converts a Python datetime to a CDF TT2000 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0) def _datetime_to_tt2000_typepunned(self, dt): """ Converts a Python datetime to a CDF TT2000 value Typepunned version that passes doubles as longlongs, to get around ARM calling convention oddness. Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ c_ll_p = ctypes.POINTER(ctypes.c_longlong) if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( dt.year)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.month)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.day)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.hour)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.minute)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.second)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond // 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond % 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) def epoch_to_tt2000(self, epoch): """ Converts a CDF EPOCH to a CDF TT2000 value Parameters ========== epoch : double EPOCH to convert Returns ======= out : int tt2000 corresponding to epoch See Also ======== v_epoch_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH(epoch) def tt2000_to_epoch(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH .. note:: Although TT2000 values support leapseconds, CDF EPOCH values do not. 
Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double EPOCH corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch """ return self._library.CDF_TT2000_to_UTC_EPOCH(tt2000) def epoch16_to_tt2000(self, epoch0, epoch1): """ Converts a CDF epoch16 value to TT2000 .. note:: Because TT2000 does not support picoseconds, the picoseconds value in epoch is ignored (i.e., truncated.) Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : long TT2000 corresponding to epoch. See Also ======== v_epoch16_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH16( (ctypes.c_double * 2)(epoch0, epoch1)) def tt2000_to_epoch16(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH16 .. note:: Although TT2000 values support leapseconds, CDF EPOCH16 values do not. Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double, double EPOCH16 corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch16 """ #Default to "illegal epoch" if isn't populated epoch16 = (ctypes.c_double * 2)(-1., -1.) if self._library.CDF_TT2000_to_UTC_EPOCH16(tt2000, epoch16): return (-1., -1.) #Failure; illegal epoch return (epoch16[0], epoch16[1]) def _bad_tt2000(*args, **kwargs): """Convenience function for complaining that TT2000 not supported""" raise NotImplementedError( 'TT2000 functions require CDF library 3.4.0 or later') def download_library(): """Download and install the CDF library""" if sys.platform != 'win32': raise NotImplementedError( 'CDF library install only supported on Windows') try: import html.parser as HTMLParser except ImportError: import HTMLParser #https://stackoverflow.com/questions/1713038/super-fails-with-error-typeerror-argument-1-must-be-type-not-classobj class LinkParser(HTMLParser.HTMLParser, object): def __init__(self, *args, **kwargs): self.links_found = [] super(LinkParser, self).__init__(*args, **kwargs) def handle_starttag(self, tag, attrs): if tag != 'a' or attrs[0][0] != 'href': return self.links_found.append(attrs[0][1]) import re import subprocess try: import urllib.request as u except ImportError: import urllib as u # Removed reference to spacepy #import spacepy #if spacepy.config.get('user_agent', None): # class AppURLopener(u.FancyURLopener): # version = spacepy.config['user_agent'] # u._urlopener = AppURLopener() baseurl = 'https://spdf.sci.gsfc.nasa.gov/pub/software/cdf/dist/' url = u.urlopen(baseurl) listing = url.read() url.close() p = LinkParser() p.feed(listing) cdfdist = [l for l in p.links_found if re.match('^cdf3\d_\d(?:_\d)?/$', l)] if not cdfdist: raise RuntimeError( "Couldn't find CDF distribution directory to download") cdfdist.sort(key=lambda x: x.rstrip('/').split('_')) cdfverbase = cdfdist[-1].rstrip('/') instfname = cdfverbase + ('_0' if cdfverbase.count('_') == 1 else '') + \ '-setup-{0}.exe'.format(len('%x' % sys.maxsize)*4) insturl = baseurl + cdfverbase + '/windows/' + instfname tmpdir = tempfile.mkdtemp() try: fname, status = u.urlretrieve(insturl, os.path.join(tmpdir, instfname)) subprocess.check_call([fname, '/install', '/q1'], shell=False) finally: shutil.rmtree(tmpdir) _libpath, _library = Library._find_lib() 
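#Module-level initialization: the CDF C library is located and loaded once,
#at import time. On failure _library is None and _libpath holds the list of
#paths that were tried, which the check below turns into an informative
#exception; on success the Library wrapper constructed below is exposed as
#the module-global ``lib`` used throughout this module.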
if _library is None: raise Exception(('Cannot load CDF C library; checked {0}. ' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(_libpath))) from . import const lib = Library(_libpath, _library) """Module global library object. Initalized at module load time so all classes have ready access to the CDF library and a common state. E.g: >>> import pycdf >>> pycdf.lib.version (3, 3, 0, ' ') """ class CDFException(Exception): """ Base class for errors or warnings in the CDF library. Not normally used directly, but in subclasses :class:`CDFError` and :class:`CDFWarning`. Error messages provided by this class are looked up from the underlying C library. """ def __init__(self, status): """ Create a CDF Exception Uses CDF C library to look up an appropriate error message. Parameters ========== status : ctypes.c_long CDF status """ self.status = status self.string = 'CDF error ' + repr(status) + ', unable to get details.' message = ctypes.create_string_buffer(const.CDF_STATUSTEXT_LEN + 1) try: retval = lib._library.CDFlib(const.SELECT_, const.CDF_STATUS_, ctypes.c_long(status), const.GET_, const.STATUS_TEXT_, message, const.NULL_) if retval == const.CDF_OK: if isinstance(message.value, str): self.string = message.value elif isinstance(message.value, bytes): self.string = message.value.decode() except: pass def __str__(self): """ Error string associated with the library error. Returns ======= out : str Error message from the CDF library. """ return self.string class CDFError(CDFException): """Raised for an error in the CDF library.""" pass class CDFWarning(CDFException, UserWarning): """Used for a warning in the CDF library.""" def warn(self, level=4): """ Issues a warning based on the information stored in my exception Intended for use in check_status or similar wrapper function. Other Parameters ================ level : int optional (default 3), how far up the stack the warning should be reported. Passed directly to :class:`warnings.warn`. """ warnings.warn(self, self.__class__, level) class EpochError(Exception): """Used for errors in epoch routines""" pass def _compress(obj, comptype=None, param=None): """Set or check the compression of a :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param obj: object on which to set or check compression @type obj: :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param comptype: type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :py:mod:`pycdf.const`. If not specified, will not change compression. @type comptype: ctypes.c_long @param param: Compression parameter, see CDF CRM 4.10 and :py:mod:`pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
@type param: ctypes.c_long @return: (comptype, param) currently in effect @rtype: tuple """ if isinstance(obj, CDF): COMPRESSION_ = const.CDF_COMPRESSION_ elif isinstance(obj, Var): COMPRESSION_ = const.zVAR_COMPRESSION_ else: raise ValueError('Must specify a CDF or Var type.') validparams = {const.NO_COMPRESSION.value: [ctypes.c_long(0)], const.RLE_COMPRESSION.value: [const.RLE_OF_ZEROs], const.HUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.AHUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.GZIP_COMPRESSION.value: [ctypes.c_long(5), ctypes.c_long(1), ctypes.c_long(2), ctypes.c_long(3), ctypes.c_long(4), ctypes.c_long(6), ctypes.c_long(7), ctypes.c_long(8), ctypes.c_long(9), ], } comptypes = [const.NO_COMPRESSION, const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION, const.GZIP_COMPRESSION] comptypevalues = [i.value for i in comptypes] if comptype != None: if not hasattr(comptype, 'value'): comptype = ctypes.c_long(comptype) if param is None: if not comptype.value in validparams: raise CDFError(const.BAD_COMPRESSION) param = validparams[comptype.value][0] paramlist = (ctypes.c_long * 1)(param) obj._call(const.PUT_, COMPRESSION_, comptype, paramlist) params = (ctypes.c_long * const.CDF_MAX_PARMS)(*([0] * const.CDF_MAX_PARMS)) comptype = ctypes.c_long(0) percent = ctypes.c_long(0) obj._call(const.GET_, COMPRESSION_, ctypes.byref(comptype), ctypes.byref(params), ctypes.byref(percent)) param = params[0] if not comptype.value in comptypevalues: raise CDFError(const.BAD_COMPRESSION) validparamvalues = [i.value for i in validparams[comptype.value]] if not param in validparamvalues: raise CDFError(const.BAD_COMPRESSION_PARM) comptype = comptypes[comptypevalues.index(comptype.value)] if comptype in (const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION): param = validparams[comptype.value][validparamvalues.index(param)] return (comptype, param) class CDF(MutableMapping): """ Python object representing a CDF file. Open or create a CDF file by creating an object of this class. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. A readonly CDF with many variables may be slow to close. See :meth:`readonly`. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :meth:`close` or :meth:`save` when done. .. note:: Existing CDF files are opened read-only by default, see :meth:`readonly` to change. CDF supports the `with <http://docs.python.org/tutorial/inputoutput.html#methods-of-file-objects>`_ keyword, like other file objects, so: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... #do brilliant things with the CDF will open the CDF, execute the indented statements, and close the CDF when finished or when an error occurs. The `python docs <http://docs.python.org/reference/compound_stmts.html#with>`_ include more detail on this 'context manager' ability. 
CDF objects behave like a python `dictionary <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_, where the keys are names of variables in the CDF, and the values, :class:`Var` objects. As a dictionary, they are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_ and it is easy to loop over all of the variables in a file. Some examples: #. List the names of all variables in the open CDF ``cdffile``: >>> cdffile.keys() >>> for k in cdffile: #Alternate ... print(k) #. Get a :class:`Var` object for the variable named ``Epoch``: >>> epoch = cdffile['Epoch'] #. Determine if a CDF contains a variable named ``B_GSE``: >>> if 'B_GSE' in cdffile: ... print('B_GSE is in the file') ... else: ... print('B_GSE is not in the file') #. Find how many variables are in the file: >>> print(len(cdffile)) #. Delete the variable ``Epoch`` from the open CDF file ``cdffile``: >>> del cdffile['Epoch'] #. Display a summary of variables and types in open CDF file ``cdffile``: >>> print(cdffile) #. Open the CDF named ``cdf_filename.cdf``, read *all* the data from all variables into dictionary ``data``, and close it when done or if an error occurs: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... data = cdffile.copy() This last example can be very inefficient as it reads the entire CDF. Normally it's better to treat the CDF as a dictionary and access only the data needed, which will be pulled transparently from disc. See :class:`Var` for more subtle examples. Potentially useful dictionary methods and related functions: - `in <http://docs.python.org/reference/expressions.html#in>`_ - `keys <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_ - :py:func:`len` - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - :py:func:`sorted` - :py:func:`~spacepy.toolbox.dictree` The CDF user's guide section 2.2 has more background information on CDF files. The :attr:`~CDF.attrs` Python attribute acts as a dictionary referencing CDF attributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`gAttrList` for more on the dictionary of global attributes. Creating a new CDF from a master (skeleton) CDF has similar syntax to opening one: >>> cdffile = pycdf.CDF('cdf_filename.cdf', 'master_cdf_filename.cdf') This creates and opens ``cdf_filename.cdf`` as a copy of ``master_cdf_filename.cdf``. Using a skeleton CDF is recommended over making a CDF entirely from scratch, but this is possible by specifying a blank master: >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') When CDFs are created in this way, they are opened read-write, see :py:meth:`readonly` to change. By default, new CDFs (without a master) are created in version 2 (backward-compatible) format. To create a version 3 CDF, use :meth:`Library.set_backward`: >>> pycdf.lib.set_backward(False) >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') Add variables by direct assignment, which will automatically set type and dimension based on the data provided: >>> cdffile['new_variable_name'] = [1, 2, 3, 4] or, if more control is needed over the type and dimensions, use :py:meth:`new`. Although it is supported to assign Var objects to Python variables for convenience, there are some minor pitfalls that can arise when changing a CDF that will not affect most users. 
This is only a concern when assigning a zVar object to a Python variable, changing the CDF through some other variable, and then trying to use the zVar object via the originally assigned variable. Deleting a variable: >>> var = cdffile['Var1'] >>> del cdffile['Var1'] >>> var[0] #fail, no such variable Renaming a variable: >>> var = cdffile['Var1'] >>> cdffile['Var1'].rename('Var2') >>> var[0] #fail, no such variable Renaming via the same variable works: >>> var = cdffile['Var1'] >>> var.rename('Var2') >>> var[0] #succeeds, aware of new name Deleting a variable and then creating another variable with the same name may lead to some surprises: >>> var = cdffile['Var1'] >>> var[...] = [1, 2, 3, 4] >>> del cdffile['Var1'] >>> cdffile.new('Var1', data=[5, 6, 7, 8]) >>> var[...] [5, 6, 7, 8] .. autosummary:: ~CDF.attr_num ~CDF.attrs ~CDF.add_attr_to_cache ~CDF.add_to_cache ~CDF.backward ~CDF.checksum ~CDF.clear_attr_from_cache ~CDF.clear_from_cache ~CDF.clone ~CDF.close ~CDF.col_major ~CDF.compress ~CDF.copy ~CDF.from_data ~CDF.new ~CDF.raw_var ~CDF.readonly ~CDF.save ~CDF.var_num ~CDF.version .. attribute:: CDF.attrs Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. .. attribute:: CDF.backward True if this CDF was created in backward-compatible mode (for opening with CDF library before 3.x) .. automethod:: add_to_cache .. automethod:: add_attr_to_cache .. automethod:: attr_num .. automethod:: checksum .. automethod:: clear_from_cache .. automethod:: clear_attr_from_cache .. automethod:: clone .. automethod:: close .. automethod:: col_major .. automethod:: compress .. automethod:: copy .. automethod:: from_data .. automethod:: new .. automethod:: raw_var .. automethod:: readonly .. automethod:: save .. automethod:: var_num .. automethod:: version """ def __init__(self, pathname, masterpath=None, create=None, readonly=None): """Open or create a CDF file. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. Raises ====== CDFError if CDF library reports an error CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save` when done.
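A new, empty CDF can be created the same way by passing a master path
that evaluates to ``False``; this is only a sketch, and the file and
variable names are illustrative:

>>> newfile = pycdf.CDF('brand_new.cdf', '')
>>> newfile['data'] = [1, 2, 3]
>>> newfile.close()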
""" if masterpath is not None: #Looks like we want to create if create is False: raise ValueError('Cannot specify a master CDF without creating a CDF') if readonly is True: raise ValueError('Cannot create a CDF in readonly mode') if create and readonly: raise ValueError('Cannot create a CDF in readonly mode') try: self.pathname = pathname.encode() except AttributeError: raise ValueError( 'pathname must be string-like: {0}'.format(pathname)) self._handle = ctypes.c_void_p(None) self._opened = False if masterpath is None and not create: self._open(True if readonly is None else readonly) elif masterpath: self._from_master(masterpath.encode()) else: self._create() lib.call(const.SELECT_, const.CDF_zMODE_, ctypes.c_long(2)) self._attrlistref = weakref.ref(gAttrList(self)) self.backward = self.version()[0] < 3 self._var_nums = {} """Cache of name-to-number mappings for variables in this CDF""" self._attr_info = {} """Cache of name-to-(number, global) mappings for attributes in this CDF""" def __del__(self): """Destructor; called when CDF object is destroyed. Close CDF file if there is still a valid handle. .. note:: To avoid data loss, explicitly call :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save`. """ if self._opened: self.close() def __delitem__(self, name): """Delete a zVariable in this CDF, by name or number Parameters ========== name : string or int Name or number of the CDF variable .. note: Variable numbers may change if variables are added or removed. Examples ======== Delete the variable ``Epoch`` from the open CDF file ``cdffile``. >>> del cdffile['Epoch'] """ self[name]._delete() def __enter__(self): """Context manager entrance function.""" return self def __exit__(self, type, value, traceback): """Context manager exit function. Close CDF file. """ self.close() def __getitem__(self, name): """Gets a zVariable in this CDF, by name or number The CDF acts like a dict @param name: Name or number of the CDF variable @type name: string or int @return: CDF variable named or numbered L{name} @rtype: :py:class:`pycdf.Var` @raise KeyError: for pretty much any problem in lookup @note: variable numbers may change if variables are added or removed. """ try: return Var(self, name) except CDFException as e: raise KeyError('{0}: {1}'.format(name, e)) def __setitem__(self, name, data): """Writes data to a zVariable in this CDF If the zVariable does not exist, will create one matching L{data}. If it does exist, will attempt to write L{data} to it without changing the type or dimensions. @param name: name or number of the variable to write @type name: str or int @param data: data to write, or a :py:class:`pycdf.Var` to copy """ if isinstance(data, Var): self.clone(data, name) elif name in self: self[name][...] 
= data if hasattr(data, 'attrs'): self[name].attrs.clone(data.attrs) else: self.new(name, data) def __iter__(self, current = 0): """Iterates over zVars in CDF Iterators for dicts return keys @note: Returned in variable-number order """ while current < self.__len__(): name = self[current].name() value = (yield name) if value is None: current += 1 else: current = self[value]._num() current += 1 def __len__(self): """Implements 'length' of CDF (number of zVars) @return: number of zVars in the CDF @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, const.CDF_NUMzVARS_, ctypes.byref(count)) return count.value def __contains__(self, key): """Determines whether a particular variable name is in the CDF @note: Essentially an efficiency function; L{__iter__} is called if this isn't defined @param key: key/variable name to check @type key: string @return: True if L{key} is the name of a variable in CDF, else False @rtype: Boolean """ try: foo = self[key] return True except KeyError as e: expected = str(key) + \ ": NO_SUCH_VAR: Named variable not found in this CDF." if expected in e.args: return False raise def __repr__(self): """Returns representation of CDF Cannot return anything that can be eval'd to create a copy of the CDF, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<CDF:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the CDF This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.CDF`, just the names, types, and sizes of all variables. (Attributes are not listed.) @return: description of the variables in the CDF @rtype: str """ if self._opened: return '\n'.join([key + ': ' + str(value) for (key, value) in sorted(self.items())]) #can get away with this sort because second value in tuple isn't #compared unless first are different, and variable name is unique. else: if isinstance(self.pathname, str): return 'Closed CDF {0}'.format(self.pathname) else: return 'Closed CDF {0}'.format(self.pathname.decode('ascii')) def _open(self, readonly=True): """Opens the CDF file (called on init) Will open an existing CDF file read/write. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.OPEN_, const.CDF_, self.pathname, ctypes.byref(self._handle)) self._opened = True if readonly: #Default is RW self.readonly(readonly) def _create(self): """Creates (and opens) a new CDF file Created at ``pathname``. Assumes zero-dimension r variables Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.CREATE_, const.CDF_, self.pathname, ctypes.c_long(0), (ctypes.c_long * 1)(0), ctypes.byref(self._handle)) self._opened = True def _from_master(self, master_path): """Creates a new CDF from a master CDF file ``master_path`` is copied to ``pathname`` and opened. Parameters ========== master_path : string location of the master CDF file Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. 
note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ if os.path.exists(self.pathname): raise CDFError(const.CDF_EXISTS) shutil.copy2(master_path, self.pathname) self._open(False) def _call(self, *args, **kwargs): """Select this CDF as current and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. Parameters ========== args : various, see :py:mod:`ctypes`. Passed directly to the CDF library interface. Useful constants are defined in the :doc:`const <pycdf_const>` module of this package. Returns ======= out : ctypes.c_long CDF status from the library .. note: Terminal NULL_ is automatically added to ``args``. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. """ return lib.call(const.SELECT_, const.CDF_, self._handle, *args, **kwargs) def clone(self, zVar, name=None, data=True): """ Clone a zVariable (from another CDF or this) into this CDF Parameters ========== zVar : :py:class:`Var` variable to clone Other Parameters ================ name : str Name of the new variable (default: name of the original) data : boolean (optional) Copy data, or only type, dimensions, variance, attributes? (default: True, copy data as well) Returns ======= out : :py:class:`Var` The newly-created zVar in this CDF """ if name is None: name = zVar.name() if name in self: del self[name] self.new(name, type=zVar.type(), recVary=zVar.rv(), dimVarys=zVar.dv(), dims=zVar._dim_sizes(), n_elements=zVar._nelems()) self[name].compress(*zVar.compress()) self[name].attrs.clone(zVar.attrs) if data: r = zVar._raw zVar._raw = True self.raw_var(name)[...] = zVar[...] zVar._raw = r return zVar def col_major(self, new_col=None): """ Finds the majority of this CDF file Other Parameters ================ new_col : boolean Specify True to change to column-major, False to change to row major, or do not specify to check the majority rather than changing it. (default is check only) Returns ======= out : boolean True if column-major, false if row-major """ if new_col != None: new_maj = const.COLUMN_MAJOR if new_col else const.ROW_MAJOR self._call(const.PUT_, const.CDF_MAJORITY_, new_maj) maj = ctypes.c_long(0) self._call(const.GET_, const.CDF_MAJORITY_, ctypes.byref(maj)) if not maj.value in (const.ROW_MAJOR.value, const.COLUMN_MAJOR.value): raise CDFError(const.BAD_MAJORITY) return maj.value == const.COLUMN_MAJOR.value def readonly(self, ro=None): """ Sets or check the readonly status of this CDF If the CDF has been changed since opening, setting readonly mode will have no effect. .. note:: Closing a CDF that has been opened readonly, or setting readonly False, may take a substantial amount of time if there are many variables in the CDF, as a (potentially large) cache needs to be cleared. Consider specifying ``readonly=False`` when opening the file if this is an issue. However, this may make some reading operations slower. Other Parameters ================ ro : Boolean True to set the CDF readonly, False to set it read/write, or leave out to check only. 
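Examples
========
A short sketch; an existing file opens read-only by default, so writing
requires clearing the flag first (the filename is illustrative):

>>> cdffile = pycdf.CDF('cdf_filename.cdf')
>>> cdffile.readonly()
True
>>> cdffile.readonly(False)
False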
Returns ======= out : Boolean True if CDF is read-only, else False Raises ====== CDFError : if bad mode is set """ if ro == True: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYon) elif ro == False: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYoff) mode = ctypes.c_long(0) self._call(const.CONFIRM_, const.CDF_READONLY_MODE_, ctypes.byref(mode)) if mode.value == const.READONLYon.value: return True elif mode.value == const.READONLYoff.value: return False else: raise CDFError(const.BAD_READONLY_MODE.value) def checksum(self, new_val=None): """ Set or check the checksum status of this CDF. If checksums are enabled, the checksum will be verified every time the file is opened. Other Parameters ================ new_val : boolean True to enable checksum, False to disable, or leave out to simply check. Returns ======= out : boolean True if the checksum is enabled or False if disabled """ if new_val != None: self._call(const.PUT_, const.CDF_CHECKSUM_, const.MD5_CHECKSUM if new_val else const.NO_CHECKSUM) chk = ctypes.c_long(0) self._call(const.GET_, const.CDF_CHECKSUM_, ctypes.byref(chk)) if not chk.value in (const.MD5_CHECKSUM.value, const.NO_CHECKSUM.value): raise CDFError(const.BAD_CHECKSUM) return chk.value == const.MD5_CHECKSUM.value def close(self): """ Closes the CDF file Although called on object destruction (:meth:`~CDF.__del__`), to ensure all data are saved, the user should explicitly call :meth:`~CDF.close` or :meth:`~CDF.save`. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.CLOSE_, const.CDF_) self._opened = False def compress(self, comptype=None, param=None): """ Set or check the compression of this CDF Sets compression on entire *file*, not per-variable. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) Returns ======= out : tuple (comptype, param) currently in effect See Also ======== :meth:`Var.compress` Examples ======== Set file ``cdffile`` to gzip compression, compression level 9: >>> cdffile.compress(pycdf.const.GZIP_COMPRESSION, 9) """ return _compress(self, comptype, param) def new(self, name, data=None, type=None, recVary=True, dimVarys=None, dims=None, n_elements=None, compress=None, compress_param=None): """ Create a new zVariable in this CDF .. note:: Either ``data`` or ``type`` must be specified. If type is not specified, it is guessed from ``data``. Parameters ========== name : str name of the new variable Other Parameters ================ data data to store in the new variable. If this has a an ``attrs`` attribute (e.g., :class:`~spacepy.datamodel.dmarray`), it will be used to populate attributes of the new variable. type : ctypes.c_long CDF type of the variable, from :mod:`~pycdf.const`. See section 2.5 of the CDF user's guide for more information on CDF data types. recVary : boolean record variance of the variable (default True) dimVarys : list of boolean dimension variance of each dimension, default True for all dimensions. 
dims : list of int size of each dimension of this variable, default zero-dimensional. Note this is the dimensionality as defined by CDF, i.e., for record-varying variables it excludes the leading record dimension. See :py:class:`Var`. n_elements : int number of elements, should be 1 except for CDF_CHAR, for which it's the length of the string. compress : ctypes.c_long Compression to apply to this variable, default None. See :py:meth:`Var.compress`. compress_param : ctypes.c_long Compression parameter if compression used; reasonable default is chosen. See :py:meth:`Var.compress`. Returns ======= out : :py:class:`Var` the newly-created zVariable Raises ====== ValueError : if neither data nor sufficient typing information is provided. Notes ===== Any given data may be representable by a range of CDF types; if the type is not specified, pycdf will guess which the CDF types which can represent this data. This breaks down to: #. If input data is a numpy array, match the type of that array #. Proper kind (numerical, string, time) #. Proper range (stores highest and lowest number provided) #. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: #. Type that matches precision of data first, then #. integer type before float type, then #. Smallest type first, then #. signed type first, then #. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if ``data`` specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: #. absolute values between 0 and 3e-39 #. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. """ if type in (const.CDF_EPOCH16, const.CDF_INT8, const.CDF_TIME_TT2000) \ and self.backward: raise ValueError('Cannot use EPOCH16, INT8, or TIME_TT2000 ' 'in backward-compatible CDF') if not lib.supports_int8 and \ type in (const.CDF_INT8, const.CDF_TIME_TT2000): raise ValueError('INT8 and TIME_TT2000 require CDF library 3.4.0') if data is None: if type is None: raise ValueError('Must provide either data or a CDF type.') if dims is None: dims = [] if n_elements is None: n_elements = 1 else: (guess_dims, guess_types, guess_elements) = _Hyperslice.types(data) if dims is None: if recVary: if guess_dims == (): raise ValueError( 'Record-varying data cannot be scalar. 
' 'Specify NRV with CDF.new() or put data in array.') dims = guess_dims[1:] else: dims = guess_dims if type is None: type = guess_types[0] if type == const.CDF_EPOCH16.value and self.backward: type = const.CDF_EPOCH if n_elements is None: n_elements = guess_elements if dimVarys is None: dimVarys = [True for i in dims] recVary = const.VARY if recVary else const.NOVARY dimVarys = [const.VARY if dimVary else const.NOVARY for dimVary in dimVarys] if not hasattr(type, 'value'): type = ctypes.c_long(type) if type.value == const.CDF_INT8.value and not lib.supports_int8: raise ValueError( '64-bit integer support require CDF library 3.4.0') if type.value in (const.CDF_EPOCH16.value, const.CDF_INT8.value, const.CDF_TIME_TT2000.value) \ and self.backward: raise ValueError('Data requires EPOCH16, INT8, or TIME_TT2000; ' 'incompatible with backward-compatible CDF') new_var = Var(self, name, type, n_elements, dims, recVary, dimVarys) if compress != None: new_var.compress(compress, compress_param) if data is not None: new_var[...] = data if hasattr(data, 'attrs'): new_var.attrs.clone(data.attrs) return new_var def raw_var(self, name): """ Get a "raw" :class:`Var` object. Normally a :class:`Var` will perform translation of values for certain types (to/from Unicode for CHAR variables on Py3k, and to/from datetime for all time types). A "raw" object does not perform this translation, on read or write. This does *not* affect the data on disk, and in fact it is possible to maintain multiple Python objects with access to the same zVariable. Parameters ========== name : str name or number of the zVariable """ v = self[name] v._raw = True return v def save(self): """ Saves the CDF file but leaves it open. If closing the CDF, :meth:`close` is sufficient; there is no need to call :meth:`save` before :meth:`close`. .. note:: Relies on an undocumented call of the CDF C library, which is also used in the Java interface. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.SAVE_, const.CDF_) def copy(self): """ Make a copy of all data and attributes in this CDF Returns ======= out : :py:class:`CDFCopy` :class:`~spacepy.datamodel.SpaceData`-like object of all data """ return CDFCopy(self) def version(self): """ Get version of library that created this CDF Returns ======= out : tuple version of CDF library, in form (version, release, increment) """ ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) self._call(const.GET_, const.CDF_VERSION_, ctypes.byref(ver), const.GET_, const.CDF_RELEASE_, ctypes.byref(rel), const.GET_, const.CDF_INCREMENT_, ctypes.byref(inc)) return (ver.value, rel.value, inc.value) def _get_attrs(self): """Get attribute list Provide access to the CDF's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = gAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. 
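A rough sketch of typical use, assuming the attribute list supports
dict-style item assignment and membership tests (the attribute name is
illustrative):

>>> cdffile.attrs['Author'] = 'A. Person'
>>> 'Author' in cdffile.attrs
True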
""") def var_num(self, varname): """Get the variable number of a particular variable name This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : int Variable number of this zvariable. """ num = self._var_nums.get(varname, None) if num is None: #Copied from Var._get, which can hopefully be thinned varNum = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMBER_, varname, ctypes.byref(varNum)) num = varNum.value self._var_nums[varname] = num return num def attr_num(self, attrname): """Get the attribute number and scope by attribute name This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : tuple attribute number, scope (True for global) of this attribute """ res = self._attr_info.get(attrname, None) if res is None: #Copied from Var._get, which can hopefully be thinned attrNum = ctypes.c_long(0) self._call(const.GET_, const.ATTR_NUMBER_, attrname, ctypes.byref(attrNum)) scope = ctypes.c_long(0) self._call(const.SELECT_, const.ATTR_, attrNum, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) res = (attrNum.value, scope) self._attr_info[attrname] = res return res def clear_attr_from_cache(self, attrname): """Mark an attribute deleted in the name-to-number cache Will remove an attribute, and all attributes with higher numbers, from the attribute cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the attribute. Not this is NOT a string in Python 3! """ num, scope = self.attr_num(attrname) #All numbers higher than this are renumbered for a, n in list(self._attr_info.items()): if n[0] >= num: del self._attr_info[a] def clear_from_cache(self, varname): """Mark a variable deleted in the name-to-number cache Will remove a variable, and all variables with higher numbers, from the variable cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! """ num = self.var_num(varname) #All numbers higher than this are renumbered for v, n in list(self._var_nums.items()): if n >= num: del self._var_nums[v] def add_attr_to_cache(self, attrname, num, scope): """Add an attribute to the name-to-number cache This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable scope : bool True if global scope; False if variable scope. 
""" self._attr_info[attrname] = (num, scope) def add_to_cache(self, varname, num): """Add a variable to the name-to-number cache This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable """ self._var_nums[varname] = num #Note there is no function for delete, currently handled in Var.rename #and Attr.rename by just deleting from the dict directly. Maybe this #should be differen (maybe should be possible to follow a variable across #a rename...) class Var(MutableSequence): """ A CDF variable. This object does not directly store the data from the CDF; rather, it provides access to the data in a format that much like a Python list or numpy :class:`~numpy.ndarray`. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. The CDF user's guide, section 2.3, provides background on variables. .. note:: Not intended to be created directly; use methods of :class:`CDF` to gain access to a variable. A record-varying variable's data are viewed as a hypercube of dimensions n_dims+1 (the extra dimension is the record number). They are indexed in row-major fashion, i.e. the last index changes most frequently / is contiguous in memory. If the CDF is column-major, the data are transformed to row-major before return. Non record-varying variables are similar, but do not have the extra dimension of record number. Variables can be subscripted by a multidimensional index to return the data. Indices are in row-major order with the first dimension representing the record number. If the CDF is column major, the data are reordered to row major. Each dimension is specified by standard Python `slice <http://docs.python.org/tutorial/introduction.html#strings>`_ notation, with dimensions separated by commas. The ellipsis fills in any missing dimensions with full slices. The returned data are lists; Python represents multidimensional arrays as nested lists. The innermost set of lists represents contiguous data. .. note:: numpy 'fancy indexing' is *not* supported. Degenerate dimensions are 'collapsed', i.e. no list of only one element will be returned if a single subscript is specified instead of a range. (To avoid this, specify a slice like 1:2, which starts with 1 and ends before 2). Two special cases: 1. requesting a single-dimension slice for a record-varying variable will return all data for that record number (or those record numbers) for that variable. 2. Requests for multi-dimensional variables may skip the record-number dimension and simply specify the slice on the array itself. In that case, the slice of the array will be returned for all records. In the event of ambiguity (e.g., single-dimension slice on a one-dimensional variable), case 1 takes priority. Otherwise, mismatch between the number of dimensions specified in the slice and the number of dimensions in the variable will cause an :exc:`~exceptions.IndexError` to be thrown. This all sounds very complicated but it is essentially attempting to do the 'right thing' for a range of slices. An unusual case is scalar (zero-dimensional) non-record-varying variables. Clearly they cannot be subscripted normally. 
In this case, use the ``[...]`` syntax meaning 'access all data.': >>> import pycdf >>> testcdf = pycdf.CDF('test.cdf', '') >>> variable = testcdf.new('variable', recVary=False, ... type=pycdf.const.CDF_INT4) >>> variable[...] = 10 >>> variable <Var: CDF_INT4 [] NRV > >>> variable[...] 10 Reading any empty non-record-varying variable will return an empty with the same *number* of dimensions, but all dimensions will be of zero length. The scalar is, again, a special case: due to the inability to have a numpy array which is both zero-dimensional and empty, reading an NRV scalar variable with no data will return an empty one-dimensional array. This is really not recommended. As a list type, variables are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_; iterating over a variable returns a single complete record at a time. This is all clearer with examples. Consider a variable ``B_GSM``, with three elements per record (x, y, z components) and fifty records in the CDF. Then: 1. ``B_GSM[0, 1]`` is the y component of the first record. 2. ``B_GSM[10, :]`` is a three-element list, containing x, y, and z components of the 11th record. As a shortcut, if only one dimension is specified, it is assumed to be the record number, so this could also be written ``B_GSM[10]``. 3. ``B_GSM[...]`` reads all data for ``B_GSM`` and returns it as a fifty-element list, each element itself being a three-element list of x, y, z components. Multidimensional example: consider fluxes stored as a function of pitch angle and energy. Such a variable may be called Flux and stored as a two-dimensional array, with the first dimension representing (say) ten energy steps and the second, eighteen pitch angle bins (ten degrees wide, centered from 5 to 175 degrees). Assume 100 records stored in the CDF (i.e. 100 different times). 1. ``Flux[4]`` is a list of ten elements, one per energy step, each element being a list of 18 fluxes, one per pitch bin. All are taken from the fifth record in the CDF. 2. ``Flux[4, :, 0:4]`` is the same record, all energies, but only the first four pitch bins (roughly, field-aligned). 3. ``Flux[..., 0:4]`` is a 100-element list (one per record), each element being a ten-element list (one per energy step), each containing fluxes for the first four pitch bins. This slicing notation is very flexible and allows reading specifically the desired data from the CDF. All data are, on read, converted to appropriate Python data types; EPOCH, EPOCH16, and TIME_TT2000 types are converted to :class:`~datetime.datetime`. Data are returned in numpy arrays. .. note:: Although pycdf supports TIME_TT2000 variables, the Python :class:`~datetime.datetime` object does not support leap seconds. Thus, on read, any seconds past 59 are truncated to 59.999999 (59 seconds, 999 milliseconds, 999 microseconds). Potentially useful list methods and related functions: - `count <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `in <http://docs.python.org/reference/expressions.html#in>`_ - `index <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `len <http://docs.python.org/library/functions.html#len>`_ - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - `sorted <http://docs.python.org/library/functions.html#sorted>`_ The topic of array majority can be very confusing; good background material is available at `IDL Array Storage and Indexing <http://www.idlcoyote.com/misc_tips/colrow_major.html>`_. 
In brief, *regardless of the majority stored in the CDF*, pycdf will always present the data in the native Python majority, row-major order, also known as C order. This is the default order in `NumPy <http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html #internal-memory-layout-of-an-ndarray>`_. However, packages that render image data may expect it in column-major order. If the axes seem 'swapped' this is likely the reason. The :attr:`~Var.attrs` Python attribute acts as a dictionary referencing zAttributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`zAttrList` for more on the dictionary of attributes. With writing, as with reading, every attempt has been made to match the behavior of Python lists. You can write one record, many records, or even certain elements of all records. There is one restriction: only the record dimension (i.e. dimension 0) can be resized by write, as all records in a variable must have the same dimensions. Similarly, only whole records can be deleted. .. note:: Unusual error messages on writing data usually mean that pycdf is unable to interpret the data as a regular array of a single type matching the type and shape of the variable being written. A 5x4 array is supported; an irregular array where one row has five columns and a different row has six columns is not. Error messages of this type include: - ``Data must be well-formed, regular array of number, string, or datetime`` - ``setting an array element with a sequence.`` - ``shape mismatch: objects cannot be broadcast to a single shape`` For these examples, assume Flux has 100 records and dimensions [2, 3]. Rewrite the first record without changing the rest: >>> Flux[0] = [[1, 2, 3], [4, 5, 6]] Writes a new first record and delete all the rest: >>> Flux[...] = [[1, 2, 3], [4, 5, 6]] Write a new record in the last position and add a new record after: >>> Flux[99:] = [[[1, 2, 3], [4, 5, 6]], ... [[11, 12, 13], [14, 15, 16]]] Insert two new records between the current number 5 and 6: >>> Flux[5:6] = [[[1, 2, 3], [4, 5, 6]], [[11, 12, 13], ... [14, 15, 16]]] This operation can be quite slow, as it requires reading and rewriting the entire variable. (CDF does not directly support record insertion.) Change the first element of the first two records but leave other elements alone: >>> Flux[0:2, 0, 0] = [1, 2] Remove the first record: >>> del Flux[0] Removes record 5 (the sixth): >>> del Flux[5] Due to the need to work around a bug in the CDF library, this operation can be quite slow. Delete *all data* from ``Flux``, but leave the variable definition intact: >>> del Flux[...] .. note:: Although this interface only directly supports zVariables, zMode is set on opening the CDF so rVars appear as zVars. See p.24 of the CDF user's guide; pyCDF uses zMode 2. .. autosummary:: ~Var.attrs ~Var.compress ~Var.copy ~Var.dtype ~Var.dv ~Var.insert ~Var.name ~Var.rename ~Var.rv ~Var.shape ~Var.type .. attribute:: Var.attrs zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. .. automethod:: compress .. automethod:: copy .. autoattribute:: dtype .. automethod:: dv .. automethod:: insert .. automethod:: name .. automethod:: rename .. automethod:: rv .. autoattribute:: shape .. 
automethod:: type """ def __init__(self, cdf_file, var_name, *args): """Create or locate a variable Parameters ========== cdf_file : :py:class:`pycdf.CDF` CDF file containing this variable var_name : string name of this variable Other Parameters ================ args additional arguments passed to :py:meth:`_create`. If none, opens an existing variable. If provided, creates a new one. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning """ self.cdf_file = cdf_file #This is the definitive "identify" of variable self._name = None self._type = None #CDF type (long) self._raw = False #Raw access (skip all conversions) if len(args) == 0: self._get(var_name) else: self._create(var_name, *args) #Weak reference to attribute list (use attrs instead) #This avoids a reference loop self._attrlistref = weakref.ref(zAttrList(self)) def __getitem__(self, key): """Returns a slice from the data array. Details under :py:class:`pycdf.Var`. @return: The data from this variable @rtype: list-of-lists of appropriate type. @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) #Hyperslice mostly catches this sort of thing, but #an empty variable is a special case, since we might want to #WRITE to 0th record (which Hyperslice also supports) but #can't READ from it, and iterating over tries to read from it. if hslice.rv: if hslice.dimsizes[0] == 0 and hslice.degen[0] and \ hslice.starts[0] == 0: raise IndexError('record index out of range') #For NRV, again hslice will assume 0th record exists since we might #want to write. So ANY degenerate dim other than the glued-on 0th #suggests an explicit index that should fail. None degenerate suggests #make an empty array. #Note this is pulling a lot of hyperslice stuff into getitem! elif hslice.dimsizes[0] == 0: if len(hslice.degen) > 1 and max(hslice.degen[1:]): raise IndexError('record index out of range') else: #The zero-length dimension is degenerate so it gets chopped, #and you can't have a zero-length numpy array that still #maintains the size of all other dimensions. So just force #a zero-dim array and the rest will follow hslice.counts[...] = 0 #If this is a scalar, need to make a single non-degenerate #dimension so it can be empty. if len(hslice.counts) == 1: hslice.degen[0] = False result = hslice.create_array() if hslice.counts[0] != 0: hslice.select() lib.call(const.GET_, const.zVAR_HYPERDATA_, result.ctypes.data_as(ctypes.c_void_p)) return hslice.convert_input_array(result) def __delitem__(self, key): """Removes a record (or set of records) from the CDF Only whole records can be deleted, so the del call must either specify only one dimension or it must specify all elements of the non-record dimensions. This is *not* a way to resize a variable! Deleting records from the middle of a variable may be very slow in some circumstances. To work around a bug in CDF library versions 3.4.0 and before, all the data must be read in, the requested deletions done, and then all written back out. 
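A minimal sketch (the variable name ``Flux`` is hypothetical): whole records are removed by indexing only the record dimension:

>>> del Flux[0:10]   #drop the first ten records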
@param key: index or slice to delete @type key: int or slice @raise TypeError: if an attempt is made to delete from a non record-varying variable, or to delete below the record level """ if not self.rv(): raise TypeError('Cannot delete records from non-record-varying ' 'variable.') hslice = _Hyperslice(self, key) if hslice.dims > 1 and (hslice.counts[1:] != hslice.dimsizes[1:]).any(): raise TypeError('Can only delete entire records.') if hslice.counts[0] == 0: return start = hslice.starts[0] count = hslice.counts[0] interval = hslice.intervals[0] dimsize = hslice.dimsizes[0] self._call() dangerous_delete = False if lib._del_middle_rec_bug and \ (interval != 1 or (start != 0 and start + count < dimsize)): #delete from middle is dangerous if only have one index entry entries = ctypes.c_long(0) lib.call(const.GET_, const.zVAR_nINDEXENTRIES_, ctypes.byref(entries)) dangerous_delete = (entries.value == 1) if dangerous_delete: data = self[...] data = numpy.delete( data, numpy.arange(start, start + count * interval, interval), 0) self[0:dimsize - count] = data first_rec = dimsize - count last_rec = dimsize - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif interval == 1: first_rec = ctypes.c_long(start) last_rec = ctypes.c_long(start + count - 1) lib.call(const.DELETE_, const.zVAR_RECORDS_, first_rec, last_rec) else: self._call() #delete from end to avoid renumbering of records for recno in range(start + (count - 1) * interval, start - 1, -1 * interval): lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(recno), ctypes.c_long(recno)) def __setitem__(self, key, data): """Puts a slice into the data array. Details under :py:class:`pycdf.Var`. @param key: index or slice to store @type key: int or slice @param data: data to store @type data: numpy.array @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. 
IndexError will @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) n_recs = hslice.counts[0] hslice.expand(data) cdf_type = self.type() if cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) else: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=self._np_type()) if cdf_type == const.CDF_EPOCH16.value: datashape = data.shape[:-1] else: datashape = data.shape #Check data sizes if datashape != tuple(hslice.expected_dims()): raise ValueError('attempt to assign data of dimensions ' + str(datashape) + ' to slice of dimensions ' + str(tuple(hslice.expected_dims()))) #Flip majority and reversed dimensions, see convert_input_array data = hslice.convert_output_array(data) #Handle insertions and similar weirdness if hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Specified slice ends before last record, so insert in middle saved_data = self[hslice.starts[0] + n_recs:] if hslice.counts[0] > 0: hslice.select() lib.call(const.PUT_, const.zVAR_HYPERDATA_, data.ctypes.data_as(ctypes.c_void_p)) if hslice.counts[0] < n_recs: first_rec = hslice.starts[0] + hslice.counts[0] last_rec = hslice.dimsizes[0] - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Put saved data in after inserted data self[hslice.starts[0] + hslice.counts[0]:] = saved_data def extend(self, data): """ Append multiple values to the end of this variable This is an efficiency function which overrides the base implementation in MutableSequence. Parameters ---------- data : the data to append """ self[len(self):] = data def insert(self, index, data): """ Inserts a *single* record before an index Parameters ---------- index : int index before which to insert the new record data : the record to insert """ self[index:index] = [data] def _create(self, var_name, datatype, n_elements = 1, dims = (), recVary = const.VARY, dimVarys = None): """Creates a new zVariable @param var_name: name of this variable @type var_name: string @param datatype: CDF data type @type datatype: ctypes.c_long @param n_elements: number of elements (should be 1 except for CDF_CHAR variables). @type n_elements: long @param dims: size of each dimension for multi-dimensional variable, or empty for a zero-dimensional @type dims: sequence of long @param recVary: record variance for this variable (VARY/NOVARY) @type recVary: long @param dimVarys: array of VARY or NOVARY, variance for each dimension @type dimVarys: sequence of long @return: new variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.new}. 
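A minimal sketch of the public route (file and variable names are hypothetical, and the ``dims`` keyword of L{CDF.new} is assumed here rather than taken from this docstring):

>>> testcdf = pycdf.CDF('test.cdf', '')
>>> flux = testcdf.new('Flux', type=pycdf.const.CDF_FLOAT, dims=[10, 18])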
""" dim_array = (ctypes.c_long * len(dims))(*dims) enc_name = var_name.encode('ascii') if dimVarys is None: dim_vary_array = (ctypes.c_long * (len(dims) if len(dims) > 0 else 1))(const.VARY) else: dim_vary_array = (ctypes.c_long * len(dims))(*dimVarys) varNum = ctypes.c_long(0) self.cdf_file._call(const.CREATE_, const.zVAR_, enc_name, datatype, ctypes.c_long(n_elements), ctypes.c_long(len(dims)), dim_array, recVary, dim_vary_array, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) def _delete(self): """Removes this zVariable from the CDF @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ self._call(const.DELETE_, const.zVAR_) self.cdf_file.clear_from_cache(self._name) self._name = None def _get(self, var_name): """Gets an existing zVariable @param var_name: name of this variable @type var_name: string @return: variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.__getitem__}. """ if isinstance(var_name, str_classes): try: enc_name = var_name.encode('ascii').rstrip() except AttributeError: enc_name = var_name.rstrip() #already in ASCII #'touch' CDF to cause an error if the name isn't there; get number varNum = ctypes.c_long(0) self.cdf_file._call(const.GET_, const.zVAR_NUMBER_, enc_name, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) else: #Looking up by number name = ctypes.create_string_buffer(const.CDF_VAR_NAME_LEN256+1) self.cdf_file._call(const.SELECT_, const.zVAR_, ctypes.c_long(var_name), const.GET_, const.zVAR_NAME_, name) self._name = name.value.rstrip() self.cdf_file.add_to_cache(self._name, var_name) def _num(self): """Returns the zVar number for this variable @return: number of this zVar @rtype: int """ return self.cdf_file.var_num(self._name) def __len__(self): """Get number of records for this variable in this file @return: Number of records @rtype: long @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ count = ctypes.c_long(0) self._call(const.GET_, const.zVAR_MAXREC_, ctypes.byref(count)) return (count.value + 1) def __repr__(self): """Returns representation of the variable Cannot return anything that can be eval'd to create a copy, so just wrap the informal representation in angle brackets. @return: info on this zVar @rtype: str """ return '<Var:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the variable This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.Var`. 
@return: info on this zVar, CDFTYPE [dimensions] NRV (if not record-varying) @rtype: str """ if self.cdf_file._opened: cdftype = self.type() chartypes = (const.CDF_CHAR.value, const.CDF_UCHAR.value) rv = self.rv() typestr = lib.cdftypenames[cdftype] + \ ('*' + str(self._nelems()) if cdftype in chartypes else '' ) if rv: sizestr = str([len(self)] + self._dim_sizes()) else: sizestr = str(self._dim_sizes()) return typestr + ' ' + sizestr + ('' if rv else ' NRV') else: if isinstance(self._name, str): return 'zVar "{0}" in closed CDF {1}'.format( self._name, self.cdf_file.pathname) else: return 'zVar "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self.cdf_file.pathname.decode('ascii')) def _n_dims(self): """Get number of dimensions for this variable @return: the number of dimensions @rtype: long """ n_dims = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMDIMS_, ctypes.byref(n_dims)) return n_dims.value def _dim_sizes(self): """Get the dimension sizes for this variable @return: sequence of sizes @rtype: sequence of long @note: This will always be in Python order (i.e. row major, last index iterates most quickly), *regardless* of the majority of the CDF. """ sizes = (ctypes.c_long * const.CDF_MAX_DIMS)(0) self._call(const.GET_, const.zVAR_DIMSIZES_, sizes) sizes = sizes[0:self._n_dims()] return sizes def rv(self, new_rv=None): """ Gets or sets whether this variable has record variance If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Other Parameters ================ new_rv : boolean True to change to record variance, False to change to NRV, unspecified to simply check variance. Returns ======= out : Boolean True if record varying, False if NRV """ if new_rv != None: self._call(const.PUT_, const.zVAR_RECVARY_, const.VARY if new_rv else const.NOVARY) vary = ctypes.c_long(0) self._call(const.GET_, const.zVAR_RECVARY_, ctypes.byref(vary)) return vary.value != const.NOVARY.value def dv(self, new_dv=None): """ Gets or sets dimension variance of each dimension of variable. If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Parameters ========== new_dv : list of boolean Each element True to change that dimension to dimension variance, False to change to not dimension variance. (Unspecified to simply check variance.) Returns ======= out : list of boolean True if that dimension has variance, else false. """ ndims = self._n_dims() if new_dv != None: if len(new_dv) != ndims: raise ValueError('Must specify variance for ' + str(ndims) + 'dimensions.') varies = (ctypes.c_long * ndims)( *[const.VARY if dv else const.NOVARY for dv in new_dv]) self._call(const.PUT_, const.zVAR_DIMVARYS_, varies) if ndims == 0: return [] varies = (ctypes.c_long * const.CDF_MAX_DIMS)() self._call(const.GET_, const.zVAR_DIMVARYS_, varies) return [dv != const.NOVARY.value for dv in varies[0:ndims]] def _call(self, *args, **kwargs): """Select this CDF and variable and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. @param args: Passed directly to the CDF library interface. Useful constants are defined in the :py:mod:`pycdf.const` module of this package. @type args: various, see :py:mod:`ctypes`. 
@return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self.cdf_file._call( const.SELECT_, const.zVAR_, ctypes.c_long(self.cdf_file.var_num(self._name)), *args, **kwargs) def _np_type(self): """Returns the numpy type of this variable This is the numpy type that will come directly out of the CDF; see :meth:`dtype` for the representation post-conversion. Raises ====== CDFError : for library-reported error or failure to find numpy type Returns ======= out : dtype numpy dtype that will hold value from this variable """ cdftype = self.type() if cdftype == const.CDF_CHAR.value or cdftype == const.CDF_UCHAR.value: return numpy.dtype('S' + str(self._nelems())) try: return lib.numpytypedict[cdftype] except KeyError: raise CDFError(const.BAD_DATA_TYPE) def type(self, new_type=None): """ Returns or sets the CDF type of this variable Parameters ========== new_type : ctypes.c_long the new type from :mod:`~pycdf.const` Returns ======= out : int CDF type """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) n_elements = ctypes.c_long(self._nelems()) self._call(const.PUT_, const.zVAR_DATASPEC_, new_type, n_elements) self._type = None if self._type is None: cdftype = ctypes.c_long(0) self._call(const.GET_, const.zVAR_DATATYPE_, ctypes.byref(cdftype)) self._type = cdftype.value return self._type def _nelems(self): """Number of elements for each value in this variable This is the length of strings for CHAR and UCHAR, should be 1 otherwise. @return: length of strings @rtype: int """ nelems = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMELEMS_, ctypes.byref(nelems)) return nelems.value def name(self): """ Returns the name of this variable Returns ======= out : str variable's name """ if isinstance(self._name, str): return self._name elif isinstance(self._name, bytes): return self._name.decode() def compress(self, comptype=None, param=None): """ Set or check the compression of this variable Compression may not be changeable on variables with data already written; even deleting the data may not permit the change. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
Returns ======= out : tuple the (comptype, param) currently in effect """ return _compress(self, comptype, param) def copy(self): """ Copies all data and attributes from this variable Returns ======= out : :class:`VarCopy` list of all data in record order """ return VarCopy(self) def rename(self, new_name): """ Renames this variable Parameters ========== new_name : str the new name for this variable """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_VAR_NAME_LEN256: raise CDFError(const.BAD_VAR_NAME) self._call(const.PUT_, const.zVAR_NAME_, enc_name) self.cdf_file.add_to_cache( enc_name, self.cdf_file.var_num(self._name)) #Still in cache del self.cdf_file._var_nums[self._name] self._name = enc_name @property def shape(self): """ Provides the numpy array-like shape of this variable. Returns a tuple; first element is number of records (RV variable only) And the rest provide the dimensionality of the variable. .. note:: Assigning to this attribute will not change the shape. """ if self.rv(): return tuple([len(self)] + self._dim_sizes()) else: return tuple(self._dim_sizes()) @property def dtype(self): """ Provide the numpy dtype equivalent to the CDF type of this variable. Data from this variable will be returned in numpy arrays of this type. See Also -------- type """ cdftype = self.type() if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str is not bytes and not self._raw: return numpy.dtype('U' + str(self._nelems())) if cdftype in (const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value) and not self._raw: return numpy.dtype('O') return self._np_type() def _get_attrs(self): """Get attribute list Provide access to the zVar's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = zAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. """) class _Hyperslice(object): """Represents a CDF 'slice' used for the hyper CDF functions For internal module use only. @ivar dims: number of dimensions to this slice, usually number of dimensions to the variable plus one for the record, which represents the 0th (least rapidly varying) dimension. @type dims: int @ivar dimsizes: size of each dimension (0th is number of records) @type dimsizes: list of int @ivar starts: index of the start value for each dimension ('dimension indices' in CDF speak) @type starts: list of int @ivar counts: number of values to get from each dimension. Final result will be the product of everything in counts. ('dimension counts' in CDF speak) @type counts: numpy.array @ivar intervals: interval between successive indices to use for each dimension. ('dimension invervals' in CDF speak) @type intervals: list of int @ivar degen: is this dimension degenerate, i.e. should be removed in the returned dataset. A 3D array with one dimension degenerate will be returned as a 2D array (i.e. list-of-lists.) @type degen: numpy.array @ivar rev: should this dimension be returned in reverse order? 
@type rev: numpy.array @ivar column: is this slice in column-major mode (if false, row-major) @type column: boolean @ivar zvar: what CDF variable this object slices on @type zvar: :py:class:`pycdf.Var` @ivar expanded_key: fully-expanded version of the key passed to the constructor (all dimensions filled in) @type expanded_key: tuple @note: All dimension-related variables are stored row-major (Python order) """ def __init__(self, zvar, key): """Create a Hyperslice @param zvar: zVariable that this slices @type zvar: :py:class:`pycdf.Var` @param key: Python multi-dimensional slice as passed to __getitem__ @type key: tuple of slice and/or int @raise IndexError: if slice is out of range, mismatches dimensions, or otherwise unparsable. @raise ValueError: if slice has invalid values """ self.zvar = zvar self.rv = self.zvar.rv() #dim of records, + 1 record dim (NRV always is record 0) self.dims = zvar._n_dims() + 1 self.dimsizes = [len(zvar)] + \ zvar._dim_sizes() self.starts = [0] * self.dims self.counts = numpy.empty((self.dims,), dtype=numpy.int32) self.counts.fill(1) self.intervals = [1] * self.dims self.degen = numpy.zeros(self.dims, dtype=numpy.bool) self.rev = numpy.zeros(self.dims, dtype=numpy.bool) #key is: #1. a single value (integer or slice object) if called 1D #2. a tuple (of integers and/or slice objects) if called nD #3. Each item is either a single value (degenerate dim) # or a slice object. if not hasattr(key, '__len__'): #Not a container object, pack in tuple key = (key, ) if not self.rv: key = (0, ) + key #NRV, so always get 0th record (degenerate) key = self.expand_ellipsis(key, self.dims) if self.rv: #special-cases for RV variables if len(key) == 1: #get all data for this record(s) key = self.expand_ellipsis(key + (Ellipsis, ), self.dims) elif len(key) == self.dims - 1: #get same slice from each record key = (slice(None, None, None), ) + key if len(key) == self.dims: self.expanded_key = key for i in range(self.dims): idx = key[i] if hasattr(idx, 'start'): #slice (self.starts[i], self.counts[i], self.intervals[i], self.rev[i]) = \ self.convert_range(idx.start, idx.stop, idx.step, self.dimsizes[i]) else: #Single degenerate value if idx < 0: idx += self.dimsizes[i] if idx != 0 and (idx >= self.dimsizes[i] or idx < 0): raise IndexError('list index out of range') self.starts[i] = idx self.degen[i] = True else: raise IndexError('Slice does not match dimensions for zVar ' + str(zvar._name)) self.column = zvar.cdf_file.col_major() def expected_dims(self, data=None): """Calculate size of non-degenerate dimensions Figures out size, in each dimension, of expected input data @return: size of each dimension for this slice, excluding degenerate @rtype: list of int """ return [self.counts[i] for i in range(self.dims) if not self.degen[i]] def expand(self, data): """Expands the record dimension of this slice to hold a set of data If the length of data (outermost dimension) is larger than the record count (counts[0]) for this slice, expand the slice to hold all the data. This requires that the record dimension of the slice not be degenerate, and also that it not have been completely specified when the hyperslice was created (i.e. record dimension either ellipsis or no specified stop.) Does *not* expand any other dimension, since that's Very Hard in CDF. 
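A sketch (names hypothetical) of a write that relies on this expansion:

>>> Flux[len(Flux):] = new_records   #open-ended record slice; the record count grows to fit new_records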
@param data: the data which are intended to be stored in this slice @type data: list """ rec_slice = self.expanded_key[0] if not self.rv or isinstance(data, str_classes) or self.degen[0] or \ not hasattr(rec_slice, 'stop'): return if len(data) < self.counts[0]: #Truncate to fit data if rec_slice.stop is None and rec_slice.step in (None, 1): self.counts[0] = len(data) elif len(data) > self.counts[0]: #Expand to fit data if rec_slice.step in (None, 1): self.counts[0] = len(data) def create_array(self): """Creates a numpy array to hold the data from this slice Returns ======= out : numpy.array array sized, typed, and dimensioned to hold data from this slice """ counts = self.counts degen = self.degen if self.column: counts = self.reorder(counts) degen = self.reorder(degen) #TODO: Forcing C order for now, revert to using self.column later array = numpy.empty( [counts[i] for i in range(len(counts)) if not degen[i]], self.zvar._np_type(), order='C') return numpy.require(array, requirements=('C', 'A', 'W')) def convert_input_array(self, buffer): """Converts a buffer of raw data from this slice EPOCH(16) variables always need to be converted. CHAR need converted to Unicode if py3k Parameters ========== buffer : numpy.array data as read from the CDF file Returns ======= out : numpy.array converted data """ result = self._flip_array(buffer) #Convert to derived types cdftype = self.zvar.type() if not self.zvar._raw: if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str != bytes: dt = numpy.dtype('U{0}'.format(result.dtype.itemsize)) result = numpy.require(numpy.char.array(result).decode(), dtype=dt) elif cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(result) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(result) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(result) return result def convert_output_array(self, buffer): """Convert a buffer of data that will go into this slice Parameters ========== buffer : numpy.array data to go into the CDF file Returns ======= out : numpy.array input with majority flipped and dimensions reversed to be suitable to pass directly to CDF library. """ buffer = self._flip_array(buffer) return numpy.require(buffer, requirements=('C', 'A', 'W')) def _flip_array(self, data): """ Operations for majority, etc. common between convert_input and _output """ cdftype = self.zvar.type() #Flip majority if any non-degenerate dimensions exist if self.column and not min(self.degen): #Record-number dim degen, swap whole thing if self.degen[0]: if cdftype == const.CDF_EPOCH16.value: #Maintain last dimension data = data.transpose( list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose() #Record-number dimension is not degenerate, so keep it first else: if cdftype == const.CDF_EPOCH16.value: data = data.transpose( [0] + list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose( [0] + list(range(len(data.shape) - 1, 0, -1))) #Reverse non-degenerate dimensions in rev #Remember that the degenerate indices are already gone! 
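#(illustration) a request such as var[..., ::-1] sets rev for that
#dimension; the library returned the data in forward order, so build one
#negative-step slice per reversed, non-degenerate dimension and apply it.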
if self.rev.any(): sliced = [(slice(None, None, -1) if self.rev[i] else slice(None)) for i in range(self.dims) if not self.degen[i]] if cdftype == const.CDF_EPOCH16.value: #don't reverse last dim sliced.extend(slice(None)) data = operator.getitem(data, tuple(sliced)) return data def select(self): """Selects this hyperslice in the CDF Calls the CDF library to select the CDF, variable, records, and array elements corresponding to this slice. """ args = (const.SELECT_, const.zVAR_RECNUMBER_, ctypes.c_long(self.starts[0]), const.SELECT_, const.zVAR_RECCOUNT_, ctypes.c_long(self.counts[0]), const.SELECT_, const.zVAR_RECINTERVAL_, ctypes.c_long(self.intervals[0])) if self.dims > 1: dims = self.dims - 1 args += (const.SELECT_, const.zVAR_DIMINDICES_, (ctypes.c_long * dims)(*self.starts[1:]), const.SELECT_, const.zVAR_DIMCOUNTS_, (ctypes.c_long * dims)(*self.counts[1:]), const.SELECT_, const.zVAR_DIMINTERVALS_, (ctypes.c_long * dims)(*self.intervals[1:])) self.zvar._call(*args) @staticmethod def expand_ellipsis(slices, n_dims): """Expands any ellipses into correct number of full-size slices @param slices: tuple of slices, integers, or ellipse objects @type slices: tuple @param n_dims: number of dimensions this slice is over @type n_dims: int @return: L{slices} with ellipses replaced by appropriate number of full-dimension slices @rtype: tuple @raise IndexError: if ellipses specified when already have enough dimensions """ if slices is Ellipsis: return tuple([slice(None, None, None) for i in range(n_dims)]) #Elements might be numpy arrays, so can't use in/index idx = [i for i, v in enumerate(slices) if v is Ellipsis] if not idx: #no ellipsis return slices if len(idx) > 1: #multiples! raise IndexError('Ellipses can only be used once per slice.') idx = idx[0] #how many dims to expand ellipsis to #remember the ellipsis is in len(slices) and must be replaced! extra = n_dims - len(slices) + 1 if extra < 0: raise IndexError('too many indices') result = slices[0:idx] + (slice(None), ) * extra + slices[idx+1:] return result @staticmethod def check_well_formed(data): """Checks if input data is well-formed, regular array""" d = numpy.asanyarray(data) if d.dtype == numpy.object: #this is probably going to be bad try: len(d.flat[0]) except TypeError: #at least it's not a list pass else: raise ValueError( 'Data must be well-formed, regular array of number, ' 'string, or datetime') @staticmethod def dimensions(data): """Finds the dimensions of a nested list-of-lists @param data: data of which dimensions are desired @type data: list (of lists) @return: dimensions of L{data}, in order outside-in @rtype: list of int @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) _Hyperslice.check_well_formed(d) return d.shape @staticmethod def types(data, backward=False): """Find dimensions and valid types of a nested list-of-lists Any given data may be representable by a range of CDF types; infer the CDF types which can represent this data. This breaks down to: 1. Proper kind (numerical, string, time) 2. Proper range (stores highest and lowest number) 3. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: 1. Type that matches precision of data first, then 2. integer type before float type, then 3. Smallest type first, then 4. signed type first, then 5. specifically-named (CDF_BYTE) vs. 
generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if L{data} specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: 1. absolute values between 0 and 3e-39 2. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. @param data: data for which dimensions and CDF types are desired @type data: list (of lists) @param backward: limit to pre-CDF3 types @type backward: bool @return: dimensions of L{data}, in order outside-in; CDF types which can represent this data; number of elements required (i.e. length of longest string) @rtype: 3-tuple of lists ([int], [ctypes.c_long], [int]) @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) dims = d.shape elements = 1 types = [] _Hyperslice.check_well_formed(d) if d.dtype.kind in ('S', 'U'): #it's a string types = [const.CDF_CHAR, const.CDF_UCHAR] elements = d.dtype.itemsize if d.dtype.kind == 'U': #UTF-8 uses 4 bytes per elements //= 4 elif d.size and hasattr(numpy.ma.getdata(d).flat[0], 'microsecond'): if max((dt.microsecond % 1000 for dt in d.flat)) > 0: types = [const.CDF_EPOCH16, const.CDF_EPOCH, const.CDF_TIME_TT2000] else: types = [const.CDF_EPOCH, const.CDF_EPOCH16, const.CDF_TIME_TT2000] if backward: del types[types.index(const.CDF_EPOCH16)] del types[-1] elif not lib.supports_int8: del types[-1] elif d is data or isinstance(data, numpy.generic): #numpy array came in, use its type (or byte-swapped) types = [k for k in lib.numpytypedict if (lib.numpytypedict[k] == d.dtype or lib.numpytypedict[k] == d.dtype.newbyteorder()) and not k in lib.timetypes] if (not lib.supports_int8 or backward) \ and const.CDF_INT8.value in types: del types[types.index(const.CDF_INT8.value)] #Maintain priority to match the ordered lists below: #float/double (44, 45) before real (21/22), and #byte (41) before int (1) before char (51). So hack. #Consider making typedict an ordered dict once 2.6 is dead. types.sort(key=lambda x: x % 50, reverse=True) if not types: #not a numpy array, or can't parse its type if d.dtype.kind == 'O': #Object. 
Try to make it numeric #Can't do safe casting from Object, so try and compare #Basically try most restrictive to least restrictive trytypes = (numpy.uint64, numpy.int64, numpy.float64) for t in trytypes: try: newd = d.astype(dtype=t) except: #Failure to cast, try next type continue if (newd == d).all(): #Values preserved, use this type d = newd #Continue with normal guessing, as if a list break else: #fell through without a match raise ValueError( 'Cannot convert generic objects to CDF type.') if d.dtype.kind in ('i', 'u'): #integer minval = numpy.min(d) maxval = numpy.max(d) if minval < 0: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_INT2, const.CDF_INT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 15, 2 ** 31, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] else: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_UINT1, const.CDF_INT2, const.CDF_UINT2, const.CDF_INT4, const.CDF_UINT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 8, 2 ** 15, 2 ** 16, 2 ** 31, 2 ** 32, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] types = [t for (t, c) in zip(types, cutoffs) if c > maxval and (minval >= 0 or minval >= -c)] if (not lib.supports_int8 or backward) \ and const.CDF_INT8 in types: del types[types.index(const.CDF_INT8)] else: #float if dims is (): if d != 0 and (abs(d) > 1.7e38 or abs(d) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] else: absolutes = numpy.abs(d[d != 0]) if len(absolutes) > 0 and \ (numpy.max(absolutes) > 1.7e38 or numpy.min(absolutes) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] types = [t.value if hasattr(t, 'value') else t for t in types] return (dims, types, elements) @staticmethod def reorder(seq): """Reorders seq to switch array majority Used to take an array of subscripts between row and column majority. First element is not touched, being the record number. @param seq: a sequence of *subscripts* @type seq: sequence of integers @return: seq with all but element 0 reversed in order @rtype: sequence of integers """ return numpy.concatenate((seq[0:1], numpy.flipud(seq)[:-1])) @staticmethod def convert_range(start, stop, step, size): """Converts a start/stop/step range to start/count/interval (i.e. changes from Python-style slice to CDF-style) @param start: index to start a slice at, may be none or negative @type start: int @param stop: index at end of slice (one-past, standard Python), may be none or negative @type stop: int @param step: interval for stepping through stlice @type step: int @param size: size of list to slice @type size: int @return: (start, count, interval, rev) where: 1. start is the start index, normalized to be within the size of the list and negatives handled 2. count is the number of records in the slice, guaranteed to stop before the end 3. interval is the skip between records 4. rev indicates whether the sequence should be reversed @rtype: (int, int, int, boolean) """ (start, stop, step) = slice(start, stop, step).indices(size) if step < 0: step *= -1 count = int((start - stop + step - 1) / step) start = start - (count - 1) * step rev = True else: count = int((stop - start + step - 1) / step) rev = False if count < 0: count = 0 start = 0 return (start, count, step, rev) class Attr(MutableSequence): """An attribute, g or z, for a CDF .. 
warning:: This class should not be used directly, but only in its subclasses, :class:`gAttr` and :class:`zAttr`. The methods listed here are safe to use in the subclasses. Represents a CDF attribute, providing access to the Entries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. An introduction to CDF attributes can be found in section 2.4 of the CDF user's guide. Each element of the list is a single Entry of the appropriate type. The index to the elements is the Entry number. Multi-dimensional slicing is *not* supported; an Entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry .. autosummary:: ~Attr.append ~Attr.has_entry ~Attr.insert ~Attr.max_idx ~Attr.new ~Attr.number ~Attr.rename ~Attr.type .. automethod:: append .. automethod:: has_entry .. automethod:: insert .. automethod:: max_idx .. automethod:: new .. automethod:: number .. automethod:: rename .. automethod:: type """ def __init__(self, cdf_file, attr_name, create=False): """Initialize this attribute @param cdf_file: CDF file containing this attribute @type cdf_file: :py:class:`pycdf.CDF` @param attr_name: Name of this attribute @type attr_name: str @param create: True to create attribute, False to look up existing. @type create: bool """ self._cdf_file = cdf_file self._raw = False if isinstance(attr_name, str_classes): try: self._name = attr_name.encode('ascii') except AttributeError: self._name = attr_name attrno = ctypes.c_long() if create: self._cdf_file._call(const.CREATE_, const.ATTR_, self._name, self.SCOPE, ctypes.byref(attrno)) self._cdf_file.add_attr_to_cache( self._name, attrno.value, self.SCOPE == const.GLOBAL_SCOPE) else: #Ensure exists, and populate cache. See scope note below attrno, scope = self._cdf_file.attr_num(self._name) else: name = ctypes.create_string_buffer(const.CDF_ATTR_NAME_LEN256 + 1) scope = ctypes.c_long(0) self._cdf_file._call(const.SELECT_, const.ATTR_, ctypes.c_long(attr_name)) #Because it's possible to create a gAttr Python objecting #referencing an Attribute with variable scope, and vice-versa, #do NOT assume the scope matches #(Higher level code checks for that being a bad thing.) self._cdf_file._call( const.GET_, const.ATTR_NAME_, name, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) self._name = name.value.rstrip() if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) self._cdf_file.add_attr_to_cache(self._name, attr_name, scope) def __getitem__(self, key): """Return a slice of Entries. Because Attributes may be sparse, a multi-element slice will return None for those elements which do not have associated Entries. @param key: index or range of Entry number to return @type key: slice or int @return: a list of entries, appropriate type. @raise IndexError: if L{key} is an int and that Entry number does not exist. 
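Sketch (entry values hypothetical) of a slice over a sparse attribute where Entries 0 and 2 exist but 1 does not:

>>> attribute[0:3]
[1, None, 3]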
""" if key is Ellipsis: key = slice(None, None, None) if hasattr(key, 'indices'): idx = range(*key.indices(self.max_idx() + 1)) return [self._get_entry(i) if self.has_entry(i) else None for i in idx] else: if self.has_entry(key): return self._get_entry(key) else: raise IndexError('list index ' + str(key) + ' out of range.') def _check_other_entries(self, types): """Try to get the type of this entry from others in the Attribute For zAttrs, checks if all other Entries are the same type, and at least one doesn't match its zVar, i.e. Entry type dominates (otherwise assumption is the Var type dominates). For gAttrs, checks all other Entries, and gives priority to the one that's earliest in the possible type list and exists in other Entries. This is only one component of Entry type guessing! :param list types: CDF types that are candidates (match the data) :return: The type discerned from other Entries, or None """ if self.ENTRY_ == const.zENTRY_: #If everything else is the same entry type, #and one is not the same as its var, probably #all entries should be of that type cand_et = None #The Entry type that might work one_var_diff = False #One Var has a type different from Entry for num in range(self.max_idx() + 1): if not self.has_entry(num): continue vartype = self._cdf_file[num].type() entrytype = self.type(num) if vartype != entrytype: one_var_diff = True if cand_et is None: if not entrytype in types: return None #One var has Entry with "impossible" type cand_et = entrytype elif cand_et != entrytype: return None #Two vars have Entries with different types if one_var_diff and cand_et is not None: return cand_et else: # Of those types which exist in other entries, # find the one which is earliest # in types, i.e. the preferred type entrytypes = [self.type(num) for num in range(self.max_idx() + 1) if self.has_entry(num)] entrytypes = [et for et in entrytypes if et in types] if entrytypes: return types[ min([types.index(et) for et in entrytypes])] return None def __setitem__(self, key, data): """Set a slice of Entries. @param key: index or range of Entry numbers to set @type key: slice or int @param data: the data to set these entries to. Normally each entry should be a sequence; if a scalar is provided, it is treated as a single-element list. @type data: scalar or list @raise ValueError: if size of {data} does not match size of L{key} @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. 
""" if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): #Single value, promote everything a dimension idx = (key, key + 1, 1) data = [data] else: idx = key.indices(self.max_idx() + 1) if key.step is None or key.step > 0: #Iterating forward, extend slice to match data if len(data) > len(range(*idx)): idx = (idx[0], idx[0] + idx[2] * len(data), idx[2]) #get, and check, types and sizes for all data #checks first so don't have error after changing half the Entries data_idx = -1 typelist = [] for i in range(*idx): data_idx += 1 if data_idx >= len(data): continue datum = data[data_idx] if datum is None: typelist[i] = (None, None, None) continue (dims, types, elements) = _Hyperslice.types( datum, backward=self._cdf_file.backward) if len(types) <= 0: raise ValueError('Cannot find a matching CDF type.') if len(dims) > 1: raise ValueError('Entries must be scalar or 1D.') elif len(dims) == 1 and isinstance(datum[0], str_classes): raise ValueError('Entry strings must be scalar.') entry_type = None if self.has_entry(i): #If the entry already exists, match its type entry_type = self.type(i) if not entry_type in types: entry_type = None if entry_type is None: #Check other entries for this attribute entry_type = self._check_other_entries(types) if entry_type is None and self.ENTRY_ == const.zENTRY_: #Fall back to zVar type vartype = self._cdf_file[i].type() if vartype in types: entry_type = vartype else: entry_type = types[0] elif entry_type is None: entry_type = types[0] if not entry_type in lib.numpytypedict: raise ValueError('Cannot find a matching numpy type.') typelist.append((dims, entry_type, elements)) data_idx = -1 for i in range(*idx): data_idx += 1 if data_idx >= len(data) or data[data_idx] is None: if self.has_entry(i): del self[i] continue datum = data[data_idx] (dims, entry_type, elements) = typelist[data_idx] self._write_entry(i, datum, entry_type, dims, elements) def __delitem__(self, key): """Delete a slice of Entries. @param key: index or range of Entry numbers to delete @type key: slice or int @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. """ if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): idx = (key, key + 1, 1) else: idx = key.indices(self.max_idx() + 1) for i in range(*idx): self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(i), const.DELETE_, self.ENTRY_) def __iter__(self, current=0): """Iterates over all entries in this Attribute Returns data from one entry at a time until reaches the end. @note: Returned in entry-number order. """ while current <= self.max_idx(): if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current += 1 def __reversed__(self, current=None): """Iterates over all entries in this Attribute Returns data from one entry at a time, starting at end and going to beginning. @note: Returned in entry-number order. """ if current is None: current = self.max_idx() while current >= 0: if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current -= 1 def __len__(self): """Number of Entries for this Attr. NOT same as max Entry number. 
@return: Number of Entries @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_NUMENTRIES_, ctypes.byref(count)) return count.value def __repr__(self): """Returns representation of an attribute Cannot return anything that can be eval'd to create a copy of the attribtute, so just wrap the informal representation in angle brackets. @return: all the data in this attribute @rtype: str """ return '<\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute This is an 'informal' representation in that it cannot be evaluated directly to create an L{Attr}. @return: all the data in this attribute @rtype: str """ if self._cdf_file._opened: return '\n'.join([str(item) for item in self]) else: if isinstance(self._name, str): return 'Attribute "{0}" in closed CDF {1}'.format( self._name, self._cdf_file.pathname) else: return 'Attribute "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self._cdf_file.pathname.decode('ascii')) def insert(self, index, data): """Insert an entry at a particular number Inserts entry at particular number while moving all subsequent entries to one entry number later. Does not close gaps. Parameters ========== index : int index where to put the new entry data : data for the new entry """ max_entry = self.max_idx() if index > max_entry: #Easy case self[index] = data return for i in range(max_entry, index - 1, -1): if self.has_entry(i+1): self.__delitem__(i+1) if self.has_entry(i): self.new(self.__getitem__(i), type=self.type(i), number=i+1) self[index] = data def append(self, data): """Add an entry to end of attribute Puts entry after last defined entry (does not fill gaps) Parameters ========== data : data for the new entry """ self[self.max_idx() + 1] = data def _call(self, *args, **kwargs): """Select this CDF and Attr and call the CDF internal interface @param args: Passed directly to the CDF library interface. @type args: various, see :py:mod:`ctypes`. @return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self._cdf_file._call( const.SELECT_, const.ATTR_, ctypes.c_long(self._cdf_file.attr_num(self._name)[0]), *args, **kwargs) def _entry_len(self, number): """Number of elements in an Entry @param number: number of Entry @type number: int @return: number of elements @rtype: int """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') count = ctypes.c_long(0) self._call( const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_NUMELEMS_, ctypes.byref(count)) return count.value def type(self, number, new_type=None): """Find or change the CDF type of a particular Entry number Parameters ========== number : int number of Entry to check or change Other Parameters ================ new_type type to change this Entry to, from :mod:`~pycdf.const`. Omit to only check type. Returns ======= out : int CDF variable type, see :mod:`~pycdf.const` Notes ===== If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) size = ctypes.c_long(self._entry_len(number)) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATASPEC_, new_type, size, ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') cdftype = ctypes.c_long(0) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATATYPE_, ctypes.byref(cdftype), ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') return cdftype.value def has_entry(self, number): """Check if this attribute has a particular Entry number Parameters ========== number : int number of Entry to check or change Returns ======= out : bool True if ``number`` is a valid entry number; False if not """ status = self._call(const.CONFIRM_, self.ENTRY_EXISTENCE_, ctypes.c_long(number), ignore=(const.NO_SUCH_ENTRY, )) return not status == const.NO_SUCH_ENTRY def max_idx(self): """Maximum index of Entries for this Attr Returns ======= out : int maximum Entry number """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_MAXENTRY_, ctypes.byref(count)) return count.value def new(self, data, type=None, number=None): """Create a new Entry in this Attribute .. note:: If ``number`` is provided and an Entry with that number already exists, it will be overwritten. Parameters ========== data data to put in the Entry Other Parameters ================ type : int type of the new Entry, from :mod:`~pycdf.const` (otherwise guessed from ``data``) number : int Entry number to write, default is lowest available number. """ if number is None: number = 0 while self.has_entry(number): number += 1 (dims, types, elements) = _Hyperslice.types( data, backward=self._cdf_file.backward) if type is None: #Guess based on other entries type = self._check_other_entries(types) if type is None and self.ENTRY_ == const.zENTRY_: #Try to match variable type vartype = self._cdf_file[number].type() if vartype in types: type = vartype if type is None: type = types[0] elif hasattr(type, 'value'): type = type.value self._write_entry(number, data, type, dims, elements) def number(self): """Find the attribute number for this attribute Returns ======= out : int attribute number """ no = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.ATTR_NUMBER_, self._name, ctypes.byref(no)) return no.value def global_scope(self): """Determine scope of this attribute. Returns ======= out : bool True if global (i.e. gAttr), False if zAttr """ return self._cdf_file.attr_num(self._name)[1] def rename(self, new_name): """Rename this attribute Renaming a zAttribute renames it for *all* zVariables in this CDF! 
Parameters ========== new_name : str the new name of the attribute """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_ATTR_NAME_LEN256: raise CDFError(const.BAD_ATTR_NAME) self._call(const.PUT_, const.ATTR_NAME_, enc_name) self._cdf_file.add_attr_to_cache( enc_name, *self._cdf_file.attr_num(self._name)) #still in cache del self._cdf_file._attr_info[self._name] self._name = enc_name def _get_entry(self, number): """Read an Entry associated with this L{Attr} @param number: number of Entry to return @type number: int @return: data from entry numbered L{number} @rtype: list or str """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') #Make a big enough buffer length = self._entry_len(number) cdftype = self.type(number) if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): buff = numpy.empty((), 'S{0}'.format(length), order='C') else: if not cdftype in lib.numpytypedict: raise CDFError(const.BAD_DATA_TYPE) buff = numpy.empty((length,), lib.numpytypedict[cdftype], order='C') buff = numpy.require(buff, requirements=('C', 'A', 'W')) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATA_, buff.ctypes.data_as(ctypes.c_void_p)) #decode if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): if str == bytes or self._raw: #Py2k, leave as bytes result = bytes(buff) else: #Py3k, make unicode result = str(numpy.char.array(buff).decode()) else: if not self._raw: if cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(buff) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(buff) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(buff) else: result = buff else: result = buff if length == 1: result = result[0] return result def _write_entry(self, number, data, cdf_type, dims, elements): """Write an Entry to this Attr. @param number: number of Entry to write @type number: int @param data: data to write @param cdf_type: the CDF type to write, from :py:mod:`pycdf.const` @param dims: dimensions of L{data} @type dims: list @param elements: number of elements in L{data}, 1 unless it is a string @type elements: int """ if len(dims) == 0: n_write = 1 else: n_write = dims[0] if cdf_type in (const.CDF_CHAR.value, const.CDF_UCHAR.value): data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.dtype('S' + str(elements))) n_write = elements elif cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data), except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) elif cdf_type in lib.numpytypedict: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=lib.numpytypedict[cdf_type]) else: raise CDFError(const.BAD_DATA_TYPE) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATA_, ctypes.c_long(cdf_type), ctypes.c_long(n_write), data.ctypes.data_as(ctypes.c_void_p)) def _delete(self): """Delete this Attribute Also deletes all Entries associated with it. 
""" self._call(const.DELETE_, const.ATTR_) self._cdf_file.clear_attr_from_cache(self._name) self._name = None class zAttr(Attr): """zAttribute for zVariables within a CDF. .. warning:: Because zAttributes are shared across all variables in a CDF, directly manipulating them may have unexpected consequences. It is safest to operate on zEntries via :class:`zAttrList`. .. note:: When accessing a zAttr, pyCDF exposes only the zEntry corresponding to the associated zVariable. See Also ======== :class:`Attr` """ ENTRY_ = const.zENTRY_ ENTRY_DATA_ = const.zENTRY_DATA_ SCOPE = const.VARIABLE_SCOPE ENTRY_EXISTENCE_ = const.zENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMzENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXzENTRY_ ENTRY_NUMELEMS_ = const.zENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.zENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.zENTRY_DATASPEC_ def insert(self, index, data): """Insert entry at particular index number Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError def append(self, index, data): """Add entry to end of attribute list Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError class gAttr(Attr): """Global Attribute for a CDF Represents a CDF attribute, providing access to the gEntries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. Normally accessed by providing a key to a :class:`gAttrList`: >>> attribute = cdffile.attrs['attribute_name'] >>> first_gentry = attribute[0] Each element of the list is a single gEntry of the appropriate type. The index to the elements is the gEntry number. A gEntry may be either a single string or a 1D array of numerical type. Entries of numerical type (everything but CDF_CHAR and CDF_UCHAR) with a single element are returned as scalars; multiple-element entries are returned as a list. No provision is made for accessing below the entry level; the whole list is returned at once (but Python's slicing syntax can be used to extract individual items from that list.) Multi-dimensional slicing is *not* supported; an entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry gEntries are *not* necessarily contiguous; a gAttribute may have an entry 0 and entry 2 without an entry 1. :meth:`~Attr.len` will return the *number* of gEntries; use :meth:`~Attr.max_idx` to find the highest defined gEntry number and :meth:`~Attr.has_entry` to determine if a particular gEntry number exists. Iterating over all entries is also supported:: >>> entrylist = [entry for entry in attribute] Deleting gEntries will leave a "hole": >>> attribute[0:3] = [1, 2, 3] >>> del attribute[1] >>> attribute.has_entry(1) False >>> attribute.has_entry(2) True >>> print attribute[0:3] [1, None, 3] Multi-element slices over nonexistent gEntries will return ``None`` where no entry exists. Single-element indices for nonexistent gEntries will raise ``IndexError``. Assigning ``None`` to a gEntry will delete it. 
When assigning to a gEntry, the type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing gEntry of the same number in this gAttribute #. other gEntries in this gAttribute #. data-matching constraints described in :meth:`CDF.new`. See Also ======== :class:`Attr` """ ENTRY_ = const.gENTRY_ ENTRY_DATA_ = const.gENTRY_DATA_ SCOPE = const.GLOBAL_SCOPE ENTRY_EXISTENCE_ = const.gENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMgENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXgENTRY_ ENTRY_NUMELEMS_ = const.gENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.gENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.gENTRY_DATASPEC_ class AttrList(MutableMapping): """Object representing a list of attributes. .. warning:: This class should not be used directly, but only via its subclasses, :class:`gAttrList` and :class:`zAttrList`. Methods listed here are safe to use from the subclasses. .. autosummary:: ~AttrList.clone ~AttrList.copy ~AttrList.from_dict ~AttrList.new ~AttrList.rename .. automethod:: clone .. automethod:: copy .. automethod:: from_dict .. automethod:: new .. automethod:: rename """ def __init__(self, cdf_file, special_entry=None): """Initialize the attribute collection @param cdf_file: CDF these attributes are in @type cdf_file: :py:class:`pycdf.CDF` @param special_entry: callable which returns a "special" entry number, used to limit results for zAttrs to those which match the zVar (i.e. the var number) @type special_entry: callable """ self._cdf_file = cdf_file self.special_entry = special_entry def __getitem__(self, name): """Find an Attribute by name @param name: name of the Attribute to return @type name: str @return: attribute named L{name} @rtype: L{Attr} @raise KeyError: if there is no attribute named L{name} @raise CDFError: other errors in CDF library """ try: attrib = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attrib.global_scope() != self.global_scope: raise KeyError(name + ': no ' + self.attr_name + ' by that name.') return attrib def __setitem__(self, name, data): """Create an Attribute or change its entries @param name: name of Attribute to change @type name: str @param data: Entries to populate this Attribute with. Any existing Entries will be deleted! Another C{Attr} may be specified, in which case all its entries are copied. @type data: scalar, list, or L{Attr} """ if isinstance(data, AttrList): if name in self: del self[name] attr = self._get_or_create(name) for entryno in range(data.max_idx()): if data.has_entry(entryno): attr.new(data[entryno], data.type(entryno), entryno) else: attr = self._get_or_create(name) if isinstance(data, str_classes): data = [data] else: try: junk = len(data) except TypeError: data = [data] attr[:] = data del attr[len(data):] def __delitem__(self, name): """Delete an Attribute (and all its entries) @param name: name of Attribute to delete @type name: str """ try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) attr._delete() def __iter__(self, current=0): """Iterates over all Attr in this CDF or variable Returns name of one L{Attr} at a time until reaches the end. @note: Returned in number order. 
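Example (illustrative; ``cdffile`` is assumed to be an open CDF):

>>> global_attr_names = [name for name in cdffile.attrs]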
""" count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) while current < count.value: candidate = self.AttrType(self._cdf_file, current) if candidate.global_scope() == self.global_scope: if self.special_entry is None or \ candidate.has_entry(self.special_entry()): if str == bytes: value = yield(candidate._name) else: value = yield(candidate._name.decode()) if value != None: current = self[value].number() current += 1 def __repr__(self): """Returns representation of attribute list Cannot return anything that can be eval'd to create a copy of the list, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<' + self.__class__.__name__ + ':\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute list This is an 'informal' representation in that it cannot be evaluated directly to create an L{AttrList}. @return: all the data in this list of attributes @rtype: str """ if self._cdf_file._opened: return '\n'.join([key + ': ' + ( ('\n' + ' ' * (len(key) + 2)).join( [str(value[i]) + ' [' + lib.cdftypenames[value.type(i)] + ']' for i in range(value.max_idx() + 1) if value.has_entry(i)]) if isinstance(value, Attr) else str(value) + ' [' + lib.cdftypenames[self.type(key)] + ']' ) for (key, value) in sorted(self.items())]) else: if isinstance(self._cdf_file.pathname, str): return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname) else: return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname.decode('ascii')) def clone(self, master, name=None, new_name=None): """ Clones another attribute list, or one attribute from it, into this list. Parameters ========== master : AttrList the attribute list to copy from. This can be any dict-like object. Other Parameters ================ name : str (optional) name of attribute to clone (default: clone entire list) new_name : str (optional) name of the new attribute, default ``name`` """ if name is None: self._clone_list(master) else: self._clone_attr(master, name, new_name) def copy(self): """ Create a copy of this attribute list Returns ======= out : dict copy of the entries for all attributes in this list """ return dict((key, value[:] if isinstance(value, Attr) else value) for (key, value) in self.items()) def new(self, name, data=None, type=None): """ Create a new Attr in this AttrList Parameters ========== name : str name of the new Attribute Other Parameters ================ data data to put into the first entry in the new Attribute type CDF type of the first entry from :mod:`~pycdf.const`. Only used if data are specified. Raises ====== KeyError : if the name already exists in this list """ if name in self: raise KeyError(name + ' already exists.') #A zAttr without an Entry in this zVar will be a "get" not "create" attr = self._get_or_create(name) if data is not None: if self.special_entry is None: attr.new(data, type) else: attr.new(data, type, self.special_entry()) def rename(self, old_name, new_name): """ Rename an attribute in this list Renaming a zAttribute renames it for *all* zVariables in this CDF! Parameters ========== old_name : str the current name of the attribute new_name : str the new name of the attribute """ AttrList.__getitem__(self, old_name).rename(new_name) def from_dict(self, in_dict): """ Fill this list of attributes from a dictionary .. deprecated:: 0.1.5 Use :meth:`~pycdf.AttrList.clone` instead; it supports cloning from dictionaries. 
Parameters ========== in_dict : dict Attribute list is populated entirely from this dictionary; all existing attributes are deleted. """ warnings.warn("from_dict is deprecated and will be removed. Use clone.", DeprecationWarning) for k in in_dict: self[k] = in_dict[k] for k in list(self): if not k in in_dict: del self[k] def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{AttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name self[new_name] = master[name] def _clone_list(self, master): """Clones this attribute list from another @param master: the attribute list to copy from @type master: L{AttrList} """ for name in master: self._clone_attr(master, name) for name in list(self): #Can't iterate over a list we're changing if not name in master: del self[name] def _get_or_create(self, name): """Retrieve L{Attr} or create it if it doesn't exist @param name: name of the attribute to look up or create @type name: str @return: attribute with this name @rtype: L{Attr} """ attr = None try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status != const.NO_SUCH_ATTR: raise if attr is None: attr = self.AttrType(self._cdf_file, name, True) elif attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) return attr class gAttrList(AttrList): """ Object representing *all* the gAttributes in a CDF. Normally accessed as an attribute of an open :class:`CDF`: >>> global_attribs = cdffile.attrs Appears as a dictionary: keys are attribute names; each value is an attribute represented by a :class:`gAttr` object. To access the global attribute TEXT: >>> text_attr = cdffile.attrs['TEXT'] See Also ======== :class:`AttrList` """ AttrType = gAttr attr_name = 'gAttribute' global_scope = True def __len__(self): """ Number of gAttributes in this CDF Returns ======= out : int number of gAttributes in the CDF """ count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMgATTRS_, ctypes.byref(count)) return count.value class zAttrList(AttrList): """Object representing *all* the zAttributes in a zVariable. Normally accessed as an attribute of a :class:`Var` in an open CDF: >>> epoch_attribs = cdffile['Epoch'].attrs Appears as a dictionary: keys are attribute names, values are the value of the zEntry associated with the appropriate zVariable. Each vAttribute in a CDF may only have a *single* entry associated with each variable. The entry may be a string, a single numerical value, or a series of numerical values. Entries with multiple values are returned as an entire list; direct access to the individual elements is not possible. Example: finding the first dependency of (ISTP-compliant) variable ``Flux``: >>> print cdffile['Flux'].attrs['DEPEND_0'] zAttributes are shared among zVariables, one zEntry allowed per zVariable. (pyCDF hides this detail.) Deleting the last zEntry for a zAttribute will delete the underlying zAttribute. zEntries are created and destroyed by the usual dict methods on the zAttrlist: >>> epoch_attribs['new_entry'] = [1, 2, 4] #assign a list to new zEntry >>> del epoch_attribs['new_entry'] #delete the zEntry The type of the zEntry is guessed from data provided. 
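Continuing the example above, the guessed type can be inspected afterwards (illustrative only):

>>> epoch_attribs['new_entry'] = [1, 2, 4]
>>> lib.cdftypenames[epoch_attribs.type('new_entry')]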
The type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing zEntry corresponding to this zVar #. other zEntries in this zAttribute #. the type of this zVar #. data-matching constraints described in :py:meth:`CDF.new` See Also ======== :class:`AttrList` """ AttrType = zAttr attr_name = 'zAttribute' global_scope = False def __init__(self, zvar): """Initialize the attribute collection @param zvar: zVariable these attributes are in @param zvar: :py:class:`pycdf.Var` """ super(zAttrList, self).__init__(zvar.cdf_file, zvar._num) self._zvar = zvar def __getitem__(self, name): """Find an zEntry by name @param name: name of the zAttribute to return @type name: str @return: attribute named L{name} @rtype: L{zAttr} @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if attrib.has_entry(zvar_num): attrib._raw = self._zvar._raw return attrib[zvar_num] else: raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) def __delitem__(self, name): """Delete an zEntry by name @param name: name of the zEntry to delete @type name: str @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library @note: If this is the only remaining entry, the Attribute will be deleted. """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(str(name) + ': no such attribute for variable ' + str(self._zvar._name)) del attrib[zvar_num] if len(attrib) == 0: attrib._delete() def __setitem__(self, name, data): """Sets a zEntry by name The type of the zEntry is guessed from L{data}. The type is chosen to match the data; subject to that constraint, it will try to match (in order): 1. existing zEntry corresponding to this zVar 2. other zEntries in this zAttribute 3. the type of this zVar 4. data-matching constraints described in L{_Hyperslice.types} @param name: name of zAttribute; zEntry for this zVariable will be set in zAttribute by this name @type name: str @raise CDFError: errors in CDF library @raise ValueError: if unable to find a valid CDF type matching L{data}, or if L{data} is the wrong dimensions. """ try: attr = super(zAttrList, self).__getitem__(name) except KeyError: attr = zAttr(self._cdf_file, name, True) attr._raw = self._zvar._raw attr[self._zvar._num()] = data def __len__(self): """Number of zAttributes in this variable @return: number of zAttributes in the CDF which have entries for this variable. @rtype: int """ length = 0 count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) current = 0 while current < count.value: candidate = zAttr(self._cdf_file, current) if not candidate.global_scope(): if candidate.has_entry(self._zvar._num()): length += 1 current += 1 return length def type(self, name, new_type=None): """Find or change the CDF type of a zEntry in this zVar @param name: name of the zAttr to check or change @type name: str @param new_type: type to change it to, see :py:mod:`pycdf.const` @type new_type: ctypes.c_long @return: CDF variable type, see :py:mod:`pycdf.const` @rtype: int @note: If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) return attrib.type(zvar_num, new_type) def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{zAttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name if new_name in self: del self[new_name] self.new(new_name, master[name], master.type(name) if hasattr(master, 'type') else None)
type
Find or change the CDF type of a zEntry in this zVar @param name: name of the zAttr to check or change @type name: str @param new_type: type to change it to, see :py:mod:`pycdf.const` @type new_type: ctypes.c_long @return: CDF variable type, see :py:mod:`pycdf.const` @rtype: int @note: If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 57
#!/usr/bin/env python # -*- coding: utf-8 -*- """ das developers note: This a is modification of the original SpacePy pycdf package. All refereneces to the greater spacepy package have been removed to create a small standalone module. --cwp 2018-10-18 The libcdf.so location code has been changed to find the version installed in anaconda. --cwp 2020-04-06 This package provides a Python interface to the Common Data Format (CDF) library used for many NASA missions, available at http://cdf.gsfc.nasa.gov/. It is targeted at Python 2.6+ and should work without change on either Python 2 or Python 3. The interface is intended to be 'pythonic' rather than reproducing the C interface. To open or close a CDF and access its variables, see the :class:`CDF` class. Accessing data within the variables is via the :class:`Var` class. The :data:`lib` object provides access to some routines that affect the functionality of the library in general. The :mod:`~pycdf.const` module contains constants useful for accessing the underlying library. Authors: Jon Niehof Institution: University of New Hampshire Contact: [email protected] Copyright 2010-2015 Los Alamos National Security, LLC. """ __contact__ = 'Jon Niehof, [email protected]' try: from collections.abc import MutableMapping, MutableSequence except ImportError: from collections import MutableMapping, MutableSequence import ctypes import ctypes.util import datetime import operator import os import os.path import shutil import sys import tempfile import warnings import weakref import numpy import numpy.ma #Import const AFTER library loaded, so failed load doesn't leave half-imported #from . import const try: str_classes = (str, bytes, unicode) except NameError: str_classes = (str, bytes) class Library(object): """ Abstraction of the base CDF C library and its state. Not normally intended for end-user use. An instance of this class is created at package load time as the :data:`~pycdf.lib` variable, providing access to the underlying C library if necessary. The CDF library itself is described in section 2.1 of the CDF user's guide, as well as the CDF C reference manual. Calling the C library directly requires knowledge of :mod:`ctypes`. Instantiating this object loads the C library, see :doc:`/pycdf` docs for details. .. autosummary:: ~Library.call ~Library.check_status ~Library.datetime_to_epoch ~Library.datetime_to_epoch16 ~Library.datetime_to_tt2000 ~Library.epoch_to_datetime ~Library.epoch_to_epoch16 ~Library.epoch_to_num ~Library.epoch_to_tt2000 ~Library.epoch16_to_datetime ~Library.epoch16_to_epoch ~Library.epoch16_to_tt2000 ~Library.set_backward supports_int8 ~Library.tt2000_to_datetime ~Library.tt2000_to_epoch ~Library.tt2000_to_epoch16 v_datetime_to_epoch v_datetime_to_epoch16 v_datetime_to_tt2000 v_epoch_to_datetime v_epoch_to_tt2000 v_epoch16_to_datetime v_epoch16_to_tt2000 v_tt2000_to_datetime v_tt2000_to_epoch v_tt2000_to_epoch16 libpath version .. automethod:: call .. automethod:: check_status .. automethod:: datetime_to_epoch .. automethod:: datetime_to_epoch16 .. automethod:: datetime_to_tt2000 .. automethod:: epoch_to_datetime .. automethod:: epoch_to_epoch16 .. automethod:: epoch_to_num .. automethod:: epoch_to_tt2000 .. automethod:: epoch16_to_datetime .. automethod:: epoch16_to_epoch .. automethod:: epoch16_to_tt2000 .. automethod:: set_backward .. attribute:: supports_int8 True if this library supports INT8 and TIME_TT2000 types; else False. .. automethod:: tt2000_to_datetime .. automethod:: tt2000_to_epoch .. 
automethod:: tt2000_to_epoch16 .. method:: v_datetime_to_epoch(datetime) A vectorized version of :meth:`datetime_to_epoch` which takes a numpy array of datetimes as input and returns an array of epochs. .. method:: v_datetime_to_epoch16(datetime) A vectorized version of :meth:`datetime_to_epoch16` which takes a numpy array of datetimes as input and returns an array of epoch16. .. method:: v_datetime_to_tt2000(datetime) A vectorized version of :meth:`datetime_to_tt2000` which takes a numpy array of datetimes as input and returns an array of TT2000. .. method:: v_epoch_to_datetime(epoch) A vectorized version of :meth:`epoch_to_datetime` which takes a numpy array of epochs as input and returns an array of datetimes. .. method:: v_epoch_to_tt2000(epoch) A vectorized version of :meth:`epoch_to_tt2000` which takes a numpy array of epochs as input and returns an array of tt2000s. .. method:: v_epoch16_to_datetime(epoch0, epoch1) A vectorized version of :meth:`epoch16_to_datetime` which takes a numpy array of epoch16 as input and returns an array of datetimes. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_epoch16_to_tt2000(epoch16) A vectorized version of :meth:`epoch16_to_tt2000` which takes a numpy array of epoch16 as input and returns an array of tt2000s. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_tt2000_to_datetime(tt2000) A vectorized version of :meth:`tt2000_to_datetime` which takes a numpy array of tt2000 as input and returns an array of datetimes. .. method:: v_tt2000_to_epoch(tt2000) A vectorized version of :meth:`tt2000_to_epoch` which takes a numpy array of tt2000 as input and returns an array of epochs. .. method:: v_tt2000_to_epoch16(tt2000) A vectorized version of :meth:`tt2000_to_epoch16` which takes a numpy array of tt2000 as input and returns an array of epoch16. .. attribute:: libpath The path where pycdf found the CDF C library, potentially useful in debugging. If this contains just the name of a file (with no path information), then the system linker found the library for pycdf. On Linux, ``ldconfig -p`` may be useful for displaying the system's library resolution. .. attribute:: version Version of the CDF library, (version, release, increment, subincrement) """ def __init__(self, libpath=None, library=None): """Load the CDF C library. Searches for the library in the order: 1. Appropriately-named file in CDF_LIB 2. Appropriately-named file in CDF_BASE 3. Standard library search path @raise CDFError: BAD_DATA_TYPE if can't map types properly """ if not 'CDF_TMP' in os.environ: os.environ['CDF_TMP'] = tempfile.gettempdir() if not library: if not libpath: self.libpath, self._library = self._find_lib() if self._library is None: raise Exception(( 'Cannot load CDF C library; checked {0}. 
' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(self.libpath))) else: self._library = ctypes.CDLL(libpath) self.libpath = libpath else: self._library = library self.libpath = libpath self._library.CDFlib.restype = ctypes.c_long #commonly used, so set it up here self._library.EPOCHbreakdown.restype = ctypes.c_long self._library.computeEPOCH.restype = ctypes.c_double self._library.computeEPOCH.argtypes = [ctypes.c_long] * 7 self._library.computeEPOCH16.restype = ctypes.c_double self._library.computeEPOCH16.argtypes = [ctypes.c_long] * 10 + \ [ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDFsetFileBackward'): self._library.CDFsetFileBackward.restype = None self._library.CDFsetFileBackward.argtypes = [ctypes.c_long] #Map old name to the 3.7.1+ name if not hasattr(self._library, 'computeTT2000') \ and hasattr(self._library, 'CDF_TT2000_from_UTC_parts'): self._library.computeTT2000 \ = self._library.CDF_TT2000_from_UTC_parts if hasattr(self._library, 'computeTT2000'): self._library.computeTT2000.restype = ctypes.c_longlong self._library.computeTT2000.argtypes = \ [ctypes.c_double] *9 #Map old name to the 3.7.1+ name if not hasattr(self._library, 'breakdownTT2000') \ and hasattr(self._library, 'CDF_TT2000_to_UTC_parts'): self._library.breakdownTT2000 \ = self._library.CDF_TT2000_to_UTC_parts if hasattr(self._library, 'breakdownTT2000'): self._library.breakdownTT2000.restype = None self._library.breakdownTT2000.argtypes = \ [ctypes.c_longlong] + [ctypes.POINTER(ctypes.c_double)] * 9 if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH'): self._library.CDF_TT2000_to_UTC_EPOCH.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH.argtypes = [ctypes.c_longlong] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH'): self._library.CDF_TT2000_from_UTC_EPOCH.restype = ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH.argtypes = [ctypes.c_double] if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH16'): self._library.CDF_TT2000_to_UTC_EPOCH16.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH16.argtypes = \ [ctypes.c_longlong, ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH16'): self._library.CDF_TT2000_from_UTC_EPOCH16.restype = \ ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH16.argtypes = \ [ctypes.POINTER(ctypes.c_double * 2)] #Get CDF version information ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) sub = ctypes.c_char(b' ') self.call(const.GET_, const.LIB_VERSION_, ctypes.byref(ver), const.GET_, const.LIB_RELEASE_, ctypes.byref(rel), const.GET_, const.LIB_INCREMENT_, ctypes.byref(inc), const.GET_, const.LIB_subINCREMENT_, ctypes.byref(sub)) ver = ver.value rel = rel.value inc = inc.value sub = sub.value self.version = (ver, rel, inc, sub) self._del_middle_rec_bug = ver < 3 or (ver == 3 and (rel < 4 or (rel == 4 and inc < 1))) self.supports_int8 = (ver > 3 or (ver == 3 and rel >=4)) self.cdftypenames = {const.CDF_BYTE.value: 'CDF_BYTE', const.CDF_CHAR.value: 'CDF_CHAR', const.CDF_INT1.value: 'CDF_INT1', const.CDF_UCHAR.value: 'CDF_UCHAR', const.CDF_UINT1.value: 'CDF_UINT1', const.CDF_INT2.value: 'CDF_INT2', const.CDF_UINT2.value: 'CDF_UINT2', const.CDF_INT4.value: 'CDF_INT4', const.CDF_UINT4.value: 'CDF_UINT4', const.CDF_INT8.value: 'CDF_INT8', const.CDF_FLOAT.value: 'CDF_FLOAT', const.CDF_REAL4.value: 'CDF_REAL4', const.CDF_DOUBLE.value: 'CDF_DOUBLE', const.CDF_REAL8.value: 'CDF_REAL8', const.CDF_EPOCH.value: 'CDF_EPOCH', 
const.CDF_EPOCH16.value: 'CDF_EPOCH16', const.CDF_TIME_TT2000.value: 'CDF_TIME_TT2000', } self.numpytypedict = {const.CDF_BYTE.value: numpy.int8, const.CDF_CHAR.value: numpy.int8, const.CDF_INT1.value: numpy.int8, const.CDF_UCHAR.value: numpy.uint8, const.CDF_UINT1.value: numpy.uint8, const.CDF_INT2.value: numpy.int16, const.CDF_UINT2.value: numpy.uint16, const.CDF_INT4.value: numpy.int32, const.CDF_UINT4.value: numpy.uint32, const.CDF_INT8.value: numpy.int64, const.CDF_FLOAT.value: numpy.float32, const.CDF_REAL4.value: numpy.float32, const.CDF_DOUBLE.value: numpy.float64, const.CDF_REAL8.value: numpy.float64, const.CDF_EPOCH.value: numpy.float64, const.CDF_EPOCH16.value: numpy.dtype((numpy.float64, 2)), const.CDF_TIME_TT2000.value: numpy.int64, } self.timetypes = [const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value] if not self.supports_int8: del self.cdftypenames[const.CDF_INT8.value] del self.numpytypedict[const.CDF_INT8.value] del self.cdftypenames[const.CDF_TIME_TT2000.value] del self.numpytypedict[const.CDF_TIME_TT2000.value] elif sys.platform.startswith('linux') \ and os.uname()[4].startswith('arm') \ and hasattr(self._library, 'computeTT2000') \ and self._library.computeTT2000( 2010, 1, 1, 0, 0, 0, 0, 0, 0) != 315576066184000000: #TT2000 call failed, so probably need to type-pun #double arguments to variadic functions. #Calling convention for non-variadic functions with floats #is unique, but convention for ints is same as variadic. #So type-pun arguments to integers to force that calling #convention. if ctypes.sizeof(ctypes.c_longlong) != \ ctypes.sizeof(ctypes.c_double): warnings.warn('ARM with unknown type sizes; ' 'TT2000 functions will not work.') else: self._library.computeTT2000.argtypes = \ [ctypes.c_longlong] * 9 c_ll_p = ctypes.POINTER(ctypes.c_longlong) if self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( 2010)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) != 315576066184000000: warnings.warn('ARM with unknown calling convention; ' 'TT2000 functions will not work.') self.datetime_to_tt2000 = self._datetime_to_tt2000_typepunned v_epoch16_to_datetime = numpy.frompyfunc( self.epoch16_to_datetime, 2, 1) self.v_epoch16_to_datetime = \ lambda x: v_epoch16_to_datetime(x[..., 0], x[..., 1]) self.v_epoch_to_datetime = numpy.frompyfunc( self.epoch_to_datetime, 1, 1) self.v_tt2000_to_datetime = numpy.frompyfunc( self.tt2000_to_datetime, 1, 1) self.v_datetime_to_epoch = numpy.vectorize( self.datetime_to_epoch, otypes=[numpy.float64]) v_datetime_to_epoch16 = numpy.frompyfunc( self.datetime_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. 
We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_datetime_to_epoch16(x): retval = numpy.require(v_datetime_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_datetime_to_epoch16 = _v_datetime_to_epoch16 self.v_datetime_to_tt2000 = numpy.vectorize( self.datetime_to_tt2000, otypes=[numpy.int64]) self.v_epoch_to_tt2000 = numpy.vectorize( self.epoch_to_tt2000, otypes=[numpy.int64]) self.v_tt2000_to_epoch = numpy.vectorize( self.tt2000_to_epoch, otypes=[numpy.float64]) v_epoch16_to_tt2000 = numpy.frompyfunc( self.epoch16_to_tt2000, 2, 1) self.v_epoch16_to_tt2000 = \ lambda x: v_epoch16_to_tt2000(x[..., 0], x[..., 1]) v_tt2000_to_epoch16 = numpy.frompyfunc( self.tt2000_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_tt2000_to_epoch16(x): retval = numpy.require(v_tt2000_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_tt2000_to_epoch16 = _v_tt2000_to_epoch16 if not self.supports_int8: self.datetime_to_tt2000 = self._bad_tt2000 self.tt2000_to_datetime = self._bad_tt2000 self.v_datetime_to_tt2000 = self._bad_tt2000 self.v_tt2000_to_datetime = self._bad_tt2000 self.epoch_to_tt2000 = self._bad_tt2000 self.v_epoch_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch = self._bad_tt2000 self.v_tt2000_to_epoch = self._bad_tt2000 self.epoch_16_to_tt2000 = self._bad_tt2000 self.v_epoch16_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch16 = self._bad_tt2000 self.v_tt2000_to_epoch16 = self._bad_tt2000 #Default to V2 CDF self.set_backward(True) @staticmethod def _find_lib(): """ Search for the CDF library Searches in likely locations for CDF libraries and attempts to load them. Stops at first successful load and, if fails, reports all the files that were tried as libraries. Returns ======= out : tuple This is either (path to library, loaded library) or, in the event of failure, (None, list of libraries tried) """ failed = [] for libpath in Library._lib_paths(): try: lib = ctypes.CDLL(libpath) except: failed.append(libpath) else: return libpath, lib return (failed, None) @staticmethod def _lib_paths(): """Find candidate paths for the CDF library Does not check that the library is actually in any particular directory, just returns a list of possible locations, in priority order. Returns ======= out : generator of str paths that look like the CDF library """ #What the library might be named names = { 'win32': ['cdf.dll'], 'darwin': ['libcdf.dylib', 'cdf.dylib', 'libcdf.so'], 'linux2': ['libcdf.so'], 'linux': ['libcdf.so'], } names = names.get(sys.platform, ['libcdf.so']) #All existing CDF-library-like paths within a directory search_dir = lambda x: \ [os.path.join(x, fname) for fname in names if os.path.exists(os.path.join(x, fname))] # Only use anaconda locations... # Defined during builds ... if 'PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['PREFIX'], 'lib')): yield p # defined when conda is activated ... 
if 'CONDA_PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'lib')): yield p # Special subdirectory for anaconda unix packages on windows if 'LIBRARY_BIN' in os.environ: for p in search_dir(os.environ['LIBRARY_BIN']): yield p ctypespath = ctypes.util.find_library( 'cdf.dll' if sys.platform == 'win32' else 'cdf') if ctypespath: yield ctypespath def check_status(self, status, ignore=()): """ Raise exception or warning based on return status of CDF call Parameters ========== status : int status returned by the C library Other Parameters ================ ignore : sequence of ctypes.c_long CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. (Default none). Raises ====== CDFError : if status < CDF_WARN, indicating an error Warns ===== CDFWarning : if CDF_WARN <= status < CDF_OK, indicating a warning. Returns ======= out : int status (unchanged) """ if status == const.CDF_OK or status in ignore: return status if status < const.CDF_WARN: raise CDFError(status) else: warning = CDFWarning(status) warning.warn() return status def call(self, *args, **kwargs): """ Call the CDF internal interface Passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with :meth:`check_status`. Terminal NULL is automatically added to args. Parameters ========== args : various, see :mod:`ctypes` Passed directly to the CDF library interface. Useful constants are defined in the :mod:`~pycdf.const` module. Other Parameters ================ ignore : sequence of CDF statuses sequence of CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. Returns ======= out : int CDF status from the library Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ if 'ignore' in kwargs: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) ), kwargs['ignore']) else: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) )) def set_backward(self, backward=True): """ Set backward compatibility mode for new CDFs Unless backward compatible mode is set, CDF files created by the version 3 library can not be read by V2. Parameters ========== backward : boolean Set backward compatible mode if True; clear it if False. Raises ====== ValueError : if backward=False and underlying CDF library is V2 """ if self.version[0] < 3: if not backward: raise ValueError( 'Cannot disable backward-compatible mode for CDF version 2.') else: return self._library.CDFsetFileBackward(const.BACKWARDFILEon if backward else const.BACKWARDFILEoff) def epoch_to_datetime(self, epoch): """ Converts a CDF epoch value to a datetime Parameters ========== epoch : float epoch value from CDF Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
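Round-trip sketch (illustrative; uses :meth:`datetime_to_epoch` from this class and the standard :mod:`datetime` module):

>>> lib.epoch_to_datetime(lib.datetime_to_epoch(datetime.datetime(2010, 1, 1)))
datetime.datetime(2010, 1, 1, 0, 0)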
See Also ======== v_epoch_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) self._library.EPOCHbreakdown(ctypes.c_double(epoch), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999000) else: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, msec.value * 1000) def datetime_to_epoch(self, dt): """ Converts a Python datetime to a CDF Epoch value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : float epoch corresponding to dt See Also ======== v_datetime_to_epoch """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) micro = dt.microsecond % 1000 if micro >= 500 and dt.year < 9999: dt += datetime.timedelta(0, 0, 1000) return self._library.computeEPOCH(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000)) def epoch16_to_datetime(self, epoch0, epoch1): """ Converts a CDF epoch16 value to a datetime .. note:: The call signature has changed since SpacePy 0.1.2. Formerly this method took a single argument with two values; now it requires two arguments (one for each value). To convert existing code, replace ``epoch16_to_datetime(epoch)`` with ``epoch16_to_datetime(*epoch)``. Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. See Also ======== v_epoch16_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) usec = ctypes.c_long(0) nsec = ctypes.c_long(0) psec = ctypes.c_long(0) self._library.EPOCH16breakdown((ctypes.c_double * 2)(epoch0, epoch1), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec), ctypes.byref(psec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) micro = int(float(msec.value) * 1000 + float(usec.value) + float(nsec.value) / 1000 + float(psec.value) / 1e6 + 0.5) if micro < 1000000: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_epoch16(self, dt): """ Converts a Python datetime to a CDF Epoch16 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : list of float epoch16 corresponding to dt See Also ======== v_datetime_to_epoch16 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) #Default to "illegal epoch" epoch16 = (ctypes.c_double * 2)(-1., -1.) 
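        #computeEPOCH16 fills the two-element buffer in place; the code below
        #treats any nonzero return value as failure and keeps the sentinel.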
if self._library.computeEPOCH16(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0, 0, epoch16): return (-1., -1.) #Failure, so illegal epoch return (epoch16[0], epoch16[1]) def epoch_to_epoch16(self, epoch): """ Converts a CDF EPOCH to a CDF EPOCH16 value Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : (double, double) EPOCH16 corresponding to epoch """ e = numpy.require(epoch, numpy.float64) s = numpy.trunc(e / 1000.0) #ugly numpy stuff, probably a better way.... res = numpy.hstack((s, (e - s * 1000.0) * 1e9)) if len(res) <= 2: return res newshape = list(res.shape[0:-2]) newshape.append(res.shape[-1] // 2) newshape.append(2) return numpy.rollaxis(res.reshape(newshape), -1, -2) def epoch_to_num(self, epoch): """ Convert CDF EPOCH to matplotlib number. Same output as :func:`~matplotlib.dates.date2num` and useful for plotting large data sets without converting the times through datetime. Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : double Floating point number representing days since 0001-01-01. """ #date2num day 1 is 1/1/1 00UT #epoch 1/1/1 00UT is 31622400000.0 (millisecond) return (epoch - 31622400000.0) / (24 * 60 * 60 * 1000.0) + 1.0 def epoch16_to_epoch(self, epoch16): """ Converts a CDF EPOCH16 to a CDF EPOCH value Parameters ========== epoch16 : (double, double) EPOCH16 to convert. Lists and numpy arrays are acceptable. LAST dimension should be 2: the two pairs of EPOCH16 Returns ======= out : double EPOCH corresponding to epoch16 """ e = numpy.require(epoch16, numpy.float64) return e[..., 0] * 1000.0 + numpy.round(e[..., 1] / 1e9) def tt2000_to_datetime(self, tt2000): """ Converts a CDF TT2000 value to a datetime .. note:: Although TT2000 values support leapseconds, Python's datetime object does not. Any times after 23:59:59.999999 will be truncated to 23:59:59.999999. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
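Round-trip sketch (illustrative; requires TT2000 support, i.e. CDF library 3.4.0 or later, and the standard :mod:`datetime` module):

>>> lib.tt2000_to_datetime(lib.datetime_to_tt2000(datetime.datetime(2010, 1, 1)))
datetime.datetime(2010, 1, 1, 0, 0)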
See Also ======== v_tt2000_to_datetime """ yyyy = ctypes.c_double(0) mm = ctypes.c_double(0) dd = ctypes.c_double(0) hh = ctypes.c_double(0) min = ctypes.c_double(0) sec = ctypes.c_double(0) msec = ctypes.c_double(0) usec = ctypes.c_double(0) nsec = ctypes.c_double(0) self._library.breakdownTT2000( ctypes.c_longlong(tt2000), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) sec = int(sec.value) if sec >= 60: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), 59, 999999) micro = int(msec.value * 1000 + usec.value + nsec.value / 1000 + 0.5) if micro < 1000000: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_tt2000(self, dt): """ Converts a Python datetime to a CDF TT2000 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0) def _datetime_to_tt2000_typepunned(self, dt): """ Converts a Python datetime to a CDF TT2000 value Typepunned version that passes doubles as longlongs, to get around ARM calling convention oddness. Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ c_ll_p = ctypes.POINTER(ctypes.c_longlong) if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( dt.year)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.month)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.day)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.hour)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.minute)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.second)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond // 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond % 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) def epoch_to_tt2000(self, epoch): """ Converts a CDF EPOCH to a CDF TT2000 value Parameters ========== epoch : double EPOCH to convert Returns ======= out : int tt2000 corresponding to epoch See Also ======== v_epoch_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH(epoch) def tt2000_to_epoch(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH .. note:: Although TT2000 values support leapseconds, CDF EPOCH values do not. 
Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double EPOCH corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch """ return self._library.CDF_TT2000_to_UTC_EPOCH(tt2000) def epoch16_to_tt2000(self, epoch0, epoch1): """ Converts a CDF epoch16 value to TT2000 .. note:: Because TT2000 does not support picoseconds, the picoseconds value in epoch is ignored (i.e., truncated.) Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : long TT2000 corresponding to epoch. See Also ======== v_epoch16_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH16( (ctypes.c_double * 2)(epoch0, epoch1)) def tt2000_to_epoch16(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH16 .. note:: Although TT2000 values support leapseconds, CDF EPOCH16 values do not. Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double, double EPOCH16 corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch16 """ #Default to "illegal epoch" if isn't populated epoch16 = (ctypes.c_double * 2)(-1., -1.) if self._library.CDF_TT2000_to_UTC_EPOCH16(tt2000, epoch16): return (-1., -1.) #Failure; illegal epoch return (epoch16[0], epoch16[1]) def _bad_tt2000(*args, **kwargs): """Convenience function for complaining that TT2000 not supported""" raise NotImplementedError( 'TT2000 functions require CDF library 3.4.0 or later') def download_library(): """Download and install the CDF library""" if sys.platform != 'win32': raise NotImplementedError( 'CDF library install only supported on Windows') try: import html.parser as HTMLParser except ImportError: import HTMLParser #https://stackoverflow.com/questions/1713038/super-fails-with-error-typeerror-argument-1-must-be-type-not-classobj class LinkParser(HTMLParser.HTMLParser, object): def __init__(self, *args, **kwargs): self.links_found = [] super(LinkParser, self).__init__(*args, **kwargs) def handle_starttag(self, tag, attrs): if tag != 'a' or attrs[0][0] != 'href': return self.links_found.append(attrs[0][1]) import re import subprocess try: import urllib.request as u except ImportError: import urllib as u # Removed reference to spacepy #import spacepy #if spacepy.config.get('user_agent', None): # class AppURLopener(u.FancyURLopener): # version = spacepy.config['user_agent'] # u._urlopener = AppURLopener() baseurl = 'https://spdf.sci.gsfc.nasa.gov/pub/software/cdf/dist/' url = u.urlopen(baseurl) listing = url.read() url.close() p = LinkParser() p.feed(listing) cdfdist = [l for l in p.links_found if re.match('^cdf3\d_\d(?:_\d)?/$', l)] if not cdfdist: raise RuntimeError( "Couldn't find CDF distribution directory to download") cdfdist.sort(key=lambda x: x.rstrip('/').split('_')) cdfverbase = cdfdist[-1].rstrip('/') instfname = cdfverbase + ('_0' if cdfverbase.count('_') == 1 else '') + \ '-setup-{0}.exe'.format(len('%x' % sys.maxsize)*4) insturl = baseurl + cdfverbase + '/windows/' + instfname tmpdir = tempfile.mkdtemp() try: fname, status = u.urlretrieve(insturl, os.path.join(tmpdir, instfname)) subprocess.check_call([fname, '/install', '/q1'], shell=False) finally: shutil.rmtree(tmpdir) _libpath, _library = Library._find_lib() 
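#On failure _find_lib returns (list of paths tried, None), so _libpath also
#serves as the list of attempted locations in the error message below.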
if _library is None: raise Exception(('Cannot load CDF C library; checked {0}. ' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(_libpath))) from . import const lib = Library(_libpath, _library) """Module global library object. Initalized at module load time so all classes have ready access to the CDF library and a common state. E.g: >>> import pycdf >>> pycdf.lib.version (3, 3, 0, ' ') """ class CDFException(Exception): """ Base class for errors or warnings in the CDF library. Not normally used directly, but in subclasses :class:`CDFError` and :class:`CDFWarning`. Error messages provided by this class are looked up from the underlying C library. """ def __init__(self, status): """ Create a CDF Exception Uses CDF C library to look up an appropriate error message. Parameters ========== status : ctypes.c_long CDF status """ self.status = status self.string = 'CDF error ' + repr(status) + ', unable to get details.' message = ctypes.create_string_buffer(const.CDF_STATUSTEXT_LEN + 1) try: retval = lib._library.CDFlib(const.SELECT_, const.CDF_STATUS_, ctypes.c_long(status), const.GET_, const.STATUS_TEXT_, message, const.NULL_) if retval == const.CDF_OK: if isinstance(message.value, str): self.string = message.value elif isinstance(message.value, bytes): self.string = message.value.decode() except: pass def __str__(self): """ Error string associated with the library error. Returns ======= out : str Error message from the CDF library. """ return self.string class CDFError(CDFException): """Raised for an error in the CDF library.""" pass class CDFWarning(CDFException, UserWarning): """Used for a warning in the CDF library.""" def warn(self, level=4): """ Issues a warning based on the information stored in my exception Intended for use in check_status or similar wrapper function. Other Parameters ================ level : int optional (default 3), how far up the stack the warning should be reported. Passed directly to :class:`warnings.warn`. """ warnings.warn(self, self.__class__, level) class EpochError(Exception): """Used for errors in epoch routines""" pass def _compress(obj, comptype=None, param=None): """Set or check the compression of a :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param obj: object on which to set or check compression @type obj: :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param comptype: type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :py:mod:`pycdf.const`. If not specified, will not change compression. @type comptype: ctypes.c_long @param param: Compression parameter, see CDF CRM 4.10 and :py:mod:`pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
@type param: ctypes.c_long @return: (comptype, param) currently in effect @rtype: tuple """ if isinstance(obj, CDF): COMPRESSION_ = const.CDF_COMPRESSION_ elif isinstance(obj, Var): COMPRESSION_ = const.zVAR_COMPRESSION_ else: raise ValueError('Must specify a CDF or Var type.') validparams = {const.NO_COMPRESSION.value: [ctypes.c_long(0)], const.RLE_COMPRESSION.value: [const.RLE_OF_ZEROs], const.HUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.AHUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.GZIP_COMPRESSION.value: [ctypes.c_long(5), ctypes.c_long(1), ctypes.c_long(2), ctypes.c_long(3), ctypes.c_long(4), ctypes.c_long(6), ctypes.c_long(7), ctypes.c_long(8), ctypes.c_long(9), ], } comptypes = [const.NO_COMPRESSION, const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION, const.GZIP_COMPRESSION] comptypevalues = [i.value for i in comptypes] if comptype != None: if not hasattr(comptype, 'value'): comptype = ctypes.c_long(comptype) if param is None: if not comptype.value in validparams: raise CDFError(const.BAD_COMPRESSION) param = validparams[comptype.value][0] paramlist = (ctypes.c_long * 1)(param) obj._call(const.PUT_, COMPRESSION_, comptype, paramlist) params = (ctypes.c_long * const.CDF_MAX_PARMS)(*([0] * const.CDF_MAX_PARMS)) comptype = ctypes.c_long(0) percent = ctypes.c_long(0) obj._call(const.GET_, COMPRESSION_, ctypes.byref(comptype), ctypes.byref(params), ctypes.byref(percent)) param = params[0] if not comptype.value in comptypevalues: raise CDFError(const.BAD_COMPRESSION) validparamvalues = [i.value for i in validparams[comptype.value]] if not param in validparamvalues: raise CDFError(const.BAD_COMPRESSION_PARM) comptype = comptypes[comptypevalues.index(comptype.value)] if comptype in (const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION): param = validparams[comptype.value][validparamvalues.index(param)] return (comptype, param) class CDF(MutableMapping): """ Python object representing a CDF file. Open or create a CDF file by creating an object of this class. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. A readonly CDF with many variables may be slow to close. See :meth:`readonly`. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :meth:`close` or :meth:`save` when done. .. note:: Existing CDF files are opened read-only by default, see :meth:`readonly` to change. CDF supports the `with <http://docs.python.org/tutorial/inputoutput.html#methods-of-file-objects>`_ keyword, like other file objects, so: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... #do brilliant things with the CDF will open the CDF, execute the indented statements, and close the CDF when finished or when an error occurs. The `python docs <http://docs.python.org/reference/compound_stmts.html#with>`_ include more detail on this 'context manager' ability. 
CDF objects behave like a python `dictionary <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_, where the keys are names of variables in the CDF, and the values, :class:`Var` objects. As a dictionary, they are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_ and it is easy to loop over all of the variables in a file. Some examples: #. List the names of all variables in the open CDF ``cdffile``: >>> cdffile.keys() >>> for k in cdffile: #Alternate ... print(k) #. Get a :class:`Var` object for the variable named ``Epoch``: >>> epoch = cdffile['Epoch'] #. Determine if a CDF contains a variable named ``B_GSE``: >>> if 'B_GSE' in cdffile: ... print('B_GSE is in the file') ... else: ... print('B_GSE is not in the file') #. Find how many variables are in the file: >>> print(len(cdffile)) #. Delete the variable ``Epoch`` from the open CDF file ``cdffile``: >>> del cdffile['Epoch'] #. Display a summary of variables and types in open CDF file ``cdffile``: >>> print(cdffile) #. Open the CDF named ``cdf_filename.cdf``, read *all* the data from all variables into dictionary ``data``, and close it when done or if an error occurs: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... data = cdffile.copy() This last example can be very inefficient as it reads the entire CDF. Normally it's better to treat the CDF as a dictionary and access only the data needed, which will be pulled transparently from disc. See :class:`Var` for more subtle examples. Potentially useful dictionary methods and related functions: - `in <http://docs.python.org/reference/expressions.html#in>`_ - `keys <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_ - :py:func:`len` - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - :py:func:`sorted` - :py:func:`~spacepy.toolbox.dictree` The CDF user's guide section 2.2 has more background information on CDF files. The :attr:`~CDF.attrs` Python attribute acts as a dictionary referencing CDF attributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`gAttrList` for more on the dictionary of global attributes. Creating a new CDF from a master (skeleton) CDF has similar syntax to opening one: >>> cdffile = pycdf.CDF('cdf_filename.cdf', 'master_cdf_filename.cdf') This creates and opens ``cdf_filename.cdf`` as a copy of ``master_cdf_filename.cdf``. Using a skeleton CDF is recommended over making a CDF entirely from scratch, but this is possible by specifying a blank master: >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') When CDFs are created in this way, they are opened read-write, see :py:meth:`readonly` to change. By default, new CDFs (without a master) are created in version 2 (backward-compatible) format. To create a version 3 CDF, use :meth:`Library.set_backward`: >>> pycdf.lib.set_backward(False) >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') Add variables by direct assignment, which will automatically set type and dimension based on the data provided: >>> cdffile['new_variable_name'] = [1, 2, 3, 4] or, if more control is needed over the type and dimensions, use :py:meth:`new`. Although it is supported to assign Var objects to Python variables for convenience, there are some minor pitfalls that can arise when changing a CDF that will not affect most users. 
This is only a concern when assigning a zVar object to a Python variable, changing the CDF through some other variable, and then trying to use the zVar object via the originally assigned variable. Deleting a variable: >>> var = cdffile['Var1'] >>> del cdffile['Var1'] >>> var[0] #fail, no such variable Renaming a variable: >>> var = cdffile['Var1'] >>> cdffile['Var1'].rename('Var2') >>> var[0] #fail, no such variable Renaming via the same variable works: >>> var = cdffile['Var1'] >>> var.rename('Var2') >>> var[0] #succeeds, aware of new name Deleting a variable and then creating another variable with the same name may lead to some surprises: >>> var = cdffile['Var1'] >>> var[...] = [1, 2, 3, 4] >>> del cdffile['Var1'] >>> cdffile.new('Var1', data=[5, 6, 7, 8]) >>> var[...] [5, 6, 7, 8] .. autosummary:: ~CDF.attr_num ~CDF.attrs ~CDF.add_attr_to_cache ~CDF.add_to_cache ~CDF.backward ~CDF.checksum ~CDF.clear_attr_from_cache ~CDF.clear_from_cache ~CDF.clone ~CDF.close ~CDF.col_major ~CDF.compress ~CDF.copy ~CDF.from_data ~CDF.new ~CDF.raw_var ~CDF.readonly ~CDF.save ~CDF.var_num ~CDF.version .. attribute:: CDF.attrs Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. .. attribute:: CDF.backward True if this CDF was created in backward-compatible mode (for opening with CDF library before 3.x) .. automethod:: add_to_cache .. automethod:: add_attr_to_cache .. automethod:: attr_num .. automethod:: checksum .. automethod:: clear_from_cache .. automethod:: clear_attr_from_cache .. automethod:: clone .. automethod:: close .. automethod:: col_major .. automethod:: compress .. automethod:: copy .. automethod:: from_data .. automethod:: new .. automethod:: raw_var .. automethod:: readonly .. automethod:: save .. automethod:: var_num .. automethod:: version """ def __init__(self, pathname, masterpath=None, create=None, readonly=None): """Open or create a CDF file. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. Raises ====== CDFError if CDF library reports an error CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save` when done.
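As a further sketch (the filenames here are hypothetical), create an
empty new CDF and open an existing one read/write:

>>> newfile = pycdf.CDF('new_filename.cdf', '')
>>> existing = pycdf.CDF('cdf_filename.cdf', readonly=False)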
""" if masterpath is not None: #Looks like we want to create if create is False: raise ValueError('Cannot specify a master CDF without creating a CDF') if readonly is True: raise ValueError('Cannot create a CDF in readonly mode') if create and readonly: raise ValueError('Cannot create a CDF in readonly mode') try: self.pathname = pathname.encode() except AttributeError: raise ValueError( 'pathname must be string-like: {0}'.format(pathname)) self._handle = ctypes.c_void_p(None) self._opened = False if masterpath is None and not create: self._open(True if readonly is None else readonly) elif masterpath: self._from_master(masterpath.encode()) else: self._create() lib.call(const.SELECT_, const.CDF_zMODE_, ctypes.c_long(2)) self._attrlistref = weakref.ref(gAttrList(self)) self.backward = self.version()[0] < 3 self._var_nums = {} """Cache of name-to-number mappings for variables in this CDF""" self._attr_info = {} """Cache of name-to-(number, global) mappings for attributes in this CDF""" def __del__(self): """Destructor; called when CDF object is destroyed. Close CDF file if there is still a valid handle. .. note:: To avoid data loss, explicitly call :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save`. """ if self._opened: self.close() def __delitem__(self, name): """Delete a zVariable in this CDF, by name or number Parameters ========== name : string or int Name or number of the CDF variable .. note: Variable numbers may change if variables are added or removed. Examples ======== Delete the variable ``Epoch`` from the open CDF file ``cdffile``. >>> del cdffile['Epoch'] """ self[name]._delete() def __enter__(self): """Context manager entrance function.""" return self def __exit__(self, type, value, traceback): """Context manager exit function. Close CDF file. """ self.close() def __getitem__(self, name): """Gets a zVariable in this CDF, by name or number The CDF acts like a dict @param name: Name or number of the CDF variable @type name: string or int @return: CDF variable named or numbered L{name} @rtype: :py:class:`pycdf.Var` @raise KeyError: for pretty much any problem in lookup @note: variable numbers may change if variables are added or removed. """ try: return Var(self, name) except CDFException as e: raise KeyError('{0}: {1}'.format(name, e)) def __setitem__(self, name, data): """Writes data to a zVariable in this CDF If the zVariable does not exist, will create one matching L{data}. If it does exist, will attempt to write L{data} to it without changing the type or dimensions. @param name: name or number of the variable to write @type name: str or int @param data: data to write, or a :py:class:`pycdf.Var` to copy """ if isinstance(data, Var): self.clone(data, name) elif name in self: self[name][...] 
= data if hasattr(data, 'attrs'): self[name].attrs.clone(data.attrs) else: self.new(name, data) def __iter__(self, current = 0): """Iterates over zVars in CDF Iterators for dicts return keys @note: Returned in variable-number order """ while current < self.__len__(): name = self[current].name() value = (yield name) if value is None: current += 1 else: current = self[value]._num() current += 1 def __len__(self): """Implements 'length' of CDF (number of zVars) @return: number of zVars in the CDF @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, const.CDF_NUMzVARS_, ctypes.byref(count)) return count.value def __contains__(self, key): """Determines whether a particular variable name is in the CDF @note: Essentially an efficiency function; L{__iter__} is called if this isn't defined @param key: key/variable name to check @type key: string @return: True if L{key} is the name of a variable in CDF, else False @rtype: Boolean """ try: foo = self[key] return True except KeyError as e: expected = str(key) + \ ": NO_SUCH_VAR: Named variable not found in this CDF." if expected in e.args: return False raise def __repr__(self): """Returns representation of CDF Cannot return anything that can be eval'd to create a copy of the CDF, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<CDF:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the CDF This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.CDF`, just the names, types, and sizes of all variables. (Attributes are not listed.) @return: description of the variables in the CDF @rtype: str """ if self._opened: return '\n'.join([key + ': ' + str(value) for (key, value) in sorted(self.items())]) #can get away with this sort because second value in tuple isn't #compared unless first are different, and variable name is unique. else: if isinstance(self.pathname, str): return 'Closed CDF {0}'.format(self.pathname) else: return 'Closed CDF {0}'.format(self.pathname.decode('ascii')) def _open(self, readonly=True): """Opens the CDF file (called on init) Will open an existing CDF file read/write. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.OPEN_, const.CDF_, self.pathname, ctypes.byref(self._handle)) self._opened = True if readonly: #Default is RW self.readonly(readonly) def _create(self): """Creates (and opens) a new CDF file Created at ``pathname``. Assumes zero-dimension r variables Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.CREATE_, const.CDF_, self.pathname, ctypes.c_long(0), (ctypes.c_long * 1)(0), ctypes.byref(self._handle)) self._opened = True def _from_master(self, master_path): """Creates a new CDF from a master CDF file ``master_path`` is copied to ``pathname`` and opened. Parameters ========== master_path : string location of the master CDF file Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. 
note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ if os.path.exists(self.pathname): raise CDFError(const.CDF_EXISTS) shutil.copy2(master_path, self.pathname) self._open(False) def _call(self, *args, **kwargs): """Select this CDF as current and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. Parameters ========== args : various, see :py:mod:`ctypes`. Passed directly to the CDF library interface. Useful constants are defined in the :doc:`const <pycdf_const>` module of this package. Returns ======= out : ctypes.c_long CDF status from the library .. note: Terminal NULL_ is automatically added to ``args``. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. """ return lib.call(const.SELECT_, const.CDF_, self._handle, *args, **kwargs) def clone(self, zVar, name=None, data=True): """ Clone a zVariable (from another CDF or this) into this CDF Parameters ========== zVar : :py:class:`Var` variable to clone Other Parameters ================ name : str Name of the new variable (default: name of the original) data : boolean (optional) Copy data, or only type, dimensions, variance, attributes? (default: True, copy data as well) Returns ======= out : :py:class:`Var` The newly-created zVar in this CDF """ if name is None: name = zVar.name() if name in self: del self[name] self.new(name, type=zVar.type(), recVary=zVar.rv(), dimVarys=zVar.dv(), dims=zVar._dim_sizes(), n_elements=zVar._nelems()) self[name].compress(*zVar.compress()) self[name].attrs.clone(zVar.attrs) if data: r = zVar._raw zVar._raw = True self.raw_var(name)[...] = zVar[...] zVar._raw = r return zVar def col_major(self, new_col=None): """ Finds the majority of this CDF file Other Parameters ================ new_col : boolean Specify True to change to column-major, False to change to row major, or do not specify to check the majority rather than changing it. (default is check only) Returns ======= out : boolean True if column-major, false if row-major """ if new_col != None: new_maj = const.COLUMN_MAJOR if new_col else const.ROW_MAJOR self._call(const.PUT_, const.CDF_MAJORITY_, new_maj) maj = ctypes.c_long(0) self._call(const.GET_, const.CDF_MAJORITY_, ctypes.byref(maj)) if not maj.value in (const.ROW_MAJOR.value, const.COLUMN_MAJOR.value): raise CDFError(const.BAD_MAJORITY) return maj.value == const.COLUMN_MAJOR.value def readonly(self, ro=None): """ Sets or check the readonly status of this CDF If the CDF has been changed since opening, setting readonly mode will have no effect. .. note:: Closing a CDF that has been opened readonly, or setting readonly False, may take a substantial amount of time if there are many variables in the CDF, as a (potentially large) cache needs to be cleared. Consider specifying ``readonly=False`` when opening the file if this is an issue. However, this may make some reading operations slower. Other Parameters ================ ro : Boolean True to set the CDF readonly, False to set it read/write, or leave out to check only. 
Returns ======= out : Boolean True if CDF is read-only, else False Raises ====== CDFError : if bad mode is set """ if ro == True: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYon) elif ro == False: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYoff) mode = ctypes.c_long(0) self._call(const.CONFIRM_, const.CDF_READONLY_MODE_, ctypes.byref(mode)) if mode.value == const.READONLYon.value: return True elif mode.value == const.READONLYoff.value: return False else: raise CDFError(const.BAD_READONLY_MODE.value) def checksum(self, new_val=None): """ Set or check the checksum status of this CDF. If checksums are enabled, the checksum will be verified every time the file is opened. Other Parameters ================ new_val : boolean True to enable checksum, False to disable, or leave out to simply check. Returns ======= out : boolean True if the checksum is enabled or False if disabled """ if new_val != None: self._call(const.PUT_, const.CDF_CHECKSUM_, const.MD5_CHECKSUM if new_val else const.NO_CHECKSUM) chk = ctypes.c_long(0) self._call(const.GET_, const.CDF_CHECKSUM_, ctypes.byref(chk)) if not chk.value in (const.MD5_CHECKSUM.value, const.NO_CHECKSUM.value): raise CDFError(const.BAD_CHECKSUM) return chk.value == const.MD5_CHECKSUM.value def close(self): """ Closes the CDF file Although called on object destruction (:meth:`~CDF.__del__`), to ensure all data are saved, the user should explicitly call :meth:`~CDF.close` or :meth:`~CDF.save`. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.CLOSE_, const.CDF_) self._opened = False def compress(self, comptype=None, param=None): """ Set or check the compression of this CDF Sets compression on entire *file*, not per-variable. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) Returns ======= out : tuple (comptype, param) currently in effect See Also ======== :meth:`Var.compress` Examples ======== Set file ``cdffile`` to gzip compression, compression level 9: >>> cdffile.compress(pycdf.const.GZIP_COMPRESSION, 9) """ return _compress(self, comptype, param) def new(self, name, data=None, type=None, recVary=True, dimVarys=None, dims=None, n_elements=None, compress=None, compress_param=None): """ Create a new zVariable in this CDF .. note:: Either ``data`` or ``type`` must be specified. If type is not specified, it is guessed from ``data``. Parameters ========== name : str name of the new variable Other Parameters ================ data data to store in the new variable. If this has a an ``attrs`` attribute (e.g., :class:`~spacepy.datamodel.dmarray`), it will be used to populate attributes of the new variable. type : ctypes.c_long CDF type of the variable, from :mod:`~pycdf.const`. See section 2.5 of the CDF user's guide for more information on CDF data types. recVary : boolean record variance of the variable (default True) dimVarys : list of boolean dimension variance of each dimension, default True for all dimensions. 
dims : list of int size of each dimension of this variable, default zero-dimensional. Note this is the dimensionality as defined by CDF, i.e., for record-varying variables it excludes the leading record dimension. See :py:class:`Var`. n_elements : int number of elements, should be 1 except for CDF_CHAR, for which it's the length of the string. compress : ctypes.c_long Compression to apply to this variable, default None. See :py:meth:`Var.compress`. compress_param : ctypes.c_long Compression parameter if compression used; reasonable default is chosen. See :py:meth:`Var.compress`. Returns ======= out : :py:class:`Var` the newly-created zVariable Raises ====== ValueError : if neither data nor sufficient typing information is provided. Notes ===== Any given data may be representable by a range of CDF types; if the type is not specified, pycdf will guess which the CDF types which can represent this data. This breaks down to: #. If input data is a numpy array, match the type of that array #. Proper kind (numerical, string, time) #. Proper range (stores highest and lowest number provided) #. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: #. Type that matches precision of data first, then #. integer type before float type, then #. Smallest type first, then #. signed type first, then #. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if ``data`` specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: #. absolute values between 0 and 3e-39 #. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. """ if type in (const.CDF_EPOCH16, const.CDF_INT8, const.CDF_TIME_TT2000) \ and self.backward: raise ValueError('Cannot use EPOCH16, INT8, or TIME_TT2000 ' 'in backward-compatible CDF') if not lib.supports_int8 and \ type in (const.CDF_INT8, const.CDF_TIME_TT2000): raise ValueError('INT8 and TIME_TT2000 require CDF library 3.4.0') if data is None: if type is None: raise ValueError('Must provide either data or a CDF type.') if dims is None: dims = [] if n_elements is None: n_elements = 1 else: (guess_dims, guess_types, guess_elements) = _Hyperslice.types(data) if dims is None: if recVary: if guess_dims == (): raise ValueError( 'Record-varying data cannot be scalar. 
' 'Specify NRV with CDF.new() or put data in array.') dims = guess_dims[1:] else: dims = guess_dims if type is None: type = guess_types[0] if type == const.CDF_EPOCH16.value and self.backward: type = const.CDF_EPOCH if n_elements is None: n_elements = guess_elements if dimVarys is None: dimVarys = [True for i in dims] recVary = const.VARY if recVary else const.NOVARY dimVarys = [const.VARY if dimVary else const.NOVARY for dimVary in dimVarys] if not hasattr(type, 'value'): type = ctypes.c_long(type) if type.value == const.CDF_INT8.value and not lib.supports_int8: raise ValueError( '64-bit integer support require CDF library 3.4.0') if type.value in (const.CDF_EPOCH16.value, const.CDF_INT8.value, const.CDF_TIME_TT2000.value) \ and self.backward: raise ValueError('Data requires EPOCH16, INT8, or TIME_TT2000; ' 'incompatible with backward-compatible CDF') new_var = Var(self, name, type, n_elements, dims, recVary, dimVarys) if compress != None: new_var.compress(compress, compress_param) if data is not None: new_var[...] = data if hasattr(data, 'attrs'): new_var.attrs.clone(data.attrs) return new_var def raw_var(self, name): """ Get a "raw" :class:`Var` object. Normally a :class:`Var` will perform translation of values for certain types (to/from Unicode for CHAR variables on Py3k, and to/from datetime for all time types). A "raw" object does not perform this translation, on read or write. This does *not* affect the data on disk, and in fact it is possible to maintain multiple Python objects with access to the same zVariable. Parameters ========== name : str name or number of the zVariable """ v = self[name] v._raw = True return v def save(self): """ Saves the CDF file but leaves it open. If closing the CDF, :meth:`close` is sufficient; there is no need to call :meth:`save` before :meth:`close`. .. note:: Relies on an undocumented call of the CDF C library, which is also used in the Java interface. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.SAVE_, const.CDF_) def copy(self): """ Make a copy of all data and attributes in this CDF Returns ======= out : :py:class:`CDFCopy` :class:`~spacepy.datamodel.SpaceData`-like object of all data """ return CDFCopy(self) def version(self): """ Get version of library that created this CDF Returns ======= out : tuple version of CDF library, in form (version, release, increment) """ ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) self._call(const.GET_, const.CDF_VERSION_, ctypes.byref(ver), const.GET_, const.CDF_RELEASE_, ctypes.byref(rel), const.GET_, const.CDF_INCREMENT_, ctypes.byref(inc)) return (ver.value, rel.value, inc.value) def _get_attrs(self): """Get attribute list Provide access to the CDF's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = gAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. 
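A short sketch of use (the attribute name and value are hypothetical):

>>> cdffile.attrs['Project'] = 'ISTP'
>>> print(cdffile.attrs['Project'])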
""") def var_num(self, varname): """Get the variable number of a particular variable name This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : int Variable number of this zvariable. """ num = self._var_nums.get(varname, None) if num is None: #Copied from Var._get, which can hopefully be thinned varNum = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMBER_, varname, ctypes.byref(varNum)) num = varNum.value self._var_nums[varname] = num return num def attr_num(self, attrname): """Get the attribute number and scope by attribute name This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : tuple attribute number, scope (True for global) of this attribute """ res = self._attr_info.get(attrname, None) if res is None: #Copied from Var._get, which can hopefully be thinned attrNum = ctypes.c_long(0) self._call(const.GET_, const.ATTR_NUMBER_, attrname, ctypes.byref(attrNum)) scope = ctypes.c_long(0) self._call(const.SELECT_, const.ATTR_, attrNum, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) res = (attrNum.value, scope) self._attr_info[attrname] = res return res def clear_attr_from_cache(self, attrname): """Mark an attribute deleted in the name-to-number cache Will remove an attribute, and all attributes with higher numbers, from the attribute cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the attribute. Not this is NOT a string in Python 3! """ num, scope = self.attr_num(attrname) #All numbers higher than this are renumbered for a, n in list(self._attr_info.items()): if n[0] >= num: del self._attr_info[a] def clear_from_cache(self, varname): """Mark a variable deleted in the name-to-number cache Will remove a variable, and all variables with higher numbers, from the variable cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! """ num = self.var_num(varname) #All numbers higher than this are renumbered for v, n in list(self._var_nums.items()): if n >= num: del self._var_nums[v] def add_attr_to_cache(self, attrname, num, scope): """Add an attribute to the name-to-number cache This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable scope : bool True if global scope; False if variable scope. 
""" self._attr_info[attrname] = (num, scope) def add_to_cache(self, varname, num): """Add a variable to the name-to-number cache This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable """ self._var_nums[varname] = num #Note there is no function for delete, currently handled in Var.rename #and Attr.rename by just deleting from the dict directly. Maybe this #should be differen (maybe should be possible to follow a variable across #a rename...) class Var(MutableSequence): """ A CDF variable. This object does not directly store the data from the CDF; rather, it provides access to the data in a format that much like a Python list or numpy :class:`~numpy.ndarray`. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. The CDF user's guide, section 2.3, provides background on variables. .. note:: Not intended to be created directly; use methods of :class:`CDF` to gain access to a variable. A record-varying variable's data are viewed as a hypercube of dimensions n_dims+1 (the extra dimension is the record number). They are indexed in row-major fashion, i.e. the last index changes most frequently / is contiguous in memory. If the CDF is column-major, the data are transformed to row-major before return. Non record-varying variables are similar, but do not have the extra dimension of record number. Variables can be subscripted by a multidimensional index to return the data. Indices are in row-major order with the first dimension representing the record number. If the CDF is column major, the data are reordered to row major. Each dimension is specified by standard Python `slice <http://docs.python.org/tutorial/introduction.html#strings>`_ notation, with dimensions separated by commas. The ellipsis fills in any missing dimensions with full slices. The returned data are lists; Python represents multidimensional arrays as nested lists. The innermost set of lists represents contiguous data. .. note:: numpy 'fancy indexing' is *not* supported. Degenerate dimensions are 'collapsed', i.e. no list of only one element will be returned if a single subscript is specified instead of a range. (To avoid this, specify a slice like 1:2, which starts with 1 and ends before 2). Two special cases: 1. requesting a single-dimension slice for a record-varying variable will return all data for that record number (or those record numbers) for that variable. 2. Requests for multi-dimensional variables may skip the record-number dimension and simply specify the slice on the array itself. In that case, the slice of the array will be returned for all records. In the event of ambiguity (e.g., single-dimension slice on a one-dimensional variable), case 1 takes priority. Otherwise, mismatch between the number of dimensions specified in the slice and the number of dimensions in the variable will cause an :exc:`~exceptions.IndexError` to be thrown. This all sounds very complicated but it is essentially attempting to do the 'right thing' for a range of slices. An unusual case is scalar (zero-dimensional) non-record-varying variables. Clearly they cannot be subscripted normally. 
In this case, use the ``[...]`` syntax meaning 'access all data.': >>> import pycdf >>> testcdf = pycdf.CDF('test.cdf', '') >>> variable = testcdf.new('variable', recVary=False, ... type=pycdf.const.CDF_INT4) >>> variable[...] = 10 >>> variable <Var: CDF_INT4 [] NRV > >>> variable[...] 10 Reading any empty non-record-varying variable will return an empty with the same *number* of dimensions, but all dimensions will be of zero length. The scalar is, again, a special case: due to the inability to have a numpy array which is both zero-dimensional and empty, reading an NRV scalar variable with no data will return an empty one-dimensional array. This is really not recommended. As a list type, variables are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_; iterating over a variable returns a single complete record at a time. This is all clearer with examples. Consider a variable ``B_GSM``, with three elements per record (x, y, z components) and fifty records in the CDF. Then: 1. ``B_GSM[0, 1]`` is the y component of the first record. 2. ``B_GSM[10, :]`` is a three-element list, containing x, y, and z components of the 11th record. As a shortcut, if only one dimension is specified, it is assumed to be the record number, so this could also be written ``B_GSM[10]``. 3. ``B_GSM[...]`` reads all data for ``B_GSM`` and returns it as a fifty-element list, each element itself being a three-element list of x, y, z components. Multidimensional example: consider fluxes stored as a function of pitch angle and energy. Such a variable may be called Flux and stored as a two-dimensional array, with the first dimension representing (say) ten energy steps and the second, eighteen pitch angle bins (ten degrees wide, centered from 5 to 175 degrees). Assume 100 records stored in the CDF (i.e. 100 different times). 1. ``Flux[4]`` is a list of ten elements, one per energy step, each element being a list of 18 fluxes, one per pitch bin. All are taken from the fifth record in the CDF. 2. ``Flux[4, :, 0:4]`` is the same record, all energies, but only the first four pitch bins (roughly, field-aligned). 3. ``Flux[..., 0:4]`` is a 100-element list (one per record), each element being a ten-element list (one per energy step), each containing fluxes for the first four pitch bins. This slicing notation is very flexible and allows reading specifically the desired data from the CDF. All data are, on read, converted to appropriate Python data types; EPOCH, EPOCH16, and TIME_TT2000 types are converted to :class:`~datetime.datetime`. Data are returned in numpy arrays. .. note:: Although pycdf supports TIME_TT2000 variables, the Python :class:`~datetime.datetime` object does not support leap seconds. Thus, on read, any seconds past 59 are truncated to 59.999999 (59 seconds, 999 milliseconds, 999 microseconds). Potentially useful list methods and related functions: - `count <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `in <http://docs.python.org/reference/expressions.html#in>`_ - `index <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `len <http://docs.python.org/library/functions.html#len>`_ - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - `sorted <http://docs.python.org/library/functions.html#sorted>`_ The topic of array majority can be very confusing; good background material is available at `IDL Array Storage and Indexing <http://www.idlcoyote.com/misc_tips/colrow_major.html>`_. 
In brief, *regardless of the majority stored in the CDF*, pycdf will always present the data in the native Python majority, row-major order, also known as C order. This is the default order in `NumPy <http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html #internal-memory-layout-of-an-ndarray>`_. However, packages that render image data may expect it in column-major order. If the axes seem 'swapped' this is likely the reason. The :attr:`~Var.attrs` Python attribute acts as a dictionary referencing zAttributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`zAttrList` for more on the dictionary of attributes. With writing, as with reading, every attempt has been made to match the behavior of Python lists. You can write one record, many records, or even certain elements of all records. There is one restriction: only the record dimension (i.e. dimension 0) can be resized by write, as all records in a variable must have the same dimensions. Similarly, only whole records can be deleted. .. note:: Unusual error messages on writing data usually mean that pycdf is unable to interpret the data as a regular array of a single type matching the type and shape of the variable being written. A 5x4 array is supported; an irregular array where one row has five columns and a different row has six columns is not. Error messages of this type include: - ``Data must be well-formed, regular array of number, string, or datetime`` - ``setting an array element with a sequence.`` - ``shape mismatch: objects cannot be broadcast to a single shape`` For these examples, assume Flux has 100 records and dimensions [2, 3]. Rewrite the first record without changing the rest: >>> Flux[0] = [[1, 2, 3], [4, 5, 6]] Writes a new first record and delete all the rest: >>> Flux[...] = [[1, 2, 3], [4, 5, 6]] Write a new record in the last position and add a new record after: >>> Flux[99:] = [[[1, 2, 3], [4, 5, 6]], ... [[11, 12, 13], [14, 15, 16]]] Insert two new records between the current number 5 and 6: >>> Flux[5:6] = [[[1, 2, 3], [4, 5, 6]], [[11, 12, 13], ... [14, 15, 16]]] This operation can be quite slow, as it requires reading and rewriting the entire variable. (CDF does not directly support record insertion.) Change the first element of the first two records but leave other elements alone: >>> Flux[0:2, 0, 0] = [1, 2] Remove the first record: >>> del Flux[0] Removes record 5 (the sixth): >>> del Flux[5] Due to the need to work around a bug in the CDF library, this operation can be quite slow. Delete *all data* from ``Flux``, but leave the variable definition intact: >>> del Flux[...] .. note:: Although this interface only directly supports zVariables, zMode is set on opening the CDF so rVars appear as zVars. See p.24 of the CDF user's guide; pyCDF uses zMode 2. .. autosummary:: ~Var.attrs ~Var.compress ~Var.copy ~Var.dtype ~Var.dv ~Var.insert ~Var.name ~Var.rename ~Var.rv ~Var.shape ~Var.type .. attribute:: Var.attrs zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. .. automethod:: compress .. automethod:: copy .. autoattribute:: dtype .. automethod:: dv .. automethod:: insert .. automethod:: name .. automethod:: rename .. automethod:: rv .. autoattribute:: shape .. 
automethod:: type """ def __init__(self, cdf_file, var_name, *args): """Create or locate a variable Parameters ========== cdf_file : :py:class:`pycdf.CDF` CDF file containing this variable var_name : string name of this variable Other Parameters ================ args additional arguments passed to :py:meth:`_create`. If none, opens an existing variable. If provided, creates a new one. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning """ self.cdf_file = cdf_file #This is the definitive "identify" of variable self._name = None self._type = None #CDF type (long) self._raw = False #Raw access (skip all conversions) if len(args) == 0: self._get(var_name) else: self._create(var_name, *args) #Weak reference to attribute list (use attrs instead) #This avoids a reference loop self._attrlistref = weakref.ref(zAttrList(self)) def __getitem__(self, key): """Returns a slice from the data array. Details under :py:class:`pycdf.Var`. @return: The data from this variable @rtype: list-of-lists of appropriate type. @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) #Hyperslice mostly catches this sort of thing, but #an empty variable is a special case, since we might want to #WRITE to 0th record (which Hyperslice also supports) but #can't READ from it, and iterating over tries to read from it. if hslice.rv: if hslice.dimsizes[0] == 0 and hslice.degen[0] and \ hslice.starts[0] == 0: raise IndexError('record index out of range') #For NRV, again hslice will assume 0th record exists since we might #want to write. So ANY degenerate dim other than the glued-on 0th #suggests an explicit index that should fail. None degenerate suggests #make an empty array. #Note this is pulling a lot of hyperslice stuff into getitem! elif hslice.dimsizes[0] == 0: if len(hslice.degen) > 1 and max(hslice.degen[1:]): raise IndexError('record index out of range') else: #The zero-length dimension is degenerate so it gets chopped, #and you can't have a zero-length numpy array that still #maintains the size of all other dimensions. So just force #a zero-dim array and the rest will follow hslice.counts[...] = 0 #If this is a scalar, need to make a single non-degenerate #dimension so it can be empty. if len(hslice.counts) == 1: hslice.degen[0] = False result = hslice.create_array() if hslice.counts[0] != 0: hslice.select() lib.call(const.GET_, const.zVAR_HYPERDATA_, result.ctypes.data_as(ctypes.c_void_p)) return hslice.convert_input_array(result) def __delitem__(self, key): """Removes a record (or set of records) from the CDF Only whole records can be deleted, so the del call must either specify only one dimension or it must specify all elements of the non-record dimensions. This is *not* a way to resize a variable! Deleting records from the middle of a variable may be very slow in some circumstances. To work around a bug in CDF library versions 3.4.0 and before, all the data must be read in, the requested deletions done, and then all written back out. 
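A sketch of typical use (the variable name ``Flux`` is hypothetical):

>>> del Flux[0:10]    #remove the first ten records
>>> del Flux[...]     #remove all records, keep the variable definition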
@param key: index or slice to delete @type key: int or slice @raise TypeError: if an attempt is made to delete from a non record-varying variable, or to delete below the record level """ if not self.rv(): raise TypeError('Cannot delete records from non-record-varying ' 'variable.') hslice = _Hyperslice(self, key) if hslice.dims > 1 and (hslice.counts[1:] != hslice.dimsizes[1:]).any(): raise TypeError('Can only delete entire records.') if hslice.counts[0] == 0: return start = hslice.starts[0] count = hslice.counts[0] interval = hslice.intervals[0] dimsize = hslice.dimsizes[0] self._call() dangerous_delete = False if lib._del_middle_rec_bug and \ (interval != 1 or (start != 0 and start + count < dimsize)): #delete from middle is dangerous if only have one index entry entries = ctypes.c_long(0) lib.call(const.GET_, const.zVAR_nINDEXENTRIES_, ctypes.byref(entries)) dangerous_delete = (entries.value == 1) if dangerous_delete: data = self[...] data = numpy.delete( data, numpy.arange(start, start + count * interval, interval), 0) self[0:dimsize - count] = data first_rec = dimsize - count last_rec = dimsize - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif interval == 1: first_rec = ctypes.c_long(start) last_rec = ctypes.c_long(start + count - 1) lib.call(const.DELETE_, const.zVAR_RECORDS_, first_rec, last_rec) else: self._call() #delete from end to avoid renumbering of records for recno in range(start + (count - 1) * interval, start - 1, -1 * interval): lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(recno), ctypes.c_long(recno)) def __setitem__(self, key, data): """Puts a slice into the data array. Details under :py:class:`pycdf.Var`. @param key: index or slice to store @type key: int or slice @param data: data to store @type data: numpy.array @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. 
IndexError will @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) n_recs = hslice.counts[0] hslice.expand(data) cdf_type = self.type() if cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) else: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=self._np_type()) if cdf_type == const.CDF_EPOCH16.value: datashape = data.shape[:-1] else: datashape = data.shape #Check data sizes if datashape != tuple(hslice.expected_dims()): raise ValueError('attempt to assign data of dimensions ' + str(datashape) + ' to slice of dimensions ' + str(tuple(hslice.expected_dims()))) #Flip majority and reversed dimensions, see convert_input_array data = hslice.convert_output_array(data) #Handle insertions and similar weirdness if hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Specified slice ends before last record, so insert in middle saved_data = self[hslice.starts[0] + n_recs:] if hslice.counts[0] > 0: hslice.select() lib.call(const.PUT_, const.zVAR_HYPERDATA_, data.ctypes.data_as(ctypes.c_void_p)) if hslice.counts[0] < n_recs: first_rec = hslice.starts[0] + hslice.counts[0] last_rec = hslice.dimsizes[0] - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Put saved data in after inserted data self[hslice.starts[0] + hslice.counts[0]:] = saved_data def extend(self, data): """ Append multiple values to the end of this variable This is an efficiency function which overrides the base implementation in MutableSequence. Parameters ---------- data : the data to append """ self[len(self):] = data def insert(self, index, data): """ Inserts a *single* record before an index Parameters ---------- index : int index before which to insert the new record data : the record to insert """ self[index:index] = [data] def _create(self, var_name, datatype, n_elements = 1, dims = (), recVary = const.VARY, dimVarys = None): """Creates a new zVariable @param var_name: name of this variable @type var_name: string @param datatype: CDF data type @type datatype: ctypes.c_long @param n_elements: number of elements (should be 1 except for CDF_CHAR variables). @type n_elements: long @param dims: size of each dimension for multi-dimensional variable, or empty for a zero-dimensional @type dims: sequence of long @param recVary: record variance for this variable (VARY/NOVARY) @type recVary: long @param dimVarys: array of VARY or NOVARY, variance for each dimension @type dimVarys: sequence of long @return: new variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.new}. 
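A sketch of the public path that ends up here (variable name, type,
and dimensions are hypothetical):

>>> var = cdffile.new('Flux', type=pycdf.const.CDF_FLOAT, dims=[2, 3])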
""" dim_array = (ctypes.c_long * len(dims))(*dims) enc_name = var_name.encode('ascii') if dimVarys is None: dim_vary_array = (ctypes.c_long * (len(dims) if len(dims) > 0 else 1))(const.VARY) else: dim_vary_array = (ctypes.c_long * len(dims))(*dimVarys) varNum = ctypes.c_long(0) self.cdf_file._call(const.CREATE_, const.zVAR_, enc_name, datatype, ctypes.c_long(n_elements), ctypes.c_long(len(dims)), dim_array, recVary, dim_vary_array, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) def _delete(self): """Removes this zVariable from the CDF @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ self._call(const.DELETE_, const.zVAR_) self.cdf_file.clear_from_cache(self._name) self._name = None def _get(self, var_name): """Gets an existing zVariable @param var_name: name of this variable @type var_name: string @return: variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.__getitem__}. """ if isinstance(var_name, str_classes): try: enc_name = var_name.encode('ascii').rstrip() except AttributeError: enc_name = var_name.rstrip() #already in ASCII #'touch' CDF to cause an error if the name isn't there; get number varNum = ctypes.c_long(0) self.cdf_file._call(const.GET_, const.zVAR_NUMBER_, enc_name, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) else: #Looking up by number name = ctypes.create_string_buffer(const.CDF_VAR_NAME_LEN256+1) self.cdf_file._call(const.SELECT_, const.zVAR_, ctypes.c_long(var_name), const.GET_, const.zVAR_NAME_, name) self._name = name.value.rstrip() self.cdf_file.add_to_cache(self._name, var_name) def _num(self): """Returns the zVar number for this variable @return: number of this zVar @rtype: int """ return self.cdf_file.var_num(self._name) def __len__(self): """Get number of records for this variable in this file @return: Number of records @rtype: long @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ count = ctypes.c_long(0) self._call(const.GET_, const.zVAR_MAXREC_, ctypes.byref(count)) return (count.value + 1) def __repr__(self): """Returns representation of the variable Cannot return anything that can be eval'd to create a copy, so just wrap the informal representation in angle brackets. @return: info on this zVar @rtype: str """ return '<Var:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the variable This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.Var`. 
@return: info on this zVar, CDFTYPE [dimensions] NRV (if not record-varying) @rtype: str """ if self.cdf_file._opened: cdftype = self.type() chartypes = (const.CDF_CHAR.value, const.CDF_UCHAR.value) rv = self.rv() typestr = lib.cdftypenames[cdftype] + \ ('*' + str(self._nelems()) if cdftype in chartypes else '' ) if rv: sizestr = str([len(self)] + self._dim_sizes()) else: sizestr = str(self._dim_sizes()) return typestr + ' ' + sizestr + ('' if rv else ' NRV') else: if isinstance(self._name, str): return 'zVar "{0}" in closed CDF {1}'.format( self._name, self.cdf_file.pathname) else: return 'zVar "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self.cdf_file.pathname.decode('ascii')) def _n_dims(self): """Get number of dimensions for this variable @return: the number of dimensions @rtype: long """ n_dims = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMDIMS_, ctypes.byref(n_dims)) return n_dims.value def _dim_sizes(self): """Get the dimension sizes for this variable @return: sequence of sizes @rtype: sequence of long @note: This will always be in Python order (i.e. row major, last index iterates most quickly), *regardless* of the majority of the CDF. """ sizes = (ctypes.c_long * const.CDF_MAX_DIMS)(0) self._call(const.GET_, const.zVAR_DIMSIZES_, sizes) sizes = sizes[0:self._n_dims()] return sizes def rv(self, new_rv=None): """ Gets or sets whether this variable has record variance If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Other Parameters ================ new_rv : boolean True to change to record variance, False to change to NRV, unspecified to simply check variance. Returns ======= out : Boolean True if record varying, False if NRV """ if new_rv != None: self._call(const.PUT_, const.zVAR_RECVARY_, const.VARY if new_rv else const.NOVARY) vary = ctypes.c_long(0) self._call(const.GET_, const.zVAR_RECVARY_, ctypes.byref(vary)) return vary.value != const.NOVARY.value def dv(self, new_dv=None): """ Gets or sets dimension variance of each dimension of variable. If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Parameters ========== new_dv : list of boolean Each element True to change that dimension to dimension variance, False to change to not dimension variance. (Unspecified to simply check variance.) Returns ======= out : list of boolean True if that dimension has variance, else false. """ ndims = self._n_dims() if new_dv != None: if len(new_dv) != ndims: raise ValueError('Must specify variance for ' + str(ndims) + 'dimensions.') varies = (ctypes.c_long * ndims)( *[const.VARY if dv else const.NOVARY for dv in new_dv]) self._call(const.PUT_, const.zVAR_DIMVARYS_, varies) if ndims == 0: return [] varies = (ctypes.c_long * const.CDF_MAX_DIMS)() self._call(const.GET_, const.zVAR_DIMVARYS_, varies) return [dv != const.NOVARY.value for dv in varies[0:ndims]] def _call(self, *args, **kwargs): """Select this CDF and variable and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. @param args: Passed directly to the CDF library interface. Useful constants are defined in the :py:mod:`pycdf.const` module of this package. @type args: various, see :py:mod:`ctypes`. 
@return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self.cdf_file._call( const.SELECT_, const.zVAR_, ctypes.c_long(self.cdf_file.var_num(self._name)), *args, **kwargs) def _np_type(self): """Returns the numpy type of this variable This is the numpy type that will come directly out of the CDF; see :meth:`dtype` for the representation post-conversion. Raises ====== CDFError : for library-reported error or failure to find numpy type Returns ======= out : dtype numpy dtype that will hold value from this variable """ cdftype = self.type() if cdftype == const.CDF_CHAR.value or cdftype == const.CDF_UCHAR.value: return numpy.dtype('S' + str(self._nelems())) try: return lib.numpytypedict[cdftype] except KeyError: raise CDFError(const.BAD_DATA_TYPE) def type(self, new_type=None): """ Returns or sets the CDF type of this variable Parameters ========== new_type : ctypes.c_long the new type from :mod:`~pycdf.const` Returns ======= out : int CDF type """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) n_elements = ctypes.c_long(self._nelems()) self._call(const.PUT_, const.zVAR_DATASPEC_, new_type, n_elements) self._type = None if self._type is None: cdftype = ctypes.c_long(0) self._call(const.GET_, const.zVAR_DATATYPE_, ctypes.byref(cdftype)) self._type = cdftype.value return self._type def _nelems(self): """Number of elements for each value in this variable This is the length of strings for CHAR and UCHAR, should be 1 otherwise. @return: length of strings @rtype: int """ nelems = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMELEMS_, ctypes.byref(nelems)) return nelems.value def name(self): """ Returns the name of this variable Returns ======= out : str variable's name """ if isinstance(self._name, str): return self._name elif isinstance(self._name, bytes): return self._name.decode() def compress(self, comptype=None, param=None): """ Set or check the compression of this variable Compression may not be changeable on variables with data already written; even deleting the data may not permit the change. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
Returns ======= out : tuple the (comptype, param) currently in effect """ return _compress(self, comptype, param) def copy(self): """ Copies all data and attributes from this variable Returns ======= out : :class:`VarCopy` list of all data in record order """ return VarCopy(self) def rename(self, new_name): """ Renames this variable Parameters ========== new_name : str the new name for this variable """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_VAR_NAME_LEN256: raise CDFError(const.BAD_VAR_NAME) self._call(const.PUT_, const.zVAR_NAME_, enc_name) self.cdf_file.add_to_cache( enc_name, self.cdf_file.var_num(self._name)) #Still in cache del self.cdf_file._var_nums[self._name] self._name = enc_name @property def shape(self): """ Provides the numpy array-like shape of this variable. Returns a tuple; first element is number of records (RV variable only) And the rest provide the dimensionality of the variable. .. note:: Assigning to this attribute will not change the shape. """ if self.rv(): return tuple([len(self)] + self._dim_sizes()) else: return tuple(self._dim_sizes()) @property def dtype(self): """ Provide the numpy dtype equivalent to the CDF type of this variable. Data from this variable will be returned in numpy arrays of this type. See Also -------- type """ cdftype = self.type() if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str is not bytes and not self._raw: return numpy.dtype('U' + str(self._nelems())) if cdftype in (const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value) and not self._raw: return numpy.dtype('O') return self._np_type() def _get_attrs(self): """Get attribute list Provide access to the zVar's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = zAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. """) class _Hyperslice(object): """Represents a CDF 'slice' used for the hyper CDF functions For internal module use only. @ivar dims: number of dimensions to this slice, usually number of dimensions to the variable plus one for the record, which represents the 0th (least rapidly varying) dimension. @type dims: int @ivar dimsizes: size of each dimension (0th is number of records) @type dimsizes: list of int @ivar starts: index of the start value for each dimension ('dimension indices' in CDF speak) @type starts: list of int @ivar counts: number of values to get from each dimension. Final result will be the product of everything in counts. ('dimension counts' in CDF speak) @type counts: numpy.array @ivar intervals: interval between successive indices to use for each dimension. ('dimension invervals' in CDF speak) @type intervals: list of int @ivar degen: is this dimension degenerate, i.e. should be removed in the returned dataset. A 3D array with one dimension degenerate will be returned as a 2D array (i.e. list-of-lists.) @type degen: numpy.array @ivar rev: should this dimension be returned in reverse order? 
@type rev: numpy.array @ivar column: is this slice in column-major mode (if false, row-major) @type column: boolean @ivar zvar: what CDF variable this object slices on @type zvar: :py:class:`pycdf.Var` @ivar expanded_key: fully-expanded version of the key passed to the constructor (all dimensions filled in) @type expanded_key: tuple @note: All dimension-related variables are stored row-major (Python order) """ def __init__(self, zvar, key): """Create a Hyperslice @param zvar: zVariable that this slices @type zvar: :py:class:`pycdf.Var` @param key: Python multi-dimensional slice as passed to __getitem__ @type key: tuple of slice and/or int @raise IndexError: if slice is out of range, mismatches dimensions, or otherwise unparsable. @raise ValueError: if slice has invalid values """ self.zvar = zvar self.rv = self.zvar.rv() #dim of records, + 1 record dim (NRV always is record 0) self.dims = zvar._n_dims() + 1 self.dimsizes = [len(zvar)] + \ zvar._dim_sizes() self.starts = [0] * self.dims self.counts = numpy.empty((self.dims,), dtype=numpy.int32) self.counts.fill(1) self.intervals = [1] * self.dims self.degen = numpy.zeros(self.dims, dtype=numpy.bool) self.rev = numpy.zeros(self.dims, dtype=numpy.bool) #key is: #1. a single value (integer or slice object) if called 1D #2. a tuple (of integers and/or slice objects) if called nD #3. Each item is either a single value (degenerate dim) # or a slice object. if not hasattr(key, '__len__'): #Not a container object, pack in tuple key = (key, ) if not self.rv: key = (0, ) + key #NRV, so always get 0th record (degenerate) key = self.expand_ellipsis(key, self.dims) if self.rv: #special-cases for RV variables if len(key) == 1: #get all data for this record(s) key = self.expand_ellipsis(key + (Ellipsis, ), self.dims) elif len(key) == self.dims - 1: #get same slice from each record key = (slice(None, None, None), ) + key if len(key) == self.dims: self.expanded_key = key for i in range(self.dims): idx = key[i] if hasattr(idx, 'start'): #slice (self.starts[i], self.counts[i], self.intervals[i], self.rev[i]) = \ self.convert_range(idx.start, idx.stop, idx.step, self.dimsizes[i]) else: #Single degenerate value if idx < 0: idx += self.dimsizes[i] if idx != 0 and (idx >= self.dimsizes[i] or idx < 0): raise IndexError('list index out of range') self.starts[i] = idx self.degen[i] = True else: raise IndexError('Slice does not match dimensions for zVar ' + str(zvar._name)) self.column = zvar.cdf_file.col_major() def expected_dims(self, data=None): """Calculate size of non-degenerate dimensions Figures out size, in each dimension, of expected input data @return: size of each dimension for this slice, excluding degenerate @rtype: list of int """ return [self.counts[i] for i in range(self.dims) if not self.degen[i]] def expand(self, data): """Expands the record dimension of this slice to hold a set of data If the length of data (outermost dimension) is larger than the record count (counts[0]) for this slice, expand the slice to hold all the data. This requires that the record dimension of the slice not be degenerate, and also that it not have been completely specified when the hyperslice was created (i.e. record dimension either ellipsis or no specified stop.) Does *not* expand any other dimension, since that's Very Hard in CDF. 
@param data: the data which are intended to be stored in this slice @type data: list """ rec_slice = self.expanded_key[0] if not self.rv or isinstance(data, str_classes) or self.degen[0] or \ not hasattr(rec_slice, 'stop'): return if len(data) < self.counts[0]: #Truncate to fit data if rec_slice.stop is None and rec_slice.step in (None, 1): self.counts[0] = len(data) elif len(data) > self.counts[0]: #Expand to fit data if rec_slice.step in (None, 1): self.counts[0] = len(data) def create_array(self): """Creates a numpy array to hold the data from this slice Returns ======= out : numpy.array array sized, typed, and dimensioned to hold data from this slice """ counts = self.counts degen = self.degen if self.column: counts = self.reorder(counts) degen = self.reorder(degen) #TODO: Forcing C order for now, revert to using self.column later array = numpy.empty( [counts[i] for i in range(len(counts)) if not degen[i]], self.zvar._np_type(), order='C') return numpy.require(array, requirements=('C', 'A', 'W')) def convert_input_array(self, buffer): """Converts a buffer of raw data from this slice EPOCH(16) variables always need to be converted. CHAR need converted to Unicode if py3k Parameters ========== buffer : numpy.array data as read from the CDF file Returns ======= out : numpy.array converted data """ result = self._flip_array(buffer) #Convert to derived types cdftype = self.zvar.type() if not self.zvar._raw: if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str != bytes: dt = numpy.dtype('U{0}'.format(result.dtype.itemsize)) result = numpy.require(numpy.char.array(result).decode(), dtype=dt) elif cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(result) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(result) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(result) return result def convert_output_array(self, buffer): """Convert a buffer of data that will go into this slice Parameters ========== buffer : numpy.array data to go into the CDF file Returns ======= out : numpy.array input with majority flipped and dimensions reversed to be suitable to pass directly to CDF library. """ buffer = self._flip_array(buffer) return numpy.require(buffer, requirements=('C', 'A', 'W')) def _flip_array(self, data): """ Operations for majority, etc. common between convert_input and _output """ cdftype = self.zvar.type() #Flip majority if any non-degenerate dimensions exist if self.column and not min(self.degen): #Record-number dim degen, swap whole thing if self.degen[0]: if cdftype == const.CDF_EPOCH16.value: #Maintain last dimension data = data.transpose( list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose() #Record-number dimension is not degenerate, so keep it first else: if cdftype == const.CDF_EPOCH16.value: data = data.transpose( [0] + list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose( [0] + list(range(len(data.shape) - 1, 0, -1))) #Reverse non-degenerate dimensions in rev #Remember that the degenerate indices are already gone! 
if self.rev.any(): sliced = [(slice(None, None, -1) if self.rev[i] else slice(None)) for i in range(self.dims) if not self.degen[i]] if cdftype == const.CDF_EPOCH16.value: #don't reverse last dim sliced.extend(slice(None)) data = operator.getitem(data, tuple(sliced)) return data def select(self): """Selects this hyperslice in the CDF Calls the CDF library to select the CDF, variable, records, and array elements corresponding to this slice. """ args = (const.SELECT_, const.zVAR_RECNUMBER_, ctypes.c_long(self.starts[0]), const.SELECT_, const.zVAR_RECCOUNT_, ctypes.c_long(self.counts[0]), const.SELECT_, const.zVAR_RECINTERVAL_, ctypes.c_long(self.intervals[0])) if self.dims > 1: dims = self.dims - 1 args += (const.SELECT_, const.zVAR_DIMINDICES_, (ctypes.c_long * dims)(*self.starts[1:]), const.SELECT_, const.zVAR_DIMCOUNTS_, (ctypes.c_long * dims)(*self.counts[1:]), const.SELECT_, const.zVAR_DIMINTERVALS_, (ctypes.c_long * dims)(*self.intervals[1:])) self.zvar._call(*args) @staticmethod def expand_ellipsis(slices, n_dims): """Expands any ellipses into correct number of full-size slices @param slices: tuple of slices, integers, or ellipse objects @type slices: tuple @param n_dims: number of dimensions this slice is over @type n_dims: int @return: L{slices} with ellipses replaced by appropriate number of full-dimension slices @rtype: tuple @raise IndexError: if ellipses specified when already have enough dimensions """ if slices is Ellipsis: return tuple([slice(None, None, None) for i in range(n_dims)]) #Elements might be numpy arrays, so can't use in/index idx = [i for i, v in enumerate(slices) if v is Ellipsis] if not idx: #no ellipsis return slices if len(idx) > 1: #multiples! raise IndexError('Ellipses can only be used once per slice.') idx = idx[0] #how many dims to expand ellipsis to #remember the ellipsis is in len(slices) and must be replaced! extra = n_dims - len(slices) + 1 if extra < 0: raise IndexError('too many indices') result = slices[0:idx] + (slice(None), ) * extra + slices[idx+1:] return result @staticmethod def check_well_formed(data): """Checks if input data is well-formed, regular array""" d = numpy.asanyarray(data) if d.dtype == numpy.object: #this is probably going to be bad try: len(d.flat[0]) except TypeError: #at least it's not a list pass else: raise ValueError( 'Data must be well-formed, regular array of number, ' 'string, or datetime') @staticmethod def dimensions(data): """Finds the dimensions of a nested list-of-lists @param data: data of which dimensions are desired @type data: list (of lists) @return: dimensions of L{data}, in order outside-in @rtype: list of int @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) _Hyperslice.check_well_formed(d) return d.shape @staticmethod def types(data, backward=False): """Find dimensions and valid types of a nested list-of-lists Any given data may be representable by a range of CDF types; infer the CDF types which can represent this data. This breaks down to: 1. Proper kind (numerical, string, time) 2. Proper range (stores highest and lowest number) 3. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: 1. Type that matches precision of data first, then 2. integer type before float type, then 3. Smallest type first, then 4. signed type first, then 5. specifically-named (CDF_BYTE) vs. 
generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if L{data} specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: 1. absolute values between 0 and 3e-39 2. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. @param data: data for which dimensions and CDF types are desired @type data: list (of lists) @param backward: limit to pre-CDF3 types @type backward: bool @return: dimensions of L{data}, in order outside-in; CDF types which can represent this data; number of elements required (i.e. length of longest string) @rtype: 3-tuple of lists ([int], [ctypes.c_long], [int]) @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) dims = d.shape elements = 1 types = [] _Hyperslice.check_well_formed(d) if d.dtype.kind in ('S', 'U'): #it's a string types = [const.CDF_CHAR, const.CDF_UCHAR] elements = d.dtype.itemsize if d.dtype.kind == 'U': #UTF-8 uses 4 bytes per elements //= 4 elif d.size and hasattr(numpy.ma.getdata(d).flat[0], 'microsecond'): if max((dt.microsecond % 1000 for dt in d.flat)) > 0: types = [const.CDF_EPOCH16, const.CDF_EPOCH, const.CDF_TIME_TT2000] else: types = [const.CDF_EPOCH, const.CDF_EPOCH16, const.CDF_TIME_TT2000] if backward: del types[types.index(const.CDF_EPOCH16)] del types[-1] elif not lib.supports_int8: del types[-1] elif d is data or isinstance(data, numpy.generic): #numpy array came in, use its type (or byte-swapped) types = [k for k in lib.numpytypedict if (lib.numpytypedict[k] == d.dtype or lib.numpytypedict[k] == d.dtype.newbyteorder()) and not k in lib.timetypes] if (not lib.supports_int8 or backward) \ and const.CDF_INT8.value in types: del types[types.index(const.CDF_INT8.value)] #Maintain priority to match the ordered lists below: #float/double (44, 45) before real (21/22), and #byte (41) before int (1) before char (51). So hack. #Consider making typedict an ordered dict once 2.6 is dead. types.sort(key=lambda x: x % 50, reverse=True) if not types: #not a numpy array, or can't parse its type if d.dtype.kind == 'O': #Object. 
Try to make it numeric #Can't do safe casting from Object, so try and compare #Basically try most restrictive to least restrictive trytypes = (numpy.uint64, numpy.int64, numpy.float64) for t in trytypes: try: newd = d.astype(dtype=t) except: #Failure to cast, try next type continue if (newd == d).all(): #Values preserved, use this type d = newd #Continue with normal guessing, as if a list break else: #fell through without a match raise ValueError( 'Cannot convert generic objects to CDF type.') if d.dtype.kind in ('i', 'u'): #integer minval = numpy.min(d) maxval = numpy.max(d) if minval < 0: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_INT2, const.CDF_INT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 15, 2 ** 31, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] else: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_UINT1, const.CDF_INT2, const.CDF_UINT2, const.CDF_INT4, const.CDF_UINT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 8, 2 ** 15, 2 ** 16, 2 ** 31, 2 ** 32, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] types = [t for (t, c) in zip(types, cutoffs) if c > maxval and (minval >= 0 or minval >= -c)] if (not lib.supports_int8 or backward) \ and const.CDF_INT8 in types: del types[types.index(const.CDF_INT8)] else: #float if dims is (): if d != 0 and (abs(d) > 1.7e38 or abs(d) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] else: absolutes = numpy.abs(d[d != 0]) if len(absolutes) > 0 and \ (numpy.max(absolutes) > 1.7e38 or numpy.min(absolutes) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] types = [t.value if hasattr(t, 'value') else t for t in types] return (dims, types, elements) @staticmethod def reorder(seq): """Reorders seq to switch array majority Used to take an array of subscripts between row and column majority. First element is not touched, being the record number. @param seq: a sequence of *subscripts* @type seq: sequence of integers @return: seq with all but element 0 reversed in order @rtype: sequence of integers """ return numpy.concatenate((seq[0:1], numpy.flipud(seq)[:-1])) @staticmethod def convert_range(start, stop, step, size): """Converts a start/stop/step range to start/count/interval (i.e. changes from Python-style slice to CDF-style) @param start: index to start a slice at, may be none or negative @type start: int @param stop: index at end of slice (one-past, standard Python), may be none or negative @type stop: int @param step: interval for stepping through stlice @type step: int @param size: size of list to slice @type size: int @return: (start, count, interval, rev) where: 1. start is the start index, normalized to be within the size of the list and negatives handled 2. count is the number of records in the slice, guaranteed to stop before the end 3. interval is the skip between records 4. rev indicates whether the sequence should be reversed @rtype: (int, int, int, boolean) """ (start, stop, step) = slice(start, stop, step).indices(size) if step < 0: step *= -1 count = int((start - stop + step - 1) / step) start = start - (count - 1) * step rev = True else: count = int((stop - start + step - 1) / step) rev = False if count < 0: count = 0 start = 0 return (start, count, step, rev) class Attr(MutableSequence): """An attribute, g or z, for a CDF .. 
warning:: This class should not be used directly, but only in its subclasses, :class:`gAttr` and :class:`zAttr`. The methods listed here are safe to use in the subclasses. Represents a CDF attribute, providing access to the Entries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. An introduction to CDF attributes can be found in section 2.4 of the CDF user's guide. Each element of the list is a single Entry of the appropriate type. The index to the elements is the Entry number. Multi-dimensional slicing is *not* supported; an Entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry .. autosummary:: ~Attr.append ~Attr.has_entry ~Attr.insert ~Attr.max_idx ~Attr.new ~Attr.number ~Attr.rename ~Attr.type .. automethod:: append .. automethod:: has_entry .. automethod:: insert .. automethod:: max_idx .. automethod:: new .. automethod:: number .. automethod:: rename .. automethod:: type """ def __init__(self, cdf_file, attr_name, create=False): """Initialize this attribute @param cdf_file: CDF file containing this attribute @type cdf_file: :py:class:`pycdf.CDF` @param attr_name: Name of this attribute @type attr_name: str @param create: True to create attribute, False to look up existing. @type create: bool """ self._cdf_file = cdf_file self._raw = False if isinstance(attr_name, str_classes): try: self._name = attr_name.encode('ascii') except AttributeError: self._name = attr_name attrno = ctypes.c_long() if create: self._cdf_file._call(const.CREATE_, const.ATTR_, self._name, self.SCOPE, ctypes.byref(attrno)) self._cdf_file.add_attr_to_cache( self._name, attrno.value, self.SCOPE == const.GLOBAL_SCOPE) else: #Ensure exists, and populate cache. See scope note below attrno, scope = self._cdf_file.attr_num(self._name) else: name = ctypes.create_string_buffer(const.CDF_ATTR_NAME_LEN256 + 1) scope = ctypes.c_long(0) self._cdf_file._call(const.SELECT_, const.ATTR_, ctypes.c_long(attr_name)) #Because it's possible to create a gAttr Python objecting #referencing an Attribute with variable scope, and vice-versa, #do NOT assume the scope matches #(Higher level code checks for that being a bad thing.) self._cdf_file._call( const.GET_, const.ATTR_NAME_, name, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) self._name = name.value.rstrip() if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) self._cdf_file.add_attr_to_cache(self._name, attr_name, scope) def __getitem__(self, key): """Return a slice of Entries. Because Attributes may be sparse, a multi-element slice will return None for those elements which do not have associated Entries. @param key: index or range of Entry number to return @type key: slice or int @return: a list of entries, appropriate type. @raise IndexError: if L{key} is an int and that Entry number does not exist. 
""" if key is Ellipsis: key = slice(None, None, None) if hasattr(key, 'indices'): idx = range(*key.indices(self.max_idx() + 1)) return [self._get_entry(i) if self.has_entry(i) else None for i in idx] else: if self.has_entry(key): return self._get_entry(key) else: raise IndexError('list index ' + str(key) + ' out of range.') def _check_other_entries(self, types): """Try to get the type of this entry from others in the Attribute For zAttrs, checks if all other Entries are the same type, and at least one doesn't match its zVar, i.e. Entry type dominates (otherwise assumption is the Var type dominates). For gAttrs, checks all other Entries, and gives priority to the one that's earliest in the possible type list and exists in other Entries. This is only one component of Entry type guessing! :param list types: CDF types that are candidates (match the data) :return: The type discerned from other Entries, or None """ if self.ENTRY_ == const.zENTRY_: #If everything else is the same entry type, #and one is not the same as its var, probably #all entries should be of that type cand_et = None #The Entry type that might work one_var_diff = False #One Var has a type different from Entry for num in range(self.max_idx() + 1): if not self.has_entry(num): continue vartype = self._cdf_file[num].type() entrytype = self.type(num) if vartype != entrytype: one_var_diff = True if cand_et is None: if not entrytype in types: return None #One var has Entry with "impossible" type cand_et = entrytype elif cand_et != entrytype: return None #Two vars have Entries with different types if one_var_diff and cand_et is not None: return cand_et else: # Of those types which exist in other entries, # find the one which is earliest # in types, i.e. the preferred type entrytypes = [self.type(num) for num in range(self.max_idx() + 1) if self.has_entry(num)] entrytypes = [et for et in entrytypes if et in types] if entrytypes: return types[ min([types.index(et) for et in entrytypes])] return None def __setitem__(self, key, data): """Set a slice of Entries. @param key: index or range of Entry numbers to set @type key: slice or int @param data: the data to set these entries to. Normally each entry should be a sequence; if a scalar is provided, it is treated as a single-element list. @type data: scalar or list @raise ValueError: if size of {data} does not match size of L{key} @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. 
""" if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): #Single value, promote everything a dimension idx = (key, key + 1, 1) data = [data] else: idx = key.indices(self.max_idx() + 1) if key.step is None or key.step > 0: #Iterating forward, extend slice to match data if len(data) > len(range(*idx)): idx = (idx[0], idx[0] + idx[2] * len(data), idx[2]) #get, and check, types and sizes for all data #checks first so don't have error after changing half the Entries data_idx = -1 typelist = [] for i in range(*idx): data_idx += 1 if data_idx >= len(data): continue datum = data[data_idx] if datum is None: typelist[i] = (None, None, None) continue (dims, types, elements) = _Hyperslice.types( datum, backward=self._cdf_file.backward) if len(types) <= 0: raise ValueError('Cannot find a matching CDF type.') if len(dims) > 1: raise ValueError('Entries must be scalar or 1D.') elif len(dims) == 1 and isinstance(datum[0], str_classes): raise ValueError('Entry strings must be scalar.') entry_type = None if self.has_entry(i): #If the entry already exists, match its type entry_type = self.type(i) if not entry_type in types: entry_type = None if entry_type is None: #Check other entries for this attribute entry_type = self._check_other_entries(types) if entry_type is None and self.ENTRY_ == const.zENTRY_: #Fall back to zVar type vartype = self._cdf_file[i].type() if vartype in types: entry_type = vartype else: entry_type = types[0] elif entry_type is None: entry_type = types[0] if not entry_type in lib.numpytypedict: raise ValueError('Cannot find a matching numpy type.') typelist.append((dims, entry_type, elements)) data_idx = -1 for i in range(*idx): data_idx += 1 if data_idx >= len(data) or data[data_idx] is None: if self.has_entry(i): del self[i] continue datum = data[data_idx] (dims, entry_type, elements) = typelist[data_idx] self._write_entry(i, datum, entry_type, dims, elements) def __delitem__(self, key): """Delete a slice of Entries. @param key: index or range of Entry numbers to delete @type key: slice or int @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. """ if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): idx = (key, key + 1, 1) else: idx = key.indices(self.max_idx() + 1) for i in range(*idx): self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(i), const.DELETE_, self.ENTRY_) def __iter__(self, current=0): """Iterates over all entries in this Attribute Returns data from one entry at a time until reaches the end. @note: Returned in entry-number order. """ while current <= self.max_idx(): if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current += 1 def __reversed__(self, current=None): """Iterates over all entries in this Attribute Returns data from one entry at a time, starting at end and going to beginning. @note: Returned in entry-number order. """ if current is None: current = self.max_idx() while current >= 0: if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current -= 1 def __len__(self): """Number of Entries for this Attr. NOT same as max Entry number. 
@return: Number of Entries @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_NUMENTRIES_, ctypes.byref(count)) return count.value def __repr__(self): """Returns representation of an attribute Cannot return anything that can be eval'd to create a copy of the attribtute, so just wrap the informal representation in angle brackets. @return: all the data in this attribute @rtype: str """ return '<\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute This is an 'informal' representation in that it cannot be evaluated directly to create an L{Attr}. @return: all the data in this attribute @rtype: str """ if self._cdf_file._opened: return '\n'.join([str(item) for item in self]) else: if isinstance(self._name, str): return 'Attribute "{0}" in closed CDF {1}'.format( self._name, self._cdf_file.pathname) else: return 'Attribute "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self._cdf_file.pathname.decode('ascii')) def insert(self, index, data): """Insert an entry at a particular number Inserts entry at particular number while moving all subsequent entries to one entry number later. Does not close gaps. Parameters ========== index : int index where to put the new entry data : data for the new entry """ max_entry = self.max_idx() if index > max_entry: #Easy case self[index] = data return for i in range(max_entry, index - 1, -1): if self.has_entry(i+1): self.__delitem__(i+1) if self.has_entry(i): self.new(self.__getitem__(i), type=self.type(i), number=i+1) self[index] = data def append(self, data): """Add an entry to end of attribute Puts entry after last defined entry (does not fill gaps) Parameters ========== data : data for the new entry """ self[self.max_idx() + 1] = data def _call(self, *args, **kwargs): """Select this CDF and Attr and call the CDF internal interface @param args: Passed directly to the CDF library interface. @type args: various, see :py:mod:`ctypes`. @return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self._cdf_file._call( const.SELECT_, const.ATTR_, ctypes.c_long(self._cdf_file.attr_num(self._name)[0]), *args, **kwargs) def _entry_len(self, number): """Number of elements in an Entry @param number: number of Entry @type number: int @return: number of elements @rtype: int """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') count = ctypes.c_long(0) self._call( const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_NUMELEMS_, ctypes.byref(count)) return count.value def type(self, number, new_type=None): """Find or change the CDF type of a particular Entry number Parameters ========== number : int number of Entry to check or change Other Parameters ================ new_type type to change this Entry to, from :mod:`~pycdf.const`. Omit to only check type. Returns ======= out : int CDF variable type, see :mod:`~pycdf.const` Notes ===== If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) size = ctypes.c_long(self._entry_len(number)) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATASPEC_, new_type, size, ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') cdftype = ctypes.c_long(0) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATATYPE_, ctypes.byref(cdftype), ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') return cdftype.value def has_entry(self, number): """Check if this attribute has a particular Entry number Parameters ========== number : int number of Entry to check or change Returns ======= out : bool True if ``number`` is a valid entry number; False if not """ status = self._call(const.CONFIRM_, self.ENTRY_EXISTENCE_, ctypes.c_long(number), ignore=(const.NO_SUCH_ENTRY, )) return not status == const.NO_SUCH_ENTRY def max_idx(self): """Maximum index of Entries for this Attr Returns ======= out : int maximum Entry number """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_MAXENTRY_, ctypes.byref(count)) return count.value def new(self, data, type=None, number=None): """Create a new Entry in this Attribute .. note:: If ``number`` is provided and an Entry with that number already exists, it will be overwritten. Parameters ========== data data to put in the Entry Other Parameters ================ type : int type of the new Entry, from :mod:`~pycdf.const` (otherwise guessed from ``data``) number : int Entry number to write, default is lowest available number. """ if number is None: number = 0 while self.has_entry(number): number += 1 (dims, types, elements) = _Hyperslice.types( data, backward=self._cdf_file.backward) if type is None: #Guess based on other entries type = self._check_other_entries(types) if type is None and self.ENTRY_ == const.zENTRY_: #Try to match variable type vartype = self._cdf_file[number].type() if vartype in types: type = vartype if type is None: type = types[0] elif hasattr(type, 'value'): type = type.value self._write_entry(number, data, type, dims, elements) def number(self): """Find the attribute number for this attribute Returns ======= out : int attribute number """ no = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.ATTR_NUMBER_, self._name, ctypes.byref(no)) return no.value def global_scope(self): """Determine scope of this attribute. Returns ======= out : bool True if global (i.e. gAttr), False if zAttr """ return self._cdf_file.attr_num(self._name)[1] def rename(self, new_name): """Rename this attribute Renaming a zAttribute renames it for *all* zVariables in this CDF! 
Parameters ========== new_name : str the new name of the attribute """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_ATTR_NAME_LEN256: raise CDFError(const.BAD_ATTR_NAME) self._call(const.PUT_, const.ATTR_NAME_, enc_name) self._cdf_file.add_attr_to_cache( enc_name, *self._cdf_file.attr_num(self._name)) #still in cache del self._cdf_file._attr_info[self._name] self._name = enc_name def _get_entry(self, number): """Read an Entry associated with this L{Attr} @param number: number of Entry to return @type number: int @return: data from entry numbered L{number} @rtype: list or str """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') #Make a big enough buffer length = self._entry_len(number) cdftype = self.type(number) if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): buff = numpy.empty((), 'S{0}'.format(length), order='C') else: if not cdftype in lib.numpytypedict: raise CDFError(const.BAD_DATA_TYPE) buff = numpy.empty((length,), lib.numpytypedict[cdftype], order='C') buff = numpy.require(buff, requirements=('C', 'A', 'W')) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATA_, buff.ctypes.data_as(ctypes.c_void_p)) #decode if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): if str == bytes or self._raw: #Py2k, leave as bytes result = bytes(buff) else: #Py3k, make unicode result = str(numpy.char.array(buff).decode()) else: if not self._raw: if cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(buff) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(buff) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(buff) else: result = buff else: result = buff if length == 1: result = result[0] return result def _write_entry(self, number, data, cdf_type, dims, elements): """Write an Entry to this Attr. @param number: number of Entry to write @type number: int @param data: data to write @param cdf_type: the CDF type to write, from :py:mod:`pycdf.const` @param dims: dimensions of L{data} @type dims: list @param elements: number of elements in L{data}, 1 unless it is a string @type elements: int """ if len(dims) == 0: n_write = 1 else: n_write = dims[0] if cdf_type in (const.CDF_CHAR.value, const.CDF_UCHAR.value): data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.dtype('S' + str(elements))) n_write = elements elif cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data), except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) elif cdf_type in lib.numpytypedict: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=lib.numpytypedict[cdf_type]) else: raise CDFError(const.BAD_DATA_TYPE) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATA_, ctypes.c_long(cdf_type), ctypes.c_long(n_write), data.ctypes.data_as(ctypes.c_void_p)) def _delete(self): """Delete this Attribute Also deletes all Entries associated with it. 
""" self._call(const.DELETE_, const.ATTR_) self._cdf_file.clear_attr_from_cache(self._name) self._name = None class zAttr(Attr): """zAttribute for zVariables within a CDF. .. warning:: Because zAttributes are shared across all variables in a CDF, directly manipulating them may have unexpected consequences. It is safest to operate on zEntries via :class:`zAttrList`. .. note:: When accessing a zAttr, pyCDF exposes only the zEntry corresponding to the associated zVariable. See Also ======== :class:`Attr` """ ENTRY_ = const.zENTRY_ ENTRY_DATA_ = const.zENTRY_DATA_ SCOPE = const.VARIABLE_SCOPE ENTRY_EXISTENCE_ = const.zENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMzENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXzENTRY_ ENTRY_NUMELEMS_ = const.zENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.zENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.zENTRY_DATASPEC_ def insert(self, index, data): """Insert entry at particular index number Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError def append(self, index, data): """Add entry to end of attribute list Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError class gAttr(Attr): """Global Attribute for a CDF Represents a CDF attribute, providing access to the gEntries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. Normally accessed by providing a key to a :class:`gAttrList`: >>> attribute = cdffile.attrs['attribute_name'] >>> first_gentry = attribute[0] Each element of the list is a single gEntry of the appropriate type. The index to the elements is the gEntry number. A gEntry may be either a single string or a 1D array of numerical type. Entries of numerical type (everything but CDF_CHAR and CDF_UCHAR) with a single element are returned as scalars; multiple-element entries are returned as a list. No provision is made for accessing below the entry level; the whole list is returned at once (but Python's slicing syntax can be used to extract individual items from that list.) Multi-dimensional slicing is *not* supported; an entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry gEntries are *not* necessarily contiguous; a gAttribute may have an entry 0 and entry 2 without an entry 1. :meth:`~Attr.len` will return the *number* of gEntries; use :meth:`~Attr.max_idx` to find the highest defined gEntry number and :meth:`~Attr.has_entry` to determine if a particular gEntry number exists. Iterating over all entries is also supported:: >>> entrylist = [entry for entry in attribute] Deleting gEntries will leave a "hole": >>> attribute[0:3] = [1, 2, 3] >>> del attribute[1] >>> attribute.has_entry(1) False >>> attribute.has_entry(2) True >>> print attribute[0:3] [1, None, 3] Multi-element slices over nonexistent gEntries will return ``None`` where no entry exists. Single-element indices for nonexistent gEntries will raise ``IndexError``. Assigning ``None`` to a gEntry will delete it. 
When assigning to a gEntry, the type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing gEntry of the same number in this gAttribute #. other gEntries in this gAttribute #. data-matching constraints described in :meth:`CDF.new`. See Also ======== :class:`Attr` """ ENTRY_ = const.gENTRY_ ENTRY_DATA_ = const.gENTRY_DATA_ SCOPE = const.GLOBAL_SCOPE ENTRY_EXISTENCE_ = const.gENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMgENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXgENTRY_ ENTRY_NUMELEMS_ = const.gENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.gENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.gENTRY_DATASPEC_ class AttrList(MutableMapping): """Object representing a list of attributes. .. warning:: This class should not be used directly, but only via its subclasses, :class:`gAttrList` and :class:`zAttrList`. Methods listed here are safe to use from the subclasses. .. autosummary:: ~AttrList.clone ~AttrList.copy ~AttrList.from_dict ~AttrList.new ~AttrList.rename .. automethod:: clone .. automethod:: copy .. automethod:: from_dict .. automethod:: new .. automethod:: rename """ def __init__(self, cdf_file, special_entry=None): """Initialize the attribute collection @param cdf_file: CDF these attributes are in @type cdf_file: :py:class:`pycdf.CDF` @param special_entry: callable which returns a "special" entry number, used to limit results for zAttrs to those which match the zVar (i.e. the var number) @type special_entry: callable """ self._cdf_file = cdf_file self.special_entry = special_entry def __getitem__(self, name): """Find an Attribute by name @param name: name of the Attribute to return @type name: str @return: attribute named L{name} @rtype: L{Attr} @raise KeyError: if there is no attribute named L{name} @raise CDFError: other errors in CDF library """ try: attrib = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attrib.global_scope() != self.global_scope: raise KeyError(name + ': no ' + self.attr_name + ' by that name.') return attrib def __setitem__(self, name, data): """Create an Attribute or change its entries @param name: name of Attribute to change @type name: str @param data: Entries to populate this Attribute with. Any existing Entries will be deleted! Another C{Attr} may be specified, in which case all its entries are copied. @type data: scalar, list, or L{Attr} """ if isinstance(data, AttrList): if name in self: del self[name] attr = self._get_or_create(name) for entryno in range(data.max_idx()): if data.has_entry(entryno): attr.new(data[entryno], data.type(entryno), entryno) else: attr = self._get_or_create(name) if isinstance(data, str_classes): data = [data] else: try: junk = len(data) except TypeError: data = [data] attr[:] = data del attr[len(data):] def __delitem__(self, name): """Delete an Attribute (and all its entries) @param name: name of Attribute to delete @type name: str """ try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) attr._delete() def __iter__(self, current=0): """Iterates over all Attr in this CDF or variable Returns name of one L{Attr} at a time until reaches the end. @note: Returned in number order. 
""" count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) while current < count.value: candidate = self.AttrType(self._cdf_file, current) if candidate.global_scope() == self.global_scope: if self.special_entry is None or \ candidate.has_entry(self.special_entry()): if str == bytes: value = yield(candidate._name) else: value = yield(candidate._name.decode()) if value != None: current = self[value].number() current += 1 def __repr__(self): """Returns representation of attribute list Cannot return anything that can be eval'd to create a copy of the list, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<' + self.__class__.__name__ + ':\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute list This is an 'informal' representation in that it cannot be evaluated directly to create an L{AttrList}. @return: all the data in this list of attributes @rtype: str """ if self._cdf_file._opened: return '\n'.join([key + ': ' + ( ('\n' + ' ' * (len(key) + 2)).join( [str(value[i]) + ' [' + lib.cdftypenames[value.type(i)] + ']' for i in range(value.max_idx() + 1) if value.has_entry(i)]) if isinstance(value, Attr) else str(value) + ' [' + lib.cdftypenames[self.type(key)] + ']' ) for (key, value) in sorted(self.items())]) else: if isinstance(self._cdf_file.pathname, str): return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname) else: return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname.decode('ascii')) def clone(self, master, name=None, new_name=None): """ Clones another attribute list, or one attribute from it, into this list. Parameters ========== master : AttrList the attribute list to copy from. This can be any dict-like object. Other Parameters ================ name : str (optional) name of attribute to clone (default: clone entire list) new_name : str (optional) name of the new attribute, default ``name`` """ if name is None: self._clone_list(master) else: self._clone_attr(master, name, new_name) def copy(self): """ Create a copy of this attribute list Returns ======= out : dict copy of the entries for all attributes in this list """ return dict((key, value[:] if isinstance(value, Attr) else value) for (key, value) in self.items()) def new(self, name, data=None, type=None): """ Create a new Attr in this AttrList Parameters ========== name : str name of the new Attribute Other Parameters ================ data data to put into the first entry in the new Attribute type CDF type of the first entry from :mod:`~pycdf.const`. Only used if data are specified. Raises ====== KeyError : if the name already exists in this list """ if name in self: raise KeyError(name + ' already exists.') #A zAttr without an Entry in this zVar will be a "get" not "create" attr = self._get_or_create(name) if data is not None: if self.special_entry is None: attr.new(data, type) else: attr.new(data, type, self.special_entry()) def rename(self, old_name, new_name): """ Rename an attribute in this list Renaming a zAttribute renames it for *all* zVariables in this CDF! Parameters ========== old_name : str the current name of the attribute new_name : str the new name of the attribute """ AttrList.__getitem__(self, old_name).rename(new_name) def from_dict(self, in_dict): """ Fill this list of attributes from a dictionary .. deprecated:: 0.1.5 Use :meth:`~pycdf.AttrList.clone` instead; it supports cloning from dictionaries. 
Parameters ========== in_dict : dict Attribute list is populated entirely from this dictionary; all existing attributes are deleted. """ warnings.warn("from_dict is deprecated and will be removed. Use clone.", DeprecationWarning) for k in in_dict: self[k] = in_dict[k] for k in list(self): if not k in in_dict: del self[k] def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{AttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name self[new_name] = master[name] def _clone_list(self, master): """Clones this attribute list from another @param master: the attribute list to copy from @type master: L{AttrList} """ for name in master: self._clone_attr(master, name) for name in list(self): #Can't iterate over a list we're changing if not name in master: del self[name] def _get_or_create(self, name): """Retrieve L{Attr} or create it if it doesn't exist @param name: name of the attribute to look up or create @type name: str @return: attribute with this name @rtype: L{Attr} """ attr = None try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status != const.NO_SUCH_ATTR: raise if attr is None: attr = self.AttrType(self._cdf_file, name, True) elif attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) return attr class gAttrList(AttrList): """ Object representing *all* the gAttributes in a CDF. Normally accessed as an attribute of an open :class:`CDF`: >>> global_attribs = cdffile.attrs Appears as a dictionary: keys are attribute names; each value is an attribute represented by a :class:`gAttr` object. To access the global attribute TEXT: >>> text_attr = cdffile.attrs['TEXT'] See Also ======== :class:`AttrList` """ AttrType = gAttr attr_name = 'gAttribute' global_scope = True def __len__(self): """ Number of gAttributes in this CDF Returns ======= out : int number of gAttributes in the CDF """ count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMgATTRS_, ctypes.byref(count)) return count.value class zAttrList(AttrList): """Object representing *all* the zAttributes in a zVariable. Normally accessed as an attribute of a :class:`Var` in an open CDF: >>> epoch_attribs = cdffile['Epoch'].attrs Appears as a dictionary: keys are attribute names, values are the value of the zEntry associated with the appropriate zVariable. Each vAttribute in a CDF may only have a *single* entry associated with each variable. The entry may be a string, a single numerical value, or a series of numerical values. Entries with multiple values are returned as an entire list; direct access to the individual elements is not possible. Example: finding the first dependency of (ISTP-compliant) variable ``Flux``: >>> print cdffile['Flux'].attrs['DEPEND_0'] zAttributes are shared among zVariables, one zEntry allowed per zVariable. (pyCDF hides this detail.) Deleting the last zEntry for a zAttribute will delete the underlying zAttribute. zEntries are created and destroyed by the usual dict methods on the zAttrlist: >>> epoch_attribs['new_entry'] = [1, 2, 4] #assign a list to new zEntry >>> del epoch_attribs['new_entry'] #delete the zEntry The type of the zEntry is guessed from data provided. 
The type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing zEntry corresponding to this zVar #. other zEntries in this zAttribute #. the type of this zVar #. data-matching constraints described in :py:meth:`CDF.new` See Also ======== :class:`AttrList` """ AttrType = zAttr attr_name = 'zAttribute' global_scope = False def __init__(self, zvar): """Initialize the attribute collection @param zvar: zVariable these attributes are in @param zvar: :py:class:`pycdf.Var` """ super(zAttrList, self).__init__(zvar.cdf_file, zvar._num) self._zvar = zvar def __getitem__(self, name): """Find an zEntry by name @param name: name of the zAttribute to return @type name: str @return: attribute named L{name} @rtype: L{zAttr} @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if attrib.has_entry(zvar_num): attrib._raw = self._zvar._raw return attrib[zvar_num] else: raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) def __delitem__(self, name): """Delete an zEntry by name @param name: name of the zEntry to delete @type name: str @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library @note: If this is the only remaining entry, the Attribute will be deleted. """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(str(name) + ': no such attribute for variable ' + str(self._zvar._name)) del attrib[zvar_num] if len(attrib) == 0: attrib._delete() def __setitem__(self, name, data): """Sets a zEntry by name The type of the zEntry is guessed from L{data}. The type is chosen to match the data; subject to that constraint, it will try to match (in order): 1. existing zEntry corresponding to this zVar 2. other zEntries in this zAttribute 3. the type of this zVar 4. data-matching constraints described in L{_Hyperslice.types} @param name: name of zAttribute; zEntry for this zVariable will be set in zAttribute by this name @type name: str @raise CDFError: errors in CDF library @raise ValueError: if unable to find a valid CDF type matching L{data}, or if L{data} is the wrong dimensions. """ try: attr = super(zAttrList, self).__getitem__(name) except KeyError: attr = zAttr(self._cdf_file, name, True) attr._raw = self._zvar._raw attr[self._zvar._num()] = data def __len__(self): """Number of zAttributes in this variable @return: number of zAttributes in the CDF which have entries for this variable. 
@rtype: int """ length = 0 count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) current = 0 while current < count.value: candidate = zAttr(self._cdf_file, current) if not candidate.global_scope(): if candidate.has_entry(self._zvar._num()): length += 1 current += 1 return length # MASKED: type function (lines 5290-5307) def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{zAttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name if new_name in self: del self[new_name] self.new(new_name, master[name], master.type(name) if hasattr(master, 'type') else None)
def type(self, name, new_type=None):
        """Find or change the CDF type of a zEntry in this zVar

        @param name: name of the zAttr to check or change
        @type name: str
        @param new_type: type to change it to, see :py:mod:`pycdf.const`
        @type new_type: ctypes.c_long
        @return: CDF variable type, see :py:mod:`pycdf.const`
        @rtype: int
        @note: If changing types, old and new must be equivalent,
               see CDF User's Guide section 2.5.5 pg. 57
        """
        attrib = super(zAttrList, self).__getitem__(name)
        zvar_num = self._zvar._num()
        if not attrib.has_entry(zvar_num):
            raise KeyError(name + ': no such attribute for variable ' +
                           self._zvar.name())
        return attrib.type(zvar_num, new_type)
5290
5307
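A minimal usage sketch of the zAttrList.type implementation above. This is an illustrative addition, not part of the original source: the import name pycdf, the file name example.cdf, the variable Epoch, and the attribute VALIDMIN are all assumed placeholders.

# Usage sketch (illustrative; names below are hypothetical).
# Assumes the standalone module is importable as "pycdf" and that
# "example.cdf" contains a zVariable "Epoch" with a "VALIDMIN" zEntry.
import pycdf

f = pycdf.CDF('example.cdf')            # open an existing CDF (read-only)
epoch_attrs = f['Epoch'].attrs          # zAttrList for the 'Epoch' zVariable
cdftype = epoch_attrs.type('VALIDMIN')  # CDF type of this zVar's zEntry
print(pycdf.lib.cdftypenames[cdftype])  # e.g. 'CDF_EPOCH'
# Changing the type requires a writable file and an equivalent CDF type, e.g.:
# epoch_attrs.type('VALIDMIN', new_type=pycdf.const.CDF_EPOCH16)
f.close()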
#!/usr/bin/env python # -*- coding: utf-8 -*- """ das developers note: This a is modification of the original SpacePy pycdf package. All refereneces to the greater spacepy package have been removed to create a small standalone module. --cwp 2018-10-18 The libcdf.so location code has been changed to find the version installed in anaconda. --cwp 2020-04-06 This package provides a Python interface to the Common Data Format (CDF) library used for many NASA missions, available at http://cdf.gsfc.nasa.gov/. It is targeted at Python 2.6+ and should work without change on either Python 2 or Python 3. The interface is intended to be 'pythonic' rather than reproducing the C interface. To open or close a CDF and access its variables, see the :class:`CDF` class. Accessing data within the variables is via the :class:`Var` class. The :data:`lib` object provides access to some routines that affect the functionality of the library in general. The :mod:`~pycdf.const` module contains constants useful for accessing the underlying library. Authors: Jon Niehof Institution: University of New Hampshire Contact: [email protected] Copyright 2010-2015 Los Alamos National Security, LLC. """ __contact__ = 'Jon Niehof, [email protected]' try: from collections.abc import MutableMapping, MutableSequence except ImportError: from collections import MutableMapping, MutableSequence import ctypes import ctypes.util import datetime import operator import os import os.path import shutil import sys import tempfile import warnings import weakref import numpy import numpy.ma #Import const AFTER library loaded, so failed load doesn't leave half-imported #from . import const try: str_classes = (str, bytes, unicode) except NameError: str_classes = (str, bytes) class Library(object): """ Abstraction of the base CDF C library and its state. Not normally intended for end-user use. An instance of this class is created at package load time as the :data:`~pycdf.lib` variable, providing access to the underlying C library if necessary. The CDF library itself is described in section 2.1 of the CDF user's guide, as well as the CDF C reference manual. Calling the C library directly requires knowledge of :mod:`ctypes`. Instantiating this object loads the C library, see :doc:`/pycdf` docs for details. .. autosummary:: ~Library.call ~Library.check_status ~Library.datetime_to_epoch ~Library.datetime_to_epoch16 ~Library.datetime_to_tt2000 ~Library.epoch_to_datetime ~Library.epoch_to_epoch16 ~Library.epoch_to_num ~Library.epoch_to_tt2000 ~Library.epoch16_to_datetime ~Library.epoch16_to_epoch ~Library.epoch16_to_tt2000 ~Library.set_backward supports_int8 ~Library.tt2000_to_datetime ~Library.tt2000_to_epoch ~Library.tt2000_to_epoch16 v_datetime_to_epoch v_datetime_to_epoch16 v_datetime_to_tt2000 v_epoch_to_datetime v_epoch_to_tt2000 v_epoch16_to_datetime v_epoch16_to_tt2000 v_tt2000_to_datetime v_tt2000_to_epoch v_tt2000_to_epoch16 libpath version .. automethod:: call .. automethod:: check_status .. automethod:: datetime_to_epoch .. automethod:: datetime_to_epoch16 .. automethod:: datetime_to_tt2000 .. automethod:: epoch_to_datetime .. automethod:: epoch_to_epoch16 .. automethod:: epoch_to_num .. automethod:: epoch_to_tt2000 .. automethod:: epoch16_to_datetime .. automethod:: epoch16_to_epoch .. automethod:: epoch16_to_tt2000 .. automethod:: set_backward .. attribute:: supports_int8 True if this library supports INT8 and TIME_TT2000 types; else False. .. automethod:: tt2000_to_datetime .. automethod:: tt2000_to_epoch .. 
automethod:: tt2000_to_epoch16 .. method:: v_datetime_to_epoch(datetime) A vectorized version of :meth:`datetime_to_epoch` which takes a numpy array of datetimes as input and returns an array of epochs. .. method:: v_datetime_to_epoch16(datetime) A vectorized version of :meth:`datetime_to_epoch16` which takes a numpy array of datetimes as input and returns an array of epoch16. .. method:: v_datetime_to_tt2000(datetime) A vectorized version of :meth:`datetime_to_tt2000` which takes a numpy array of datetimes as input and returns an array of TT2000. .. method:: v_epoch_to_datetime(epoch) A vectorized version of :meth:`epoch_to_datetime` which takes a numpy array of epochs as input and returns an array of datetimes. .. method:: v_epoch_to_tt2000(epoch) A vectorized version of :meth:`epoch_to_tt2000` which takes a numpy array of epochs as input and returns an array of tt2000s. .. method:: v_epoch16_to_datetime(epoch0, epoch1) A vectorized version of :meth:`epoch16_to_datetime` which takes a numpy array of epoch16 as input and returns an array of datetimes. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_epoch16_to_tt2000(epoch16) A vectorized version of :meth:`epoch16_to_tt2000` which takes a numpy array of epoch16 as input and returns an array of tt2000s. An epoch16 is a pair of doubles; the input array's last dimension must be two (and the returned array will have one fewer dimension). .. method:: v_tt2000_to_datetime(tt2000) A vectorized version of :meth:`tt2000_to_datetime` which takes a numpy array of tt2000 as input and returns an array of datetimes. .. method:: v_tt2000_to_epoch(tt2000) A vectorized version of :meth:`tt2000_to_epoch` which takes a numpy array of tt2000 as input and returns an array of epochs. .. method:: v_tt2000_to_epoch16(tt2000) A vectorized version of :meth:`tt2000_to_epoch16` which takes a numpy array of tt2000 as input and returns an array of epoch16. .. attribute:: libpath The path where pycdf found the CDF C library, potentially useful in debugging. If this contains just the name of a file (with no path information), then the system linker found the library for pycdf. On Linux, ``ldconfig -p`` may be useful for displaying the system's library resolution. .. attribute:: version Version of the CDF library, (version, release, increment, subincrement) """ def __init__(self, libpath=None, library=None): """Load the CDF C library. Searches for the library in the order: 1. Appropriately-named file in CDF_LIB 2. Appropriately-named file in CDF_BASE 3. Standard library search path @raise CDFError: BAD_DATA_TYPE if can't map types properly """ if not 'CDF_TMP' in os.environ: os.environ['CDF_TMP'] = tempfile.gettempdir() if not library: if not libpath: self.libpath, self._library = self._find_lib() if self._library is None: raise Exception(( 'Cannot load CDF C library; checked {0}. 
' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(self.libpath))) else: self._library = ctypes.CDLL(libpath) self.libpath = libpath else: self._library = library self.libpath = libpath self._library.CDFlib.restype = ctypes.c_long #commonly used, so set it up here self._library.EPOCHbreakdown.restype = ctypes.c_long self._library.computeEPOCH.restype = ctypes.c_double self._library.computeEPOCH.argtypes = [ctypes.c_long] * 7 self._library.computeEPOCH16.restype = ctypes.c_double self._library.computeEPOCH16.argtypes = [ctypes.c_long] * 10 + \ [ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDFsetFileBackward'): self._library.CDFsetFileBackward.restype = None self._library.CDFsetFileBackward.argtypes = [ctypes.c_long] #Map old name to the 3.7.1+ name if not hasattr(self._library, 'computeTT2000') \ and hasattr(self._library, 'CDF_TT2000_from_UTC_parts'): self._library.computeTT2000 \ = self._library.CDF_TT2000_from_UTC_parts if hasattr(self._library, 'computeTT2000'): self._library.computeTT2000.restype = ctypes.c_longlong self._library.computeTT2000.argtypes = \ [ctypes.c_double] *9 #Map old name to the 3.7.1+ name if not hasattr(self._library, 'breakdownTT2000') \ and hasattr(self._library, 'CDF_TT2000_to_UTC_parts'): self._library.breakdownTT2000 \ = self._library.CDF_TT2000_to_UTC_parts if hasattr(self._library, 'breakdownTT2000'): self._library.breakdownTT2000.restype = None self._library.breakdownTT2000.argtypes = \ [ctypes.c_longlong] + [ctypes.POINTER(ctypes.c_double)] * 9 if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH'): self._library.CDF_TT2000_to_UTC_EPOCH.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH.argtypes = [ctypes.c_longlong] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH'): self._library.CDF_TT2000_from_UTC_EPOCH.restype = ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH.argtypes = [ctypes.c_double] if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH16'): self._library.CDF_TT2000_to_UTC_EPOCH16.restype = ctypes.c_double self._library.CDF_TT2000_to_UTC_EPOCH16.argtypes = \ [ctypes.c_longlong, ctypes.POINTER(ctypes.c_double * 2)] if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH16'): self._library.CDF_TT2000_from_UTC_EPOCH16.restype = \ ctypes.c_longlong self._library.CDF_TT2000_from_UTC_EPOCH16.argtypes = \ [ctypes.POINTER(ctypes.c_double * 2)] #Get CDF version information ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) sub = ctypes.c_char(b' ') self.call(const.GET_, const.LIB_VERSION_, ctypes.byref(ver), const.GET_, const.LIB_RELEASE_, ctypes.byref(rel), const.GET_, const.LIB_INCREMENT_, ctypes.byref(inc), const.GET_, const.LIB_subINCREMENT_, ctypes.byref(sub)) ver = ver.value rel = rel.value inc = inc.value sub = sub.value self.version = (ver, rel, inc, sub) self._del_middle_rec_bug = ver < 3 or (ver == 3 and (rel < 4 or (rel == 4 and inc < 1))) self.supports_int8 = (ver > 3 or (ver == 3 and rel >=4)) self.cdftypenames = {const.CDF_BYTE.value: 'CDF_BYTE', const.CDF_CHAR.value: 'CDF_CHAR', const.CDF_INT1.value: 'CDF_INT1', const.CDF_UCHAR.value: 'CDF_UCHAR', const.CDF_UINT1.value: 'CDF_UINT1', const.CDF_INT2.value: 'CDF_INT2', const.CDF_UINT2.value: 'CDF_UINT2', const.CDF_INT4.value: 'CDF_INT4', const.CDF_UINT4.value: 'CDF_UINT4', const.CDF_INT8.value: 'CDF_INT8', const.CDF_FLOAT.value: 'CDF_FLOAT', const.CDF_REAL4.value: 'CDF_REAL4', const.CDF_DOUBLE.value: 'CDF_DOUBLE', const.CDF_REAL8.value: 'CDF_REAL8', const.CDF_EPOCH.value: 'CDF_EPOCH', 
const.CDF_EPOCH16.value: 'CDF_EPOCH16', const.CDF_TIME_TT2000.value: 'CDF_TIME_TT2000', } self.numpytypedict = {const.CDF_BYTE.value: numpy.int8, const.CDF_CHAR.value: numpy.int8, const.CDF_INT1.value: numpy.int8, const.CDF_UCHAR.value: numpy.uint8, const.CDF_UINT1.value: numpy.uint8, const.CDF_INT2.value: numpy.int16, const.CDF_UINT2.value: numpy.uint16, const.CDF_INT4.value: numpy.int32, const.CDF_UINT4.value: numpy.uint32, const.CDF_INT8.value: numpy.int64, const.CDF_FLOAT.value: numpy.float32, const.CDF_REAL4.value: numpy.float32, const.CDF_DOUBLE.value: numpy.float64, const.CDF_REAL8.value: numpy.float64, const.CDF_EPOCH.value: numpy.float64, const.CDF_EPOCH16.value: numpy.dtype((numpy.float64, 2)), const.CDF_TIME_TT2000.value: numpy.int64, } self.timetypes = [const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value] if not self.supports_int8: del self.cdftypenames[const.CDF_INT8.value] del self.numpytypedict[const.CDF_INT8.value] del self.cdftypenames[const.CDF_TIME_TT2000.value] del self.numpytypedict[const.CDF_TIME_TT2000.value] elif sys.platform.startswith('linux') \ and os.uname()[4].startswith('arm') \ and hasattr(self._library, 'computeTT2000') \ and self._library.computeTT2000( 2010, 1, 1, 0, 0, 0, 0, 0, 0) != 315576066184000000: #TT2000 call failed, so probably need to type-pun #double arguments to variadic functions. #Calling convention for non-variadic functions with floats #is unique, but convention for ints is same as variadic. #So type-pun arguments to integers to force that calling #convention. if ctypes.sizeof(ctypes.c_longlong) != \ ctypes.sizeof(ctypes.c_double): warnings.warn('ARM with unknown type sizes; ' 'TT2000 functions will not work.') else: self._library.computeTT2000.argtypes = \ [ctypes.c_longlong] * 9 c_ll_p = ctypes.POINTER(ctypes.c_longlong) if self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( 2010)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 1)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) != 315576066184000000: warnings.warn('ARM with unknown calling convention; ' 'TT2000 functions will not work.') self.datetime_to_tt2000 = self._datetime_to_tt2000_typepunned v_epoch16_to_datetime = numpy.frompyfunc( self.epoch16_to_datetime, 2, 1) self.v_epoch16_to_datetime = \ lambda x: v_epoch16_to_datetime(x[..., 0], x[..., 1]) self.v_epoch_to_datetime = numpy.frompyfunc( self.epoch_to_datetime, 1, 1) self.v_tt2000_to_datetime = numpy.frompyfunc( self.tt2000_to_datetime, 1, 1) self.v_datetime_to_epoch = numpy.vectorize( self.datetime_to_epoch, otypes=[numpy.float64]) v_datetime_to_epoch16 = numpy.frompyfunc( self.datetime_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. 
We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_datetime_to_epoch16(x): retval = numpy.require(v_datetime_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_datetime_to_epoch16 = _v_datetime_to_epoch16 self.v_datetime_to_tt2000 = numpy.vectorize( self.datetime_to_tt2000, otypes=[numpy.int64]) self.v_epoch_to_tt2000 = numpy.vectorize( self.epoch_to_tt2000, otypes=[numpy.int64]) self.v_tt2000_to_epoch = numpy.vectorize( self.tt2000_to_epoch, otypes=[numpy.float64]) v_epoch16_to_tt2000 = numpy.frompyfunc( self.epoch16_to_tt2000, 2, 1) self.v_epoch16_to_tt2000 = \ lambda x: v_epoch16_to_tt2000(x[..., 0], x[..., 1]) v_tt2000_to_epoch16 = numpy.frompyfunc( self.tt2000_to_epoch16, 1, 2) #frompyfunc returns a TUPLE of the returned values, #implicitly the 0th dimension. We want everything from one #call paired, so this rolls the 0th dimension to the last #(via the second-to-last) def _v_tt2000_to_epoch16(x): retval = numpy.require(v_tt2000_to_epoch16(x), dtype=numpy.float64) if len(retval.shape) > 1: return numpy.rollaxis( numpy.rollaxis(retval, 0, -1), -1, -2) else: return retval self.v_tt2000_to_epoch16 = _v_tt2000_to_epoch16 if not self.supports_int8: self.datetime_to_tt2000 = self._bad_tt2000 self.tt2000_to_datetime = self._bad_tt2000 self.v_datetime_to_tt2000 = self._bad_tt2000 self.v_tt2000_to_datetime = self._bad_tt2000 self.epoch_to_tt2000 = self._bad_tt2000 self.v_epoch_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch = self._bad_tt2000 self.v_tt2000_to_epoch = self._bad_tt2000 self.epoch_16_to_tt2000 = self._bad_tt2000 self.v_epoch16_to_tt2000 = self._bad_tt2000 self.tt2000_to_epoch16 = self._bad_tt2000 self.v_tt2000_to_epoch16 = self._bad_tt2000 #Default to V2 CDF self.set_backward(True) @staticmethod def _find_lib(): """ Search for the CDF library Searches in likely locations for CDF libraries and attempts to load them. Stops at first successful load and, if fails, reports all the files that were tried as libraries. Returns ======= out : tuple This is either (path to library, loaded library) or, in the event of failure, (None, list of libraries tried) """ failed = [] for libpath in Library._lib_paths(): try: lib = ctypes.CDLL(libpath) except: failed.append(libpath) else: return libpath, lib return (failed, None) @staticmethod def _lib_paths(): """Find candidate paths for the CDF library Does not check that the library is actually in any particular directory, just returns a list of possible locations, in priority order. Returns ======= out : generator of str paths that look like the CDF library """ #What the library might be named names = { 'win32': ['cdf.dll'], 'darwin': ['libcdf.dylib', 'cdf.dylib', 'libcdf.so'], 'linux2': ['libcdf.so'], 'linux': ['libcdf.so'], } names = names.get(sys.platform, ['libcdf.so']) #All existing CDF-library-like paths within a directory search_dir = lambda x: \ [os.path.join(x, fname) for fname in names if os.path.exists(os.path.join(x, fname))] # Only use anaconda locations... # Defined during builds ... if 'PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['PREFIX'], 'lib')): yield p # defined when conda is activated ... 
if 'CONDA_PREFIX' in os.environ: if sys.platform == 'win32': for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'Library', 'bin')): yield p else: for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'lib')): yield p # Special subdirectory for anaconda unix packages on windows if 'LIBRARY_BIN' in os.environ: for p in search_dir(os.environ['LIBRARY_BIN']): yield p ctypespath = ctypes.util.find_library( 'cdf.dll' if sys.platform == 'win32' else 'cdf') if ctypespath: yield ctypespath def check_status(self, status, ignore=()): """ Raise exception or warning based on return status of CDF call Parameters ========== status : int status returned by the C library Other Parameters ================ ignore : sequence of ctypes.c_long CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. (Default none). Raises ====== CDFError : if status < CDF_WARN, indicating an error Warns ===== CDFWarning : if CDF_WARN <= status < CDF_OK, indicating a warning. Returns ======= out : int status (unchanged) """ if status == const.CDF_OK or status in ignore: return status if status < const.CDF_WARN: raise CDFError(status) else: warning = CDFWarning(status) warning.warn() return status def call(self, *args, **kwargs): """ Call the CDF internal interface Passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with :meth:`check_status`. Terminal NULL is automatically added to args. Parameters ========== args : various, see :mod:`ctypes` Passed directly to the CDF library interface. Useful constants are defined in the :mod:`~pycdf.const` module. Other Parameters ================ ignore : sequence of CDF statuses sequence of CDF statuses to ignore. If any of these is returned by CDF library, any related warnings or exceptions will *not* be raised. Returns ======= out : int CDF status from the library Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ if 'ignore' in kwargs: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) ), kwargs['ignore']) else: return self.check_status(self._library.CDFlib( *(args + (const.NULL_, )) )) def set_backward(self, backward=True): """ Set backward compatibility mode for new CDFs Unless backward compatible mode is set, CDF files created by the version 3 library can not be read by V2. Parameters ========== backward : boolean Set backward compatible mode if True; clear it if False. Raises ====== ValueError : if backward=False and underlying CDF library is V2 """ if self.version[0] < 3: if not backward: raise ValueError( 'Cannot disable backward-compatible mode for CDF version 2.') else: return self._library.CDFsetFileBackward(const.BACKWARDFILEon if backward else const.BACKWARDFILEoff) def epoch_to_datetime(self, epoch): """ Converts a CDF epoch value to a datetime Parameters ========== epoch : float epoch value from CDF Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
See Also ======== v_epoch_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) self._library.EPOCHbreakdown(ctypes.c_double(epoch), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999000) else: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, msec.value * 1000) def datetime_to_epoch(self, dt): """ Converts a Python datetime to a CDF Epoch value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : float epoch corresponding to dt See Also ======== v_datetime_to_epoch """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) micro = dt.microsecond % 1000 if micro >= 500 and dt.year < 9999: dt += datetime.timedelta(0, 0, 1000) return self._library.computeEPOCH(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000)) def epoch16_to_datetime(self, epoch0, epoch1): """ Converts a CDF epoch16 value to a datetime .. note:: The call signature has changed since SpacePy 0.1.2. Formerly this method took a single argument with two values; now it requires two arguments (one for each value). To convert existing code, replace ``epoch16_to_datetime(epoch)`` with ``epoch16_to_datetime(*epoch)``. Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. See Also ======== v_epoch16_to_datetime """ yyyy = ctypes.c_long(0) mm = ctypes.c_long(0) dd = ctypes.c_long(0) hh = ctypes.c_long(0) min = ctypes.c_long(0) sec = ctypes.c_long(0) msec = ctypes.c_long(0) usec = ctypes.c_long(0) nsec = ctypes.c_long(0) psec = ctypes.c_long(0) self._library.EPOCH16breakdown((ctypes.c_double * 2)(epoch0, epoch1), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec), ctypes.byref(psec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) micro = int(float(msec.value) * 1000 + float(usec.value) + float(nsec.value) / 1000 + float(psec.value) / 1e6 + 0.5) if micro < 1000000: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime(yyyy.value, mm.value, dd.value, hh.value, min.value, sec.value, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_epoch16(self, dt): """ Converts a Python datetime to a CDF Epoch16 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : list of float epoch16 corresponding to dt See Also ======== v_datetime_to_epoch16 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt.replace(tzinfo=None) #Default to "illegal epoch" epoch16 = (ctypes.c_double * 2)(-1., -1.) 
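        #computeEPOCH16 writes its result through the two-element buffer
        #passed by reference; a truthy (nonzero) return value is treated as
        #failure, so the illegal-epoch sentinel above is returned instead.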
if self._library.computeEPOCH16(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0, 0, epoch16): return (-1., -1.) #Failure, so illegal epoch return (epoch16[0], epoch16[1]) def epoch_to_epoch16(self, epoch): """ Converts a CDF EPOCH to a CDF EPOCH16 value Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : (double, double) EPOCH16 corresponding to epoch """ e = numpy.require(epoch, numpy.float64) s = numpy.trunc(e / 1000.0) #ugly numpy stuff, probably a better way.... res = numpy.hstack((s, (e - s * 1000.0) * 1e9)) if len(res) <= 2: return res newshape = list(res.shape[0:-2]) newshape.append(res.shape[-1] // 2) newshape.append(2) return numpy.rollaxis(res.reshape(newshape), -1, -2) def epoch_to_num(self, epoch): """ Convert CDF EPOCH to matplotlib number. Same output as :func:`~matplotlib.dates.date2num` and useful for plotting large data sets without converting the times through datetime. Parameters ========== epoch : double EPOCH to convert. Lists and numpy arrays are acceptable. Returns ======= out : double Floating point number representing days since 0001-01-01. """ #date2num day 1 is 1/1/1 00UT #epoch 1/1/1 00UT is 31622400000.0 (millisecond) return (epoch - 31622400000.0) / (24 * 60 * 60 * 1000.0) + 1.0 def epoch16_to_epoch(self, epoch16): """ Converts a CDF EPOCH16 to a CDF EPOCH value Parameters ========== epoch16 : (double, double) EPOCH16 to convert. Lists and numpy arrays are acceptable. LAST dimension should be 2: the two pairs of EPOCH16 Returns ======= out : double EPOCH corresponding to epoch16 """ e = numpy.require(epoch16, numpy.float64) return e[..., 0] * 1000.0 + numpy.round(e[..., 1] / 1e9) def tt2000_to_datetime(self, tt2000): """ Converts a CDF TT2000 value to a datetime .. note:: Although TT2000 values support leapseconds, Python's datetime object does not. Any times after 23:59:59.999999 will be truncated to 23:59:59.999999. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : :class:`datetime.datetime` date and time corresponding to epoch. Invalid values are set to usual epoch invalid value, i.e. last moment of year 9999. 
See Also ======== v_tt2000_to_datetime """ yyyy = ctypes.c_double(0) mm = ctypes.c_double(0) dd = ctypes.c_double(0) hh = ctypes.c_double(0) min = ctypes.c_double(0) sec = ctypes.c_double(0) msec = ctypes.c_double(0) usec = ctypes.c_double(0) nsec = ctypes.c_double(0) self._library.breakdownTT2000( ctypes.c_longlong(tt2000), ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd), ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec), ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec)) if yyyy.value <= 0: return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999) sec = int(sec.value) if sec >= 60: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), 59, 999999) micro = int(msec.value * 1000 + usec.value + nsec.value / 1000 + 0.5) if micro < 1000000: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro) else: add_sec = int(micro / 1000000) try: return datetime.datetime( int(yyyy.value), int(mm.value), int(dd.value), int(hh.value), int(min.value), sec, micro - add_sec * 1000000) + \ datetime.timedelta(seconds=add_sec) except OverflowError: return datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999) def datetime_to_tt2000(self, dt): """ Converts a Python datetime to a CDF TT2000 value Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, int(dt.microsecond / 1000), dt.microsecond % 1000, 0) def _datetime_to_tt2000_typepunned(self, dt): """ Converts a Python datetime to a CDF TT2000 value Typepunned version that passes doubles as longlongs, to get around ARM calling convention oddness. Parameters ========== dt : :class:`datetime.datetime` date and time to convert Returns ======= out : int tt2000 corresponding to dt See Also ======== v_datetime_to_tt2000 """ c_ll_p = ctypes.POINTER(ctypes.c_longlong) if dt.tzinfo != None and dt.utcoffset() != None: dt = dt - dt.utcoffset() dt = dt.replace(tzinfo=None) if dt == datetime.datetime.max: return -2**63 return self._library.computeTT2000( ctypes.cast(ctypes.pointer(ctypes.c_double( dt.year)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.month)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.day)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.hour)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.minute)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.second)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond // 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( dt.microsecond % 1000)), c_ll_p).contents, ctypes.cast(ctypes.pointer(ctypes.c_double( 0)), c_ll_p).contents) def epoch_to_tt2000(self, epoch): """ Converts a CDF EPOCH to a CDF TT2000 value Parameters ========== epoch : double EPOCH to convert Returns ======= out : int tt2000 corresponding to epoch See Also ======== v_epoch_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH(epoch) def tt2000_to_epoch(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH .. note:: Although TT2000 values support leapseconds, CDF EPOCH values do not. 
Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double EPOCH corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch """ return self._library.CDF_TT2000_to_UTC_EPOCH(tt2000) def epoch16_to_tt2000(self, epoch0, epoch1): """ Converts a CDF epoch16 value to TT2000 .. note:: Because TT2000 does not support picoseconds, the picoseconds value in epoch is ignored (i.e., truncated.) Parameters ========== epoch0 : float epoch16 value from CDF, first half epoch1 : float epoch16 value from CDF, second half Raises ====== EpochError : if input invalid Returns ======= out : long TT2000 corresponding to epoch. See Also ======== v_epoch16_to_tt2000 """ return self._library.CDF_TT2000_from_UTC_EPOCH16( (ctypes.c_double * 2)(epoch0, epoch1)) def tt2000_to_epoch16(self, tt2000): """ Converts a CDF TT2000 value to a CDF EPOCH16 .. note:: Although TT2000 values support leapseconds, CDF EPOCH16 values do not. Times during leapseconds are rounded up to beginning of the next day. Parameters ========== tt2000 : int TT2000 value from CDF Raises ====== EpochError : if input invalid Returns ======= out : double, double EPOCH16 corresponding to the TT2000 input time See Also ======== v_tt2000_to_epoch16 """ #Default to "illegal epoch" if isn't populated epoch16 = (ctypes.c_double * 2)(-1., -1.) if self._library.CDF_TT2000_to_UTC_EPOCH16(tt2000, epoch16): return (-1., -1.) #Failure; illegal epoch return (epoch16[0], epoch16[1]) def _bad_tt2000(*args, **kwargs): """Convenience function for complaining that TT2000 not supported""" raise NotImplementedError( 'TT2000 functions require CDF library 3.4.0 or later') def download_library(): """Download and install the CDF library""" if sys.platform != 'win32': raise NotImplementedError( 'CDF library install only supported on Windows') try: import html.parser as HTMLParser except ImportError: import HTMLParser #https://stackoverflow.com/questions/1713038/super-fails-with-error-typeerror-argument-1-must-be-type-not-classobj class LinkParser(HTMLParser.HTMLParser, object): def __init__(self, *args, **kwargs): self.links_found = [] super(LinkParser, self).__init__(*args, **kwargs) def handle_starttag(self, tag, attrs): if tag != 'a' or attrs[0][0] != 'href': return self.links_found.append(attrs[0][1]) import re import subprocess try: import urllib.request as u except ImportError: import urllib as u # Removed reference to spacepy #import spacepy #if spacepy.config.get('user_agent', None): # class AppURLopener(u.FancyURLopener): # version = spacepy.config['user_agent'] # u._urlopener = AppURLopener() baseurl = 'https://spdf.sci.gsfc.nasa.gov/pub/software/cdf/dist/' url = u.urlopen(baseurl) listing = url.read() url.close() p = LinkParser() p.feed(listing) cdfdist = [l for l in p.links_found if re.match('^cdf3\d_\d(?:_\d)?/$', l)] if not cdfdist: raise RuntimeError( "Couldn't find CDF distribution directory to download") cdfdist.sort(key=lambda x: x.rstrip('/').split('_')) cdfverbase = cdfdist[-1].rstrip('/') instfname = cdfverbase + ('_0' if cdfverbase.count('_') == 1 else '') + \ '-setup-{0}.exe'.format(len('%x' % sys.maxsize)*4) insturl = baseurl + cdfverbase + '/windows/' + instfname tmpdir = tempfile.mkdtemp() try: fname, status = u.urlretrieve(insturl, os.path.join(tmpdir, instfname)) subprocess.check_call([fname, '/install', '/q1'], shell=False) finally: shutil.rmtree(tmpdir) _libpath, _library = Library._find_lib() 
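#_find_lib returns (library path, loaded library) on success, or
#(list of paths tried, None) if every candidate failed to load; the
#check below reports those attempted paths in the exception message.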
if _library is None: raise Exception(('Cannot load CDF C library; checked {0}. ' 'Try \'os.environ["CDF_LIB"] = library_directory\' ' 'before import.').format(', '.join(_libpath))) from . import const lib = Library(_libpath, _library) """Module global library object. Initalized at module load time so all classes have ready access to the CDF library and a common state. E.g: >>> import pycdf >>> pycdf.lib.version (3, 3, 0, ' ') """ class CDFException(Exception): """ Base class for errors or warnings in the CDF library. Not normally used directly, but in subclasses :class:`CDFError` and :class:`CDFWarning`. Error messages provided by this class are looked up from the underlying C library. """ def __init__(self, status): """ Create a CDF Exception Uses CDF C library to look up an appropriate error message. Parameters ========== status : ctypes.c_long CDF status """ self.status = status self.string = 'CDF error ' + repr(status) + ', unable to get details.' message = ctypes.create_string_buffer(const.CDF_STATUSTEXT_LEN + 1) try: retval = lib._library.CDFlib(const.SELECT_, const.CDF_STATUS_, ctypes.c_long(status), const.GET_, const.STATUS_TEXT_, message, const.NULL_) if retval == const.CDF_OK: if isinstance(message.value, str): self.string = message.value elif isinstance(message.value, bytes): self.string = message.value.decode() except: pass def __str__(self): """ Error string associated with the library error. Returns ======= out : str Error message from the CDF library. """ return self.string class CDFError(CDFException): """Raised for an error in the CDF library.""" pass class CDFWarning(CDFException, UserWarning): """Used for a warning in the CDF library.""" def warn(self, level=4): """ Issues a warning based on the information stored in my exception Intended for use in check_status or similar wrapper function. Other Parameters ================ level : int optional (default 3), how far up the stack the warning should be reported. Passed directly to :class:`warnings.warn`. """ warnings.warn(self, self.__class__, level) class EpochError(Exception): """Used for errors in epoch routines""" pass def _compress(obj, comptype=None, param=None): """Set or check the compression of a :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param obj: object on which to set or check compression @type obj: :py:class:`pycdf.CDF` or :py:class:`pycdf.Var` @param comptype: type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :py:mod:`pycdf.const`. If not specified, will not change compression. @type comptype: ctypes.c_long @param param: Compression parameter, see CDF CRM 4.10 and :py:mod:`pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
@type param: ctypes.c_long @return: (comptype, param) currently in effect @rtype: tuple """ if isinstance(obj, CDF): COMPRESSION_ = const.CDF_COMPRESSION_ elif isinstance(obj, Var): COMPRESSION_ = const.zVAR_COMPRESSION_ else: raise ValueError('Must specify a CDF or Var type.') validparams = {const.NO_COMPRESSION.value: [ctypes.c_long(0)], const.RLE_COMPRESSION.value: [const.RLE_OF_ZEROs], const.HUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.AHUFF_COMPRESSION.value: [const.OPTIMAL_ENCODING_TREES], const.GZIP_COMPRESSION.value: [ctypes.c_long(5), ctypes.c_long(1), ctypes.c_long(2), ctypes.c_long(3), ctypes.c_long(4), ctypes.c_long(6), ctypes.c_long(7), ctypes.c_long(8), ctypes.c_long(9), ], } comptypes = [const.NO_COMPRESSION, const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION, const.GZIP_COMPRESSION] comptypevalues = [i.value for i in comptypes] if comptype != None: if not hasattr(comptype, 'value'): comptype = ctypes.c_long(comptype) if param is None: if not comptype.value in validparams: raise CDFError(const.BAD_COMPRESSION) param = validparams[comptype.value][0] paramlist = (ctypes.c_long * 1)(param) obj._call(const.PUT_, COMPRESSION_, comptype, paramlist) params = (ctypes.c_long * const.CDF_MAX_PARMS)(*([0] * const.CDF_MAX_PARMS)) comptype = ctypes.c_long(0) percent = ctypes.c_long(0) obj._call(const.GET_, COMPRESSION_, ctypes.byref(comptype), ctypes.byref(params), ctypes.byref(percent)) param = params[0] if not comptype.value in comptypevalues: raise CDFError(const.BAD_COMPRESSION) validparamvalues = [i.value for i in validparams[comptype.value]] if not param in validparamvalues: raise CDFError(const.BAD_COMPRESSION_PARM) comptype = comptypes[comptypevalues.index(comptype.value)] if comptype in (const.RLE_COMPRESSION, const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION): param = validparams[comptype.value][validparamvalues.index(param)] return (comptype, param) class CDF(MutableMapping): """ Python object representing a CDF file. Open or create a CDF file by creating an object of this class. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. A readonly CDF with many variables may be slow to close. See :meth:`readonly`. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :meth:`close` or :meth:`save` when done. .. note:: Existing CDF files are opened read-only by default, see :meth:`readonly` to change. CDF supports the `with <http://docs.python.org/tutorial/inputoutput.html#methods-of-file-objects>`_ keyword, like other file objects, so: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... #do brilliant things with the CDF will open the CDF, execute the indented statements, and close the CDF when finished or when an error occurs. The `python docs <http://docs.python.org/reference/compound_stmts.html#with>`_ include more detail on this 'context manager' ability. 
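    For instance, a minimal sketch (assuming the file contains a variable
    named ``Epoch``) that reads one variable and relies on the context
    manager to close the file:

    >>> with pycdf.CDF('cdf_filename.cdf') as cdffile:
    ...     epoch_data = cdffile['Epoch'][...]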
CDF objects behave like a python `dictionary <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_, where the keys are names of variables in the CDF, and the values, :class:`Var` objects. As a dictionary, they are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_ and it is easy to loop over all of the variables in a file. Some examples: #. List the names of all variables in the open CDF ``cdffile``: >>> cdffile.keys() >>> for k in cdffile: #Alternate ... print(k) #. Get a :class:`Var` object for the variable named ``Epoch``: >>> epoch = cdffile['Epoch'] #. Determine if a CDF contains a variable named ``B_GSE``: >>> if 'B_GSE' in cdffile: ... print('B_GSE is in the file') ... else: ... print('B_GSE is not in the file') #. Find how many variables are in the file: >>> print(len(cdffile)) #. Delete the variable ``Epoch`` from the open CDF file ``cdffile``: >>> del cdffile['Epoch'] #. Display a summary of variables and types in open CDF file ``cdffile``: >>> print(cdffile) #. Open the CDF named ``cdf_filename.cdf``, read *all* the data from all variables into dictionary ``data``, and close it when done or if an error occurs: >>> with pycdf.CDF('cdf_filename.cdf') as cdffile: ... data = cdffile.copy() This last example can be very inefficient as it reads the entire CDF. Normally it's better to treat the CDF as a dictionary and access only the data needed, which will be pulled transparently from disc. See :class:`Var` for more subtle examples. Potentially useful dictionary methods and related functions: - `in <http://docs.python.org/reference/expressions.html#in>`_ - `keys <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_ - :py:func:`len` - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - :py:func:`sorted` - :py:func:`~spacepy.toolbox.dictree` The CDF user's guide section 2.2 has more background information on CDF files. The :attr:`~CDF.attrs` Python attribute acts as a dictionary referencing CDF attributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`gAttrList` for more on the dictionary of global attributes. Creating a new CDF from a master (skeleton) CDF has similar syntax to opening one: >>> cdffile = pycdf.CDF('cdf_filename.cdf', 'master_cdf_filename.cdf') This creates and opens ``cdf_filename.cdf`` as a copy of ``master_cdf_filename.cdf``. Using a skeleton CDF is recommended over making a CDF entirely from scratch, but this is possible by specifying a blank master: >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') When CDFs are created in this way, they are opened read-write, see :py:meth:`readonly` to change. By default, new CDFs (without a master) are created in version 2 (backward-compatible) format. To create a version 3 CDF, use :meth:`Library.set_backward`: >>> pycdf.lib.set_backward(False) >>> cdffile = pycdf.CDF('cdf_filename.cdf', '') Add variables by direct assignment, which will automatically set type and dimension based on the data provided: >>> cdffile['new_variable_name'] = [1, 2, 3, 4] or, if more control is needed over the type and dimensions, use :py:meth:`new`. Although it is supported to assign Var objects to Python variables for convenience, there are some minor pitfalls that can arise when changing a CDF that will not affect most users. 
This is only a concern when assigning a zVar object to a Python variable, changing the CDF through some other variable, and then trying to use the zVar object via the originally assigned variable. Deleting a variable: >>> var = cdffile['Var1'] >>> del cdffile['Var1'] >>> var[0] #fail, no such variable Renaming a variable: >>> var = cdffile['Var1'] >>> cdffile['Var1'].rename('Var2') >>> var[0] #fail, no such variable Renaming via the same variable works: >>> var = cdffile['Var1'] >>> var.rename('Var2') >>> var[0] #succeeds, aware of new name Deleting a variable and then creating another variable with the same name may lead to some surprises: >>> var = cdffile['Var1'] >>> var[...] = [1, 2, 3, 4] >>> del cdffile['Var1'] >>> cdffile.new('Var1', data=[5, 6, 7, 8] >>> var[...] [5, 6, 7, 8] .. autosummary:: ~CDF.attr_num ~CDF.attrs ~CDF.add_attr_to_cache ~CDF.add_to_cache ~CDF.backward ~CDF.checksum ~CDF.clear_attr_from_cache ~CDF.clear_from_cache ~CDF.clone ~CDF.close ~CDF.col_major ~CDF.compress ~CDF.copy ~CDF.from_data ~CDF.new ~CDF.raw_var ~CDF.readonly ~CDF.save ~CDF.var_num ~CDF.version .. attribute:: CDF.attrs Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. .. attribute:: CDF.backward True if this CDF was created in backward-compatible mode (for opening with CDF library before 3.x) .. automethod:: add_to_cache .. automethod:: add_attr_to_cache .. automethod:: attr_num .. automethod:: checksum .. automethod:: clear_from_cache .. automethod:: clear_attr_from_cache .. automethod:: clone .. automethod:: close .. automethod:: col_major .. automethod:: compress .. automethod:: copy .. automethod:: from_data .. automethod:: new .. automethod:: raw_var .. automethod:: readonly .. automethod:: save .. automethod:: var_num .. automethod:: version """ def __init__(self, pathname, masterpath=None, create=None, readonly=None): """Open or create a CDF file. Parameters ========== pathname : string name of the file to open or create masterpath : string name of the master CDF file to use in creating a new file. If not provided, an existing file is opened; if provided but evaluates to ``False`` (e.g., ``''``), an empty new CDF is created. create : bool Create a new CDF even if masterpath isn't provided readonly : bool Open the CDF read-only. Default True if opening an existing CDF; False if creating a new one. Raises ====== CDFError if CDF library reports an error CDFWarning if CDF library reports a warning and interpreter is set to error on warnings. Examples ======== Open a CDF by creating a CDF object, e.g.: >>> cdffile = pycdf.CDF('cdf_filename.cdf') Be sure to :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save` when done. 
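        An existing file can also be opened read/write from the start; a
        sketch using the ``readonly`` keyword described above:

        >>> cdffile = pycdf.CDF('cdf_filename.cdf', readonly=False)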
""" if masterpath is not None: #Looks like we want to create if create is False: raise ValueError('Cannot specify a master CDF without creating a CDF') if readonly is True: raise ValueError('Cannot create a CDF in readonly mode') if create and readonly: raise ValueError('Cannot create a CDF in readonly mode') try: self.pathname = pathname.encode() except AttributeError: raise ValueError( 'pathname must be string-like: {0}'.format(pathname)) self._handle = ctypes.c_void_p(None) self._opened = False if masterpath is None and not create: self._open(True if readonly is None else readonly) elif masterpath: self._from_master(masterpath.encode()) else: self._create() lib.call(const.SELECT_, const.CDF_zMODE_, ctypes.c_long(2)) self._attrlistref = weakref.ref(gAttrList(self)) self.backward = self.version()[0] < 3 self._var_nums = {} """Cache of name-to-number mappings for variables in this CDF""" self._attr_info = {} """Cache of name-to-(number, global) mappings for attributes in this CDF""" def __del__(self): """Destructor; called when CDF object is destroyed. Close CDF file if there is still a valid handle. .. note:: To avoid data loss, explicitly call :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save`. """ if self._opened: self.close() def __delitem__(self, name): """Delete a zVariable in this CDF, by name or number Parameters ========== name : string or int Name or number of the CDF variable .. note: Variable numbers may change if variables are added or removed. Examples ======== Delete the variable ``Epoch`` from the open CDF file ``cdffile``. >>> del cdffile['Epoch'] """ self[name]._delete() def __enter__(self): """Context manager entrance function.""" return self def __exit__(self, type, value, traceback): """Context manager exit function. Close CDF file. """ self.close() def __getitem__(self, name): """Gets a zVariable in this CDF, by name or number The CDF acts like a dict @param name: Name or number of the CDF variable @type name: string or int @return: CDF variable named or numbered L{name} @rtype: :py:class:`pycdf.Var` @raise KeyError: for pretty much any problem in lookup @note: variable numbers may change if variables are added or removed. """ try: return Var(self, name) except CDFException as e: raise KeyError('{0}: {1}'.format(name, e)) def __setitem__(self, name, data): """Writes data to a zVariable in this CDF If the zVariable does not exist, will create one matching L{data}. If it does exist, will attempt to write L{data} to it without changing the type or dimensions. @param name: name or number of the variable to write @type name: str or int @param data: data to write, or a :py:class:`pycdf.Var` to copy """ if isinstance(data, Var): self.clone(data, name) elif name in self: self[name][...] 
= data if hasattr(data, 'attrs'): self[name].attrs.clone(data.attrs) else: self.new(name, data) def __iter__(self, current = 0): """Iterates over zVars in CDF Iterators for dicts return keys @note: Returned in variable-number order """ while current < self.__len__(): name = self[current].name() value = (yield name) if value is None: current += 1 else: current = self[value]._num() current += 1 def __len__(self): """Implements 'length' of CDF (number of zVars) @return: number of zVars in the CDF @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, const.CDF_NUMzVARS_, ctypes.byref(count)) return count.value def __contains__(self, key): """Determines whether a particular variable name is in the CDF @note: Essentially an efficiency function; L{__iter__} is called if this isn't defined @param key: key/variable name to check @type key: string @return: True if L{key} is the name of a variable in CDF, else False @rtype: Boolean """ try: foo = self[key] return True except KeyError as e: expected = str(key) + \ ": NO_SUCH_VAR: Named variable not found in this CDF." if expected in e.args: return False raise def __repr__(self): """Returns representation of CDF Cannot return anything that can be eval'd to create a copy of the CDF, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<CDF:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the CDF This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.CDF`, just the names, types, and sizes of all variables. (Attributes are not listed.) @return: description of the variables in the CDF @rtype: str """ if self._opened: return '\n'.join([key + ': ' + str(value) for (key, value) in sorted(self.items())]) #can get away with this sort because second value in tuple isn't #compared unless first are different, and variable name is unique. else: if isinstance(self.pathname, str): return 'Closed CDF {0}'.format(self.pathname) else: return 'Closed CDF {0}'.format(self.pathname.decode('ascii')) def _open(self, readonly=True): """Opens the CDF file (called on init) Will open an existing CDF file read/write. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.OPEN_, const.CDF_, self.pathname, ctypes.byref(self._handle)) self._opened = True if readonly: #Default is RW self.readonly(readonly) def _create(self): """Creates (and opens) a new CDF file Created at ``pathname``. Assumes zero-dimension r variables Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ lib.call(const.CREATE_, const.CDF_, self.pathname, ctypes.c_long(0), (ctypes.c_long * 1)(0), ctypes.byref(self._handle)) self._opened = True def _from_master(self, master_path): """Creates a new CDF from a master CDF file ``master_path`` is copied to ``pathname`` and opened. Parameters ========== master_path : string location of the master CDF file Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. .. 
note: Not intended for direct call; pass parameters to :py:class:`pycdf.CDF` constructor. """ if os.path.exists(self.pathname): raise CDFError(const.CDF_EXISTS) shutil.copy2(master_path, self.pathname) self._open(False) def _call(self, *args, **kwargs): """Select this CDF as current and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. Parameters ========== args : various, see :py:mod:`ctypes`. Passed directly to the CDF library interface. Useful constants are defined in the :doc:`const <pycdf_const>` module of this package. Returns ======= out : ctypes.c_long CDF status from the library .. note: Terminal NULL_ is automatically added to ``args``. Raises ====== CDFError : if CDF library reports an error CDFWarning : if CDF library reports a warning and interpreter is set to error on warnings. """ return lib.call(const.SELECT_, const.CDF_, self._handle, *args, **kwargs) def clone(self, zVar, name=None, data=True): """ Clone a zVariable (from another CDF or this) into this CDF Parameters ========== zVar : :py:class:`Var` variable to clone Other Parameters ================ name : str Name of the new variable (default: name of the original) data : boolean (optional) Copy data, or only type, dimensions, variance, attributes? (default: True, copy data as well) Returns ======= out : :py:class:`Var` The newly-created zVar in this CDF """ if name is None: name = zVar.name() if name in self: del self[name] self.new(name, type=zVar.type(), recVary=zVar.rv(), dimVarys=zVar.dv(), dims=zVar._dim_sizes(), n_elements=zVar._nelems()) self[name].compress(*zVar.compress()) self[name].attrs.clone(zVar.attrs) if data: r = zVar._raw zVar._raw = True self.raw_var(name)[...] = zVar[...] zVar._raw = r return zVar def col_major(self, new_col=None): """ Finds the majority of this CDF file Other Parameters ================ new_col : boolean Specify True to change to column-major, False to change to row major, or do not specify to check the majority rather than changing it. (default is check only) Returns ======= out : boolean True if column-major, false if row-major """ if new_col != None: new_maj = const.COLUMN_MAJOR if new_col else const.ROW_MAJOR self._call(const.PUT_, const.CDF_MAJORITY_, new_maj) maj = ctypes.c_long(0) self._call(const.GET_, const.CDF_MAJORITY_, ctypes.byref(maj)) if not maj.value in (const.ROW_MAJOR.value, const.COLUMN_MAJOR.value): raise CDFError(const.BAD_MAJORITY) return maj.value == const.COLUMN_MAJOR.value def readonly(self, ro=None): """ Sets or check the readonly status of this CDF If the CDF has been changed since opening, setting readonly mode will have no effect. .. note:: Closing a CDF that has been opened readonly, or setting readonly False, may take a substantial amount of time if there are many variables in the CDF, as a (potentially large) cache needs to be cleared. Consider specifying ``readonly=False`` when opening the file if this is an issue. However, this may make some reading operations slower. Other Parameters ================ ro : Boolean True to set the CDF readonly, False to set it read/write, or leave out to check only. 
Returns ======= out : Boolean True if CDF is read-only, else False Raises ====== CDFError : if bad mode is set """ if ro == True: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYon) elif ro == False: self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYoff) mode = ctypes.c_long(0) self._call(const.CONFIRM_, const.CDF_READONLY_MODE_, ctypes.byref(mode)) if mode.value == const.READONLYon.value: return True elif mode.value == const.READONLYoff.value: return False else: raise CDFError(const.BAD_READONLY_MODE.value) def checksum(self, new_val=None): """ Set or check the checksum status of this CDF. If checksums are enabled, the checksum will be verified every time the file is opened. Other Parameters ================ new_val : boolean True to enable checksum, False to disable, or leave out to simply check. Returns ======= out : boolean True if the checksum is enabled or False if disabled """ if new_val != None: self._call(const.PUT_, const.CDF_CHECKSUM_, const.MD5_CHECKSUM if new_val else const.NO_CHECKSUM) chk = ctypes.c_long(0) self._call(const.GET_, const.CDF_CHECKSUM_, ctypes.byref(chk)) if not chk.value in (const.MD5_CHECKSUM.value, const.NO_CHECKSUM.value): raise CDFError(const.BAD_CHECKSUM) return chk.value == const.MD5_CHECKSUM.value def close(self): """ Closes the CDF file Although called on object destruction (:meth:`~CDF.__del__`), to ensure all data are saved, the user should explicitly call :meth:`~CDF.close` or :meth:`~CDF.save`. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.CLOSE_, const.CDF_) self._opened = False def compress(self, comptype=None, param=None): """ Set or check the compression of this CDF Sets compression on entire *file*, not per-variable. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) Returns ======= out : tuple (comptype, param) currently in effect See Also ======== :meth:`Var.compress` Examples ======== Set file ``cdffile`` to gzip compression, compression level 9: >>> cdffile.compress(pycdf.const.GZIP_COMPRESSION, 9) """ return _compress(self, comptype, param) def new(self, name, data=None, type=None, recVary=True, dimVarys=None, dims=None, n_elements=None, compress=None, compress_param=None): """ Create a new zVariable in this CDF .. note:: Either ``data`` or ``type`` must be specified. If type is not specified, it is guessed from ``data``. Parameters ========== name : str name of the new variable Other Parameters ================ data data to store in the new variable. If this has a an ``attrs`` attribute (e.g., :class:`~spacepy.datamodel.dmarray`), it will be used to populate attributes of the new variable. type : ctypes.c_long CDF type of the variable, from :mod:`~pycdf.const`. See section 2.5 of the CDF user's guide for more information on CDF data types. recVary : boolean record variance of the variable (default True) dimVarys : list of boolean dimension variance of each dimension, default True for all dimensions. 
dims : list of int size of each dimension of this variable, default zero-dimensional. Note this is the dimensionality as defined by CDF, i.e., for record-varying variables it excludes the leading record dimension. See :py:class:`Var`. n_elements : int number of elements, should be 1 except for CDF_CHAR, for which it's the length of the string. compress : ctypes.c_long Compression to apply to this variable, default None. See :py:meth:`Var.compress`. compress_param : ctypes.c_long Compression parameter if compression used; reasonable default is chosen. See :py:meth:`Var.compress`. Returns ======= out : :py:class:`Var` the newly-created zVariable Raises ====== ValueError : if neither data nor sufficient typing information is provided. Notes ===== Any given data may be representable by a range of CDF types; if the type is not specified, pycdf will guess which the CDF types which can represent this data. This breaks down to: #. If input data is a numpy array, match the type of that array #. Proper kind (numerical, string, time) #. Proper range (stores highest and lowest number provided) #. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: #. Type that matches precision of data first, then #. integer type before float type, then #. Smallest type first, then #. signed type first, then #. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if ``data`` specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: #. absolute values between 0 and 3e-39 #. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. """ if type in (const.CDF_EPOCH16, const.CDF_INT8, const.CDF_TIME_TT2000) \ and self.backward: raise ValueError('Cannot use EPOCH16, INT8, or TIME_TT2000 ' 'in backward-compatible CDF') if not lib.supports_int8 and \ type in (const.CDF_INT8, const.CDF_TIME_TT2000): raise ValueError('INT8 and TIME_TT2000 require CDF library 3.4.0') if data is None: if type is None: raise ValueError('Must provide either data or a CDF type.') if dims is None: dims = [] if n_elements is None: n_elements = 1 else: (guess_dims, guess_types, guess_elements) = _Hyperslice.types(data) if dims is None: if recVary: if guess_dims == (): raise ValueError( 'Record-varying data cannot be scalar. 
' 'Specify NRV with CDF.new() or put data in array.') dims = guess_dims[1:] else: dims = guess_dims if type is None: type = guess_types[0] if type == const.CDF_EPOCH16.value and self.backward: type = const.CDF_EPOCH if n_elements is None: n_elements = guess_elements if dimVarys is None: dimVarys = [True for i in dims] recVary = const.VARY if recVary else const.NOVARY dimVarys = [const.VARY if dimVary else const.NOVARY for dimVary in dimVarys] if not hasattr(type, 'value'): type = ctypes.c_long(type) if type.value == const.CDF_INT8.value and not lib.supports_int8: raise ValueError( '64-bit integer support require CDF library 3.4.0') if type.value in (const.CDF_EPOCH16.value, const.CDF_INT8.value, const.CDF_TIME_TT2000.value) \ and self.backward: raise ValueError('Data requires EPOCH16, INT8, or TIME_TT2000; ' 'incompatible with backward-compatible CDF') new_var = Var(self, name, type, n_elements, dims, recVary, dimVarys) if compress != None: new_var.compress(compress, compress_param) if data is not None: new_var[...] = data if hasattr(data, 'attrs'): new_var.attrs.clone(data.attrs) return new_var def raw_var(self, name): """ Get a "raw" :class:`Var` object. Normally a :class:`Var` will perform translation of values for certain types (to/from Unicode for CHAR variables on Py3k, and to/from datetime for all time types). A "raw" object does not perform this translation, on read or write. This does *not* affect the data on disk, and in fact it is possible to maintain multiple Python objects with access to the same zVariable. Parameters ========== name : str name or number of the zVariable """ v = self[name] v._raw = True return v def save(self): """ Saves the CDF file but leaves it open. If closing the CDF, :meth:`close` is sufficient; there is no need to call :meth:`save` before :meth:`close`. .. note:: Relies on an undocumented call of the CDF C library, which is also used in the Java interface. Raises ====== CDFError : if CDF library reports an error Warns ===== CDFWarning : if CDF library reports a warning """ self._call(const.SAVE_, const.CDF_) def copy(self): """ Make a copy of all data and attributes in this CDF Returns ======= out : :py:class:`CDFCopy` :class:`~spacepy.datamodel.SpaceData`-like object of all data """ return CDFCopy(self) def version(self): """ Get version of library that created this CDF Returns ======= out : tuple version of CDF library, in form (version, release, increment) """ ver = ctypes.c_long(0) rel = ctypes.c_long(0) inc = ctypes.c_long(0) self._call(const.GET_, const.CDF_VERSION_, ctypes.byref(ver), const.GET_, const.CDF_RELEASE_, ctypes.byref(rel), const.GET_, const.CDF_INCREMENT_, ctypes.byref(inc)) return (ver.value, rel.value, inc.value) def _get_attrs(self): """Get attribute list Provide access to the CDF's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = gAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """Global attributes for this CDF in a dict-like format. See :class:`gAttrList` for details. 
""") def var_num(self, varname): """Get the variable number of a particular variable name This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : int Variable number of this zvariable. """ num = self._var_nums.get(varname, None) if num is None: #Copied from Var._get, which can hopefully be thinned varNum = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMBER_, varname, ctypes.byref(varNum)) num = varNum.value self._var_nums[varname] = num return num def attr_num(self, attrname): """Get the attribute number and scope by attribute name This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the zVariable. Not this is NOT a string in Python 3! Raises ====== CDFError : if variable is not found Returns ======= out : tuple attribute number, scope (True for global) of this attribute """ res = self._attr_info.get(attrname, None) if res is None: #Copied from Var._get, which can hopefully be thinned attrNum = ctypes.c_long(0) self._call(const.GET_, const.ATTR_NUMBER_, attrname, ctypes.byref(attrNum)) scope = ctypes.c_long(0) self._call(const.SELECT_, const.ATTR_, attrNum, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) res = (attrNum.value, scope) self._attr_info[attrname] = res return res def clear_attr_from_cache(self, attrname): """Mark an attribute deleted in the name-to-number cache Will remove an attribute, and all attributes with higher numbers, from the attribute cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== attrname : bytes name of the attribute. Not this is NOT a string in Python 3! """ num, scope = self.attr_num(attrname) #All numbers higher than this are renumbered for a, n in list(self._attr_info.items()): if n[0] >= num: del self._attr_info[a] def clear_from_cache(self, varname): """Mark a variable deleted in the name-to-number cache Will remove a variable, and all variables with higher numbers, from the variable cache. Does NOT delete the variable! This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! """ num = self.var_num(varname) #All numbers higher than this are renumbered for v, n in list(self._var_nums.items()): if n >= num: del self._var_nums[v] def add_attr_to_cache(self, attrname, num, scope): """Add an attribute to the name-to-number cache This maintains a cache of name-to-number mappings for attributes to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable scope : bool True if global scope; False if variable scope. 
""" self._attr_info[attrname] = (num, scope) def add_to_cache(self, varname, num): """Add a variable to the name-to-number cache This maintains a cache of name-to-number mappings for zVariables to keep from having to query the CDF library constantly. It's mostly an internal function. Parameters ========== varname : bytes name of the zVariable. Not this is NOT a string in Python 3! num : int number of the variable """ self._var_nums[varname] = num #Note there is no function for delete, currently handled in Var.rename #and Attr.rename by just deleting from the dict directly. Maybe this #should be differen (maybe should be possible to follow a variable across #a rename...) class Var(MutableSequence): """ A CDF variable. This object does not directly store the data from the CDF; rather, it provides access to the data in a format that much like a Python list or numpy :class:`~numpy.ndarray`. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. The CDF user's guide, section 2.3, provides background on variables. .. note:: Not intended to be created directly; use methods of :class:`CDF` to gain access to a variable. A record-varying variable's data are viewed as a hypercube of dimensions n_dims+1 (the extra dimension is the record number). They are indexed in row-major fashion, i.e. the last index changes most frequently / is contiguous in memory. If the CDF is column-major, the data are transformed to row-major before return. Non record-varying variables are similar, but do not have the extra dimension of record number. Variables can be subscripted by a multidimensional index to return the data. Indices are in row-major order with the first dimension representing the record number. If the CDF is column major, the data are reordered to row major. Each dimension is specified by standard Python `slice <http://docs.python.org/tutorial/introduction.html#strings>`_ notation, with dimensions separated by commas. The ellipsis fills in any missing dimensions with full slices. The returned data are lists; Python represents multidimensional arrays as nested lists. The innermost set of lists represents contiguous data. .. note:: numpy 'fancy indexing' is *not* supported. Degenerate dimensions are 'collapsed', i.e. no list of only one element will be returned if a single subscript is specified instead of a range. (To avoid this, specify a slice like 1:2, which starts with 1 and ends before 2). Two special cases: 1. requesting a single-dimension slice for a record-varying variable will return all data for that record number (or those record numbers) for that variable. 2. Requests for multi-dimensional variables may skip the record-number dimension and simply specify the slice on the array itself. In that case, the slice of the array will be returned for all records. In the event of ambiguity (e.g., single-dimension slice on a one-dimensional variable), case 1 takes priority. Otherwise, mismatch between the number of dimensions specified in the slice and the number of dimensions in the variable will cause an :exc:`~exceptions.IndexError` to be thrown. This all sounds very complicated but it is essentially attempting to do the 'right thing' for a range of slices. An unusual case is scalar (zero-dimensional) non-record-varying variables. Clearly they cannot be subscripted normally. 
In this case, use the ``[...]`` syntax meaning 'access all data.': >>> import pycdf >>> testcdf = pycdf.CDF('test.cdf', '') >>> variable = testcdf.new('variable', recVary=False, ... type=pycdf.const.CDF_INT4) >>> variable[...] = 10 >>> variable <Var: CDF_INT4 [] NRV > >>> variable[...] 10 Reading any empty non-record-varying variable will return an empty with the same *number* of dimensions, but all dimensions will be of zero length. The scalar is, again, a special case: due to the inability to have a numpy array which is both zero-dimensional and empty, reading an NRV scalar variable with no data will return an empty one-dimensional array. This is really not recommended. As a list type, variables are also `iterable <http://docs.python.org/tutorial/classes.html#iterators>`_; iterating over a variable returns a single complete record at a time. This is all clearer with examples. Consider a variable ``B_GSM``, with three elements per record (x, y, z components) and fifty records in the CDF. Then: 1. ``B_GSM[0, 1]`` is the y component of the first record. 2. ``B_GSM[10, :]`` is a three-element list, containing x, y, and z components of the 11th record. As a shortcut, if only one dimension is specified, it is assumed to be the record number, so this could also be written ``B_GSM[10]``. 3. ``B_GSM[...]`` reads all data for ``B_GSM`` and returns it as a fifty-element list, each element itself being a three-element list of x, y, z components. Multidimensional example: consider fluxes stored as a function of pitch angle and energy. Such a variable may be called Flux and stored as a two-dimensional array, with the first dimension representing (say) ten energy steps and the second, eighteen pitch angle bins (ten degrees wide, centered from 5 to 175 degrees). Assume 100 records stored in the CDF (i.e. 100 different times). 1. ``Flux[4]`` is a list of ten elements, one per energy step, each element being a list of 18 fluxes, one per pitch bin. All are taken from the fifth record in the CDF. 2. ``Flux[4, :, 0:4]`` is the same record, all energies, but only the first four pitch bins (roughly, field-aligned). 3. ``Flux[..., 0:4]`` is a 100-element list (one per record), each element being a ten-element list (one per energy step), each containing fluxes for the first four pitch bins. This slicing notation is very flexible and allows reading specifically the desired data from the CDF. All data are, on read, converted to appropriate Python data types; EPOCH, EPOCH16, and TIME_TT2000 types are converted to :class:`~datetime.datetime`. Data are returned in numpy arrays. .. note:: Although pycdf supports TIME_TT2000 variables, the Python :class:`~datetime.datetime` object does not support leap seconds. Thus, on read, any seconds past 59 are truncated to 59.999999 (59 seconds, 999 milliseconds, 999 microseconds). Potentially useful list methods and related functions: - `count <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `in <http://docs.python.org/reference/expressions.html#in>`_ - `index <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_ - `len <http://docs.python.org/library/functions.html#len>`_ - `list comprehensions <http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_ - `sorted <http://docs.python.org/library/functions.html#sorted>`_ The topic of array majority can be very confusing; good background material is available at `IDL Array Storage and Indexing <http://www.idlcoyote.com/misc_tips/colrow_major.html>`_. 
In brief, *regardless of the majority stored in the CDF*, pycdf will always present the data in the native Python majority, row-major order, also known as C order. This is the default order in `NumPy <http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html #internal-memory-layout-of-an-ndarray>`_. However, packages that render image data may expect it in column-major order. If the axes seem 'swapped' this is likely the reason. The :attr:`~Var.attrs` Python attribute acts as a dictionary referencing zAttributes (do not confuse the two); all the dictionary methods above also work on the attribute dictionary. See :class:`zAttrList` for more on the dictionary of attributes. With writing, as with reading, every attempt has been made to match the behavior of Python lists. You can write one record, many records, or even certain elements of all records. There is one restriction: only the record dimension (i.e. dimension 0) can be resized by write, as all records in a variable must have the same dimensions. Similarly, only whole records can be deleted. .. note:: Unusual error messages on writing data usually mean that pycdf is unable to interpret the data as a regular array of a single type matching the type and shape of the variable being written. A 5x4 array is supported; an irregular array where one row has five columns and a different row has six columns is not. Error messages of this type include: - ``Data must be well-formed, regular array of number, string, or datetime`` - ``setting an array element with a sequence.`` - ``shape mismatch: objects cannot be broadcast to a single shape`` For these examples, assume Flux has 100 records and dimensions [2, 3]. Rewrite the first record without changing the rest: >>> Flux[0] = [[1, 2, 3], [4, 5, 6]] Writes a new first record and delete all the rest: >>> Flux[...] = [[1, 2, 3], [4, 5, 6]] Write a new record in the last position and add a new record after: >>> Flux[99:] = [[[1, 2, 3], [4, 5, 6]], ... [[11, 12, 13], [14, 15, 16]]] Insert two new records between the current number 5 and 6: >>> Flux[5:6] = [[[1, 2, 3], [4, 5, 6]], [[11, 12, 13], ... [14, 15, 16]]] This operation can be quite slow, as it requires reading and rewriting the entire variable. (CDF does not directly support record insertion.) Change the first element of the first two records but leave other elements alone: >>> Flux[0:2, 0, 0] = [1, 2] Remove the first record: >>> del Flux[0] Removes record 5 (the sixth): >>> del Flux[5] Due to the need to work around a bug in the CDF library, this operation can be quite slow. Delete *all data* from ``Flux``, but leave the variable definition intact: >>> del Flux[...] .. note:: Although this interface only directly supports zVariables, zMode is set on opening the CDF so rVars appear as zVars. See p.24 of the CDF user's guide; pyCDF uses zMode 2. .. autosummary:: ~Var.attrs ~Var.compress ~Var.copy ~Var.dtype ~Var.dv ~Var.insert ~Var.name ~Var.rename ~Var.rv ~Var.shape ~Var.type .. attribute:: Var.attrs zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. .. automethod:: compress .. automethod:: copy .. autoattribute:: dtype .. automethod:: dv .. automethod:: insert .. automethod:: name .. automethod:: rename .. automethod:: rv .. autoattribute:: shape .. 
automethod:: type """ def __init__(self, cdf_file, var_name, *args): """Create or locate a variable Parameters ========== cdf_file : :py:class:`pycdf.CDF` CDF file containing this variable var_name : string name of this variable Other Parameters ================ args additional arguments passed to :py:meth:`_create`. If none, opens an existing variable. If provided, creates a new one. Raises ====== CDFError if CDF library reports an error Warns ===== CDFWarning if CDF library reports a warning """ self.cdf_file = cdf_file #This is the definitive "identify" of variable self._name = None self._type = None #CDF type (long) self._raw = False #Raw access (skip all conversions) if len(args) == 0: self._get(var_name) else: self._create(var_name, *args) #Weak reference to attribute list (use attrs instead) #This avoids a reference loop self._attrlistref = weakref.ref(zAttrList(self)) def __getitem__(self, key): """Returns a slice from the data array. Details under :py:class:`pycdf.Var`. @return: The data from this variable @rtype: list-of-lists of appropriate type. @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) #Hyperslice mostly catches this sort of thing, but #an empty variable is a special case, since we might want to #WRITE to 0th record (which Hyperslice also supports) but #can't READ from it, and iterating over tries to read from it. if hslice.rv: if hslice.dimsizes[0] == 0 and hslice.degen[0] and \ hslice.starts[0] == 0: raise IndexError('record index out of range') #For NRV, again hslice will assume 0th record exists since we might #want to write. So ANY degenerate dim other than the glued-on 0th #suggests an explicit index that should fail. None degenerate suggests #make an empty array. #Note this is pulling a lot of hyperslice stuff into getitem! elif hslice.dimsizes[0] == 0: if len(hslice.degen) > 1 and max(hslice.degen[1:]): raise IndexError('record index out of range') else: #The zero-length dimension is degenerate so it gets chopped, #and you can't have a zero-length numpy array that still #maintains the size of all other dimensions. So just force #a zero-dim array and the rest will follow hslice.counts[...] = 0 #If this is a scalar, need to make a single non-degenerate #dimension so it can be empty. if len(hslice.counts) == 1: hslice.degen[0] = False result = hslice.create_array() if hslice.counts[0] != 0: hslice.select() lib.call(const.GET_, const.zVAR_HYPERDATA_, result.ctypes.data_as(ctypes.c_void_p)) return hslice.convert_input_array(result) def __delitem__(self, key): """Removes a record (or set of records) from the CDF Only whole records can be deleted, so the del call must either specify only one dimension or it must specify all elements of the non-record dimensions. This is *not* a way to resize a variable! Deleting records from the middle of a variable may be very slow in some circumstances. To work around a bug in CDF library versions 3.4.0 and before, all the data must be read in, the requested deletions done, and then all written back out. 
@param key: index or slice to delete @type key: int or slice @raise TypeError: if an attempt is made to delete from a non record-varying variable, or to delete below the record level """ if not self.rv(): raise TypeError('Cannot delete records from non-record-varying ' 'variable.') hslice = _Hyperslice(self, key) if hslice.dims > 1 and (hslice.counts[1:] != hslice.dimsizes[1:]).any(): raise TypeError('Can only delete entire records.') if hslice.counts[0] == 0: return start = hslice.starts[0] count = hslice.counts[0] interval = hslice.intervals[0] dimsize = hslice.dimsizes[0] self._call() dangerous_delete = False if lib._del_middle_rec_bug and \ (interval != 1 or (start != 0 and start + count < dimsize)): #delete from middle is dangerous if only have one index entry entries = ctypes.c_long(0) lib.call(const.GET_, const.zVAR_nINDEXENTRIES_, ctypes.byref(entries)) dangerous_delete = (entries.value == 1) if dangerous_delete: data = self[...] data = numpy.delete( data, numpy.arange(start, start + count * interval, interval), 0) self[0:dimsize - count] = data first_rec = dimsize - count last_rec = dimsize - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif interval == 1: first_rec = ctypes.c_long(start) last_rec = ctypes.c_long(start + count - 1) lib.call(const.DELETE_, const.zVAR_RECORDS_, first_rec, last_rec) else: self._call() #delete from end to avoid renumbering of records for recno in range(start + (count - 1) * interval, start - 1, -1 * interval): lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(recno), ctypes.c_long(recno)) def __setitem__(self, key, data): """Puts a slice into the data array. Details under :py:class:`pycdf.Var`. @param key: index or slice to store @type key: int or slice @param data: data to store @type data: numpy.array @raise IndexError: if L{key} is out of range, mismatches dimensions, or simply unparseable. 
IndexError will @raise CDFError: for errors from the CDF library """ hslice = _Hyperslice(self, key) n_recs = hslice.counts[0] hslice.expand(data) cdf_type = self.type() if cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) else: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=self._np_type()) if cdf_type == const.CDF_EPOCH16.value: datashape = data.shape[:-1] else: datashape = data.shape #Check data sizes if datashape != tuple(hslice.expected_dims()): raise ValueError('attempt to assign data of dimensions ' + str(datashape) + ' to slice of dimensions ' + str(tuple(hslice.expected_dims()))) #Flip majority and reversed dimensions, see convert_input_array data = hslice.convert_output_array(data) #Handle insertions and similar weirdness if hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Specified slice ends before last record, so insert in middle saved_data = self[hslice.starts[0] + n_recs:] if hslice.counts[0] > 0: hslice.select() lib.call(const.PUT_, const.zVAR_HYPERDATA_, data.ctypes.data_as(ctypes.c_void_p)) if hslice.counts[0] < n_recs: first_rec = hslice.starts[0] + hslice.counts[0] last_rec = hslice.dimsizes[0] - 1 lib.call(const.DELETE_, const.zVAR_RECORDS_, ctypes.c_long(first_rec), ctypes.c_long(last_rec)) elif hslice.counts[0] > n_recs and \ hslice.starts[0] + n_recs < hslice.dimsizes[0]: #Put saved data in after inserted data self[hslice.starts[0] + hslice.counts[0]:] = saved_data def extend(self, data): """ Append multiple values to the end of this variable This is an efficiency function which overrides the base implementation in MutableSequence. Parameters ---------- data : the data to append """ self[len(self):] = data def insert(self, index, data): """ Inserts a *single* record before an index Parameters ---------- index : int index before which to insert the new record data : the record to insert """ self[index:index] = [data] def _create(self, var_name, datatype, n_elements = 1, dims = (), recVary = const.VARY, dimVarys = None): """Creates a new zVariable @param var_name: name of this variable @type var_name: string @param datatype: CDF data type @type datatype: ctypes.c_long @param n_elements: number of elements (should be 1 except for CDF_CHAR variables). @type n_elements: long @param dims: size of each dimension for multi-dimensional variable, or empty for a zero-dimensional @type dims: sequence of long @param recVary: record variance for this variable (VARY/NOVARY) @type recVary: long @param dimVarys: array of VARY or NOVARY, variance for each dimension @type dimVarys: sequence of long @return: new variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.new}. 
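        @note: A sketch of the equivalent public call (variable name, type, and
        dimensions here are hypothetical):
        C{cdf.new('Flux', type=const.CDF_FLOAT, dims=[3])} ultimately creates
        the zVariable through this method.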
""" dim_array = (ctypes.c_long * len(dims))(*dims) enc_name = var_name.encode('ascii') if dimVarys is None: dim_vary_array = (ctypes.c_long * (len(dims) if len(dims) > 0 else 1))(const.VARY) else: dim_vary_array = (ctypes.c_long * len(dims))(*dimVarys) varNum = ctypes.c_long(0) self.cdf_file._call(const.CREATE_, const.zVAR_, enc_name, datatype, ctypes.c_long(n_elements), ctypes.c_long(len(dims)), dim_array, recVary, dim_vary_array, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) def _delete(self): """Removes this zVariable from the CDF @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ self._call(const.DELETE_, const.zVAR_) self.cdf_file.clear_from_cache(self._name) self._name = None def _get(self, var_name): """Gets an existing zVariable @param var_name: name of this variable @type var_name: string @return: variable with this name @rtype: :py:class:`pycdf.Var` @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. @note: Not intended to be used directly; use L{CDF.__getitem__}. """ if isinstance(var_name, str_classes): try: enc_name = var_name.encode('ascii').rstrip() except AttributeError: enc_name = var_name.rstrip() #already in ASCII #'touch' CDF to cause an error if the name isn't there; get number varNum = ctypes.c_long(0) self.cdf_file._call(const.GET_, const.zVAR_NUMBER_, enc_name, ctypes.byref(varNum)) self._name = enc_name self.cdf_file.add_to_cache(enc_name, varNum.value) else: #Looking up by number name = ctypes.create_string_buffer(const.CDF_VAR_NAME_LEN256+1) self.cdf_file._call(const.SELECT_, const.zVAR_, ctypes.c_long(var_name), const.GET_, const.zVAR_NAME_, name) self._name = name.value.rstrip() self.cdf_file.add_to_cache(self._name, var_name) def _num(self): """Returns the zVar number for this variable @return: number of this zVar @rtype: int """ return self.cdf_file.var_num(self._name) def __len__(self): """Get number of records for this variable in this file @return: Number of records @rtype: long @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ count = ctypes.c_long(0) self._call(const.GET_, const.zVAR_MAXREC_, ctypes.byref(count)) return (count.value + 1) def __repr__(self): """Returns representation of the variable Cannot return anything that can be eval'd to create a copy, so just wrap the informal representation in angle brackets. @return: info on this zVar @rtype: str """ return '<Var:\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the variable This is an 'informal' representation in that it cannot be evaluated directly to create a :py:class:`pycdf.Var`. 
@return: info on this zVar, CDFTYPE [dimensions] NRV (if not record-varying) @rtype: str """ if self.cdf_file._opened: cdftype = self.type() chartypes = (const.CDF_CHAR.value, const.CDF_UCHAR.value) rv = self.rv() typestr = lib.cdftypenames[cdftype] + \ ('*' + str(self._nelems()) if cdftype in chartypes else '' ) if rv: sizestr = str([len(self)] + self._dim_sizes()) else: sizestr = str(self._dim_sizes()) return typestr + ' ' + sizestr + ('' if rv else ' NRV') else: if isinstance(self._name, str): return 'zVar "{0}" in closed CDF {1}'.format( self._name, self.cdf_file.pathname) else: return 'zVar "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self.cdf_file.pathname.decode('ascii')) def _n_dims(self): """Get number of dimensions for this variable @return: the number of dimensions @rtype: long """ n_dims = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMDIMS_, ctypes.byref(n_dims)) return n_dims.value def _dim_sizes(self): """Get the dimension sizes for this variable @return: sequence of sizes @rtype: sequence of long @note: This will always be in Python order (i.e. row major, last index iterates most quickly), *regardless* of the majority of the CDF. """ sizes = (ctypes.c_long * const.CDF_MAX_DIMS)(0) self._call(const.GET_, const.zVAR_DIMSIZES_, sizes) sizes = sizes[0:self._n_dims()] return sizes def rv(self, new_rv=None): """ Gets or sets whether this variable has record variance If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Other Parameters ================ new_rv : boolean True to change to record variance, False to change to NRV, unspecified to simply check variance. Returns ======= out : Boolean True if record varying, False if NRV """ if new_rv != None: self._call(const.PUT_, const.zVAR_RECVARY_, const.VARY if new_rv else const.NOVARY) vary = ctypes.c_long(0) self._call(const.GET_, const.zVAR_RECVARY_, ctypes.byref(vary)) return vary.value != const.NOVARY.value def dv(self, new_dv=None): """ Gets or sets dimension variance of each dimension of variable. If the variance is unknown, True is assumed (this replicates the apparent behavior of the CDF library on variable creation). Parameters ========== new_dv : list of boolean Each element True to change that dimension to dimension variance, False to change to not dimension variance. (Unspecified to simply check variance.) Returns ======= out : list of boolean True if that dimension has variance, else false. """ ndims = self._n_dims() if new_dv != None: if len(new_dv) != ndims: raise ValueError('Must specify variance for ' + str(ndims) + 'dimensions.') varies = (ctypes.c_long * ndims)( *[const.VARY if dv else const.NOVARY for dv in new_dv]) self._call(const.PUT_, const.zVAR_DIMVARYS_, varies) if ndims == 0: return [] varies = (ctypes.c_long * const.CDF_MAX_DIMS)() self._call(const.GET_, const.zVAR_DIMVARYS_, varies) return [dv != const.NOVARY.value for dv in varies[0:ndims]] def _call(self, *args, **kwargs): """Select this CDF and variable and call the CDF internal interface Adds call to select this CDF to L{args} and passes all parameters directly through to the CDFlib routine of the CDF library's C internal interface. Checks the return value with L{Library.check_status}. @param args: Passed directly to the CDF library interface. Useful constants are defined in the :py:mod:`pycdf.const` module of this package. @type args: various, see :py:mod:`ctypes`. 
@return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self.cdf_file._call( const.SELECT_, const.zVAR_, ctypes.c_long(self.cdf_file.var_num(self._name)), *args, **kwargs) def _np_type(self): """Returns the numpy type of this variable This is the numpy type that will come directly out of the CDF; see :meth:`dtype` for the representation post-conversion. Raises ====== CDFError : for library-reported error or failure to find numpy type Returns ======= out : dtype numpy dtype that will hold value from this variable """ cdftype = self.type() if cdftype == const.CDF_CHAR.value or cdftype == const.CDF_UCHAR.value: return numpy.dtype('S' + str(self._nelems())) try: return lib.numpytypedict[cdftype] except KeyError: raise CDFError(const.BAD_DATA_TYPE) def type(self, new_type=None): """ Returns or sets the CDF type of this variable Parameters ========== new_type : ctypes.c_long the new type from :mod:`~pycdf.const` Returns ======= out : int CDF type """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) n_elements = ctypes.c_long(self._nelems()) self._call(const.PUT_, const.zVAR_DATASPEC_, new_type, n_elements) self._type = None if self._type is None: cdftype = ctypes.c_long(0) self._call(const.GET_, const.zVAR_DATATYPE_, ctypes.byref(cdftype)) self._type = cdftype.value return self._type def _nelems(self): """Number of elements for each value in this variable This is the length of strings for CHAR and UCHAR, should be 1 otherwise. @return: length of strings @rtype: int """ nelems = ctypes.c_long(0) self._call(const.GET_, const.zVAR_NUMELEMS_, ctypes.byref(nelems)) return nelems.value def name(self): """ Returns the name of this variable Returns ======= out : str variable's name """ if isinstance(self._name, str): return self._name elif isinstance(self._name, bytes): return self._name.decode() def compress(self, comptype=None, param=None): """ Set or check the compression of this variable Compression may not be changeable on variables with data already written; even deleting the data may not permit the change. See section 2.6 of the CDF user's guide for more information on compression. Other Parameters ================ comptype : ctypes.c_long type of compression to change to, see CDF C reference manual section 4.10. Constants for this parameter are in :mod:`~pycdf.const`. If not specified, will not change compression. param : ctypes.c_long Compression parameter, see CDF CRM 4.10 and :mod:`~pycdf.const`. If not specified, will choose reasonable default (5 for gzip; other types have only one possible parameter.) 
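            For example (variable name hypothetical),
            ``v.compress(const.GZIP_COMPRESSION)`` requests gzip compression
            with the default parameter, and ``v.compress()`` with no arguments
            only reports the current setting.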
Returns ======= out : tuple the (comptype, param) currently in effect """ return _compress(self, comptype, param) def copy(self): """ Copies all data and attributes from this variable Returns ======= out : :class:`VarCopy` list of all data in record order """ return VarCopy(self) def rename(self, new_name): """ Renames this variable Parameters ========== new_name : str the new name for this variable """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_VAR_NAME_LEN256: raise CDFError(const.BAD_VAR_NAME) self._call(const.PUT_, const.zVAR_NAME_, enc_name) self.cdf_file.add_to_cache( enc_name, self.cdf_file.var_num(self._name)) #Still in cache del self.cdf_file._var_nums[self._name] self._name = enc_name @property def shape(self): """ Provides the numpy array-like shape of this variable. Returns a tuple; first element is number of records (RV variable only) And the rest provide the dimensionality of the variable. .. note:: Assigning to this attribute will not change the shape. """ if self.rv(): return tuple([len(self)] + self._dim_sizes()) else: return tuple(self._dim_sizes()) @property def dtype(self): """ Provide the numpy dtype equivalent to the CDF type of this variable. Data from this variable will be returned in numpy arrays of this type. See Also -------- type """ cdftype = self.type() if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str is not bytes and not self._raw: return numpy.dtype('U' + str(self._nelems())) if cdftype in (const.CDF_EPOCH.value, const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value) and not self._raw: return numpy.dtype('O') return self._np_type() def _get_attrs(self): """Get attribute list Provide access to the zVar's attribute list without holding a strong reference, as the attribute list has a (strong) back-reference to its parent. Either deref a weak reference (to try and keep the object the same), or make a new AttrList instance and assign it to the weak reference for next time. """ al = self._attrlistref() if al is None: al = zAttrList(self) self._attrlistref = weakref.ref(al) return al def _set_attrs(self, value): """Assign to the attribute list Clears all elements of the attribute list and copies from value """ self.attrs.clone(value) attrs = property( _get_attrs, _set_attrs, None, """zAttributes for this zVariable in a dict-like format. See :class:`zAttrList` for details. """) class _Hyperslice(object): """Represents a CDF 'slice' used for the hyper CDF functions For internal module use only. @ivar dims: number of dimensions to this slice, usually number of dimensions to the variable plus one for the record, which represents the 0th (least rapidly varying) dimension. @type dims: int @ivar dimsizes: size of each dimension (0th is number of records) @type dimsizes: list of int @ivar starts: index of the start value for each dimension ('dimension indices' in CDF speak) @type starts: list of int @ivar counts: number of values to get from each dimension. Final result will be the product of everything in counts. ('dimension counts' in CDF speak) @type counts: numpy.array @ivar intervals: interval between successive indices to use for each dimension. ('dimension invervals' in CDF speak) @type intervals: list of int @ivar degen: is this dimension degenerate, i.e. should be removed in the returned dataset. A 3D array with one dimension degenerate will be returned as a 2D array (i.e. list-of-lists.) @type degen: numpy.array @ivar rev: should this dimension be returned in reverse order? 
@type rev: numpy.array @ivar column: is this slice in column-major mode (if false, row-major) @type column: boolean @ivar zvar: what CDF variable this object slices on @type zvar: :py:class:`pycdf.Var` @ivar expanded_key: fully-expanded version of the key passed to the constructor (all dimensions filled in) @type expanded_key: tuple @note: All dimension-related variables are stored row-major (Python order) """ def __init__(self, zvar, key): """Create a Hyperslice @param zvar: zVariable that this slices @type zvar: :py:class:`pycdf.Var` @param key: Python multi-dimensional slice as passed to __getitem__ @type key: tuple of slice and/or int @raise IndexError: if slice is out of range, mismatches dimensions, or otherwise unparsable. @raise ValueError: if slice has invalid values """ self.zvar = zvar self.rv = self.zvar.rv() #dim of records, + 1 record dim (NRV always is record 0) self.dims = zvar._n_dims() + 1 self.dimsizes = [len(zvar)] + \ zvar._dim_sizes() self.starts = [0] * self.dims self.counts = numpy.empty((self.dims,), dtype=numpy.int32) self.counts.fill(1) self.intervals = [1] * self.dims self.degen = numpy.zeros(self.dims, dtype=numpy.bool) self.rev = numpy.zeros(self.dims, dtype=numpy.bool) #key is: #1. a single value (integer or slice object) if called 1D #2. a tuple (of integers and/or slice objects) if called nD #3. Each item is either a single value (degenerate dim) # or a slice object. if not hasattr(key, '__len__'): #Not a container object, pack in tuple key = (key, ) if not self.rv: key = (0, ) + key #NRV, so always get 0th record (degenerate) key = self.expand_ellipsis(key, self.dims) if self.rv: #special-cases for RV variables if len(key) == 1: #get all data for this record(s) key = self.expand_ellipsis(key + (Ellipsis, ), self.dims) elif len(key) == self.dims - 1: #get same slice from each record key = (slice(None, None, None), ) + key if len(key) == self.dims: self.expanded_key = key for i in range(self.dims): idx = key[i] if hasattr(idx, 'start'): #slice (self.starts[i], self.counts[i], self.intervals[i], self.rev[i]) = \ self.convert_range(idx.start, idx.stop, idx.step, self.dimsizes[i]) else: #Single degenerate value if idx < 0: idx += self.dimsizes[i] if idx != 0 and (idx >= self.dimsizes[i] or idx < 0): raise IndexError('list index out of range') self.starts[i] = idx self.degen[i] = True else: raise IndexError('Slice does not match dimensions for zVar ' + str(zvar._name)) self.column = zvar.cdf_file.col_major() def expected_dims(self, data=None): """Calculate size of non-degenerate dimensions Figures out size, in each dimension, of expected input data @return: size of each dimension for this slice, excluding degenerate @rtype: list of int """ return [self.counts[i] for i in range(self.dims) if not self.degen[i]] def expand(self, data): """Expands the record dimension of this slice to hold a set of data If the length of data (outermost dimension) is larger than the record count (counts[0]) for this slice, expand the slice to hold all the data. This requires that the record dimension of the slice not be degenerate, and also that it not have been completely specified when the hyperslice was created (i.e. record dimension either ellipsis or no specified stop.) Does *not* expand any other dimension, since that's Very Hard in CDF. 
@param data: the data which are intended to be stored in this slice @type data: list """ rec_slice = self.expanded_key[0] if not self.rv or isinstance(data, str_classes) or self.degen[0] or \ not hasattr(rec_slice, 'stop'): return if len(data) < self.counts[0]: #Truncate to fit data if rec_slice.stop is None and rec_slice.step in (None, 1): self.counts[0] = len(data) elif len(data) > self.counts[0]: #Expand to fit data if rec_slice.step in (None, 1): self.counts[0] = len(data) def create_array(self): """Creates a numpy array to hold the data from this slice Returns ======= out : numpy.array array sized, typed, and dimensioned to hold data from this slice """ counts = self.counts degen = self.degen if self.column: counts = self.reorder(counts) degen = self.reorder(degen) #TODO: Forcing C order for now, revert to using self.column later array = numpy.empty( [counts[i] for i in range(len(counts)) if not degen[i]], self.zvar._np_type(), order='C') return numpy.require(array, requirements=('C', 'A', 'W')) def convert_input_array(self, buffer): """Converts a buffer of raw data from this slice EPOCH(16) variables always need to be converted. CHAR need converted to Unicode if py3k Parameters ========== buffer : numpy.array data as read from the CDF file Returns ======= out : numpy.array converted data """ result = self._flip_array(buffer) #Convert to derived types cdftype = self.zvar.type() if not self.zvar._raw: if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \ str != bytes: dt = numpy.dtype('U{0}'.format(result.dtype.itemsize)) result = numpy.require(numpy.char.array(result).decode(), dtype=dt) elif cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(result) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(result) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(result) return result def convert_output_array(self, buffer): """Convert a buffer of data that will go into this slice Parameters ========== buffer : numpy.array data to go into the CDF file Returns ======= out : numpy.array input with majority flipped and dimensions reversed to be suitable to pass directly to CDF library. """ buffer = self._flip_array(buffer) return numpy.require(buffer, requirements=('C', 'A', 'W')) def _flip_array(self, data): """ Operations for majority, etc. common between convert_input and _output """ cdftype = self.zvar.type() #Flip majority if any non-degenerate dimensions exist if self.column and not min(self.degen): #Record-number dim degen, swap whole thing if self.degen[0]: if cdftype == const.CDF_EPOCH16.value: #Maintain last dimension data = data.transpose( list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose() #Record-number dimension is not degenerate, so keep it first else: if cdftype == const.CDF_EPOCH16.value: data = data.transpose( [0] + list(range(len(data.shape) - 2, 0, -1)) + [len(data.shape) - 1] ) else: data = data.transpose( [0] + list(range(len(data.shape) - 1, 0, -1))) #Reverse non-degenerate dimensions in rev #Remember that the degenerate indices are already gone! 
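        # Illustrative note (hypothetical slice): a request like var[0, ::-1, :]
        # sets rev[1]; the record dimension (index 0) is degenerate and already
        # dropped from 'data', so the slice list below is built only over the
        # surviving (non-degenerate) dimensions, reversing those flagged in rev.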
if self.rev.any(): sliced = [(slice(None, None, -1) if self.rev[i] else slice(None)) for i in range(self.dims) if not self.degen[i]] if cdftype == const.CDF_EPOCH16.value: #don't reverse last dim sliced.extend(slice(None)) data = operator.getitem(data, tuple(sliced)) return data def select(self): """Selects this hyperslice in the CDF Calls the CDF library to select the CDF, variable, records, and array elements corresponding to this slice. """ args = (const.SELECT_, const.zVAR_RECNUMBER_, ctypes.c_long(self.starts[0]), const.SELECT_, const.zVAR_RECCOUNT_, ctypes.c_long(self.counts[0]), const.SELECT_, const.zVAR_RECINTERVAL_, ctypes.c_long(self.intervals[0])) if self.dims > 1: dims = self.dims - 1 args += (const.SELECT_, const.zVAR_DIMINDICES_, (ctypes.c_long * dims)(*self.starts[1:]), const.SELECT_, const.zVAR_DIMCOUNTS_, (ctypes.c_long * dims)(*self.counts[1:]), const.SELECT_, const.zVAR_DIMINTERVALS_, (ctypes.c_long * dims)(*self.intervals[1:])) self.zvar._call(*args) @staticmethod def expand_ellipsis(slices, n_dims): """Expands any ellipses into correct number of full-size slices @param slices: tuple of slices, integers, or ellipse objects @type slices: tuple @param n_dims: number of dimensions this slice is over @type n_dims: int @return: L{slices} with ellipses replaced by appropriate number of full-dimension slices @rtype: tuple @raise IndexError: if ellipses specified when already have enough dimensions """ if slices is Ellipsis: return tuple([slice(None, None, None) for i in range(n_dims)]) #Elements might be numpy arrays, so can't use in/index idx = [i for i, v in enumerate(slices) if v is Ellipsis] if not idx: #no ellipsis return slices if len(idx) > 1: #multiples! raise IndexError('Ellipses can only be used once per slice.') idx = idx[0] #how many dims to expand ellipsis to #remember the ellipsis is in len(slices) and must be replaced! extra = n_dims - len(slices) + 1 if extra < 0: raise IndexError('too many indices') result = slices[0:idx] + (slice(None), ) * extra + slices[idx+1:] return result @staticmethod def check_well_formed(data): """Checks if input data is well-formed, regular array""" d = numpy.asanyarray(data) if d.dtype == numpy.object: #this is probably going to be bad try: len(d.flat[0]) except TypeError: #at least it's not a list pass else: raise ValueError( 'Data must be well-formed, regular array of number, ' 'string, or datetime') @staticmethod def dimensions(data): """Finds the dimensions of a nested list-of-lists @param data: data of which dimensions are desired @type data: list (of lists) @return: dimensions of L{data}, in order outside-in @rtype: list of int @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) _Hyperslice.check_well_formed(d) return d.shape @staticmethod def types(data, backward=False): """Find dimensions and valid types of a nested list-of-lists Any given data may be representable by a range of CDF types; infer the CDF types which can represent this data. This breaks down to: 1. Proper kind (numerical, string, time) 2. Proper range (stores highest and lowest number) 3. Sufficient resolution (EPOCH16 required if datetime has microseconds or below.) If more than one value satisfies the requirements, types are returned in preferred order: 1. Type that matches precision of data first, then 2. integer type before float type, then 3. Smallest type first, then 4. signed type first, then 5. specifically-named (CDF_BYTE) vs. 
generically named (CDF_INT1) So for example, EPOCH_16 is preferred over EPOCH if L{data} specifies below the millisecond level (rule 1), but otherwise EPOCH is preferred (rule 2). For floats, four-byte is preferred unless eight-byte is required: 1. absolute values between 0 and 3e-39 2. absolute values greater than 1.7e38 This will switch to an eight-byte double in some cases where four bytes would be sufficient for IEEE 754 encoding, but where DEC formats would require eight. @param data: data for which dimensions and CDF types are desired @type data: list (of lists) @param backward: limit to pre-CDF3 types @type backward: bool @return: dimensions of L{data}, in order outside-in; CDF types which can represent this data; number of elements required (i.e. length of longest string) @rtype: 3-tuple of lists ([int], [ctypes.c_long], [int]) @raise ValueError: if L{data} has irregular dimensions """ d = numpy.asanyarray(data) dims = d.shape elements = 1 types = [] _Hyperslice.check_well_formed(d) if d.dtype.kind in ('S', 'U'): #it's a string types = [const.CDF_CHAR, const.CDF_UCHAR] elements = d.dtype.itemsize if d.dtype.kind == 'U': #UTF-8 uses 4 bytes per elements //= 4 elif d.size and hasattr(numpy.ma.getdata(d).flat[0], 'microsecond'): if max((dt.microsecond % 1000 for dt in d.flat)) > 0: types = [const.CDF_EPOCH16, const.CDF_EPOCH, const.CDF_TIME_TT2000] else: types = [const.CDF_EPOCH, const.CDF_EPOCH16, const.CDF_TIME_TT2000] if backward: del types[types.index(const.CDF_EPOCH16)] del types[-1] elif not lib.supports_int8: del types[-1] elif d is data or isinstance(data, numpy.generic): #numpy array came in, use its type (or byte-swapped) types = [k for k in lib.numpytypedict if (lib.numpytypedict[k] == d.dtype or lib.numpytypedict[k] == d.dtype.newbyteorder()) and not k in lib.timetypes] if (not lib.supports_int8 or backward) \ and const.CDF_INT8.value in types: del types[types.index(const.CDF_INT8.value)] #Maintain priority to match the ordered lists below: #float/double (44, 45) before real (21/22), and #byte (41) before int (1) before char (51). So hack. #Consider making typedict an ordered dict once 2.6 is dead. types.sort(key=lambda x: x % 50, reverse=True) if not types: #not a numpy array, or can't parse its type if d.dtype.kind == 'O': #Object. 
Try to make it numeric #Can't do safe casting from Object, so try and compare #Basically try most restrictive to least restrictive trytypes = (numpy.uint64, numpy.int64, numpy.float64) for t in trytypes: try: newd = d.astype(dtype=t) except: #Failure to cast, try next type continue if (newd == d).all(): #Values preserved, use this type d = newd #Continue with normal guessing, as if a list break else: #fell through without a match raise ValueError( 'Cannot convert generic objects to CDF type.') if d.dtype.kind in ('i', 'u'): #integer minval = numpy.min(d) maxval = numpy.max(d) if minval < 0: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_INT2, const.CDF_INT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 15, 2 ** 31, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] else: types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_UINT1, const.CDF_INT2, const.CDF_UINT2, const.CDF_INT4, const.CDF_UINT4, const.CDF_INT8, const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] cutoffs = [2 ** 7, 2 ** 7, 2 ** 8, 2 ** 15, 2 ** 16, 2 ** 31, 2 ** 32, 2 ** 63, 1.7e38, 1.7e38, 8e307, 8e307] types = [t for (t, c) in zip(types, cutoffs) if c > maxval and (minval >= 0 or minval >= -c)] if (not lib.supports_int8 or backward) \ and const.CDF_INT8 in types: del types[types.index(const.CDF_INT8)] else: #float if dims is (): if d != 0 and (abs(d) > 1.7e38 or abs(d) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] else: absolutes = numpy.abs(d[d != 0]) if len(absolutes) > 0 and \ (numpy.max(absolutes) > 1.7e38 or numpy.min(absolutes) < 3e-39): types = [const.CDF_DOUBLE, const.CDF_REAL8] else: types = [const.CDF_FLOAT, const.CDF_REAL4, const.CDF_DOUBLE, const.CDF_REAL8] types = [t.value if hasattr(t, 'value') else t for t in types] return (dims, types, elements) @staticmethod def reorder(seq): """Reorders seq to switch array majority Used to take an array of subscripts between row and column majority. First element is not touched, being the record number. @param seq: a sequence of *subscripts* @type seq: sequence of integers @return: seq with all but element 0 reversed in order @rtype: sequence of integers """ return numpy.concatenate((seq[0:1], numpy.flipud(seq)[:-1])) @staticmethod def convert_range(start, stop, step, size): """Converts a start/stop/step range to start/count/interval (i.e. changes from Python-style slice to CDF-style) @param start: index to start a slice at, may be none or negative @type start: int @param stop: index at end of slice (one-past, standard Python), may be none or negative @type stop: int @param step: interval for stepping through stlice @type step: int @param size: size of list to slice @type size: int @return: (start, count, interval, rev) where: 1. start is the start index, normalized to be within the size of the list and negatives handled 2. count is the number of records in the slice, guaranteed to stop before the end 3. interval is the skip between records 4. rev indicates whether the sequence should be reversed @rtype: (int, int, int, boolean) """ (start, stop, step) = slice(start, stop, step).indices(size) if step < 0: step *= -1 count = int((start - stop + step - 1) / step) start = start - (count - 1) * step rev = True else: count = int((stop - start + step - 1) / step) rev = False if count < 0: count = 0 start = 0 return (start, count, step, rev) class Attr(MutableSequence): """An attribute, g or z, for a CDF .. 
warning:: This class should not be used directly, but only in its subclasses, :class:`gAttr` and :class:`zAttr`. The methods listed here are safe to use in the subclasses. Represents a CDF attribute, providing access to the Entries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. An introduction to CDF attributes can be found in section 2.4 of the CDF user's guide. Each element of the list is a single Entry of the appropriate type. The index to the elements is the Entry number. Multi-dimensional slicing is *not* supported; an Entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry .. autosummary:: ~Attr.append ~Attr.has_entry ~Attr.insert ~Attr.max_idx ~Attr.new ~Attr.number ~Attr.rename ~Attr.type .. automethod:: append .. automethod:: has_entry .. automethod:: insert .. automethod:: max_idx .. automethod:: new .. automethod:: number .. automethod:: rename .. automethod:: type """ def __init__(self, cdf_file, attr_name, create=False): """Initialize this attribute @param cdf_file: CDF file containing this attribute @type cdf_file: :py:class:`pycdf.CDF` @param attr_name: Name of this attribute @type attr_name: str @param create: True to create attribute, False to look up existing. @type create: bool """ self._cdf_file = cdf_file self._raw = False if isinstance(attr_name, str_classes): try: self._name = attr_name.encode('ascii') except AttributeError: self._name = attr_name attrno = ctypes.c_long() if create: self._cdf_file._call(const.CREATE_, const.ATTR_, self._name, self.SCOPE, ctypes.byref(attrno)) self._cdf_file.add_attr_to_cache( self._name, attrno.value, self.SCOPE == const.GLOBAL_SCOPE) else: #Ensure exists, and populate cache. See scope note below attrno, scope = self._cdf_file.attr_num(self._name) else: name = ctypes.create_string_buffer(const.CDF_ATTR_NAME_LEN256 + 1) scope = ctypes.c_long(0) self._cdf_file._call(const.SELECT_, const.ATTR_, ctypes.c_long(attr_name)) #Because it's possible to create a gAttr Python objecting #referencing an Attribute with variable scope, and vice-versa, #do NOT assume the scope matches #(Higher level code checks for that being a bad thing.) self._cdf_file._call( const.GET_, const.ATTR_NAME_, name, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope)) self._name = name.value.rstrip() if scope.value == const.GLOBAL_SCOPE.value: scope = True elif scope.value == const.VARIABLE_SCOPE.value: scope = False else: raise CDFError(const.BAD_SCOPE) self._cdf_file.add_attr_to_cache(self._name, attr_name, scope) def __getitem__(self, key): """Return a slice of Entries. Because Attributes may be sparse, a multi-element slice will return None for those elements which do not have associated Entries. @param key: index or range of Entry number to return @type key: slice or int @return: a list of entries, appropriate type. @raise IndexError: if L{key} is an int and that Entry number does not exist. 
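        @note: Sparse-Entry sketch (Entry numbers hypothetical): if only
        Entries 0 and 2 exist, C{attribute[0:3]} returns
        C{[<entry 0>, None, <entry 2>]}, while C{attribute[1]} raises
        IndexError.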
""" if key is Ellipsis: key = slice(None, None, None) if hasattr(key, 'indices'): idx = range(*key.indices(self.max_idx() + 1)) return [self._get_entry(i) if self.has_entry(i) else None for i in idx] else: if self.has_entry(key): return self._get_entry(key) else: raise IndexError('list index ' + str(key) + ' out of range.') def _check_other_entries(self, types): """Try to get the type of this entry from others in the Attribute For zAttrs, checks if all other Entries are the same type, and at least one doesn't match its zVar, i.e. Entry type dominates (otherwise assumption is the Var type dominates). For gAttrs, checks all other Entries, and gives priority to the one that's earliest in the possible type list and exists in other Entries. This is only one component of Entry type guessing! :param list types: CDF types that are candidates (match the data) :return: The type discerned from other Entries, or None """ if self.ENTRY_ == const.zENTRY_: #If everything else is the same entry type, #and one is not the same as its var, probably #all entries should be of that type cand_et = None #The Entry type that might work one_var_diff = False #One Var has a type different from Entry for num in range(self.max_idx() + 1): if not self.has_entry(num): continue vartype = self._cdf_file[num].type() entrytype = self.type(num) if vartype != entrytype: one_var_diff = True if cand_et is None: if not entrytype in types: return None #One var has Entry with "impossible" type cand_et = entrytype elif cand_et != entrytype: return None #Two vars have Entries with different types if one_var_diff and cand_et is not None: return cand_et else: # Of those types which exist in other entries, # find the one which is earliest # in types, i.e. the preferred type entrytypes = [self.type(num) for num in range(self.max_idx() + 1) if self.has_entry(num)] entrytypes = [et for et in entrytypes if et in types] if entrytypes: return types[ min([types.index(et) for et in entrytypes])] return None def __setitem__(self, key, data): """Set a slice of Entries. @param key: index or range of Entry numbers to set @type key: slice or int @param data: the data to set these entries to. Normally each entry should be a sequence; if a scalar is provided, it is treated as a single-element list. @type data: scalar or list @raise ValueError: if size of {data} does not match size of L{key} @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. 
""" if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): #Single value, promote everything a dimension idx = (key, key + 1, 1) data = [data] else: idx = key.indices(self.max_idx() + 1) if key.step is None or key.step > 0: #Iterating forward, extend slice to match data if len(data) > len(range(*idx)): idx = (idx[0], idx[0] + idx[2] * len(data), idx[2]) #get, and check, types and sizes for all data #checks first so don't have error after changing half the Entries data_idx = -1 typelist = [] for i in range(*idx): data_idx += 1 if data_idx >= len(data): continue datum = data[data_idx] if datum is None: typelist[i] = (None, None, None) continue (dims, types, elements) = _Hyperslice.types( datum, backward=self._cdf_file.backward) if len(types) <= 0: raise ValueError('Cannot find a matching CDF type.') if len(dims) > 1: raise ValueError('Entries must be scalar or 1D.') elif len(dims) == 1 and isinstance(datum[0], str_classes): raise ValueError('Entry strings must be scalar.') entry_type = None if self.has_entry(i): #If the entry already exists, match its type entry_type = self.type(i) if not entry_type in types: entry_type = None if entry_type is None: #Check other entries for this attribute entry_type = self._check_other_entries(types) if entry_type is None and self.ENTRY_ == const.zENTRY_: #Fall back to zVar type vartype = self._cdf_file[i].type() if vartype in types: entry_type = vartype else: entry_type = types[0] elif entry_type is None: entry_type = types[0] if not entry_type in lib.numpytypedict: raise ValueError('Cannot find a matching numpy type.') typelist.append((dims, entry_type, elements)) data_idx = -1 for i in range(*idx): data_idx += 1 if data_idx >= len(data) or data[data_idx] is None: if self.has_entry(i): del self[i] continue datum = data[data_idx] (dims, entry_type, elements) = typelist[data_idx] self._write_entry(i, datum, entry_type, dims, elements) def __delitem__(self, key): """Delete a slice of Entries. @param key: index or range of Entry numbers to delete @type key: slice or int @note: Attributes do not 'grow' or 'shrink' as entries are added or removed. Indexes of entries never change and there is no way to 'insert'. """ if key is Ellipsis: key = slice(None, None, None) if not hasattr(key, 'indices'): idx = (key, key + 1, 1) else: idx = key.indices(self.max_idx() + 1) for i in range(*idx): self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(i), const.DELETE_, self.ENTRY_) def __iter__(self, current=0): """Iterates over all entries in this Attribute Returns data from one entry at a time until reaches the end. @note: Returned in entry-number order. """ while current <= self.max_idx(): if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current += 1 def __reversed__(self, current=None): """Iterates over all entries in this Attribute Returns data from one entry at a time, starting at end and going to beginning. @note: Returned in entry-number order. """ if current is None: current = self.max_idx() while current >= 0: if self.has_entry(current): value = yield(self._get_entry(current)) if value != None: current = value current -= 1 def __len__(self): """Number of Entries for this Attr. NOT same as max Entry number. 
@return: Number of Entries @rtype: int """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_NUMENTRIES_, ctypes.byref(count)) return count.value def __repr__(self): """Returns representation of an attribute Cannot return anything that can be eval'd to create a copy of the attribtute, so just wrap the informal representation in angle brackets. @return: all the data in this attribute @rtype: str """ return '<\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute This is an 'informal' representation in that it cannot be evaluated directly to create an L{Attr}. @return: all the data in this attribute @rtype: str """ if self._cdf_file._opened: return '\n'.join([str(item) for item in self]) else: if isinstance(self._name, str): return 'Attribute "{0}" in closed CDF {1}'.format( self._name, self._cdf_file.pathname) else: return 'Attribute "{0}" in closed CDF {1}'.format( self._name.decode('ascii'), self._cdf_file.pathname.decode('ascii')) def insert(self, index, data): """Insert an entry at a particular number Inserts entry at particular number while moving all subsequent entries to one entry number later. Does not close gaps. Parameters ========== index : int index where to put the new entry data : data for the new entry """ max_entry = self.max_idx() if index > max_entry: #Easy case self[index] = data return for i in range(max_entry, index - 1, -1): if self.has_entry(i+1): self.__delitem__(i+1) if self.has_entry(i): self.new(self.__getitem__(i), type=self.type(i), number=i+1) self[index] = data def append(self, data): """Add an entry to end of attribute Puts entry after last defined entry (does not fill gaps) Parameters ========== data : data for the new entry """ self[self.max_idx() + 1] = data def _call(self, *args, **kwargs): """Select this CDF and Attr and call the CDF internal interface @param args: Passed directly to the CDF library interface. @type args: various, see :py:mod:`ctypes`. @return: CDF status from the library @rtype: ctypes.c_long @note: Terminal NULL_ is automatically added to L{args}. @raise CDFError: if CDF library reports an error @raise CDFWarning: if CDF library reports a warning and interpreter is set to error on warnings. """ return self._cdf_file._call( const.SELECT_, const.ATTR_, ctypes.c_long(self._cdf_file.attr_num(self._name)[0]), *args, **kwargs) def _entry_len(self, number): """Number of elements in an Entry @param number: number of Entry @type number: int @return: number of elements @rtype: int """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') count = ctypes.c_long(0) self._call( const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_NUMELEMS_, ctypes.byref(count)) return count.value def type(self, number, new_type=None): """Find or change the CDF type of a particular Entry number Parameters ========== number : int number of Entry to check or change Other Parameters ================ new_type type to change this Entry to, from :mod:`~pycdf.const`. Omit to only check type. Returns ======= out : int CDF variable type, see :mod:`~pycdf.const` Notes ===== If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ if new_type != None: if not hasattr(new_type, 'value'): new_type = ctypes.c_long(new_type) size = ctypes.c_long(self._entry_len(number)) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATASPEC_, new_type, size, ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') cdftype = ctypes.c_long(0) status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATATYPE_, ctypes.byref(cdftype), ignore=(const.NO_SUCH_ENTRY,)) if status == const.NO_SUCH_ENTRY: raise IndexError('list index ' + str(number) + ' out of range.') return cdftype.value def has_entry(self, number): """Check if this attribute has a particular Entry number Parameters ========== number : int number of Entry to check or change Returns ======= out : bool True if ``number`` is a valid entry number; False if not """ status = self._call(const.CONFIRM_, self.ENTRY_EXISTENCE_, ctypes.c_long(number), ignore=(const.NO_SUCH_ENTRY, )) return not status == const.NO_SUCH_ENTRY def max_idx(self): """Maximum index of Entries for this Attr Returns ======= out : int maximum Entry number """ count = ctypes.c_long(0) self._call(const.GET_, self.ATTR_MAXENTRY_, ctypes.byref(count)) return count.value def new(self, data, type=None, number=None): """Create a new Entry in this Attribute .. note:: If ``number`` is provided and an Entry with that number already exists, it will be overwritten. Parameters ========== data data to put in the Entry Other Parameters ================ type : int type of the new Entry, from :mod:`~pycdf.const` (otherwise guessed from ``data``) number : int Entry number to write, default is lowest available number. """ if number is None: number = 0 while self.has_entry(number): number += 1 (dims, types, elements) = _Hyperslice.types( data, backward=self._cdf_file.backward) if type is None: #Guess based on other entries type = self._check_other_entries(types) if type is None and self.ENTRY_ == const.zENTRY_: #Try to match variable type vartype = self._cdf_file[number].type() if vartype in types: type = vartype if type is None: type = types[0] elif hasattr(type, 'value'): type = type.value self._write_entry(number, data, type, dims, elements) def number(self): """Find the attribute number for this attribute Returns ======= out : int attribute number """ no = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.ATTR_NUMBER_, self._name, ctypes.byref(no)) return no.value def global_scope(self): """Determine scope of this attribute. Returns ======= out : bool True if global (i.e. gAttr), False if zAttr """ return self._cdf_file.attr_num(self._name)[1] def rename(self, new_name): """Rename this attribute Renaming a zAttribute renames it for *all* zVariables in this CDF! 
Parameters ========== new_name : str the new name of the attribute """ try: enc_name = new_name.encode('ascii') except AttributeError: enc_name = new_name if len(enc_name) > const.CDF_ATTR_NAME_LEN256: raise CDFError(const.BAD_ATTR_NAME) self._call(const.PUT_, const.ATTR_NAME_, enc_name) self._cdf_file.add_attr_to_cache( enc_name, *self._cdf_file.attr_num(self._name)) #still in cache del self._cdf_file._attr_info[self._name] self._name = enc_name def _get_entry(self, number): """Read an Entry associated with this L{Attr} @param number: number of Entry to return @type number: int @return: data from entry numbered L{number} @rtype: list or str """ if not self.has_entry(number): raise IndexError('list index ' + str(number) + ' out of range.') #Make a big enough buffer length = self._entry_len(number) cdftype = self.type(number) if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): buff = numpy.empty((), 'S{0}'.format(length), order='C') else: if not cdftype in lib.numpytypedict: raise CDFError(const.BAD_DATA_TYPE) buff = numpy.empty((length,), lib.numpytypedict[cdftype], order='C') buff = numpy.require(buff, requirements=('C', 'A', 'W')) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.GET_, self.ENTRY_DATA_, buff.ctypes.data_as(ctypes.c_void_p)) #decode if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value): if str == bytes or self._raw: #Py2k, leave as bytes result = bytes(buff) else: #Py3k, make unicode result = str(numpy.char.array(buff).decode()) else: if not self._raw: if cdftype == const.CDF_EPOCH.value: result = lib.v_epoch_to_datetime(buff) elif cdftype == const.CDF_EPOCH16.value: result = lib.v_epoch16_to_datetime(buff) elif cdftype == const.CDF_TIME_TT2000.value: result = lib.v_tt2000_to_datetime(buff) else: result = buff else: result = buff if length == 1: result = result[0] return result def _write_entry(self, number, data, cdf_type, dims, elements): """Write an Entry to this Attr. @param number: number of Entry to write @type number: int @param data: data to write @param cdf_type: the CDF type to write, from :py:mod:`pycdf.const` @param dims: dimensions of L{data} @type dims: list @param elements: number of elements in L{data}, 1 unless it is a string @type elements: int """ if len(dims) == 0: n_write = 1 else: n_write = dims[0] if cdf_type in (const.CDF_CHAR.value, const.CDF_UCHAR.value): data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.dtype('S' + str(elements))) n_write = elements elif cdf_type == const.CDF_EPOCH16.value: if not self._raw: try: data = lib.v_datetime_to_epoch16(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_EPOCH.value: if not self._raw: try: data = lib.v_datetime_to_epoch(data), except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.float64) elif cdf_type == const.CDF_TIME_TT2000.value: if not self._raw: try: data = lib.v_datetime_to_tt2000(data) except AttributeError: pass data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=numpy.int64) elif cdf_type in lib.numpytypedict: data = numpy.require(data, requirements=('C', 'A', 'W'), dtype=lib.numpytypedict[cdf_type]) else: raise CDFError(const.BAD_DATA_TYPE) self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number), const.PUT_, self.ENTRY_DATA_, ctypes.c_long(cdf_type), ctypes.c_long(n_write), data.ctypes.data_as(ctypes.c_void_p)) def _delete(self): """Delete this Attribute Also deletes all Entries associated with it. 
""" self._call(const.DELETE_, const.ATTR_) self._cdf_file.clear_attr_from_cache(self._name) self._name = None class zAttr(Attr): """zAttribute for zVariables within a CDF. .. warning:: Because zAttributes are shared across all variables in a CDF, directly manipulating them may have unexpected consequences. It is safest to operate on zEntries via :class:`zAttrList`. .. note:: When accessing a zAttr, pyCDF exposes only the zEntry corresponding to the associated zVariable. See Also ======== :class:`Attr` """ ENTRY_ = const.zENTRY_ ENTRY_DATA_ = const.zENTRY_DATA_ SCOPE = const.VARIABLE_SCOPE ENTRY_EXISTENCE_ = const.zENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMzENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXzENTRY_ ENTRY_NUMELEMS_ = const.zENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.zENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.zENTRY_DATASPEC_ def insert(self, index, data): """Insert entry at particular index number Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError def append(self, index, data): """Add entry to end of attribute list Since there can only be one zEntry per zAttr, this cannot be implemented. Raises ====== NotImplementedError : always """ raise NotImplementedError class gAttr(Attr): """Global Attribute for a CDF Represents a CDF attribute, providing access to the gEntries in a format that looks like a Python list. General list information is available in the python docs: `1 <http://docs.python.org/tutorial/introduction.html#lists>`_, `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_, `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_. Normally accessed by providing a key to a :class:`gAttrList`: >>> attribute = cdffile.attrs['attribute_name'] >>> first_gentry = attribute[0] Each element of the list is a single gEntry of the appropriate type. The index to the elements is the gEntry number. A gEntry may be either a single string or a 1D array of numerical type. Entries of numerical type (everything but CDF_CHAR and CDF_UCHAR) with a single element are returned as scalars; multiple-element entries are returned as a list. No provision is made for accessing below the entry level; the whole list is returned at once (but Python's slicing syntax can be used to extract individual items from that list.) Multi-dimensional slicing is *not* supported; an entry with multiple elements will have all elements returned (and can thus be sliced itself). Example: >>> first_three = attribute[5, 0:3] #will fail >>> first_three = attribute[5][0:3] #first three elements of 5th Entry gEntries are *not* necessarily contiguous; a gAttribute may have an entry 0 and entry 2 without an entry 1. :meth:`~Attr.len` will return the *number* of gEntries; use :meth:`~Attr.max_idx` to find the highest defined gEntry number and :meth:`~Attr.has_entry` to determine if a particular gEntry number exists. Iterating over all entries is also supported:: >>> entrylist = [entry for entry in attribute] Deleting gEntries will leave a "hole": >>> attribute[0:3] = [1, 2, 3] >>> del attribute[1] >>> attribute.has_entry(1) False >>> attribute.has_entry(2) True >>> print attribute[0:3] [1, None, 3] Multi-element slices over nonexistent gEntries will return ``None`` where no entry exists. Single-element indices for nonexistent gEntries will raise ``IndexError``. Assigning ``None`` to a gEntry will delete it. 
When assigning to a gEntry, the type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing gEntry of the same number in this gAttribute #. other gEntries in this gAttribute #. data-matching constraints described in :meth:`CDF.new`. See Also ======== :class:`Attr` """ ENTRY_ = const.gENTRY_ ENTRY_DATA_ = const.gENTRY_DATA_ SCOPE = const.GLOBAL_SCOPE ENTRY_EXISTENCE_ = const.gENTRY_EXISTENCE_ ATTR_NUMENTRIES_ = const.ATTR_NUMgENTRIES_ ATTR_MAXENTRY_ = const.ATTR_MAXgENTRY_ ENTRY_NUMELEMS_ = const.gENTRY_NUMELEMS_ ENTRY_DATATYPE_ = const.gENTRY_DATATYPE_ ENTRY_DATASPEC_ = const.gENTRY_DATASPEC_ class AttrList(MutableMapping): """Object representing a list of attributes. .. warning:: This class should not be used directly, but only via its subclasses, :class:`gAttrList` and :class:`zAttrList`. Methods listed here are safe to use from the subclasses. .. autosummary:: ~AttrList.clone ~AttrList.copy ~AttrList.from_dict ~AttrList.new ~AttrList.rename .. automethod:: clone .. automethod:: copy .. automethod:: from_dict .. automethod:: new .. automethod:: rename """ def __init__(self, cdf_file, special_entry=None): """Initialize the attribute collection @param cdf_file: CDF these attributes are in @type cdf_file: :py:class:`pycdf.CDF` @param special_entry: callable which returns a "special" entry number, used to limit results for zAttrs to those which match the zVar (i.e. the var number) @type special_entry: callable """ self._cdf_file = cdf_file self.special_entry = special_entry def __getitem__(self, name): """Find an Attribute by name @param name: name of the Attribute to return @type name: str @return: attribute named L{name} @rtype: L{Attr} @raise KeyError: if there is no attribute named L{name} @raise CDFError: other errors in CDF library """ try: attrib = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attrib.global_scope() != self.global_scope: raise KeyError(name + ': no ' + self.attr_name + ' by that name.') return attrib def __setitem__(self, name, data): """Create an Attribute or change its entries @param name: name of Attribute to change @type name: str @param data: Entries to populate this Attribute with. Any existing Entries will be deleted! Another C{Attr} may be specified, in which case all its entries are copied. @type data: scalar, list, or L{Attr} """ if isinstance(data, AttrList): if name in self: del self[name] attr = self._get_or_create(name) for entryno in range(data.max_idx()): if data.has_entry(entryno): attr.new(data[entryno], data.type(entryno), entryno) else: attr = self._get_or_create(name) if isinstance(data, str_classes): data = [data] else: try: junk = len(data) except TypeError: data = [data] attr[:] = data del attr[len(data):] def __delitem__(self, name): """Delete an Attribute (and all its entries) @param name: name of Attribute to delete @type name: str """ try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status == const.NO_SUCH_ATTR: raise KeyError(name + ': ' + str(v)) else: raise if attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) attr._delete() def __iter__(self, current=0): """Iterates over all Attr in this CDF or variable Returns name of one L{Attr} at a time until reaches the end. @note: Returned in number order. 
""" count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) while current < count.value: candidate = self.AttrType(self._cdf_file, current) if candidate.global_scope() == self.global_scope: if self.special_entry is None or \ candidate.has_entry(self.special_entry()): if str == bytes: value = yield(candidate._name) else: value = yield(candidate._name.decode()) if value != None: current = self[value].number() current += 1 def __repr__(self): """Returns representation of attribute list Cannot return anything that can be eval'd to create a copy of the list, so just wrap the informal representation in angle brackets. @return: all the data in this list of attributes @rtype: str """ return '<' + self.__class__.__name__ + ':\n' + str(self) + '\n>' def __str__(self): """Returns a string representation of the attribute list This is an 'informal' representation in that it cannot be evaluated directly to create an L{AttrList}. @return: all the data in this list of attributes @rtype: str """ if self._cdf_file._opened: return '\n'.join([key + ': ' + ( ('\n' + ' ' * (len(key) + 2)).join( [str(value[i]) + ' [' + lib.cdftypenames[value.type(i)] + ']' for i in range(value.max_idx() + 1) if value.has_entry(i)]) if isinstance(value, Attr) else str(value) + ' [' + lib.cdftypenames[self.type(key)] + ']' ) for (key, value) in sorted(self.items())]) else: if isinstance(self._cdf_file.pathname, str): return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname) else: return 'Attribute list in closed CDF {0}'.format( self._cdf_file.pathname.decode('ascii')) def clone(self, master, name=None, new_name=None): """ Clones another attribute list, or one attribute from it, into this list. Parameters ========== master : AttrList the attribute list to copy from. This can be any dict-like object. Other Parameters ================ name : str (optional) name of attribute to clone (default: clone entire list) new_name : str (optional) name of the new attribute, default ``name`` """ if name is None: self._clone_list(master) else: self._clone_attr(master, name, new_name) def copy(self): """ Create a copy of this attribute list Returns ======= out : dict copy of the entries for all attributes in this list """ return dict((key, value[:] if isinstance(value, Attr) else value) for (key, value) in self.items()) def new(self, name, data=None, type=None): """ Create a new Attr in this AttrList Parameters ========== name : str name of the new Attribute Other Parameters ================ data data to put into the first entry in the new Attribute type CDF type of the first entry from :mod:`~pycdf.const`. Only used if data are specified. Raises ====== KeyError : if the name already exists in this list """ if name in self: raise KeyError(name + ' already exists.') #A zAttr without an Entry in this zVar will be a "get" not "create" attr = self._get_or_create(name) if data is not None: if self.special_entry is None: attr.new(data, type) else: attr.new(data, type, self.special_entry()) def rename(self, old_name, new_name): """ Rename an attribute in this list Renaming a zAttribute renames it for *all* zVariables in this CDF! Parameters ========== old_name : str the current name of the attribute new_name : str the new name of the attribute """ AttrList.__getitem__(self, old_name).rename(new_name) def from_dict(self, in_dict): """ Fill this list of attributes from a dictionary .. deprecated:: 0.1.5 Use :meth:`~pycdf.AttrList.clone` instead; it supports cloning from dictionaries. 
Parameters ========== in_dict : dict Attribute list is populated entirely from this dictionary; all existing attributes are deleted. """ warnings.warn("from_dict is deprecated and will be removed. Use clone.", DeprecationWarning) for k in in_dict: self[k] = in_dict[k] for k in list(self): if not k in in_dict: del self[k] def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{AttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name self[new_name] = master[name] def _clone_list(self, master): """Clones this attribute list from another @param master: the attribute list to copy from @type master: L{AttrList} """ for name in master: self._clone_attr(master, name) for name in list(self): #Can't iterate over a list we're changing if not name in master: del self[name] def _get_or_create(self, name): """Retrieve L{Attr} or create it if it doesn't exist @param name: name of the attribute to look up or create @type name: str @return: attribute with this name @rtype: L{Attr} """ attr = None try: attr = self.AttrType(self._cdf_file, name) except CDFError: (t, v, tb) = sys.exc_info() if v.status != const.NO_SUCH_ATTR: raise if attr is None: attr = self.AttrType(self._cdf_file, name, True) elif attr.global_scope() != self.global_scope: raise KeyError(name + ': not ' + self.attr_name) return attr class gAttrList(AttrList): """ Object representing *all* the gAttributes in a CDF. Normally accessed as an attribute of an open :class:`CDF`: >>> global_attribs = cdffile.attrs Appears as a dictionary: keys are attribute names; each value is an attribute represented by a :class:`gAttr` object. To access the global attribute TEXT: >>> text_attr = cdffile.attrs['TEXT'] See Also ======== :class:`AttrList` """ AttrType = gAttr attr_name = 'gAttribute' global_scope = True def __len__(self): """ Number of gAttributes in this CDF Returns ======= out : int number of gAttributes in the CDF """ count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMgATTRS_, ctypes.byref(count)) return count.value class zAttrList(AttrList): """Object representing *all* the zAttributes in a zVariable. Normally accessed as an attribute of a :class:`Var` in an open CDF: >>> epoch_attribs = cdffile['Epoch'].attrs Appears as a dictionary: keys are attribute names, values are the value of the zEntry associated with the appropriate zVariable. Each vAttribute in a CDF may only have a *single* entry associated with each variable. The entry may be a string, a single numerical value, or a series of numerical values. Entries with multiple values are returned as an entire list; direct access to the individual elements is not possible. Example: finding the first dependency of (ISTP-compliant) variable ``Flux``: >>> print cdffile['Flux'].attrs['DEPEND_0'] zAttributes are shared among zVariables, one zEntry allowed per zVariable. (pyCDF hides this detail.) Deleting the last zEntry for a zAttribute will delete the underlying zAttribute. zEntries are created and destroyed by the usual dict methods on the zAttrlist: >>> epoch_attribs['new_entry'] = [1, 2, 4] #assign a list to new zEntry >>> del epoch_attribs['new_entry'] #delete the zEntry The type of the zEntry is guessed from data provided. 
The type is chosen to match the data; subject to that constraint, it will try to match (in order): #. existing zEntry corresponding to this zVar #. other zEntries in this zAttribute #. the type of this zVar #. data-matching constraints described in :py:meth:`CDF.new` See Also ======== :class:`AttrList` """ AttrType = zAttr attr_name = 'zAttribute' global_scope = False def __init__(self, zvar): """Initialize the attribute collection @param zvar: zVariable these attributes are in @param zvar: :py:class:`pycdf.Var` """ super(zAttrList, self).__init__(zvar.cdf_file, zvar._num) self._zvar = zvar def __getitem__(self, name): """Find an zEntry by name @param name: name of the zAttribute to return @type name: str @return: attribute named L{name} @rtype: L{zAttr} @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if attrib.has_entry(zvar_num): attrib._raw = self._zvar._raw return attrib[zvar_num] else: raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) def __delitem__(self, name): """Delete an zEntry by name @param name: name of the zEntry to delete @type name: str @raise KeyError: if there is no attribute named L{name} associated with this zVariable @raise CDFError: other errors in CDF library @note: If this is the only remaining entry, the Attribute will be deleted. """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(str(name) + ': no such attribute for variable ' + str(self._zvar._name)) del attrib[zvar_num] if len(attrib) == 0: attrib._delete() def __setitem__(self, name, data): """Sets a zEntry by name The type of the zEntry is guessed from L{data}. The type is chosen to match the data; subject to that constraint, it will try to match (in order): 1. existing zEntry corresponding to this zVar 2. other zEntries in this zAttribute 3. the type of this zVar 4. data-matching constraints described in L{_Hyperslice.types} @param name: name of zAttribute; zEntry for this zVariable will be set in zAttribute by this name @type name: str @raise CDFError: errors in CDF library @raise ValueError: if unable to find a valid CDF type matching L{data}, or if L{data} is the wrong dimensions. """ try: attr = super(zAttrList, self).__getitem__(name) except KeyError: attr = zAttr(self._cdf_file, name, True) attr._raw = self._zvar._raw attr[self._zvar._num()] = data def __len__(self): """Number of zAttributes in this variable @return: number of zAttributes in the CDF which have entries for this variable. @rtype: int """ length = 0 count = ctypes.c_long(0) self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_, ctypes.byref(count)) current = 0 while current < count.value: candidate = zAttr(self._cdf_file, current) if not candidate.global_scope(): if candidate.has_entry(self._zvar._num()): length += 1 current += 1 return length def type(self, name, new_type=None): """Find or change the CDF type of a zEntry in this zVar @param name: name of the zAttr to check or change @type name: str @param new_type: type to change it to, see :py:mod:`pycdf.const` @type new_type: ctypes.c_long @return: CDF variable type, see :py:mod:`pycdf.const` @rtype: int @note: If changing types, old and new must be equivalent, see CDF User's Guide section 2.5.5 pg. 
57 """ attrib = super(zAttrList, self).__getitem__(name) zvar_num = self._zvar._num() if not attrib.has_entry(zvar_num): raise KeyError(name + ': no such attribute for variable ' + self._zvar.name()) return attrib.type(zvar_num, new_type) def _clone_attr(self, master, name, new_name=None): """Clones a single attribute from one in this list or another Copies data and types from the master attribute to the new one @param master: attribute list to copy attribute from @type master: L{zAttrList} @param name: name of attribute to copy @type name: str @param new_name: name of the new attribute, default L{name} @type new_name: str """ if new_name is None: new_name = name if new_name in self: del self[new_name] self.new(new_name, master[name], master.type(name) if hasattr(master, 'type') else None)
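A minimal usage sketch of the attribute classes above, assuming spacepy.pycdf and the underlying NASA CDF library are installed; the file name 'demo.cdf' and the variable name 'Flux' are hypothetical, used only for illustration, and the expected outputs follow the behavior described in the gAttr and zAttrList docstrings.

from spacepy import pycdf

with pycdf.CDF('demo.cdf', '') as cdf:            # '' master creates a new, empty CDF (hypothetical scratch file)
    # gAttr: a sparse, list-like collection of gEntries keyed by entry number
    cdf.attrs['TEXT'] = ['first entry', 'second entry', 'third entry']
    del cdf.attrs['TEXT'][1]                      # leaves a hole at entry number 1
    print(cdf.attrs['TEXT'].has_entry(1))         # False
    print(cdf.attrs['TEXT'][0:3])                 # ['first entry', None, 'third entry']

    # zAttrList: one zEntry per zVariable, accessed like a dict on the variable
    cdf['Flux'] = [1.0, 2.0, 3.0]                 # assigning a list creates the zVariable
    cdf['Flux'].attrs['DEPEND_0'] = 'Epoch'
    print(cdf['Flux'].attrs['DEPEND_0'])          # Epoch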
dot
This operator computes the dot (inner) product of two tensors. .. note:: Supports 1-D and 2-D Tensors. When the inputs are 2-D, the first dimension is the batch dimension, and the dot product is computed separately for each pair of row vectors in the batch. Parameters: x(Tensor): 1-D or 2-D ``Tensor``. Its dtype should be ``float32``, ``float64``, ``int32``, ``int64`` y(Tensor): 1-D or 2-D ``Tensor``. Its dtype should be ``float32``, ``float64``, ``int32``, ``int64`` name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. Details: :ref:`api_guide_Name` Returns: Tensor: the calculated result Tensor. Examples: .. code-block:: python import paddle import numpy as np x_data = np.random.uniform(0.1, 1, [10]).astype(np.float32) y_data = np.random.uniform(1, 3, [10]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.dot(x, y) print(z)
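The note above says 2-D inputs are handled as a batch of vectors, but the docstring example only covers the 1-D case. A minimal sketch of the batched case, assuming the same paddle.dot API documented above; the input values are made up for illustration.

import paddle

# Two batches of 3-element vectors; the dot product is taken row by row.
x = paddle.to_tensor([[1., 2., 3.], [4., 5., 6.]], dtype='float32')
y = paddle.to_tensor([[1., 1., 1.], [2., 2., 2.]], dtype='float32')
z = paddle.dot(x, y)
print(z)  # one dot product per batch row: 6. and 30.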
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from ..fluid.layer_helper import LayerHelper from ..framework import _varbase_creator, _dygraph_tracer from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype from ..static import Variable from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode from ..fluid.layers import transpose, cast # noqa: F401 from ..fluid import layers import paddle from paddle.common_ops_import import core from paddle.common_ops_import import VarDesc from paddle import _C_ops __all__ = [] def matmul(x, y, transpose_x=False, transpose_y=False, name=None): """ Applies matrix multiplication to two tensors. `matmul` follows the complete broadcast rules, and its behavior is consistent with `np.matmul`. Currently, the input tensors' number of dimensions can be any, `matmul` can be used to achieve the `dot`, `matmul` and `batchmatmul`. The actual behavior depends on the shapes of :math:`x`, :math:`y` and the flag values of :attr:`transpose_x`, :attr:`transpose_y`. Specifically: - If a transpose flag is specified, the last two dimensions of the tensor are transposed. If the tensor is ndim-1 of shape, the transpose is invalid. If the tensor is ndim-1 of shape :math:`[D]`, then for :math:`x` it is treated as :math:`[1, D]`, whereas for :math:`y` it is the opposite: It is treated as :math:`[D, 1]`. The multiplication behavior depends on the dimensions of `x` and `y`. Specifically: - If both tensors are 1-dimensional, the dot product result is obtained. - If both tensors are 2-dimensional, the matrix-matrix product is obtained. - If the `x` is 1-dimensional and the `y` is 2-dimensional, a `1` is prepended to its dimension in order to conduct the matrix multiply. After the matrix multiply, the prepended dimension is removed. - If the `x` is 2-dimensional and `y` is 1-dimensional, the matrix-vector product is obtained. - If both arguments are at least 1-dimensional and at least one argument is N-dimensional (where N > 2), then a batched matrix multiply is obtained. If the first argument is 1-dimensional, a 1 is prepended to its dimension in order to conduct the batched matrix multiply and removed after. If the second argument is 1-dimensional, a 1 is appended to its dimension for the purpose of the batched matrix multiple and removed after. The non-matrix (exclude the last two dimensions) dimensions are broadcasted according the broadcast rule. For example, if input is a (j, 1, n, m) tensor and the other is a (k, m, p) tensor, out will be a (j, k, n, p) tensor. Args: x (Tensor): The input tensor which is a Tensor. y (Tensor): The input tensor which is a Tensor. transpose_x (bool): Whether to transpose :math:`x` before multiplication. transpose_y (bool): Whether to transpose :math:`y` before multiplication. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The output Tensor. Examples: .. 
code-block:: python import paddle import numpy as np # vector * vector x_data = np.random.random([10]).astype(np.float32) y_data = np.random.random([10]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [1] # matrix * vector x_data = np.random.random([10, 5]).astype(np.float32) y_data = np.random.random([5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10] # batched matrix * broadcasted vector x_data = np.random.random([10, 5, 2]).astype(np.float32) y_data = np.random.random([2]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 5] # batched matrix * batched matrix x_data = np.random.random([10, 5, 2]).astype(np.float32) y_data = np.random.random([10, 2, 5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 5, 5] # batched matrix * broadcasted matrix x_data = np.random.random([10, 1, 5, 2]).astype(np.float32) y_data = np.random.random([1, 3, 2, 5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 3, 5, 5] """ if in_dygraph_mode(): return _C_ops.final_state_matmul(x, y, transpose_x, transpose_y) if _in_legacy_dygraph(): op_type = 'matmul_v2' op = getattr(_C_ops, op_type) return op(x, y, 'trans_x', transpose_x, 'trans_y', transpose_y) attrs = { 'trans_x': transpose_x, 'trans_y': transpose_y, } def __check_input(x, y): var_names = {'x': x, 'y': y} for name, val in var_names.items(): check_variable_and_dtype( val, name, ['float16', 'float32', 'float64', 'complex64', 'complex128'], 'matmul') __check_input(x, y) helper = LayerHelper('matmul_v2', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matmul_v2', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs=attrs) return out def norm(x, p='fro', axis=None, keepdim=False, name=None): """ Returns the matrix norm (Frobenius) or vector norm (the 1-norm, the Euclidean or 2-norm, and in general the p-norm for p > 0) of a given tensor. .. note:: This norm API is different from `numpy.linalg.norm`. This api supports high-order input tensors (rank >= 3), and certain axis need to be pointed out to calculate the norm. But `numpy.linalg.norm` only supports 1-D vector or 2-D matrix as input tensor. For p-order matrix norm, this api actually treats matrix as a flattened vector to calculate the vector norm, NOT REAL MATRIX NORM. Args: x (Tensor): The input tensor could be N-D tensor, and the input data type could be float32 or float64. p (float|string, optional): Order of the norm. Supported values are `fro`, `0`, `1`, `2`, `inf`, `-inf` and any positive real number yielding the corresponding p-norm. Not supported: ord < 0 and nuclear norm. Default value is `fro`. axis (int|list|tuple, optional): The axis on which to apply norm operation. If axis is int or list(int)/tuple(int) with only one element, the vector norm is computed over the axis. If `axis < 0`, the dimension to norm operation is rank(input) + axis. If axis is a list(int)/tuple(int) with two elements, the matrix norm is computed over the axis. Defalut value is `None`. keepdim (bool, optional): Whether to reserve the reduced dimension in the output Tensor. 
The result tensor will have fewer dimension than the :attr:`input` unless :attr:`keepdim` is true, default value is False. name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: results of norm operation on the specified axis of input tensor, it's data type is the same as input's Tensor. Examples: .. code-block:: python import paddle import numpy as np shape=[2, 3, 4] np_input = np.arange(24).astype('float32') - 12 np_input = np_input.reshape(shape) x = paddle.to_tensor(np_input) #[[[-12. -11. -10. -9.] [ -8. -7. -6. -5.] [ -4. -3. -2. -1.]] # [[ 0. 1. 2. 3.] [ 4. 5. 6. 7.] [ 8. 9. 10. 11.]]] # compute frobenius norm along last two dimensions. out_fro = paddle.linalg.norm(x, p='fro', axis=[0,1]) # out_fro.numpy() [17.435596 16.911535 16.7332 16.911535] # compute 2-order vector norm along last dimension. out_pnorm = paddle.linalg.norm(x, p=2, axis=-1) #out_pnorm.numpy(): [[21.118711 13.190906 5.477226] # [ 3.7416575 11.224972 19.131126]] # compute 2-order norm along [0,1] dimension. out_pnorm = paddle.linalg.norm(x, p=2, axis=[0,1]) #out_pnorm.numpy(): [17.435596 16.911535 16.7332 16.911535] # compute inf-order norm out_pnorm = paddle.linalg.norm(x, p=np.inf) #out_pnorm.numpy() = [12.] out_pnorm = paddle.linalg.norm(x, p=np.inf, axis=0) #out_pnorm.numpy(): [[12. 11. 10. 9.] [8. 7. 6. 7.] [8. 9. 10. 11.]] # compute -inf-order norm out_pnorm = paddle.linalg.norm(x, p=-np.inf) #out_pnorm.numpy(): [0.] out_pnorm = paddle.linalg.norm(x, p=-np.inf, axis=0) #out_pnorm.numpy(): [[0. 1. 2. 3.] [4. 5. 6. 5.] [4. 3. 2. 1.]] """ def frobenius_norm(input, dim=None, keepdim=False, name=None): """ The frobenius norm OP is to calculate the frobenius norm of certain two dimensions of Tensor `input`. Args: input (Variable): Tensor, data type float32, float64. dim (list, optional): None for last two dimensions. keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False. """ if dim is not None and not (isinstance(dim, list) and len(dim) == 2): raise ValueError( "The dim of frobenius norm op should be None or two elements list!" ) if paddle.in_dynamic_mode(): if dim is None: return _C_ops.frobenius_norm(input, 'keep_dim', keepdim, 'reduce_all', True) return _C_ops.frobenius_norm(input, 'dim', dim, 'keep_dim', keepdim, 'reduce_all', False) attrs = {'dim': dim, 'keep_dim': keepdim, 'reduce_all': False} if dim is None: attrs['reduce_all'] = True check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'frobenius_norm') helper = LayerHelper('frobenius_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op( type='frobenius_norm', inputs={'X': input}, outputs={'Out': out}, attrs=attrs) return out def vector_norm(input, porder=None, axis=None, keepdim=False, asvector=False, name=None): """ Calculate the p-order vector norm for certain dimension of Tensor `input`. Args: input (Variable): Tensor, data type float32, float64. porder (float, optional): None for porder=2.0. axis (int, optional): None for last dimension. keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False. 
""" if paddle.in_dynamic_mode(): if axis is None: axis = -1 return _C_ops.p_norm(input, 'porder', porder, 'axis', axis, 'keepdim', keepdim, 'asvector', asvector) if porder is not None: check_type(porder, 'porder', (float, int), 'p_norm') if axis is not None: check_type(axis, 'axis', (int), 'p_norm') check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'p_norm') attrs = { 'axis': axis if axis is not None else -1, 'porder': float(porder) if porder is not None else 2.0, 'keepdim': keepdim, 'asvector': asvector, 'epsilon': 1e-12, } helper = LayerHelper('p_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op( type='p_norm', inputs={'X': input}, outputs={'Out': out}, attrs=attrs) return out def inf_norm(input, porder=None, axis=axis, keepdim=False, asvector=False, name=None): helper = LayerHelper('frobenius_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op(type='abs', inputs={'X': input}, outputs={'Out': out}) reduce_out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) reduce_all = True if axis == None or axis == [] or asvector == True else False axis = axis if axis != None and axis != [] else [0] reduce_type = 'reduce_max' if porder == np.float( 'inf') else 'reduce_min' helper.append_op( type=reduce_type, inputs={'X': out}, outputs={'Out': reduce_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) return reduce_out def p_matrix_norm(input, porder=1., axis=axis, keepdim=False, name=None): """ NOTE: This function actually treats the matrix as flattened vector to calculate vector norm instead of matrix norm. """ block = LayerHelper('norm', **locals()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) abs_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='abs', inputs={'X': input}, outputs={'Out': abs_out}) pow_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='pow', inputs={'X': abs_out}, outputs={'Out': pow_out}, attrs={'factor': porder}) sum_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': sum_out}, attrs={ 'dim': axis, 'keep_dim': keepdim, 'reduce_all': True if axis is None else False }) porder block.append_op( type='pow', inputs={'X': sum_out}, outputs={'Out': out}, attrs={'factor': float(1. / porder)}) return out if axis is None and p is not None: if isinstance(p, str): if p == "fro": return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name) else: raise ValueError( "only valid string values are 'fro', found {}".format(p)) elif isinstance(p, (int, float)): return vector_norm( x, porder=p, axis=axis, keepdim=keepdim, asvector=True, name=name) else: raise ValueError("only valid p type is string or float, found {}". 
format(type(p))) if isinstance(axis, tuple): axis = list(axis) if isinstance(axis, list) and len(axis) == 1: axis = axis[0] #calculate vector norm, where axis is int or list with only one integer if isinstance(axis, int): if isinstance(p, str): if p == "fro": return vector_norm( x, porder=2, axis=axis, keepdim=keepdim, asvector=False, name=name) else: raise ValueError( "only valid string values are 'fro', found {}".format(p)) elif isinstance(p, (int, float)): return vector_norm( x, axis=axis, porder=p, keepdim=keepdim, asvector=False, name=name) else: raise ValueError( "unspport p for p-order vector norm. except float, found {}". format(p)) #calculate matrix norm, where axis is list with two integers elif isinstance(axis, list) and len(axis) == 2: if p == "fro": return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name) elif p == np.inf or p == -np.inf: return inf_norm(x, porder=p, axis=axis, keepdim=keepdim, name=name) elif p == 0: raise ValueError( "just suport axis type int or list (length of list <=1) if p = 0, found {}". format(axis)) else: return p_matrix_norm( x, porder=p, axis=axis, keepdim=keepdim, name=name) else: raise ValueError( "except axis type int or list (length of list <=2), found {}". format(axis)) def dist(x, y, p=2, name=None): r""" This OP returns the p-norm of (x - y). It is not a norm in a strict sense, only as a measure of distance. The shapes of x and y must be broadcastable. The definition is as follows, for details, please refer to the `numpy's broadcasting <https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`_: - Each input has at least one dimension. - Match the two input dimensions from back to front, the dimension sizes must either be equal, one of them is 1, or one of them does not exist. Where, z = x - y, the shapes of x and y are broadcastable, then the shape of z can be obtained as follows: 1. If the number of dimensions of x and y are not equal, prepend 1 to the dimensions of the tensor with fewer dimensions. For example, The shape of x is [8, 1, 6, 1], the shape of y is [7, 1, 5], prepend 1 to the dimension of y. x (4-D Tensor): 8 x 1 x 6 x 1 y (4-D Tensor): 1 x 7 x 1 x 5 2. Determine the size of each dimension of the output z: choose the maximum value from the two input dimensions. z (4-D Tensor): 8 x 7 x 6 x 5 If the number of dimensions of the two inputs are the same, the size of the output can be directly determined in step 2. When p takes different values, the norm formula is as follows: When p = 0, defining $0^0=0$, the zero-norm of z is simply the number of non-zero elements of z. .. math:: ||z||_{0}=\lim_{p \\rightarrow 0}\sum_{i=1}^{m}|z_i|^{p} When p = inf, the inf-norm of z is the maximum element of z. .. math:: ||z||_\infty=\max_i |z_i| When p = -inf, the negative-inf-norm of z is the minimum element of z. .. math:: ||z||_{-\infty}=\min_i |z_i| Otherwise, the p-norm of z follows the formula, .. math:: ||z||_{p}=(\sum_{i=1}^{m}|z_i|^p)^{\\frac{1}{p}} Args: x (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64. y (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64. p (float, optional): The norm to be computed, its data type is float32 or float64. Default: 2. Returns: Tensor: Tensor that is the p-norm of (x - y). Examples: .. code-block:: python import paddle import numpy as np x = paddle.to_tensor(np.array([[3, 3],[3, 3]]), "float32") y = paddle.to_tensor(np.array([[3, 3],[3, 1]]), "float32") out = paddle.dist(x, y, 0) print(out) # out = [1.] out = paddle.dist(x, y, 2) print(out) # out = [2.] 
out = paddle.dist(x, y, float("inf")) print(out) # out = [2.] out = paddle.dist(x, y, float("-inf")) print(out) # out = [0.] """ check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'dist') check_variable_and_dtype(y, 'dtype', ['float32', 'float64'], 'dist') check_type(p, 'p', (float, int), 'dist') helper = LayerHelper("dist", **locals()) out = helper.create_variable_for_type_inference(x.dtype) inputs = {"X": [x], "Y": [y]} outputs = {'Out': [out]} attrs = {"p": float(p)} helper.append_op( type='dist', inputs=inputs, outputs={'Out': out}, attrs=attrs) return out def cond(x, p=None, name=None): """ Computes the condition number of a matrix or batches of matrices with respect to a matrix norm ``p``. Args: x (Tensor): The input tensor could be tensor of shape ``(*, m, n)`` where ``*`` is zero or more batch dimensions for ``p`` in ``(2, -2)``, or of shape ``(*, n, n)`` where every matrix is invertible for any supported ``p``. And the input data type could be ``float32`` or ``float64``. p (float|string, optional): Order of the norm. Supported values are `fro`, `nuc`, `1`, `-1`, `2`, `-2`, `inf`, `-inf`. Default value is `None`, meaning that the order of the norm is `2`. name (str, optional): The default value is `None`. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: computing results of condition number, its data type is the same as input Tensor ``x``. Examples: .. code-block:: python import paddle import numpy as np x = paddle.to_tensor([[1., 0, -1], [0, 1, 0], [1, 0, 1]]) # compute conditional number when p is None out = paddle.linalg.cond(x) # out.numpy() [1.4142135] # compute conditional number when order of the norm is 'fro' out_fro = paddle.linalg.cond(x, p='fro') # out_fro.numpy() [3.1622777] # compute conditional number when order of the norm is 'nuc' out_nuc = paddle.linalg.cond(x, p='nuc') # out_nuc.numpy() [9.2426405] # compute conditional number when order of the norm is 1 out_1 = paddle.linalg.cond(x, p=1) # out_1.numpy() [2.] # compute conditional number when order of the norm is -1 out_minus_1 = paddle.linalg.cond(x, p=-1) # out_minus_1.numpy() [1.] # compute conditional number when order of the norm is 2 out_2 = paddle.linalg.cond(x, p=2) # out_2.numpy() [1.4142135] # compute conditional number when order of the norm is -1 out_minus_2 = paddle.linalg.cond(x, p=-2) # out_minus_2.numpy() [0.70710677] # compute conditional number when order of the norm is inf out_inf = paddle.linalg.cond(x, p=np.inf) # out_inf.numpy() [2.] # compute conditional number when order of the norm is -inf out_minus_inf = paddle.linalg.cond(x, p=-np.inf) # out_minus_inf.numpy() [1.] 
a = paddle.to_tensor(np.random.randn(2, 4, 4).astype('float32')) # a.numpy() # [[[ 0.14063153 -0.996288 0.7996131 -0.02571543] # [-0.16303636 1.5534962 -0.49919784 -0.04402903] # [-1.1341571 -0.6022629 0.5445269 0.29154757] # [-0.16816919 -0.30972657 1.7521842 -0.5402487 ]] # [[-0.58081484 0.12402827 0.7229862 -0.55046535] # [-0.15178485 -1.1604939 0.75810957 0.30971205] # [-0.9669573 1.0940945 -0.27363303 -0.35416734] # [-1.216529 2.0018666 -0.7773689 -0.17556527]]] a_cond_fro = paddle.linalg.cond(a, p='fro') # a_cond_fro.numpy() [31.572273 28.120834] b = paddle.to_tensor(np.random.randn(2, 3, 4).astype('float64')) # b.numpy() # [[[ 1.61707487 0.46829144 0.38130416 0.82546736] # [-1.72710298 0.08866375 -0.62518804 0.16128892] # [-0.02822879 -1.67764516 0.11141444 0.3220113 ]] # [[ 0.22524372 0.62474921 -0.85503233 -1.03960523] # [-0.76620689 0.56673047 0.85064753 -0.45158196] # [ 1.47595418 2.23646462 1.5701758 0.10497519]]] b_cond_2 = paddle.linalg.cond(b, p=2) # b_cond_2.numpy() [3.30064451 2.51976252] """ def mat_norm(input, porder=1., axis=None): """ NOTE: Calculate the matrix norm of a square matrix or batches of square matrices, when porder is in (1, -1, inf, -inf) """ reduce_all = True if axis is None or axis == [] else False axis = axis if axis != None and axis != [] else [0] keepdim = False if paddle.in_dynamic_mode(): abs_out = _C_ops.abs(input) sum_out = _C_ops.reduce_sum(abs_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == 1 or porder == np.inf: return _C_ops.reduce_max(sum_out, 'dim', [-1], 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == -1 or porder == -np.inf: return _C_ops.reduce_min(sum_out, 'dim', [-1], 'keepdim', keepdim, 'reduce_all', reduce_all) block = LayerHelper('norm', **locals()) abs_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='abs', inputs={'X': input}, outputs={'Out': abs_out}) block.append_op( type='reduce_sum', inputs={'X': abs_out}, outputs={'Out': sum_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) if porder == 1 or porder == np.inf: block.append_op( type='reduce_max', inputs={'X': sum_out}, outputs={'Out': out}, attrs={ 'dim': [-1], 'keep_dim': keepdim, 'reduce_all': reduce_all }) if porder == -1 or porder == -np.inf: block.append_op( type='reduce_min', inputs={'X': sum_out}, outputs={'Out': out}, attrs={ 'dim': [-1], 'keep_dim': keepdim, 'reduce_all': reduce_all }) return out def fro_norm(input, porder=2, axis=[-1]): """ NOTE: Calculate the frobenius norm of a square matrix or batches of square matrices. """ reduce_all = True if axis is None or axis == [] else False keepdim = False if paddle.in_dynamic_mode(): pow_out = _C_ops.pow(input, 'factor', porder) sum_out_1 = _C_ops.reduce_sum(pow_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) sum_out_2 = _C_ops.reduce_sum(sum_out_1, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) return _C_ops.pow(sum_out_2, 'factor', float(1. 
/ porder)) block = LayerHelper('norm', **locals()) pow_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out_1 = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out_2 = block.create_variable_for_type_inference( dtype=block.input_dtype()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='pow', inputs={'X': input}, outputs={'Out': pow_out}, attrs={'factor': porder}) block.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': sum_out_1}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='reduce_sum', inputs={'X': sum_out_1}, outputs={'Out': sum_out_2}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='pow', inputs={'X': sum_out_2}, outputs={'Out': out}, attrs={'factor': float(1. / porder)}) return out def svd_norm(input, porder, axis=[-1]): """ NOTE: Calculate the matrix norm, which is related to singular values, of a matrix or batches of matrices, including nuclear norm, 2-norm and (-2)-norm. """ reduce_all = True if axis is None or axis == [] else False keepdim = False u, s, vh = svd(input, full_matrices=False) if paddle.in_dynamic_mode(): if porder == "nuc": return _C_ops.reduce_sum(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) max_out = _C_ops.reduce_max(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) min_out = _C_ops.reduce_min(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == 2: return _C_ops.elementwise_div(max_out, min_out, 'aixs', axis, 'use_mkldnn', False) if porder == -2: return _C_ops.elementwise_div(min_out, max_out, 'aixs', axis, 'use_mkldnn', False) block = LayerHelper('norm', **locals()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) if porder == "nuc": block.append_op( type='reduce_sum', inputs={'X': s}, outputs={'Out': out}, attrs={ 'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all }) return out max_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) min_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='reduce_max', inputs={'X': s}, outputs={'Out': max_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='reduce_min', inputs={'X': s}, outputs={'Out': min_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) if porder == 2: block.append_op( type='elementwise_div', inputs={'X': max_out, 'Y': min_out}, outputs={'Out': out}, attrs={'aixs': axis, 'use_mkldnn': False}) return out if porder == -2: block.append_op( type='elementwise_div', inputs={'X': min_out, 'Y': max_out}, outputs={'Out': out}, attrs={'aixs': axis, 'use_mkldnn': False}) return out def empty_tensor(input, shape): if paddle.in_dynamic_mode(): return input.reshape(shape) raise ValueError("only support x is nonempty tensor in static mode") x_shape = list(x.shape) if not len(x_shape) >= 2: raise ValueError("input should be a matrix or batches of matrices, " + "but the dimention of received input is {}".format( len(x_shape))) if p == None: p = 2 x_size = 0 if (0 in x_shape) else 1 if p in ("fro", "nuc", 1, -1, np.inf, -np.inf): if x_shape[len(x_shape) - 1] == x_shape[len(x_shape) - 2]: if x_size == 0: return empty_tensor(x, x_shape[:-2]) x_inv = x.inverse() if p == "fro": return fro_norm(x) * fro_norm(x_inv) if p == "nuc": return svd_norm(x, p) * svd_norm(x_inv, p) if p in (1, -1): return mat_norm( x, 
porder=p, axis=[-2]) * mat_norm( x_inv, porder=p, axis=[-2]) if p in (np.inf, -np.inf): return mat_norm( x, porder=p, axis=[-1]) * mat_norm( x_inv, porder=p, axis=[-1]) else: raise ValueError("only support p is {} when input is a ".format(p) + "square matrix or batches of square matrices") elif p in (2, -2): if x_size == 0: return empty_tensor(x, x_shape[:-2]) return svd_norm(x, porder=p) else: raise ValueError( "unsupported {} for p, only supporting ('fro', 'nuc', ".format( p) + "1, -1, 2, -2, inf, -inf) or none") # MASKED: dot function (lines 871-925) def cov(x, rowvar=True, ddof=True, fweights=None, aweights=None, name=None): """ Estimate the covariance matrix of the input variables, given data and weights. A covariance matrix is a square matrix, indicate the covariance of each pair variables in the input matrix. For example, for an N-dimensional samples X=[x1,x2,…xN]T, then the covariance matrix element Cij is the covariance of xi and xj. The element Cii is the variance of xi itself. Parameters: x(Tensor): A N-D(N<=2) Tensor containing multiple variables and observations. By default, each row of x represents a variable. Also see rowvar below. rowvar(Bool, optional): If rowvar is True (default), then each row represents a variable, with observations in the columns. Default: True ddof(Bool, optional): If ddof=True will return the unbiased estimate, and ddof=False will return the simple average. Default: True fweights(Tensor, optional): 1-D Tensor of integer frequency weights; The number of times each observation vector should be repeated. Default: None aweights(Tensor, optional): 1-D Tensor of observation vector weights. How important of the observation vector, larger data means this element is more important. Default: None name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. Details: :ref:`api_guide_Name` Returns: Tensor: The covariance matrix Tensor of the variables. Examples: .. code-block:: python import paddle xt = paddle.rand((3,4)) paddle.linalg.cov(xt) ''' Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, [[0.07918842, 0.06127326, 0.01493049], [0.06127326, 0.06166256, 0.00302668], [0.01493049, 0.00302668, 0.01632146]]) ''' """ op_type = 'cov' if len(x.shape) > 2 or len(x.shape) < 1: raise ValueError( "Input(x) only support N-D (1<=N<=2) tensor in cov, but received " "length of Input(input) is %s." % len(x.shape)) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cov') nx = x if len(x.shape) == 1: nx = x.reshape((1, -1)) if not rowvar and nx.shape[0] != 1: nx = nx.t() w = None observation_num = nx.shape[1] if fweights is not None: w = fweights.astype(nx.dtype) if len(w.shape) > 1: raise ValueError( "Input(fweights) only support N-D (N<=1) tensor in cov, but received " "shape of Input(input) is %s." 
% len(fweights.shape)) if fweights.shape[0] != observation_num: raise ValueError( "The number of Input(fweights) should equal to x's dim[1]: {}, but received " "size of Input(fweights) is {}.".format(observation_num, fweights.shape[0])) if fweights.min() < 0: raise ValueError( "The value of Input(fweights) cannot be negtive, but received " "min of Input(fweights) is {}.".format(fweights.min())) if not paddle.all(fweights == paddle.round(fweights.astype('float64'))): raise ValueError("Input(fweights) must be integer ") if aweights is not None: aw = aweights.astype(nx.dtype) if len(aw.shape) > 1: raise ValueError( "Input(aweights) only support N-D (N<=1) tensor in cov, but received " "length of Input(input) is %s." % len(aweights.shape)) check_variable_and_dtype(aweights, 'dtype', ['float32', 'float64'], 'cov') if aweights.shape[0] != observation_num: raise ValueError( "The number of Input(aweights) should equal to x's dim[1]: {}, but received " "size of Input(aweights) is {}.".format(observation_num, aweights.shape[0])) if aweights.min() < 0: raise ValueError( "The value of Input(aweights) cannot be negtive, but received " "min of Input(aweights) is {}.".format(aweights.min())) if w is not None: w = w * aw else: w = aw w_sum = paddle.to_tensor(observation_num, dtype=nx.dtype) if fweights is not None or aweights is not None: w_sum = w.sum() if w_sum.item() == 0: raise ValueError("The sum of weights is zero, can't be normalized.") if w is not None: nx_w = nx * w avg = (nx_w).sum(axis=1) / w_sum else: avg = nx.sum(axis=1) / w_sum nx_w = nx if w is not None and aweights is not None and ddof == True: norm_factor = w_sum - (w * aweights).sum() / w_sum else: norm_factor = w_sum - ddof if norm_factor <= 0: norm_factor = paddle.to_tensor(0, dtype=nx.dtype) nx = nx - avg.unsqueeze(1) xxt = paddle.mm(nx, nx_w.t().conj()) cov = paddle.divide(xxt, norm_factor).squeeze() return cov def t(input, name=None): """ Transpose <=2-D tensor. 0-D and 1-D tensors are returned as it is and 2-D tensor is equal to the paddle.transpose function which perm dimensions set 0 and 1. Args: input (Tensor): The input Tensor. It is a N-D (N<=2) Tensor of data types float16, float32, float64, int32. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` Returns: Tensor: A transposed n-D Tensor, with data type being float16, float32, float64, int32, int64. For Example: .. code-block:: text # Example 1 (0-D tensor) x = tensor([0.79]) paddle.t(x) = tensor([0.79]) # Example 2 (1-D tensor) x = tensor([0.79, 0.84, 0.32]) paddle.t(x) = tensor([0.79, 0.84, 0.32]) # Example 3 (2-D tensor) x = tensor([0.79, 0.84, 0.32], [0.64, 0.14, 0.57]) paddle.t(x) = tensor([0.79, 0.64], [0.84, 0.14], [0.32, 0.57]) Examples: .. code-block:: python import paddle x = paddle.ones(shape=[2, 3], dtype='int32') x_transposed = paddle.t(x) print(x_transposed.shape) # [3, 2] """ if len(input.shape) > 2: raise ValueError( "Input(input) only support N-D (N<=2) tensor, but received " "length of Input(input) is %s. Perhaps you can use paddle." "tensor.transpose() instead." 
% len(input.shape)) if paddle.in_dynamic_mode(): if len(input.shape) == 1: return input # 2-D tensor perm = [1, 0] out, _ = _C_ops.transpose2(input, 'axis', perm) return out check_variable_and_dtype( input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'], 'transpose') helper = LayerHelper('t', **locals()) out = helper.create_variable_for_type_inference(input.dtype) input_shape = helper.create_variable_for_type_inference(input.dtype) if len(input.shape) == 1: out = input else: helper.append_op( type='transpose2', inputs={'X': [input]}, outputs={'Out': [out], 'XShape': [input_shape]}, attrs={'axis': [1, 0]}) return out def cross(x, y, axis=None, name=None): """ Computes the cross product between two tensors along an axis. Inputs must have the same shape, and the length of their axes should be equal to 3. If `axis` is not given, it defaults to the first axis found with the length 3. Args: x (Tensor): The first input tensor. y (Tensor): The second input tensor. axis (int, optional): The axis along which to compute the cross product. It defaults to the first axis found with the length 3. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor. A Tensor with same data type as `x`. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0], [3.0, 3.0, 3.0]]) y = paddle.to_tensor([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]) z1 = paddle.cross(x, y) # [[-1. -1. -1.] # [ 2. 2. 2.] # [-1. -1. -1.]] z2 = paddle.cross(x, y, axis=1) # [[0. 0. 0.] # [0. 0. 0.] # [0. 0. 0.]] """ if in_dygraph_mode(): return _C_ops.final_state_cross(x, y, axis) else: if _in_legacy_dygraph(): if axis is not None: return _C_ops.cross(x, y, 'dim', axis) else: return _C_ops.cross(x, y) else: helper = LayerHelper("cross", **locals()) out = helper.create_variable_for_type_inference(x.dtype) attrs = dict() attrs['dim'] = axis helper.append_op( type='cross', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs=attrs) return out def cholesky(x, upper=False, name=None): r""" Computes the Cholesky decomposition of one symmetric positive-definite matrix or batches of symmetric positive-definite matrice. If `upper` is `True`, the decomposition has the form :math:`A = U^{T}U` , and the returned matrix :math:`U` is upper-triangular. Otherwise, the decomposition has the form :math:`A = LL^{T}` , and the returned matrix :math:`L` is lower-triangular. Args: x (Tensor): The input tensor. Its shape should be `[*, M, M]`, where * is zero or more batch dimensions, and matrices on the inner-most 2 dimensions all should be symmetric positive-definite. Its data type should be float32 or float64. upper (bool): The flag indicating whether to return upper or lower triangular matrices. Default: False. Returns: Tensor: A Tensor with same shape and data type as `x`. It represents \ triangular matrices generated by Cholesky decomposition. Examples: .. code-block:: python import paddle import numpy as np a = np.random.rand(3, 3) a_t = np.transpose(a, [1, 0]) x_data = np.matmul(a, a_t) + 1e-03 x = paddle.to_tensor(x_data) out = paddle.linalg.cholesky(x, upper=False) print(out) # [[1.190523 0. 0. ] # [0.9906703 0.27676893 0. 
] # [1.25450498 0.05600871 0.06400121]] """ if paddle.in_dynamic_mode(): return _C_ops.cholesky(x, "upper", upper) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cholesky') check_type(upper, 'upper', bool, 'cholesky') helper = LayerHelper('cholesky', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='cholesky', inputs={'X': [x]}, outputs={'Out': out}, attrs={'upper': upper}) return out def matrix_rank(x, tol=None, hermitian=False, name=None): r""" Computes the rank of a matrix. The rank of a matrix is the number of singular values that are greater than the specified `tol` threshold when hermitian=False, or the number of eigenvalues in absolute value that are greater than the specified `tol` threshold when hermitian=True. Args: x (Tensor): The input tensor. Its shape should be `[..., m, n]`, where `...` is zero or more batch dimensions. If `x` is a batch of matrices then the output has the same batch dimensions. The data type of `x` should be float32 or float64. tol (float,Tensor,optional): the tolerance value. Default: None. If `tol` is not specified, and `sigma` is the largest singular value (or eigenvalues in absolute value), and `eps` is the epsilon value for the dtype of `x`, then `tol` is computed with formula `tol=sigma * max(m,n) * eps`. Note that if `x` is a batch of matrices, `tol` is computed this way for every batch. hermitian (bool,optional): indicates whether `x` is Hermitian. Default: False. When hermitian=True, `x` is assumed to be Hermitian, enabling a more efficient method for finding eigenvalues, but `x` is not checked inside the function. Instead, We just use the lower triangular of the matrix to compute. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: Rank of tensor x. Examples: .. code-block:: python import paddle a = paddle.eye(10) b = paddle.linalg.matrix_rank(a) print(b) # b = [10] c = paddle.ones(shape=[3, 4, 5, 5]) d = paddle.linalg.matrix_rank(c, tol=0.01, hermitian=True) print(d) # d = [[1, 1, 1, 1], # [1, 1, 1, 1], # [1, 1, 1, 1]] """ if paddle.in_dynamic_mode(): if tol is None: tol_tensor = None tol_attr = 0.0 use_default_tol = True elif isinstance(tol, Variable): if tol.dtype != x.dtype: tol_tensor = cast(tol, x.dtype) else: tol_tensor = tol tol_attr = 0.0 use_default_tol = False else: tol_tensor = None tol_attr = float(tol) use_default_tol = False return _C_ops.matrix_rank(x, tol_tensor, "tol", tol_attr, 'hermitian', hermitian, 'use_default_tol', use_default_tol) inputs = {} attrs = {} check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'matrix_rank') inputs['X'] = x if tol is None: attrs['use_default_tol'] = True elif isinstance(tol, Variable): check_variable_and_dtype(tol, 'tol', ['float32'], 'matrix_rank') attrs['use_default_tol'] = False if tol.dtype != x.dtype: inputs['TolTensor'] = cast(tol, x.dtype) else: inputs['TolTensor'] = tol else: check_type(tol, 'tol', float, 'matrix_rank') attrs['use_default_tol'] = False attrs['tol'] = tol check_type(hermitian, 'hermitian', bool, 'matrix_rank') attrs['hermitian'] = hermitian helper = LayerHelper('matrix_rank', **locals()) out = helper.create_variable_for_type_inference(dtype='int32') helper.append_op( type='matrix_rank', inputs=inputs, outputs={'Out': out}, attrs=attrs) return out def bmm(x, y, name=None): """ Applies batched matrix multiplication to two tensors. 
Both of the two input tensors must be three-dementional and share the same batch size. if x is a (b, m, k) tensor, y is a (b, k, n) tensor, the output will be a (b, m, n) tensor. Args: x (Tensor): The input Tensor. y (Tensor): The input Tensor. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The product Tensor. Examples: .. code-block:: python import paddle # In imperative mode: # size x: (2, 2, 3) and y: (2, 3, 2) x = paddle.to_tensor([[[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]], [[3.0, 3.0, 3.0], [4.0, 4.0, 4.0]]]) y = paddle.to_tensor([[[1.0, 1.0],[2.0, 2.0],[3.0, 3.0]], [[4.0, 4.0],[5.0, 5.0],[6.0, 6.0]]]) out = paddle.bmm(x, y) #output size: (2, 2, 2) #output value: #[[[6.0, 6.0],[12.0, 12.0]],[[45.0, 45.0],[60.0, 60.0]]] out_np = out.numpy() """ x_shape = x.shape y_shape = y.shape if not len(x_shape) == len(y_shape) == 3: raise ValueError( "x and y should be 3-dimensional. But received x's dimention: {}, y's dimention: {}". format(x_shape, y_shape)) if x_shape[2] != y_shape[1]: raise ValueError( "x's width must be equal with y's height. But received x's shape: {}, y's shape: {}". format(x_shape, y_shape)) if x_shape[0] != y_shape[0]: raise ValueError( "x's batch (shape[0]) must be equal with y's batch (shape[0]). But received x's shape: {}, y's shape: {}". format(x_shape, y_shape)) if paddle.in_dynamic_mode(): return _C_ops.bmm(x, y) helper = LayerHelper('bmm', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type='bmm', inputs={'X': x, 'Y': y}, outputs={'Out': out}) return out def histogram(input, bins=100, min=0, max=0, name=None): """ Computes the histogram of a tensor. The elements are sorted into equal width bins between min and max. If min and max are both zero, the minimum and maximum values of the data are used. Args: input (Tensor): A Tensor(or LoDTensor) with shape :math:`[N_1, N_2,..., N_k]` . The data type of the input Tensor should be float32, float64, int32, int64. bins (int): number of histogram bins min (int): lower end of the range (inclusive) max (int): upper end of the range (inclusive) Returns: Tensor: data type is int64, shape is (nbins,). Examples: .. code-block:: python import paddle inputs = paddle.to_tensor([1, 2, 1]) result = paddle.histogram(inputs, bins=4, min=0, max=3) print(result) # [0, 2, 1, 0] """ if paddle.in_dynamic_mode(): return _C_ops.histogram(input, "bins", bins, "min", min, "max", max) helper = LayerHelper('histogram', **locals()) check_variable_and_dtype( input, 'X', ['int32', 'int64', 'float32', 'float64'], 'histogram') out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64) helper.append_op( type='histogram', inputs={'X': input}, outputs={'Out': out}, attrs={'bins': bins, 'min': min, 'max': max}) return out def bincount(x, weights=None, minlength=0, name=None): """ Computes frequency of each value in the input tensor. Args: x (Tensor): A Tensor with non-negative integer. Should be 1-D tensor. weights (Tensor, optional): Weight for each value in the input tensor. Should have the same shape as input. Default is None. minlength (int, optional): Minimum number of bins. Should be non-negative integer. Default is 0. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor of frequency. Examples: .. 
code-block:: python import paddle x = paddle.to_tensor([1, 2, 1, 4, 5]) result1 = paddle.bincount(x) print(result1) # [0, 2, 1, 0, 1, 1] w = paddle.to_tensor([2.1, 0.4, 0.1, 0.5, 0.5]) result2 = paddle.bincount(x, weights=w) print(result2) # [0., 2.19999981, 0.40000001, 0., 0.50000000, 0.50000000] """ if x.dtype not in [paddle.int32, paddle.int64]: raise TypeError("Elements in Input(x) should all be integers") if paddle.in_dynamic_mode(): return _C_ops.bincount(x, weights, "minlength", minlength) helper = LayerHelper('bincount', **locals()) check_variable_and_dtype(x, 'X', ['int32', 'int64'], 'bincount') if weights is not None: check_variable_and_dtype(weights, 'Weights', ['int32', 'int64', 'float32', 'float64'], 'bincount') out = helper.create_variable_for_type_inference(dtype=weights.dtype) else: out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='bincount', inputs={'X': x, 'Weights': weights}, outputs={'Out': out}, attrs={'minlength': minlength}) return out def mv(x, vec, name=None): """ Performs a matrix-vector product of the matrix x and the vector vec. Args: x (Tensor): A tensor with shape :math:`[M, N]` , The data type of the input Tensor x should be one of float32, float64. vec (Tensor): A tensor with shape :math:`[N]` , The data type of the input Tensor x should be one of float32, float64. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor which is producted by x and vec. Examples: .. code-block:: python # x: [M, N], vec: [N] # paddle.mv(x, vec) # out: [M] import numpy as np import paddle x_data = np.array([[2, 1, 3], [3, 0, 1]]).astype("float64") x = paddle.to_tensor(x_data) vec_data = np.array([3, 5, 1]) vec = paddle.to_tensor(vec_data).astype("float64") out = paddle.mv(x, vec) """ if in_dygraph_mode(): return _C_ops.final_state_mv(x, vec) else: if _in_legacy_dygraph(): out = _C_ops.mv(x, vec) return out else: def __check_input(x, vec): var_names = {'x': x, 'vec': vec} for name, val in var_names.items(): check_variable_and_dtype(val, name, ['float32', 'float64'], 'mv') x_shape = list(x.shape) vec_shape = list(vec.shape) if len(x_shape) != 2: raise ValueError( "x should be 2-dimensional. But received x's dimention: {}". format(x_shape)) if len(vec_shape) != 1: raise ValueError( "vec should be 1-dimensional. But received vec's dimention: {}". format(vec_shape)) __check_input(x, vec) helper = LayerHelper('mv', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='mv', inputs={'X': x, 'Vec': vec}, outputs={'Out': out}) return out def det(x, name=None): """ Calculates determinant value of a square matrix or batches of square matrices. Args: x (Tensor): input (Tensor): the input matrix of size `(n, n)` or the batch of matrices of size `(*, n, n)` where `*` is one or more batch dimensions. Returns: y (Tensor):the determinant value of a square matrix or batches of square matrices. Examples: .. 
code-block:: python import paddle x = paddle.randn([3,3,3]) A = paddle.linalg.det(x) print(A) # [ 0.02547996, 2.52317095, -6.15900707]) """ if paddle.in_dynamic_mode(): return _C_ops.determinant(x) check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'det') input_shape = list(x.shape) assert len(input_shape) >= 2, \ "The x must be at least 2-dimensional, " \ "but received Input x's dimensional: %s.\n" % \ len(input_shape) assert (input_shape[-1] == input_shape[-2]), \ "Expect squared input," \ "but received %s by %s matrix.\n" \ %(input_shape[-2], input_shape[-1]) \ helper = LayerHelper('determinant', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='determinant', inputs={'Input': [x]}, outputs={'Out': [out]}) return out def slogdet(x, name=None): """ Calculates the sign and natural logarithm of the absolute value of a square matrix's or batches square matrices' determinant. The determinant can be computed with ``sign * exp(logabsdet) Supports input of float, double Note that for matrices that have zero determinant, this returns ``(0, -inf)`` Args: x (Tensor): the batch of matrices of size :math:`(*, n, n)` where math:`*` is one or more batch dimensions. Returns: y (Tensor): A tensor containing the sign of the determinant and the natural logarithm of the absolute value of determinant, respectively. Examples: .. code-block:: python import paddle x = paddle.randn([3,3,3]) A = paddle.linalg.slogdet(x) print(A) # [[ 1. , 1. , -1. ], # [-0.98610914, -0.43010661, -0.10872950]]) """ if paddle.in_dynamic_mode(): return _C_ops.slogdeterminant(x) check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'slogdet') input_shape = list(x.shape) assert len(input_shape) >= 2, \ "The x must be at least 2-dimensional, " \ "but received Input x's dimensional: %s.\n" % \ len(input_shape) assert (input_shape[-1] == input_shape[-2]), \ "Expect squared input," \ "but received %s by %s matrix.\n" \ %(input_shape[-2], input_shape[-1]) \ helper = LayerHelper('slogdeterminant', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='slogdeterminant', inputs={'Input': [x]}, outputs={'Out': [out]}) return out def svd(x, full_matrices=False, name=None): r""" Computes the singular value decomposition of one matrix or a batch of regular matrices. Let :math:`X` be the input matrix or a batch of input matrices, the output should satisfies: .. math:: X = U * diag(S) * VT Args: x (Tensor): The input tensor. Its shape should be `[..., N, M]`, where `...` is zero or more batch dimensions. N and M can be arbitraty positive number. Note that if x is sigular matrices, the grad is numerical instable. The data type of x should be float32 or float64. full_matrices (bool): A flag to control the behavor of svd. If full_matrices = True, svd op will compute full U and V matrics, which means shape of U is `[..., N, N]`, shape of V is `[..., M, M]`. K = min(M, N). If full_matrices = False, svd op will use a economic method to store U and V. which means shape of U is `[..., N, K]`, shape of V is `[..., M, K]`. K = min(M, N). name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tuple of 3 tensors: (U, S, VH). VH is the conjugate transpose of V. S is the singlar value vectors of matrics with shape `[..., K]` Examples: .. 
code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [1.0, 3.0], [4.0, 6.0]]).astype('float64') x = x.reshape([3, 2]) u, s, vh = paddle.linalg.svd(x) print (u) #U = [[ 0.27364809, -0.21695147 ], # [ 0.37892198, -0.87112408 ], # [ 0.8840446 , 0.44053933 ]] print (s) #S = [8.14753743, 0.78589688] print (vh) #VT= [[ 0.51411221, 0.85772294], # [ 0.85772294, -0.51411221]] # one can verify : U * S * VT == X # U * UH == I # V * VH == I """ if paddle.in_dynamic_mode(): return _C_ops.svd(x, 'full_matrices', full_matrices) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'svd') check_type(full_matrices, 'full_matrices', bool, 'svd') helper = LayerHelper('svd', **locals()) u = helper.create_variable_for_type_inference(dtype=x.dtype) vh = helper.create_variable_for_type_inference(dtype=x.dtype) s = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['full_matrices'] = full_matrices helper.append_op( type='svd', inputs={'X': [x]}, outputs={'U': u, 'VH': vh, 'S': s}, attrs=attrs, ) return u, s, vh def matrix_power(x, n, name=None): r""" Computes the n-th power of a square matrix or a batch of square matrices. Let :math:`X` be a sqaure matrix or a batch of square matrices, :math:`n` be an exponent, the equation should be: .. math:: Out = X ^ {n} Specifically, - If `n > 0`, it returns the matrix or a batch of matrices raised to the power of `n`. - If `n = 0`, it returns the identity matrix or a batch of identity matrices. - If `n < 0`, it returns the inverse of each matrix (if invertible) raised to the power of `abs(n)`. Args: x (Tensor): A square matrix or a batch of square matrices to be raised to power `n`. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. n (int): The exponent. It can be any positive, negative integer or zero. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The n-th power of the matrix (or the batch of matrices) `x`. Its data type should be the same as that of `x`. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1, 2, 3], [1, 4, 9], [1, 8, 27]], dtype='float64') print(paddle.linalg.matrix_power(x, 2)) # [[6. , 34. , 102.], # [14. , 90. , 282.], # [36. , 250., 804.]] print(paddle.linalg.matrix_power(x, 0)) # [[1., 0., 0.], # [0., 1., 0.], # [0., 0., 1.]] print(paddle.linalg.matrix_power(x, -2)) # [[ 12.91666667, -12.75000000, 2.83333333 ], # [-7.66666667 , 8. , -1.83333333 ], # [ 1.80555556 , -1.91666667 , 0.44444444 ]] """ if paddle.in_dynamic_mode(): return _C_ops.matrix_power(x, "n", n) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'matrix_power') check_type(n, 'n', int, 'matrix_power') helper = LayerHelper('matrix_power', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matrix_power', inputs={'X': x}, outputs={'Out': out}, attrs={'n': n}) return out def qr(x, mode="reduced", name=None): r""" Computes the QR decomposition of one matrix or batches of matrice (backward is unsupported now). Args: x (Tensor): The input tensor. Its shape should be `[..., M, N]`, where ... is zero or more batch dimensions. M and N can be arbitrary positive number. The data type of x should be float32 or float64. mode (str, optional): A flag to control the behavior of qr, the default is "reduced". 
Suppose x's shape is `[..., M, N]` and denoting `K = min(M, N)`: If mode = "reduced", qr op will return reduced Q and R matrices, which means Q's shape is `[..., M, K]` and R's shape is `[..., K, N]`. If mode = "complete", qr op will return complete Q and R matrices, which means Q's shape is `[..., M, M]` and R's shape is `[..., M, N]`. If mode = "r", qr op will only return reduced R matrix, which means R's shape is `[..., K, N]`. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: If mode = "reduced" or mode = "complete", qr will return a two tensor-tuple, which represents Q and R. If mode = "r", qr will return a tensor which represents R. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') q, r = paddle.linalg.qr(x) print (q) print (r) # Q = [[-0.16903085, 0.89708523], # [-0.50709255, 0.27602622], # [-0.84515425, -0.34503278]]) # R = [[-5.91607978, -7.43735744], # [ 0. , 0.82807867]]) # one can verify : X = Q * R ; """ if paddle.in_dynamic_mode(): q, r = _C_ops.qr(x, 'mode', mode) if mode == "r": return r else: return q, r check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'qr') check_type(mode, 'mode', str, 'qr') helper = LayerHelper('qr', **locals()) q = helper.create_variable_for_type_inference(dtype=x.dtype) r = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['mode'] = mode helper.append_op( type='qr', inputs={'X': [x]}, outputs={'Q': q, 'R': r}, attrs=attrs) if mode == "r": return r else: return q, r def lu(x, pivot=True, get_infos=False, name=None): r""" Computes the LU factorization of an N-D(N>=2) matrix x. Returns the LU factorization(inplace x) and Pivots. low triangular matrix L and upper triangular matrix U are combined to a single LU matrix. Pivoting is done if pivot is set to True. P mat can be get by pivots: # ones = eye(rows) #eye matrix of rank rows # for i in range(cols): # swap(ones[i], ones[pivots[i]]) # return ones Args: X (Tensor): the tensor to factor of N-dimensions(N>=2). pivot (bool, optional): controls whether pivoting is done. Default: True. get_infos (bool, optional): if set to True, returns an info IntTensor. Default: False. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: factorization (Tensor): LU matrix, the factorization of input X. pivots (IntTensor): the pivots of size(∗(N-2), min(m,n)). `pivots` stores all the intermediate transpositions of rows. The final permutation `perm` could be reconstructed by this, details refer to upper example. infos (IntTensor, optional): if `get_infos` is `True`, this is a tensor of size (∗(N-2)) where non-zero values indicate whether factorization for the matrix or each minibatch has succeeded or failed. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') lu,p,info = paddle.linalg.lu(x, get_infos=True) # >>> lu: # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. 
], # [0.20000000, 0.80000000], # [0.60000000, 0.50000000]]) # >>> p # Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # [3, 3]) # >>> info # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # 0) P,L,U = paddle.linalg.lu_unpack(lu,p) # >>> P # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[0., 1., 0.], # [0., 0., 1.], # [1., 0., 0.]]), # >>> L # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[1. , 0. ], # [0.20000000, 1. ], # [0.60000000, 0.50000000]]), # >>> U # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0. , 0.80000000]])) # one can verify : X = P @ L @ U ; """ if paddle.in_dynamic_mode(): LU, Piv, Info = _C_ops.lu(x, 'pivots', pivot) if get_infos: return LU, Piv, Info else: return LU, Piv check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu') helper = LayerHelper('lu', **locals()) lu = helper.create_variable_for_type_inference(dtype=x.dtype) p = helper.create_variable_for_type_inference(dtype='int') info = helper.create_variable_for_type_inference(dtype='int') attrs = dict() attrs['pivots'] = pivot helper.append_op( type='lu', inputs={'X': x}, outputs={'Out': lu, 'Pivots': p, 'Infos': info}, attrs=attrs) if get_infos: return lu, p, info else: return lu, p def lu_unpack(x, y, unpack_ludata=True, unpack_pivots=True, name=None): r""" Unpack L U and P to single matrix tensor . unpack L and U matrix from LU, unpack permutation matrix P from Pivtos . P mat can be get by pivots: # ones = eye(rows) #eye matrix of rank rows # for i in range(cols): # swap(ones[i], ones[pivots[i]]) Args: x (Tensor): The LU tensor get from paddle.linalg.lu, which is combined by L and U. y (Tensor): Pivots get from paddle.linalg.lu. unpack_ludata (bool,optional): whether to unpack L and U from x. Default: True. unpack_pivots (bool, optional): whether to unpack permutation matrix P from Pivtos. Default: True. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: P (Tensor): Permutation matrix P of lu factorization. L (Tensor): The lower triangular matrix tensor of lu factorization. U (Tensor): The upper triangular matrix tensor of lu factorization. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') lu,p,info = paddle.linalg.lu(x, get_infos=True) # >>> lu: # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0.20000000, 0.80000000], # [0.60000000, 0.50000000]]) # >>> p # Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # [3, 3]) # >>> info # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # 0) P,L,U = paddle.linalg.lu_unpack(lu,p) # >>> P # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[0., 1., 0.], # [0., 0., 1.], # [1., 0., 0.]]), # >>> L # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[1. , 0. ], # [0.20000000, 1. ], # [0.60000000, 0.50000000]]), # >>> U # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0. 
, 0.80000000]])) # one can verify : X = P @ L @ U ; """ if paddle.in_dynamic_mode(): P, L, U = _C_ops.lu_unpack(x, y, 'unpack_ludata', unpack_ludata, 'unpack_pivots', unpack_pivots) return P, L, U check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu_unpack') helper = LayerHelper('lu_unpack', **locals()) p = helper.create_variable_for_type_inference(dtype=x.dtype) l = helper.create_variable_for_type_inference(dtype=x.dtype) u = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['unpack_ludata'] = unpack_ludata attrs['unpack_pivots'] = unpack_pivots helper.append_op( type='lu_unpack', inputs={'X': x, 'Pivots': y}, outputs={'Pmat': p, 'L': l, 'U': u}, attrs=attrs) return p, l, u def eig(x, name=None): """ This API performs the eigenvalue decomposition of a square matrix or a batch of square matrices. .. note:: If the matrix is a Hermitian or a real symmetric matrix, please use :ref:`paddle.linalg.eigh` instead, which is much faster. If only eigenvalues is needed, please use :ref:`paddle.linalg.eigvals` instead. If the matrix is of any shape, please use :ref:`paddle.linalg.svd`. This API is only supported on CPU device. The output datatype is always complex for both real and complex input. Args: x (Tensor): A tensor with shape math:`[*, N, N]`, The data type of the x should be one of ``float32``, ``float64``, ``compplex64`` or ``complex128``. name (str, optional): The default value is `None`. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Eigenvalues(Tensors): A tensor with shape math:`[*, N]` refers to the eigen values. Eigenvectors(Tensors): A tensor with shape math:`[*, N, N]` refers to the eigen vectors. Examples: .. code-block:: python import paddle import numpy as np paddle.device.set_device("cpu") x_data = np.array([[1.6707249, 7.2249975, 6.5045543], [9.956216, 8.749598, 6.066444 ], [4.4251957, 1.7983172, 0.370647 ]]).astype("float32") x = paddle.to_tensor(x_data) w, v = paddle.linalg.eig(x) print(w) # Tensor(shape=[3, 3], dtype=complex128, place=CPUPlace, stop_gradient=False, # [[(-0.5061363550800655+0j) , (-0.7971760990842826+0j) , # (0.18518077798279986+0j)], # [(-0.8308237755993192+0j) , (0.3463813401919749+0j) , # (-0.6837005269141947+0j) ], # [(-0.23142567697893396+0j), (0.4944999840400175+0j) , # (0.7058765252952796+0j) ]]) print(v) # Tensor(shape=[3], dtype=complex128, place=CPUPlace, stop_gradient=False, # [ (16.50471283351188+0j) , (-5.5034820550763515+0j) , # (-0.21026087843552282+0j)]) """ if paddle.in_dynamic_mode(): w, v = _C_ops.eig(x) return w, v check_variable_and_dtype( x, 'X', ['float32', 'float64', 'complex64', 'complex128'], 'eig') helper = LayerHelper('eig', **locals()) w = helper.create_variable_for_type_inference(x.dtype) v = helper.create_variable_for_type_inference(x.dtype) inputs = {'X': x} outputs = {'Eigenvalues': w, 'Eigenvectors': v} helper.append_op(type='eig', inputs=inputs, outputs=outputs) return w, v def eigvals(x, name=None): """ Compute the eigenvalues of one or more general matrices. Warning: The gradient kernel of this operator does not yet developed. If you need back propagation through this operator, please replace it with paddle.linalg.eig. Args: x (Tensor): A square matrix or a batch of square matrices whose eigenvalues will be computed. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32, float64, complex64, or complex128. 
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: A tensor containing the unsorted eigenvalues which has the same batch dimensions with `x`. The eigenvalues are complex-valued even when `x` is real. Examples: .. code-block:: python import paddle paddle.set_device("cpu") paddle.seed(1234) x = paddle.rand(shape=[3, 3], dtype='float64') # [[0.02773777, 0.93004224, 0.06911496], # [0.24831591, 0.45733623, 0.07717843], # [0.48016702, 0.14235102, 0.42620817]]) print(paddle.linalg.eigvals(x)) # [(-0.27078833542132674+0j), (0.29962280156230725+0j), (0.8824477020120244+0j)] #complex128 """ check_variable_and_dtype(x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigvals') x_shape = list(x.shape) if len(x_shape) < 2: raise ValueError( "The dimension of Input(x) should be at least 2, but received x's dimention = {}, x's shape = {}". format(len(x_shape), x_shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The last two dimensions of Input(x) should be equal, but received x's shape = {}". format(x_shape)) if paddle.in_dynamic_mode(): return _C_ops.eigvals(x) helper = LayerHelper('eigvals', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type='eigvals', inputs={'X': x}, outputs={'Out': out}) return out def multi_dot(x, name=None): """ Multi_dot is an operator that calculates multiple matrix multiplications. Supports inputs of float16(only GPU support), float32 and float64 dtypes. This function does not support batched inputs. The input tensor in [x] must be 2-D except for the first and last can be 1-D. If the first tensor is a 1-D vector of shape(n, ) it is treated as row vector of shape(1, n), similarly if the last tensor is a 1D vector of shape(n, ), it is treated as a column vector of shape(n, 1). If the first and last tensor are 2-D matrix, then the output is also 2-D matrix, otherwise the output is a 1-D vector. Multi_dot will select the lowest cost multiplication order for calculation. The cost of multiplying two matrices with shapes (a, b) and (b, c) is a * b * c. Given matrices A, B, C with shapes (20, 5), (5, 100), (100, 10) respectively, we can calculate the cost of different multiplication orders as follows: - Cost((AB)C) = 20x5x100 + 20x100x10 = 30000 - Cost(A(BC)) = 5x100x10 + 20x5x10 = 6000 In this case, multiplying B and C first, then multiply A, which is 5 times faster than sequential calculation. Args: x ([Tensor]): The input tensors which is a list Tensor. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The output Tensor. Examples: .. 
code-block:: python import paddle import numpy as np # A * B A_data = np.random.random([3, 4]).astype(np.float32) B_data = np.random.random([4, 5]).astype(np.float32) A = paddle.to_tensor(A_data) B = paddle.to_tensor(B_data) out = paddle.linalg.multi_dot([A, B]) print(out.numpy().shape) # [3, 5] # A * B * C A_data = np.random.random([10, 5]).astype(np.float32) B_data = np.random.random([5, 8]).astype(np.float32) C_data = np.random.random([8, 7]).astype(np.float32) A = paddle.to_tensor(A_data) B = paddle.to_tensor(B_data) C = paddle.to_tensor(C_data) out = paddle.linalg.multi_dot([A, B, C]) print(out.numpy().shape) # [10, 7] """ if paddle.in_dynamic_mode(): return _C_ops.multi_dot(x) check_type(x, 'x', (list, tuple), 'multi_dot') for id, item in enumerate(x): check_variable_and_dtype(item, 'x[' + str(id) + ']', ['float16', 'float32', 'float64'], 'multi_dot') if item.dtype != x[0].dtype: raise TypeError( "All the Tensors in the input must have the same data type.") helper = LayerHelper('multi_dot', **locals()) dtype = helper.input_dtype(input_param_name='x') out = helper.create_variable_for_type_inference(dtype) helper.append_op(type='multi_dot', inputs={"X": x}, outputs={"Out": out}) return out def eigh(x, UPLO='L', name=None): """ Compute the eigenvalues and eigenvectors of a complex Hermitian (conjugate symmetric) or a real symmetric matrix. Args: x (Tensor): A tensor with shape :math:`[*, N, N]` , The data type of the input Tensor x should be one of float32, float64, complex64, complex128. UPLO(str, optional): (string, default 'L'), 'L' represents the lower triangular matrix, "'U' represents the upper triangular matrix.". name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: out_value(Tensor): A Tensor with shape [*, N] and data type of float32 and float64. The eigenvalues of eigh op. out_vector(Tensor): A Tensor with shape [*, N, N] and data type of float32,float64,complex64 and complex128. The eigenvectors of eigh op. Examples: .. code-block:: python import numpy as np import paddle x_data = np.array([[1, -2j], [2j, 5]]) x = paddle.to_tensor(x_data) out_value, out_vector = paddle.linalg.eigh(x, UPLO='L') print(out_value) #[0.17157288, 5.82842712] print(out_vector) #[(-0.9238795325112867+0j), (-0.3826834323650898+0j)], #[ 0.3826834323650898j , -0.9238795325112867j ]] """ if paddle.in_dynamic_mode(): return _C_ops.eigh(x, 'UPLO', UPLO) def __check_input(x, UPLO): x_shape = list(x.shape) if len(x.shape) < 2: raise ValueError( "Input(input) only support >=2 tensor, but received " "length of Input(input) is %s." % len(x.shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The input matrix must be batches of square matrices. But received x's dimention: {}". format(x_shape)) if UPLO != 'L' and UPLO != 'U': raise ValueError( "UPLO must be L or U. 
But received UPLO is: {}".format(UPLO)) __check_input(x, UPLO) helper = LayerHelper('eigh', **locals()) check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigh') out_value = helper.create_variable_for_type_inference(dtype=x.dtype) out_vector = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='eigh', inputs={'X': x}, outputs={'Eigenvalues': out_value, 'Eigenvectors': out_vector}, attrs={'UPLO': UPLO}) return out_value, out_vector def pinv(x, rcond=1e-15, hermitian=False, name=None): r""" Calculate pseudo inverse via SVD(singular value decomposition) of one matrix or batches of regular matrix. .. math:: if hermitian == False: x = u * s * vt (SVD) out = v * 1/s * ut else: x = u * s * ut (eigh) out = u * 1/s * u.conj().transpose(-2,-1) If x is hermitian or symmetric matrix, svd will be replaced with eigh. Args: x(Tensor): The input tensor. Its shape should be (*, m, n) where * is zero or more batch dimensions. m and n can be arbitraty positive number. The data type of x should be float32 or float64 or complex64 or complex128. When data type is complex64 or cpmplex128, hermitian should be set True. rcond(Tensor, optional): the tolerance value to determine when is a singular value zero. Defalut:1e-15. hermitian(bool, optional): indicates whether x is Hermitian if complex or symmetric if real. Default: False. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The tensor with same data type with x. it represents pseudo inverse of x. Its shape should be (*, n, m). Examples: .. code-block:: python import paddle x = paddle.arange(15).reshape((3, 5)).astype('float64') input = paddle.to_tensor(x) out = paddle.linalg.pinv(input) print(input) print(out) # input: # [[0. , 1. , 2. , 3. , 4. ], # [5. , 6. , 7. , 8. , 9. 
], # [10., 11., 12., 13., 14.]] # out: # [[-0.22666667, -0.06666667, 0.09333333], # [-0.12333333, -0.03333333, 0.05666667], # [-0.02000000, 0.00000000, 0.02000000], # [ 0.08333333, 0.03333333, -0.01666667], # [ 0.18666667, 0.06666667, -0.05333333]] # one can verify : x * out * x = x ; # or out * x * out = x ; """ if paddle.in_dynamic_mode(): if not hermitian: # combine svd and matmul op u, s, vt = _C_ops.svd(x, 'full_matrices', False) max_singular_val = _C_ops.reduce_max(s, 'dim', [-1], 'keep_dim', True, \ 'reduce_all', False) rcond = paddle.to_tensor(rcond, dtype=x.dtype) cutoff = rcond * max_singular_val y = float('inf') y = paddle.to_tensor(y, dtype=x.dtype) condition = s > cutoff cond_int = layers.cast(condition, s.dtype) cond_not_int = layers.cast(layers.logical_not(condition), s.dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st, _ = _C_ops.unsqueeze2(singular, 'axes', [-2]) dims = list(range(len(vt.shape))) perm = dims[:-2] + [dims[-1]] + [dims[-2]] v, _ = _C_ops.transpose2(vt, 'axis', perm) out_1 = v * st out_2 = _C_ops.matmul_v2(out_1, u, 'trans_x', False, 'trans_y', True) return out_2 else: # combine eigh and matmul op s, u = _C_ops.eigh(x, 'UPLO', 'L') s_abs = paddle.abs(s) max_singular_val = _C_ops.reduce_max(s_abs, 'dim', [-1], 'keep_dim', True, \ 'reduce_all', False) rcond = paddle.to_tensor(rcond, dtype=s.dtype) cutoff = rcond * max_singular_val y = float('inf') y = paddle.to_tensor(y, dtype=s.dtype) condition = s_abs > cutoff cond_int = layers.cast(condition, s.dtype) cond_not_int = layers.cast(layers.logical_not(condition), s.dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st, _ = _C_ops.unsqueeze2(singular, 'axes', [-2]) out_1 = u * st u_conj = _C_ops.conj(u) out_2 = _C_ops.matmul_v2(out_1, u_conj, 'trans_x', False, 'trans_y', True) return out_2 else: if not hermitian: helper = LayerHelper('pinv', **locals()) dtype = x.dtype check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'pinv') u = helper.create_variable_for_type_inference(dtype) s = helper.create_variable_for_type_inference(dtype) vt = helper.create_variable_for_type_inference(dtype) helper.append_op( type='svd', inputs={'X': [x]}, outputs={'U': u, 'VH': vt, 'S': s}, attrs={'full_matrices': False}, ) max_singular_val = helper.create_variable_for_type_inference(dtype) helper.append_op( type='reduce_max', inputs={'X': s}, outputs={'Out': max_singular_val}, attrs={'dim': [-1], 'keep_dim': True, 'reduce_all': False}) rcond = layers.fill_constant(shape=[1], value=rcond, dtype=dtype) cutoff = rcond * max_singular_val y = float('inf') y = layers.fill_constant(shape=[1], value=y, dtype=dtype) condition = s > cutoff cond_int = layers.cast(condition, dtype) cond_not_int = layers.cast(layers.logical_not(condition), dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st = helper.create_variable_for_type_inference(dtype=dtype) st_shape = helper.create_variable_for_type_inference(dtype=dtype) helper.append_op( type='unsqueeze2', inputs={'X': singular}, attrs={'axes': [-2]}, outputs={'Out': st, 'XShape': st_shape}) dims = list(range(len(vt.shape))) perm = dims[:-2] + [dims[-1]] + [dims[-2]] v = helper.create_variable_for_type_inference(dtype) v_shape = helper.create_variable_for_type_inference(dtype) helper.append_op( 
type='transpose2', inputs={'X': [vt]}, outputs={'Out': [v], 'XShape': [v_shape]}, attrs={'axis': perm}) out_1 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='elementwise_mul', inputs={'X': v, 'Y': st}, outputs={'Out': out_1}, attrs={'axis': -1, 'use_mkldnn': False}) out_1 = helper.append_activation(out_1) out_2 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='matmul_v2', inputs={'X': out_1, 'Y': u}, outputs={'Out': out_2}, attrs={'trans_x': False, 'trans_y': True}, ) return out_2 else: helper = LayerHelper('pinv', **locals()) dtype = x.dtype check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'pinv') if dtype == paddle.complex128: s_type = 'float64' elif dtype == paddle.complex64: s_type = 'float32' else: s_type = dtype u = helper.create_variable_for_type_inference(dtype) s = helper.create_variable_for_type_inference(s_type) helper.append_op( type='eigh', inputs={'X': x}, outputs={'Eigenvalues': s, 'Eigenvectors': u}, attrs={'UPLO': 'L'}) s_abs = helper.create_variable_for_type_inference(s_type) helper.append_op( type='abs', inputs={'X': s}, outputs={'Out': s_abs}) max_singular_val = helper.create_variable_for_type_inference(s_type) helper.append_op( type='reduce_max', inputs={'X': s_abs}, outputs={'Out': max_singular_val}, attrs={'dim': [-1], 'keep_dim': True, 'reduce_all': False}) rcond = layers.fill_constant(shape=[1], value=rcond, dtype=s_type) cutoff = rcond * max_singular_val y = float('inf') y = layers.fill_constant(shape=[1], value=y, dtype=s_type) condition = s_abs > cutoff cond_int = layers.cast(condition, s_type) cond_not_int = layers.cast(layers.logical_not(condition), s_type) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st = helper.create_variable_for_type_inference(dtype=s_type) st_shape = helper.create_variable_for_type_inference(dtype=s_type) helper.append_op( type='unsqueeze2', inputs={'X': singular}, attrs={'axes': [-2]}, outputs={'Out': st, 'XShape': st_shape}) out_1 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='elementwise_mul', inputs={'X': u, 'Y': st}, outputs={'Out': out_1}, attrs={'axis': -1, 'use_mkldnn': False}) out_1 = helper.append_activation(out_1) u_conj = helper.create_variable_for_type_inference(dtype) helper.append_op( type='conj', inputs={'X': u}, outputs={'Out': [u_conj]}) out_2 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='matmul_v2', inputs={'X': out_1, 'Y': u_conj}, outputs={'Out': out_2}, attrs={'trans_x': False, 'trans_y': True}, ) return out_2 def solve(x, y, name=None): r""" Computes the solution of a square system of linear equations with a unique solution for input 'X' and 'Y'. Let :math: `X` be a sqaure matrix or a batch of square matrices, :math:`Y` be a vector/matrix or a batch of vectors/matrices, the equation should be: .. math:: Out = X^-1 * Y Specifically, - This system of linear equations has one solution if and only if input 'X' is invertible. Args: x (Tensor): A square matrix or a batch of square matrices. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): A vector/matrix or a batch of vectors/matrices. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. name(str, optional): Name for the operation (optional, default is None). 
For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of a square system of linear equations with a unique solution for input 'x' and 'y'. Its data type should be the same as that of `x`. Examples: .. code-block:: python # a square system of linear equations: # 2*X0 + X1 = 9 # X0 + 2*X1 = 8 import paddle import numpy as np np_x = np.array([[3, 1],[1, 2]]) np_y = np.array([9, 8]) x = paddle.to_tensor(np_x, dtype="float64") y = paddle.to_tensor(np_y, dtype="float64") out = paddle.linalg.solve(x, y) print(out) # [2., 3.]) """ if paddle.in_dynamic_mode(): return _C_ops.solve(x, y) inputs = {"X": [x], "Y": [y]} helper = LayerHelper("solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type="solve", inputs={"X": x, "Y": y}, outputs={"Out": out}) return out def triangular_solve(x, y, upper=True, transpose=False, unitriangular=False, name=None): r""" Computes the solution of a system of equations with a triangular coefficient matrix `x` and multiple right-hand sides `y` . Input `x` and `y` is 2D matrices or batches of 2D matrices. If the inputs are batches, the outputs is also batches. Args: x (Tensor): The input triangular coefficient matrix. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. upper (bool, optional): Whether to solve the upper-triangular system of equations (default) or the lower-triangular system of equations. Default: True. transpose (bool, optional): whether `x` should be transposed before calculation. Default: False. unitriangular (bool, optional): whether `x` is unit triangular. If True, the diagonal elements of `x` are assumed to be 1 and not referenced from `x` . Default: False. name(str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of the system of equations. Its data type should be the same as that of `x`. Examples: .. code-block:: python # a square system of linear equations: # x1 + x2 + x3 = 0 # 2*x2 + x3 = -9 # -x3 = 5 import paddle import numpy as np x = paddle.to_tensor([[1, 1, 1], [0, 2, 1], [0, 0,-1]], dtype="float64") y = paddle.to_tensor([[0], [-9], [5]], dtype="float64") out = paddle.linalg.triangular_solve(x, y, upper=True) print(out) # [7, -2, -5] """ if paddle.in_dynamic_mode(): return _C_ops.triangular_solve(x, y, 'upper', upper, 'transpose', transpose, 'unitriangular', unitriangular) inputs = {"X": [x], "Y": [y]} helper = LayerHelper("triangular_solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'triangular_solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'triangular_solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='triangular_solve', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs={ 'upper': upper, 'transpose': transpose, 'unitriangular': unitriangular }) return out def cholesky_solve(x, y, upper=False, name=None): r""" Solves a linear system of equations A @ X = B, given A's Cholesky factor matrix u and matrix B. Input `x` and `y` is 2D matrices or batches of 2D matrices. 
If the inputs are batches, the outputs is also batches. Args: x (Tensor): The input matrix which is upper or lower triangular Cholesky factor of square matrix A. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. upper (bool, optional): whether to consider the Cholesky factor as a lower or upper triangular matrix. Default: False. name(str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of the system of equations. Its data type is the same as that of `x`. Examples: .. code-block:: python import paddle u = paddle.to_tensor([[1, 1, 1], [0, 2, 1], [0, 0,-1]], dtype="float64") b = paddle.to_tensor([[0], [-9], [5]], dtype="float64") out = paddle.linalg.cholesky_solve(b, u, upper=True) print(out) # [-2.5, -7, 9.5] """ if paddle.in_dynamic_mode(): return _C_ops.cholesky_solve(x, y, 'upper', upper) helper = LayerHelper("cholesky_solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'cholesky_solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'cholesky_solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='cholesky_solve', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs={'upper': upper}) return out def eigvalsh(x, UPLO='L', name=None): """ Computes the eigenvalues of a complex Hermitian (conjugate symmetric) or a real symmetric matrix. Args: x (Tensor): A tensor with shape :math:`[_, M, M]` , The data type of the input Tensor x should be one of float32, float64, complex64, complex128. UPLO(str, optional): Lower triangular part of a (‘L’, default) or the upper triangular part (‘U’). name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor eigenvalues in ascending order. Examples: .. code-block:: python import numpy as np import paddle x_data = np.array([[1, -2j], [2j, 5]]) x = paddle.to_tensor(x_data) out_value = paddle.eigvalsh(x, UPLO='L') print(out_value) #[0.17157288, 5.82842712] """ if paddle.in_dynamic_mode(): is_test = x.stop_gradient values, _ = _C_ops.eigvalsh(x, 'UPLO', UPLO, 'is_test', is_test) return values def __check_input(x, UPLO): x_shape = list(x.shape) if len(x.shape) < 2: raise ValueError( "Input(input) only support >=2 tensor, but received " "length of Input(input) is %s." % len(x.shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The input matrix must be batches of square matrices. But received x's dimention: {}". format(x_shape)) if UPLO != 'L' and UPLO != 'U': raise ValueError( "UPLO must be L or U. 
But received UPLO is: {}".format(UPLO)) __check_input(x, UPLO) helper = LayerHelper('eigvalsh', **locals()) check_variable_and_dtype(x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigvalsh') out_value = helper.create_variable_for_type_inference(dtype=x.dtype) out_vector = helper.create_variable_for_type_inference(dtype=x.dtype) is_test = x.stop_gradient helper.append_op( type='eigvalsh', inputs={'X': x}, outputs={'Eigenvalues': out_value, 'Eigenvectors': out_vector}, attrs={'UPLO': UPLO, 'is_test': is_test}) return out_value def lstsq(x, y, rcond=None, driver=None, name=None): """ Computes a solution to the least squares problem of a system of linear equations. Args: x (Tensor): A tensor with shape ``(*, M, N)`` , the data type of the input Tensor ``x`` should be one of float32, float64. y (Tensor): A tensor with shape ``(*, M, K)`` , the data type of the input Tensor ``y`` should be one of float32, float64. rcond(float, optional): The default value is None. A float pointing number used to determine the effective rank of ``x``. If ``rcond`` is None, it will be set to max(M, N) times the machine precision of x_dtype. driver(str, optional): The default value is None. The name of LAPACK method to be used. For CPU inputs the valid values are ‘gels’, ‘gelsy’, ‘gelsd, ‘gelss’. For CUDA input, the only valid driver is ‘gels’. If ``driver`` is None, ‘gelsy’ is used for CPU inputs and ‘gels’ for CUDA inputs. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tuple: A tuple of 4 Tensors which is (``solution``, ``residuals``, ``rank``, ``singular_values``). ``solution`` is a tensor with shape ``(*, N, K)``, meaning the least squares solution. ``residuals`` is a tensor with shape ``(*, K)``, meaning the squared residuals of the solutions, which is computed when M > N and every matrix in ``x`` is full-rank, otherwise return an empty tensor. ``rank`` is a tensor with shape ``(*)``, meaning the ranks of the matrices in ``x``, which is computed when ``driver`` in (‘gelsy’, ‘gelsd’, ‘gelss’), otherwise return an empty tensor. ``singular_values`` is a tensor with shape ``(*, min(M, N))``, meaning singular values of the matrices in ``x``, which is computed when ``driver`` in (‘gelsd’, ‘gelss’), otherwise return an empty tensor. Examples: .. code-block:: python import paddle paddle.set_device("cpu") x = paddle.to_tensor([[1, 3], [3, 2], [5, 6.]]) y = paddle.to_tensor([[3, 4, 6], [5, 3, 4], [1, 2, 1.]]) results = paddle.linalg.lstsq(x, y, driver="gelsd") print(results[0]) # [[ 0.78350395, -0.22165027, -0.62371236], # [-0.11340097, 0.78866047, 1.14948535]] print(results[1]) # [19.81443405, 10.43814468, 30.56185532]) print(results[2]) # 2 print(results[3]) # [9.03455734, 1.54167950] x = paddle.to_tensor([[10, 2, 3], [3, 10, 5], [5, 6, 12.]]) y = paddle.to_tensor([[4, 2, 9], [2, 0, 3], [2, 5, 3.]]) results = paddle.linalg.lstsq(x, y, driver="gels") print(results[0]) # [[ 0.39386186, 0.10230173, 0.93606132], # [ 0.10741687, -0.29028133, 0.11892585], # [-0.05115091, 0.51918161, -0.19948854]] print(results[1]) # [] """ device = paddle.get_device() if device == "cpu": if driver not in (None, "gels", "gelss", "gelsd", "gelsy"): raise ValueError( "Only support valid driver is 'gels', 'gelss', 'gelsd', 'gelsy' or None for CPU inputs. But got {}". 
format(driver)) driver = "gelsy" if driver is None else driver elif "gpu" in device: if driver not in (None, "gels"): raise ValueError( "Only support valid driver is 'gels' or None for CUDA inputs. But got {}". format(driver)) driver = "gels" if driver is None else driver else: raise RuntimeError("Only support lstsq api for CPU or CUDA device.") if x.dtype == y.dtype and x.dtype in (paddle.float32, paddle.float64): pass else: raise ValueError( "Only support x and y have the same dtype such as 'float32' and 'float64'." ) if rcond is None: if x.dtype == paddle.float32: rcond = 1e-7 * max(x.shape[-2], x.shape[-1]) elif x.dtype == paddle.float64: rcond = 1e-15 * max(x.shape[-2], x.shape[-1]) if paddle.in_dynamic_mode(): solution, rank, singular_values = _C_ops.lstsq(x, y, "rcond", rcond, "driver", driver) if x.shape[-2] > x.shape[-1]: matmul_out = _varbase_creator(dtype=x.dtype) _C_ops.matmul(x, solution, matmul_out, 'trans_x', False, 'trans_y', False) minus_out = _C_ops.elementwise_sub(matmul_out, y) pow_out = _C_ops.pow(minus_out, 'factor', 2) residuals = _C_ops.reduce_sum(pow_out, 'dim', [-2], 'keepdim', False, 'reduce_all', False) else: residuals = paddle.empty(shape=[0], dtype=x.dtype) if driver == "gels": rank = paddle.empty(shape=[0], dtype=paddle.int32) singular_values = paddle.empty(shape=[0], dtype=x.dtype) elif driver == "gelsy": singular_values = paddle.empty(shape=[0], dtype=x.dtype) return solution, residuals, rank, singular_values helper = LayerHelper('lstsq', **locals()) check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq') check_variable_and_dtype( y, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq') solution = helper.create_variable_for_type_inference(dtype=x.dtype) residuals = helper.create_variable_for_type_inference(dtype=x.dtype) rank = helper.create_variable_for_type_inference(dtype=paddle.int32) singular_values = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='lstsq', inputs={'X': x, 'Y': y}, outputs={ 'Solution': solution, 'Rank': rank, 'SingularValues': singular_values }, attrs={'rcond': rcond, 'driver': driver}) matmul_out = helper.create_variable_for_type_inference(dtype=x.dtype) minus_out = helper.create_variable_for_type_inference(dtype=x.dtype) pow_out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matmul_v2', inputs={'X': x, 'Y': solution}, outputs={'Out': matmul_out}, attrs={ 'trans_x': False, 'trans_y': False, }) helper.append_op( type='elementwise_sub', inputs={'X': matmul_out, 'Y': y}, outputs={'Out': minus_out}) helper.append_op( type='pow', inputs={'X': minus_out}, outputs={'Out': pow_out}, attrs={'factor': 2}) helper.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': residuals}, attrs={'dim': [-2], 'keep_dim': False, 'reduce_all': False}) if driver == "gels": rank = paddle.static.data(name='rank', shape=[0]) singular_values = paddle.static.data(name='singular_values', shape=[0]) elif driver == "gelsy": singular_values = paddle.static.data(name='singular_values', shape=[0]) return solution, residuals, rank, singular_values
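# A minimal sketch (illustrative, not part of the original Paddle sources) of the
# relationship the static-graph branch of ``lstsq`` above builds: ``residuals`` is
# the column-wise sum of squares of ``x @ solution - y`` reduced over the row axis
# (-2), and it is only populated when M > N.  The helper name
# ``_lstsq_residuals_sketch`` and the sample data are hypothetical, introduced here
# for illustration only.
def _lstsq_residuals_sketch():
    import paddle

    paddle.set_device("cpu")
    x = paddle.to_tensor([[1., 3.], [3., 2.], [5., 6.]])  # M=3, N=2, so M > N
    y = paddle.to_tensor([[3.], [5.], [1.]])              # K=1 right-hand side
    solution, residuals, rank, singular_values = paddle.linalg.lstsq(
        x, y, driver="gelsd")
    # Recompute the squared residuals by hand; this should agree with
    # ``residuals`` up to floating-point error.
    manual = ((paddle.matmul(x, solution) - y) ** 2).sum(axis=-2)
    return residuals, manual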
def dot(x, y, name=None):
    """
    This operator calculates inner product for vectors.

    .. note::
       Supports 1-D and 2-D Tensors. When it is 2-D, the first dimension of this matrix
       is the batch dimension, which means that the vectors of multiple batches are dotted.

    Parameters:
        x(Tensor): 1-D or 2-D ``Tensor``. Its dtype should be ``float32``, ``float64``, ``int32``, ``int64``
        y(Tensor): 1-D or 2-D ``Tensor``. Its dtype should be ``float32``, ``float64``, ``int32``, ``int64``
        name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. Details: :ref:`api_guide_Name`

    Returns:
        Tensor: the calculated result Tensor.

    Examples:

    .. code-block:: python

        import paddle
        import numpy as np

        x_data = np.random.uniform(0.1, 1, [10]).astype(np.float32)
        y_data = np.random.uniform(1, 3, [10]).astype(np.float32)
        x = paddle.to_tensor(x_data)
        y = paddle.to_tensor(y_data)
        z = paddle.dot(x, y)
        print(z)

    """
    op_type = 'dot'
    # skip var type check in dygraph mode to improve efficiency
    if paddle.in_dynamic_mode():
        op = getattr(_C_ops, op_type)
        return op(x, y)

    assert x is not None, 'x cannot be None in {}'.format(op_type)
    assert y is not None, 'y cannot be None in {}'.format(op_type)

    check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
                             op_type)
    check_variable_and_dtype(y, 'y', ['float32', 'float64', 'int32', 'int64'],
                             op_type)

    helper = LayerHelper(op_type, **locals())
    if name is None:
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
    else:
        out = helper.create_variable(
            name=name, dtype=x.dtype, persistable=False)
    helper.append_op(
        type="dot", inputs={'X': x,
                            'Y': y}, attrs={}, outputs={"Out": out})
    return out
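# A minimal sketch (illustrative, not part of the original sources) of the 1-D and
# batched 2-D behaviour the ``dot`` docstring describes: for 2-D inputs the first
# dimension is treated as the batch dimension, so each pair of rows is dotted
# independently.  The helper name ``_dot_usage_sketch`` is hypothetical.
def _dot_usage_sketch():
    import paddle

    # 1-D inputs: a plain inner product.
    a = paddle.to_tensor([1., 2., 3.])
    b = paddle.to_tensor([4., 5., 6.])
    single = paddle.dot(a, b)      # 1*4 + 2*5 + 3*6 = 32

    # 2-D inputs: one inner product per batch row.
    A = paddle.to_tensor([[1., 2., 3.], [1., 1., 1.]])
    B = paddle.to_tensor([[4., 5., 6.], [2., 2., 2.]])
    batched = paddle.dot(A, B)     # row 0 -> 32, row 1 -> 6
    return single, batched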
871
925
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from ..fluid.layer_helper import LayerHelper from ..framework import _varbase_creator, _dygraph_tracer from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype from ..static import Variable from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode from ..fluid.layers import transpose, cast # noqa: F401 from ..fluid import layers import paddle from paddle.common_ops_import import core from paddle.common_ops_import import VarDesc from paddle import _C_ops __all__ = [] def matmul(x, y, transpose_x=False, transpose_y=False, name=None): """ Applies matrix multiplication to two tensors. `matmul` follows the complete broadcast rules, and its behavior is consistent with `np.matmul`. Currently, the input tensors' number of dimensions can be any, `matmul` can be used to achieve the `dot`, `matmul` and `batchmatmul`. The actual behavior depends on the shapes of :math:`x`, :math:`y` and the flag values of :attr:`transpose_x`, :attr:`transpose_y`. Specifically: - If a transpose flag is specified, the last two dimensions of the tensor are transposed. If the tensor is ndim-1 of shape, the transpose is invalid. If the tensor is ndim-1 of shape :math:`[D]`, then for :math:`x` it is treated as :math:`[1, D]`, whereas for :math:`y` it is the opposite: It is treated as :math:`[D, 1]`. The multiplication behavior depends on the dimensions of `x` and `y`. Specifically: - If both tensors are 1-dimensional, the dot product result is obtained. - If both tensors are 2-dimensional, the matrix-matrix product is obtained. - If the `x` is 1-dimensional and the `y` is 2-dimensional, a `1` is prepended to its dimension in order to conduct the matrix multiply. After the matrix multiply, the prepended dimension is removed. - If the `x` is 2-dimensional and `y` is 1-dimensional, the matrix-vector product is obtained. - If both arguments are at least 1-dimensional and at least one argument is N-dimensional (where N > 2), then a batched matrix multiply is obtained. If the first argument is 1-dimensional, a 1 is prepended to its dimension in order to conduct the batched matrix multiply and removed after. If the second argument is 1-dimensional, a 1 is appended to its dimension for the purpose of the batched matrix multiple and removed after. The non-matrix (exclude the last two dimensions) dimensions are broadcasted according the broadcast rule. For example, if input is a (j, 1, n, m) tensor and the other is a (k, m, p) tensor, out will be a (j, k, n, p) tensor. Args: x (Tensor): The input tensor which is a Tensor. y (Tensor): The input tensor which is a Tensor. transpose_x (bool): Whether to transpose :math:`x` before multiplication. transpose_y (bool): Whether to transpose :math:`y` before multiplication. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The output Tensor. Examples: .. 
code-block:: python import paddle import numpy as np # vector * vector x_data = np.random.random([10]).astype(np.float32) y_data = np.random.random([10]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [1] # matrix * vector x_data = np.random.random([10, 5]).astype(np.float32) y_data = np.random.random([5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10] # batched matrix * broadcasted vector x_data = np.random.random([10, 5, 2]).astype(np.float32) y_data = np.random.random([2]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 5] # batched matrix * batched matrix x_data = np.random.random([10, 5, 2]).astype(np.float32) y_data = np.random.random([10, 2, 5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 5, 5] # batched matrix * broadcasted matrix x_data = np.random.random([10, 1, 5, 2]).astype(np.float32) y_data = np.random.random([1, 3, 2, 5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 3, 5, 5] """ if in_dygraph_mode(): return _C_ops.final_state_matmul(x, y, transpose_x, transpose_y) if _in_legacy_dygraph(): op_type = 'matmul_v2' op = getattr(_C_ops, op_type) return op(x, y, 'trans_x', transpose_x, 'trans_y', transpose_y) attrs = { 'trans_x': transpose_x, 'trans_y': transpose_y, } def __check_input(x, y): var_names = {'x': x, 'y': y} for name, val in var_names.items(): check_variable_and_dtype( val, name, ['float16', 'float32', 'float64', 'complex64', 'complex128'], 'matmul') __check_input(x, y) helper = LayerHelper('matmul_v2', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matmul_v2', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs=attrs) return out def norm(x, p='fro', axis=None, keepdim=False, name=None): """ Returns the matrix norm (Frobenius) or vector norm (the 1-norm, the Euclidean or 2-norm, and in general the p-norm for p > 0) of a given tensor. .. note:: This norm API is different from `numpy.linalg.norm`. This api supports high-order input tensors (rank >= 3), and certain axis need to be pointed out to calculate the norm. But `numpy.linalg.norm` only supports 1-D vector or 2-D matrix as input tensor. For p-order matrix norm, this api actually treats matrix as a flattened vector to calculate the vector norm, NOT REAL MATRIX NORM. Args: x (Tensor): The input tensor could be N-D tensor, and the input data type could be float32 or float64. p (float|string, optional): Order of the norm. Supported values are `fro`, `0`, `1`, `2`, `inf`, `-inf` and any positive real number yielding the corresponding p-norm. Not supported: ord < 0 and nuclear norm. Default value is `fro`. axis (int|list|tuple, optional): The axis on which to apply norm operation. If axis is int or list(int)/tuple(int) with only one element, the vector norm is computed over the axis. If `axis < 0`, the dimension to norm operation is rank(input) + axis. If axis is a list(int)/tuple(int) with two elements, the matrix norm is computed over the axis. Defalut value is `None`. keepdim (bool, optional): Whether to reserve the reduced dimension in the output Tensor. 
The result tensor will have fewer dimension than the :attr:`input` unless :attr:`keepdim` is true, default value is False. name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: results of norm operation on the specified axis of input tensor, it's data type is the same as input's Tensor. Examples: .. code-block:: python import paddle import numpy as np shape=[2, 3, 4] np_input = np.arange(24).astype('float32') - 12 np_input = np_input.reshape(shape) x = paddle.to_tensor(np_input) #[[[-12. -11. -10. -9.] [ -8. -7. -6. -5.] [ -4. -3. -2. -1.]] # [[ 0. 1. 2. 3.] [ 4. 5. 6. 7.] [ 8. 9. 10. 11.]]] # compute frobenius norm along last two dimensions. out_fro = paddle.linalg.norm(x, p='fro', axis=[0,1]) # out_fro.numpy() [17.435596 16.911535 16.7332 16.911535] # compute 2-order vector norm along last dimension. out_pnorm = paddle.linalg.norm(x, p=2, axis=-1) #out_pnorm.numpy(): [[21.118711 13.190906 5.477226] # [ 3.7416575 11.224972 19.131126]] # compute 2-order norm along [0,1] dimension. out_pnorm = paddle.linalg.norm(x, p=2, axis=[0,1]) #out_pnorm.numpy(): [17.435596 16.911535 16.7332 16.911535] # compute inf-order norm out_pnorm = paddle.linalg.norm(x, p=np.inf) #out_pnorm.numpy() = [12.] out_pnorm = paddle.linalg.norm(x, p=np.inf, axis=0) #out_pnorm.numpy(): [[12. 11. 10. 9.] [8. 7. 6. 7.] [8. 9. 10. 11.]] # compute -inf-order norm out_pnorm = paddle.linalg.norm(x, p=-np.inf) #out_pnorm.numpy(): [0.] out_pnorm = paddle.linalg.norm(x, p=-np.inf, axis=0) #out_pnorm.numpy(): [[0. 1. 2. 3.] [4. 5. 6. 5.] [4. 3. 2. 1.]] """ def frobenius_norm(input, dim=None, keepdim=False, name=None): """ The frobenius norm OP is to calculate the frobenius norm of certain two dimensions of Tensor `input`. Args: input (Variable): Tensor, data type float32, float64. dim (list, optional): None for last two dimensions. keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False. """ if dim is not None and not (isinstance(dim, list) and len(dim) == 2): raise ValueError( "The dim of frobenius norm op should be None or two elements list!" ) if paddle.in_dynamic_mode(): if dim is None: return _C_ops.frobenius_norm(input, 'keep_dim', keepdim, 'reduce_all', True) return _C_ops.frobenius_norm(input, 'dim', dim, 'keep_dim', keepdim, 'reduce_all', False) attrs = {'dim': dim, 'keep_dim': keepdim, 'reduce_all': False} if dim is None: attrs['reduce_all'] = True check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'frobenius_norm') helper = LayerHelper('frobenius_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op( type='frobenius_norm', inputs={'X': input}, outputs={'Out': out}, attrs=attrs) return out def vector_norm(input, porder=None, axis=None, keepdim=False, asvector=False, name=None): """ Calculate the p-order vector norm for certain dimension of Tensor `input`. Args: input (Variable): Tensor, data type float32, float64. porder (float, optional): None for porder=2.0. axis (int, optional): None for last dimension. keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False. 
""" if paddle.in_dynamic_mode(): if axis is None: axis = -1 return _C_ops.p_norm(input, 'porder', porder, 'axis', axis, 'keepdim', keepdim, 'asvector', asvector) if porder is not None: check_type(porder, 'porder', (float, int), 'p_norm') if axis is not None: check_type(axis, 'axis', (int), 'p_norm') check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'p_norm') attrs = { 'axis': axis if axis is not None else -1, 'porder': float(porder) if porder is not None else 2.0, 'keepdim': keepdim, 'asvector': asvector, 'epsilon': 1e-12, } helper = LayerHelper('p_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op( type='p_norm', inputs={'X': input}, outputs={'Out': out}, attrs=attrs) return out def inf_norm(input, porder=None, axis=axis, keepdim=False, asvector=False, name=None): helper = LayerHelper('frobenius_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op(type='abs', inputs={'X': input}, outputs={'Out': out}) reduce_out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) reduce_all = True if axis == None or axis == [] or asvector == True else False axis = axis if axis != None and axis != [] else [0] reduce_type = 'reduce_max' if porder == np.float( 'inf') else 'reduce_min' helper.append_op( type=reduce_type, inputs={'X': out}, outputs={'Out': reduce_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) return reduce_out def p_matrix_norm(input, porder=1., axis=axis, keepdim=False, name=None): """ NOTE: This function actually treats the matrix as flattened vector to calculate vector norm instead of matrix norm. """ block = LayerHelper('norm', **locals()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) abs_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='abs', inputs={'X': input}, outputs={'Out': abs_out}) pow_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='pow', inputs={'X': abs_out}, outputs={'Out': pow_out}, attrs={'factor': porder}) sum_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': sum_out}, attrs={ 'dim': axis, 'keep_dim': keepdim, 'reduce_all': True if axis is None else False }) porder block.append_op( type='pow', inputs={'X': sum_out}, outputs={'Out': out}, attrs={'factor': float(1. / porder)}) return out if axis is None and p is not None: if isinstance(p, str): if p == "fro": return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name) else: raise ValueError( "only valid string values are 'fro', found {}".format(p)) elif isinstance(p, (int, float)): return vector_norm( x, porder=p, axis=axis, keepdim=keepdim, asvector=True, name=name) else: raise ValueError("only valid p type is string or float, found {}". 
format(type(p))) if isinstance(axis, tuple): axis = list(axis) if isinstance(axis, list) and len(axis) == 1: axis = axis[0] #calculate vector norm, where axis is int or list with only one integer if isinstance(axis, int): if isinstance(p, str): if p == "fro": return vector_norm( x, porder=2, axis=axis, keepdim=keepdim, asvector=False, name=name) else: raise ValueError( "only valid string values are 'fro', found {}".format(p)) elif isinstance(p, (int, float)): return vector_norm( x, axis=axis, porder=p, keepdim=keepdim, asvector=False, name=name) else: raise ValueError( "unspport p for p-order vector norm. except float, found {}". format(p)) #calculate matrix norm, where axis is list with two integers elif isinstance(axis, list) and len(axis) == 2: if p == "fro": return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name) elif p == np.inf or p == -np.inf: return inf_norm(x, porder=p, axis=axis, keepdim=keepdim, name=name) elif p == 0: raise ValueError( "just suport axis type int or list (length of list <=1) if p = 0, found {}". format(axis)) else: return p_matrix_norm( x, porder=p, axis=axis, keepdim=keepdim, name=name) else: raise ValueError( "except axis type int or list (length of list <=2), found {}". format(axis)) def dist(x, y, p=2, name=None): r""" This OP returns the p-norm of (x - y). It is not a norm in a strict sense, only as a measure of distance. The shapes of x and y must be broadcastable. The definition is as follows, for details, please refer to the `numpy's broadcasting <https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`_: - Each input has at least one dimension. - Match the two input dimensions from back to front, the dimension sizes must either be equal, one of them is 1, or one of them does not exist. Where, z = x - y, the shapes of x and y are broadcastable, then the shape of z can be obtained as follows: 1. If the number of dimensions of x and y are not equal, prepend 1 to the dimensions of the tensor with fewer dimensions. For example, The shape of x is [8, 1, 6, 1], the shape of y is [7, 1, 5], prepend 1 to the dimension of y. x (4-D Tensor): 8 x 1 x 6 x 1 y (4-D Tensor): 1 x 7 x 1 x 5 2. Determine the size of each dimension of the output z: choose the maximum value from the two input dimensions. z (4-D Tensor): 8 x 7 x 6 x 5 If the number of dimensions of the two inputs are the same, the size of the output can be directly determined in step 2. When p takes different values, the norm formula is as follows: When p = 0, defining $0^0=0$, the zero-norm of z is simply the number of non-zero elements of z. .. math:: ||z||_{0}=\lim_{p \\rightarrow 0}\sum_{i=1}^{m}|z_i|^{p} When p = inf, the inf-norm of z is the maximum element of z. .. math:: ||z||_\infty=\max_i |z_i| When p = -inf, the negative-inf-norm of z is the minimum element of z. .. math:: ||z||_{-\infty}=\min_i |z_i| Otherwise, the p-norm of z follows the formula, .. math:: ||z||_{p}=(\sum_{i=1}^{m}|z_i|^p)^{\\frac{1}{p}} Args: x (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64. y (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64. p (float, optional): The norm to be computed, its data type is float32 or float64. Default: 2. Returns: Tensor: Tensor that is the p-norm of (x - y). Examples: .. code-block:: python import paddle import numpy as np x = paddle.to_tensor(np.array([[3, 3],[3, 3]]), "float32") y = paddle.to_tensor(np.array([[3, 3],[3, 1]]), "float32") out = paddle.dist(x, y, 0) print(out) # out = [1.] out = paddle.dist(x, y, 2) print(out) # out = [2.] 
out = paddle.dist(x, y, float("inf")) print(out) # out = [2.] out = paddle.dist(x, y, float("-inf")) print(out) # out = [0.] """ check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'dist') check_variable_and_dtype(y, 'dtype', ['float32', 'float64'], 'dist') check_type(p, 'p', (float, int), 'dist') helper = LayerHelper("dist", **locals()) out = helper.create_variable_for_type_inference(x.dtype) inputs = {"X": [x], "Y": [y]} outputs = {'Out': [out]} attrs = {"p": float(p)} helper.append_op( type='dist', inputs=inputs, outputs={'Out': out}, attrs=attrs) return out def cond(x, p=None, name=None): """ Computes the condition number of a matrix or batches of matrices with respect to a matrix norm ``p``. Args: x (Tensor): The input tensor could be tensor of shape ``(*, m, n)`` where ``*`` is zero or more batch dimensions for ``p`` in ``(2, -2)``, or of shape ``(*, n, n)`` where every matrix is invertible for any supported ``p``. And the input data type could be ``float32`` or ``float64``. p (float|string, optional): Order of the norm. Supported values are `fro`, `nuc`, `1`, `-1`, `2`, `-2`, `inf`, `-inf`. Default value is `None`, meaning that the order of the norm is `2`. name (str, optional): The default value is `None`. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: computing results of condition number, its data type is the same as input Tensor ``x``. Examples: .. code-block:: python import paddle import numpy as np x = paddle.to_tensor([[1., 0, -1], [0, 1, 0], [1, 0, 1]]) # compute conditional number when p is None out = paddle.linalg.cond(x) # out.numpy() [1.4142135] # compute conditional number when order of the norm is 'fro' out_fro = paddle.linalg.cond(x, p='fro') # out_fro.numpy() [3.1622777] # compute conditional number when order of the norm is 'nuc' out_nuc = paddle.linalg.cond(x, p='nuc') # out_nuc.numpy() [9.2426405] # compute conditional number when order of the norm is 1 out_1 = paddle.linalg.cond(x, p=1) # out_1.numpy() [2.] # compute conditional number when order of the norm is -1 out_minus_1 = paddle.linalg.cond(x, p=-1) # out_minus_1.numpy() [1.] # compute conditional number when order of the norm is 2 out_2 = paddle.linalg.cond(x, p=2) # out_2.numpy() [1.4142135] # compute conditional number when order of the norm is -1 out_minus_2 = paddle.linalg.cond(x, p=-2) # out_minus_2.numpy() [0.70710677] # compute conditional number when order of the norm is inf out_inf = paddle.linalg.cond(x, p=np.inf) # out_inf.numpy() [2.] # compute conditional number when order of the norm is -inf out_minus_inf = paddle.linalg.cond(x, p=-np.inf) # out_minus_inf.numpy() [1.] 
a = paddle.to_tensor(np.random.randn(2, 4, 4).astype('float32')) # a.numpy() # [[[ 0.14063153 -0.996288 0.7996131 -0.02571543] # [-0.16303636 1.5534962 -0.49919784 -0.04402903] # [-1.1341571 -0.6022629 0.5445269 0.29154757] # [-0.16816919 -0.30972657 1.7521842 -0.5402487 ]] # [[-0.58081484 0.12402827 0.7229862 -0.55046535] # [-0.15178485 -1.1604939 0.75810957 0.30971205] # [-0.9669573 1.0940945 -0.27363303 -0.35416734] # [-1.216529 2.0018666 -0.7773689 -0.17556527]]] a_cond_fro = paddle.linalg.cond(a, p='fro') # a_cond_fro.numpy() [31.572273 28.120834] b = paddle.to_tensor(np.random.randn(2, 3, 4).astype('float64')) # b.numpy() # [[[ 1.61707487 0.46829144 0.38130416 0.82546736] # [-1.72710298 0.08866375 -0.62518804 0.16128892] # [-0.02822879 -1.67764516 0.11141444 0.3220113 ]] # [[ 0.22524372 0.62474921 -0.85503233 -1.03960523] # [-0.76620689 0.56673047 0.85064753 -0.45158196] # [ 1.47595418 2.23646462 1.5701758 0.10497519]]] b_cond_2 = paddle.linalg.cond(b, p=2) # b_cond_2.numpy() [3.30064451 2.51976252] """ def mat_norm(input, porder=1., axis=None): """ NOTE: Calculate the matrix norm of a square matrix or batches of square matrices, when porder is in (1, -1, inf, -inf) """ reduce_all = True if axis is None or axis == [] else False axis = axis if axis != None and axis != [] else [0] keepdim = False if paddle.in_dynamic_mode(): abs_out = _C_ops.abs(input) sum_out = _C_ops.reduce_sum(abs_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == 1 or porder == np.inf: return _C_ops.reduce_max(sum_out, 'dim', [-1], 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == -1 or porder == -np.inf: return _C_ops.reduce_min(sum_out, 'dim', [-1], 'keepdim', keepdim, 'reduce_all', reduce_all) block = LayerHelper('norm', **locals()) abs_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='abs', inputs={'X': input}, outputs={'Out': abs_out}) block.append_op( type='reduce_sum', inputs={'X': abs_out}, outputs={'Out': sum_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) if porder == 1 or porder == np.inf: block.append_op( type='reduce_max', inputs={'X': sum_out}, outputs={'Out': out}, attrs={ 'dim': [-1], 'keep_dim': keepdim, 'reduce_all': reduce_all }) if porder == -1 or porder == -np.inf: block.append_op( type='reduce_min', inputs={'X': sum_out}, outputs={'Out': out}, attrs={ 'dim': [-1], 'keep_dim': keepdim, 'reduce_all': reduce_all }) return out def fro_norm(input, porder=2, axis=[-1]): """ NOTE: Calculate the frobenius norm of a square matrix or batches of square matrices. """ reduce_all = True if axis is None or axis == [] else False keepdim = False if paddle.in_dynamic_mode(): pow_out = _C_ops.pow(input, 'factor', porder) sum_out_1 = _C_ops.reduce_sum(pow_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) sum_out_2 = _C_ops.reduce_sum(sum_out_1, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) return _C_ops.pow(sum_out_2, 'factor', float(1. 
/ porder)) block = LayerHelper('norm', **locals()) pow_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out_1 = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out_2 = block.create_variable_for_type_inference( dtype=block.input_dtype()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='pow', inputs={'X': input}, outputs={'Out': pow_out}, attrs={'factor': porder}) block.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': sum_out_1}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='reduce_sum', inputs={'X': sum_out_1}, outputs={'Out': sum_out_2}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='pow', inputs={'X': sum_out_2}, outputs={'Out': out}, attrs={'factor': float(1. / porder)}) return out def svd_norm(input, porder, axis=[-1]): """ NOTE: Calculate the matrix norm, which is related to singular values, of a matrix or batches of matrices, including nuclear norm, 2-norm and (-2)-norm. """ reduce_all = True if axis is None or axis == [] else False keepdim = False u, s, vh = svd(input, full_matrices=False) if paddle.in_dynamic_mode(): if porder == "nuc": return _C_ops.reduce_sum(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) max_out = _C_ops.reduce_max(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) min_out = _C_ops.reduce_min(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == 2: return _C_ops.elementwise_div(max_out, min_out, 'aixs', axis, 'use_mkldnn', False) if porder == -2: return _C_ops.elementwise_div(min_out, max_out, 'aixs', axis, 'use_mkldnn', False) block = LayerHelper('norm', **locals()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) if porder == "nuc": block.append_op( type='reduce_sum', inputs={'X': s}, outputs={'Out': out}, attrs={ 'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all }) return out max_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) min_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='reduce_max', inputs={'X': s}, outputs={'Out': max_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='reduce_min', inputs={'X': s}, outputs={'Out': min_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) if porder == 2: block.append_op( type='elementwise_div', inputs={'X': max_out, 'Y': min_out}, outputs={'Out': out}, attrs={'aixs': axis, 'use_mkldnn': False}) return out if porder == -2: block.append_op( type='elementwise_div', inputs={'X': min_out, 'Y': max_out}, outputs={'Out': out}, attrs={'aixs': axis, 'use_mkldnn': False}) return out def empty_tensor(input, shape): if paddle.in_dynamic_mode(): return input.reshape(shape) raise ValueError("only support x is nonempty tensor in static mode") x_shape = list(x.shape) if not len(x_shape) >= 2: raise ValueError("input should be a matrix or batches of matrices, " + "but the dimention of received input is {}".format( len(x_shape))) if p == None: p = 2 x_size = 0 if (0 in x_shape) else 1 if p in ("fro", "nuc", 1, -1, np.inf, -np.inf): if x_shape[len(x_shape) - 1] == x_shape[len(x_shape) - 2]: if x_size == 0: return empty_tensor(x, x_shape[:-2]) x_inv = x.inverse() if p == "fro": return fro_norm(x) * fro_norm(x_inv) if p == "nuc": return svd_norm(x, p) * svd_norm(x_inv, p) if p in (1, -1): return mat_norm( x, 
porder=p, axis=[-2]) * mat_norm( x_inv, porder=p, axis=[-2]) if p in (np.inf, -np.inf): return mat_norm( x, porder=p, axis=[-1]) * mat_norm( x_inv, porder=p, axis=[-1]) else: raise ValueError("only support p is {} when input is a ".format(p) + "square matrix or batches of square matrices") elif p in (2, -2): if x_size == 0: return empty_tensor(x, x_shape[:-2]) return svd_norm(x, porder=p) else: raise ValueError( "unsupported {} for p, only supporting ('fro', 'nuc', ".format( p) + "1, -1, 2, -2, inf, -inf) or none") def dot(x, y, name=None): """ This operator calculates inner product for vectors. .. note:: Support 1-d and 2-d Tensor. When it is 2d, the first dimension of this matrix is the batch dimension, which means that the vectors of multiple batches are dotted. Parameters: x(Tensor): 1-D or 2-D ``Tensor``. Its dtype should be ``float32``, ``float64``, ``int32``, ``int64`` y(Tensor): 1-D or 2-D ``Tensor``. Its dtype soulde be ``float32``, ``float64``, ``int32``, ``int64`` name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. Details: :ref:`api_guide_Name` Returns: Tensor: the calculated result Tensor. Examples: .. code-block:: python import paddle import numpy as np x_data = np.random.uniform(0.1, 1, [10]).astype(np.float32) y_data = np.random.uniform(1, 3, [10]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.dot(x, y) print(z) """ op_type = 'dot' # skip var type check in dygraph mode to improve efficiency if paddle.in_dynamic_mode(): op = getattr(_C_ops, op_type) return op(x, y) assert x is not None, 'x cannot be None in {}'.format(op_type) assert y is not None, 'y cannot be None in {}'.format(op_type) check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], op_type) check_variable_and_dtype(y, 'y', ['float32', 'float64', 'int32', 'int64'], op_type) helper = LayerHelper(op_type, **locals()) if name is None: out = helper.create_variable_for_type_inference(dtype=x.dtype) else: out = helper.create_variable( name=name, dtype=x.dtype, persistable=False) helper.append_op( type="dot", inputs={'X': x, 'Y': y}, attrs={}, outputs={"Out": out}) return out def cov(x, rowvar=True, ddof=True, fweights=None, aweights=None, name=None): """ Estimate the covariance matrix of the input variables, given data and weights. A covariance matrix is a square matrix, indicate the covariance of each pair variables in the input matrix. For example, for an N-dimensional samples X=[x1,x2,…xN]T, then the covariance matrix element Cij is the covariance of xi and xj. The element Cii is the variance of xi itself. Parameters: x(Tensor): A N-D(N<=2) Tensor containing multiple variables and observations. By default, each row of x represents a variable. Also see rowvar below. rowvar(Bool, optional): If rowvar is True (default), then each row represents a variable, with observations in the columns. Default: True ddof(Bool, optional): If ddof=True will return the unbiased estimate, and ddof=False will return the simple average. Default: True fweights(Tensor, optional): 1-D Tensor of integer frequency weights; The number of times each observation vector should be repeated. Default: None aweights(Tensor, optional): 1-D Tensor of observation vector weights. How important of the observation vector, larger data means this element is more important. Default: None name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. 
Details: :ref:`api_guide_Name` Returns: Tensor: The covariance matrix Tensor of the variables. Examples: .. code-block:: python import paddle xt = paddle.rand((3,4)) paddle.linalg.cov(xt) ''' Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, [[0.07918842, 0.06127326, 0.01493049], [0.06127326, 0.06166256, 0.00302668], [0.01493049, 0.00302668, 0.01632146]]) ''' """ op_type = 'cov' if len(x.shape) > 2 or len(x.shape) < 1: raise ValueError( "Input(x) only support N-D (1<=N<=2) tensor in cov, but received " "length of Input(input) is %s." % len(x.shape)) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cov') nx = x if len(x.shape) == 1: nx = x.reshape((1, -1)) if not rowvar and nx.shape[0] != 1: nx = nx.t() w = None observation_num = nx.shape[1] if fweights is not None: w = fweights.astype(nx.dtype) if len(w.shape) > 1: raise ValueError( "Input(fweights) only support N-D (N<=1) tensor in cov, but received " "shape of Input(input) is %s." % len(fweights.shape)) if fweights.shape[0] != observation_num: raise ValueError( "The number of Input(fweights) should equal to x's dim[1]: {}, but received " "size of Input(fweights) is {}.".format(observation_num, fweights.shape[0])) if fweights.min() < 0: raise ValueError( "The value of Input(fweights) cannot be negtive, but received " "min of Input(fweights) is {}.".format(fweights.min())) if not paddle.all(fweights == paddle.round(fweights.astype('float64'))): raise ValueError("Input(fweights) must be integer ") if aweights is not None: aw = aweights.astype(nx.dtype) if len(aw.shape) > 1: raise ValueError( "Input(aweights) only support N-D (N<=1) tensor in cov, but received " "length of Input(input) is %s." % len(aweights.shape)) check_variable_and_dtype(aweights, 'dtype', ['float32', 'float64'], 'cov') if aweights.shape[0] != observation_num: raise ValueError( "The number of Input(aweights) should equal to x's dim[1]: {}, but received " "size of Input(aweights) is {}.".format(observation_num, aweights.shape[0])) if aweights.min() < 0: raise ValueError( "The value of Input(aweights) cannot be negtive, but received " "min of Input(aweights) is {}.".format(aweights.min())) if w is not None: w = w * aw else: w = aw w_sum = paddle.to_tensor(observation_num, dtype=nx.dtype) if fweights is not None or aweights is not None: w_sum = w.sum() if w_sum.item() == 0: raise ValueError("The sum of weights is zero, can't be normalized.") if w is not None: nx_w = nx * w avg = (nx_w).sum(axis=1) / w_sum else: avg = nx.sum(axis=1) / w_sum nx_w = nx if w is not None and aweights is not None and ddof == True: norm_factor = w_sum - (w * aweights).sum() / w_sum else: norm_factor = w_sum - ddof if norm_factor <= 0: norm_factor = paddle.to_tensor(0, dtype=nx.dtype) nx = nx - avg.unsqueeze(1) xxt = paddle.mm(nx, nx_w.t().conj()) cov = paddle.divide(xxt, norm_factor).squeeze() return cov def t(input, name=None): """ Transpose <=2-D tensor. 0-D and 1-D tensors are returned as it is and 2-D tensor is equal to the paddle.transpose function which perm dimensions set 0 and 1. Args: input (Tensor): The input Tensor. It is a N-D (N<=2) Tensor of data types float16, float32, float64, int32. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` Returns: Tensor: A transposed n-D Tensor, with data type being float16, float32, float64, int32, int64. For Example: .. 
code-block:: text # Example 1 (0-D tensor) x = tensor([0.79]) paddle.t(x) = tensor([0.79]) # Example 2 (1-D tensor) x = tensor([0.79, 0.84, 0.32]) paddle.t(x) = tensor([0.79, 0.84, 0.32]) # Example 3 (2-D tensor) x = tensor([0.79, 0.84, 0.32], [0.64, 0.14, 0.57]) paddle.t(x) = tensor([0.79, 0.64], [0.84, 0.14], [0.32, 0.57]) Examples: .. code-block:: python import paddle x = paddle.ones(shape=[2, 3], dtype='int32') x_transposed = paddle.t(x) print(x_transposed.shape) # [3, 2] """ if len(input.shape) > 2: raise ValueError( "Input(input) only support N-D (N<=2) tensor, but received " "length of Input(input) is %s. Perhaps you can use paddle." "tensor.transpose() instead." % len(input.shape)) if paddle.in_dynamic_mode(): if len(input.shape) == 1: return input # 2-D tensor perm = [1, 0] out, _ = _C_ops.transpose2(input, 'axis', perm) return out check_variable_and_dtype( input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'], 'transpose') helper = LayerHelper('t', **locals()) out = helper.create_variable_for_type_inference(input.dtype) input_shape = helper.create_variable_for_type_inference(input.dtype) if len(input.shape) == 1: out = input else: helper.append_op( type='transpose2', inputs={'X': [input]}, outputs={'Out': [out], 'XShape': [input_shape]}, attrs={'axis': [1, 0]}) return out def cross(x, y, axis=None, name=None): """ Computes the cross product between two tensors along an axis. Inputs must have the same shape, and the length of their axes should be equal to 3. If `axis` is not given, it defaults to the first axis found with the length 3. Args: x (Tensor): The first input tensor. y (Tensor): The second input tensor. axis (int, optional): The axis along which to compute the cross product. It defaults to the first axis found with the length 3. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor. A Tensor with same data type as `x`. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0], [3.0, 3.0, 3.0]]) y = paddle.to_tensor([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]) z1 = paddle.cross(x, y) # [[-1. -1. -1.] # [ 2. 2. 2.] # [-1. -1. -1.]] z2 = paddle.cross(x, y, axis=1) # [[0. 0. 0.] # [0. 0. 0.] # [0. 0. 0.]] """ if in_dygraph_mode(): return _C_ops.final_state_cross(x, y, axis) else: if _in_legacy_dygraph(): if axis is not None: return _C_ops.cross(x, y, 'dim', axis) else: return _C_ops.cross(x, y) else: helper = LayerHelper("cross", **locals()) out = helper.create_variable_for_type_inference(x.dtype) attrs = dict() attrs['dim'] = axis helper.append_op( type='cross', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs=attrs) return out def cholesky(x, upper=False, name=None): r""" Computes the Cholesky decomposition of one symmetric positive-definite matrix or batches of symmetric positive-definite matrice. If `upper` is `True`, the decomposition has the form :math:`A = U^{T}U` , and the returned matrix :math:`U` is upper-triangular. Otherwise, the decomposition has the form :math:`A = LL^{T}` , and the returned matrix :math:`L` is lower-triangular. Args: x (Tensor): The input tensor. Its shape should be `[*, M, M]`, where * is zero or more batch dimensions, and matrices on the inner-most 2 dimensions all should be symmetric positive-definite. Its data type should be float32 or float64. upper (bool): The flag indicating whether to return upper or lower triangular matrices. Default: False. 
Returns: Tensor: A Tensor with same shape and data type as `x`. It represents \ triangular matrices generated by Cholesky decomposition. Examples: .. code-block:: python import paddle import numpy as np a = np.random.rand(3, 3) a_t = np.transpose(a, [1, 0]) x_data = np.matmul(a, a_t) + 1e-03 x = paddle.to_tensor(x_data) out = paddle.linalg.cholesky(x, upper=False) print(out) # [[1.190523 0. 0. ] # [0.9906703 0.27676893 0. ] # [1.25450498 0.05600871 0.06400121]] """ if paddle.in_dynamic_mode(): return _C_ops.cholesky(x, "upper", upper) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cholesky') check_type(upper, 'upper', bool, 'cholesky') helper = LayerHelper('cholesky', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='cholesky', inputs={'X': [x]}, outputs={'Out': out}, attrs={'upper': upper}) return out def matrix_rank(x, tol=None, hermitian=False, name=None): r""" Computes the rank of a matrix. The rank of a matrix is the number of singular values that are greater than the specified `tol` threshold when hermitian=False, or the number of eigenvalues in absolute value that are greater than the specified `tol` threshold when hermitian=True. Args: x (Tensor): The input tensor. Its shape should be `[..., m, n]`, where `...` is zero or more batch dimensions. If `x` is a batch of matrices then the output has the same batch dimensions. The data type of `x` should be float32 or float64. tol (float,Tensor,optional): the tolerance value. Default: None. If `tol` is not specified, and `sigma` is the largest singular value (or eigenvalues in absolute value), and `eps` is the epsilon value for the dtype of `x`, then `tol` is computed with formula `tol=sigma * max(m,n) * eps`. Note that if `x` is a batch of matrices, `tol` is computed this way for every batch. hermitian (bool,optional): indicates whether `x` is Hermitian. Default: False. When hermitian=True, `x` is assumed to be Hermitian, enabling a more efficient method for finding eigenvalues, but `x` is not checked inside the function. Instead, We just use the lower triangular of the matrix to compute. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: Rank of tensor x. Examples: .. 
code-block:: python import paddle a = paddle.eye(10) b = paddle.linalg.matrix_rank(a) print(b) # b = [10] c = paddle.ones(shape=[3, 4, 5, 5]) d = paddle.linalg.matrix_rank(c, tol=0.01, hermitian=True) print(d) # d = [[1, 1, 1, 1], # [1, 1, 1, 1], # [1, 1, 1, 1]] """ if paddle.in_dynamic_mode(): if tol is None: tol_tensor = None tol_attr = 0.0 use_default_tol = True elif isinstance(tol, Variable): if tol.dtype != x.dtype: tol_tensor = cast(tol, x.dtype) else: tol_tensor = tol tol_attr = 0.0 use_default_tol = False else: tol_tensor = None tol_attr = float(tol) use_default_tol = False return _C_ops.matrix_rank(x, tol_tensor, "tol", tol_attr, 'hermitian', hermitian, 'use_default_tol', use_default_tol) inputs = {} attrs = {} check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'matrix_rank') inputs['X'] = x if tol is None: attrs['use_default_tol'] = True elif isinstance(tol, Variable): check_variable_and_dtype(tol, 'tol', ['float32'], 'matrix_rank') attrs['use_default_tol'] = False if tol.dtype != x.dtype: inputs['TolTensor'] = cast(tol, x.dtype) else: inputs['TolTensor'] = tol else: check_type(tol, 'tol', float, 'matrix_rank') attrs['use_default_tol'] = False attrs['tol'] = tol check_type(hermitian, 'hermitian', bool, 'matrix_rank') attrs['hermitian'] = hermitian helper = LayerHelper('matrix_rank', **locals()) out = helper.create_variable_for_type_inference(dtype='int32') helper.append_op( type='matrix_rank', inputs=inputs, outputs={'Out': out}, attrs=attrs) return out def bmm(x, y, name=None): """ Applies batched matrix multiplication to two tensors. Both of the two input tensors must be three-dementional and share the same batch size. if x is a (b, m, k) tensor, y is a (b, k, n) tensor, the output will be a (b, m, n) tensor. Args: x (Tensor): The input Tensor. y (Tensor): The input Tensor. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The product Tensor. Examples: .. code-block:: python import paddle # In imperative mode: # size x: (2, 2, 3) and y: (2, 3, 2) x = paddle.to_tensor([[[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]], [[3.0, 3.0, 3.0], [4.0, 4.0, 4.0]]]) y = paddle.to_tensor([[[1.0, 1.0],[2.0, 2.0],[3.0, 3.0]], [[4.0, 4.0],[5.0, 5.0],[6.0, 6.0]]]) out = paddle.bmm(x, y) #output size: (2, 2, 2) #output value: #[[[6.0, 6.0],[12.0, 12.0]],[[45.0, 45.0],[60.0, 60.0]]] out_np = out.numpy() """ x_shape = x.shape y_shape = y.shape if not len(x_shape) == len(y_shape) == 3: raise ValueError( "x and y should be 3-dimensional. But received x's dimention: {}, y's dimention: {}". format(x_shape, y_shape)) if x_shape[2] != y_shape[1]: raise ValueError( "x's width must be equal with y's height. But received x's shape: {}, y's shape: {}". format(x_shape, y_shape)) if x_shape[0] != y_shape[0]: raise ValueError( "x's batch (shape[0]) must be equal with y's batch (shape[0]). But received x's shape: {}, y's shape: {}". format(x_shape, y_shape)) if paddle.in_dynamic_mode(): return _C_ops.bmm(x, y) helper = LayerHelper('bmm', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type='bmm', inputs={'X': x, 'Y': y}, outputs={'Out': out}) return out def histogram(input, bins=100, min=0, max=0, name=None): """ Computes the histogram of a tensor. The elements are sorted into equal width bins between min and max. If min and max are both zero, the minimum and maximum values of the data are used. Args: input (Tensor): A Tensor(or LoDTensor) with shape :math:`[N_1, N_2,..., N_k]` . 
The data type of the input Tensor should be float32, float64, int32, int64. bins (int): number of histogram bins min (int): lower end of the range (inclusive) max (int): upper end of the range (inclusive) Returns: Tensor: data type is int64, shape is (nbins,). Examples: .. code-block:: python import paddle inputs = paddle.to_tensor([1, 2, 1]) result = paddle.histogram(inputs, bins=4, min=0, max=3) print(result) # [0, 2, 1, 0] """ if paddle.in_dynamic_mode(): return _C_ops.histogram(input, "bins", bins, "min", min, "max", max) helper = LayerHelper('histogram', **locals()) check_variable_and_dtype( input, 'X', ['int32', 'int64', 'float32', 'float64'], 'histogram') out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64) helper.append_op( type='histogram', inputs={'X': input}, outputs={'Out': out}, attrs={'bins': bins, 'min': min, 'max': max}) return out def bincount(x, weights=None, minlength=0, name=None): """ Computes frequency of each value in the input tensor. Args: x (Tensor): A Tensor with non-negative integer. Should be 1-D tensor. weights (Tensor, optional): Weight for each value in the input tensor. Should have the same shape as input. Default is None. minlength (int, optional): Minimum number of bins. Should be non-negative integer. Default is 0. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor of frequency. Examples: .. code-block:: python import paddle x = paddle.to_tensor([1, 2, 1, 4, 5]) result1 = paddle.bincount(x) print(result1) # [0, 2, 1, 0, 1, 1] w = paddle.to_tensor([2.1, 0.4, 0.1, 0.5, 0.5]) result2 = paddle.bincount(x, weights=w) print(result2) # [0., 2.19999981, 0.40000001, 0., 0.50000000, 0.50000000] """ if x.dtype not in [paddle.int32, paddle.int64]: raise TypeError("Elements in Input(x) should all be integers") if paddle.in_dynamic_mode(): return _C_ops.bincount(x, weights, "minlength", minlength) helper = LayerHelper('bincount', **locals()) check_variable_and_dtype(x, 'X', ['int32', 'int64'], 'bincount') if weights is not None: check_variable_and_dtype(weights, 'Weights', ['int32', 'int64', 'float32', 'float64'], 'bincount') out = helper.create_variable_for_type_inference(dtype=weights.dtype) else: out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='bincount', inputs={'X': x, 'Weights': weights}, outputs={'Out': out}, attrs={'minlength': minlength}) return out def mv(x, vec, name=None): """ Performs a matrix-vector product of the matrix x and the vector vec. Args: x (Tensor): A tensor with shape :math:`[M, N]` , The data type of the input Tensor x should be one of float32, float64. vec (Tensor): A tensor with shape :math:`[N]` , The data type of the input Tensor x should be one of float32, float64. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor which is producted by x and vec. Examples: .. 
code-block:: python # x: [M, N], vec: [N] # paddle.mv(x, vec) # out: [M] import numpy as np import paddle x_data = np.array([[2, 1, 3], [3, 0, 1]]).astype("float64") x = paddle.to_tensor(x_data) vec_data = np.array([3, 5, 1]) vec = paddle.to_tensor(vec_data).astype("float64") out = paddle.mv(x, vec) """ if in_dygraph_mode(): return _C_ops.final_state_mv(x, vec) else: if _in_legacy_dygraph(): out = _C_ops.mv(x, vec) return out else: def __check_input(x, vec): var_names = {'x': x, 'vec': vec} for name, val in var_names.items(): check_variable_and_dtype(val, name, ['float32', 'float64'], 'mv') x_shape = list(x.shape) vec_shape = list(vec.shape) if len(x_shape) != 2: raise ValueError( "x should be 2-dimensional. But received x's dimention: {}". format(x_shape)) if len(vec_shape) != 1: raise ValueError( "vec should be 1-dimensional. But received vec's dimention: {}". format(vec_shape)) __check_input(x, vec) helper = LayerHelper('mv', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='mv', inputs={'X': x, 'Vec': vec}, outputs={'Out': out}) return out def det(x, name=None): """ Calculates determinant value of a square matrix or batches of square matrices. Args: x (Tensor): input (Tensor): the input matrix of size `(n, n)` or the batch of matrices of size `(*, n, n)` where `*` is one or more batch dimensions. Returns: y (Tensor):the determinant value of a square matrix or batches of square matrices. Examples: .. code-block:: python import paddle x = paddle.randn([3,3,3]) A = paddle.linalg.det(x) print(A) # [ 0.02547996, 2.52317095, -6.15900707]) """ if paddle.in_dynamic_mode(): return _C_ops.determinant(x) check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'det') input_shape = list(x.shape) assert len(input_shape) >= 2, \ "The x must be at least 2-dimensional, " \ "but received Input x's dimensional: %s.\n" % \ len(input_shape) assert (input_shape[-1] == input_shape[-2]), \ "Expect squared input," \ "but received %s by %s matrix.\n" \ %(input_shape[-2], input_shape[-1]) \ helper = LayerHelper('determinant', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='determinant', inputs={'Input': [x]}, outputs={'Out': [out]}) return out def slogdet(x, name=None): """ Calculates the sign and natural logarithm of the absolute value of a square matrix's or batches square matrices' determinant. The determinant can be computed with ``sign * exp(logabsdet) Supports input of float, double Note that for matrices that have zero determinant, this returns ``(0, -inf)`` Args: x (Tensor): the batch of matrices of size :math:`(*, n, n)` where math:`*` is one or more batch dimensions. Returns: y (Tensor): A tensor containing the sign of the determinant and the natural logarithm of the absolute value of determinant, respectively. Examples: .. code-block:: python import paddle x = paddle.randn([3,3,3]) A = paddle.linalg.slogdet(x) print(A) # [[ 1. , 1. , -1. 
], # [-0.98610914, -0.43010661, -0.10872950]]) """ if paddle.in_dynamic_mode(): return _C_ops.slogdeterminant(x) check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'slogdet') input_shape = list(x.shape) assert len(input_shape) >= 2, \ "The x must be at least 2-dimensional, " \ "but received Input x's dimensional: %s.\n" % \ len(input_shape) assert (input_shape[-1] == input_shape[-2]), \ "Expect squared input," \ "but received %s by %s matrix.\n" \ %(input_shape[-2], input_shape[-1]) \ helper = LayerHelper('slogdeterminant', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='slogdeterminant', inputs={'Input': [x]}, outputs={'Out': [out]}) return out def svd(x, full_matrices=False, name=None): r""" Computes the singular value decomposition of one matrix or a batch of regular matrices. Let :math:`X` be the input matrix or a batch of input matrices, the output should satisfies: .. math:: X = U * diag(S) * VT Args: x (Tensor): The input tensor. Its shape should be `[..., N, M]`, where `...` is zero or more batch dimensions. N and M can be arbitraty positive number. Note that if x is sigular matrices, the grad is numerical instable. The data type of x should be float32 or float64. full_matrices (bool): A flag to control the behavor of svd. If full_matrices = True, svd op will compute full U and V matrics, which means shape of U is `[..., N, N]`, shape of V is `[..., M, M]`. K = min(M, N). If full_matrices = False, svd op will use a economic method to store U and V. which means shape of U is `[..., N, K]`, shape of V is `[..., M, K]`. K = min(M, N). name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tuple of 3 tensors: (U, S, VH). VH is the conjugate transpose of V. S is the singlar value vectors of matrics with shape `[..., K]` Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [1.0, 3.0], [4.0, 6.0]]).astype('float64') x = x.reshape([3, 2]) u, s, vh = paddle.linalg.svd(x) print (u) #U = [[ 0.27364809, -0.21695147 ], # [ 0.37892198, -0.87112408 ], # [ 0.8840446 , 0.44053933 ]] print (s) #S = [8.14753743, 0.78589688] print (vh) #VT= [[ 0.51411221, 0.85772294], # [ 0.85772294, -0.51411221]] # one can verify : U * S * VT == X # U * UH == I # V * VH == I """ if paddle.in_dynamic_mode(): return _C_ops.svd(x, 'full_matrices', full_matrices) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'svd') check_type(full_matrices, 'full_matrices', bool, 'svd') helper = LayerHelper('svd', **locals()) u = helper.create_variable_for_type_inference(dtype=x.dtype) vh = helper.create_variable_for_type_inference(dtype=x.dtype) s = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['full_matrices'] = full_matrices helper.append_op( type='svd', inputs={'X': [x]}, outputs={'U': u, 'VH': vh, 'S': s}, attrs=attrs, ) return u, s, vh def matrix_power(x, n, name=None): r""" Computes the n-th power of a square matrix or a batch of square matrices. Let :math:`X` be a sqaure matrix or a batch of square matrices, :math:`n` be an exponent, the equation should be: .. math:: Out = X ^ {n} Specifically, - If `n > 0`, it returns the matrix or a batch of matrices raised to the power of `n`. - If `n = 0`, it returns the identity matrix or a batch of identity matrices. - If `n < 0`, it returns the inverse of each matrix (if invertible) raised to the power of `abs(n)`. 
Args: x (Tensor): A square matrix or a batch of square matrices to be raised to power `n`. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. n (int): The exponent. It can be any positive, negative integer or zero. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The n-th power of the matrix (or the batch of matrices) `x`. Its data type should be the same as that of `x`. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1, 2, 3], [1, 4, 9], [1, 8, 27]], dtype='float64') print(paddle.linalg.matrix_power(x, 2)) # [[6. , 34. , 102.], # [14. , 90. , 282.], # [36. , 250., 804.]] print(paddle.linalg.matrix_power(x, 0)) # [[1., 0., 0.], # [0., 1., 0.], # [0., 0., 1.]] print(paddle.linalg.matrix_power(x, -2)) # [[ 12.91666667, -12.75000000, 2.83333333 ], # [-7.66666667 , 8. , -1.83333333 ], # [ 1.80555556 , -1.91666667 , 0.44444444 ]] """ if paddle.in_dynamic_mode(): return _C_ops.matrix_power(x, "n", n) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'matrix_power') check_type(n, 'n', int, 'matrix_power') helper = LayerHelper('matrix_power', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matrix_power', inputs={'X': x}, outputs={'Out': out}, attrs={'n': n}) return out def qr(x, mode="reduced", name=None): r""" Computes the QR decomposition of one matrix or batches of matrice (backward is unsupported now). Args: x (Tensor): The input tensor. Its shape should be `[..., M, N]`, where ... is zero or more batch dimensions. M and N can be arbitrary positive number. The data type of x should be float32 or float64. mode (str, optional): A flag to control the behavior of qr, the default is "reduced". Suppose x's shape is `[..., M, N]` and denoting `K = min(M, N)`: If mode = "reduced", qr op will return reduced Q and R matrices, which means Q's shape is `[..., M, K]` and R's shape is `[..., K, N]`. If mode = "complete", qr op will return complete Q and R matrices, which means Q's shape is `[..., M, M]` and R's shape is `[..., M, N]`. If mode = "r", qr op will only return reduced R matrix, which means R's shape is `[..., K, N]`. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: If mode = "reduced" or mode = "complete", qr will return a two tensor-tuple, which represents Q and R. If mode = "r", qr will return a tensor which represents R. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') q, r = paddle.linalg.qr(x) print (q) print (r) # Q = [[-0.16903085, 0.89708523], # [-0.50709255, 0.27602622], # [-0.84515425, -0.34503278]]) # R = [[-5.91607978, -7.43735744], # [ 0. 
, 0.82807867]]) # one can verify : X = Q * R ; """ if paddle.in_dynamic_mode(): q, r = _C_ops.qr(x, 'mode', mode) if mode == "r": return r else: return q, r check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'qr') check_type(mode, 'mode', str, 'qr') helper = LayerHelper('qr', **locals()) q = helper.create_variable_for_type_inference(dtype=x.dtype) r = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['mode'] = mode helper.append_op( type='qr', inputs={'X': [x]}, outputs={'Q': q, 'R': r}, attrs=attrs) if mode == "r": return r else: return q, r def lu(x, pivot=True, get_infos=False, name=None): r""" Computes the LU factorization of an N-D(N>=2) matrix x. Returns the LU factorization(inplace x) and Pivots. low triangular matrix L and upper triangular matrix U are combined to a single LU matrix. Pivoting is done if pivot is set to True. P mat can be get by pivots: # ones = eye(rows) #eye matrix of rank rows # for i in range(cols): # swap(ones[i], ones[pivots[i]]) # return ones Args: X (Tensor): the tensor to factor of N-dimensions(N>=2). pivot (bool, optional): controls whether pivoting is done. Default: True. get_infos (bool, optional): if set to True, returns an info IntTensor. Default: False. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: factorization (Tensor): LU matrix, the factorization of input X. pivots (IntTensor): the pivots of size(∗(N-2), min(m,n)). `pivots` stores all the intermediate transpositions of rows. The final permutation `perm` could be reconstructed by this, details refer to upper example. infos (IntTensor, optional): if `get_infos` is `True`, this is a tensor of size (∗(N-2)) where non-zero values indicate whether factorization for the matrix or each minibatch has succeeded or failed. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') lu,p,info = paddle.linalg.lu(x, get_infos=True) # >>> lu: # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0.20000000, 0.80000000], # [0.60000000, 0.50000000]]) # >>> p # Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # [3, 3]) # >>> info # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # 0) P,L,U = paddle.linalg.lu_unpack(lu,p) # >>> P # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[0., 1., 0.], # [0., 0., 1.], # [1., 0., 0.]]), # >>> L # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[1. , 0. ], # [0.20000000, 1. ], # [0.60000000, 0.50000000]]), # >>> U # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0. 
, 0.80000000]])) # one can verify : X = P @ L @ U ; """ if paddle.in_dynamic_mode(): LU, Piv, Info = _C_ops.lu(x, 'pivots', pivot) if get_infos: return LU, Piv, Info else: return LU, Piv check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu') helper = LayerHelper('lu', **locals()) lu = helper.create_variable_for_type_inference(dtype=x.dtype) p = helper.create_variable_for_type_inference(dtype='int') info = helper.create_variable_for_type_inference(dtype='int') attrs = dict() attrs['pivots'] = pivot helper.append_op( type='lu', inputs={'X': x}, outputs={'Out': lu, 'Pivots': p, 'Infos': info}, attrs=attrs) if get_infos: return lu, p, info else: return lu, p def lu_unpack(x, y, unpack_ludata=True, unpack_pivots=True, name=None): r""" Unpack L U and P to single matrix tensor . unpack L and U matrix from LU, unpack permutation matrix P from Pivtos . P mat can be get by pivots: # ones = eye(rows) #eye matrix of rank rows # for i in range(cols): # swap(ones[i], ones[pivots[i]]) Args: x (Tensor): The LU tensor get from paddle.linalg.lu, which is combined by L and U. y (Tensor): Pivots get from paddle.linalg.lu. unpack_ludata (bool,optional): whether to unpack L and U from x. Default: True. unpack_pivots (bool, optional): whether to unpack permutation matrix P from Pivtos. Default: True. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: P (Tensor): Permutation matrix P of lu factorization. L (Tensor): The lower triangular matrix tensor of lu factorization. U (Tensor): The upper triangular matrix tensor of lu factorization. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') lu,p,info = paddle.linalg.lu(x, get_infos=True) # >>> lu: # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0.20000000, 0.80000000], # [0.60000000, 0.50000000]]) # >>> p # Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # [3, 3]) # >>> info # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # 0) P,L,U = paddle.linalg.lu_unpack(lu,p) # >>> P # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[0., 1., 0.], # [0., 0., 1.], # [1., 0., 0.]]), # >>> L # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[1. , 0. ], # [0.20000000, 1. ], # [0.60000000, 0.50000000]]), # >>> U # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0. , 0.80000000]])) # one can verify : X = P @ L @ U ; """ if paddle.in_dynamic_mode(): P, L, U = _C_ops.lu_unpack(x, y, 'unpack_ludata', unpack_ludata, 'unpack_pivots', unpack_pivots) return P, L, U check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu_unpack') helper = LayerHelper('lu_unpack', **locals()) p = helper.create_variable_for_type_inference(dtype=x.dtype) l = helper.create_variable_for_type_inference(dtype=x.dtype) u = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['unpack_ludata'] = unpack_ludata attrs['unpack_pivots'] = unpack_pivots helper.append_op( type='lu_unpack', inputs={'X': x, 'Pivots': y}, outputs={'Pmat': p, 'L': l, 'U': u}, attrs=attrs) return p, l, u def eig(x, name=None): """ This API performs the eigenvalue decomposition of a square matrix or a batch of square matrices. .. 
note:: If the matrix is a Hermitian or a real symmetric matrix, please use :ref:`paddle.linalg.eigh` instead, which is much faster. If only eigenvalues is needed, please use :ref:`paddle.linalg.eigvals` instead. If the matrix is of any shape, please use :ref:`paddle.linalg.svd`. This API is only supported on CPU device. The output datatype is always complex for both real and complex input. Args: x (Tensor): A tensor with shape math:`[*, N, N]`, The data type of the x should be one of ``float32``, ``float64``, ``compplex64`` or ``complex128``. name (str, optional): The default value is `None`. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Eigenvalues(Tensors): A tensor with shape math:`[*, N]` refers to the eigen values. Eigenvectors(Tensors): A tensor with shape math:`[*, N, N]` refers to the eigen vectors. Examples: .. code-block:: python import paddle import numpy as np paddle.device.set_device("cpu") x_data = np.array([[1.6707249, 7.2249975, 6.5045543], [9.956216, 8.749598, 6.066444 ], [4.4251957, 1.7983172, 0.370647 ]]).astype("float32") x = paddle.to_tensor(x_data) w, v = paddle.linalg.eig(x) print(w) # Tensor(shape=[3, 3], dtype=complex128, place=CPUPlace, stop_gradient=False, # [[(-0.5061363550800655+0j) , (-0.7971760990842826+0j) , # (0.18518077798279986+0j)], # [(-0.8308237755993192+0j) , (0.3463813401919749+0j) , # (-0.6837005269141947+0j) ], # [(-0.23142567697893396+0j), (0.4944999840400175+0j) , # (0.7058765252952796+0j) ]]) print(v) # Tensor(shape=[3], dtype=complex128, place=CPUPlace, stop_gradient=False, # [ (16.50471283351188+0j) , (-5.5034820550763515+0j) , # (-0.21026087843552282+0j)]) """ if paddle.in_dynamic_mode(): w, v = _C_ops.eig(x) return w, v check_variable_and_dtype( x, 'X', ['float32', 'float64', 'complex64', 'complex128'], 'eig') helper = LayerHelper('eig', **locals()) w = helper.create_variable_for_type_inference(x.dtype) v = helper.create_variable_for_type_inference(x.dtype) inputs = {'X': x} outputs = {'Eigenvalues': w, 'Eigenvectors': v} helper.append_op(type='eig', inputs=inputs, outputs=outputs) return w, v def eigvals(x, name=None): """ Compute the eigenvalues of one or more general matrices. Warning: The gradient kernel of this operator does not yet developed. If you need back propagation through this operator, please replace it with paddle.linalg.eig. Args: x (Tensor): A square matrix or a batch of square matrices whose eigenvalues will be computed. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32, float64, complex64, or complex128. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: A tensor containing the unsorted eigenvalues which has the same batch dimensions with `x`. The eigenvalues are complex-valued even when `x` is real. Examples: .. 
code-block:: python import paddle paddle.set_device("cpu") paddle.seed(1234) x = paddle.rand(shape=[3, 3], dtype='float64') # [[0.02773777, 0.93004224, 0.06911496], # [0.24831591, 0.45733623, 0.07717843], # [0.48016702, 0.14235102, 0.42620817]]) print(paddle.linalg.eigvals(x)) # [(-0.27078833542132674+0j), (0.29962280156230725+0j), (0.8824477020120244+0j)] #complex128 """ check_variable_and_dtype(x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigvals') x_shape = list(x.shape) if len(x_shape) < 2: raise ValueError( "The dimension of Input(x) should be at least 2, but received x's dimention = {}, x's shape = {}". format(len(x_shape), x_shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The last two dimensions of Input(x) should be equal, but received x's shape = {}". format(x_shape)) if paddle.in_dynamic_mode(): return _C_ops.eigvals(x) helper = LayerHelper('eigvals', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type='eigvals', inputs={'X': x}, outputs={'Out': out}) return out def multi_dot(x, name=None): """ Multi_dot is an operator that calculates multiple matrix multiplications. Supports inputs of float16(only GPU support), float32 and float64 dtypes. This function does not support batched inputs. The input tensor in [x] must be 2-D except for the first and last can be 1-D. If the first tensor is a 1-D vector of shape(n, ) it is treated as row vector of shape(1, n), similarly if the last tensor is a 1D vector of shape(n, ), it is treated as a column vector of shape(n, 1). If the first and last tensor are 2-D matrix, then the output is also 2-D matrix, otherwise the output is a 1-D vector. Multi_dot will select the lowest cost multiplication order for calculation. The cost of multiplying two matrices with shapes (a, b) and (b, c) is a * b * c. Given matrices A, B, C with shapes (20, 5), (5, 100), (100, 10) respectively, we can calculate the cost of different multiplication orders as follows: - Cost((AB)C) = 20x5x100 + 20x100x10 = 30000 - Cost(A(BC)) = 5x100x10 + 20x5x10 = 6000 In this case, multiplying B and C first, then multiply A, which is 5 times faster than sequential calculation. Args: x ([Tensor]): The input tensors which is a list Tensor. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The output Tensor. Examples: .. 
code-block:: python import paddle import numpy as np # A * B A_data = np.random.random([3, 4]).astype(np.float32) B_data = np.random.random([4, 5]).astype(np.float32) A = paddle.to_tensor(A_data) B = paddle.to_tensor(B_data) out = paddle.linalg.multi_dot([A, B]) print(out.numpy().shape) # [3, 5] # A * B * C A_data = np.random.random([10, 5]).astype(np.float32) B_data = np.random.random([5, 8]).astype(np.float32) C_data = np.random.random([8, 7]).astype(np.float32) A = paddle.to_tensor(A_data) B = paddle.to_tensor(B_data) C = paddle.to_tensor(C_data) out = paddle.linalg.multi_dot([A, B, C]) print(out.numpy().shape) # [10, 7] """ if paddle.in_dynamic_mode(): return _C_ops.multi_dot(x) check_type(x, 'x', (list, tuple), 'multi_dot') for id, item in enumerate(x): check_variable_and_dtype(item, 'x[' + str(id) + ']', ['float16', 'float32', 'float64'], 'multi_dot') if item.dtype != x[0].dtype: raise TypeError( "All the Tensors in the input must have the same data type.") helper = LayerHelper('multi_dot', **locals()) dtype = helper.input_dtype(input_param_name='x') out = helper.create_variable_for_type_inference(dtype) helper.append_op(type='multi_dot', inputs={"X": x}, outputs={"Out": out}) return out def eigh(x, UPLO='L', name=None): """ Compute the eigenvalues and eigenvectors of a complex Hermitian (conjugate symmetric) or a real symmetric matrix. Args: x (Tensor): A tensor with shape :math:`[*, N, N]` , The data type of the input Tensor x should be one of float32, float64, complex64, complex128. UPLO(str, optional): (string, default 'L'), 'L' represents the lower triangular matrix, "'U' represents the upper triangular matrix.". name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: out_value(Tensor): A Tensor with shape [*, N] and data type of float32 and float64. The eigenvalues of eigh op. out_vector(Tensor): A Tensor with shape [*, N, N] and data type of float32,float64,complex64 and complex128. The eigenvectors of eigh op. Examples: .. code-block:: python import numpy as np import paddle x_data = np.array([[1, -2j], [2j, 5]]) x = paddle.to_tensor(x_data) out_value, out_vector = paddle.linalg.eigh(x, UPLO='L') print(out_value) #[0.17157288, 5.82842712] print(out_vector) #[(-0.9238795325112867+0j), (-0.3826834323650898+0j)], #[ 0.3826834323650898j , -0.9238795325112867j ]] """ if paddle.in_dynamic_mode(): return _C_ops.eigh(x, 'UPLO', UPLO) def __check_input(x, UPLO): x_shape = list(x.shape) if len(x.shape) < 2: raise ValueError( "Input(input) only support >=2 tensor, but received " "length of Input(input) is %s." % len(x.shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The input matrix must be batches of square matrices. But received x's dimention: {}". format(x_shape)) if UPLO != 'L' and UPLO != 'U': raise ValueError( "UPLO must be L or U. 
But received UPLO is: {}".format(UPLO)) __check_input(x, UPLO) helper = LayerHelper('eigh', **locals()) check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigh') out_value = helper.create_variable_for_type_inference(dtype=x.dtype) out_vector = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='eigh', inputs={'X': x}, outputs={'Eigenvalues': out_value, 'Eigenvectors': out_vector}, attrs={'UPLO': UPLO}) return out_value, out_vector def pinv(x, rcond=1e-15, hermitian=False, name=None): r""" Calculate pseudo inverse via SVD(singular value decomposition) of one matrix or batches of regular matrix. .. math:: if hermitian == False: x = u * s * vt (SVD) out = v * 1/s * ut else: x = u * s * ut (eigh) out = u * 1/s * u.conj().transpose(-2,-1) If x is hermitian or symmetric matrix, svd will be replaced with eigh. Args: x(Tensor): The input tensor. Its shape should be (*, m, n) where * is zero or more batch dimensions. m and n can be arbitraty positive number. The data type of x should be float32 or float64 or complex64 or complex128. When data type is complex64 or cpmplex128, hermitian should be set True. rcond(Tensor, optional): the tolerance value to determine when is a singular value zero. Defalut:1e-15. hermitian(bool, optional): indicates whether x is Hermitian if complex or symmetric if real. Default: False. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The tensor with same data type with x. it represents pseudo inverse of x. Its shape should be (*, n, m). Examples: .. code-block:: python import paddle x = paddle.arange(15).reshape((3, 5)).astype('float64') input = paddle.to_tensor(x) out = paddle.linalg.pinv(input) print(input) print(out) # input: # [[0. , 1. , 2. , 3. , 4. ], # [5. , 6. , 7. , 8. , 9. 
], # [10., 11., 12., 13., 14.]] # out: # [[-0.22666667, -0.06666667, 0.09333333], # [-0.12333333, -0.03333333, 0.05666667], # [-0.02000000, 0.00000000, 0.02000000], # [ 0.08333333, 0.03333333, -0.01666667], # [ 0.18666667, 0.06666667, -0.05333333]] # one can verify : x * out * x = x ; # or out * x * out = x ; """ if paddle.in_dynamic_mode(): if not hermitian: # combine svd and matmul op u, s, vt = _C_ops.svd(x, 'full_matrices', False) max_singular_val = _C_ops.reduce_max(s, 'dim', [-1], 'keep_dim', True, \ 'reduce_all', False) rcond = paddle.to_tensor(rcond, dtype=x.dtype) cutoff = rcond * max_singular_val y = float('inf') y = paddle.to_tensor(y, dtype=x.dtype) condition = s > cutoff cond_int = layers.cast(condition, s.dtype) cond_not_int = layers.cast(layers.logical_not(condition), s.dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st, _ = _C_ops.unsqueeze2(singular, 'axes', [-2]) dims = list(range(len(vt.shape))) perm = dims[:-2] + [dims[-1]] + [dims[-2]] v, _ = _C_ops.transpose2(vt, 'axis', perm) out_1 = v * st out_2 = _C_ops.matmul_v2(out_1, u, 'trans_x', False, 'trans_y', True) return out_2 else: # combine eigh and matmul op s, u = _C_ops.eigh(x, 'UPLO', 'L') s_abs = paddle.abs(s) max_singular_val = _C_ops.reduce_max(s_abs, 'dim', [-1], 'keep_dim', True, \ 'reduce_all', False) rcond = paddle.to_tensor(rcond, dtype=s.dtype) cutoff = rcond * max_singular_val y = float('inf') y = paddle.to_tensor(y, dtype=s.dtype) condition = s_abs > cutoff cond_int = layers.cast(condition, s.dtype) cond_not_int = layers.cast(layers.logical_not(condition), s.dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st, _ = _C_ops.unsqueeze2(singular, 'axes', [-2]) out_1 = u * st u_conj = _C_ops.conj(u) out_2 = _C_ops.matmul_v2(out_1, u_conj, 'trans_x', False, 'trans_y', True) return out_2 else: if not hermitian: helper = LayerHelper('pinv', **locals()) dtype = x.dtype check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'pinv') u = helper.create_variable_for_type_inference(dtype) s = helper.create_variable_for_type_inference(dtype) vt = helper.create_variable_for_type_inference(dtype) helper.append_op( type='svd', inputs={'X': [x]}, outputs={'U': u, 'VH': vt, 'S': s}, attrs={'full_matrices': False}, ) max_singular_val = helper.create_variable_for_type_inference(dtype) helper.append_op( type='reduce_max', inputs={'X': s}, outputs={'Out': max_singular_val}, attrs={'dim': [-1], 'keep_dim': True, 'reduce_all': False}) rcond = layers.fill_constant(shape=[1], value=rcond, dtype=dtype) cutoff = rcond * max_singular_val y = float('inf') y = layers.fill_constant(shape=[1], value=y, dtype=dtype) condition = s > cutoff cond_int = layers.cast(condition, dtype) cond_not_int = layers.cast(layers.logical_not(condition), dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st = helper.create_variable_for_type_inference(dtype=dtype) st_shape = helper.create_variable_for_type_inference(dtype=dtype) helper.append_op( type='unsqueeze2', inputs={'X': singular}, attrs={'axes': [-2]}, outputs={'Out': st, 'XShape': st_shape}) dims = list(range(len(vt.shape))) perm = dims[:-2] + [dims[-1]] + [dims[-2]] v = helper.create_variable_for_type_inference(dtype) v_shape = helper.create_variable_for_type_inference(dtype) helper.append_op( 
type='transpose2', inputs={'X': [vt]}, outputs={'Out': [v], 'XShape': [v_shape]}, attrs={'axis': perm}) out_1 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='elementwise_mul', inputs={'X': v, 'Y': st}, outputs={'Out': out_1}, attrs={'axis': -1, 'use_mkldnn': False}) out_1 = helper.append_activation(out_1) out_2 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='matmul_v2', inputs={'X': out_1, 'Y': u}, outputs={'Out': out_2}, attrs={'trans_x': False, 'trans_y': True}, ) return out_2 else: helper = LayerHelper('pinv', **locals()) dtype = x.dtype check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'pinv') if dtype == paddle.complex128: s_type = 'float64' elif dtype == paddle.complex64: s_type = 'float32' else: s_type = dtype u = helper.create_variable_for_type_inference(dtype) s = helper.create_variable_for_type_inference(s_type) helper.append_op( type='eigh', inputs={'X': x}, outputs={'Eigenvalues': s, 'Eigenvectors': u}, attrs={'UPLO': 'L'}) s_abs = helper.create_variable_for_type_inference(s_type) helper.append_op( type='abs', inputs={'X': s}, outputs={'Out': s_abs}) max_singular_val = helper.create_variable_for_type_inference(s_type) helper.append_op( type='reduce_max', inputs={'X': s_abs}, outputs={'Out': max_singular_val}, attrs={'dim': [-1], 'keep_dim': True, 'reduce_all': False}) rcond = layers.fill_constant(shape=[1], value=rcond, dtype=s_type) cutoff = rcond * max_singular_val y = float('inf') y = layers.fill_constant(shape=[1], value=y, dtype=s_type) condition = s_abs > cutoff cond_int = layers.cast(condition, s_type) cond_not_int = layers.cast(layers.logical_not(condition), s_type) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st = helper.create_variable_for_type_inference(dtype=s_type) st_shape = helper.create_variable_for_type_inference(dtype=s_type) helper.append_op( type='unsqueeze2', inputs={'X': singular}, attrs={'axes': [-2]}, outputs={'Out': st, 'XShape': st_shape}) out_1 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='elementwise_mul', inputs={'X': u, 'Y': st}, outputs={'Out': out_1}, attrs={'axis': -1, 'use_mkldnn': False}) out_1 = helper.append_activation(out_1) u_conj = helper.create_variable_for_type_inference(dtype) helper.append_op( type='conj', inputs={'X': u}, outputs={'Out': [u_conj]}) out_2 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='matmul_v2', inputs={'X': out_1, 'Y': u_conj}, outputs={'Out': out_2}, attrs={'trans_x': False, 'trans_y': True}, ) return out_2 def solve(x, y, name=None): r""" Computes the solution of a square system of linear equations with a unique solution for input 'X' and 'Y'. Let :math: `X` be a sqaure matrix or a batch of square matrices, :math:`Y` be a vector/matrix or a batch of vectors/matrices, the equation should be: .. math:: Out = X^-1 * Y Specifically, - This system of linear equations has one solution if and only if input 'X' is invertible. Args: x (Tensor): A square matrix or a batch of square matrices. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): A vector/matrix or a batch of vectors/matrices. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. name(str, optional): Name for the operation (optional, default is None). 
For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of a square system of linear equations with a unique solution for input 'x' and 'y'. Its data type should be the same as that of `x`. Examples: .. code-block:: python # a square system of linear equations: # 2*X0 + X1 = 9 # X0 + 2*X1 = 8 import paddle import numpy as np np_x = np.array([[3, 1],[1, 2]]) np_y = np.array([9, 8]) x = paddle.to_tensor(np_x, dtype="float64") y = paddle.to_tensor(np_y, dtype="float64") out = paddle.linalg.solve(x, y) print(out) # [2., 3.]) """ if paddle.in_dynamic_mode(): return _C_ops.solve(x, y) inputs = {"X": [x], "Y": [y]} helper = LayerHelper("solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type="solve", inputs={"X": x, "Y": y}, outputs={"Out": out}) return out def triangular_solve(x, y, upper=True, transpose=False, unitriangular=False, name=None): r""" Computes the solution of a system of equations with a triangular coefficient matrix `x` and multiple right-hand sides `y` . Input `x` and `y` is 2D matrices or batches of 2D matrices. If the inputs are batches, the outputs is also batches. Args: x (Tensor): The input triangular coefficient matrix. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. upper (bool, optional): Whether to solve the upper-triangular system of equations (default) or the lower-triangular system of equations. Default: True. transpose (bool, optional): whether `x` should be transposed before calculation. Default: False. unitriangular (bool, optional): whether `x` is unit triangular. If True, the diagonal elements of `x` are assumed to be 1 and not referenced from `x` . Default: False. name(str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of the system of equations. Its data type should be the same as that of `x`. Examples: .. code-block:: python # a square system of linear equations: # x1 + x2 + x3 = 0 # 2*x2 + x3 = -9 # -x3 = 5 import paddle import numpy as np x = paddle.to_tensor([[1, 1, 1], [0, 2, 1], [0, 0,-1]], dtype="float64") y = paddle.to_tensor([[0], [-9], [5]], dtype="float64") out = paddle.linalg.triangular_solve(x, y, upper=True) print(out) # [7, -2, -5] """ if paddle.in_dynamic_mode(): return _C_ops.triangular_solve(x, y, 'upper', upper, 'transpose', transpose, 'unitriangular', unitriangular) inputs = {"X": [x], "Y": [y]} helper = LayerHelper("triangular_solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'triangular_solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'triangular_solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='triangular_solve', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs={ 'upper': upper, 'transpose': transpose, 'unitriangular': unitriangular }) return out def cholesky_solve(x, y, upper=False, name=None): r""" Solves a linear system of equations A @ X = B, given A's Cholesky factor matrix u and matrix B. Input `x` and `y` is 2D matrices or batches of 2D matrices. 
If the inputs are batches, the outputs is also batches. Args: x (Tensor): The input matrix which is upper or lower triangular Cholesky factor of square matrix A. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. upper (bool, optional): whether to consider the Cholesky factor as a lower or upper triangular matrix. Default: False. name(str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of the system of equations. Its data type is the same as that of `x`. Examples: .. code-block:: python import paddle u = paddle.to_tensor([[1, 1, 1], [0, 2, 1], [0, 0,-1]], dtype="float64") b = paddle.to_tensor([[0], [-9], [5]], dtype="float64") out = paddle.linalg.cholesky_solve(b, u, upper=True) print(out) # [-2.5, -7, 9.5] """ if paddle.in_dynamic_mode(): return _C_ops.cholesky_solve(x, y, 'upper', upper) helper = LayerHelper("cholesky_solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'cholesky_solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'cholesky_solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='cholesky_solve', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs={'upper': upper}) return out def eigvalsh(x, UPLO='L', name=None): """ Computes the eigenvalues of a complex Hermitian (conjugate symmetric) or a real symmetric matrix. Args: x (Tensor): A tensor with shape :math:`[_, M, M]` , The data type of the input Tensor x should be one of float32, float64, complex64, complex128. UPLO(str, optional): Lower triangular part of a (‘L’, default) or the upper triangular part (‘U’). name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor eigenvalues in ascending order. Examples: .. code-block:: python import numpy as np import paddle x_data = np.array([[1, -2j], [2j, 5]]) x = paddle.to_tensor(x_data) out_value = paddle.eigvalsh(x, UPLO='L') print(out_value) #[0.17157288, 5.82842712] """ if paddle.in_dynamic_mode(): is_test = x.stop_gradient values, _ = _C_ops.eigvalsh(x, 'UPLO', UPLO, 'is_test', is_test) return values def __check_input(x, UPLO): x_shape = list(x.shape) if len(x.shape) < 2: raise ValueError( "Input(input) only support >=2 tensor, but received " "length of Input(input) is %s." % len(x.shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The input matrix must be batches of square matrices. But received x's dimention: {}". format(x_shape)) if UPLO != 'L' and UPLO != 'U': raise ValueError( "UPLO must be L or U. 
But received UPLO is: {}".format(UPLO)) __check_input(x, UPLO) helper = LayerHelper('eigvalsh', **locals()) check_variable_and_dtype(x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigvalsh') out_value = helper.create_variable_for_type_inference(dtype=x.dtype) out_vector = helper.create_variable_for_type_inference(dtype=x.dtype) is_test = x.stop_gradient helper.append_op( type='eigvalsh', inputs={'X': x}, outputs={'Eigenvalues': out_value, 'Eigenvectors': out_vector}, attrs={'UPLO': UPLO, 'is_test': is_test}) return out_value def lstsq(x, y, rcond=None, driver=None, name=None): """ Computes a solution to the least squares problem of a system of linear equations. Args: x (Tensor): A tensor with shape ``(*, M, N)`` , the data type of the input Tensor ``x`` should be one of float32, float64. y (Tensor): A tensor with shape ``(*, M, K)`` , the data type of the input Tensor ``y`` should be one of float32, float64. rcond(float, optional): The default value is None. A float pointing number used to determine the effective rank of ``x``. If ``rcond`` is None, it will be set to max(M, N) times the machine precision of x_dtype. driver(str, optional): The default value is None. The name of LAPACK method to be used. For CPU inputs the valid values are ‘gels’, ‘gelsy’, ‘gelsd, ‘gelss’. For CUDA input, the only valid driver is ‘gels’. If ``driver`` is None, ‘gelsy’ is used for CPU inputs and ‘gels’ for CUDA inputs. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tuple: A tuple of 4 Tensors which is (``solution``, ``residuals``, ``rank``, ``singular_values``). ``solution`` is a tensor with shape ``(*, N, K)``, meaning the least squares solution. ``residuals`` is a tensor with shape ``(*, K)``, meaning the squared residuals of the solutions, which is computed when M > N and every matrix in ``x`` is full-rank, otherwise return an empty tensor. ``rank`` is a tensor with shape ``(*)``, meaning the ranks of the matrices in ``x``, which is computed when ``driver`` in (‘gelsy’, ‘gelsd’, ‘gelss’), otherwise return an empty tensor. ``singular_values`` is a tensor with shape ``(*, min(M, N))``, meaning singular values of the matrices in ``x``, which is computed when ``driver`` in (‘gelsd’, ‘gelss’), otherwise return an empty tensor. Examples: .. code-block:: python import paddle paddle.set_device("cpu") x = paddle.to_tensor([[1, 3], [3, 2], [5, 6.]]) y = paddle.to_tensor([[3, 4, 6], [5, 3, 4], [1, 2, 1.]]) results = paddle.linalg.lstsq(x, y, driver="gelsd") print(results[0]) # [[ 0.78350395, -0.22165027, -0.62371236], # [-0.11340097, 0.78866047, 1.14948535]] print(results[1]) # [19.81443405, 10.43814468, 30.56185532]) print(results[2]) # 2 print(results[3]) # [9.03455734, 1.54167950] x = paddle.to_tensor([[10, 2, 3], [3, 10, 5], [5, 6, 12.]]) y = paddle.to_tensor([[4, 2, 9], [2, 0, 3], [2, 5, 3.]]) results = paddle.linalg.lstsq(x, y, driver="gels") print(results[0]) # [[ 0.39386186, 0.10230173, 0.93606132], # [ 0.10741687, -0.29028133, 0.11892585], # [-0.05115091, 0.51918161, -0.19948854]] print(results[1]) # [] """ device = paddle.get_device() if device == "cpu": if driver not in (None, "gels", "gelss", "gelsd", "gelsy"): raise ValueError( "Only support valid driver is 'gels', 'gelss', 'gelsd', 'gelsy' or None for CPU inputs. But got {}". 
format(driver)) driver = "gelsy" if driver is None else driver elif "gpu" in device: if driver not in (None, "gels"): raise ValueError( "Only support valid driver is 'gels' or None for CUDA inputs. But got {}". format(driver)) driver = "gels" if driver is None else driver else: raise RuntimeError("Only support lstsq api for CPU or CUDA device.") if x.dtype == y.dtype and x.dtype in (paddle.float32, paddle.float64): pass else: raise ValueError( "Only support x and y have the same dtype such as 'float32' and 'float64'." ) if rcond is None: if x.dtype == paddle.float32: rcond = 1e-7 * max(x.shape[-2], x.shape[-1]) elif x.dtype == paddle.float64: rcond = 1e-15 * max(x.shape[-2], x.shape[-1]) if paddle.in_dynamic_mode(): solution, rank, singular_values = _C_ops.lstsq(x, y, "rcond", rcond, "driver", driver) if x.shape[-2] > x.shape[-1]: matmul_out = _varbase_creator(dtype=x.dtype) _C_ops.matmul(x, solution, matmul_out, 'trans_x', False, 'trans_y', False) minus_out = _C_ops.elementwise_sub(matmul_out, y) pow_out = _C_ops.pow(minus_out, 'factor', 2) residuals = _C_ops.reduce_sum(pow_out, 'dim', [-2], 'keepdim', False, 'reduce_all', False) else: residuals = paddle.empty(shape=[0], dtype=x.dtype) if driver == "gels": rank = paddle.empty(shape=[0], dtype=paddle.int32) singular_values = paddle.empty(shape=[0], dtype=x.dtype) elif driver == "gelsy": singular_values = paddle.empty(shape=[0], dtype=x.dtype) return solution, residuals, rank, singular_values helper = LayerHelper('lstsq', **locals()) check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq') check_variable_and_dtype( y, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq') solution = helper.create_variable_for_type_inference(dtype=x.dtype) residuals = helper.create_variable_for_type_inference(dtype=x.dtype) rank = helper.create_variable_for_type_inference(dtype=paddle.int32) singular_values = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='lstsq', inputs={'X': x, 'Y': y}, outputs={ 'Solution': solution, 'Rank': rank, 'SingularValues': singular_values }, attrs={'rcond': rcond, 'driver': driver}) matmul_out = helper.create_variable_for_type_inference(dtype=x.dtype) minus_out = helper.create_variable_for_type_inference(dtype=x.dtype) pow_out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matmul_v2', inputs={'X': x, 'Y': solution}, outputs={'Out': matmul_out}, attrs={ 'trans_x': False, 'trans_y': False, }) helper.append_op( type='elementwise_sub', inputs={'X': matmul_out, 'Y': y}, outputs={'Out': minus_out}) helper.append_op( type='pow', inputs={'X': minus_out}, outputs={'Out': pow_out}, attrs={'factor': 2}) helper.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': residuals}, attrs={'dim': [-2], 'keep_dim': False, 'reduce_all': False}) if driver == "gels": rank = paddle.static.data(name='rank', shape=[0]) singular_values = paddle.static.data(name='singular_values', shape=[0]) elif driver == "gelsy": singular_values = paddle.static.data(name='singular_values', shape=[0]) return solution, residuals, rank, singular_values
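A minimal usage sketch, not part of the library source: it cross-checks the residuals that lstsq reports against the definition used in the code above, i.e. the element-wise square of x @ solution - y summed over the second-to-last axis. The concrete tensors and the "gelsd" driver choice are illustrative assumptions only.

    # Sketch only: verify lstsq residuals against their definition.
    import paddle

    paddle.set_device("cpu")
    x = paddle.to_tensor([[1., 3.], [3., 2.], [5., 6.]])   # M=3 > N=2, full rank
    y = paddle.to_tensor([[3., 4.], [5., 3.], [1., 2.]])
    solution, residuals, rank, singular_values = paddle.linalg.lstsq(x, y, driver="gelsd")

    # Recompute the squared residuals directly from the definition.
    manual = ((paddle.matmul(x, solution) - y) ** 2).sum(axis=-2)
    print(residuals)  # residuals reported by lstsq
    print(manual)     # same values computed by hand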
t
Transpose <=2-D tensor. 0-D and 1-D tensors are returned as is, and for a 2-D tensor this is equivalent to the paddle.transpose function with perm set to [1, 0].

Args:
    input (Tensor): The input Tensor. It is an N-D (N<=2) Tensor of data type float16, float32, float64, int32 or int64.
    name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`.

Returns:
    Tensor: A transposed N-D Tensor, with data type being float16, float32, float64, int32 or int64.

For Example:

    .. code-block:: text

        # Example 1 (0-D tensor)
        x = tensor([0.79])
        paddle.t(x) = tensor([0.79])

        # Example 2 (1-D tensor)
        x = tensor([0.79, 0.84, 0.32])
        paddle.t(x) = tensor([0.79, 0.84, 0.32])

        # Example 3 (2-D tensor)
        x = tensor([[0.79, 0.84, 0.32],
                    [0.64, 0.14, 0.57]])
        paddle.t(x) = tensor([[0.79, 0.64],
                              [0.84, 0.14],
                              [0.32, 0.57]])

Examples:

    .. code-block:: python

        import paddle
        x = paddle.ones(shape=[2, 3], dtype='int32')
        x_transposed = paddle.t(x)
        print(x_transposed.shape)
        # [3, 2]
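A minimal sketch, not part of the docstring above, illustrating the equivalence it states: for a 2-D tensor, paddle.t gives the same result as paddle.transpose with perm=[1, 0]. The sample values are assumptions for illustration.

    # Sketch only: paddle.t on a 2-D tensor swaps its two axes.
    import paddle

    x = paddle.to_tensor([[0.79, 0.84, 0.32],
                          [0.64, 0.14, 0.57]])
    same = paddle.allclose(paddle.t(x), paddle.transpose(x, perm=[1, 0]))
    print(same)  # prints a boolean Tensor containing True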
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from ..fluid.layer_helper import LayerHelper from ..framework import _varbase_creator, _dygraph_tracer from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype from ..static import Variable from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode from ..fluid.layers import transpose, cast # noqa: F401 from ..fluid import layers import paddle from paddle.common_ops_import import core from paddle.common_ops_import import VarDesc from paddle import _C_ops __all__ = [] def matmul(x, y, transpose_x=False, transpose_y=False, name=None): """ Applies matrix multiplication to two tensors. `matmul` follows the complete broadcast rules, and its behavior is consistent with `np.matmul`. Currently, the input tensors' number of dimensions can be any, `matmul` can be used to achieve the `dot`, `matmul` and `batchmatmul`. The actual behavior depends on the shapes of :math:`x`, :math:`y` and the flag values of :attr:`transpose_x`, :attr:`transpose_y`. Specifically: - If a transpose flag is specified, the last two dimensions of the tensor are transposed. If the tensor is ndim-1 of shape, the transpose is invalid. If the tensor is ndim-1 of shape :math:`[D]`, then for :math:`x` it is treated as :math:`[1, D]`, whereas for :math:`y` it is the opposite: It is treated as :math:`[D, 1]`. The multiplication behavior depends on the dimensions of `x` and `y`. Specifically: - If both tensors are 1-dimensional, the dot product result is obtained. - If both tensors are 2-dimensional, the matrix-matrix product is obtained. - If the `x` is 1-dimensional and the `y` is 2-dimensional, a `1` is prepended to its dimension in order to conduct the matrix multiply. After the matrix multiply, the prepended dimension is removed. - If the `x` is 2-dimensional and `y` is 1-dimensional, the matrix-vector product is obtained. - If both arguments are at least 1-dimensional and at least one argument is N-dimensional (where N > 2), then a batched matrix multiply is obtained. If the first argument is 1-dimensional, a 1 is prepended to its dimension in order to conduct the batched matrix multiply and removed after. If the second argument is 1-dimensional, a 1 is appended to its dimension for the purpose of the batched matrix multiple and removed after. The non-matrix (exclude the last two dimensions) dimensions are broadcasted according the broadcast rule. For example, if input is a (j, 1, n, m) tensor and the other is a (k, m, p) tensor, out will be a (j, k, n, p) tensor. Args: x (Tensor): The input tensor which is a Tensor. y (Tensor): The input tensor which is a Tensor. transpose_x (bool): Whether to transpose :math:`x` before multiplication. transpose_y (bool): Whether to transpose :math:`y` before multiplication. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The output Tensor. Examples: .. 
code-block:: python import paddle import numpy as np # vector * vector x_data = np.random.random([10]).astype(np.float32) y_data = np.random.random([10]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [1] # matrix * vector x_data = np.random.random([10, 5]).astype(np.float32) y_data = np.random.random([5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10] # batched matrix * broadcasted vector x_data = np.random.random([10, 5, 2]).astype(np.float32) y_data = np.random.random([2]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 5] # batched matrix * batched matrix x_data = np.random.random([10, 5, 2]).astype(np.float32) y_data = np.random.random([10, 2, 5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 5, 5] # batched matrix * broadcasted matrix x_data = np.random.random([10, 1, 5, 2]).astype(np.float32) y_data = np.random.random([1, 3, 2, 5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 3, 5, 5] """ if in_dygraph_mode(): return _C_ops.final_state_matmul(x, y, transpose_x, transpose_y) if _in_legacy_dygraph(): op_type = 'matmul_v2' op = getattr(_C_ops, op_type) return op(x, y, 'trans_x', transpose_x, 'trans_y', transpose_y) attrs = { 'trans_x': transpose_x, 'trans_y': transpose_y, } def __check_input(x, y): var_names = {'x': x, 'y': y} for name, val in var_names.items(): check_variable_and_dtype( val, name, ['float16', 'float32', 'float64', 'complex64', 'complex128'], 'matmul') __check_input(x, y) helper = LayerHelper('matmul_v2', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matmul_v2', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs=attrs) return out def norm(x, p='fro', axis=None, keepdim=False, name=None): """ Returns the matrix norm (Frobenius) or vector norm (the 1-norm, the Euclidean or 2-norm, and in general the p-norm for p > 0) of a given tensor. .. note:: This norm API is different from `numpy.linalg.norm`. This api supports high-order input tensors (rank >= 3), and certain axis need to be pointed out to calculate the norm. But `numpy.linalg.norm` only supports 1-D vector or 2-D matrix as input tensor. For p-order matrix norm, this api actually treats matrix as a flattened vector to calculate the vector norm, NOT REAL MATRIX NORM. Args: x (Tensor): The input tensor could be N-D tensor, and the input data type could be float32 or float64. p (float|string, optional): Order of the norm. Supported values are `fro`, `0`, `1`, `2`, `inf`, `-inf` and any positive real number yielding the corresponding p-norm. Not supported: ord < 0 and nuclear norm. Default value is `fro`. axis (int|list|tuple, optional): The axis on which to apply norm operation. If axis is int or list(int)/tuple(int) with only one element, the vector norm is computed over the axis. If `axis < 0`, the dimension to norm operation is rank(input) + axis. If axis is a list(int)/tuple(int) with two elements, the matrix norm is computed over the axis. Defalut value is `None`. keepdim (bool, optional): Whether to reserve the reduced dimension in the output Tensor. 
The result tensor will have fewer dimension than the :attr:`input` unless :attr:`keepdim` is true, default value is False. name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: results of norm operation on the specified axis of input tensor, it's data type is the same as input's Tensor. Examples: .. code-block:: python import paddle import numpy as np shape=[2, 3, 4] np_input = np.arange(24).astype('float32') - 12 np_input = np_input.reshape(shape) x = paddle.to_tensor(np_input) #[[[-12. -11. -10. -9.] [ -8. -7. -6. -5.] [ -4. -3. -2. -1.]] # [[ 0. 1. 2. 3.] [ 4. 5. 6. 7.] [ 8. 9. 10. 11.]]] # compute frobenius norm along last two dimensions. out_fro = paddle.linalg.norm(x, p='fro', axis=[0,1]) # out_fro.numpy() [17.435596 16.911535 16.7332 16.911535] # compute 2-order vector norm along last dimension. out_pnorm = paddle.linalg.norm(x, p=2, axis=-1) #out_pnorm.numpy(): [[21.118711 13.190906 5.477226] # [ 3.7416575 11.224972 19.131126]] # compute 2-order norm along [0,1] dimension. out_pnorm = paddle.linalg.norm(x, p=2, axis=[0,1]) #out_pnorm.numpy(): [17.435596 16.911535 16.7332 16.911535] # compute inf-order norm out_pnorm = paddle.linalg.norm(x, p=np.inf) #out_pnorm.numpy() = [12.] out_pnorm = paddle.linalg.norm(x, p=np.inf, axis=0) #out_pnorm.numpy(): [[12. 11. 10. 9.] [8. 7. 6. 7.] [8. 9. 10. 11.]] # compute -inf-order norm out_pnorm = paddle.linalg.norm(x, p=-np.inf) #out_pnorm.numpy(): [0.] out_pnorm = paddle.linalg.norm(x, p=-np.inf, axis=0) #out_pnorm.numpy(): [[0. 1. 2. 3.] [4. 5. 6. 5.] [4. 3. 2. 1.]] """ def frobenius_norm(input, dim=None, keepdim=False, name=None): """ The frobenius norm OP is to calculate the frobenius norm of certain two dimensions of Tensor `input`. Args: input (Variable): Tensor, data type float32, float64. dim (list, optional): None for last two dimensions. keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False. """ if dim is not None and not (isinstance(dim, list) and len(dim) == 2): raise ValueError( "The dim of frobenius norm op should be None or two elements list!" ) if paddle.in_dynamic_mode(): if dim is None: return _C_ops.frobenius_norm(input, 'keep_dim', keepdim, 'reduce_all', True) return _C_ops.frobenius_norm(input, 'dim', dim, 'keep_dim', keepdim, 'reduce_all', False) attrs = {'dim': dim, 'keep_dim': keepdim, 'reduce_all': False} if dim is None: attrs['reduce_all'] = True check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'frobenius_norm') helper = LayerHelper('frobenius_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op( type='frobenius_norm', inputs={'X': input}, outputs={'Out': out}, attrs=attrs) return out def vector_norm(input, porder=None, axis=None, keepdim=False, asvector=False, name=None): """ Calculate the p-order vector norm for certain dimension of Tensor `input`. Args: input (Variable): Tensor, data type float32, float64. porder (float, optional): None for porder=2.0. axis (int, optional): None for last dimension. keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False. 
""" if paddle.in_dynamic_mode(): if axis is None: axis = -1 return _C_ops.p_norm(input, 'porder', porder, 'axis', axis, 'keepdim', keepdim, 'asvector', asvector) if porder is not None: check_type(porder, 'porder', (float, int), 'p_norm') if axis is not None: check_type(axis, 'axis', (int), 'p_norm') check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'p_norm') attrs = { 'axis': axis if axis is not None else -1, 'porder': float(porder) if porder is not None else 2.0, 'keepdim': keepdim, 'asvector': asvector, 'epsilon': 1e-12, } helper = LayerHelper('p_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op( type='p_norm', inputs={'X': input}, outputs={'Out': out}, attrs=attrs) return out def inf_norm(input, porder=None, axis=axis, keepdim=False, asvector=False, name=None): helper = LayerHelper('frobenius_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op(type='abs', inputs={'X': input}, outputs={'Out': out}) reduce_out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) reduce_all = True if axis == None or axis == [] or asvector == True else False axis = axis if axis != None and axis != [] else [0] reduce_type = 'reduce_max' if porder == np.float( 'inf') else 'reduce_min' helper.append_op( type=reduce_type, inputs={'X': out}, outputs={'Out': reduce_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) return reduce_out def p_matrix_norm(input, porder=1., axis=axis, keepdim=False, name=None): """ NOTE: This function actually treats the matrix as flattened vector to calculate vector norm instead of matrix norm. """ block = LayerHelper('norm', **locals()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) abs_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='abs', inputs={'X': input}, outputs={'Out': abs_out}) pow_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='pow', inputs={'X': abs_out}, outputs={'Out': pow_out}, attrs={'factor': porder}) sum_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': sum_out}, attrs={ 'dim': axis, 'keep_dim': keepdim, 'reduce_all': True if axis is None else False }) porder block.append_op( type='pow', inputs={'X': sum_out}, outputs={'Out': out}, attrs={'factor': float(1. / porder)}) return out if axis is None and p is not None: if isinstance(p, str): if p == "fro": return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name) else: raise ValueError( "only valid string values are 'fro', found {}".format(p)) elif isinstance(p, (int, float)): return vector_norm( x, porder=p, axis=axis, keepdim=keepdim, asvector=True, name=name) else: raise ValueError("only valid p type is string or float, found {}". 
format(type(p))) if isinstance(axis, tuple): axis = list(axis) if isinstance(axis, list) and len(axis) == 1: axis = axis[0] #calculate vector norm, where axis is int or list with only one integer if isinstance(axis, int): if isinstance(p, str): if p == "fro": return vector_norm( x, porder=2, axis=axis, keepdim=keepdim, asvector=False, name=name) else: raise ValueError( "only valid string values are 'fro', found {}".format(p)) elif isinstance(p, (int, float)): return vector_norm( x, axis=axis, porder=p, keepdim=keepdim, asvector=False, name=name) else: raise ValueError( "unspport p for p-order vector norm. except float, found {}". format(p)) #calculate matrix norm, where axis is list with two integers elif isinstance(axis, list) and len(axis) == 2: if p == "fro": return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name) elif p == np.inf or p == -np.inf: return inf_norm(x, porder=p, axis=axis, keepdim=keepdim, name=name) elif p == 0: raise ValueError( "just suport axis type int or list (length of list <=1) if p = 0, found {}". format(axis)) else: return p_matrix_norm( x, porder=p, axis=axis, keepdim=keepdim, name=name) else: raise ValueError( "except axis type int or list (length of list <=2), found {}". format(axis)) def dist(x, y, p=2, name=None): r""" This OP returns the p-norm of (x - y). It is not a norm in a strict sense, only as a measure of distance. The shapes of x and y must be broadcastable. The definition is as follows, for details, please refer to the `numpy's broadcasting <https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`_: - Each input has at least one dimension. - Match the two input dimensions from back to front, the dimension sizes must either be equal, one of them is 1, or one of them does not exist. Where, z = x - y, the shapes of x and y are broadcastable, then the shape of z can be obtained as follows: 1. If the number of dimensions of x and y are not equal, prepend 1 to the dimensions of the tensor with fewer dimensions. For example, The shape of x is [8, 1, 6, 1], the shape of y is [7, 1, 5], prepend 1 to the dimension of y. x (4-D Tensor): 8 x 1 x 6 x 1 y (4-D Tensor): 1 x 7 x 1 x 5 2. Determine the size of each dimension of the output z: choose the maximum value from the two input dimensions. z (4-D Tensor): 8 x 7 x 6 x 5 If the number of dimensions of the two inputs are the same, the size of the output can be directly determined in step 2. When p takes different values, the norm formula is as follows: When p = 0, defining $0^0=0$, the zero-norm of z is simply the number of non-zero elements of z. .. math:: ||z||_{0}=\lim_{p \\rightarrow 0}\sum_{i=1}^{m}|z_i|^{p} When p = inf, the inf-norm of z is the maximum element of z. .. math:: ||z||_\infty=\max_i |z_i| When p = -inf, the negative-inf-norm of z is the minimum element of z. .. math:: ||z||_{-\infty}=\min_i |z_i| Otherwise, the p-norm of z follows the formula, .. math:: ||z||_{p}=(\sum_{i=1}^{m}|z_i|^p)^{\\frac{1}{p}} Args: x (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64. y (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64. p (float, optional): The norm to be computed, its data type is float32 or float64. Default: 2. Returns: Tensor: Tensor that is the p-norm of (x - y). Examples: .. code-block:: python import paddle import numpy as np x = paddle.to_tensor(np.array([[3, 3],[3, 3]]), "float32") y = paddle.to_tensor(np.array([[3, 3],[3, 1]]), "float32") out = paddle.dist(x, y, 0) print(out) # out = [1.] out = paddle.dist(x, y, 2) print(out) # out = [2.] 
out = paddle.dist(x, y, float("inf")) print(out) # out = [2.] out = paddle.dist(x, y, float("-inf")) print(out) # out = [0.] """ check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'dist') check_variable_and_dtype(y, 'dtype', ['float32', 'float64'], 'dist') check_type(p, 'p', (float, int), 'dist') helper = LayerHelper("dist", **locals()) out = helper.create_variable_for_type_inference(x.dtype) inputs = {"X": [x], "Y": [y]} outputs = {'Out': [out]} attrs = {"p": float(p)} helper.append_op( type='dist', inputs=inputs, outputs={'Out': out}, attrs=attrs) return out def cond(x, p=None, name=None): """ Computes the condition number of a matrix or batches of matrices with respect to a matrix norm ``p``. Args: x (Tensor): The input tensor could be tensor of shape ``(*, m, n)`` where ``*`` is zero or more batch dimensions for ``p`` in ``(2, -2)``, or of shape ``(*, n, n)`` where every matrix is invertible for any supported ``p``. And the input data type could be ``float32`` or ``float64``. p (float|string, optional): Order of the norm. Supported values are `fro`, `nuc`, `1`, `-1`, `2`, `-2`, `inf`, `-inf`. Default value is `None`, meaning that the order of the norm is `2`. name (str, optional): The default value is `None`. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: computing results of condition number, its data type is the same as input Tensor ``x``. Examples: .. code-block:: python import paddle import numpy as np x = paddle.to_tensor([[1., 0, -1], [0, 1, 0], [1, 0, 1]]) # compute conditional number when p is None out = paddle.linalg.cond(x) # out.numpy() [1.4142135] # compute conditional number when order of the norm is 'fro' out_fro = paddle.linalg.cond(x, p='fro') # out_fro.numpy() [3.1622777] # compute conditional number when order of the norm is 'nuc' out_nuc = paddle.linalg.cond(x, p='nuc') # out_nuc.numpy() [9.2426405] # compute conditional number when order of the norm is 1 out_1 = paddle.linalg.cond(x, p=1) # out_1.numpy() [2.] # compute conditional number when order of the norm is -1 out_minus_1 = paddle.linalg.cond(x, p=-1) # out_minus_1.numpy() [1.] # compute conditional number when order of the norm is 2 out_2 = paddle.linalg.cond(x, p=2) # out_2.numpy() [1.4142135] # compute conditional number when order of the norm is -1 out_minus_2 = paddle.linalg.cond(x, p=-2) # out_minus_2.numpy() [0.70710677] # compute conditional number when order of the norm is inf out_inf = paddle.linalg.cond(x, p=np.inf) # out_inf.numpy() [2.] # compute conditional number when order of the norm is -inf out_minus_inf = paddle.linalg.cond(x, p=-np.inf) # out_minus_inf.numpy() [1.] 
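            # note: the batch examples below use np.random.randn without a fixed seed, so the
            # matrices and condition numbers shown in the comments are one sample run and will
            # differ between executions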
a = paddle.to_tensor(np.random.randn(2, 4, 4).astype('float32')) # a.numpy() # [[[ 0.14063153 -0.996288 0.7996131 -0.02571543] # [-0.16303636 1.5534962 -0.49919784 -0.04402903] # [-1.1341571 -0.6022629 0.5445269 0.29154757] # [-0.16816919 -0.30972657 1.7521842 -0.5402487 ]] # [[-0.58081484 0.12402827 0.7229862 -0.55046535] # [-0.15178485 -1.1604939 0.75810957 0.30971205] # [-0.9669573 1.0940945 -0.27363303 -0.35416734] # [-1.216529 2.0018666 -0.7773689 -0.17556527]]] a_cond_fro = paddle.linalg.cond(a, p='fro') # a_cond_fro.numpy() [31.572273 28.120834] b = paddle.to_tensor(np.random.randn(2, 3, 4).astype('float64')) # b.numpy() # [[[ 1.61707487 0.46829144 0.38130416 0.82546736] # [-1.72710298 0.08866375 -0.62518804 0.16128892] # [-0.02822879 -1.67764516 0.11141444 0.3220113 ]] # [[ 0.22524372 0.62474921 -0.85503233 -1.03960523] # [-0.76620689 0.56673047 0.85064753 -0.45158196] # [ 1.47595418 2.23646462 1.5701758 0.10497519]]] b_cond_2 = paddle.linalg.cond(b, p=2) # b_cond_2.numpy() [3.30064451 2.51976252] """ def mat_norm(input, porder=1., axis=None): """ NOTE: Calculate the matrix norm of a square matrix or batches of square matrices, when porder is in (1, -1, inf, -inf) """ reduce_all = True if axis is None or axis == [] else False axis = axis if axis != None and axis != [] else [0] keepdim = False if paddle.in_dynamic_mode(): abs_out = _C_ops.abs(input) sum_out = _C_ops.reduce_sum(abs_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == 1 or porder == np.inf: return _C_ops.reduce_max(sum_out, 'dim', [-1], 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == -1 or porder == -np.inf: return _C_ops.reduce_min(sum_out, 'dim', [-1], 'keepdim', keepdim, 'reduce_all', reduce_all) block = LayerHelper('norm', **locals()) abs_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='abs', inputs={'X': input}, outputs={'Out': abs_out}) block.append_op( type='reduce_sum', inputs={'X': abs_out}, outputs={'Out': sum_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) if porder == 1 or porder == np.inf: block.append_op( type='reduce_max', inputs={'X': sum_out}, outputs={'Out': out}, attrs={ 'dim': [-1], 'keep_dim': keepdim, 'reduce_all': reduce_all }) if porder == -1 or porder == -np.inf: block.append_op( type='reduce_min', inputs={'X': sum_out}, outputs={'Out': out}, attrs={ 'dim': [-1], 'keep_dim': keepdim, 'reduce_all': reduce_all }) return out def fro_norm(input, porder=2, axis=[-1]): """ NOTE: Calculate the frobenius norm of a square matrix or batches of square matrices. """ reduce_all = True if axis is None or axis == [] else False keepdim = False if paddle.in_dynamic_mode(): pow_out = _C_ops.pow(input, 'factor', porder) sum_out_1 = _C_ops.reduce_sum(pow_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) sum_out_2 = _C_ops.reduce_sum(sum_out_1, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) return _C_ops.pow(sum_out_2, 'factor', float(1. 
/ porder)) block = LayerHelper('norm', **locals()) pow_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out_1 = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out_2 = block.create_variable_for_type_inference( dtype=block.input_dtype()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='pow', inputs={'X': input}, outputs={'Out': pow_out}, attrs={'factor': porder}) block.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': sum_out_1}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='reduce_sum', inputs={'X': sum_out_1}, outputs={'Out': sum_out_2}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='pow', inputs={'X': sum_out_2}, outputs={'Out': out}, attrs={'factor': float(1. / porder)}) return out def svd_norm(input, porder, axis=[-1]): """ NOTE: Calculate the matrix norm, which is related to singular values, of a matrix or batches of matrices, including nuclear norm, 2-norm and (-2)-norm. """ reduce_all = True if axis is None or axis == [] else False keepdim = False u, s, vh = svd(input, full_matrices=False) if paddle.in_dynamic_mode(): if porder == "nuc": return _C_ops.reduce_sum(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) max_out = _C_ops.reduce_max(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) min_out = _C_ops.reduce_min(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == 2: return _C_ops.elementwise_div(max_out, min_out, 'aixs', axis, 'use_mkldnn', False) if porder == -2: return _C_ops.elementwise_div(min_out, max_out, 'aixs', axis, 'use_mkldnn', False) block = LayerHelper('norm', **locals()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) if porder == "nuc": block.append_op( type='reduce_sum', inputs={'X': s}, outputs={'Out': out}, attrs={ 'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all }) return out max_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) min_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='reduce_max', inputs={'X': s}, outputs={'Out': max_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='reduce_min', inputs={'X': s}, outputs={'Out': min_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) if porder == 2: block.append_op( type='elementwise_div', inputs={'X': max_out, 'Y': min_out}, outputs={'Out': out}, attrs={'aixs': axis, 'use_mkldnn': False}) return out if porder == -2: block.append_op( type='elementwise_div', inputs={'X': min_out, 'Y': max_out}, outputs={'Out': out}, attrs={'aixs': axis, 'use_mkldnn': False}) return out def empty_tensor(input, shape): if paddle.in_dynamic_mode(): return input.reshape(shape) raise ValueError("only support x is nonempty tensor in static mode") x_shape = list(x.shape) if not len(x_shape) >= 2: raise ValueError("input should be a matrix or batches of matrices, " + "but the dimention of received input is {}".format( len(x_shape))) if p == None: p = 2 x_size = 0 if (0 in x_shape) else 1 if p in ("fro", "nuc", 1, -1, np.inf, -np.inf): if x_shape[len(x_shape) - 1] == x_shape[len(x_shape) - 2]: if x_size == 0: return empty_tensor(x, x_shape[:-2]) x_inv = x.inverse() if p == "fro": return fro_norm(x) * fro_norm(x_inv) if p == "nuc": return svd_norm(x, p) * svd_norm(x_inv, p) if p in (1, -1): return mat_norm( x, 
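                # for p in (1, -1) the condition number is ||x||_p * ||inv(x)||_p: mat_norm with
                # axis=[-2] takes per-column absolute sums (their max for p=1, their min for p=-1);
                # the (inf, -inf) branch below does the same over rows via axis=[-1]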
porder=p, axis=[-2]) * mat_norm( x_inv, porder=p, axis=[-2]) if p in (np.inf, -np.inf): return mat_norm( x, porder=p, axis=[-1]) * mat_norm( x_inv, porder=p, axis=[-1]) else: raise ValueError("only support p is {} when input is a ".format(p) + "square matrix or batches of square matrices") elif p in (2, -2): if x_size == 0: return empty_tensor(x, x_shape[:-2]) return svd_norm(x, porder=p) else: raise ValueError( "unsupported {} for p, only supporting ('fro', 'nuc', ".format( p) + "1, -1, 2, -2, inf, -inf) or none") def dot(x, y, name=None): """ This operator calculates inner product for vectors. .. note:: Support 1-d and 2-d Tensor. When it is 2d, the first dimension of this matrix is the batch dimension, which means that the vectors of multiple batches are dotted. Parameters: x(Tensor): 1-D or 2-D ``Tensor``. Its dtype should be ``float32``, ``float64``, ``int32``, ``int64`` y(Tensor): 1-D or 2-D ``Tensor``. Its dtype soulde be ``float32``, ``float64``, ``int32``, ``int64`` name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. Details: :ref:`api_guide_Name` Returns: Tensor: the calculated result Tensor. Examples: .. code-block:: python import paddle import numpy as np x_data = np.random.uniform(0.1, 1, [10]).astype(np.float32) y_data = np.random.uniform(1, 3, [10]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.dot(x, y) print(z) """ op_type = 'dot' # skip var type check in dygraph mode to improve efficiency if paddle.in_dynamic_mode(): op = getattr(_C_ops, op_type) return op(x, y) assert x is not None, 'x cannot be None in {}'.format(op_type) assert y is not None, 'y cannot be None in {}'.format(op_type) check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], op_type) check_variable_and_dtype(y, 'y', ['float32', 'float64', 'int32', 'int64'], op_type) helper = LayerHelper(op_type, **locals()) if name is None: out = helper.create_variable_for_type_inference(dtype=x.dtype) else: out = helper.create_variable( name=name, dtype=x.dtype, persistable=False) helper.append_op( type="dot", inputs={'X': x, 'Y': y}, attrs={}, outputs={"Out": out}) return out def cov(x, rowvar=True, ddof=True, fweights=None, aweights=None, name=None): """ Estimate the covariance matrix of the input variables, given data and weights. A covariance matrix is a square matrix, indicate the covariance of each pair variables in the input matrix. For example, for an N-dimensional samples X=[x1,x2,…xN]T, then the covariance matrix element Cij is the covariance of xi and xj. The element Cii is the variance of xi itself. Parameters: x(Tensor): A N-D(N<=2) Tensor containing multiple variables and observations. By default, each row of x represents a variable. Also see rowvar below. rowvar(Bool, optional): If rowvar is True (default), then each row represents a variable, with observations in the columns. Default: True ddof(Bool, optional): If ddof=True will return the unbiased estimate, and ddof=False will return the simple average. Default: True fweights(Tensor, optional): 1-D Tensor of integer frequency weights; The number of times each observation vector should be repeated. Default: None aweights(Tensor, optional): 1-D Tensor of observation vector weights. How important of the observation vector, larger data means this element is more important. Default: None name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. 
Details: :ref:`api_guide_Name` Returns: Tensor: The covariance matrix Tensor of the variables. Examples: .. code-block:: python import paddle xt = paddle.rand((3,4)) paddle.linalg.cov(xt) ''' Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, [[0.07918842, 0.06127326, 0.01493049], [0.06127326, 0.06166256, 0.00302668], [0.01493049, 0.00302668, 0.01632146]]) ''' """ op_type = 'cov' if len(x.shape) > 2 or len(x.shape) < 1: raise ValueError( "Input(x) only support N-D (1<=N<=2) tensor in cov, but received " "length of Input(input) is %s." % len(x.shape)) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cov') nx = x if len(x.shape) == 1: nx = x.reshape((1, -1)) if not rowvar and nx.shape[0] != 1: nx = nx.t() w = None observation_num = nx.shape[1] if fweights is not None: w = fweights.astype(nx.dtype) if len(w.shape) > 1: raise ValueError( "Input(fweights) only support N-D (N<=1) tensor in cov, but received " "shape of Input(input) is %s." % len(fweights.shape)) if fweights.shape[0] != observation_num: raise ValueError( "The number of Input(fweights) should equal to x's dim[1]: {}, but received " "size of Input(fweights) is {}.".format(observation_num, fweights.shape[0])) if fweights.min() < 0: raise ValueError( "The value of Input(fweights) cannot be negtive, but received " "min of Input(fweights) is {}.".format(fweights.min())) if not paddle.all(fweights == paddle.round(fweights.astype('float64'))): raise ValueError("Input(fweights) must be integer ") if aweights is not None: aw = aweights.astype(nx.dtype) if len(aw.shape) > 1: raise ValueError( "Input(aweights) only support N-D (N<=1) tensor in cov, but received " "length of Input(input) is %s." % len(aweights.shape)) check_variable_and_dtype(aweights, 'dtype', ['float32', 'float64'], 'cov') if aweights.shape[0] != observation_num: raise ValueError( "The number of Input(aweights) should equal to x's dim[1]: {}, but received " "size of Input(aweights) is {}.".format(observation_num, aweights.shape[0])) if aweights.min() < 0: raise ValueError( "The value of Input(aweights) cannot be negtive, but received " "min of Input(aweights) is {}.".format(aweights.min())) if w is not None: w = w * aw else: w = aw w_sum = paddle.to_tensor(observation_num, dtype=nx.dtype) if fweights is not None or aweights is not None: w_sum = w.sum() if w_sum.item() == 0: raise ValueError("The sum of weights is zero, can't be normalized.") if w is not None: nx_w = nx * w avg = (nx_w).sum(axis=1) / w_sum else: avg = nx.sum(axis=1) / w_sum nx_w = nx if w is not None and aweights is not None and ddof == True: norm_factor = w_sum - (w * aweights).sum() / w_sum else: norm_factor = w_sum - ddof if norm_factor <= 0: norm_factor = paddle.to_tensor(0, dtype=nx.dtype) nx = nx - avg.unsqueeze(1) xxt = paddle.mm(nx, nx_w.t().conj()) cov = paddle.divide(xxt, norm_factor).squeeze() return cov # MASKED: t function (lines 1041-1112) def cross(x, y, axis=None, name=None): """ Computes the cross product between two tensors along an axis. Inputs must have the same shape, and the length of their axes should be equal to 3. If `axis` is not given, it defaults to the first axis found with the length 3. Args: x (Tensor): The first input tensor. y (Tensor): The second input tensor. axis (int, optional): The axis along which to compute the cross product. It defaults to the first axis found with the length 3. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor. 
A Tensor with same data type as `x`. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0], [3.0, 3.0, 3.0]]) y = paddle.to_tensor([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]) z1 = paddle.cross(x, y) # [[-1. -1. -1.] # [ 2. 2. 2.] # [-1. -1. -1.]] z2 = paddle.cross(x, y, axis=1) # [[0. 0. 0.] # [0. 0. 0.] # [0. 0. 0.]] """ if in_dygraph_mode(): return _C_ops.final_state_cross(x, y, axis) else: if _in_legacy_dygraph(): if axis is not None: return _C_ops.cross(x, y, 'dim', axis) else: return _C_ops.cross(x, y) else: helper = LayerHelper("cross", **locals()) out = helper.create_variable_for_type_inference(x.dtype) attrs = dict() attrs['dim'] = axis helper.append_op( type='cross', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs=attrs) return out def cholesky(x, upper=False, name=None): r""" Computes the Cholesky decomposition of one symmetric positive-definite matrix or batches of symmetric positive-definite matrice. If `upper` is `True`, the decomposition has the form :math:`A = U^{T}U` , and the returned matrix :math:`U` is upper-triangular. Otherwise, the decomposition has the form :math:`A = LL^{T}` , and the returned matrix :math:`L` is lower-triangular. Args: x (Tensor): The input tensor. Its shape should be `[*, M, M]`, where * is zero or more batch dimensions, and matrices on the inner-most 2 dimensions all should be symmetric positive-definite. Its data type should be float32 or float64. upper (bool): The flag indicating whether to return upper or lower triangular matrices. Default: False. Returns: Tensor: A Tensor with same shape and data type as `x`. It represents \ triangular matrices generated by Cholesky decomposition. Examples: .. code-block:: python import paddle import numpy as np a = np.random.rand(3, 3) a_t = np.transpose(a, [1, 0]) x_data = np.matmul(a, a_t) + 1e-03 x = paddle.to_tensor(x_data) out = paddle.linalg.cholesky(x, upper=False) print(out) # [[1.190523 0. 0. ] # [0.9906703 0.27676893 0. ] # [1.25450498 0.05600871 0.06400121]] """ if paddle.in_dynamic_mode(): return _C_ops.cholesky(x, "upper", upper) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cholesky') check_type(upper, 'upper', bool, 'cholesky') helper = LayerHelper('cholesky', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='cholesky', inputs={'X': [x]}, outputs={'Out': out}, attrs={'upper': upper}) return out def matrix_rank(x, tol=None, hermitian=False, name=None): r""" Computes the rank of a matrix. The rank of a matrix is the number of singular values that are greater than the specified `tol` threshold when hermitian=False, or the number of eigenvalues in absolute value that are greater than the specified `tol` threshold when hermitian=True. Args: x (Tensor): The input tensor. Its shape should be `[..., m, n]`, where `...` is zero or more batch dimensions. If `x` is a batch of matrices then the output has the same batch dimensions. The data type of `x` should be float32 or float64. tol (float,Tensor,optional): the tolerance value. Default: None. If `tol` is not specified, and `sigma` is the largest singular value (or eigenvalues in absolute value), and `eps` is the epsilon value for the dtype of `x`, then `tol` is computed with formula `tol=sigma * max(m,n) * eps`. Note that if `x` is a batch of matrices, `tol` is computed this way for every batch. hermitian (bool,optional): indicates whether `x` is Hermitian. Default: False. 
When hermitian=True, `x` is assumed to be Hermitian, enabling a more efficient method for finding eigenvalues, but `x` is not checked inside the function. Instead, We just use the lower triangular of the matrix to compute. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: Rank of tensor x. Examples: .. code-block:: python import paddle a = paddle.eye(10) b = paddle.linalg.matrix_rank(a) print(b) # b = [10] c = paddle.ones(shape=[3, 4, 5, 5]) d = paddle.linalg.matrix_rank(c, tol=0.01, hermitian=True) print(d) # d = [[1, 1, 1, 1], # [1, 1, 1, 1], # [1, 1, 1, 1]] """ if paddle.in_dynamic_mode(): if tol is None: tol_tensor = None tol_attr = 0.0 use_default_tol = True elif isinstance(tol, Variable): if tol.dtype != x.dtype: tol_tensor = cast(tol, x.dtype) else: tol_tensor = tol tol_attr = 0.0 use_default_tol = False else: tol_tensor = None tol_attr = float(tol) use_default_tol = False return _C_ops.matrix_rank(x, tol_tensor, "tol", tol_attr, 'hermitian', hermitian, 'use_default_tol', use_default_tol) inputs = {} attrs = {} check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'matrix_rank') inputs['X'] = x if tol is None: attrs['use_default_tol'] = True elif isinstance(tol, Variable): check_variable_and_dtype(tol, 'tol', ['float32'], 'matrix_rank') attrs['use_default_tol'] = False if tol.dtype != x.dtype: inputs['TolTensor'] = cast(tol, x.dtype) else: inputs['TolTensor'] = tol else: check_type(tol, 'tol', float, 'matrix_rank') attrs['use_default_tol'] = False attrs['tol'] = tol check_type(hermitian, 'hermitian', bool, 'matrix_rank') attrs['hermitian'] = hermitian helper = LayerHelper('matrix_rank', **locals()) out = helper.create_variable_for_type_inference(dtype='int32') helper.append_op( type='matrix_rank', inputs=inputs, outputs={'Out': out}, attrs=attrs) return out def bmm(x, y, name=None): """ Applies batched matrix multiplication to two tensors. Both of the two input tensors must be three-dementional and share the same batch size. if x is a (b, m, k) tensor, y is a (b, k, n) tensor, the output will be a (b, m, n) tensor. Args: x (Tensor): The input Tensor. y (Tensor): The input Tensor. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The product Tensor. Examples: .. code-block:: python import paddle # In imperative mode: # size x: (2, 2, 3) and y: (2, 3, 2) x = paddle.to_tensor([[[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]], [[3.0, 3.0, 3.0], [4.0, 4.0, 4.0]]]) y = paddle.to_tensor([[[1.0, 1.0],[2.0, 2.0],[3.0, 3.0]], [[4.0, 4.0],[5.0, 5.0],[6.0, 6.0]]]) out = paddle.bmm(x, y) #output size: (2, 2, 2) #output value: #[[[6.0, 6.0],[12.0, 12.0]],[[45.0, 45.0],[60.0, 60.0]]] out_np = out.numpy() """ x_shape = x.shape y_shape = y.shape if not len(x_shape) == len(y_shape) == 3: raise ValueError( "x and y should be 3-dimensional. But received x's dimention: {}, y's dimention: {}". format(x_shape, y_shape)) if x_shape[2] != y_shape[1]: raise ValueError( "x's width must be equal with y's height. But received x's shape: {}, y's shape: {}". format(x_shape, y_shape)) if x_shape[0] != y_shape[0]: raise ValueError( "x's batch (shape[0]) must be equal with y's batch (shape[0]). But received x's shape: {}, y's shape: {}". 
format(x_shape, y_shape)) if paddle.in_dynamic_mode(): return _C_ops.bmm(x, y) helper = LayerHelper('bmm', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type='bmm', inputs={'X': x, 'Y': y}, outputs={'Out': out}) return out def histogram(input, bins=100, min=0, max=0, name=None): """ Computes the histogram of a tensor. The elements are sorted into equal width bins between min and max. If min and max are both zero, the minimum and maximum values of the data are used. Args: input (Tensor): A Tensor(or LoDTensor) with shape :math:`[N_1, N_2,..., N_k]` . The data type of the input Tensor should be float32, float64, int32, int64. bins (int): number of histogram bins min (int): lower end of the range (inclusive) max (int): upper end of the range (inclusive) Returns: Tensor: data type is int64, shape is (nbins,). Examples: .. code-block:: python import paddle inputs = paddle.to_tensor([1, 2, 1]) result = paddle.histogram(inputs, bins=4, min=0, max=3) print(result) # [0, 2, 1, 0] """ if paddle.in_dynamic_mode(): return _C_ops.histogram(input, "bins", bins, "min", min, "max", max) helper = LayerHelper('histogram', **locals()) check_variable_and_dtype( input, 'X', ['int32', 'int64', 'float32', 'float64'], 'histogram') out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64) helper.append_op( type='histogram', inputs={'X': input}, outputs={'Out': out}, attrs={'bins': bins, 'min': min, 'max': max}) return out def bincount(x, weights=None, minlength=0, name=None): """ Computes frequency of each value in the input tensor. Args: x (Tensor): A Tensor with non-negative integer. Should be 1-D tensor. weights (Tensor, optional): Weight for each value in the input tensor. Should have the same shape as input. Default is None. minlength (int, optional): Minimum number of bins. Should be non-negative integer. Default is 0. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor of frequency. Examples: .. code-block:: python import paddle x = paddle.to_tensor([1, 2, 1, 4, 5]) result1 = paddle.bincount(x) print(result1) # [0, 2, 1, 0, 1, 1] w = paddle.to_tensor([2.1, 0.4, 0.1, 0.5, 0.5]) result2 = paddle.bincount(x, weights=w) print(result2) # [0., 2.19999981, 0.40000001, 0., 0.50000000, 0.50000000] """ if x.dtype not in [paddle.int32, paddle.int64]: raise TypeError("Elements in Input(x) should all be integers") if paddle.in_dynamic_mode(): return _C_ops.bincount(x, weights, "minlength", minlength) helper = LayerHelper('bincount', **locals()) check_variable_and_dtype(x, 'X', ['int32', 'int64'], 'bincount') if weights is not None: check_variable_and_dtype(weights, 'Weights', ['int32', 'int64', 'float32', 'float64'], 'bincount') out = helper.create_variable_for_type_inference(dtype=weights.dtype) else: out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='bincount', inputs={'X': x, 'Weights': weights}, outputs={'Out': out}, attrs={'minlength': minlength}) return out def mv(x, vec, name=None): """ Performs a matrix-vector product of the matrix x and the vector vec. Args: x (Tensor): A tensor with shape :math:`[M, N]` , The data type of the input Tensor x should be one of float32, float64. vec (Tensor): A tensor with shape :math:`[N]` , The data type of the input Tensor x should be one of float32, float64. name(str, optional): The default value is None. 
Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor which is producted by x and vec. Examples: .. code-block:: python # x: [M, N], vec: [N] # paddle.mv(x, vec) # out: [M] import numpy as np import paddle x_data = np.array([[2, 1, 3], [3, 0, 1]]).astype("float64") x = paddle.to_tensor(x_data) vec_data = np.array([3, 5, 1]) vec = paddle.to_tensor(vec_data).astype("float64") out = paddle.mv(x, vec) """ if in_dygraph_mode(): return _C_ops.final_state_mv(x, vec) else: if _in_legacy_dygraph(): out = _C_ops.mv(x, vec) return out else: def __check_input(x, vec): var_names = {'x': x, 'vec': vec} for name, val in var_names.items(): check_variable_and_dtype(val, name, ['float32', 'float64'], 'mv') x_shape = list(x.shape) vec_shape = list(vec.shape) if len(x_shape) != 2: raise ValueError( "x should be 2-dimensional. But received x's dimention: {}". format(x_shape)) if len(vec_shape) != 1: raise ValueError( "vec should be 1-dimensional. But received vec's dimention: {}". format(vec_shape)) __check_input(x, vec) helper = LayerHelper('mv', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='mv', inputs={'X': x, 'Vec': vec}, outputs={'Out': out}) return out def det(x, name=None): """ Calculates determinant value of a square matrix or batches of square matrices. Args: x (Tensor): input (Tensor): the input matrix of size `(n, n)` or the batch of matrices of size `(*, n, n)` where `*` is one or more batch dimensions. Returns: y (Tensor):the determinant value of a square matrix or batches of square matrices. Examples: .. code-block:: python import paddle x = paddle.randn([3,3,3]) A = paddle.linalg.det(x) print(A) # [ 0.02547996, 2.52317095, -6.15900707]) """ if paddle.in_dynamic_mode(): return _C_ops.determinant(x) check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'det') input_shape = list(x.shape) assert len(input_shape) >= 2, \ "The x must be at least 2-dimensional, " \ "but received Input x's dimensional: %s.\n" % \ len(input_shape) assert (input_shape[-1] == input_shape[-2]), \ "Expect squared input," \ "but received %s by %s matrix.\n" \ %(input_shape[-2], input_shape[-1]) \ helper = LayerHelper('determinant', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='determinant', inputs={'Input': [x]}, outputs={'Out': [out]}) return out def slogdet(x, name=None): """ Calculates the sign and natural logarithm of the absolute value of a square matrix's or batches square matrices' determinant. The determinant can be computed with ``sign * exp(logabsdet) Supports input of float, double Note that for matrices that have zero determinant, this returns ``(0, -inf)`` Args: x (Tensor): the batch of matrices of size :math:`(*, n, n)` where math:`*` is one or more batch dimensions. Returns: y (Tensor): A tensor containing the sign of the determinant and the natural logarithm of the absolute value of determinant, respectively. Examples: .. code-block:: python import paddle x = paddle.randn([3,3,3]) A = paddle.linalg.slogdet(x) print(A) # [[ 1. , 1. , -1. 
], # [-0.98610914, -0.43010661, -0.10872950]]) """ if paddle.in_dynamic_mode(): return _C_ops.slogdeterminant(x) check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'slogdet') input_shape = list(x.shape) assert len(input_shape) >= 2, \ "The x must be at least 2-dimensional, " \ "but received Input x's dimensional: %s.\n" % \ len(input_shape) assert (input_shape[-1] == input_shape[-2]), \ "Expect squared input," \ "but received %s by %s matrix.\n" \ %(input_shape[-2], input_shape[-1]) \ helper = LayerHelper('slogdeterminant', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='slogdeterminant', inputs={'Input': [x]}, outputs={'Out': [out]}) return out def svd(x, full_matrices=False, name=None): r""" Computes the singular value decomposition of one matrix or a batch of regular matrices. Let :math:`X` be the input matrix or a batch of input matrices, the output should satisfies: .. math:: X = U * diag(S) * VT Args: x (Tensor): The input tensor. Its shape should be `[..., N, M]`, where `...` is zero or more batch dimensions. N and M can be arbitraty positive number. Note that if x is sigular matrices, the grad is numerical instable. The data type of x should be float32 or float64. full_matrices (bool): A flag to control the behavor of svd. If full_matrices = True, svd op will compute full U and V matrics, which means shape of U is `[..., N, N]`, shape of V is `[..., M, M]`. K = min(M, N). If full_matrices = False, svd op will use a economic method to store U and V. which means shape of U is `[..., N, K]`, shape of V is `[..., M, K]`. K = min(M, N). name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tuple of 3 tensors: (U, S, VH). VH is the conjugate transpose of V. S is the singlar value vectors of matrics with shape `[..., K]` Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [1.0, 3.0], [4.0, 6.0]]).astype('float64') x = x.reshape([3, 2]) u, s, vh = paddle.linalg.svd(x) print (u) #U = [[ 0.27364809, -0.21695147 ], # [ 0.37892198, -0.87112408 ], # [ 0.8840446 , 0.44053933 ]] print (s) #S = [8.14753743, 0.78589688] print (vh) #VT= [[ 0.51411221, 0.85772294], # [ 0.85772294, -0.51411221]] # one can verify : U * S * VT == X # U * UH == I # V * VH == I """ if paddle.in_dynamic_mode(): return _C_ops.svd(x, 'full_matrices', full_matrices) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'svd') check_type(full_matrices, 'full_matrices', bool, 'svd') helper = LayerHelper('svd', **locals()) u = helper.create_variable_for_type_inference(dtype=x.dtype) vh = helper.create_variable_for_type_inference(dtype=x.dtype) s = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['full_matrices'] = full_matrices helper.append_op( type='svd', inputs={'X': [x]}, outputs={'U': u, 'VH': vh, 'S': s}, attrs=attrs, ) return u, s, vh def matrix_power(x, n, name=None): r""" Computes the n-th power of a square matrix or a batch of square matrices. Let :math:`X` be a sqaure matrix or a batch of square matrices, :math:`n` be an exponent, the equation should be: .. math:: Out = X ^ {n} Specifically, - If `n > 0`, it returns the matrix or a batch of matrices raised to the power of `n`. - If `n = 0`, it returns the identity matrix or a batch of identity matrices. - If `n < 0`, it returns the inverse of each matrix (if invertible) raised to the power of `abs(n)`. 
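        For an invertible `x`, a negative `n` is equivalent to raising the inverse to `abs(n)`;
        as an illustrative cross-check (assuming `paddle.inverse` is available),
        `paddle.linalg.matrix_power(paddle.inverse(x), 2)` should match the `matrix_power(x, -2)`
        result shown in the example below.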
Args: x (Tensor): A square matrix or a batch of square matrices to be raised to power `n`. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. n (int): The exponent. It can be any positive, negative integer or zero. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The n-th power of the matrix (or the batch of matrices) `x`. Its data type should be the same as that of `x`. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1, 2, 3], [1, 4, 9], [1, 8, 27]], dtype='float64') print(paddle.linalg.matrix_power(x, 2)) # [[6. , 34. , 102.], # [14. , 90. , 282.], # [36. , 250., 804.]] print(paddle.linalg.matrix_power(x, 0)) # [[1., 0., 0.], # [0., 1., 0.], # [0., 0., 1.]] print(paddle.linalg.matrix_power(x, -2)) # [[ 12.91666667, -12.75000000, 2.83333333 ], # [-7.66666667 , 8. , -1.83333333 ], # [ 1.80555556 , -1.91666667 , 0.44444444 ]] """ if paddle.in_dynamic_mode(): return _C_ops.matrix_power(x, "n", n) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'matrix_power') check_type(n, 'n', int, 'matrix_power') helper = LayerHelper('matrix_power', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matrix_power', inputs={'X': x}, outputs={'Out': out}, attrs={'n': n}) return out def qr(x, mode="reduced", name=None): r""" Computes the QR decomposition of one matrix or batches of matrice (backward is unsupported now). Args: x (Tensor): The input tensor. Its shape should be `[..., M, N]`, where ... is zero or more batch dimensions. M and N can be arbitrary positive number. The data type of x should be float32 or float64. mode (str, optional): A flag to control the behavior of qr, the default is "reduced". Suppose x's shape is `[..., M, N]` and denoting `K = min(M, N)`: If mode = "reduced", qr op will return reduced Q and R matrices, which means Q's shape is `[..., M, K]` and R's shape is `[..., K, N]`. If mode = "complete", qr op will return complete Q and R matrices, which means Q's shape is `[..., M, M]` and R's shape is `[..., M, N]`. If mode = "r", qr op will only return reduced R matrix, which means R's shape is `[..., K, N]`. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: If mode = "reduced" or mode = "complete", qr will return a two tensor-tuple, which represents Q and R. If mode = "r", qr will return a tensor which represents R. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') q, r = paddle.linalg.qr(x) print (q) print (r) # Q = [[-0.16903085, 0.89708523], # [-0.50709255, 0.27602622], # [-0.84515425, -0.34503278]]) # R = [[-5.91607978, -7.43735744], # [ 0. 
, 0.82807867]]) # one can verify : X = Q * R ; """ if paddle.in_dynamic_mode(): q, r = _C_ops.qr(x, 'mode', mode) if mode == "r": return r else: return q, r check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'qr') check_type(mode, 'mode', str, 'qr') helper = LayerHelper('qr', **locals()) q = helper.create_variable_for_type_inference(dtype=x.dtype) r = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['mode'] = mode helper.append_op( type='qr', inputs={'X': [x]}, outputs={'Q': q, 'R': r}, attrs=attrs) if mode == "r": return r else: return q, r def lu(x, pivot=True, get_infos=False, name=None): r""" Computes the LU factorization of an N-D(N>=2) matrix x. Returns the LU factorization(inplace x) and Pivots. low triangular matrix L and upper triangular matrix U are combined to a single LU matrix. Pivoting is done if pivot is set to True. P mat can be get by pivots: # ones = eye(rows) #eye matrix of rank rows # for i in range(cols): # swap(ones[i], ones[pivots[i]]) # return ones Args: X (Tensor): the tensor to factor of N-dimensions(N>=2). pivot (bool, optional): controls whether pivoting is done. Default: True. get_infos (bool, optional): if set to True, returns an info IntTensor. Default: False. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: factorization (Tensor): LU matrix, the factorization of input X. pivots (IntTensor): the pivots of size(∗(N-2), min(m,n)). `pivots` stores all the intermediate transpositions of rows. The final permutation `perm` could be reconstructed by this, details refer to upper example. infos (IntTensor, optional): if `get_infos` is `True`, this is a tensor of size (∗(N-2)) where non-zero values indicate whether factorization for the matrix or each minibatch has succeeded or failed. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') lu,p,info = paddle.linalg.lu(x, get_infos=True) # >>> lu: # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0.20000000, 0.80000000], # [0.60000000, 0.50000000]]) # >>> p # Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # [3, 3]) # >>> info # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # 0) P,L,U = paddle.linalg.lu_unpack(lu,p) # >>> P # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[0., 1., 0.], # [0., 0., 1.], # [1., 0., 0.]]), # >>> L # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[1. , 0. ], # [0.20000000, 1. ], # [0.60000000, 0.50000000]]), # >>> U # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0. 
, 0.80000000]])) # one can verify : X = P @ L @ U ; """ if paddle.in_dynamic_mode(): LU, Piv, Info = _C_ops.lu(x, 'pivots', pivot) if get_infos: return LU, Piv, Info else: return LU, Piv check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu') helper = LayerHelper('lu', **locals()) lu = helper.create_variable_for_type_inference(dtype=x.dtype) p = helper.create_variable_for_type_inference(dtype='int') info = helper.create_variable_for_type_inference(dtype='int') attrs = dict() attrs['pivots'] = pivot helper.append_op( type='lu', inputs={'X': x}, outputs={'Out': lu, 'Pivots': p, 'Infos': info}, attrs=attrs) if get_infos: return lu, p, info else: return lu, p def lu_unpack(x, y, unpack_ludata=True, unpack_pivots=True, name=None): r""" Unpack L U and P to single matrix tensor . unpack L and U matrix from LU, unpack permutation matrix P from Pivtos . P mat can be get by pivots: # ones = eye(rows) #eye matrix of rank rows # for i in range(cols): # swap(ones[i], ones[pivots[i]]) Args: x (Tensor): The LU tensor get from paddle.linalg.lu, which is combined by L and U. y (Tensor): Pivots get from paddle.linalg.lu. unpack_ludata (bool,optional): whether to unpack L and U from x. Default: True. unpack_pivots (bool, optional): whether to unpack permutation matrix P from Pivtos. Default: True. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: P (Tensor): Permutation matrix P of lu factorization. L (Tensor): The lower triangular matrix tensor of lu factorization. U (Tensor): The upper triangular matrix tensor of lu factorization. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') lu,p,info = paddle.linalg.lu(x, get_infos=True) # >>> lu: # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0.20000000, 0.80000000], # [0.60000000, 0.50000000]]) # >>> p # Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # [3, 3]) # >>> info # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # 0) P,L,U = paddle.linalg.lu_unpack(lu,p) # >>> P # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[0., 1., 0.], # [0., 0., 1.], # [1., 0., 0.]]), # >>> L # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[1. , 0. ], # [0.20000000, 1. ], # [0.60000000, 0.50000000]]), # >>> U # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0. , 0.80000000]])) # one can verify : X = P @ L @ U ; """ if paddle.in_dynamic_mode(): P, L, U = _C_ops.lu_unpack(x, y, 'unpack_ludata', unpack_ludata, 'unpack_pivots', unpack_pivots) return P, L, U check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu_unpack') helper = LayerHelper('lu_unpack', **locals()) p = helper.create_variable_for_type_inference(dtype=x.dtype) l = helper.create_variable_for_type_inference(dtype=x.dtype) u = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['unpack_ludata'] = unpack_ludata attrs['unpack_pivots'] = unpack_pivots helper.append_op( type='lu_unpack', inputs={'X': x, 'Pivots': y}, outputs={'Pmat': p, 'L': l, 'U': u}, attrs=attrs) return p, l, u def eig(x, name=None): """ This API performs the eigenvalue decomposition of a square matrix or a batch of square matrices. .. 
note:: If the matrix is a Hermitian or a real symmetric matrix, please use :ref:`paddle.linalg.eigh` instead, which is much faster. If only eigenvalues is needed, please use :ref:`paddle.linalg.eigvals` instead. If the matrix is of any shape, please use :ref:`paddle.linalg.svd`. This API is only supported on CPU device. The output datatype is always complex for both real and complex input. Args: x (Tensor): A tensor with shape math:`[*, N, N]`, The data type of the x should be one of ``float32``, ``float64``, ``compplex64`` or ``complex128``. name (str, optional): The default value is `None`. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Eigenvalues(Tensors): A tensor with shape math:`[*, N]` refers to the eigen values. Eigenvectors(Tensors): A tensor with shape math:`[*, N, N]` refers to the eigen vectors. Examples: .. code-block:: python import paddle import numpy as np paddle.device.set_device("cpu") x_data = np.array([[1.6707249, 7.2249975, 6.5045543], [9.956216, 8.749598, 6.066444 ], [4.4251957, 1.7983172, 0.370647 ]]).astype("float32") x = paddle.to_tensor(x_data) w, v = paddle.linalg.eig(x) print(w) # Tensor(shape=[3, 3], dtype=complex128, place=CPUPlace, stop_gradient=False, # [[(-0.5061363550800655+0j) , (-0.7971760990842826+0j) , # (0.18518077798279986+0j)], # [(-0.8308237755993192+0j) , (0.3463813401919749+0j) , # (-0.6837005269141947+0j) ], # [(-0.23142567697893396+0j), (0.4944999840400175+0j) , # (0.7058765252952796+0j) ]]) print(v) # Tensor(shape=[3], dtype=complex128, place=CPUPlace, stop_gradient=False, # [ (16.50471283351188+0j) , (-5.5034820550763515+0j) , # (-0.21026087843552282+0j)]) """ if paddle.in_dynamic_mode(): w, v = _C_ops.eig(x) return w, v check_variable_and_dtype( x, 'X', ['float32', 'float64', 'complex64', 'complex128'], 'eig') helper = LayerHelper('eig', **locals()) w = helper.create_variable_for_type_inference(x.dtype) v = helper.create_variable_for_type_inference(x.dtype) inputs = {'X': x} outputs = {'Eigenvalues': w, 'Eigenvectors': v} helper.append_op(type='eig', inputs=inputs, outputs=outputs) return w, v def eigvals(x, name=None): """ Compute the eigenvalues of one or more general matrices. Warning: The gradient kernel of this operator does not yet developed. If you need back propagation through this operator, please replace it with paddle.linalg.eig. Args: x (Tensor): A square matrix or a batch of square matrices whose eigenvalues will be computed. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32, float64, complex64, or complex128. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: A tensor containing the unsorted eigenvalues which has the same batch dimensions with `x`. The eigenvalues are complex-valued even when `x` is real. Examples: .. 
code-block:: python import paddle paddle.set_device("cpu") paddle.seed(1234) x = paddle.rand(shape=[3, 3], dtype='float64') # [[0.02773777, 0.93004224, 0.06911496], # [0.24831591, 0.45733623, 0.07717843], # [0.48016702, 0.14235102, 0.42620817]]) print(paddle.linalg.eigvals(x)) # [(-0.27078833542132674+0j), (0.29962280156230725+0j), (0.8824477020120244+0j)] #complex128 """ check_variable_and_dtype(x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigvals') x_shape = list(x.shape) if len(x_shape) < 2: raise ValueError( "The dimension of Input(x) should be at least 2, but received x's dimention = {}, x's shape = {}". format(len(x_shape), x_shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The last two dimensions of Input(x) should be equal, but received x's shape = {}". format(x_shape)) if paddle.in_dynamic_mode(): return _C_ops.eigvals(x) helper = LayerHelper('eigvals', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type='eigvals', inputs={'X': x}, outputs={'Out': out}) return out def multi_dot(x, name=None): """ Multi_dot is an operator that calculates multiple matrix multiplications. Supports inputs of float16(only GPU support), float32 and float64 dtypes. This function does not support batched inputs. The input tensor in [x] must be 2-D except for the first and last can be 1-D. If the first tensor is a 1-D vector of shape(n, ) it is treated as row vector of shape(1, n), similarly if the last tensor is a 1D vector of shape(n, ), it is treated as a column vector of shape(n, 1). If the first and last tensor are 2-D matrix, then the output is also 2-D matrix, otherwise the output is a 1-D vector. Multi_dot will select the lowest cost multiplication order for calculation. The cost of multiplying two matrices with shapes (a, b) and (b, c) is a * b * c. Given matrices A, B, C with shapes (20, 5), (5, 100), (100, 10) respectively, we can calculate the cost of different multiplication orders as follows: - Cost((AB)C) = 20x5x100 + 20x100x10 = 30000 - Cost(A(BC)) = 5x100x10 + 20x5x10 = 6000 In this case, multiplying B and C first, then multiply A, which is 5 times faster than sequential calculation. Args: x ([Tensor]): The input tensors which is a list Tensor. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The output Tensor. Examples: .. 
code-block:: python import paddle import numpy as np # A * B A_data = np.random.random([3, 4]).astype(np.float32) B_data = np.random.random([4, 5]).astype(np.float32) A = paddle.to_tensor(A_data) B = paddle.to_tensor(B_data) out = paddle.linalg.multi_dot([A, B]) print(out.numpy().shape) # [3, 5] # A * B * C A_data = np.random.random([10, 5]).astype(np.float32) B_data = np.random.random([5, 8]).astype(np.float32) C_data = np.random.random([8, 7]).astype(np.float32) A = paddle.to_tensor(A_data) B = paddle.to_tensor(B_data) C = paddle.to_tensor(C_data) out = paddle.linalg.multi_dot([A, B, C]) print(out.numpy().shape) # [10, 7] """ if paddle.in_dynamic_mode(): return _C_ops.multi_dot(x) check_type(x, 'x', (list, tuple), 'multi_dot') for id, item in enumerate(x): check_variable_and_dtype(item, 'x[' + str(id) + ']', ['float16', 'float32', 'float64'], 'multi_dot') if item.dtype != x[0].dtype: raise TypeError( "All the Tensors in the input must have the same data type.") helper = LayerHelper('multi_dot', **locals()) dtype = helper.input_dtype(input_param_name='x') out = helper.create_variable_for_type_inference(dtype) helper.append_op(type='multi_dot', inputs={"X": x}, outputs={"Out": out}) return out def eigh(x, UPLO='L', name=None): """ Compute the eigenvalues and eigenvectors of a complex Hermitian (conjugate symmetric) or a real symmetric matrix. Args: x (Tensor): A tensor with shape :math:`[*, N, N]` , The data type of the input Tensor x should be one of float32, float64, complex64, complex128. UPLO(str, optional): (string, default 'L'), 'L' represents the lower triangular matrix, "'U' represents the upper triangular matrix.". name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: out_value(Tensor): A Tensor with shape [*, N] and data type of float32 and float64. The eigenvalues of eigh op. out_vector(Tensor): A Tensor with shape [*, N, N] and data type of float32,float64,complex64 and complex128. The eigenvectors of eigh op. Examples: .. code-block:: python import numpy as np import paddle x_data = np.array([[1, -2j], [2j, 5]]) x = paddle.to_tensor(x_data) out_value, out_vector = paddle.linalg.eigh(x, UPLO='L') print(out_value) #[0.17157288, 5.82842712] print(out_vector) #[(-0.9238795325112867+0j), (-0.3826834323650898+0j)], #[ 0.3826834323650898j , -0.9238795325112867j ]] """ if paddle.in_dynamic_mode(): return _C_ops.eigh(x, 'UPLO', UPLO) def __check_input(x, UPLO): x_shape = list(x.shape) if len(x.shape) < 2: raise ValueError( "Input(input) only support >=2 tensor, but received " "length of Input(input) is %s." % len(x.shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The input matrix must be batches of square matrices. But received x's dimention: {}". format(x_shape)) if UPLO != 'L' and UPLO != 'U': raise ValueError( "UPLO must be L or U. 
But received UPLO is: {}".format(UPLO)) __check_input(x, UPLO) helper = LayerHelper('eigh', **locals()) check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigh') out_value = helper.create_variable_for_type_inference(dtype=x.dtype) out_vector = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='eigh', inputs={'X': x}, outputs={'Eigenvalues': out_value, 'Eigenvectors': out_vector}, attrs={'UPLO': UPLO}) return out_value, out_vector def pinv(x, rcond=1e-15, hermitian=False, name=None): r""" Calculate pseudo inverse via SVD(singular value decomposition) of one matrix or batches of regular matrix. .. math:: if hermitian == False: x = u * s * vt (SVD) out = v * 1/s * ut else: x = u * s * ut (eigh) out = u * 1/s * u.conj().transpose(-2,-1) If x is hermitian or symmetric matrix, svd will be replaced with eigh. Args: x(Tensor): The input tensor. Its shape should be (*, m, n) where * is zero or more batch dimensions. m and n can be arbitraty positive number. The data type of x should be float32 or float64 or complex64 or complex128. When data type is complex64 or cpmplex128, hermitian should be set True. rcond(Tensor, optional): the tolerance value to determine when is a singular value zero. Defalut:1e-15. hermitian(bool, optional): indicates whether x is Hermitian if complex or symmetric if real. Default: False. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The tensor with same data type with x. it represents pseudo inverse of x. Its shape should be (*, n, m). Examples: .. code-block:: python import paddle x = paddle.arange(15).reshape((3, 5)).astype('float64') input = paddle.to_tensor(x) out = paddle.linalg.pinv(input) print(input) print(out) # input: # [[0. , 1. , 2. , 3. , 4. ], # [5. , 6. , 7. , 8. , 9. 
], # [10., 11., 12., 13., 14.]] # out: # [[-0.22666667, -0.06666667, 0.09333333], # [-0.12333333, -0.03333333, 0.05666667], # [-0.02000000, 0.00000000, 0.02000000], # [ 0.08333333, 0.03333333, -0.01666667], # [ 0.18666667, 0.06666667, -0.05333333]] # one can verify : x * out * x = x ; # or out * x * out = x ; """ if paddle.in_dynamic_mode(): if not hermitian: # combine svd and matmul op u, s, vt = _C_ops.svd(x, 'full_matrices', False) max_singular_val = _C_ops.reduce_max(s, 'dim', [-1], 'keep_dim', True, \ 'reduce_all', False) rcond = paddle.to_tensor(rcond, dtype=x.dtype) cutoff = rcond * max_singular_val y = float('inf') y = paddle.to_tensor(y, dtype=x.dtype) condition = s > cutoff cond_int = layers.cast(condition, s.dtype) cond_not_int = layers.cast(layers.logical_not(condition), s.dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st, _ = _C_ops.unsqueeze2(singular, 'axes', [-2]) dims = list(range(len(vt.shape))) perm = dims[:-2] + [dims[-1]] + [dims[-2]] v, _ = _C_ops.transpose2(vt, 'axis', perm) out_1 = v * st out_2 = _C_ops.matmul_v2(out_1, u, 'trans_x', False, 'trans_y', True) return out_2 else: # combine eigh and matmul op s, u = _C_ops.eigh(x, 'UPLO', 'L') s_abs = paddle.abs(s) max_singular_val = _C_ops.reduce_max(s_abs, 'dim', [-1], 'keep_dim', True, \ 'reduce_all', False) rcond = paddle.to_tensor(rcond, dtype=s.dtype) cutoff = rcond * max_singular_val y = float('inf') y = paddle.to_tensor(y, dtype=s.dtype) condition = s_abs > cutoff cond_int = layers.cast(condition, s.dtype) cond_not_int = layers.cast(layers.logical_not(condition), s.dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st, _ = _C_ops.unsqueeze2(singular, 'axes', [-2]) out_1 = u * st u_conj = _C_ops.conj(u) out_2 = _C_ops.matmul_v2(out_1, u_conj, 'trans_x', False, 'trans_y', True) return out_2 else: if not hermitian: helper = LayerHelper('pinv', **locals()) dtype = x.dtype check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'pinv') u = helper.create_variable_for_type_inference(dtype) s = helper.create_variable_for_type_inference(dtype) vt = helper.create_variable_for_type_inference(dtype) helper.append_op( type='svd', inputs={'X': [x]}, outputs={'U': u, 'VH': vt, 'S': s}, attrs={'full_matrices': False}, ) max_singular_val = helper.create_variable_for_type_inference(dtype) helper.append_op( type='reduce_max', inputs={'X': s}, outputs={'Out': max_singular_val}, attrs={'dim': [-1], 'keep_dim': True, 'reduce_all': False}) rcond = layers.fill_constant(shape=[1], value=rcond, dtype=dtype) cutoff = rcond * max_singular_val y = float('inf') y = layers.fill_constant(shape=[1], value=y, dtype=dtype) condition = s > cutoff cond_int = layers.cast(condition, dtype) cond_not_int = layers.cast(layers.logical_not(condition), dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st = helper.create_variable_for_type_inference(dtype=dtype) st_shape = helper.create_variable_for_type_inference(dtype=dtype) helper.append_op( type='unsqueeze2', inputs={'X': singular}, attrs={'axes': [-2]}, outputs={'Out': st, 'XShape': st_shape}) dims = list(range(len(vt.shape))) perm = dims[:-2] + [dims[-1]] + [dims[-2]] v = helper.create_variable_for_type_inference(dtype) v_shape = helper.create_variable_for_type_inference(dtype) helper.append_op( 
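                # static-graph counterpart of the dynamic branch above: transpose vt back to v,
                # scale its columns by the clipped reciprocal singular values st, then matmul with
                # u (trans_y=True) to form v * diag(1/s) * u^T, i.e. the pseudo inverse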
type='transpose2', inputs={'X': [vt]}, outputs={'Out': [v], 'XShape': [v_shape]}, attrs={'axis': perm}) out_1 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='elementwise_mul', inputs={'X': v, 'Y': st}, outputs={'Out': out_1}, attrs={'axis': -1, 'use_mkldnn': False}) out_1 = helper.append_activation(out_1) out_2 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='matmul_v2', inputs={'X': out_1, 'Y': u}, outputs={'Out': out_2}, attrs={'trans_x': False, 'trans_y': True}, ) return out_2 else: helper = LayerHelper('pinv', **locals()) dtype = x.dtype check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'pinv') if dtype == paddle.complex128: s_type = 'float64' elif dtype == paddle.complex64: s_type = 'float32' else: s_type = dtype u = helper.create_variable_for_type_inference(dtype) s = helper.create_variable_for_type_inference(s_type) helper.append_op( type='eigh', inputs={'X': x}, outputs={'Eigenvalues': s, 'Eigenvectors': u}, attrs={'UPLO': 'L'}) s_abs = helper.create_variable_for_type_inference(s_type) helper.append_op( type='abs', inputs={'X': s}, outputs={'Out': s_abs}) max_singular_val = helper.create_variable_for_type_inference(s_type) helper.append_op( type='reduce_max', inputs={'X': s_abs}, outputs={'Out': max_singular_val}, attrs={'dim': [-1], 'keep_dim': True, 'reduce_all': False}) rcond = layers.fill_constant(shape=[1], value=rcond, dtype=s_type) cutoff = rcond * max_singular_val y = float('inf') y = layers.fill_constant(shape=[1], value=y, dtype=s_type) condition = s_abs > cutoff cond_int = layers.cast(condition, s_type) cond_not_int = layers.cast(layers.logical_not(condition), s_type) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st = helper.create_variable_for_type_inference(dtype=s_type) st_shape = helper.create_variable_for_type_inference(dtype=s_type) helper.append_op( type='unsqueeze2', inputs={'X': singular}, attrs={'axes': [-2]}, outputs={'Out': st, 'XShape': st_shape}) out_1 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='elementwise_mul', inputs={'X': u, 'Y': st}, outputs={'Out': out_1}, attrs={'axis': -1, 'use_mkldnn': False}) out_1 = helper.append_activation(out_1) u_conj = helper.create_variable_for_type_inference(dtype) helper.append_op( type='conj', inputs={'X': u}, outputs={'Out': [u_conj]}) out_2 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='matmul_v2', inputs={'X': out_1, 'Y': u_conj}, outputs={'Out': out_2}, attrs={'trans_x': False, 'trans_y': True}, ) return out_2 def solve(x, y, name=None): r""" Computes the solution of a square system of linear equations with a unique solution for input 'X' and 'Y'. Let :math: `X` be a sqaure matrix or a batch of square matrices, :math:`Y` be a vector/matrix or a batch of vectors/matrices, the equation should be: .. math:: Out = X^-1 * Y Specifically, - This system of linear equations has one solution if and only if input 'X' is invertible. Args: x (Tensor): A square matrix or a batch of square matrices. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): A vector/matrix or a batch of vectors/matrices. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. name(str, optional): Name for the operation (optional, default is None). 
For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of a square system of linear equations with a unique solution for input 'x' and 'y'. Its data type should be the same as that of `x`. Examples: .. code-block:: python # a square system of linear equations: # 2*X0 + X1 = 9 # X0 + 2*X1 = 8 import paddle import numpy as np np_x = np.array([[3, 1],[1, 2]]) np_y = np.array([9, 8]) x = paddle.to_tensor(np_x, dtype="float64") y = paddle.to_tensor(np_y, dtype="float64") out = paddle.linalg.solve(x, y) print(out) # [2., 3.]) """ if paddle.in_dynamic_mode(): return _C_ops.solve(x, y) inputs = {"X": [x], "Y": [y]} helper = LayerHelper("solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type="solve", inputs={"X": x, "Y": y}, outputs={"Out": out}) return out def triangular_solve(x, y, upper=True, transpose=False, unitriangular=False, name=None): r""" Computes the solution of a system of equations with a triangular coefficient matrix `x` and multiple right-hand sides `y` . Input `x` and `y` is 2D matrices or batches of 2D matrices. If the inputs are batches, the outputs is also batches. Args: x (Tensor): The input triangular coefficient matrix. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. upper (bool, optional): Whether to solve the upper-triangular system of equations (default) or the lower-triangular system of equations. Default: True. transpose (bool, optional): whether `x` should be transposed before calculation. Default: False. unitriangular (bool, optional): whether `x` is unit triangular. If True, the diagonal elements of `x` are assumed to be 1 and not referenced from `x` . Default: False. name(str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of the system of equations. Its data type should be the same as that of `x`. Examples: .. code-block:: python # a square system of linear equations: # x1 + x2 + x3 = 0 # 2*x2 + x3 = -9 # -x3 = 5 import paddle import numpy as np x = paddle.to_tensor([[1, 1, 1], [0, 2, 1], [0, 0,-1]], dtype="float64") y = paddle.to_tensor([[0], [-9], [5]], dtype="float64") out = paddle.linalg.triangular_solve(x, y, upper=True) print(out) # [7, -2, -5] """ if paddle.in_dynamic_mode(): return _C_ops.triangular_solve(x, y, 'upper', upper, 'transpose', transpose, 'unitriangular', unitriangular) inputs = {"X": [x], "Y": [y]} helper = LayerHelper("triangular_solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'triangular_solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'triangular_solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='triangular_solve', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs={ 'upper': upper, 'transpose': transpose, 'unitriangular': unitriangular }) return out def cholesky_solve(x, y, upper=False, name=None): r""" Solves a linear system of equations A @ X = B, given A's Cholesky factor matrix u and matrix B. Input `x` and `y` is 2D matrices or batches of 2D matrices. 
If the inputs are batches, the outputs is also batches. Args: x (Tensor): The input matrix which is upper or lower triangular Cholesky factor of square matrix A. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. upper (bool, optional): whether to consider the Cholesky factor as a lower or upper triangular matrix. Default: False. name(str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of the system of equations. Its data type is the same as that of `x`. Examples: .. code-block:: python import paddle u = paddle.to_tensor([[1, 1, 1], [0, 2, 1], [0, 0,-1]], dtype="float64") b = paddle.to_tensor([[0], [-9], [5]], dtype="float64") out = paddle.linalg.cholesky_solve(b, u, upper=True) print(out) # [-2.5, -7, 9.5] """ if paddle.in_dynamic_mode(): return _C_ops.cholesky_solve(x, y, 'upper', upper) helper = LayerHelper("cholesky_solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'cholesky_solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'cholesky_solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='cholesky_solve', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs={'upper': upper}) return out def eigvalsh(x, UPLO='L', name=None): """ Computes the eigenvalues of a complex Hermitian (conjugate symmetric) or a real symmetric matrix. Args: x (Tensor): A tensor with shape :math:`[_, M, M]` , The data type of the input Tensor x should be one of float32, float64, complex64, complex128. UPLO(str, optional): Lower triangular part of a (‘L’, default) or the upper triangular part (‘U’). name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor eigenvalues in ascending order. Examples: .. code-block:: python import numpy as np import paddle x_data = np.array([[1, -2j], [2j, 5]]) x = paddle.to_tensor(x_data) out_value = paddle.eigvalsh(x, UPLO='L') print(out_value) #[0.17157288, 5.82842712] """ if paddle.in_dynamic_mode(): is_test = x.stop_gradient values, _ = _C_ops.eigvalsh(x, 'UPLO', UPLO, 'is_test', is_test) return values def __check_input(x, UPLO): x_shape = list(x.shape) if len(x.shape) < 2: raise ValueError( "Input(input) only support >=2 tensor, but received " "length of Input(input) is %s." % len(x.shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The input matrix must be batches of square matrices. But received x's dimention: {}". format(x_shape)) if UPLO != 'L' and UPLO != 'U': raise ValueError( "UPLO must be L or U. 
But received UPLO is: {}".format(UPLO)) __check_input(x, UPLO) helper = LayerHelper('eigvalsh', **locals()) check_variable_and_dtype(x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigvalsh') out_value = helper.create_variable_for_type_inference(dtype=x.dtype) out_vector = helper.create_variable_for_type_inference(dtype=x.dtype) is_test = x.stop_gradient helper.append_op( type='eigvalsh', inputs={'X': x}, outputs={'Eigenvalues': out_value, 'Eigenvectors': out_vector}, attrs={'UPLO': UPLO, 'is_test': is_test}) return out_value def lstsq(x, y, rcond=None, driver=None, name=None): """ Computes a solution to the least squares problem of a system of linear equations. Args: x (Tensor): A tensor with shape ``(*, M, N)`` , the data type of the input Tensor ``x`` should be one of float32, float64. y (Tensor): A tensor with shape ``(*, M, K)`` , the data type of the input Tensor ``y`` should be one of float32, float64. rcond(float, optional): The default value is None. A float pointing number used to determine the effective rank of ``x``. If ``rcond`` is None, it will be set to max(M, N) times the machine precision of x_dtype. driver(str, optional): The default value is None. The name of LAPACK method to be used. For CPU inputs the valid values are ‘gels’, ‘gelsy’, ‘gelsd, ‘gelss’. For CUDA input, the only valid driver is ‘gels’. If ``driver`` is None, ‘gelsy’ is used for CPU inputs and ‘gels’ for CUDA inputs. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tuple: A tuple of 4 Tensors which is (``solution``, ``residuals``, ``rank``, ``singular_values``). ``solution`` is a tensor with shape ``(*, N, K)``, meaning the least squares solution. ``residuals`` is a tensor with shape ``(*, K)``, meaning the squared residuals of the solutions, which is computed when M > N and every matrix in ``x`` is full-rank, otherwise return an empty tensor. ``rank`` is a tensor with shape ``(*)``, meaning the ranks of the matrices in ``x``, which is computed when ``driver`` in (‘gelsy’, ‘gelsd’, ‘gelss’), otherwise return an empty tensor. ``singular_values`` is a tensor with shape ``(*, min(M, N))``, meaning singular values of the matrices in ``x``, which is computed when ``driver`` in (‘gelsd’, ‘gelss’), otherwise return an empty tensor. Examples: .. code-block:: python import paddle paddle.set_device("cpu") x = paddle.to_tensor([[1, 3], [3, 2], [5, 6.]]) y = paddle.to_tensor([[3, 4, 6], [5, 3, 4], [1, 2, 1.]]) results = paddle.linalg.lstsq(x, y, driver="gelsd") print(results[0]) # [[ 0.78350395, -0.22165027, -0.62371236], # [-0.11340097, 0.78866047, 1.14948535]] print(results[1]) # [19.81443405, 10.43814468, 30.56185532]) print(results[2]) # 2 print(results[3]) # [9.03455734, 1.54167950] x = paddle.to_tensor([[10, 2, 3], [3, 10, 5], [5, 6, 12.]]) y = paddle.to_tensor([[4, 2, 9], [2, 0, 3], [2, 5, 3.]]) results = paddle.linalg.lstsq(x, y, driver="gels") print(results[0]) # [[ 0.39386186, 0.10230173, 0.93606132], # [ 0.10741687, -0.29028133, 0.11892585], # [-0.05115091, 0.51918161, -0.19948854]] print(results[1]) # [] """ device = paddle.get_device() if device == "cpu": if driver not in (None, "gels", "gelss", "gelsd", "gelsy"): raise ValueError( "Only support valid driver is 'gels', 'gelss', 'gelsd', 'gelsy' or None for CPU inputs. But got {}". 
format(driver)) driver = "gelsy" if driver is None else driver elif "gpu" in device: if driver not in (None, "gels"): raise ValueError( "Only support valid driver is 'gels' or None for CUDA inputs. But got {}". format(driver)) driver = "gels" if driver is None else driver else: raise RuntimeError("Only support lstsq api for CPU or CUDA device.") if x.dtype == y.dtype and x.dtype in (paddle.float32, paddle.float64): pass else: raise ValueError( "Only support x and y have the same dtype such as 'float32' and 'float64'." ) if rcond is None: if x.dtype == paddle.float32: rcond = 1e-7 * max(x.shape[-2], x.shape[-1]) elif x.dtype == paddle.float64: rcond = 1e-15 * max(x.shape[-2], x.shape[-1]) if paddle.in_dynamic_mode(): solution, rank, singular_values = _C_ops.lstsq(x, y, "rcond", rcond, "driver", driver) if x.shape[-2] > x.shape[-1]: matmul_out = _varbase_creator(dtype=x.dtype) _C_ops.matmul(x, solution, matmul_out, 'trans_x', False, 'trans_y', False) minus_out = _C_ops.elementwise_sub(matmul_out, y) pow_out = _C_ops.pow(minus_out, 'factor', 2) residuals = _C_ops.reduce_sum(pow_out, 'dim', [-2], 'keepdim', False, 'reduce_all', False) else: residuals = paddle.empty(shape=[0], dtype=x.dtype) if driver == "gels": rank = paddle.empty(shape=[0], dtype=paddle.int32) singular_values = paddle.empty(shape=[0], dtype=x.dtype) elif driver == "gelsy": singular_values = paddle.empty(shape=[0], dtype=x.dtype) return solution, residuals, rank, singular_values helper = LayerHelper('lstsq', **locals()) check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq') check_variable_and_dtype( y, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq') solution = helper.create_variable_for_type_inference(dtype=x.dtype) residuals = helper.create_variable_for_type_inference(dtype=x.dtype) rank = helper.create_variable_for_type_inference(dtype=paddle.int32) singular_values = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='lstsq', inputs={'X': x, 'Y': y}, outputs={ 'Solution': solution, 'Rank': rank, 'SingularValues': singular_values }, attrs={'rcond': rcond, 'driver': driver}) matmul_out = helper.create_variable_for_type_inference(dtype=x.dtype) minus_out = helper.create_variable_for_type_inference(dtype=x.dtype) pow_out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matmul_v2', inputs={'X': x, 'Y': solution}, outputs={'Out': matmul_out}, attrs={ 'trans_x': False, 'trans_y': False, }) helper.append_op( type='elementwise_sub', inputs={'X': matmul_out, 'Y': y}, outputs={'Out': minus_out}) helper.append_op( type='pow', inputs={'X': minus_out}, outputs={'Out': pow_out}, attrs={'factor': 2}) helper.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': residuals}, attrs={'dim': [-2], 'keep_dim': False, 'reduce_all': False}) if driver == "gels": rank = paddle.static.data(name='rank', shape=[0]) singular_values = paddle.static.data(name='singular_values', shape=[0]) elif driver == "gelsy": singular_values = paddle.static.data(name='singular_values', shape=[0]) return solution, residuals, rank, singular_values
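# --- Illustrative sketch (not part of the original source) ---------------------------
# A minimal usage example for paddle.linalg.lstsq defined above, assuming a CPU device
# so the 'gelsd' driver mentioned in the docstring is available; the helper name below
# is hypothetical and exists only for illustration.
def _lstsq_line_fit_sketch():
    import paddle
    paddle.set_device("cpu")
    # fit y = a + b * t through (1, 6), (2, 9), (3, 12); the exact answer is a = b = 3
    A = paddle.to_tensor([[1., 1.], [1., 2.], [1., 3.]])
    b = paddle.to_tensor([[6.], [9.], [12.]])
    solution, residuals, rank, singular_values = paddle.linalg.lstsq(A, b, driver="gelsd")
    # solution is approximately [[3.], [3.]]; residuals are ~0 because the fit is exact
    return solution, residuals, rank, singular_values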
def t(input, name=None): """ Transpose <=2-D tensor. 0-D and 1-D tensors are returned as it is and 2-D tensor is equal to the paddle.transpose function which perm dimensions set 0 and 1. Args: input (Tensor): The input Tensor. It is a N-D (N<=2) Tensor of data types float16, float32, float64, int32. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` Returns: Tensor: A transposed n-D Tensor, with data type being float16, float32, float64, int32, int64. For Example: .. code-block:: text # Example 1 (0-D tensor) x = tensor([0.79]) paddle.t(x) = tensor([0.79]) # Example 2 (1-D tensor) x = tensor([0.79, 0.84, 0.32]) paddle.t(x) = tensor([0.79, 0.84, 0.32]) # Example 3 (2-D tensor) x = tensor([0.79, 0.84, 0.32], [0.64, 0.14, 0.57]) paddle.t(x) = tensor([0.79, 0.64], [0.84, 0.14], [0.32, 0.57]) Examples: .. code-block:: python import paddle x = paddle.ones(shape=[2, 3], dtype='int32') x_transposed = paddle.t(x) print(x_transposed.shape) # [3, 2] """ if len(input.shape) > 2: raise ValueError( "Input(input) only support N-D (N<=2) tensor, but received " "length of Input(input) is %s. Perhaps you can use paddle." "tensor.transpose() instead." % len(input.shape)) if paddle.in_dynamic_mode(): if len(input.shape) == 1: return input # 2-D tensor perm = [1, 0] out, _ = _C_ops.transpose2(input, 'axis', perm) return out check_variable_and_dtype( input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'], 'transpose') helper = LayerHelper('t', **locals()) out = helper.create_variable_for_type_inference(input.dtype) input_shape = helper.create_variable_for_type_inference(input.dtype) if len(input.shape) == 1: out = input else: helper.append_op( type='transpose2', inputs={'X': [input]}, outputs={'Out': [out], 'XShape': [input_shape]}, attrs={'axis': [1, 0]}) return out
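# --- Illustrative sketch (not part of the original source) ---------------------------
# Numerically checks the Moore-Penrose identities quoted in the pinv docstring above
# (x * pinv(x) * x = x and pinv(x) * x * pinv(x) = pinv(x)); the helper name is
# hypothetical and exists only for illustration.
def _pinv_identity_check_sketch():
    import paddle
    x = paddle.arange(15, dtype="float64").reshape((3, 5))
    out = paddle.linalg.pinv(x)
    # both identities should hold up to floating point error
    assert paddle.allclose(paddle.matmul(paddle.matmul(x, out), x), x, atol=1e-6)
    assert paddle.allclose(paddle.matmul(paddle.matmul(out, x), out), out, atol=1e-6)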
1041
1112
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from ..fluid.layer_helper import LayerHelper from ..framework import _varbase_creator, _dygraph_tracer from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype from ..static import Variable from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode from ..fluid.layers import transpose, cast # noqa: F401 from ..fluid import layers import paddle from paddle.common_ops_import import core from paddle.common_ops_import import VarDesc from paddle import _C_ops __all__ = [] def matmul(x, y, transpose_x=False, transpose_y=False, name=None): """ Applies matrix multiplication to two tensors. `matmul` follows the complete broadcast rules, and its behavior is consistent with `np.matmul`. Currently, the input tensors' number of dimensions can be any, `matmul` can be used to achieve the `dot`, `matmul` and `batchmatmul`. The actual behavior depends on the shapes of :math:`x`, :math:`y` and the flag values of :attr:`transpose_x`, :attr:`transpose_y`. Specifically: - If a transpose flag is specified, the last two dimensions of the tensor are transposed. If the tensor is ndim-1 of shape, the transpose is invalid. If the tensor is ndim-1 of shape :math:`[D]`, then for :math:`x` it is treated as :math:`[1, D]`, whereas for :math:`y` it is the opposite: It is treated as :math:`[D, 1]`. The multiplication behavior depends on the dimensions of `x` and `y`. Specifically: - If both tensors are 1-dimensional, the dot product result is obtained. - If both tensors are 2-dimensional, the matrix-matrix product is obtained. - If the `x` is 1-dimensional and the `y` is 2-dimensional, a `1` is prepended to its dimension in order to conduct the matrix multiply. After the matrix multiply, the prepended dimension is removed. - If the `x` is 2-dimensional and `y` is 1-dimensional, the matrix-vector product is obtained. - If both arguments are at least 1-dimensional and at least one argument is N-dimensional (where N > 2), then a batched matrix multiply is obtained. If the first argument is 1-dimensional, a 1 is prepended to its dimension in order to conduct the batched matrix multiply and removed after. If the second argument is 1-dimensional, a 1 is appended to its dimension for the purpose of the batched matrix multiple and removed after. The non-matrix (exclude the last two dimensions) dimensions are broadcasted according the broadcast rule. For example, if input is a (j, 1, n, m) tensor and the other is a (k, m, p) tensor, out will be a (j, k, n, p) tensor. Args: x (Tensor): The input tensor which is a Tensor. y (Tensor): The input tensor which is a Tensor. transpose_x (bool): Whether to transpose :math:`x` before multiplication. transpose_y (bool): Whether to transpose :math:`y` before multiplication. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The output Tensor. Examples: .. 
code-block:: python import paddle import numpy as np # vector * vector x_data = np.random.random([10]).astype(np.float32) y_data = np.random.random([10]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [1] # matrix * vector x_data = np.random.random([10, 5]).astype(np.float32) y_data = np.random.random([5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10] # batched matrix * broadcasted vector x_data = np.random.random([10, 5, 2]).astype(np.float32) y_data = np.random.random([2]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 5] # batched matrix * batched matrix x_data = np.random.random([10, 5, 2]).astype(np.float32) y_data = np.random.random([10, 2, 5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 5, 5] # batched matrix * broadcasted matrix x_data = np.random.random([10, 1, 5, 2]).astype(np.float32) y_data = np.random.random([1, 3, 2, 5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 3, 5, 5] """ if in_dygraph_mode(): return _C_ops.final_state_matmul(x, y, transpose_x, transpose_y) if _in_legacy_dygraph(): op_type = 'matmul_v2' op = getattr(_C_ops, op_type) return op(x, y, 'trans_x', transpose_x, 'trans_y', transpose_y) attrs = { 'trans_x': transpose_x, 'trans_y': transpose_y, } def __check_input(x, y): var_names = {'x': x, 'y': y} for name, val in var_names.items(): check_variable_and_dtype( val, name, ['float16', 'float32', 'float64', 'complex64', 'complex128'], 'matmul') __check_input(x, y) helper = LayerHelper('matmul_v2', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matmul_v2', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs=attrs) return out def norm(x, p='fro', axis=None, keepdim=False, name=None): """ Returns the matrix norm (Frobenius) or vector norm (the 1-norm, the Euclidean or 2-norm, and in general the p-norm for p > 0) of a given tensor. .. note:: This norm API is different from `numpy.linalg.norm`. This api supports high-order input tensors (rank >= 3), and certain axis need to be pointed out to calculate the norm. But `numpy.linalg.norm` only supports 1-D vector or 2-D matrix as input tensor. For p-order matrix norm, this api actually treats matrix as a flattened vector to calculate the vector norm, NOT REAL MATRIX NORM. Args: x (Tensor): The input tensor could be N-D tensor, and the input data type could be float32 or float64. p (float|string, optional): Order of the norm. Supported values are `fro`, `0`, `1`, `2`, `inf`, `-inf` and any positive real number yielding the corresponding p-norm. Not supported: ord < 0 and nuclear norm. Default value is `fro`. axis (int|list|tuple, optional): The axis on which to apply norm operation. If axis is int or list(int)/tuple(int) with only one element, the vector norm is computed over the axis. If `axis < 0`, the dimension to norm operation is rank(input) + axis. If axis is a list(int)/tuple(int) with two elements, the matrix norm is computed over the axis. Defalut value is `None`. keepdim (bool, optional): Whether to reserve the reduced dimension in the output Tensor. 
The result tensor will have fewer dimension than the :attr:`input` unless :attr:`keepdim` is true, default value is False. name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: results of norm operation on the specified axis of input tensor, it's data type is the same as input's Tensor. Examples: .. code-block:: python import paddle import numpy as np shape=[2, 3, 4] np_input = np.arange(24).astype('float32') - 12 np_input = np_input.reshape(shape) x = paddle.to_tensor(np_input) #[[[-12. -11. -10. -9.] [ -8. -7. -6. -5.] [ -4. -3. -2. -1.]] # [[ 0. 1. 2. 3.] [ 4. 5. 6. 7.] [ 8. 9. 10. 11.]]] # compute frobenius norm along last two dimensions. out_fro = paddle.linalg.norm(x, p='fro', axis=[0,1]) # out_fro.numpy() [17.435596 16.911535 16.7332 16.911535] # compute 2-order vector norm along last dimension. out_pnorm = paddle.linalg.norm(x, p=2, axis=-1) #out_pnorm.numpy(): [[21.118711 13.190906 5.477226] # [ 3.7416575 11.224972 19.131126]] # compute 2-order norm along [0,1] dimension. out_pnorm = paddle.linalg.norm(x, p=2, axis=[0,1]) #out_pnorm.numpy(): [17.435596 16.911535 16.7332 16.911535] # compute inf-order norm out_pnorm = paddle.linalg.norm(x, p=np.inf) #out_pnorm.numpy() = [12.] out_pnorm = paddle.linalg.norm(x, p=np.inf, axis=0) #out_pnorm.numpy(): [[12. 11. 10. 9.] [8. 7. 6. 7.] [8. 9. 10. 11.]] # compute -inf-order norm out_pnorm = paddle.linalg.norm(x, p=-np.inf) #out_pnorm.numpy(): [0.] out_pnorm = paddle.linalg.norm(x, p=-np.inf, axis=0) #out_pnorm.numpy(): [[0. 1. 2. 3.] [4. 5. 6. 5.] [4. 3. 2. 1.]] """ def frobenius_norm(input, dim=None, keepdim=False, name=None): """ The frobenius norm OP is to calculate the frobenius norm of certain two dimensions of Tensor `input`. Args: input (Variable): Tensor, data type float32, float64. dim (list, optional): None for last two dimensions. keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False. """ if dim is not None and not (isinstance(dim, list) and len(dim) == 2): raise ValueError( "The dim of frobenius norm op should be None or two elements list!" ) if paddle.in_dynamic_mode(): if dim is None: return _C_ops.frobenius_norm(input, 'keep_dim', keepdim, 'reduce_all', True) return _C_ops.frobenius_norm(input, 'dim', dim, 'keep_dim', keepdim, 'reduce_all', False) attrs = {'dim': dim, 'keep_dim': keepdim, 'reduce_all': False} if dim is None: attrs['reduce_all'] = True check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'frobenius_norm') helper = LayerHelper('frobenius_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op( type='frobenius_norm', inputs={'X': input}, outputs={'Out': out}, attrs=attrs) return out def vector_norm(input, porder=None, axis=None, keepdim=False, asvector=False, name=None): """ Calculate the p-order vector norm for certain dimension of Tensor `input`. Args: input (Variable): Tensor, data type float32, float64. porder (float, optional): None for porder=2.0. axis (int, optional): None for last dimension. keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False. 
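asvector (bool, optional): Whether to treat `input` as a single flattened vector and compute the norm over all of its elements rather than along `axis` (added note, inferred from how the dispatcher below uses it). Default False.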
""" if paddle.in_dynamic_mode(): if axis is None: axis = -1 return _C_ops.p_norm(input, 'porder', porder, 'axis', axis, 'keepdim', keepdim, 'asvector', asvector) if porder is not None: check_type(porder, 'porder', (float, int), 'p_norm') if axis is not None: check_type(axis, 'axis', (int), 'p_norm') check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'p_norm') attrs = { 'axis': axis if axis is not None else -1, 'porder': float(porder) if porder is not None else 2.0, 'keepdim': keepdim, 'asvector': asvector, 'epsilon': 1e-12, } helper = LayerHelper('p_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op( type='p_norm', inputs={'X': input}, outputs={'Out': out}, attrs=attrs) return out def inf_norm(input, porder=None, axis=axis, keepdim=False, asvector=False, name=None): helper = LayerHelper('frobenius_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op(type='abs', inputs={'X': input}, outputs={'Out': out}) reduce_out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) reduce_all = True if axis == None or axis == [] or asvector == True else False axis = axis if axis != None and axis != [] else [0] reduce_type = 'reduce_max' if porder == np.float( 'inf') else 'reduce_min' helper.append_op( type=reduce_type, inputs={'X': out}, outputs={'Out': reduce_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) return reduce_out def p_matrix_norm(input, porder=1., axis=axis, keepdim=False, name=None): """ NOTE: This function actually treats the matrix as flattened vector to calculate vector norm instead of matrix norm. """ block = LayerHelper('norm', **locals()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) abs_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='abs', inputs={'X': input}, outputs={'Out': abs_out}) pow_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='pow', inputs={'X': abs_out}, outputs={'Out': pow_out}, attrs={'factor': porder}) sum_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': sum_out}, attrs={ 'dim': axis, 'keep_dim': keepdim, 'reduce_all': True if axis is None else False }) porder block.append_op( type='pow', inputs={'X': sum_out}, outputs={'Out': out}, attrs={'factor': float(1. / porder)}) return out if axis is None and p is not None: if isinstance(p, str): if p == "fro": return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name) else: raise ValueError( "only valid string values are 'fro', found {}".format(p)) elif isinstance(p, (int, float)): return vector_norm( x, porder=p, axis=axis, keepdim=keepdim, asvector=True, name=name) else: raise ValueError("only valid p type is string or float, found {}". 
format(type(p))) if isinstance(axis, tuple): axis = list(axis) if isinstance(axis, list) and len(axis) == 1: axis = axis[0] #calculate vector norm, where axis is int or list with only one integer if isinstance(axis, int): if isinstance(p, str): if p == "fro": return vector_norm( x, porder=2, axis=axis, keepdim=keepdim, asvector=False, name=name) else: raise ValueError( "only valid string values are 'fro', found {}".format(p)) elif isinstance(p, (int, float)): return vector_norm( x, axis=axis, porder=p, keepdim=keepdim, asvector=False, name=name) else: raise ValueError( "unspport p for p-order vector norm. except float, found {}". format(p)) #calculate matrix norm, where axis is list with two integers elif isinstance(axis, list) and len(axis) == 2: if p == "fro": return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name) elif p == np.inf or p == -np.inf: return inf_norm(x, porder=p, axis=axis, keepdim=keepdim, name=name) elif p == 0: raise ValueError( "just suport axis type int or list (length of list <=1) if p = 0, found {}". format(axis)) else: return p_matrix_norm( x, porder=p, axis=axis, keepdim=keepdim, name=name) else: raise ValueError( "except axis type int or list (length of list <=2), found {}". format(axis)) def dist(x, y, p=2, name=None): r""" This OP returns the p-norm of (x - y). It is not a norm in a strict sense, only as a measure of distance. The shapes of x and y must be broadcastable. The definition is as follows, for details, please refer to the `numpy's broadcasting <https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`_: - Each input has at least one dimension. - Match the two input dimensions from back to front, the dimension sizes must either be equal, one of them is 1, or one of them does not exist. Where, z = x - y, the shapes of x and y are broadcastable, then the shape of z can be obtained as follows: 1. If the number of dimensions of x and y are not equal, prepend 1 to the dimensions of the tensor with fewer dimensions. For example, The shape of x is [8, 1, 6, 1], the shape of y is [7, 1, 5], prepend 1 to the dimension of y. x (4-D Tensor): 8 x 1 x 6 x 1 y (4-D Tensor): 1 x 7 x 1 x 5 2. Determine the size of each dimension of the output z: choose the maximum value from the two input dimensions. z (4-D Tensor): 8 x 7 x 6 x 5 If the number of dimensions of the two inputs are the same, the size of the output can be directly determined in step 2. When p takes different values, the norm formula is as follows: When p = 0, defining $0^0=0$, the zero-norm of z is simply the number of non-zero elements of z. .. math:: ||z||_{0}=\lim_{p \\rightarrow 0}\sum_{i=1}^{m}|z_i|^{p} When p = inf, the inf-norm of z is the maximum element of z. .. math:: ||z||_\infty=\max_i |z_i| When p = -inf, the negative-inf-norm of z is the minimum element of z. .. math:: ||z||_{-\infty}=\min_i |z_i| Otherwise, the p-norm of z follows the formula, .. math:: ||z||_{p}=(\sum_{i=1}^{m}|z_i|^p)^{\\frac{1}{p}} Args: x (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64. y (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64. p (float, optional): The norm to be computed, its data type is float32 or float64. Default: 2. Returns: Tensor: Tensor that is the p-norm of (x - y). Examples: .. code-block:: python import paddle import numpy as np x = paddle.to_tensor(np.array([[3, 3],[3, 3]]), "float32") y = paddle.to_tensor(np.array([[3, 3],[3, 1]]), "float32") out = paddle.dist(x, y, 0) print(out) # out = [1.] out = paddle.dist(x, y, 2) print(out) # out = [2.] 
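# an added illustration (not in the original example): the 1-norm sums the
# absolute elementwise differences, here |0| + |0| + |0| + |2| = 2.
out = paddle.dist(x, y, 1)
print(out) # out = [2.]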
out = paddle.dist(x, y, float("inf")) print(out) # out = [2.] out = paddle.dist(x, y, float("-inf")) print(out) # out = [0.] """ check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'dist') check_variable_and_dtype(y, 'dtype', ['float32', 'float64'], 'dist') check_type(p, 'p', (float, int), 'dist') helper = LayerHelper("dist", **locals()) out = helper.create_variable_for_type_inference(x.dtype) inputs = {"X": [x], "Y": [y]} outputs = {'Out': [out]} attrs = {"p": float(p)} helper.append_op( type='dist', inputs=inputs, outputs={'Out': out}, attrs=attrs) return out def cond(x, p=None, name=None): """ Computes the condition number of a matrix or batches of matrices with respect to a matrix norm ``p``. Args: x (Tensor): The input tensor could be tensor of shape ``(*, m, n)`` where ``*`` is zero or more batch dimensions for ``p`` in ``(2, -2)``, or of shape ``(*, n, n)`` where every matrix is invertible for any supported ``p``. And the input data type could be ``float32`` or ``float64``. p (float|string, optional): Order of the norm. Supported values are `fro`, `nuc`, `1`, `-1`, `2`, `-2`, `inf`, `-inf`. Default value is `None`, meaning that the order of the norm is `2`. name (str, optional): The default value is `None`. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: computing results of condition number, its data type is the same as input Tensor ``x``. Examples: .. code-block:: python import paddle import numpy as np x = paddle.to_tensor([[1., 0, -1], [0, 1, 0], [1, 0, 1]]) # compute conditional number when p is None out = paddle.linalg.cond(x) # out.numpy() [1.4142135] # compute conditional number when order of the norm is 'fro' out_fro = paddle.linalg.cond(x, p='fro') # out_fro.numpy() [3.1622777] # compute conditional number when order of the norm is 'nuc' out_nuc = paddle.linalg.cond(x, p='nuc') # out_nuc.numpy() [9.2426405] # compute conditional number when order of the norm is 1 out_1 = paddle.linalg.cond(x, p=1) # out_1.numpy() [2.] # compute conditional number when order of the norm is -1 out_minus_1 = paddle.linalg.cond(x, p=-1) # out_minus_1.numpy() [1.] # compute conditional number when order of the norm is 2 out_2 = paddle.linalg.cond(x, p=2) # out_2.numpy() [1.4142135] # compute conditional number when order of the norm is -1 out_minus_2 = paddle.linalg.cond(x, p=-2) # out_minus_2.numpy() [0.70710677] # compute conditional number when order of the norm is inf out_inf = paddle.linalg.cond(x, p=np.inf) # out_inf.numpy() [2.] # compute conditional number when order of the norm is -inf out_minus_inf = paddle.linalg.cond(x, p=-np.inf) # out_minus_inf.numpy() [1.] 
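# illustrative note (added): for this x, x.T @ x = diag(2, 1, 2), so its singular
# values are sqrt(2), sqrt(2) and 1; hence cond(x, p=2) = sqrt(2) ~= 1.4142135 and
# cond(x, p=-2) = 1/sqrt(2) ~= 0.70710677, matching out_2 and out_minus_2 above.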
a = paddle.to_tensor(np.random.randn(2, 4, 4).astype('float32')) # a.numpy() # [[[ 0.14063153 -0.996288 0.7996131 -0.02571543] # [-0.16303636 1.5534962 -0.49919784 -0.04402903] # [-1.1341571 -0.6022629 0.5445269 0.29154757] # [-0.16816919 -0.30972657 1.7521842 -0.5402487 ]] # [[-0.58081484 0.12402827 0.7229862 -0.55046535] # [-0.15178485 -1.1604939 0.75810957 0.30971205] # [-0.9669573 1.0940945 -0.27363303 -0.35416734] # [-1.216529 2.0018666 -0.7773689 -0.17556527]]] a_cond_fro = paddle.linalg.cond(a, p='fro') # a_cond_fro.numpy() [31.572273 28.120834] b = paddle.to_tensor(np.random.randn(2, 3, 4).astype('float64')) # b.numpy() # [[[ 1.61707487 0.46829144 0.38130416 0.82546736] # [-1.72710298 0.08866375 -0.62518804 0.16128892] # [-0.02822879 -1.67764516 0.11141444 0.3220113 ]] # [[ 0.22524372 0.62474921 -0.85503233 -1.03960523] # [-0.76620689 0.56673047 0.85064753 -0.45158196] # [ 1.47595418 2.23646462 1.5701758 0.10497519]]] b_cond_2 = paddle.linalg.cond(b, p=2) # b_cond_2.numpy() [3.30064451 2.51976252] """ def mat_norm(input, porder=1., axis=None): """ NOTE: Calculate the matrix norm of a square matrix or batches of square matrices, when porder is in (1, -1, inf, -inf) """ reduce_all = True if axis is None or axis == [] else False axis = axis if axis != None and axis != [] else [0] keepdim = False if paddle.in_dynamic_mode(): abs_out = _C_ops.abs(input) sum_out = _C_ops.reduce_sum(abs_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == 1 or porder == np.inf: return _C_ops.reduce_max(sum_out, 'dim', [-1], 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == -1 or porder == -np.inf: return _C_ops.reduce_min(sum_out, 'dim', [-1], 'keepdim', keepdim, 'reduce_all', reduce_all) block = LayerHelper('norm', **locals()) abs_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='abs', inputs={'X': input}, outputs={'Out': abs_out}) block.append_op( type='reduce_sum', inputs={'X': abs_out}, outputs={'Out': sum_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) if porder == 1 or porder == np.inf: block.append_op( type='reduce_max', inputs={'X': sum_out}, outputs={'Out': out}, attrs={ 'dim': [-1], 'keep_dim': keepdim, 'reduce_all': reduce_all }) if porder == -1 or porder == -np.inf: block.append_op( type='reduce_min', inputs={'X': sum_out}, outputs={'Out': out}, attrs={ 'dim': [-1], 'keep_dim': keepdim, 'reduce_all': reduce_all }) return out def fro_norm(input, porder=2, axis=[-1]): """ NOTE: Calculate the frobenius norm of a square matrix or batches of square matrices. """ reduce_all = True if axis is None or axis == [] else False keepdim = False if paddle.in_dynamic_mode(): pow_out = _C_ops.pow(input, 'factor', porder) sum_out_1 = _C_ops.reduce_sum(pow_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) sum_out_2 = _C_ops.reduce_sum(sum_out_1, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) return _C_ops.pow(sum_out_2, 'factor', float(1. 
/ porder)) block = LayerHelper('norm', **locals()) pow_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out_1 = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out_2 = block.create_variable_for_type_inference( dtype=block.input_dtype()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='pow', inputs={'X': input}, outputs={'Out': pow_out}, attrs={'factor': porder}) block.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': sum_out_1}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='reduce_sum', inputs={'X': sum_out_1}, outputs={'Out': sum_out_2}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='pow', inputs={'X': sum_out_2}, outputs={'Out': out}, attrs={'factor': float(1. / porder)}) return out def svd_norm(input, porder, axis=[-1]): """ NOTE: Calculate the matrix norm, which is related to singular values, of a matrix or batches of matrices, including nuclear norm, 2-norm and (-2)-norm. """ reduce_all = True if axis is None or axis == [] else False keepdim = False u, s, vh = svd(input, full_matrices=False) if paddle.in_dynamic_mode(): if porder == "nuc": return _C_ops.reduce_sum(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) max_out = _C_ops.reduce_max(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) min_out = _C_ops.reduce_min(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == 2: return _C_ops.elementwise_div(max_out, min_out, 'aixs', axis, 'use_mkldnn', False) if porder == -2: return _C_ops.elementwise_div(min_out, max_out, 'aixs', axis, 'use_mkldnn', False) block = LayerHelper('norm', **locals()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) if porder == "nuc": block.append_op( type='reduce_sum', inputs={'X': s}, outputs={'Out': out}, attrs={ 'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all }) return out max_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) min_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='reduce_max', inputs={'X': s}, outputs={'Out': max_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='reduce_min', inputs={'X': s}, outputs={'Out': min_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) if porder == 2: block.append_op( type='elementwise_div', inputs={'X': max_out, 'Y': min_out}, outputs={'Out': out}, attrs={'aixs': axis, 'use_mkldnn': False}) return out if porder == -2: block.append_op( type='elementwise_div', inputs={'X': min_out, 'Y': max_out}, outputs={'Out': out}, attrs={'aixs': axis, 'use_mkldnn': False}) return out def empty_tensor(input, shape): if paddle.in_dynamic_mode(): return input.reshape(shape) raise ValueError("only support x is nonempty tensor in static mode") x_shape = list(x.shape) if not len(x_shape) >= 2: raise ValueError("input should be a matrix or batches of matrices, " + "but the dimention of received input is {}".format( len(x_shape))) if p == None: p = 2 x_size = 0 if (0 in x_shape) else 1 if p in ("fro", "nuc", 1, -1, np.inf, -np.inf): if x_shape[len(x_shape) - 1] == x_shape[len(x_shape) - 2]: if x_size == 0: return empty_tensor(x, x_shape[:-2]) x_inv = x.inverse() if p == "fro": return fro_norm(x) * fro_norm(x_inv) if p == "nuc": return svd_norm(x, p) * svd_norm(x_inv, p) if p in (1, -1): return mat_norm( x, 
porder=p, axis=[-2]) * mat_norm( x_inv, porder=p, axis=[-2]) if p in (np.inf, -np.inf): return mat_norm( x, porder=p, axis=[-1]) * mat_norm( x_inv, porder=p, axis=[-1]) else: raise ValueError("only support p is {} when input is a ".format(p) + "square matrix or batches of square matrices") elif p in (2, -2): if x_size == 0: return empty_tensor(x, x_shape[:-2]) return svd_norm(x, porder=p) else: raise ValueError( "unsupported {} for p, only supporting ('fro', 'nuc', ".format( p) + "1, -1, 2, -2, inf, -inf) or none") def dot(x, y, name=None): """ This operator calculates inner product for vectors. .. note:: Support 1-d and 2-d Tensor. When it is 2d, the first dimension of this matrix is the batch dimension, which means that the vectors of multiple batches are dotted. Parameters: x(Tensor): 1-D or 2-D ``Tensor``. Its dtype should be ``float32``, ``float64``, ``int32``, ``int64`` y(Tensor): 1-D or 2-D ``Tensor``. Its dtype soulde be ``float32``, ``float64``, ``int32``, ``int64`` name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. Details: :ref:`api_guide_Name` Returns: Tensor: the calculated result Tensor. Examples: .. code-block:: python import paddle import numpy as np x_data = np.random.uniform(0.1, 1, [10]).astype(np.float32) y_data = np.random.uniform(1, 3, [10]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.dot(x, y) print(z) """ op_type = 'dot' # skip var type check in dygraph mode to improve efficiency if paddle.in_dynamic_mode(): op = getattr(_C_ops, op_type) return op(x, y) assert x is not None, 'x cannot be None in {}'.format(op_type) assert y is not None, 'y cannot be None in {}'.format(op_type) check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], op_type) check_variable_and_dtype(y, 'y', ['float32', 'float64', 'int32', 'int64'], op_type) helper = LayerHelper(op_type, **locals()) if name is None: out = helper.create_variable_for_type_inference(dtype=x.dtype) else: out = helper.create_variable( name=name, dtype=x.dtype, persistable=False) helper.append_op( type="dot", inputs={'X': x, 'Y': y}, attrs={}, outputs={"Out": out}) return out def cov(x, rowvar=True, ddof=True, fweights=None, aweights=None, name=None): """ Estimate the covariance matrix of the input variables, given data and weights. A covariance matrix is a square matrix, indicate the covariance of each pair variables in the input matrix. For example, for an N-dimensional samples X=[x1,x2,…xN]T, then the covariance matrix element Cij is the covariance of xi and xj. The element Cii is the variance of xi itself. Parameters: x(Tensor): A N-D(N<=2) Tensor containing multiple variables and observations. By default, each row of x represents a variable. Also see rowvar below. rowvar(Bool, optional): If rowvar is True (default), then each row represents a variable, with observations in the columns. Default: True ddof(Bool, optional): If ddof=True will return the unbiased estimate, and ddof=False will return the simple average. Default: True fweights(Tensor, optional): 1-D Tensor of integer frequency weights; The number of times each observation vector should be repeated. Default: None aweights(Tensor, optional): 1-D Tensor of observation vector weights. How important of the observation vector, larger data means this element is more important. Default: None name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. 
Details: :ref:`api_guide_Name` Returns: Tensor: The covariance matrix Tensor of the variables. Examples: .. code-block:: python import paddle xt = paddle.rand((3,4)) paddle.linalg.cov(xt) ''' Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, [[0.07918842, 0.06127326, 0.01493049], [0.06127326, 0.06166256, 0.00302668], [0.01493049, 0.00302668, 0.01632146]]) ''' """ op_type = 'cov' if len(x.shape) > 2 or len(x.shape) < 1: raise ValueError( "Input(x) only support N-D (1<=N<=2) tensor in cov, but received " "length of Input(input) is %s." % len(x.shape)) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cov') nx = x if len(x.shape) == 1: nx = x.reshape((1, -1)) if not rowvar and nx.shape[0] != 1: nx = nx.t() w = None observation_num = nx.shape[1] if fweights is not None: w = fweights.astype(nx.dtype) if len(w.shape) > 1: raise ValueError( "Input(fweights) only support N-D (N<=1) tensor in cov, but received " "shape of Input(input) is %s." % len(fweights.shape)) if fweights.shape[0] != observation_num: raise ValueError( "The number of Input(fweights) should equal to x's dim[1]: {}, but received " "size of Input(fweights) is {}.".format(observation_num, fweights.shape[0])) if fweights.min() < 0: raise ValueError( "The value of Input(fweights) cannot be negtive, but received " "min of Input(fweights) is {}.".format(fweights.min())) if not paddle.all(fweights == paddle.round(fweights.astype('float64'))): raise ValueError("Input(fweights) must be integer ") if aweights is not None: aw = aweights.astype(nx.dtype) if len(aw.shape) > 1: raise ValueError( "Input(aweights) only support N-D (N<=1) tensor in cov, but received " "length of Input(input) is %s." % len(aweights.shape)) check_variable_and_dtype(aweights, 'dtype', ['float32', 'float64'], 'cov') if aweights.shape[0] != observation_num: raise ValueError( "The number of Input(aweights) should equal to x's dim[1]: {}, but received " "size of Input(aweights) is {}.".format(observation_num, aweights.shape[0])) if aweights.min() < 0: raise ValueError( "The value of Input(aweights) cannot be negtive, but received " "min of Input(aweights) is {}.".format(aweights.min())) if w is not None: w = w * aw else: w = aw w_sum = paddle.to_tensor(observation_num, dtype=nx.dtype) if fweights is not None or aweights is not None: w_sum = w.sum() if w_sum.item() == 0: raise ValueError("The sum of weights is zero, can't be normalized.") if w is not None: nx_w = nx * w avg = (nx_w).sum(axis=1) / w_sum else: avg = nx.sum(axis=1) / w_sum nx_w = nx if w is not None and aweights is not None and ddof == True: norm_factor = w_sum - (w * aweights).sum() / w_sum else: norm_factor = w_sum - ddof if norm_factor <= 0: norm_factor = paddle.to_tensor(0, dtype=nx.dtype) nx = nx - avg.unsqueeze(1) xxt = paddle.mm(nx, nx_w.t().conj()) cov = paddle.divide(xxt, norm_factor).squeeze() return cov def t(input, name=None): """ Transpose <=2-D tensor. 0-D and 1-D tensors are returned as it is and 2-D tensor is equal to the paddle.transpose function which perm dimensions set 0 and 1. Args: input (Tensor): The input Tensor. It is a N-D (N<=2) Tensor of data types float16, float32, float64, int32. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` Returns: Tensor: A transposed n-D Tensor, with data type being float16, float32, float64, int32, int64. For Example: .. 
code-block:: text # Example 1 (0-D tensor) x = tensor([0.79]) paddle.t(x) = tensor([0.79]) # Example 2 (1-D tensor) x = tensor([0.79, 0.84, 0.32]) paddle.t(x) = tensor([0.79, 0.84, 0.32]) # Example 3 (2-D tensor) x = tensor([0.79, 0.84, 0.32], [0.64, 0.14, 0.57]) paddle.t(x) = tensor([0.79, 0.64], [0.84, 0.14], [0.32, 0.57]) Examples: .. code-block:: python import paddle x = paddle.ones(shape=[2, 3], dtype='int32') x_transposed = paddle.t(x) print(x_transposed.shape) # [3, 2] """ if len(input.shape) > 2: raise ValueError( "Input(input) only support N-D (N<=2) tensor, but received " "length of Input(input) is %s. Perhaps you can use paddle." "tensor.transpose() instead." % len(input.shape)) if paddle.in_dynamic_mode(): if len(input.shape) == 1: return input # 2-D tensor perm = [1, 0] out, _ = _C_ops.transpose2(input, 'axis', perm) return out check_variable_and_dtype( input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'], 'transpose') helper = LayerHelper('t', **locals()) out = helper.create_variable_for_type_inference(input.dtype) input_shape = helper.create_variable_for_type_inference(input.dtype) if len(input.shape) == 1: out = input else: helper.append_op( type='transpose2', inputs={'X': [input]}, outputs={'Out': [out], 'XShape': [input_shape]}, attrs={'axis': [1, 0]}) return out def cross(x, y, axis=None, name=None): """ Computes the cross product between two tensors along an axis. Inputs must have the same shape, and the length of their axes should be equal to 3. If `axis` is not given, it defaults to the first axis found with the length 3. Args: x (Tensor): The first input tensor. y (Tensor): The second input tensor. axis (int, optional): The axis along which to compute the cross product. It defaults to the first axis found with the length 3. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor. A Tensor with same data type as `x`. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0], [3.0, 3.0, 3.0]]) y = paddle.to_tensor([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]) z1 = paddle.cross(x, y) # [[-1. -1. -1.] # [ 2. 2. 2.] # [-1. -1. -1.]] z2 = paddle.cross(x, y, axis=1) # [[0. 0. 0.] # [0. 0. 0.] # [0. 0. 0.]] """ if in_dygraph_mode(): return _C_ops.final_state_cross(x, y, axis) else: if _in_legacy_dygraph(): if axis is not None: return _C_ops.cross(x, y, 'dim', axis) else: return _C_ops.cross(x, y) else: helper = LayerHelper("cross", **locals()) out = helper.create_variable_for_type_inference(x.dtype) attrs = dict() attrs['dim'] = axis helper.append_op( type='cross', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs=attrs) return out def cholesky(x, upper=False, name=None): r""" Computes the Cholesky decomposition of one symmetric positive-definite matrix or batches of symmetric positive-definite matrice. If `upper` is `True`, the decomposition has the form :math:`A = U^{T}U` , and the returned matrix :math:`U` is upper-triangular. Otherwise, the decomposition has the form :math:`A = LL^{T}` , and the returned matrix :math:`L` is lower-triangular. Args: x (Tensor): The input tensor. Its shape should be `[*, M, M]`, where * is zero or more batch dimensions, and matrices on the inner-most 2 dimensions all should be symmetric positive-definite. Its data type should be float32 or float64. upper (bool): The flag indicating whether to return upper or lower triangular matrices. Default: False. 
Returns: Tensor: A Tensor with same shape and data type as `x`. It represents \ triangular matrices generated by Cholesky decomposition. Examples: .. code-block:: python import paddle import numpy as np a = np.random.rand(3, 3) a_t = np.transpose(a, [1, 0]) x_data = np.matmul(a, a_t) + 1e-03 x = paddle.to_tensor(x_data) out = paddle.linalg.cholesky(x, upper=False) print(out) # [[1.190523 0. 0. ] # [0.9906703 0.27676893 0. ] # [1.25450498 0.05600871 0.06400121]] """ if paddle.in_dynamic_mode(): return _C_ops.cholesky(x, "upper", upper) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cholesky') check_type(upper, 'upper', bool, 'cholesky') helper = LayerHelper('cholesky', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='cholesky', inputs={'X': [x]}, outputs={'Out': out}, attrs={'upper': upper}) return out def matrix_rank(x, tol=None, hermitian=False, name=None): r""" Computes the rank of a matrix. The rank of a matrix is the number of singular values that are greater than the specified `tol` threshold when hermitian=False, or the number of eigenvalues in absolute value that are greater than the specified `tol` threshold when hermitian=True. Args: x (Tensor): The input tensor. Its shape should be `[..., m, n]`, where `...` is zero or more batch dimensions. If `x` is a batch of matrices then the output has the same batch dimensions. The data type of `x` should be float32 or float64. tol (float,Tensor,optional): the tolerance value. Default: None. If `tol` is not specified, and `sigma` is the largest singular value (or eigenvalues in absolute value), and `eps` is the epsilon value for the dtype of `x`, then `tol` is computed with formula `tol=sigma * max(m,n) * eps`. Note that if `x` is a batch of matrices, `tol` is computed this way for every batch. hermitian (bool,optional): indicates whether `x` is Hermitian. Default: False. When hermitian=True, `x` is assumed to be Hermitian, enabling a more efficient method for finding eigenvalues, but `x` is not checked inside the function. Instead, We just use the lower triangular of the matrix to compute. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: Rank of tensor x. Examples: .. 
code-block:: python import paddle a = paddle.eye(10) b = paddle.linalg.matrix_rank(a) print(b) # b = [10] c = paddle.ones(shape=[3, 4, 5, 5]) d = paddle.linalg.matrix_rank(c, tol=0.01, hermitian=True) print(d) # d = [[1, 1, 1, 1], # [1, 1, 1, 1], # [1, 1, 1, 1]] """ if paddle.in_dynamic_mode(): if tol is None: tol_tensor = None tol_attr = 0.0 use_default_tol = True elif isinstance(tol, Variable): if tol.dtype != x.dtype: tol_tensor = cast(tol, x.dtype) else: tol_tensor = tol tol_attr = 0.0 use_default_tol = False else: tol_tensor = None tol_attr = float(tol) use_default_tol = False return _C_ops.matrix_rank(x, tol_tensor, "tol", tol_attr, 'hermitian', hermitian, 'use_default_tol', use_default_tol) inputs = {} attrs = {} check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'matrix_rank') inputs['X'] = x if tol is None: attrs['use_default_tol'] = True elif isinstance(tol, Variable): check_variable_and_dtype(tol, 'tol', ['float32'], 'matrix_rank') attrs['use_default_tol'] = False if tol.dtype != x.dtype: inputs['TolTensor'] = cast(tol, x.dtype) else: inputs['TolTensor'] = tol else: check_type(tol, 'tol', float, 'matrix_rank') attrs['use_default_tol'] = False attrs['tol'] = tol check_type(hermitian, 'hermitian', bool, 'matrix_rank') attrs['hermitian'] = hermitian helper = LayerHelper('matrix_rank', **locals()) out = helper.create_variable_for_type_inference(dtype='int32') helper.append_op( type='matrix_rank', inputs=inputs, outputs={'Out': out}, attrs=attrs) return out def bmm(x, y, name=None): """ Applies batched matrix multiplication to two tensors. Both of the two input tensors must be three-dementional and share the same batch size. if x is a (b, m, k) tensor, y is a (b, k, n) tensor, the output will be a (b, m, n) tensor. Args: x (Tensor): The input Tensor. y (Tensor): The input Tensor. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The product Tensor. Examples: .. code-block:: python import paddle # In imperative mode: # size x: (2, 2, 3) and y: (2, 3, 2) x = paddle.to_tensor([[[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]], [[3.0, 3.0, 3.0], [4.0, 4.0, 4.0]]]) y = paddle.to_tensor([[[1.0, 1.0],[2.0, 2.0],[3.0, 3.0]], [[4.0, 4.0],[5.0, 5.0],[6.0, 6.0]]]) out = paddle.bmm(x, y) #output size: (2, 2, 2) #output value: #[[[6.0, 6.0],[12.0, 12.0]],[[45.0, 45.0],[60.0, 60.0]]] out_np = out.numpy() """ x_shape = x.shape y_shape = y.shape if not len(x_shape) == len(y_shape) == 3: raise ValueError( "x and y should be 3-dimensional. But received x's dimention: {}, y's dimention: {}". format(x_shape, y_shape)) if x_shape[2] != y_shape[1]: raise ValueError( "x's width must be equal with y's height. But received x's shape: {}, y's shape: {}". format(x_shape, y_shape)) if x_shape[0] != y_shape[0]: raise ValueError( "x's batch (shape[0]) must be equal with y's batch (shape[0]). But received x's shape: {}, y's shape: {}". format(x_shape, y_shape)) if paddle.in_dynamic_mode(): return _C_ops.bmm(x, y) helper = LayerHelper('bmm', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type='bmm', inputs={'X': x, 'Y': y}, outputs={'Out': out}) return out def histogram(input, bins=100, min=0, max=0, name=None): """ Computes the histogram of a tensor. The elements are sorted into equal width bins between min and max. If min and max are both zero, the minimum and maximum values of the data are used. Args: input (Tensor): A Tensor(or LoDTensor) with shape :math:`[N_1, N_2,..., N_k]` . 
The data type of the input Tensor should be float32, float64, int32, int64. bins (int): number of histogram bins min (int): lower end of the range (inclusive) max (int): upper end of the range (inclusive) Returns: Tensor: data type is int64, shape is (nbins,). Examples: .. code-block:: python import paddle inputs = paddle.to_tensor([1, 2, 1]) result = paddle.histogram(inputs, bins=4, min=0, max=3) print(result) # [0, 2, 1, 0] """ if paddle.in_dynamic_mode(): return _C_ops.histogram(input, "bins", bins, "min", min, "max", max) helper = LayerHelper('histogram', **locals()) check_variable_and_dtype( input, 'X', ['int32', 'int64', 'float32', 'float64'], 'histogram') out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64) helper.append_op( type='histogram', inputs={'X': input}, outputs={'Out': out}, attrs={'bins': bins, 'min': min, 'max': max}) return out def bincount(x, weights=None, minlength=0, name=None): """ Computes frequency of each value in the input tensor. Args: x (Tensor): A Tensor with non-negative integer. Should be 1-D tensor. weights (Tensor, optional): Weight for each value in the input tensor. Should have the same shape as input. Default is None. minlength (int, optional): Minimum number of bins. Should be non-negative integer. Default is 0. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor of frequency. Examples: .. code-block:: python import paddle x = paddle.to_tensor([1, 2, 1, 4, 5]) result1 = paddle.bincount(x) print(result1) # [0, 2, 1, 0, 1, 1] w = paddle.to_tensor([2.1, 0.4, 0.1, 0.5, 0.5]) result2 = paddle.bincount(x, weights=w) print(result2) # [0., 2.19999981, 0.40000001, 0., 0.50000000, 0.50000000] """ if x.dtype not in [paddle.int32, paddle.int64]: raise TypeError("Elements in Input(x) should all be integers") if paddle.in_dynamic_mode(): return _C_ops.bincount(x, weights, "minlength", minlength) helper = LayerHelper('bincount', **locals()) check_variable_and_dtype(x, 'X', ['int32', 'int64'], 'bincount') if weights is not None: check_variable_and_dtype(weights, 'Weights', ['int32', 'int64', 'float32', 'float64'], 'bincount') out = helper.create_variable_for_type_inference(dtype=weights.dtype) else: out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='bincount', inputs={'X': x, 'Weights': weights}, outputs={'Out': out}, attrs={'minlength': minlength}) return out def mv(x, vec, name=None): """ Performs a matrix-vector product of the matrix x and the vector vec. Args: x (Tensor): A tensor with shape :math:`[M, N]` , The data type of the input Tensor x should be one of float32, float64. vec (Tensor): A tensor with shape :math:`[N]` , The data type of the input Tensor x should be one of float32, float64. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor which is producted by x and vec. Examples: .. 
code-block:: python # x: [M, N], vec: [N] # paddle.mv(x, vec) # out: [M] import numpy as np import paddle x_data = np.array([[2, 1, 3], [3, 0, 1]]).astype("float64") x = paddle.to_tensor(x_data) vec_data = np.array([3, 5, 1]) vec = paddle.to_tensor(vec_data).astype("float64") out = paddle.mv(x, vec) """ if in_dygraph_mode(): return _C_ops.final_state_mv(x, vec) else: if _in_legacy_dygraph(): out = _C_ops.mv(x, vec) return out else: def __check_input(x, vec): var_names = {'x': x, 'vec': vec} for name, val in var_names.items(): check_variable_and_dtype(val, name, ['float32', 'float64'], 'mv') x_shape = list(x.shape) vec_shape = list(vec.shape) if len(x_shape) != 2: raise ValueError( "x should be 2-dimensional. But received x's dimention: {}". format(x_shape)) if len(vec_shape) != 1: raise ValueError( "vec should be 1-dimensional. But received vec's dimention: {}". format(vec_shape)) __check_input(x, vec) helper = LayerHelper('mv', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='mv', inputs={'X': x, 'Vec': vec}, outputs={'Out': out}) return out def det(x, name=None): """ Calculates determinant value of a square matrix or batches of square matrices. Args: x (Tensor): input (Tensor): the input matrix of size `(n, n)` or the batch of matrices of size `(*, n, n)` where `*` is one or more batch dimensions. Returns: y (Tensor):the determinant value of a square matrix or batches of square matrices. Examples: .. code-block:: python import paddle x = paddle.randn([3,3,3]) A = paddle.linalg.det(x) print(A) # [ 0.02547996, 2.52317095, -6.15900707]) """ if paddle.in_dynamic_mode(): return _C_ops.determinant(x) check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'det') input_shape = list(x.shape) assert len(input_shape) >= 2, \ "The x must be at least 2-dimensional, " \ "but received Input x's dimensional: %s.\n" % \ len(input_shape) assert (input_shape[-1] == input_shape[-2]), \ "Expect squared input," \ "but received %s by %s matrix.\n" \ %(input_shape[-2], input_shape[-1]) \ helper = LayerHelper('determinant', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='determinant', inputs={'Input': [x]}, outputs={'Out': [out]}) return out def slogdet(x, name=None): """ Calculates the sign and natural logarithm of the absolute value of a square matrix's or batches square matrices' determinant. The determinant can be computed with ``sign * exp(logabsdet) Supports input of float, double Note that for matrices that have zero determinant, this returns ``(0, -inf)`` Args: x (Tensor): the batch of matrices of size :math:`(*, n, n)` where math:`*` is one or more batch dimensions. Returns: y (Tensor): A tensor containing the sign of the determinant and the natural logarithm of the absolute value of determinant, respectively. Examples: .. code-block:: python import paddle x = paddle.randn([3,3,3]) A = paddle.linalg.slogdet(x) print(A) # [[ 1. , 1. , -1. 
], # [-0.98610914, -0.43010661, -0.10872950]]) """ if paddle.in_dynamic_mode(): return _C_ops.slogdeterminant(x) check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'slogdet') input_shape = list(x.shape) assert len(input_shape) >= 2, \ "The x must be at least 2-dimensional, " \ "but received Input x's dimensional: %s.\n" % \ len(input_shape) assert (input_shape[-1] == input_shape[-2]), \ "Expect squared input," \ "but received %s by %s matrix.\n" \ %(input_shape[-2], input_shape[-1]) \ helper = LayerHelper('slogdeterminant', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='slogdeterminant', inputs={'Input': [x]}, outputs={'Out': [out]}) return out def svd(x, full_matrices=False, name=None): r""" Computes the singular value decomposition of one matrix or a batch of regular matrices. Let :math:`X` be the input matrix or a batch of input matrices, the output should satisfies: .. math:: X = U * diag(S) * VT Args: x (Tensor): The input tensor. Its shape should be `[..., N, M]`, where `...` is zero or more batch dimensions. N and M can be arbitraty positive number. Note that if x is sigular matrices, the grad is numerical instable. The data type of x should be float32 or float64. full_matrices (bool): A flag to control the behavor of svd. If full_matrices = True, svd op will compute full U and V matrics, which means shape of U is `[..., N, N]`, shape of V is `[..., M, M]`. K = min(M, N). If full_matrices = False, svd op will use a economic method to store U and V. which means shape of U is `[..., N, K]`, shape of V is `[..., M, K]`. K = min(M, N). name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tuple of 3 tensors: (U, S, VH). VH is the conjugate transpose of V. S is the singlar value vectors of matrics with shape `[..., K]` Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [1.0, 3.0], [4.0, 6.0]]).astype('float64') x = x.reshape([3, 2]) u, s, vh = paddle.linalg.svd(x) print (u) #U = [[ 0.27364809, -0.21695147 ], # [ 0.37892198, -0.87112408 ], # [ 0.8840446 , 0.44053933 ]] print (s) #S = [8.14753743, 0.78589688] print (vh) #VT= [[ 0.51411221, 0.85772294], # [ 0.85772294, -0.51411221]] # one can verify : U * S * VT == X # U * UH == I # V * VH == I """ if paddle.in_dynamic_mode(): return _C_ops.svd(x, 'full_matrices', full_matrices) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'svd') check_type(full_matrices, 'full_matrices', bool, 'svd') helper = LayerHelper('svd', **locals()) u = helper.create_variable_for_type_inference(dtype=x.dtype) vh = helper.create_variable_for_type_inference(dtype=x.dtype) s = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['full_matrices'] = full_matrices helper.append_op( type='svd', inputs={'X': [x]}, outputs={'U': u, 'VH': vh, 'S': s}, attrs=attrs, ) return u, s, vh def matrix_power(x, n, name=None): r""" Computes the n-th power of a square matrix or a batch of square matrices. Let :math:`X` be a sqaure matrix or a batch of square matrices, :math:`n` be an exponent, the equation should be: .. math:: Out = X ^ {n} Specifically, - If `n > 0`, it returns the matrix or a batch of matrices raised to the power of `n`. - If `n = 0`, it returns the identity matrix or a batch of identity matrices. - If `n < 0`, it returns the inverse of each matrix (if invertible) raised to the power of `abs(n)`. 
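As an illustrative sanity check (not part of the documented contract), `matrix_power(x, -1)` agrees with the matrix inverse up to numerical error, so `paddle.matmul(x, paddle.linalg.matrix_power(x, -1))` is expected to be close to the identity matrix.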
Args: x (Tensor): A square matrix or a batch of square matrices to be raised to power `n`. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. n (int): The exponent. It can be any positive, negative integer or zero. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The n-th power of the matrix (or the batch of matrices) `x`. Its data type should be the same as that of `x`. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1, 2, 3], [1, 4, 9], [1, 8, 27]], dtype='float64') print(paddle.linalg.matrix_power(x, 2)) # [[6. , 34. , 102.], # [14. , 90. , 282.], # [36. , 250., 804.]] print(paddle.linalg.matrix_power(x, 0)) # [[1., 0., 0.], # [0., 1., 0.], # [0., 0., 1.]] print(paddle.linalg.matrix_power(x, -2)) # [[ 12.91666667, -12.75000000, 2.83333333 ], # [-7.66666667 , 8. , -1.83333333 ], # [ 1.80555556 , -1.91666667 , 0.44444444 ]] """ if paddle.in_dynamic_mode(): return _C_ops.matrix_power(x, "n", n) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'matrix_power') check_type(n, 'n', int, 'matrix_power') helper = LayerHelper('matrix_power', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matrix_power', inputs={'X': x}, outputs={'Out': out}, attrs={'n': n}) return out def qr(x, mode="reduced", name=None): r""" Computes the QR decomposition of one matrix or batches of matrice (backward is unsupported now). Args: x (Tensor): The input tensor. Its shape should be `[..., M, N]`, where ... is zero or more batch dimensions. M and N can be arbitrary positive number. The data type of x should be float32 or float64. mode (str, optional): A flag to control the behavior of qr, the default is "reduced". Suppose x's shape is `[..., M, N]` and denoting `K = min(M, N)`: If mode = "reduced", qr op will return reduced Q and R matrices, which means Q's shape is `[..., M, K]` and R's shape is `[..., K, N]`. If mode = "complete", qr op will return complete Q and R matrices, which means Q's shape is `[..., M, M]` and R's shape is `[..., M, N]`. If mode = "r", qr op will only return reduced R matrix, which means R's shape is `[..., K, N]`. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: If mode = "reduced" or mode = "complete", qr will return a two tensor-tuple, which represents Q and R. If mode = "r", qr will return a tensor which represents R. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') q, r = paddle.linalg.qr(x) print (q) print (r) # Q = [[-0.16903085, 0.89708523], # [-0.50709255, 0.27602622], # [-0.84515425, -0.34503278]]) # R = [[-5.91607978, -7.43735744], # [ 0. 
, 0.82807867]]) # one can verify : X = Q * R ; """ if paddle.in_dynamic_mode(): q, r = _C_ops.qr(x, 'mode', mode) if mode == "r": return r else: return q, r check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'qr') check_type(mode, 'mode', str, 'qr') helper = LayerHelper('qr', **locals()) q = helper.create_variable_for_type_inference(dtype=x.dtype) r = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['mode'] = mode helper.append_op( type='qr', inputs={'X': [x]}, outputs={'Q': q, 'R': r}, attrs=attrs) if mode == "r": return r else: return q, r def lu(x, pivot=True, get_infos=False, name=None): r""" Computes the LU factorization of an N-D(N>=2) matrix x. Returns the LU factorization(inplace x) and Pivots. low triangular matrix L and upper triangular matrix U are combined to a single LU matrix. Pivoting is done if pivot is set to True. P mat can be get by pivots: # ones = eye(rows) #eye matrix of rank rows # for i in range(cols): # swap(ones[i], ones[pivots[i]]) # return ones Args: X (Tensor): the tensor to factor of N-dimensions(N>=2). pivot (bool, optional): controls whether pivoting is done. Default: True. get_infos (bool, optional): if set to True, returns an info IntTensor. Default: False. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: factorization (Tensor): LU matrix, the factorization of input X. pivots (IntTensor): the pivots of size(∗(N-2), min(m,n)). `pivots` stores all the intermediate transpositions of rows. The final permutation `perm` could be reconstructed by this, details refer to upper example. infos (IntTensor, optional): if `get_infos` is `True`, this is a tensor of size (∗(N-2)) where non-zero values indicate whether factorization for the matrix or each minibatch has succeeded or failed. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') lu,p,info = paddle.linalg.lu(x, get_infos=True) # >>> lu: # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0.20000000, 0.80000000], # [0.60000000, 0.50000000]]) # >>> p # Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # [3, 3]) # >>> info # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # 0) P,L,U = paddle.linalg.lu_unpack(lu,p) # >>> P # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[0., 1., 0.], # [0., 0., 1.], # [1., 0., 0.]]), # >>> L # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[1. , 0. ], # [0.20000000, 1. ], # [0.60000000, 0.50000000]]), # >>> U # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0. 
, 0.80000000]])) # one can verify : X = P @ L @ U ; """ if paddle.in_dynamic_mode(): LU, Piv, Info = _C_ops.lu(x, 'pivots', pivot) if get_infos: return LU, Piv, Info else: return LU, Piv check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu') helper = LayerHelper('lu', **locals()) lu = helper.create_variable_for_type_inference(dtype=x.dtype) p = helper.create_variable_for_type_inference(dtype='int') info = helper.create_variable_for_type_inference(dtype='int') attrs = dict() attrs['pivots'] = pivot helper.append_op( type='lu', inputs={'X': x}, outputs={'Out': lu, 'Pivots': p, 'Infos': info}, attrs=attrs) if get_infos: return lu, p, info else: return lu, p def lu_unpack(x, y, unpack_ludata=True, unpack_pivots=True, name=None): r""" Unpack L U and P to single matrix tensor . unpack L and U matrix from LU, unpack permutation matrix P from Pivtos . P mat can be get by pivots: # ones = eye(rows) #eye matrix of rank rows # for i in range(cols): # swap(ones[i], ones[pivots[i]]) Args: x (Tensor): The LU tensor get from paddle.linalg.lu, which is combined by L and U. y (Tensor): Pivots get from paddle.linalg.lu. unpack_ludata (bool,optional): whether to unpack L and U from x. Default: True. unpack_pivots (bool, optional): whether to unpack permutation matrix P from Pivtos. Default: True. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: P (Tensor): Permutation matrix P of lu factorization. L (Tensor): The lower triangular matrix tensor of lu factorization. U (Tensor): The upper triangular matrix tensor of lu factorization. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') lu,p,info = paddle.linalg.lu(x, get_infos=True) # >>> lu: # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0.20000000, 0.80000000], # [0.60000000, 0.50000000]]) # >>> p # Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # [3, 3]) # >>> info # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # 0) P,L,U = paddle.linalg.lu_unpack(lu,p) # >>> P # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[0., 1., 0.], # [0., 0., 1.], # [1., 0., 0.]]), # >>> L # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[1. , 0. ], # [0.20000000, 1. ], # [0.60000000, 0.50000000]]), # >>> U # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0. , 0.80000000]])) # one can verify : X = P @ L @ U ; """ if paddle.in_dynamic_mode(): P, L, U = _C_ops.lu_unpack(x, y, 'unpack_ludata', unpack_ludata, 'unpack_pivots', unpack_pivots) return P, L, U check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu_unpack') helper = LayerHelper('lu_unpack', **locals()) p = helper.create_variable_for_type_inference(dtype=x.dtype) l = helper.create_variable_for_type_inference(dtype=x.dtype) u = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['unpack_ludata'] = unpack_ludata attrs['unpack_pivots'] = unpack_pivots helper.append_op( type='lu_unpack', inputs={'X': x, 'Pivots': y}, outputs={'Pmat': p, 'L': l, 'U': u}, attrs=attrs) return p, l, u def eig(x, name=None): """ This API performs the eigenvalue decomposition of a square matrix or a batch of square matrices. .. 
note:: If the matrix is a Hermitian or a real symmetric matrix, please use :ref:`paddle.linalg.eigh` instead, which is much faster. If only eigenvalues is needed, please use :ref:`paddle.linalg.eigvals` instead. If the matrix is of any shape, please use :ref:`paddle.linalg.svd`. This API is only supported on CPU device. The output datatype is always complex for both real and complex input. Args: x (Tensor): A tensor with shape math:`[*, N, N]`, The data type of the x should be one of ``float32``, ``float64``, ``compplex64`` or ``complex128``. name (str, optional): The default value is `None`. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Eigenvalues(Tensors): A tensor with shape math:`[*, N]` refers to the eigen values. Eigenvectors(Tensors): A tensor with shape math:`[*, N, N]` refers to the eigen vectors. Examples: .. code-block:: python import paddle import numpy as np paddle.device.set_device("cpu") x_data = np.array([[1.6707249, 7.2249975, 6.5045543], [9.956216, 8.749598, 6.066444 ], [4.4251957, 1.7983172, 0.370647 ]]).astype("float32") x = paddle.to_tensor(x_data) w, v = paddle.linalg.eig(x) print(w) # Tensor(shape=[3, 3], dtype=complex128, place=CPUPlace, stop_gradient=False, # [[(-0.5061363550800655+0j) , (-0.7971760990842826+0j) , # (0.18518077798279986+0j)], # [(-0.8308237755993192+0j) , (0.3463813401919749+0j) , # (-0.6837005269141947+0j) ], # [(-0.23142567697893396+0j), (0.4944999840400175+0j) , # (0.7058765252952796+0j) ]]) print(v) # Tensor(shape=[3], dtype=complex128, place=CPUPlace, stop_gradient=False, # [ (16.50471283351188+0j) , (-5.5034820550763515+0j) , # (-0.21026087843552282+0j)]) """ if paddle.in_dynamic_mode(): w, v = _C_ops.eig(x) return w, v check_variable_and_dtype( x, 'X', ['float32', 'float64', 'complex64', 'complex128'], 'eig') helper = LayerHelper('eig', **locals()) w = helper.create_variable_for_type_inference(x.dtype) v = helper.create_variable_for_type_inference(x.dtype) inputs = {'X': x} outputs = {'Eigenvalues': w, 'Eigenvectors': v} helper.append_op(type='eig', inputs=inputs, outputs=outputs) return w, v def eigvals(x, name=None): """ Compute the eigenvalues of one or more general matrices. Warning: The gradient kernel of this operator does not yet developed. If you need back propagation through this operator, please replace it with paddle.linalg.eig. Args: x (Tensor): A square matrix or a batch of square matrices whose eigenvalues will be computed. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32, float64, complex64, or complex128. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: A tensor containing the unsorted eigenvalues which has the same batch dimensions with `x`. The eigenvalues are complex-valued even when `x` is real. Examples: .. 
code-block:: python import paddle paddle.set_device("cpu") paddle.seed(1234) x = paddle.rand(shape=[3, 3], dtype='float64') # [[0.02773777, 0.93004224, 0.06911496], # [0.24831591, 0.45733623, 0.07717843], # [0.48016702, 0.14235102, 0.42620817]]) print(paddle.linalg.eigvals(x)) # [(-0.27078833542132674+0j), (0.29962280156230725+0j), (0.8824477020120244+0j)] #complex128 """ check_variable_and_dtype(x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigvals') x_shape = list(x.shape) if len(x_shape) < 2: raise ValueError( "The dimension of Input(x) should be at least 2, but received x's dimention = {}, x's shape = {}". format(len(x_shape), x_shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The last two dimensions of Input(x) should be equal, but received x's shape = {}". format(x_shape)) if paddle.in_dynamic_mode(): return _C_ops.eigvals(x) helper = LayerHelper('eigvals', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type='eigvals', inputs={'X': x}, outputs={'Out': out}) return out def multi_dot(x, name=None): """ Multi_dot is an operator that calculates multiple matrix multiplications. Supports inputs of float16(only GPU support), float32 and float64 dtypes. This function does not support batched inputs. The input tensor in [x] must be 2-D except for the first and last can be 1-D. If the first tensor is a 1-D vector of shape(n, ) it is treated as row vector of shape(1, n), similarly if the last tensor is a 1D vector of shape(n, ), it is treated as a column vector of shape(n, 1). If the first and last tensor are 2-D matrix, then the output is also 2-D matrix, otherwise the output is a 1-D vector. Multi_dot will select the lowest cost multiplication order for calculation. The cost of multiplying two matrices with shapes (a, b) and (b, c) is a * b * c. Given matrices A, B, C with shapes (20, 5), (5, 100), (100, 10) respectively, we can calculate the cost of different multiplication orders as follows: - Cost((AB)C) = 20x5x100 + 20x100x10 = 30000 - Cost(A(BC)) = 5x100x10 + 20x5x10 = 6000 In this case, multiplying B and C first, then multiply A, which is 5 times faster than sequential calculation. Args: x ([Tensor]): The input tensors which is a list Tensor. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The output Tensor. Examples: .. 
code-block:: python import paddle import numpy as np # A * B A_data = np.random.random([3, 4]).astype(np.float32) B_data = np.random.random([4, 5]).astype(np.float32) A = paddle.to_tensor(A_data) B = paddle.to_tensor(B_data) out = paddle.linalg.multi_dot([A, B]) print(out.numpy().shape) # [3, 5] # A * B * C A_data = np.random.random([10, 5]).astype(np.float32) B_data = np.random.random([5, 8]).astype(np.float32) C_data = np.random.random([8, 7]).astype(np.float32) A = paddle.to_tensor(A_data) B = paddle.to_tensor(B_data) C = paddle.to_tensor(C_data) out = paddle.linalg.multi_dot([A, B, C]) print(out.numpy().shape) # [10, 7] """ if paddle.in_dynamic_mode(): return _C_ops.multi_dot(x) check_type(x, 'x', (list, tuple), 'multi_dot') for id, item in enumerate(x): check_variable_and_dtype(item, 'x[' + str(id) + ']', ['float16', 'float32', 'float64'], 'multi_dot') if item.dtype != x[0].dtype: raise TypeError( "All the Tensors in the input must have the same data type.") helper = LayerHelper('multi_dot', **locals()) dtype = helper.input_dtype(input_param_name='x') out = helper.create_variable_for_type_inference(dtype) helper.append_op(type='multi_dot', inputs={"X": x}, outputs={"Out": out}) return out def eigh(x, UPLO='L', name=None): """ Compute the eigenvalues and eigenvectors of a complex Hermitian (conjugate symmetric) or a real symmetric matrix. Args: x (Tensor): A tensor with shape :math:`[*, N, N]` , The data type of the input Tensor x should be one of float32, float64, complex64, complex128. UPLO(str, optional): (string, default 'L'), 'L' represents the lower triangular matrix, "'U' represents the upper triangular matrix.". name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: out_value(Tensor): A Tensor with shape [*, N] and data type of float32 and float64. The eigenvalues of eigh op. out_vector(Tensor): A Tensor with shape [*, N, N] and data type of float32,float64,complex64 and complex128. The eigenvectors of eigh op. Examples: .. code-block:: python import numpy as np import paddle x_data = np.array([[1, -2j], [2j, 5]]) x = paddle.to_tensor(x_data) out_value, out_vector = paddle.linalg.eigh(x, UPLO='L') print(out_value) #[0.17157288, 5.82842712] print(out_vector) #[(-0.9238795325112867+0j), (-0.3826834323650898+0j)], #[ 0.3826834323650898j , -0.9238795325112867j ]] """ if paddle.in_dynamic_mode(): return _C_ops.eigh(x, 'UPLO', UPLO) def __check_input(x, UPLO): x_shape = list(x.shape) if len(x.shape) < 2: raise ValueError( "Input(input) only support >=2 tensor, but received " "length of Input(input) is %s." % len(x.shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The input matrix must be batches of square matrices. But received x's dimention: {}". format(x_shape)) if UPLO != 'L' and UPLO != 'U': raise ValueError( "UPLO must be L or U. 
But received UPLO is: {}".format(UPLO)) __check_input(x, UPLO) helper = LayerHelper('eigh', **locals()) check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigh') out_value = helper.create_variable_for_type_inference(dtype=x.dtype) out_vector = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='eigh', inputs={'X': x}, outputs={'Eigenvalues': out_value, 'Eigenvectors': out_vector}, attrs={'UPLO': UPLO}) return out_value, out_vector def pinv(x, rcond=1e-15, hermitian=False, name=None): r""" Calculate pseudo inverse via SVD(singular value decomposition) of one matrix or batches of regular matrix. .. math:: if hermitian == False: x = u * s * vt (SVD) out = v * 1/s * ut else: x = u * s * ut (eigh) out = u * 1/s * u.conj().transpose(-2,-1) If x is hermitian or symmetric matrix, svd will be replaced with eigh. Args: x(Tensor): The input tensor. Its shape should be (*, m, n) where * is zero or more batch dimensions. m and n can be arbitraty positive number. The data type of x should be float32 or float64 or complex64 or complex128. When data type is complex64 or cpmplex128, hermitian should be set True. rcond(Tensor, optional): the tolerance value to determine when is a singular value zero. Defalut:1e-15. hermitian(bool, optional): indicates whether x is Hermitian if complex or symmetric if real. Default: False. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The tensor with same data type with x. it represents pseudo inverse of x. Its shape should be (*, n, m). Examples: .. code-block:: python import paddle x = paddle.arange(15).reshape((3, 5)).astype('float64') input = paddle.to_tensor(x) out = paddle.linalg.pinv(input) print(input) print(out) # input: # [[0. , 1. , 2. , 3. , 4. ], # [5. , 6. , 7. , 8. , 9. 
], # [10., 11., 12., 13., 14.]] # out: # [[-0.22666667, -0.06666667, 0.09333333], # [-0.12333333, -0.03333333, 0.05666667], # [-0.02000000, 0.00000000, 0.02000000], # [ 0.08333333, 0.03333333, -0.01666667], # [ 0.18666667, 0.06666667, -0.05333333]] # one can verify : x * out * x = x ; # or out * x * out = x ; """ if paddle.in_dynamic_mode(): if not hermitian: # combine svd and matmul op u, s, vt = _C_ops.svd(x, 'full_matrices', False) max_singular_val = _C_ops.reduce_max(s, 'dim', [-1], 'keep_dim', True, \ 'reduce_all', False) rcond = paddle.to_tensor(rcond, dtype=x.dtype) cutoff = rcond * max_singular_val y = float('inf') y = paddle.to_tensor(y, dtype=x.dtype) condition = s > cutoff cond_int = layers.cast(condition, s.dtype) cond_not_int = layers.cast(layers.logical_not(condition), s.dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st, _ = _C_ops.unsqueeze2(singular, 'axes', [-2]) dims = list(range(len(vt.shape))) perm = dims[:-2] + [dims[-1]] + [dims[-2]] v, _ = _C_ops.transpose2(vt, 'axis', perm) out_1 = v * st out_2 = _C_ops.matmul_v2(out_1, u, 'trans_x', False, 'trans_y', True) return out_2 else: # combine eigh and matmul op s, u = _C_ops.eigh(x, 'UPLO', 'L') s_abs = paddle.abs(s) max_singular_val = _C_ops.reduce_max(s_abs, 'dim', [-1], 'keep_dim', True, \ 'reduce_all', False) rcond = paddle.to_tensor(rcond, dtype=s.dtype) cutoff = rcond * max_singular_val y = float('inf') y = paddle.to_tensor(y, dtype=s.dtype) condition = s_abs > cutoff cond_int = layers.cast(condition, s.dtype) cond_not_int = layers.cast(layers.logical_not(condition), s.dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st, _ = _C_ops.unsqueeze2(singular, 'axes', [-2]) out_1 = u * st u_conj = _C_ops.conj(u) out_2 = _C_ops.matmul_v2(out_1, u_conj, 'trans_x', False, 'trans_y', True) return out_2 else: if not hermitian: helper = LayerHelper('pinv', **locals()) dtype = x.dtype check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'pinv') u = helper.create_variable_for_type_inference(dtype) s = helper.create_variable_for_type_inference(dtype) vt = helper.create_variable_for_type_inference(dtype) helper.append_op( type='svd', inputs={'X': [x]}, outputs={'U': u, 'VH': vt, 'S': s}, attrs={'full_matrices': False}, ) max_singular_val = helper.create_variable_for_type_inference(dtype) helper.append_op( type='reduce_max', inputs={'X': s}, outputs={'Out': max_singular_val}, attrs={'dim': [-1], 'keep_dim': True, 'reduce_all': False}) rcond = layers.fill_constant(shape=[1], value=rcond, dtype=dtype) cutoff = rcond * max_singular_val y = float('inf') y = layers.fill_constant(shape=[1], value=y, dtype=dtype) condition = s > cutoff cond_int = layers.cast(condition, dtype) cond_not_int = layers.cast(layers.logical_not(condition), dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st = helper.create_variable_for_type_inference(dtype=dtype) st_shape = helper.create_variable_for_type_inference(dtype=dtype) helper.append_op( type='unsqueeze2', inputs={'X': singular}, attrs={'axes': [-2]}, outputs={'Out': st, 'XShape': st_shape}) dims = list(range(len(vt.shape))) perm = dims[:-2] + [dims[-1]] + [dims[-2]] v = helper.create_variable_for_type_inference(dtype) v_shape = helper.create_variable_for_type_inference(dtype) helper.append_op( 
type='transpose2', inputs={'X': [vt]}, outputs={'Out': [v], 'XShape': [v_shape]}, attrs={'axis': perm}) out_1 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='elementwise_mul', inputs={'X': v, 'Y': st}, outputs={'Out': out_1}, attrs={'axis': -1, 'use_mkldnn': False}) out_1 = helper.append_activation(out_1) out_2 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='matmul_v2', inputs={'X': out_1, 'Y': u}, outputs={'Out': out_2}, attrs={'trans_x': False, 'trans_y': True}, ) return out_2 else: helper = LayerHelper('pinv', **locals()) dtype = x.dtype check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'pinv') if dtype == paddle.complex128: s_type = 'float64' elif dtype == paddle.complex64: s_type = 'float32' else: s_type = dtype u = helper.create_variable_for_type_inference(dtype) s = helper.create_variable_for_type_inference(s_type) helper.append_op( type='eigh', inputs={'X': x}, outputs={'Eigenvalues': s, 'Eigenvectors': u}, attrs={'UPLO': 'L'}) s_abs = helper.create_variable_for_type_inference(s_type) helper.append_op( type='abs', inputs={'X': s}, outputs={'Out': s_abs}) max_singular_val = helper.create_variable_for_type_inference(s_type) helper.append_op( type='reduce_max', inputs={'X': s_abs}, outputs={'Out': max_singular_val}, attrs={'dim': [-1], 'keep_dim': True, 'reduce_all': False}) rcond = layers.fill_constant(shape=[1], value=rcond, dtype=s_type) cutoff = rcond * max_singular_val y = float('inf') y = layers.fill_constant(shape=[1], value=y, dtype=s_type) condition = s_abs > cutoff cond_int = layers.cast(condition, s_type) cond_not_int = layers.cast(layers.logical_not(condition), s_type) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st = helper.create_variable_for_type_inference(dtype=s_type) st_shape = helper.create_variable_for_type_inference(dtype=s_type) helper.append_op( type='unsqueeze2', inputs={'X': singular}, attrs={'axes': [-2]}, outputs={'Out': st, 'XShape': st_shape}) out_1 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='elementwise_mul', inputs={'X': u, 'Y': st}, outputs={'Out': out_1}, attrs={'axis': -1, 'use_mkldnn': False}) out_1 = helper.append_activation(out_1) u_conj = helper.create_variable_for_type_inference(dtype) helper.append_op( type='conj', inputs={'X': u}, outputs={'Out': [u_conj]}) out_2 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='matmul_v2', inputs={'X': out_1, 'Y': u_conj}, outputs={'Out': out_2}, attrs={'trans_x': False, 'trans_y': True}, ) return out_2 def solve(x, y, name=None): r""" Computes the solution of a square system of linear equations with a unique solution for input 'X' and 'Y'. Let :math: `X` be a sqaure matrix or a batch of square matrices, :math:`Y` be a vector/matrix or a batch of vectors/matrices, the equation should be: .. math:: Out = X^-1 * Y Specifically, - This system of linear equations has one solution if and only if input 'X' is invertible. Args: x (Tensor): A square matrix or a batch of square matrices. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): A vector/matrix or a batch of vectors/matrices. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. name(str, optional): Name for the operation (optional, default is None). 
For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of a square system of linear equations with a unique solution for input 'x' and 'y'. Its data type should be the same as that of `x`. Examples: .. code-block:: python # a square system of linear equations: # 2*X0 + X1 = 9 # X0 + 2*X1 = 8 import paddle import numpy as np np_x = np.array([[3, 1],[1, 2]]) np_y = np.array([9, 8]) x = paddle.to_tensor(np_x, dtype="float64") y = paddle.to_tensor(np_y, dtype="float64") out = paddle.linalg.solve(x, y) print(out) # [2., 3.]) """ if paddle.in_dynamic_mode(): return _C_ops.solve(x, y) inputs = {"X": [x], "Y": [y]} helper = LayerHelper("solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type="solve", inputs={"X": x, "Y": y}, outputs={"Out": out}) return out def triangular_solve(x, y, upper=True, transpose=False, unitriangular=False, name=None): r""" Computes the solution of a system of equations with a triangular coefficient matrix `x` and multiple right-hand sides `y` . Input `x` and `y` is 2D matrices or batches of 2D matrices. If the inputs are batches, the outputs is also batches. Args: x (Tensor): The input triangular coefficient matrix. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. upper (bool, optional): Whether to solve the upper-triangular system of equations (default) or the lower-triangular system of equations. Default: True. transpose (bool, optional): whether `x` should be transposed before calculation. Default: False. unitriangular (bool, optional): whether `x` is unit triangular. If True, the diagonal elements of `x` are assumed to be 1 and not referenced from `x` . Default: False. name(str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of the system of equations. Its data type should be the same as that of `x`. Examples: .. code-block:: python # a square system of linear equations: # x1 + x2 + x3 = 0 # 2*x2 + x3 = -9 # -x3 = 5 import paddle import numpy as np x = paddle.to_tensor([[1, 1, 1], [0, 2, 1], [0, 0,-1]], dtype="float64") y = paddle.to_tensor([[0], [-9], [5]], dtype="float64") out = paddle.linalg.triangular_solve(x, y, upper=True) print(out) # [7, -2, -5] """ if paddle.in_dynamic_mode(): return _C_ops.triangular_solve(x, y, 'upper', upper, 'transpose', transpose, 'unitriangular', unitriangular) inputs = {"X": [x], "Y": [y]} helper = LayerHelper("triangular_solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'triangular_solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'triangular_solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='triangular_solve', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs={ 'upper': upper, 'transpose': transpose, 'unitriangular': unitriangular }) return out def cholesky_solve(x, y, upper=False, name=None): r""" Solves a linear system of equations A @ X = B, given A's Cholesky factor matrix u and matrix B. Input `x` and `y` is 2D matrices or batches of 2D matrices. 
If the inputs are batches, the outputs is also batches. Args: x (Tensor): The input matrix which is upper or lower triangular Cholesky factor of square matrix A. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. upper (bool, optional): whether to consider the Cholesky factor as a lower or upper triangular matrix. Default: False. name(str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of the system of equations. Its data type is the same as that of `x`. Examples: .. code-block:: python import paddle u = paddle.to_tensor([[1, 1, 1], [0, 2, 1], [0, 0,-1]], dtype="float64") b = paddle.to_tensor([[0], [-9], [5]], dtype="float64") out = paddle.linalg.cholesky_solve(b, u, upper=True) print(out) # [-2.5, -7, 9.5] """ if paddle.in_dynamic_mode(): return _C_ops.cholesky_solve(x, y, 'upper', upper) helper = LayerHelper("cholesky_solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'cholesky_solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'cholesky_solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='cholesky_solve', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs={'upper': upper}) return out def eigvalsh(x, UPLO='L', name=None): """ Computes the eigenvalues of a complex Hermitian (conjugate symmetric) or a real symmetric matrix. Args: x (Tensor): A tensor with shape :math:`[_, M, M]` , The data type of the input Tensor x should be one of float32, float64, complex64, complex128. UPLO(str, optional): Lower triangular part of a (‘L’, default) or the upper triangular part (‘U’). name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor eigenvalues in ascending order. Examples: .. code-block:: python import numpy as np import paddle x_data = np.array([[1, -2j], [2j, 5]]) x = paddle.to_tensor(x_data) out_value = paddle.eigvalsh(x, UPLO='L') print(out_value) #[0.17157288, 5.82842712] """ if paddle.in_dynamic_mode(): is_test = x.stop_gradient values, _ = _C_ops.eigvalsh(x, 'UPLO', UPLO, 'is_test', is_test) return values def __check_input(x, UPLO): x_shape = list(x.shape) if len(x.shape) < 2: raise ValueError( "Input(input) only support >=2 tensor, but received " "length of Input(input) is %s." % len(x.shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The input matrix must be batches of square matrices. But received x's dimention: {}". format(x_shape)) if UPLO != 'L' and UPLO != 'U': raise ValueError( "UPLO must be L or U. 
But received UPLO is: {}".format(UPLO)) __check_input(x, UPLO) helper = LayerHelper('eigvalsh', **locals()) check_variable_and_dtype(x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigvalsh') out_value = helper.create_variable_for_type_inference(dtype=x.dtype) out_vector = helper.create_variable_for_type_inference(dtype=x.dtype) is_test = x.stop_gradient helper.append_op( type='eigvalsh', inputs={'X': x}, outputs={'Eigenvalues': out_value, 'Eigenvectors': out_vector}, attrs={'UPLO': UPLO, 'is_test': is_test}) return out_value def lstsq(x, y, rcond=None, driver=None, name=None): """ Computes a solution to the least squares problem of a system of linear equations. Args: x (Tensor): A tensor with shape ``(*, M, N)`` , the data type of the input Tensor ``x`` should be one of float32, float64. y (Tensor): A tensor with shape ``(*, M, K)`` , the data type of the input Tensor ``y`` should be one of float32, float64. rcond(float, optional): The default value is None. A float pointing number used to determine the effective rank of ``x``. If ``rcond`` is None, it will be set to max(M, N) times the machine precision of x_dtype. driver(str, optional): The default value is None. The name of LAPACK method to be used. For CPU inputs the valid values are ‘gels’, ‘gelsy’, ‘gelsd, ‘gelss’. For CUDA input, the only valid driver is ‘gels’. If ``driver`` is None, ‘gelsy’ is used for CPU inputs and ‘gels’ for CUDA inputs. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tuple: A tuple of 4 Tensors which is (``solution``, ``residuals``, ``rank``, ``singular_values``). ``solution`` is a tensor with shape ``(*, N, K)``, meaning the least squares solution. ``residuals`` is a tensor with shape ``(*, K)``, meaning the squared residuals of the solutions, which is computed when M > N and every matrix in ``x`` is full-rank, otherwise return an empty tensor. ``rank`` is a tensor with shape ``(*)``, meaning the ranks of the matrices in ``x``, which is computed when ``driver`` in (‘gelsy’, ‘gelsd’, ‘gelss’), otherwise return an empty tensor. ``singular_values`` is a tensor with shape ``(*, min(M, N))``, meaning singular values of the matrices in ``x``, which is computed when ``driver`` in (‘gelsd’, ‘gelss’), otherwise return an empty tensor. Examples: .. code-block:: python import paddle paddle.set_device("cpu") x = paddle.to_tensor([[1, 3], [3, 2], [5, 6.]]) y = paddle.to_tensor([[3, 4, 6], [5, 3, 4], [1, 2, 1.]]) results = paddle.linalg.lstsq(x, y, driver="gelsd") print(results[0]) # [[ 0.78350395, -0.22165027, -0.62371236], # [-0.11340097, 0.78866047, 1.14948535]] print(results[1]) # [19.81443405, 10.43814468, 30.56185532]) print(results[2]) # 2 print(results[3]) # [9.03455734, 1.54167950] x = paddle.to_tensor([[10, 2, 3], [3, 10, 5], [5, 6, 12.]]) y = paddle.to_tensor([[4, 2, 9], [2, 0, 3], [2, 5, 3.]]) results = paddle.linalg.lstsq(x, y, driver="gels") print(results[0]) # [[ 0.39386186, 0.10230173, 0.93606132], # [ 0.10741687, -0.29028133, 0.11892585], # [-0.05115091, 0.51918161, -0.19948854]] print(results[1]) # [] """ device = paddle.get_device() if device == "cpu": if driver not in (None, "gels", "gelss", "gelsd", "gelsy"): raise ValueError( "Only support valid driver is 'gels', 'gelss', 'gelsd', 'gelsy' or None for CPU inputs. But got {}". 
format(driver)) driver = "gelsy" if driver is None else driver elif "gpu" in device: if driver not in (None, "gels"): raise ValueError( "Only support valid driver is 'gels' or None for CUDA inputs. But got {}". format(driver)) driver = "gels" if driver is None else driver else: raise RuntimeError("Only support lstsq api for CPU or CUDA device.") if x.dtype == y.dtype and x.dtype in (paddle.float32, paddle.float64): pass else: raise ValueError( "Only support x and y have the same dtype such as 'float32' and 'float64'." ) if rcond is None: if x.dtype == paddle.float32: rcond = 1e-7 * max(x.shape[-2], x.shape[-1]) elif x.dtype == paddle.float64: rcond = 1e-15 * max(x.shape[-2], x.shape[-1]) if paddle.in_dynamic_mode(): solution, rank, singular_values = _C_ops.lstsq(x, y, "rcond", rcond, "driver", driver) if x.shape[-2] > x.shape[-1]: matmul_out = _varbase_creator(dtype=x.dtype) _C_ops.matmul(x, solution, matmul_out, 'trans_x', False, 'trans_y', False) minus_out = _C_ops.elementwise_sub(matmul_out, y) pow_out = _C_ops.pow(minus_out, 'factor', 2) residuals = _C_ops.reduce_sum(pow_out, 'dim', [-2], 'keepdim', False, 'reduce_all', False) else: residuals = paddle.empty(shape=[0], dtype=x.dtype) if driver == "gels": rank = paddle.empty(shape=[0], dtype=paddle.int32) singular_values = paddle.empty(shape=[0], dtype=x.dtype) elif driver == "gelsy": singular_values = paddle.empty(shape=[0], dtype=x.dtype) return solution, residuals, rank, singular_values helper = LayerHelper('lstsq', **locals()) check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq') check_variable_and_dtype( y, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq') solution = helper.create_variable_for_type_inference(dtype=x.dtype) residuals = helper.create_variable_for_type_inference(dtype=x.dtype) rank = helper.create_variable_for_type_inference(dtype=paddle.int32) singular_values = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='lstsq', inputs={'X': x, 'Y': y}, outputs={ 'Solution': solution, 'Rank': rank, 'SingularValues': singular_values }, attrs={'rcond': rcond, 'driver': driver}) matmul_out = helper.create_variable_for_type_inference(dtype=x.dtype) minus_out = helper.create_variable_for_type_inference(dtype=x.dtype) pow_out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matmul_v2', inputs={'X': x, 'Y': solution}, outputs={'Out': matmul_out}, attrs={ 'trans_x': False, 'trans_y': False, }) helper.append_op( type='elementwise_sub', inputs={'X': matmul_out, 'Y': y}, outputs={'Out': minus_out}) helper.append_op( type='pow', inputs={'X': minus_out}, outputs={'Out': pow_out}, attrs={'factor': 2}) helper.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': residuals}, attrs={'dim': [-2], 'keep_dim': False, 'reduce_all': False}) if driver == "gels": rank = paddle.static.data(name='rank', shape=[0]) singular_values = paddle.static.data(name='singular_values', shape=[0]) elif driver == "gelsy": singular_values = paddle.static.data(name='singular_values', shape=[0]) return solution, residuals, rank, singular_values
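The lstsq branch above only fills `residuals` when the system is overdetermined (x.shape[-2] > x.shape[-1]) and full rank. A minimal dynamic-graph sketch of that behaviour, mirroring the docstring example (shapes and values are illustrative):

import paddle

paddle.set_device("cpu")
# Overdetermined system: 3 equations, 2 unknowns, 3 right-hand sides.
x = paddle.to_tensor([[1.0, 3.0], [3.0, 2.0], [5.0, 6.0]])
y = paddle.to_tensor([[3.0, 4.0, 6.0], [5.0, 3.0, 4.0], [1.0, 2.0, 1.0]])
solution, residuals, rank, singular_values = paddle.linalg.lstsq(x, y, driver="gelsd")
# Because x has more rows than columns, residuals is non-empty and matches
# the squared residuals computed explicitly below.
check = paddle.sum((paddle.matmul(x, solution) - y) ** 2, axis=-2)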
bmm
Applies batched matrix multiplication to two tensors.

Both of the two input tensors must be three-dimensional and share the same batch size.
If x is a (b, m, k) tensor, y is a (b, k, n) tensor, the output will be a (b, m, n) tensor.

Args:
    x (Tensor): The input Tensor.
    y (Tensor): The input Tensor.
    name (str|None): A name for this layer (optional). If set None, the layer
        will be named automatically.

Returns:
    Tensor: The product Tensor.

Examples:
    .. code-block:: python

        import paddle

        # In imperative mode:
        # size x: (2, 2, 3) and y: (2, 3, 2)
        x = paddle.to_tensor([[[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]],
                              [[3.0, 3.0, 3.0], [4.0, 4.0, 4.0]]])
        y = paddle.to_tensor([[[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]],
                              [[4.0, 4.0], [5.0, 5.0], [6.0, 6.0]]])
        out = paddle.bmm(x, y)
        # output size: (2, 2, 2)
        # output value:
        # [[[6.0, 6.0], [12.0, 12.0]], [[45.0, 45.0], [60.0, 60.0]]]
        out_np = out.numpy()
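For context, a small self-contained sketch (values are arbitrary) of how bmm relates to matmul: for exactly 3-D inputs with equal batch sizes the two agree, bmm simply skips matmul's broadcasting rules.

import paddle

x = paddle.rand([2, 2, 3])
y = paddle.rand([2, 3, 2])
out_bmm = paddle.bmm(x, y)     # shape: [2, 2, 2]
out_mm = paddle.matmul(x, y)   # same result for this 3-D, equal-batch case
print(paddle.allclose(out_bmm, out_mm))  # expected: True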
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from ..fluid.layer_helper import LayerHelper from ..framework import _varbase_creator, _dygraph_tracer from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype from ..static import Variable from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode from ..fluid.layers import transpose, cast # noqa: F401 from ..fluid import layers import paddle from paddle.common_ops_import import core from paddle.common_ops_import import VarDesc from paddle import _C_ops __all__ = [] def matmul(x, y, transpose_x=False, transpose_y=False, name=None): """ Applies matrix multiplication to two tensors. `matmul` follows the complete broadcast rules, and its behavior is consistent with `np.matmul`. Currently, the input tensors' number of dimensions can be any, `matmul` can be used to achieve the `dot`, `matmul` and `batchmatmul`. The actual behavior depends on the shapes of :math:`x`, :math:`y` and the flag values of :attr:`transpose_x`, :attr:`transpose_y`. Specifically: - If a transpose flag is specified, the last two dimensions of the tensor are transposed. If the tensor is ndim-1 of shape, the transpose is invalid. If the tensor is ndim-1 of shape :math:`[D]`, then for :math:`x` it is treated as :math:`[1, D]`, whereas for :math:`y` it is the opposite: It is treated as :math:`[D, 1]`. The multiplication behavior depends on the dimensions of `x` and `y`. Specifically: - If both tensors are 1-dimensional, the dot product result is obtained. - If both tensors are 2-dimensional, the matrix-matrix product is obtained. - If the `x` is 1-dimensional and the `y` is 2-dimensional, a `1` is prepended to its dimension in order to conduct the matrix multiply. After the matrix multiply, the prepended dimension is removed. - If the `x` is 2-dimensional and `y` is 1-dimensional, the matrix-vector product is obtained. - If both arguments are at least 1-dimensional and at least one argument is N-dimensional (where N > 2), then a batched matrix multiply is obtained. If the first argument is 1-dimensional, a 1 is prepended to its dimension in order to conduct the batched matrix multiply and removed after. If the second argument is 1-dimensional, a 1 is appended to its dimension for the purpose of the batched matrix multiple and removed after. The non-matrix (exclude the last two dimensions) dimensions are broadcasted according the broadcast rule. For example, if input is a (j, 1, n, m) tensor and the other is a (k, m, p) tensor, out will be a (j, k, n, p) tensor. Args: x (Tensor): The input tensor which is a Tensor. y (Tensor): The input tensor which is a Tensor. transpose_x (bool): Whether to transpose :math:`x` before multiplication. transpose_y (bool): Whether to transpose :math:`y` before multiplication. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The output Tensor. Examples: .. 
code-block:: python import paddle import numpy as np # vector * vector x_data = np.random.random([10]).astype(np.float32) y_data = np.random.random([10]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [1] # matrix * vector x_data = np.random.random([10, 5]).astype(np.float32) y_data = np.random.random([5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10] # batched matrix * broadcasted vector x_data = np.random.random([10, 5, 2]).astype(np.float32) y_data = np.random.random([2]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 5] # batched matrix * batched matrix x_data = np.random.random([10, 5, 2]).astype(np.float32) y_data = np.random.random([10, 2, 5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 5, 5] # batched matrix * broadcasted matrix x_data = np.random.random([10, 1, 5, 2]).astype(np.float32) y_data = np.random.random([1, 3, 2, 5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 3, 5, 5] """ if in_dygraph_mode(): return _C_ops.final_state_matmul(x, y, transpose_x, transpose_y) if _in_legacy_dygraph(): op_type = 'matmul_v2' op = getattr(_C_ops, op_type) return op(x, y, 'trans_x', transpose_x, 'trans_y', transpose_y) attrs = { 'trans_x': transpose_x, 'trans_y': transpose_y, } def __check_input(x, y): var_names = {'x': x, 'y': y} for name, val in var_names.items(): check_variable_and_dtype( val, name, ['float16', 'float32', 'float64', 'complex64', 'complex128'], 'matmul') __check_input(x, y) helper = LayerHelper('matmul_v2', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matmul_v2', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs=attrs) return out def norm(x, p='fro', axis=None, keepdim=False, name=None): """ Returns the matrix norm (Frobenius) or vector norm (the 1-norm, the Euclidean or 2-norm, and in general the p-norm for p > 0) of a given tensor. .. note:: This norm API is different from `numpy.linalg.norm`. This api supports high-order input tensors (rank >= 3), and certain axis need to be pointed out to calculate the norm. But `numpy.linalg.norm` only supports 1-D vector or 2-D matrix as input tensor. For p-order matrix norm, this api actually treats matrix as a flattened vector to calculate the vector norm, NOT REAL MATRIX NORM. Args: x (Tensor): The input tensor could be N-D tensor, and the input data type could be float32 or float64. p (float|string, optional): Order of the norm. Supported values are `fro`, `0`, `1`, `2`, `inf`, `-inf` and any positive real number yielding the corresponding p-norm. Not supported: ord < 0 and nuclear norm. Default value is `fro`. axis (int|list|tuple, optional): The axis on which to apply norm operation. If axis is int or list(int)/tuple(int) with only one element, the vector norm is computed over the axis. If `axis < 0`, the dimension to norm operation is rank(input) + axis. If axis is a list(int)/tuple(int) with two elements, the matrix norm is computed over the axis. Defalut value is `None`. keepdim (bool, optional): Whether to reserve the reduced dimension in the output Tensor. 
The result tensor will have fewer dimension than the :attr:`input` unless :attr:`keepdim` is true, default value is False. name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: results of norm operation on the specified axis of input tensor, it's data type is the same as input's Tensor. Examples: .. code-block:: python import paddle import numpy as np shape=[2, 3, 4] np_input = np.arange(24).astype('float32') - 12 np_input = np_input.reshape(shape) x = paddle.to_tensor(np_input) #[[[-12. -11. -10. -9.] [ -8. -7. -6. -5.] [ -4. -3. -2. -1.]] # [[ 0. 1. 2. 3.] [ 4. 5. 6. 7.] [ 8. 9. 10. 11.]]] # compute frobenius norm along last two dimensions. out_fro = paddle.linalg.norm(x, p='fro', axis=[0,1]) # out_fro.numpy() [17.435596 16.911535 16.7332 16.911535] # compute 2-order vector norm along last dimension. out_pnorm = paddle.linalg.norm(x, p=2, axis=-1) #out_pnorm.numpy(): [[21.118711 13.190906 5.477226] # [ 3.7416575 11.224972 19.131126]] # compute 2-order norm along [0,1] dimension. out_pnorm = paddle.linalg.norm(x, p=2, axis=[0,1]) #out_pnorm.numpy(): [17.435596 16.911535 16.7332 16.911535] # compute inf-order norm out_pnorm = paddle.linalg.norm(x, p=np.inf) #out_pnorm.numpy() = [12.] out_pnorm = paddle.linalg.norm(x, p=np.inf, axis=0) #out_pnorm.numpy(): [[12. 11. 10. 9.] [8. 7. 6. 7.] [8. 9. 10. 11.]] # compute -inf-order norm out_pnorm = paddle.linalg.norm(x, p=-np.inf) #out_pnorm.numpy(): [0.] out_pnorm = paddle.linalg.norm(x, p=-np.inf, axis=0) #out_pnorm.numpy(): [[0. 1. 2. 3.] [4. 5. 6. 5.] [4. 3. 2. 1.]] """ def frobenius_norm(input, dim=None, keepdim=False, name=None): """ The frobenius norm OP is to calculate the frobenius norm of certain two dimensions of Tensor `input`. Args: input (Variable): Tensor, data type float32, float64. dim (list, optional): None for last two dimensions. keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False. """ if dim is not None and not (isinstance(dim, list) and len(dim) == 2): raise ValueError( "The dim of frobenius norm op should be None or two elements list!" ) if paddle.in_dynamic_mode(): if dim is None: return _C_ops.frobenius_norm(input, 'keep_dim', keepdim, 'reduce_all', True) return _C_ops.frobenius_norm(input, 'dim', dim, 'keep_dim', keepdim, 'reduce_all', False) attrs = {'dim': dim, 'keep_dim': keepdim, 'reduce_all': False} if dim is None: attrs['reduce_all'] = True check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'frobenius_norm') helper = LayerHelper('frobenius_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op( type='frobenius_norm', inputs={'X': input}, outputs={'Out': out}, attrs=attrs) return out def vector_norm(input, porder=None, axis=None, keepdim=False, asvector=False, name=None): """ Calculate the p-order vector norm for certain dimension of Tensor `input`. Args: input (Variable): Tensor, data type float32, float64. porder (float, optional): None for porder=2.0. axis (int, optional): None for last dimension. keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False. 
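            asvector (bool, optional): If True, `input` is treated as a single flattened vector when computing the norm. Default False.

        A minimal usage sketch (illustrative only, not part of the original helper docstring; it goes through the public ``paddle.linalg.norm`` entry point, which dispatches to this helper when ``axis`` is a single int):

        .. code-block:: python

            import paddle

            x = paddle.rand([10, 5, 2])
            v = paddle.linalg.norm(x, p=2, axis=-1)  # vector 2-norm over the last axis
            print(v.shape)  # expected: [10, 5]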
""" if paddle.in_dynamic_mode(): if axis is None: axis = -1 return _C_ops.p_norm(input, 'porder', porder, 'axis', axis, 'keepdim', keepdim, 'asvector', asvector) if porder is not None: check_type(porder, 'porder', (float, int), 'p_norm') if axis is not None: check_type(axis, 'axis', (int), 'p_norm') check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'p_norm') attrs = { 'axis': axis if axis is not None else -1, 'porder': float(porder) if porder is not None else 2.0, 'keepdim': keepdim, 'asvector': asvector, 'epsilon': 1e-12, } helper = LayerHelper('p_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op( type='p_norm', inputs={'X': input}, outputs={'Out': out}, attrs=attrs) return out def inf_norm(input, porder=None, axis=axis, keepdim=False, asvector=False, name=None): helper = LayerHelper('frobenius_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op(type='abs', inputs={'X': input}, outputs={'Out': out}) reduce_out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) reduce_all = True if axis == None or axis == [] or asvector == True else False axis = axis if axis != None and axis != [] else [0] reduce_type = 'reduce_max' if porder == np.float( 'inf') else 'reduce_min' helper.append_op( type=reduce_type, inputs={'X': out}, outputs={'Out': reduce_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) return reduce_out def p_matrix_norm(input, porder=1., axis=axis, keepdim=False, name=None): """ NOTE: This function actually treats the matrix as flattened vector to calculate vector norm instead of matrix norm. """ block = LayerHelper('norm', **locals()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) abs_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='abs', inputs={'X': input}, outputs={'Out': abs_out}) pow_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='pow', inputs={'X': abs_out}, outputs={'Out': pow_out}, attrs={'factor': porder}) sum_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': sum_out}, attrs={ 'dim': axis, 'keep_dim': keepdim, 'reduce_all': True if axis is None else False }) porder block.append_op( type='pow', inputs={'X': sum_out}, outputs={'Out': out}, attrs={'factor': float(1. / porder)}) return out if axis is None and p is not None: if isinstance(p, str): if p == "fro": return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name) else: raise ValueError( "only valid string values are 'fro', found {}".format(p)) elif isinstance(p, (int, float)): return vector_norm( x, porder=p, axis=axis, keepdim=keepdim, asvector=True, name=name) else: raise ValueError("only valid p type is string or float, found {}". 
format(type(p))) if isinstance(axis, tuple): axis = list(axis) if isinstance(axis, list) and len(axis) == 1: axis = axis[0] #calculate vector norm, where axis is int or list with only one integer if isinstance(axis, int): if isinstance(p, str): if p == "fro": return vector_norm( x, porder=2, axis=axis, keepdim=keepdim, asvector=False, name=name) else: raise ValueError( "only valid string values are 'fro', found {}".format(p)) elif isinstance(p, (int, float)): return vector_norm( x, axis=axis, porder=p, keepdim=keepdim, asvector=False, name=name) else: raise ValueError( "unspport p for p-order vector norm. except float, found {}". format(p)) #calculate matrix norm, where axis is list with two integers elif isinstance(axis, list) and len(axis) == 2: if p == "fro": return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name) elif p == np.inf or p == -np.inf: return inf_norm(x, porder=p, axis=axis, keepdim=keepdim, name=name) elif p == 0: raise ValueError( "just suport axis type int or list (length of list <=1) if p = 0, found {}". format(axis)) else: return p_matrix_norm( x, porder=p, axis=axis, keepdim=keepdim, name=name) else: raise ValueError( "except axis type int or list (length of list <=2), found {}". format(axis)) def dist(x, y, p=2, name=None): r""" This OP returns the p-norm of (x - y). It is not a norm in a strict sense, only as a measure of distance. The shapes of x and y must be broadcastable. The definition is as follows, for details, please refer to the `numpy's broadcasting <https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`_: - Each input has at least one dimension. - Match the two input dimensions from back to front, the dimension sizes must either be equal, one of them is 1, or one of them does not exist. Where, z = x - y, the shapes of x and y are broadcastable, then the shape of z can be obtained as follows: 1. If the number of dimensions of x and y are not equal, prepend 1 to the dimensions of the tensor with fewer dimensions. For example, The shape of x is [8, 1, 6, 1], the shape of y is [7, 1, 5], prepend 1 to the dimension of y. x (4-D Tensor): 8 x 1 x 6 x 1 y (4-D Tensor): 1 x 7 x 1 x 5 2. Determine the size of each dimension of the output z: choose the maximum value from the two input dimensions. z (4-D Tensor): 8 x 7 x 6 x 5 If the number of dimensions of the two inputs are the same, the size of the output can be directly determined in step 2. When p takes different values, the norm formula is as follows: When p = 0, defining $0^0=0$, the zero-norm of z is simply the number of non-zero elements of z. .. math:: ||z||_{0}=\lim_{p \\rightarrow 0}\sum_{i=1}^{m}|z_i|^{p} When p = inf, the inf-norm of z is the maximum element of z. .. math:: ||z||_\infty=\max_i |z_i| When p = -inf, the negative-inf-norm of z is the minimum element of z. .. math:: ||z||_{-\infty}=\min_i |z_i| Otherwise, the p-norm of z follows the formula, .. math:: ||z||_{p}=(\sum_{i=1}^{m}|z_i|^p)^{\\frac{1}{p}} Args: x (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64. y (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64. p (float, optional): The norm to be computed, its data type is float32 or float64. Default: 2. Returns: Tensor: Tensor that is the p-norm of (x - y). Examples: .. code-block:: python import paddle import numpy as np x = paddle.to_tensor(np.array([[3, 3],[3, 3]]), "float32") y = paddle.to_tensor(np.array([[3, 3],[3, 1]]), "float32") out = paddle.dist(x, y, 0) print(out) # out = [1.] out = paddle.dist(x, y, 2) print(out) # out = [2.] 
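            # Supplementary cross-check (not part of the original example): for these
            # inputs, dist(x, y, p) is expected to agree with the vector p-norm of the
            # flattened difference z = x - y.
            out_check = paddle.linalg.norm(paddle.flatten(x - y), p=2)
            print(out_check)  # expected to match paddle.dist(x, y, 2) above, i.e. [2.]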
out = paddle.dist(x, y, float("inf")) print(out) # out = [2.] out = paddle.dist(x, y, float("-inf")) print(out) # out = [0.] """ check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'dist') check_variable_and_dtype(y, 'dtype', ['float32', 'float64'], 'dist') check_type(p, 'p', (float, int), 'dist') helper = LayerHelper("dist", **locals()) out = helper.create_variable_for_type_inference(x.dtype) inputs = {"X": [x], "Y": [y]} outputs = {'Out': [out]} attrs = {"p": float(p)} helper.append_op( type='dist', inputs=inputs, outputs={'Out': out}, attrs=attrs) return out def cond(x, p=None, name=None): """ Computes the condition number of a matrix or batches of matrices with respect to a matrix norm ``p``. Args: x (Tensor): The input tensor could be tensor of shape ``(*, m, n)`` where ``*`` is zero or more batch dimensions for ``p`` in ``(2, -2)``, or of shape ``(*, n, n)`` where every matrix is invertible for any supported ``p``. And the input data type could be ``float32`` or ``float64``. p (float|string, optional): Order of the norm. Supported values are `fro`, `nuc`, `1`, `-1`, `2`, `-2`, `inf`, `-inf`. Default value is `None`, meaning that the order of the norm is `2`. name (str, optional): The default value is `None`. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: computing results of condition number, its data type is the same as input Tensor ``x``. Examples: .. code-block:: python import paddle import numpy as np x = paddle.to_tensor([[1., 0, -1], [0, 1, 0], [1, 0, 1]]) # compute conditional number when p is None out = paddle.linalg.cond(x) # out.numpy() [1.4142135] # compute conditional number when order of the norm is 'fro' out_fro = paddle.linalg.cond(x, p='fro') # out_fro.numpy() [3.1622777] # compute conditional number when order of the norm is 'nuc' out_nuc = paddle.linalg.cond(x, p='nuc') # out_nuc.numpy() [9.2426405] # compute conditional number when order of the norm is 1 out_1 = paddle.linalg.cond(x, p=1) # out_1.numpy() [2.] # compute conditional number when order of the norm is -1 out_minus_1 = paddle.linalg.cond(x, p=-1) # out_minus_1.numpy() [1.] # compute conditional number when order of the norm is 2 out_2 = paddle.linalg.cond(x, p=2) # out_2.numpy() [1.4142135] # compute conditional number when order of the norm is -1 out_minus_2 = paddle.linalg.cond(x, p=-2) # out_minus_2.numpy() [0.70710677] # compute conditional number when order of the norm is inf out_inf = paddle.linalg.cond(x, p=np.inf) # out_inf.numpy() [2.] # compute conditional number when order of the norm is -inf out_minus_inf = paddle.linalg.cond(x, p=-np.inf) # out_minus_inf.numpy() [1.] 
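            # Supplementary sketch (not part of the original example): the p=2 and p=-2
            # condition numbers are ratios of the largest and smallest singular values,
            # so out_2 above can be cross-checked with paddle.linalg.svd.
            u, s, vh = paddle.linalg.svd(x)
            check_2 = s.max() / s.min()
            # check_2 is expected to match out_2, i.e. about 1.4142135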
a = paddle.to_tensor(np.random.randn(2, 4, 4).astype('float32')) # a.numpy() # [[[ 0.14063153 -0.996288 0.7996131 -0.02571543] # [-0.16303636 1.5534962 -0.49919784 -0.04402903] # [-1.1341571 -0.6022629 0.5445269 0.29154757] # [-0.16816919 -0.30972657 1.7521842 -0.5402487 ]] # [[-0.58081484 0.12402827 0.7229862 -0.55046535] # [-0.15178485 -1.1604939 0.75810957 0.30971205] # [-0.9669573 1.0940945 -0.27363303 -0.35416734] # [-1.216529 2.0018666 -0.7773689 -0.17556527]]] a_cond_fro = paddle.linalg.cond(a, p='fro') # a_cond_fro.numpy() [31.572273 28.120834] b = paddle.to_tensor(np.random.randn(2, 3, 4).astype('float64')) # b.numpy() # [[[ 1.61707487 0.46829144 0.38130416 0.82546736] # [-1.72710298 0.08866375 -0.62518804 0.16128892] # [-0.02822879 -1.67764516 0.11141444 0.3220113 ]] # [[ 0.22524372 0.62474921 -0.85503233 -1.03960523] # [-0.76620689 0.56673047 0.85064753 -0.45158196] # [ 1.47595418 2.23646462 1.5701758 0.10497519]]] b_cond_2 = paddle.linalg.cond(b, p=2) # b_cond_2.numpy() [3.30064451 2.51976252] """ def mat_norm(input, porder=1., axis=None): """ NOTE: Calculate the matrix norm of a square matrix or batches of square matrices, when porder is in (1, -1, inf, -inf) """ reduce_all = True if axis is None or axis == [] else False axis = axis if axis != None and axis != [] else [0] keepdim = False if paddle.in_dynamic_mode(): abs_out = _C_ops.abs(input) sum_out = _C_ops.reduce_sum(abs_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == 1 or porder == np.inf: return _C_ops.reduce_max(sum_out, 'dim', [-1], 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == -1 or porder == -np.inf: return _C_ops.reduce_min(sum_out, 'dim', [-1], 'keepdim', keepdim, 'reduce_all', reduce_all) block = LayerHelper('norm', **locals()) abs_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='abs', inputs={'X': input}, outputs={'Out': abs_out}) block.append_op( type='reduce_sum', inputs={'X': abs_out}, outputs={'Out': sum_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) if porder == 1 or porder == np.inf: block.append_op( type='reduce_max', inputs={'X': sum_out}, outputs={'Out': out}, attrs={ 'dim': [-1], 'keep_dim': keepdim, 'reduce_all': reduce_all }) if porder == -1 or porder == -np.inf: block.append_op( type='reduce_min', inputs={'X': sum_out}, outputs={'Out': out}, attrs={ 'dim': [-1], 'keep_dim': keepdim, 'reduce_all': reduce_all }) return out def fro_norm(input, porder=2, axis=[-1]): """ NOTE: Calculate the frobenius norm of a square matrix or batches of square matrices. """ reduce_all = True if axis is None or axis == [] else False keepdim = False if paddle.in_dynamic_mode(): pow_out = _C_ops.pow(input, 'factor', porder) sum_out_1 = _C_ops.reduce_sum(pow_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) sum_out_2 = _C_ops.reduce_sum(sum_out_1, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) return _C_ops.pow(sum_out_2, 'factor', float(1. 
/ porder)) block = LayerHelper('norm', **locals()) pow_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out_1 = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out_2 = block.create_variable_for_type_inference( dtype=block.input_dtype()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='pow', inputs={'X': input}, outputs={'Out': pow_out}, attrs={'factor': porder}) block.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': sum_out_1}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='reduce_sum', inputs={'X': sum_out_1}, outputs={'Out': sum_out_2}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='pow', inputs={'X': sum_out_2}, outputs={'Out': out}, attrs={'factor': float(1. / porder)}) return out def svd_norm(input, porder, axis=[-1]): """ NOTE: Calculate the matrix norm, which is related to singular values, of a matrix or batches of matrices, including nuclear norm, 2-norm and (-2)-norm. """ reduce_all = True if axis is None or axis == [] else False keepdim = False u, s, vh = svd(input, full_matrices=False) if paddle.in_dynamic_mode(): if porder == "nuc": return _C_ops.reduce_sum(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) max_out = _C_ops.reduce_max(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) min_out = _C_ops.reduce_min(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == 2: return _C_ops.elementwise_div(max_out, min_out, 'aixs', axis, 'use_mkldnn', False) if porder == -2: return _C_ops.elementwise_div(min_out, max_out, 'aixs', axis, 'use_mkldnn', False) block = LayerHelper('norm', **locals()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) if porder == "nuc": block.append_op( type='reduce_sum', inputs={'X': s}, outputs={'Out': out}, attrs={ 'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all }) return out max_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) min_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='reduce_max', inputs={'X': s}, outputs={'Out': max_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='reduce_min', inputs={'X': s}, outputs={'Out': min_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) if porder == 2: block.append_op( type='elementwise_div', inputs={'X': max_out, 'Y': min_out}, outputs={'Out': out}, attrs={'aixs': axis, 'use_mkldnn': False}) return out if porder == -2: block.append_op( type='elementwise_div', inputs={'X': min_out, 'Y': max_out}, outputs={'Out': out}, attrs={'aixs': axis, 'use_mkldnn': False}) return out def empty_tensor(input, shape): if paddle.in_dynamic_mode(): return input.reshape(shape) raise ValueError("only support x is nonempty tensor in static mode") x_shape = list(x.shape) if not len(x_shape) >= 2: raise ValueError("input should be a matrix or batches of matrices, " + "but the dimention of received input is {}".format( len(x_shape))) if p == None: p = 2 x_size = 0 if (0 in x_shape) else 1 if p in ("fro", "nuc", 1, -1, np.inf, -np.inf): if x_shape[len(x_shape) - 1] == x_shape[len(x_shape) - 2]: if x_size == 0: return empty_tensor(x, x_shape[:-2]) x_inv = x.inverse() if p == "fro": return fro_norm(x) * fro_norm(x_inv) if p == "nuc": return svd_norm(x, p) * svd_norm(x_inv, p) if p in (1, -1): return mat_norm( x, 
porder=p, axis=[-2]) * mat_norm( x_inv, porder=p, axis=[-2]) if p in (np.inf, -np.inf): return mat_norm( x, porder=p, axis=[-1]) * mat_norm( x_inv, porder=p, axis=[-1]) else: raise ValueError("only support p is {} when input is a ".format(p) + "square matrix or batches of square matrices") elif p in (2, -2): if x_size == 0: return empty_tensor(x, x_shape[:-2]) return svd_norm(x, porder=p) else: raise ValueError( "unsupported {} for p, only supporting ('fro', 'nuc', ".format( p) + "1, -1, 2, -2, inf, -inf) or none") def dot(x, y, name=None): """ This operator calculates inner product for vectors. .. note:: Support 1-d and 2-d Tensor. When it is 2d, the first dimension of this matrix is the batch dimension, which means that the vectors of multiple batches are dotted. Parameters: x(Tensor): 1-D or 2-D ``Tensor``. Its dtype should be ``float32``, ``float64``, ``int32``, ``int64`` y(Tensor): 1-D or 2-D ``Tensor``. Its dtype soulde be ``float32``, ``float64``, ``int32``, ``int64`` name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. Details: :ref:`api_guide_Name` Returns: Tensor: the calculated result Tensor. Examples: .. code-block:: python import paddle import numpy as np x_data = np.random.uniform(0.1, 1, [10]).astype(np.float32) y_data = np.random.uniform(1, 3, [10]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.dot(x, y) print(z) """ op_type = 'dot' # skip var type check in dygraph mode to improve efficiency if paddle.in_dynamic_mode(): op = getattr(_C_ops, op_type) return op(x, y) assert x is not None, 'x cannot be None in {}'.format(op_type) assert y is not None, 'y cannot be None in {}'.format(op_type) check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], op_type) check_variable_and_dtype(y, 'y', ['float32', 'float64', 'int32', 'int64'], op_type) helper = LayerHelper(op_type, **locals()) if name is None: out = helper.create_variable_for_type_inference(dtype=x.dtype) else: out = helper.create_variable( name=name, dtype=x.dtype, persistable=False) helper.append_op( type="dot", inputs={'X': x, 'Y': y}, attrs={}, outputs={"Out": out}) return out def cov(x, rowvar=True, ddof=True, fweights=None, aweights=None, name=None): """ Estimate the covariance matrix of the input variables, given data and weights. A covariance matrix is a square matrix, indicate the covariance of each pair variables in the input matrix. For example, for an N-dimensional samples X=[x1,x2,…xN]T, then the covariance matrix element Cij is the covariance of xi and xj. The element Cii is the variance of xi itself. Parameters: x(Tensor): A N-D(N<=2) Tensor containing multiple variables and observations. By default, each row of x represents a variable. Also see rowvar below. rowvar(Bool, optional): If rowvar is True (default), then each row represents a variable, with observations in the columns. Default: True ddof(Bool, optional): If ddof=True will return the unbiased estimate, and ddof=False will return the simple average. Default: True fweights(Tensor, optional): 1-D Tensor of integer frequency weights; The number of times each observation vector should be repeated. Default: None aweights(Tensor, optional): 1-D Tensor of observation vector weights. How important of the observation vector, larger data means this element is more important. Default: None name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. 
Details: :ref:`api_guide_Name` Returns: Tensor: The covariance matrix Tensor of the variables. Examples: .. code-block:: python import paddle xt = paddle.rand((3,4)) paddle.linalg.cov(xt) ''' Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, [[0.07918842, 0.06127326, 0.01493049], [0.06127326, 0.06166256, 0.00302668], [0.01493049, 0.00302668, 0.01632146]]) ''' """ op_type = 'cov' if len(x.shape) > 2 or len(x.shape) < 1: raise ValueError( "Input(x) only support N-D (1<=N<=2) tensor in cov, but received " "length of Input(input) is %s." % len(x.shape)) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cov') nx = x if len(x.shape) == 1: nx = x.reshape((1, -1)) if not rowvar and nx.shape[0] != 1: nx = nx.t() w = None observation_num = nx.shape[1] if fweights is not None: w = fweights.astype(nx.dtype) if len(w.shape) > 1: raise ValueError( "Input(fweights) only support N-D (N<=1) tensor in cov, but received " "shape of Input(input) is %s." % len(fweights.shape)) if fweights.shape[0] != observation_num: raise ValueError( "The number of Input(fweights) should equal to x's dim[1]: {}, but received " "size of Input(fweights) is {}.".format(observation_num, fweights.shape[0])) if fweights.min() < 0: raise ValueError( "The value of Input(fweights) cannot be negtive, but received " "min of Input(fweights) is {}.".format(fweights.min())) if not paddle.all(fweights == paddle.round(fweights.astype('float64'))): raise ValueError("Input(fweights) must be integer ") if aweights is not None: aw = aweights.astype(nx.dtype) if len(aw.shape) > 1: raise ValueError( "Input(aweights) only support N-D (N<=1) tensor in cov, but received " "length of Input(input) is %s." % len(aweights.shape)) check_variable_and_dtype(aweights, 'dtype', ['float32', 'float64'], 'cov') if aweights.shape[0] != observation_num: raise ValueError( "The number of Input(aweights) should equal to x's dim[1]: {}, but received " "size of Input(aweights) is {}.".format(observation_num, aweights.shape[0])) if aweights.min() < 0: raise ValueError( "The value of Input(aweights) cannot be negtive, but received " "min of Input(aweights) is {}.".format(aweights.min())) if w is not None: w = w * aw else: w = aw w_sum = paddle.to_tensor(observation_num, dtype=nx.dtype) if fweights is not None or aweights is not None: w_sum = w.sum() if w_sum.item() == 0: raise ValueError("The sum of weights is zero, can't be normalized.") if w is not None: nx_w = nx * w avg = (nx_w).sum(axis=1) / w_sum else: avg = nx.sum(axis=1) / w_sum nx_w = nx if w is not None and aweights is not None and ddof == True: norm_factor = w_sum - (w * aweights).sum() / w_sum else: norm_factor = w_sum - ddof if norm_factor <= 0: norm_factor = paddle.to_tensor(0, dtype=nx.dtype) nx = nx - avg.unsqueeze(1) xxt = paddle.mm(nx, nx_w.t().conj()) cov = paddle.divide(xxt, norm_factor).squeeze() return cov def t(input, name=None): """ Transpose <=2-D tensor. 0-D and 1-D tensors are returned as it is and 2-D tensor is equal to the paddle.transpose function which perm dimensions set 0 and 1. Args: input (Tensor): The input Tensor. It is a N-D (N<=2) Tensor of data types float16, float32, float64, int32. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` Returns: Tensor: A transposed n-D Tensor, with data type being float16, float32, float64, int32, int64. For Example: .. 
code-block:: text # Example 1 (0-D tensor) x = tensor([0.79]) paddle.t(x) = tensor([0.79]) # Example 2 (1-D tensor) x = tensor([0.79, 0.84, 0.32]) paddle.t(x) = tensor([0.79, 0.84, 0.32]) # Example 3 (2-D tensor) x = tensor([0.79, 0.84, 0.32], [0.64, 0.14, 0.57]) paddle.t(x) = tensor([0.79, 0.64], [0.84, 0.14], [0.32, 0.57]) Examples: .. code-block:: python import paddle x = paddle.ones(shape=[2, 3], dtype='int32') x_transposed = paddle.t(x) print(x_transposed.shape) # [3, 2] """ if len(input.shape) > 2: raise ValueError( "Input(input) only support N-D (N<=2) tensor, but received " "length of Input(input) is %s. Perhaps you can use paddle." "tensor.transpose() instead." % len(input.shape)) if paddle.in_dynamic_mode(): if len(input.shape) == 1: return input # 2-D tensor perm = [1, 0] out, _ = _C_ops.transpose2(input, 'axis', perm) return out check_variable_and_dtype( input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'], 'transpose') helper = LayerHelper('t', **locals()) out = helper.create_variable_for_type_inference(input.dtype) input_shape = helper.create_variable_for_type_inference(input.dtype) if len(input.shape) == 1: out = input else: helper.append_op( type='transpose2', inputs={'X': [input]}, outputs={'Out': [out], 'XShape': [input_shape]}, attrs={'axis': [1, 0]}) return out def cross(x, y, axis=None, name=None): """ Computes the cross product between two tensors along an axis. Inputs must have the same shape, and the length of their axes should be equal to 3. If `axis` is not given, it defaults to the first axis found with the length 3. Args: x (Tensor): The first input tensor. y (Tensor): The second input tensor. axis (int, optional): The axis along which to compute the cross product. It defaults to the first axis found with the length 3. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor. A Tensor with same data type as `x`. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0], [3.0, 3.0, 3.0]]) y = paddle.to_tensor([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]) z1 = paddle.cross(x, y) # [[-1. -1. -1.] # [ 2. 2. 2.] # [-1. -1. -1.]] z2 = paddle.cross(x, y, axis=1) # [[0. 0. 0.] # [0. 0. 0.] # [0. 0. 0.]] """ if in_dygraph_mode(): return _C_ops.final_state_cross(x, y, axis) else: if _in_legacy_dygraph(): if axis is not None: return _C_ops.cross(x, y, 'dim', axis) else: return _C_ops.cross(x, y) else: helper = LayerHelper("cross", **locals()) out = helper.create_variable_for_type_inference(x.dtype) attrs = dict() attrs['dim'] = axis helper.append_op( type='cross', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs=attrs) return out def cholesky(x, upper=False, name=None): r""" Computes the Cholesky decomposition of one symmetric positive-definite matrix or batches of symmetric positive-definite matrice. If `upper` is `True`, the decomposition has the form :math:`A = U^{T}U` , and the returned matrix :math:`U` is upper-triangular. Otherwise, the decomposition has the form :math:`A = LL^{T}` , and the returned matrix :math:`L` is lower-triangular. Args: x (Tensor): The input tensor. Its shape should be `[*, M, M]`, where * is zero or more batch dimensions, and matrices on the inner-most 2 dimensions all should be symmetric positive-definite. Its data type should be float32 or float64. upper (bool): The flag indicating whether to return upper or lower triangular matrices. Default: False. 
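        A quick verification sketch (illustrative, not part of the original docstring): with ``upper=False`` the returned factor ``L`` should reproduce the input as ``L @ L.t()`` up to floating point error.

        .. code-block:: python

            import paddle

            a = paddle.rand([3, 3], dtype='float64')
            x = a @ a.t() + 1e-3 * paddle.eye(3, dtype='float64')  # symmetric positive-definite input
            L = paddle.linalg.cholesky(x, upper=False)
            # paddle.dist(L @ L.t(), x) is expected to be close to 0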
Returns: Tensor: A Tensor with same shape and data type as `x`. It represents \ triangular matrices generated by Cholesky decomposition. Examples: .. code-block:: python import paddle import numpy as np a = np.random.rand(3, 3) a_t = np.transpose(a, [1, 0]) x_data = np.matmul(a, a_t) + 1e-03 x = paddle.to_tensor(x_data) out = paddle.linalg.cholesky(x, upper=False) print(out) # [[1.190523 0. 0. ] # [0.9906703 0.27676893 0. ] # [1.25450498 0.05600871 0.06400121]] """ if paddle.in_dynamic_mode(): return _C_ops.cholesky(x, "upper", upper) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cholesky') check_type(upper, 'upper', bool, 'cholesky') helper = LayerHelper('cholesky', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='cholesky', inputs={'X': [x]}, outputs={'Out': out}, attrs={'upper': upper}) return out def matrix_rank(x, tol=None, hermitian=False, name=None): r""" Computes the rank of a matrix. The rank of a matrix is the number of singular values that are greater than the specified `tol` threshold when hermitian=False, or the number of eigenvalues in absolute value that are greater than the specified `tol` threshold when hermitian=True. Args: x (Tensor): The input tensor. Its shape should be `[..., m, n]`, where `...` is zero or more batch dimensions. If `x` is a batch of matrices then the output has the same batch dimensions. The data type of `x` should be float32 or float64. tol (float,Tensor,optional): the tolerance value. Default: None. If `tol` is not specified, and `sigma` is the largest singular value (or eigenvalues in absolute value), and `eps` is the epsilon value for the dtype of `x`, then `tol` is computed with formula `tol=sigma * max(m,n) * eps`. Note that if `x` is a batch of matrices, `tol` is computed this way for every batch. hermitian (bool,optional): indicates whether `x` is Hermitian. Default: False. When hermitian=True, `x` is assumed to be Hermitian, enabling a more efficient method for finding eigenvalues, but `x` is not checked inside the function. Instead, We just use the lower triangular of the matrix to compute. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: Rank of tensor x. Examples: .. 
code-block:: python import paddle a = paddle.eye(10) b = paddle.linalg.matrix_rank(a) print(b) # b = [10] c = paddle.ones(shape=[3, 4, 5, 5]) d = paddle.linalg.matrix_rank(c, tol=0.01, hermitian=True) print(d) # d = [[1, 1, 1, 1], # [1, 1, 1, 1], # [1, 1, 1, 1]] """ if paddle.in_dynamic_mode(): if tol is None: tol_tensor = None tol_attr = 0.0 use_default_tol = True elif isinstance(tol, Variable): if tol.dtype != x.dtype: tol_tensor = cast(tol, x.dtype) else: tol_tensor = tol tol_attr = 0.0 use_default_tol = False else: tol_tensor = None tol_attr = float(tol) use_default_tol = False return _C_ops.matrix_rank(x, tol_tensor, "tol", tol_attr, 'hermitian', hermitian, 'use_default_tol', use_default_tol) inputs = {} attrs = {} check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'matrix_rank') inputs['X'] = x if tol is None: attrs['use_default_tol'] = True elif isinstance(tol, Variable): check_variable_and_dtype(tol, 'tol', ['float32'], 'matrix_rank') attrs['use_default_tol'] = False if tol.dtype != x.dtype: inputs['TolTensor'] = cast(tol, x.dtype) else: inputs['TolTensor'] = tol else: check_type(tol, 'tol', float, 'matrix_rank') attrs['use_default_tol'] = False attrs['tol'] = tol check_type(hermitian, 'hermitian', bool, 'matrix_rank') attrs['hermitian'] = hermitian helper = LayerHelper('matrix_rank', **locals()) out = helper.create_variable_for_type_inference(dtype='int32') helper.append_op( type='matrix_rank', inputs=inputs, outputs={'Out': out}, attrs=attrs) return out # MASKED: bmm function (lines 1315-1373) def histogram(input, bins=100, min=0, max=0, name=None): """ Computes the histogram of a tensor. The elements are sorted into equal width bins between min and max. If min and max are both zero, the minimum and maximum values of the data are used. Args: input (Tensor): A Tensor(or LoDTensor) with shape :math:`[N_1, N_2,..., N_k]` . The data type of the input Tensor should be float32, float64, int32, int64. bins (int): number of histogram bins min (int): lower end of the range (inclusive) max (int): upper end of the range (inclusive) Returns: Tensor: data type is int64, shape is (nbins,). Examples: .. code-block:: python import paddle inputs = paddle.to_tensor([1, 2, 1]) result = paddle.histogram(inputs, bins=4, min=0, max=3) print(result) # [0, 2, 1, 0] """ if paddle.in_dynamic_mode(): return _C_ops.histogram(input, "bins", bins, "min", min, "max", max) helper = LayerHelper('histogram', **locals()) check_variable_and_dtype( input, 'X', ['int32', 'int64', 'float32', 'float64'], 'histogram') out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64) helper.append_op( type='histogram', inputs={'X': input}, outputs={'Out': out}, attrs={'bins': bins, 'min': min, 'max': max}) return out def bincount(x, weights=None, minlength=0, name=None): """ Computes frequency of each value in the input tensor. Args: x (Tensor): A Tensor with non-negative integer. Should be 1-D tensor. weights (Tensor, optional): Weight for each value in the input tensor. Should have the same shape as input. Default is None. minlength (int, optional): Minimum number of bins. Should be non-negative integer. Default is 0. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor of frequency. Examples: .. 
code-block:: python import paddle x = paddle.to_tensor([1, 2, 1, 4, 5]) result1 = paddle.bincount(x) print(result1) # [0, 2, 1, 0, 1, 1] w = paddle.to_tensor([2.1, 0.4, 0.1, 0.5, 0.5]) result2 = paddle.bincount(x, weights=w) print(result2) # [0., 2.19999981, 0.40000001, 0., 0.50000000, 0.50000000] """ if x.dtype not in [paddle.int32, paddle.int64]: raise TypeError("Elements in Input(x) should all be integers") if paddle.in_dynamic_mode(): return _C_ops.bincount(x, weights, "minlength", minlength) helper = LayerHelper('bincount', **locals()) check_variable_and_dtype(x, 'X', ['int32', 'int64'], 'bincount') if weights is not None: check_variable_and_dtype(weights, 'Weights', ['int32', 'int64', 'float32', 'float64'], 'bincount') out = helper.create_variable_for_type_inference(dtype=weights.dtype) else: out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='bincount', inputs={'X': x, 'Weights': weights}, outputs={'Out': out}, attrs={'minlength': minlength}) return out def mv(x, vec, name=None): """ Performs a matrix-vector product of the matrix x and the vector vec. Args: x (Tensor): A tensor with shape :math:`[M, N]` , The data type of the input Tensor x should be one of float32, float64. vec (Tensor): A tensor with shape :math:`[N]` , The data type of the input Tensor x should be one of float32, float64. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor which is producted by x and vec. Examples: .. code-block:: python # x: [M, N], vec: [N] # paddle.mv(x, vec) # out: [M] import numpy as np import paddle x_data = np.array([[2, 1, 3], [3, 0, 1]]).astype("float64") x = paddle.to_tensor(x_data) vec_data = np.array([3, 5, 1]) vec = paddle.to_tensor(vec_data).astype("float64") out = paddle.mv(x, vec) """ if in_dygraph_mode(): return _C_ops.final_state_mv(x, vec) else: if _in_legacy_dygraph(): out = _C_ops.mv(x, vec) return out else: def __check_input(x, vec): var_names = {'x': x, 'vec': vec} for name, val in var_names.items(): check_variable_and_dtype(val, name, ['float32', 'float64'], 'mv') x_shape = list(x.shape) vec_shape = list(vec.shape) if len(x_shape) != 2: raise ValueError( "x should be 2-dimensional. But received x's dimention: {}". format(x_shape)) if len(vec_shape) != 1: raise ValueError( "vec should be 1-dimensional. But received vec's dimention: {}". format(vec_shape)) __check_input(x, vec) helper = LayerHelper('mv', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='mv', inputs={'X': x, 'Vec': vec}, outputs={'Out': out}) return out def det(x, name=None): """ Calculates determinant value of a square matrix or batches of square matrices. Args: x (Tensor): input (Tensor): the input matrix of size `(n, n)` or the batch of matrices of size `(*, n, n)` where `*` is one or more batch dimensions. Returns: y (Tensor):the determinant value of a square matrix or batches of square matrices. Examples: .. 
code-block:: python import paddle x = paddle.randn([3,3,3]) A = paddle.linalg.det(x) print(A) # [ 0.02547996, 2.52317095, -6.15900707]) """ if paddle.in_dynamic_mode(): return _C_ops.determinant(x) check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'det') input_shape = list(x.shape) assert len(input_shape) >= 2, \ "The x must be at least 2-dimensional, " \ "but received Input x's dimensional: %s.\n" % \ len(input_shape) assert (input_shape[-1] == input_shape[-2]), \ "Expect squared input," \ "but received %s by %s matrix.\n" \ %(input_shape[-2], input_shape[-1]) \ helper = LayerHelper('determinant', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='determinant', inputs={'Input': [x]}, outputs={'Out': [out]}) return out def slogdet(x, name=None): """ Calculates the sign and natural logarithm of the absolute value of a square matrix's or batches square matrices' determinant. The determinant can be computed with ``sign * exp(logabsdet) Supports input of float, double Note that for matrices that have zero determinant, this returns ``(0, -inf)`` Args: x (Tensor): the batch of matrices of size :math:`(*, n, n)` where math:`*` is one or more batch dimensions. Returns: y (Tensor): A tensor containing the sign of the determinant and the natural logarithm of the absolute value of determinant, respectively. Examples: .. code-block:: python import paddle x = paddle.randn([3,3,3]) A = paddle.linalg.slogdet(x) print(A) # [[ 1. , 1. , -1. ], # [-0.98610914, -0.43010661, -0.10872950]]) """ if paddle.in_dynamic_mode(): return _C_ops.slogdeterminant(x) check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'slogdet') input_shape = list(x.shape) assert len(input_shape) >= 2, \ "The x must be at least 2-dimensional, " \ "but received Input x's dimensional: %s.\n" % \ len(input_shape) assert (input_shape[-1] == input_shape[-2]), \ "Expect squared input," \ "but received %s by %s matrix.\n" \ %(input_shape[-2], input_shape[-1]) \ helper = LayerHelper('slogdeterminant', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='slogdeterminant', inputs={'Input': [x]}, outputs={'Out': [out]}) return out def svd(x, full_matrices=False, name=None): r""" Computes the singular value decomposition of one matrix or a batch of regular matrices. Let :math:`X` be the input matrix or a batch of input matrices, the output should satisfies: .. math:: X = U * diag(S) * VT Args: x (Tensor): The input tensor. Its shape should be `[..., N, M]`, where `...` is zero or more batch dimensions. N and M can be arbitraty positive number. Note that if x is sigular matrices, the grad is numerical instable. The data type of x should be float32 or float64. full_matrices (bool): A flag to control the behavor of svd. If full_matrices = True, svd op will compute full U and V matrics, which means shape of U is `[..., N, N]`, shape of V is `[..., M, M]`. K = min(M, N). If full_matrices = False, svd op will use a economic method to store U and V. which means shape of U is `[..., N, K]`, shape of V is `[..., M, K]`. K = min(M, N). name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tuple of 3 tensors: (U, S, VH). VH is the conjugate transpose of V. S is the singlar value vectors of matrics with shape `[..., K]` Examples: .. 
code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [1.0, 3.0], [4.0, 6.0]]).astype('float64') x = x.reshape([3, 2]) u, s, vh = paddle.linalg.svd(x) print (u) #U = [[ 0.27364809, -0.21695147 ], # [ 0.37892198, -0.87112408 ], # [ 0.8840446 , 0.44053933 ]] print (s) #S = [8.14753743, 0.78589688] print (vh) #VT= [[ 0.51411221, 0.85772294], # [ 0.85772294, -0.51411221]] # one can verify : U * S * VT == X # U * UH == I # V * VH == I """ if paddle.in_dynamic_mode(): return _C_ops.svd(x, 'full_matrices', full_matrices) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'svd') check_type(full_matrices, 'full_matrices', bool, 'svd') helper = LayerHelper('svd', **locals()) u = helper.create_variable_for_type_inference(dtype=x.dtype) vh = helper.create_variable_for_type_inference(dtype=x.dtype) s = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['full_matrices'] = full_matrices helper.append_op( type='svd', inputs={'X': [x]}, outputs={'U': u, 'VH': vh, 'S': s}, attrs=attrs, ) return u, s, vh def matrix_power(x, n, name=None): r""" Computes the n-th power of a square matrix or a batch of square matrices. Let :math:`X` be a sqaure matrix or a batch of square matrices, :math:`n` be an exponent, the equation should be: .. math:: Out = X ^ {n} Specifically, - If `n > 0`, it returns the matrix or a batch of matrices raised to the power of `n`. - If `n = 0`, it returns the identity matrix or a batch of identity matrices. - If `n < 0`, it returns the inverse of each matrix (if invertible) raised to the power of `abs(n)`. Args: x (Tensor): A square matrix or a batch of square matrices to be raised to power `n`. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. n (int): The exponent. It can be any positive, negative integer or zero. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The n-th power of the matrix (or the batch of matrices) `x`. Its data type should be the same as that of `x`. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1, 2, 3], [1, 4, 9], [1, 8, 27]], dtype='float64') print(paddle.linalg.matrix_power(x, 2)) # [[6. , 34. , 102.], # [14. , 90. , 282.], # [36. , 250., 804.]] print(paddle.linalg.matrix_power(x, 0)) # [[1., 0., 0.], # [0., 1., 0.], # [0., 0., 1.]] print(paddle.linalg.matrix_power(x, -2)) # [[ 12.91666667, -12.75000000, 2.83333333 ], # [-7.66666667 , 8. , -1.83333333 ], # [ 1.80555556 , -1.91666667 , 0.44444444 ]] """ if paddle.in_dynamic_mode(): return _C_ops.matrix_power(x, "n", n) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'matrix_power') check_type(n, 'n', int, 'matrix_power') helper = LayerHelper('matrix_power', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matrix_power', inputs={'X': x}, outputs={'Out': out}, attrs={'n': n}) return out def qr(x, mode="reduced", name=None): r""" Computes the QR decomposition of one matrix or batches of matrice (backward is unsupported now). Args: x (Tensor): The input tensor. Its shape should be `[..., M, N]`, where ... is zero or more batch dimensions. M and N can be arbitrary positive number. The data type of x should be float32 or float64. mode (str, optional): A flag to control the behavior of qr, the default is "reduced". 
Suppose x's shape is `[..., M, N]` and denoting `K = min(M, N)`: If mode = "reduced", qr op will return reduced Q and R matrices, which means Q's shape is `[..., M, K]` and R's shape is `[..., K, N]`. If mode = "complete", qr op will return complete Q and R matrices, which means Q's shape is `[..., M, M]` and R's shape is `[..., M, N]`. If mode = "r", qr op will only return reduced R matrix, which means R's shape is `[..., K, N]`. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: If mode = "reduced" or mode = "complete", qr will return a two tensor-tuple, which represents Q and R. If mode = "r", qr will return a tensor which represents R. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') q, r = paddle.linalg.qr(x) print (q) print (r) # Q = [[-0.16903085, 0.89708523], # [-0.50709255, 0.27602622], # [-0.84515425, -0.34503278]]) # R = [[-5.91607978, -7.43735744], # [ 0. , 0.82807867]]) # one can verify : X = Q * R ; """ if paddle.in_dynamic_mode(): q, r = _C_ops.qr(x, 'mode', mode) if mode == "r": return r else: return q, r check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'qr') check_type(mode, 'mode', str, 'qr') helper = LayerHelper('qr', **locals()) q = helper.create_variable_for_type_inference(dtype=x.dtype) r = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['mode'] = mode helper.append_op( type='qr', inputs={'X': [x]}, outputs={'Q': q, 'R': r}, attrs=attrs) if mode == "r": return r else: return q, r def lu(x, pivot=True, get_infos=False, name=None): r""" Computes the LU factorization of an N-D(N>=2) matrix x. Returns the LU factorization(inplace x) and Pivots. low triangular matrix L and upper triangular matrix U are combined to a single LU matrix. Pivoting is done if pivot is set to True. P mat can be get by pivots: # ones = eye(rows) #eye matrix of rank rows # for i in range(cols): # swap(ones[i], ones[pivots[i]]) # return ones Args: X (Tensor): the tensor to factor of N-dimensions(N>=2). pivot (bool, optional): controls whether pivoting is done. Default: True. get_infos (bool, optional): if set to True, returns an info IntTensor. Default: False. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: factorization (Tensor): LU matrix, the factorization of input X. pivots (IntTensor): the pivots of size(∗(N-2), min(m,n)). `pivots` stores all the intermediate transpositions of rows. The final permutation `perm` could be reconstructed by this, details refer to upper example. infos (IntTensor, optional): if `get_infos` is `True`, this is a tensor of size (∗(N-2)) where non-zero values indicate whether factorization for the matrix or each minibatch has succeeded or failed. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') lu,p,info = paddle.linalg.lu(x, get_infos=True) # >>> lu: # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. 
], # [0.20000000, 0.80000000], # [0.60000000, 0.50000000]]) # >>> p # Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # [3, 3]) # >>> info # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # 0) P,L,U = paddle.linalg.lu_unpack(lu,p) # >>> P # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[0., 1., 0.], # [0., 0., 1.], # [1., 0., 0.]]), # >>> L # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[1. , 0. ], # [0.20000000, 1. ], # [0.60000000, 0.50000000]]), # >>> U # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0. , 0.80000000]])) # one can verify : X = P @ L @ U ; """ if paddle.in_dynamic_mode(): LU, Piv, Info = _C_ops.lu(x, 'pivots', pivot) if get_infos: return LU, Piv, Info else: return LU, Piv check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu') helper = LayerHelper('lu', **locals()) lu = helper.create_variable_for_type_inference(dtype=x.dtype) p = helper.create_variable_for_type_inference(dtype='int') info = helper.create_variable_for_type_inference(dtype='int') attrs = dict() attrs['pivots'] = pivot helper.append_op( type='lu', inputs={'X': x}, outputs={'Out': lu, 'Pivots': p, 'Infos': info}, attrs=attrs) if get_infos: return lu, p, info else: return lu, p def lu_unpack(x, y, unpack_ludata=True, unpack_pivots=True, name=None): r""" Unpack L U and P to single matrix tensor . unpack L and U matrix from LU, unpack permutation matrix P from Pivtos . P mat can be get by pivots: # ones = eye(rows) #eye matrix of rank rows # for i in range(cols): # swap(ones[i], ones[pivots[i]]) Args: x (Tensor): The LU tensor get from paddle.linalg.lu, which is combined by L and U. y (Tensor): Pivots get from paddle.linalg.lu. unpack_ludata (bool,optional): whether to unpack L and U from x. Default: True. unpack_pivots (bool, optional): whether to unpack permutation matrix P from Pivtos. Default: True. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: P (Tensor): Permutation matrix P of lu factorization. L (Tensor): The lower triangular matrix tensor of lu factorization. U (Tensor): The upper triangular matrix tensor of lu factorization. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') lu,p,info = paddle.linalg.lu(x, get_infos=True) # >>> lu: # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0.20000000, 0.80000000], # [0.60000000, 0.50000000]]) # >>> p # Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # [3, 3]) # >>> info # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # 0) P,L,U = paddle.linalg.lu_unpack(lu,p) # >>> P # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[0., 1., 0.], # [0., 0., 1.], # [1., 0., 0.]]), # >>> L # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[1. , 0. ], # [0.20000000, 1. ], # [0.60000000, 0.50000000]]), # >>> U # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0. 
, 0.80000000]])) # one can verify : X = P @ L @ U ; """ if paddle.in_dynamic_mode(): P, L, U = _C_ops.lu_unpack(x, y, 'unpack_ludata', unpack_ludata, 'unpack_pivots', unpack_pivots) return P, L, U check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu_unpack') helper = LayerHelper('lu_unpack', **locals()) p = helper.create_variable_for_type_inference(dtype=x.dtype) l = helper.create_variable_for_type_inference(dtype=x.dtype) u = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['unpack_ludata'] = unpack_ludata attrs['unpack_pivots'] = unpack_pivots helper.append_op( type='lu_unpack', inputs={'X': x, 'Pivots': y}, outputs={'Pmat': p, 'L': l, 'U': u}, attrs=attrs) return p, l, u def eig(x, name=None): """ This API performs the eigenvalue decomposition of a square matrix or a batch of square matrices. .. note:: If the matrix is a Hermitian or a real symmetric matrix, please use :ref:`paddle.linalg.eigh` instead, which is much faster. If only eigenvalues is needed, please use :ref:`paddle.linalg.eigvals` instead. If the matrix is of any shape, please use :ref:`paddle.linalg.svd`. This API is only supported on CPU device. The output datatype is always complex for both real and complex input. Args: x (Tensor): A tensor with shape math:`[*, N, N]`, The data type of the x should be one of ``float32``, ``float64``, ``compplex64`` or ``complex128``. name (str, optional): The default value is `None`. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Eigenvalues(Tensors): A tensor with shape math:`[*, N]` refers to the eigen values. Eigenvectors(Tensors): A tensor with shape math:`[*, N, N]` refers to the eigen vectors. Examples: .. code-block:: python import paddle import numpy as np paddle.device.set_device("cpu") x_data = np.array([[1.6707249, 7.2249975, 6.5045543], [9.956216, 8.749598, 6.066444 ], [4.4251957, 1.7983172, 0.370647 ]]).astype("float32") x = paddle.to_tensor(x_data) w, v = paddle.linalg.eig(x) print(w) # Tensor(shape=[3, 3], dtype=complex128, place=CPUPlace, stop_gradient=False, # [[(-0.5061363550800655+0j) , (-0.7971760990842826+0j) , # (0.18518077798279986+0j)], # [(-0.8308237755993192+0j) , (0.3463813401919749+0j) , # (-0.6837005269141947+0j) ], # [(-0.23142567697893396+0j), (0.4944999840400175+0j) , # (0.7058765252952796+0j) ]]) print(v) # Tensor(shape=[3], dtype=complex128, place=CPUPlace, stop_gradient=False, # [ (16.50471283351188+0j) , (-5.5034820550763515+0j) , # (-0.21026087843552282+0j)]) """ if paddle.in_dynamic_mode(): w, v = _C_ops.eig(x) return w, v check_variable_and_dtype( x, 'X', ['float32', 'float64', 'complex64', 'complex128'], 'eig') helper = LayerHelper('eig', **locals()) w = helper.create_variable_for_type_inference(x.dtype) v = helper.create_variable_for_type_inference(x.dtype) inputs = {'X': x} outputs = {'Eigenvalues': w, 'Eigenvectors': v} helper.append_op(type='eig', inputs=inputs, outputs=outputs) return w, v def eigvals(x, name=None): """ Compute the eigenvalues of one or more general matrices. Warning: The gradient kernel of this operator does not yet developed. If you need back propagation through this operator, please replace it with paddle.linalg.eig. Args: x (Tensor): A square matrix or a batch of square matrices whose eigenvalues will be computed. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32, float64, complex64, or complex128. 
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: A tensor containing the unsorted eigenvalues which has the same batch dimensions with `x`. The eigenvalues are complex-valued even when `x` is real. Examples: .. code-block:: python import paddle paddle.set_device("cpu") paddle.seed(1234) x = paddle.rand(shape=[3, 3], dtype='float64') # [[0.02773777, 0.93004224, 0.06911496], # [0.24831591, 0.45733623, 0.07717843], # [0.48016702, 0.14235102, 0.42620817]]) print(paddle.linalg.eigvals(x)) # [(-0.27078833542132674+0j), (0.29962280156230725+0j), (0.8824477020120244+0j)] #complex128 """ check_variable_and_dtype(x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigvals') x_shape = list(x.shape) if len(x_shape) < 2: raise ValueError( "The dimension of Input(x) should be at least 2, but received x's dimention = {}, x's shape = {}". format(len(x_shape), x_shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The last two dimensions of Input(x) should be equal, but received x's shape = {}". format(x_shape)) if paddle.in_dynamic_mode(): return _C_ops.eigvals(x) helper = LayerHelper('eigvals', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type='eigvals', inputs={'X': x}, outputs={'Out': out}) return out def multi_dot(x, name=None): """ Multi_dot is an operator that calculates multiple matrix multiplications. Supports inputs of float16(only GPU support), float32 and float64 dtypes. This function does not support batched inputs. The input tensor in [x] must be 2-D except for the first and last can be 1-D. If the first tensor is a 1-D vector of shape(n, ) it is treated as row vector of shape(1, n), similarly if the last tensor is a 1D vector of shape(n, ), it is treated as a column vector of shape(n, 1). If the first and last tensor are 2-D matrix, then the output is also 2-D matrix, otherwise the output is a 1-D vector. Multi_dot will select the lowest cost multiplication order for calculation. The cost of multiplying two matrices with shapes (a, b) and (b, c) is a * b * c. Given matrices A, B, C with shapes (20, 5), (5, 100), (100, 10) respectively, we can calculate the cost of different multiplication orders as follows: - Cost((AB)C) = 20x5x100 + 20x100x10 = 30000 - Cost(A(BC)) = 5x100x10 + 20x5x10 = 6000 In this case, multiplying B and C first, then multiply A, which is 5 times faster than sequential calculation. Args: x ([Tensor]): The input tensors which is a list Tensor. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The output Tensor. Examples: .. 
code-block:: python import paddle import numpy as np # A * B A_data = np.random.random([3, 4]).astype(np.float32) B_data = np.random.random([4, 5]).astype(np.float32) A = paddle.to_tensor(A_data) B = paddle.to_tensor(B_data) out = paddle.linalg.multi_dot([A, B]) print(out.numpy().shape) # [3, 5] # A * B * C A_data = np.random.random([10, 5]).astype(np.float32) B_data = np.random.random([5, 8]).astype(np.float32) C_data = np.random.random([8, 7]).astype(np.float32) A = paddle.to_tensor(A_data) B = paddle.to_tensor(B_data) C = paddle.to_tensor(C_data) out = paddle.linalg.multi_dot([A, B, C]) print(out.numpy().shape) # [10, 7] """ if paddle.in_dynamic_mode(): return _C_ops.multi_dot(x) check_type(x, 'x', (list, tuple), 'multi_dot') for id, item in enumerate(x): check_variable_and_dtype(item, 'x[' + str(id) + ']', ['float16', 'float32', 'float64'], 'multi_dot') if item.dtype != x[0].dtype: raise TypeError( "All the Tensors in the input must have the same data type.") helper = LayerHelper('multi_dot', **locals()) dtype = helper.input_dtype(input_param_name='x') out = helper.create_variable_for_type_inference(dtype) helper.append_op(type='multi_dot', inputs={"X": x}, outputs={"Out": out}) return out def eigh(x, UPLO='L', name=None): """ Compute the eigenvalues and eigenvectors of a complex Hermitian (conjugate symmetric) or a real symmetric matrix. Args: x (Tensor): A tensor with shape :math:`[*, N, N]` , The data type of the input Tensor x should be one of float32, float64, complex64, complex128. UPLO(str, optional): (string, default 'L'), 'L' represents the lower triangular matrix, "'U' represents the upper triangular matrix.". name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: out_value(Tensor): A Tensor with shape [*, N] and data type of float32 and float64. The eigenvalues of eigh op. out_vector(Tensor): A Tensor with shape [*, N, N] and data type of float32,float64,complex64 and complex128. The eigenvectors of eigh op. Examples: .. code-block:: python import numpy as np import paddle x_data = np.array([[1, -2j], [2j, 5]]) x = paddle.to_tensor(x_data) out_value, out_vector = paddle.linalg.eigh(x, UPLO='L') print(out_value) #[0.17157288, 5.82842712] print(out_vector) #[(-0.9238795325112867+0j), (-0.3826834323650898+0j)], #[ 0.3826834323650898j , -0.9238795325112867j ]] """ if paddle.in_dynamic_mode(): return _C_ops.eigh(x, 'UPLO', UPLO) def __check_input(x, UPLO): x_shape = list(x.shape) if len(x.shape) < 2: raise ValueError( "Input(input) only support >=2 tensor, but received " "length of Input(input) is %s." % len(x.shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The input matrix must be batches of square matrices. But received x's dimention: {}". format(x_shape)) if UPLO != 'L' and UPLO != 'U': raise ValueError( "UPLO must be L or U. 
But received UPLO is: {}".format(UPLO)) __check_input(x, UPLO) helper = LayerHelper('eigh', **locals()) check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigh') out_value = helper.create_variable_for_type_inference(dtype=x.dtype) out_vector = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='eigh', inputs={'X': x}, outputs={'Eigenvalues': out_value, 'Eigenvectors': out_vector}, attrs={'UPLO': UPLO}) return out_value, out_vector def pinv(x, rcond=1e-15, hermitian=False, name=None): r""" Calculate pseudo inverse via SVD(singular value decomposition) of one matrix or batches of regular matrix. .. math:: if hermitian == False: x = u * s * vt (SVD) out = v * 1/s * ut else: x = u * s * ut (eigh) out = u * 1/s * u.conj().transpose(-2,-1) If x is hermitian or symmetric matrix, svd will be replaced with eigh. Args: x(Tensor): The input tensor. Its shape should be (*, m, n) where * is zero or more batch dimensions. m and n can be arbitraty positive number. The data type of x should be float32 or float64 or complex64 or complex128. When data type is complex64 or cpmplex128, hermitian should be set True. rcond(Tensor, optional): the tolerance value to determine when is a singular value zero. Defalut:1e-15. hermitian(bool, optional): indicates whether x is Hermitian if complex or symmetric if real. Default: False. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The tensor with same data type with x. it represents pseudo inverse of x. Its shape should be (*, n, m). Examples: .. code-block:: python import paddle x = paddle.arange(15).reshape((3, 5)).astype('float64') input = paddle.to_tensor(x) out = paddle.linalg.pinv(input) print(input) print(out) # input: # [[0. , 1. , 2. , 3. , 4. ], # [5. , 6. , 7. , 8. , 9. 
], # [10., 11., 12., 13., 14.]] # out: # [[-0.22666667, -0.06666667, 0.09333333], # [-0.12333333, -0.03333333, 0.05666667], # [-0.02000000, 0.00000000, 0.02000000], # [ 0.08333333, 0.03333333, -0.01666667], # [ 0.18666667, 0.06666667, -0.05333333]] # one can verify : x * out * x = x ; # or out * x * out = x ; """ if paddle.in_dynamic_mode(): if not hermitian: # combine svd and matmul op u, s, vt = _C_ops.svd(x, 'full_matrices', False) max_singular_val = _C_ops.reduce_max(s, 'dim', [-1], 'keep_dim', True, \ 'reduce_all', False) rcond = paddle.to_tensor(rcond, dtype=x.dtype) cutoff = rcond * max_singular_val y = float('inf') y = paddle.to_tensor(y, dtype=x.dtype) condition = s > cutoff cond_int = layers.cast(condition, s.dtype) cond_not_int = layers.cast(layers.logical_not(condition), s.dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st, _ = _C_ops.unsqueeze2(singular, 'axes', [-2]) dims = list(range(len(vt.shape))) perm = dims[:-2] + [dims[-1]] + [dims[-2]] v, _ = _C_ops.transpose2(vt, 'axis', perm) out_1 = v * st out_2 = _C_ops.matmul_v2(out_1, u, 'trans_x', False, 'trans_y', True) return out_2 else: # combine eigh and matmul op s, u = _C_ops.eigh(x, 'UPLO', 'L') s_abs = paddle.abs(s) max_singular_val = _C_ops.reduce_max(s_abs, 'dim', [-1], 'keep_dim', True, \ 'reduce_all', False) rcond = paddle.to_tensor(rcond, dtype=s.dtype) cutoff = rcond * max_singular_val y = float('inf') y = paddle.to_tensor(y, dtype=s.dtype) condition = s_abs > cutoff cond_int = layers.cast(condition, s.dtype) cond_not_int = layers.cast(layers.logical_not(condition), s.dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st, _ = _C_ops.unsqueeze2(singular, 'axes', [-2]) out_1 = u * st u_conj = _C_ops.conj(u) out_2 = _C_ops.matmul_v2(out_1, u_conj, 'trans_x', False, 'trans_y', True) return out_2 else: if not hermitian: helper = LayerHelper('pinv', **locals()) dtype = x.dtype check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'pinv') u = helper.create_variable_for_type_inference(dtype) s = helper.create_variable_for_type_inference(dtype) vt = helper.create_variable_for_type_inference(dtype) helper.append_op( type='svd', inputs={'X': [x]}, outputs={'U': u, 'VH': vt, 'S': s}, attrs={'full_matrices': False}, ) max_singular_val = helper.create_variable_for_type_inference(dtype) helper.append_op( type='reduce_max', inputs={'X': s}, outputs={'Out': max_singular_val}, attrs={'dim': [-1], 'keep_dim': True, 'reduce_all': False}) rcond = layers.fill_constant(shape=[1], value=rcond, dtype=dtype) cutoff = rcond * max_singular_val y = float('inf') y = layers.fill_constant(shape=[1], value=y, dtype=dtype) condition = s > cutoff cond_int = layers.cast(condition, dtype) cond_not_int = layers.cast(layers.logical_not(condition), dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st = helper.create_variable_for_type_inference(dtype=dtype) st_shape = helper.create_variable_for_type_inference(dtype=dtype) helper.append_op( type='unsqueeze2', inputs={'X': singular}, attrs={'axes': [-2]}, outputs={'Out': st, 'XShape': st_shape}) dims = list(range(len(vt.shape))) perm = dims[:-2] + [dims[-1]] + [dims[-2]] v = helper.create_variable_for_type_inference(dtype) v_shape = helper.create_variable_for_type_inference(dtype) helper.append_op( 
type='transpose2', inputs={'X': [vt]}, outputs={'Out': [v], 'XShape': [v_shape]}, attrs={'axis': perm}) out_1 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='elementwise_mul', inputs={'X': v, 'Y': st}, outputs={'Out': out_1}, attrs={'axis': -1, 'use_mkldnn': False}) out_1 = helper.append_activation(out_1) out_2 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='matmul_v2', inputs={'X': out_1, 'Y': u}, outputs={'Out': out_2}, attrs={'trans_x': False, 'trans_y': True}, ) return out_2 else: helper = LayerHelper('pinv', **locals()) dtype = x.dtype check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'pinv') if dtype == paddle.complex128: s_type = 'float64' elif dtype == paddle.complex64: s_type = 'float32' else: s_type = dtype u = helper.create_variable_for_type_inference(dtype) s = helper.create_variable_for_type_inference(s_type) helper.append_op( type='eigh', inputs={'X': x}, outputs={'Eigenvalues': s, 'Eigenvectors': u}, attrs={'UPLO': 'L'}) s_abs = helper.create_variable_for_type_inference(s_type) helper.append_op( type='abs', inputs={'X': s}, outputs={'Out': s_abs}) max_singular_val = helper.create_variable_for_type_inference(s_type) helper.append_op( type='reduce_max', inputs={'X': s_abs}, outputs={'Out': max_singular_val}, attrs={'dim': [-1], 'keep_dim': True, 'reduce_all': False}) rcond = layers.fill_constant(shape=[1], value=rcond, dtype=s_type) cutoff = rcond * max_singular_val y = float('inf') y = layers.fill_constant(shape=[1], value=y, dtype=s_type) condition = s_abs > cutoff cond_int = layers.cast(condition, s_type) cond_not_int = layers.cast(layers.logical_not(condition), s_type) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st = helper.create_variable_for_type_inference(dtype=s_type) st_shape = helper.create_variable_for_type_inference(dtype=s_type) helper.append_op( type='unsqueeze2', inputs={'X': singular}, attrs={'axes': [-2]}, outputs={'Out': st, 'XShape': st_shape}) out_1 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='elementwise_mul', inputs={'X': u, 'Y': st}, outputs={'Out': out_1}, attrs={'axis': -1, 'use_mkldnn': False}) out_1 = helper.append_activation(out_1) u_conj = helper.create_variable_for_type_inference(dtype) helper.append_op( type='conj', inputs={'X': u}, outputs={'Out': [u_conj]}) out_2 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='matmul_v2', inputs={'X': out_1, 'Y': u_conj}, outputs={'Out': out_2}, attrs={'trans_x': False, 'trans_y': True}, ) return out_2 def solve(x, y, name=None): r""" Computes the solution of a square system of linear equations with a unique solution for input 'X' and 'Y'. Let :math: `X` be a sqaure matrix or a batch of square matrices, :math:`Y` be a vector/matrix or a batch of vectors/matrices, the equation should be: .. math:: Out = X^-1 * Y Specifically, - This system of linear equations has one solution if and only if input 'X' is invertible. Args: x (Tensor): A square matrix or a batch of square matrices. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): A vector/matrix or a batch of vectors/matrices. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. name(str, optional): Name for the operation (optional, default is None). 
For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of a square system of linear equations with a unique solution for input 'x' and 'y'. Its data type should be the same as that of `x`. Examples: .. code-block:: python # a square system of linear equations: # 2*X0 + X1 = 9 # X0 + 2*X1 = 8 import paddle import numpy as np np_x = np.array([[3, 1],[1, 2]]) np_y = np.array([9, 8]) x = paddle.to_tensor(np_x, dtype="float64") y = paddle.to_tensor(np_y, dtype="float64") out = paddle.linalg.solve(x, y) print(out) # [2., 3.]) """ if paddle.in_dynamic_mode(): return _C_ops.solve(x, y) inputs = {"X": [x], "Y": [y]} helper = LayerHelper("solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type="solve", inputs={"X": x, "Y": y}, outputs={"Out": out}) return out def triangular_solve(x, y, upper=True, transpose=False, unitriangular=False, name=None): r""" Computes the solution of a system of equations with a triangular coefficient matrix `x` and multiple right-hand sides `y` . Input `x` and `y` is 2D matrices or batches of 2D matrices. If the inputs are batches, the outputs is also batches. Args: x (Tensor): The input triangular coefficient matrix. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. upper (bool, optional): Whether to solve the upper-triangular system of equations (default) or the lower-triangular system of equations. Default: True. transpose (bool, optional): whether `x` should be transposed before calculation. Default: False. unitriangular (bool, optional): whether `x` is unit triangular. If True, the diagonal elements of `x` are assumed to be 1 and not referenced from `x` . Default: False. name(str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of the system of equations. Its data type should be the same as that of `x`. Examples: .. code-block:: python # a square system of linear equations: # x1 + x2 + x3 = 0 # 2*x2 + x3 = -9 # -x3 = 5 import paddle import numpy as np x = paddle.to_tensor([[1, 1, 1], [0, 2, 1], [0, 0,-1]], dtype="float64") y = paddle.to_tensor([[0], [-9], [5]], dtype="float64") out = paddle.linalg.triangular_solve(x, y, upper=True) print(out) # [7, -2, -5] """ if paddle.in_dynamic_mode(): return _C_ops.triangular_solve(x, y, 'upper', upper, 'transpose', transpose, 'unitriangular', unitriangular) inputs = {"X": [x], "Y": [y]} helper = LayerHelper("triangular_solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'triangular_solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'triangular_solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='triangular_solve', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs={ 'upper': upper, 'transpose': transpose, 'unitriangular': unitriangular }) return out def cholesky_solve(x, y, upper=False, name=None): r""" Solves a linear system of equations A @ X = B, given A's Cholesky factor matrix u and matrix B. Input `x` and `y` is 2D matrices or batches of 2D matrices. 
If the inputs are batches, the outputs is also batches. Args: x (Tensor): The input matrix which is upper or lower triangular Cholesky factor of square matrix A. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. upper (bool, optional): whether to consider the Cholesky factor as a lower or upper triangular matrix. Default: False. name(str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of the system of equations. Its data type is the same as that of `x`. Examples: .. code-block:: python import paddle u = paddle.to_tensor([[1, 1, 1], [0, 2, 1], [0, 0,-1]], dtype="float64") b = paddle.to_tensor([[0], [-9], [5]], dtype="float64") out = paddle.linalg.cholesky_solve(b, u, upper=True) print(out) # [-2.5, -7, 9.5] """ if paddle.in_dynamic_mode(): return _C_ops.cholesky_solve(x, y, 'upper', upper) helper = LayerHelper("cholesky_solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'cholesky_solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'cholesky_solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='cholesky_solve', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs={'upper': upper}) return out def eigvalsh(x, UPLO='L', name=None): """ Computes the eigenvalues of a complex Hermitian (conjugate symmetric) or a real symmetric matrix. Args: x (Tensor): A tensor with shape :math:`[_, M, M]` , The data type of the input Tensor x should be one of float32, float64, complex64, complex128. UPLO(str, optional): Lower triangular part of a (‘L’, default) or the upper triangular part (‘U’). name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor eigenvalues in ascending order. Examples: .. code-block:: python import numpy as np import paddle x_data = np.array([[1, -2j], [2j, 5]]) x = paddle.to_tensor(x_data) out_value = paddle.eigvalsh(x, UPLO='L') print(out_value) #[0.17157288, 5.82842712] """ if paddle.in_dynamic_mode(): is_test = x.stop_gradient values, _ = _C_ops.eigvalsh(x, 'UPLO', UPLO, 'is_test', is_test) return values def __check_input(x, UPLO): x_shape = list(x.shape) if len(x.shape) < 2: raise ValueError( "Input(input) only support >=2 tensor, but received " "length of Input(input) is %s." % len(x.shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The input matrix must be batches of square matrices. But received x's dimention: {}". format(x_shape)) if UPLO != 'L' and UPLO != 'U': raise ValueError( "UPLO must be L or U. 
But received UPLO is: {}".format(UPLO)) __check_input(x, UPLO) helper = LayerHelper('eigvalsh', **locals()) check_variable_and_dtype(x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigvalsh') out_value = helper.create_variable_for_type_inference(dtype=x.dtype) out_vector = helper.create_variable_for_type_inference(dtype=x.dtype) is_test = x.stop_gradient helper.append_op( type='eigvalsh', inputs={'X': x}, outputs={'Eigenvalues': out_value, 'Eigenvectors': out_vector}, attrs={'UPLO': UPLO, 'is_test': is_test}) return out_value def lstsq(x, y, rcond=None, driver=None, name=None): """ Computes a solution to the least squares problem of a system of linear equations. Args: x (Tensor): A tensor with shape ``(*, M, N)`` , the data type of the input Tensor ``x`` should be one of float32, float64. y (Tensor): A tensor with shape ``(*, M, K)`` , the data type of the input Tensor ``y`` should be one of float32, float64. rcond(float, optional): The default value is None. A float pointing number used to determine the effective rank of ``x``. If ``rcond`` is None, it will be set to max(M, N) times the machine precision of x_dtype. driver(str, optional): The default value is None. The name of LAPACK method to be used. For CPU inputs the valid values are ‘gels’, ‘gelsy’, ‘gelsd, ‘gelss’. For CUDA input, the only valid driver is ‘gels’. If ``driver`` is None, ‘gelsy’ is used for CPU inputs and ‘gels’ for CUDA inputs. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tuple: A tuple of 4 Tensors which is (``solution``, ``residuals``, ``rank``, ``singular_values``). ``solution`` is a tensor with shape ``(*, N, K)``, meaning the least squares solution. ``residuals`` is a tensor with shape ``(*, K)``, meaning the squared residuals of the solutions, which is computed when M > N and every matrix in ``x`` is full-rank, otherwise return an empty tensor. ``rank`` is a tensor with shape ``(*)``, meaning the ranks of the matrices in ``x``, which is computed when ``driver`` in (‘gelsy’, ‘gelsd’, ‘gelss’), otherwise return an empty tensor. ``singular_values`` is a tensor with shape ``(*, min(M, N))``, meaning singular values of the matrices in ``x``, which is computed when ``driver`` in (‘gelsd’, ‘gelss’), otherwise return an empty tensor. Examples: .. code-block:: python import paddle paddle.set_device("cpu") x = paddle.to_tensor([[1, 3], [3, 2], [5, 6.]]) y = paddle.to_tensor([[3, 4, 6], [5, 3, 4], [1, 2, 1.]]) results = paddle.linalg.lstsq(x, y, driver="gelsd") print(results[0]) # [[ 0.78350395, -0.22165027, -0.62371236], # [-0.11340097, 0.78866047, 1.14948535]] print(results[1]) # [19.81443405, 10.43814468, 30.56185532]) print(results[2]) # 2 print(results[3]) # [9.03455734, 1.54167950] x = paddle.to_tensor([[10, 2, 3], [3, 10, 5], [5, 6, 12.]]) y = paddle.to_tensor([[4, 2, 9], [2, 0, 3], [2, 5, 3.]]) results = paddle.linalg.lstsq(x, y, driver="gels") print(results[0]) # [[ 0.39386186, 0.10230173, 0.93606132], # [ 0.10741687, -0.29028133, 0.11892585], # [-0.05115091, 0.51918161, -0.19948854]] print(results[1]) # [] """ device = paddle.get_device() if device == "cpu": if driver not in (None, "gels", "gelss", "gelsd", "gelsy"): raise ValueError( "Only support valid driver is 'gels', 'gelss', 'gelsd', 'gelsy' or None for CPU inputs. But got {}". 
format(driver)) driver = "gelsy" if driver is None else driver elif "gpu" in device: if driver not in (None, "gels"): raise ValueError( "Only support valid driver is 'gels' or None for CUDA inputs. But got {}". format(driver)) driver = "gels" if driver is None else driver else: raise RuntimeError("Only support lstsq api for CPU or CUDA device.") if x.dtype == y.dtype and x.dtype in (paddle.float32, paddle.float64): pass else: raise ValueError( "Only support x and y have the same dtype such as 'float32' and 'float64'." ) if rcond is None: if x.dtype == paddle.float32: rcond = 1e-7 * max(x.shape[-2], x.shape[-1]) elif x.dtype == paddle.float64: rcond = 1e-15 * max(x.shape[-2], x.shape[-1]) if paddle.in_dynamic_mode(): solution, rank, singular_values = _C_ops.lstsq(x, y, "rcond", rcond, "driver", driver) if x.shape[-2] > x.shape[-1]: matmul_out = _varbase_creator(dtype=x.dtype) _C_ops.matmul(x, solution, matmul_out, 'trans_x', False, 'trans_y', False) minus_out = _C_ops.elementwise_sub(matmul_out, y) pow_out = _C_ops.pow(minus_out, 'factor', 2) residuals = _C_ops.reduce_sum(pow_out, 'dim', [-2], 'keepdim', False, 'reduce_all', False) else: residuals = paddle.empty(shape=[0], dtype=x.dtype) if driver == "gels": rank = paddle.empty(shape=[0], dtype=paddle.int32) singular_values = paddle.empty(shape=[0], dtype=x.dtype) elif driver == "gelsy": singular_values = paddle.empty(shape=[0], dtype=x.dtype) return solution, residuals, rank, singular_values helper = LayerHelper('lstsq', **locals()) check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq') check_variable_and_dtype( y, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq') solution = helper.create_variable_for_type_inference(dtype=x.dtype) residuals = helper.create_variable_for_type_inference(dtype=x.dtype) rank = helper.create_variable_for_type_inference(dtype=paddle.int32) singular_values = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='lstsq', inputs={'X': x, 'Y': y}, outputs={ 'Solution': solution, 'Rank': rank, 'SingularValues': singular_values }, attrs={'rcond': rcond, 'driver': driver}) matmul_out = helper.create_variable_for_type_inference(dtype=x.dtype) minus_out = helper.create_variable_for_type_inference(dtype=x.dtype) pow_out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matmul_v2', inputs={'X': x, 'Y': solution}, outputs={'Out': matmul_out}, attrs={ 'trans_x': False, 'trans_y': False, }) helper.append_op( type='elementwise_sub', inputs={'X': matmul_out, 'Y': y}, outputs={'Out': minus_out}) helper.append_op( type='pow', inputs={'X': minus_out}, outputs={'Out': pow_out}, attrs={'factor': 2}) helper.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': residuals}, attrs={'dim': [-2], 'keep_dim': False, 'reduce_all': False}) if driver == "gels": rank = paddle.static.data(name='rank', shape=[0]) singular_values = paddle.static.data(name='singular_values', shape=[0]) elif driver == "gelsy": singular_values = paddle.static.data(name='singular_values', shape=[0]) return solution, residuals, rank, singular_values
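# NOTE (illustrative sketch, not part of the public API): the residuals returned by
# paddle.linalg.lstsq above are the column-wise sums of squared errors,
# i.e. sum((x @ solution - y) ** 2) over the -2 axis, and are only computed when
# M > N and the matrices in `x` are full rank. The helper below (hypothetical
# name, for illustration only) recomputes them directly so the relationship with
# the reduce_sum over dim [-2] performed above is easy to verify on CPU with the
# default driver.
def _lstsq_residuals_sketch():
    paddle.set_device("cpu")
    x = paddle.to_tensor([[1., 1.], [2., 1.], [3., 1.]])   # M=3 > N=2, full rank
    y = paddle.to_tensor([[6.], [5.], [7.]])
    solution, residuals, rank, singular_values = paddle.linalg.lstsq(x, y)
    # Direct recomputation of the squared residuals; should match `residuals`.
    manual = ((paddle.matmul(x, solution) - y) ** 2).sum(axis=-2)
    return residuals, manual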
def bmm(x, y, name=None): """ Applies batched matrix multiplication to two tensors. Both of the two input tensors must be three-dementional and share the same batch size. if x is a (b, m, k) tensor, y is a (b, k, n) tensor, the output will be a (b, m, n) tensor. Args: x (Tensor): The input Tensor. y (Tensor): The input Tensor. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The product Tensor. Examples: .. code-block:: python import paddle # In imperative mode: # size x: (2, 2, 3) and y: (2, 3, 2) x = paddle.to_tensor([[[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]], [[3.0, 3.0, 3.0], [4.0, 4.0, 4.0]]]) y = paddle.to_tensor([[[1.0, 1.0],[2.0, 2.0],[3.0, 3.0]], [[4.0, 4.0],[5.0, 5.0],[6.0, 6.0]]]) out = paddle.bmm(x, y) #output size: (2, 2, 2) #output value: #[[[6.0, 6.0],[12.0, 12.0]],[[45.0, 45.0],[60.0, 60.0]]] out_np = out.numpy() """ x_shape = x.shape y_shape = y.shape if not len(x_shape) == len(y_shape) == 3: raise ValueError( "x and y should be 3-dimensional. But received x's dimention: {}, y's dimention: {}". format(x_shape, y_shape)) if x_shape[2] != y_shape[1]: raise ValueError( "x's width must be equal with y's height. But received x's shape: {}, y's shape: {}". format(x_shape, y_shape)) if x_shape[0] != y_shape[0]: raise ValueError( "x's batch (shape[0]) must be equal with y's batch (shape[0]). But received x's shape: {}, y's shape: {}". format(x_shape, y_shape)) if paddle.in_dynamic_mode(): return _C_ops.bmm(x, y) helper = LayerHelper('bmm', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type='bmm', inputs={'X': x, 'Y': y}, outputs={'Out': out}) return out
1315
1373
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from ..fluid.layer_helper import LayerHelper from ..framework import _varbase_creator, _dygraph_tracer from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype from ..static import Variable from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode from ..fluid.layers import transpose, cast # noqa: F401 from ..fluid import layers import paddle from paddle.common_ops_import import core from paddle.common_ops_import import VarDesc from paddle import _C_ops __all__ = [] def matmul(x, y, transpose_x=False, transpose_y=False, name=None): """ Applies matrix multiplication to two tensors. `matmul` follows the complete broadcast rules, and its behavior is consistent with `np.matmul`. Currently, the input tensors' number of dimensions can be any, `matmul` can be used to achieve the `dot`, `matmul` and `batchmatmul`. The actual behavior depends on the shapes of :math:`x`, :math:`y` and the flag values of :attr:`transpose_x`, :attr:`transpose_y`. Specifically: - If a transpose flag is specified, the last two dimensions of the tensor are transposed. If the tensor is ndim-1 of shape, the transpose is invalid. If the tensor is ndim-1 of shape :math:`[D]`, then for :math:`x` it is treated as :math:`[1, D]`, whereas for :math:`y` it is the opposite: It is treated as :math:`[D, 1]`. The multiplication behavior depends on the dimensions of `x` and `y`. Specifically: - If both tensors are 1-dimensional, the dot product result is obtained. - If both tensors are 2-dimensional, the matrix-matrix product is obtained. - If the `x` is 1-dimensional and the `y` is 2-dimensional, a `1` is prepended to its dimension in order to conduct the matrix multiply. After the matrix multiply, the prepended dimension is removed. - If the `x` is 2-dimensional and `y` is 1-dimensional, the matrix-vector product is obtained. - If both arguments are at least 1-dimensional and at least one argument is N-dimensional (where N > 2), then a batched matrix multiply is obtained. If the first argument is 1-dimensional, a 1 is prepended to its dimension in order to conduct the batched matrix multiply and removed after. If the second argument is 1-dimensional, a 1 is appended to its dimension for the purpose of the batched matrix multiple and removed after. The non-matrix (exclude the last two dimensions) dimensions are broadcasted according the broadcast rule. For example, if input is a (j, 1, n, m) tensor and the other is a (k, m, p) tensor, out will be a (j, k, n, p) tensor. Args: x (Tensor): The input tensor which is a Tensor. y (Tensor): The input tensor which is a Tensor. transpose_x (bool): Whether to transpose :math:`x` before multiplication. transpose_y (bool): Whether to transpose :math:`y` before multiplication. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The output Tensor. Examples: .. 
code-block:: python import paddle import numpy as np # vector * vector x_data = np.random.random([10]).astype(np.float32) y_data = np.random.random([10]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [1] # matrix * vector x_data = np.random.random([10, 5]).astype(np.float32) y_data = np.random.random([5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10] # batched matrix * broadcasted vector x_data = np.random.random([10, 5, 2]).astype(np.float32) y_data = np.random.random([2]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 5] # batched matrix * batched matrix x_data = np.random.random([10, 5, 2]).astype(np.float32) y_data = np.random.random([10, 2, 5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 5, 5] # batched matrix * broadcasted matrix x_data = np.random.random([10, 1, 5, 2]).astype(np.float32) y_data = np.random.random([1, 3, 2, 5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 3, 5, 5] """ if in_dygraph_mode(): return _C_ops.final_state_matmul(x, y, transpose_x, transpose_y) if _in_legacy_dygraph(): op_type = 'matmul_v2' op = getattr(_C_ops, op_type) return op(x, y, 'trans_x', transpose_x, 'trans_y', transpose_y) attrs = { 'trans_x': transpose_x, 'trans_y': transpose_y, } def __check_input(x, y): var_names = {'x': x, 'y': y} for name, val in var_names.items(): check_variable_and_dtype( val, name, ['float16', 'float32', 'float64', 'complex64', 'complex128'], 'matmul') __check_input(x, y) helper = LayerHelper('matmul_v2', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matmul_v2', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs=attrs) return out def norm(x, p='fro', axis=None, keepdim=False, name=None): """ Returns the matrix norm (Frobenius) or vector norm (the 1-norm, the Euclidean or 2-norm, and in general the p-norm for p > 0) of a given tensor. .. note:: This norm API is different from `numpy.linalg.norm`. This api supports high-order input tensors (rank >= 3), and certain axis need to be pointed out to calculate the norm. But `numpy.linalg.norm` only supports 1-D vector or 2-D matrix as input tensor. For p-order matrix norm, this api actually treats matrix as a flattened vector to calculate the vector norm, NOT REAL MATRIX NORM. Args: x (Tensor): The input tensor could be N-D tensor, and the input data type could be float32 or float64. p (float|string, optional): Order of the norm. Supported values are `fro`, `0`, `1`, `2`, `inf`, `-inf` and any positive real number yielding the corresponding p-norm. Not supported: ord < 0 and nuclear norm. Default value is `fro`. axis (int|list|tuple, optional): The axis on which to apply norm operation. If axis is int or list(int)/tuple(int) with only one element, the vector norm is computed over the axis. If `axis < 0`, the dimension to norm operation is rank(input) + axis. If axis is a list(int)/tuple(int) with two elements, the matrix norm is computed over the axis. Defalut value is `None`. keepdim (bool, optional): Whether to reserve the reduced dimension in the output Tensor. 
The result tensor will have fewer dimension than the :attr:`input` unless :attr:`keepdim` is true, default value is False. name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: results of norm operation on the specified axis of input tensor, it's data type is the same as input's Tensor. Examples: .. code-block:: python import paddle import numpy as np shape=[2, 3, 4] np_input = np.arange(24).astype('float32') - 12 np_input = np_input.reshape(shape) x = paddle.to_tensor(np_input) #[[[-12. -11. -10. -9.] [ -8. -7. -6. -5.] [ -4. -3. -2. -1.]] # [[ 0. 1. 2. 3.] [ 4. 5. 6. 7.] [ 8. 9. 10. 11.]]] # compute frobenius norm along last two dimensions. out_fro = paddle.linalg.norm(x, p='fro', axis=[0,1]) # out_fro.numpy() [17.435596 16.911535 16.7332 16.911535] # compute 2-order vector norm along last dimension. out_pnorm = paddle.linalg.norm(x, p=2, axis=-1) #out_pnorm.numpy(): [[21.118711 13.190906 5.477226] # [ 3.7416575 11.224972 19.131126]] # compute 2-order norm along [0,1] dimension. out_pnorm = paddle.linalg.norm(x, p=2, axis=[0,1]) #out_pnorm.numpy(): [17.435596 16.911535 16.7332 16.911535] # compute inf-order norm out_pnorm = paddle.linalg.norm(x, p=np.inf) #out_pnorm.numpy() = [12.] out_pnorm = paddle.linalg.norm(x, p=np.inf, axis=0) #out_pnorm.numpy(): [[12. 11. 10. 9.] [8. 7. 6. 7.] [8. 9. 10. 11.]] # compute -inf-order norm out_pnorm = paddle.linalg.norm(x, p=-np.inf) #out_pnorm.numpy(): [0.] out_pnorm = paddle.linalg.norm(x, p=-np.inf, axis=0) #out_pnorm.numpy(): [[0. 1. 2. 3.] [4. 5. 6. 5.] [4. 3. 2. 1.]] """ def frobenius_norm(input, dim=None, keepdim=False, name=None): """ The frobenius norm OP is to calculate the frobenius norm of certain two dimensions of Tensor `input`. Args: input (Variable): Tensor, data type float32, float64. dim (list, optional): None for last two dimensions. keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False. """ if dim is not None and not (isinstance(dim, list) and len(dim) == 2): raise ValueError( "The dim of frobenius norm op should be None or two elements list!" ) if paddle.in_dynamic_mode(): if dim is None: return _C_ops.frobenius_norm(input, 'keep_dim', keepdim, 'reduce_all', True) return _C_ops.frobenius_norm(input, 'dim', dim, 'keep_dim', keepdim, 'reduce_all', False) attrs = {'dim': dim, 'keep_dim': keepdim, 'reduce_all': False} if dim is None: attrs['reduce_all'] = True check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'frobenius_norm') helper = LayerHelper('frobenius_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op( type='frobenius_norm', inputs={'X': input}, outputs={'Out': out}, attrs=attrs) return out def vector_norm(input, porder=None, axis=None, keepdim=False, asvector=False, name=None): """ Calculate the p-order vector norm for certain dimension of Tensor `input`. Args: input (Variable): Tensor, data type float32, float64. porder (float, optional): None for porder=2.0. axis (int, optional): None for last dimension. keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False. 
""" if paddle.in_dynamic_mode(): if axis is None: axis = -1 return _C_ops.p_norm(input, 'porder', porder, 'axis', axis, 'keepdim', keepdim, 'asvector', asvector) if porder is not None: check_type(porder, 'porder', (float, int), 'p_norm') if axis is not None: check_type(axis, 'axis', (int), 'p_norm') check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'p_norm') attrs = { 'axis': axis if axis is not None else -1, 'porder': float(porder) if porder is not None else 2.0, 'keepdim': keepdim, 'asvector': asvector, 'epsilon': 1e-12, } helper = LayerHelper('p_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op( type='p_norm', inputs={'X': input}, outputs={'Out': out}, attrs=attrs) return out def inf_norm(input, porder=None, axis=axis, keepdim=False, asvector=False, name=None): helper = LayerHelper('frobenius_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op(type='abs', inputs={'X': input}, outputs={'Out': out}) reduce_out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) reduce_all = True if axis == None or axis == [] or asvector == True else False axis = axis if axis != None and axis != [] else [0] reduce_type = 'reduce_max' if porder == np.float( 'inf') else 'reduce_min' helper.append_op( type=reduce_type, inputs={'X': out}, outputs={'Out': reduce_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) return reduce_out def p_matrix_norm(input, porder=1., axis=axis, keepdim=False, name=None): """ NOTE: This function actually treats the matrix as flattened vector to calculate vector norm instead of matrix norm. """ block = LayerHelper('norm', **locals()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) abs_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='abs', inputs={'X': input}, outputs={'Out': abs_out}) pow_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='pow', inputs={'X': abs_out}, outputs={'Out': pow_out}, attrs={'factor': porder}) sum_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': sum_out}, attrs={ 'dim': axis, 'keep_dim': keepdim, 'reduce_all': True if axis is None else False }) porder block.append_op( type='pow', inputs={'X': sum_out}, outputs={'Out': out}, attrs={'factor': float(1. / porder)}) return out if axis is None and p is not None: if isinstance(p, str): if p == "fro": return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name) else: raise ValueError( "only valid string values are 'fro', found {}".format(p)) elif isinstance(p, (int, float)): return vector_norm( x, porder=p, axis=axis, keepdim=keepdim, asvector=True, name=name) else: raise ValueError("only valid p type is string or float, found {}". 
format(type(p))) if isinstance(axis, tuple): axis = list(axis) if isinstance(axis, list) and len(axis) == 1: axis = axis[0] #calculate vector norm, where axis is int or list with only one integer if isinstance(axis, int): if isinstance(p, str): if p == "fro": return vector_norm( x, porder=2, axis=axis, keepdim=keepdim, asvector=False, name=name) else: raise ValueError( "only valid string values are 'fro', found {}".format(p)) elif isinstance(p, (int, float)): return vector_norm( x, axis=axis, porder=p, keepdim=keepdim, asvector=False, name=name) else: raise ValueError( "unspport p for p-order vector norm. except float, found {}". format(p)) #calculate matrix norm, where axis is list with two integers elif isinstance(axis, list) and len(axis) == 2: if p == "fro": return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name) elif p == np.inf or p == -np.inf: return inf_norm(x, porder=p, axis=axis, keepdim=keepdim, name=name) elif p == 0: raise ValueError( "just suport axis type int or list (length of list <=1) if p = 0, found {}". format(axis)) else: return p_matrix_norm( x, porder=p, axis=axis, keepdim=keepdim, name=name) else: raise ValueError( "except axis type int or list (length of list <=2), found {}". format(axis)) def dist(x, y, p=2, name=None): r""" This OP returns the p-norm of (x - y). It is not a norm in a strict sense, only as a measure of distance. The shapes of x and y must be broadcastable. The definition is as follows, for details, please refer to the `numpy's broadcasting <https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`_: - Each input has at least one dimension. - Match the two input dimensions from back to front, the dimension sizes must either be equal, one of them is 1, or one of them does not exist. Where, z = x - y, the shapes of x and y are broadcastable, then the shape of z can be obtained as follows: 1. If the number of dimensions of x and y are not equal, prepend 1 to the dimensions of the tensor with fewer dimensions. For example, The shape of x is [8, 1, 6, 1], the shape of y is [7, 1, 5], prepend 1 to the dimension of y. x (4-D Tensor): 8 x 1 x 6 x 1 y (4-D Tensor): 1 x 7 x 1 x 5 2. Determine the size of each dimension of the output z: choose the maximum value from the two input dimensions. z (4-D Tensor): 8 x 7 x 6 x 5 If the number of dimensions of the two inputs are the same, the size of the output can be directly determined in step 2. When p takes different values, the norm formula is as follows: When p = 0, defining $0^0=0$, the zero-norm of z is simply the number of non-zero elements of z. .. math:: ||z||_{0}=\lim_{p \\rightarrow 0}\sum_{i=1}^{m}|z_i|^{p} When p = inf, the inf-norm of z is the maximum element of z. .. math:: ||z||_\infty=\max_i |z_i| When p = -inf, the negative-inf-norm of z is the minimum element of z. .. math:: ||z||_{-\infty}=\min_i |z_i| Otherwise, the p-norm of z follows the formula, .. math:: ||z||_{p}=(\sum_{i=1}^{m}|z_i|^p)^{\\frac{1}{p}} Args: x (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64. y (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64. p (float, optional): The norm to be computed, its data type is float32 or float64. Default: 2. Returns: Tensor: Tensor that is the p-norm of (x - y). Examples: .. code-block:: python import paddle import numpy as np x = paddle.to_tensor(np.array([[3, 3],[3, 3]]), "float32") y = paddle.to_tensor(np.array([[3, 3],[3, 1]]), "float32") out = paddle.dist(x, y, 0) print(out) # out = [1.] out = paddle.dist(x, y, 2) print(out) # out = [2.] 
out = paddle.dist(x, y, float("inf")) print(out) # out = [2.] out = paddle.dist(x, y, float("-inf")) print(out) # out = [0.] """ check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'dist') check_variable_and_dtype(y, 'dtype', ['float32', 'float64'], 'dist') check_type(p, 'p', (float, int), 'dist') helper = LayerHelper("dist", **locals()) out = helper.create_variable_for_type_inference(x.dtype) inputs = {"X": [x], "Y": [y]} outputs = {'Out': [out]} attrs = {"p": float(p)} helper.append_op( type='dist', inputs=inputs, outputs={'Out': out}, attrs=attrs) return out def cond(x, p=None, name=None): """ Computes the condition number of a matrix or batches of matrices with respect to a matrix norm ``p``. Args: x (Tensor): The input tensor could be tensor of shape ``(*, m, n)`` where ``*`` is zero or more batch dimensions for ``p`` in ``(2, -2)``, or of shape ``(*, n, n)`` where every matrix is invertible for any supported ``p``. And the input data type could be ``float32`` or ``float64``. p (float|string, optional): Order of the norm. Supported values are `fro`, `nuc`, `1`, `-1`, `2`, `-2`, `inf`, `-inf`. Default value is `None`, meaning that the order of the norm is `2`. name (str, optional): The default value is `None`. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: computing results of condition number, its data type is the same as input Tensor ``x``. Examples: .. code-block:: python import paddle import numpy as np x = paddle.to_tensor([[1., 0, -1], [0, 1, 0], [1, 0, 1]]) # compute conditional number when p is None out = paddle.linalg.cond(x) # out.numpy() [1.4142135] # compute conditional number when order of the norm is 'fro' out_fro = paddle.linalg.cond(x, p='fro') # out_fro.numpy() [3.1622777] # compute conditional number when order of the norm is 'nuc' out_nuc = paddle.linalg.cond(x, p='nuc') # out_nuc.numpy() [9.2426405] # compute conditional number when order of the norm is 1 out_1 = paddle.linalg.cond(x, p=1) # out_1.numpy() [2.] # compute conditional number when order of the norm is -1 out_minus_1 = paddle.linalg.cond(x, p=-1) # out_minus_1.numpy() [1.] # compute conditional number when order of the norm is 2 out_2 = paddle.linalg.cond(x, p=2) # out_2.numpy() [1.4142135] # compute conditional number when order of the norm is -1 out_minus_2 = paddle.linalg.cond(x, p=-2) # out_minus_2.numpy() [0.70710677] # compute conditional number when order of the norm is inf out_inf = paddle.linalg.cond(x, p=np.inf) # out_inf.numpy() [2.] # compute conditional number when order of the norm is -inf out_minus_inf = paddle.linalg.cond(x, p=-np.inf) # out_minus_inf.numpy() [1.] 
a = paddle.to_tensor(np.random.randn(2, 4, 4).astype('float32')) # a.numpy() # [[[ 0.14063153 -0.996288 0.7996131 -0.02571543] # [-0.16303636 1.5534962 -0.49919784 -0.04402903] # [-1.1341571 -0.6022629 0.5445269 0.29154757] # [-0.16816919 -0.30972657 1.7521842 -0.5402487 ]] # [[-0.58081484 0.12402827 0.7229862 -0.55046535] # [-0.15178485 -1.1604939 0.75810957 0.30971205] # [-0.9669573 1.0940945 -0.27363303 -0.35416734] # [-1.216529 2.0018666 -0.7773689 -0.17556527]]] a_cond_fro = paddle.linalg.cond(a, p='fro') # a_cond_fro.numpy() [31.572273 28.120834] b = paddle.to_tensor(np.random.randn(2, 3, 4).astype('float64')) # b.numpy() # [[[ 1.61707487 0.46829144 0.38130416 0.82546736] # [-1.72710298 0.08866375 -0.62518804 0.16128892] # [-0.02822879 -1.67764516 0.11141444 0.3220113 ]] # [[ 0.22524372 0.62474921 -0.85503233 -1.03960523] # [-0.76620689 0.56673047 0.85064753 -0.45158196] # [ 1.47595418 2.23646462 1.5701758 0.10497519]]] b_cond_2 = paddle.linalg.cond(b, p=2) # b_cond_2.numpy() [3.30064451 2.51976252] """ def mat_norm(input, porder=1., axis=None): """ NOTE: Calculate the matrix norm of a square matrix or batches of square matrices, when porder is in (1, -1, inf, -inf) """ reduce_all = True if axis is None or axis == [] else False axis = axis if axis != None and axis != [] else [0] keepdim = False if paddle.in_dynamic_mode(): abs_out = _C_ops.abs(input) sum_out = _C_ops.reduce_sum(abs_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == 1 or porder == np.inf: return _C_ops.reduce_max(sum_out, 'dim', [-1], 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == -1 or porder == -np.inf: return _C_ops.reduce_min(sum_out, 'dim', [-1], 'keepdim', keepdim, 'reduce_all', reduce_all) block = LayerHelper('norm', **locals()) abs_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='abs', inputs={'X': input}, outputs={'Out': abs_out}) block.append_op( type='reduce_sum', inputs={'X': abs_out}, outputs={'Out': sum_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) if porder == 1 or porder == np.inf: block.append_op( type='reduce_max', inputs={'X': sum_out}, outputs={'Out': out}, attrs={ 'dim': [-1], 'keep_dim': keepdim, 'reduce_all': reduce_all }) if porder == -1 or porder == -np.inf: block.append_op( type='reduce_min', inputs={'X': sum_out}, outputs={'Out': out}, attrs={ 'dim': [-1], 'keep_dim': keepdim, 'reduce_all': reduce_all }) return out def fro_norm(input, porder=2, axis=[-1]): """ NOTE: Calculate the frobenius norm of a square matrix or batches of square matrices. """ reduce_all = True if axis is None or axis == [] else False keepdim = False if paddle.in_dynamic_mode(): pow_out = _C_ops.pow(input, 'factor', porder) sum_out_1 = _C_ops.reduce_sum(pow_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) sum_out_2 = _C_ops.reduce_sum(sum_out_1, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) return _C_ops.pow(sum_out_2, 'factor', float(1. 
/ porder)) block = LayerHelper('norm', **locals()) pow_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out_1 = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out_2 = block.create_variable_for_type_inference( dtype=block.input_dtype()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='pow', inputs={'X': input}, outputs={'Out': pow_out}, attrs={'factor': porder}) block.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': sum_out_1}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='reduce_sum', inputs={'X': sum_out_1}, outputs={'Out': sum_out_2}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='pow', inputs={'X': sum_out_2}, outputs={'Out': out}, attrs={'factor': float(1. / porder)}) return out def svd_norm(input, porder, axis=[-1]): """ NOTE: Calculate the matrix norm, which is related to singular values, of a matrix or batches of matrices, including nuclear norm, 2-norm and (-2)-norm. """ reduce_all = True if axis is None or axis == [] else False keepdim = False u, s, vh = svd(input, full_matrices=False) if paddle.in_dynamic_mode(): if porder == "nuc": return _C_ops.reduce_sum(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) max_out = _C_ops.reduce_max(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) min_out = _C_ops.reduce_min(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == 2: return _C_ops.elementwise_div(max_out, min_out, 'aixs', axis, 'use_mkldnn', False) if porder == -2: return _C_ops.elementwise_div(min_out, max_out, 'aixs', axis, 'use_mkldnn', False) block = LayerHelper('norm', **locals()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) if porder == "nuc": block.append_op( type='reduce_sum', inputs={'X': s}, outputs={'Out': out}, attrs={ 'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all }) return out max_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) min_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='reduce_max', inputs={'X': s}, outputs={'Out': max_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='reduce_min', inputs={'X': s}, outputs={'Out': min_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) if porder == 2: block.append_op( type='elementwise_div', inputs={'X': max_out, 'Y': min_out}, outputs={'Out': out}, attrs={'aixs': axis, 'use_mkldnn': False}) return out if porder == -2: block.append_op( type='elementwise_div', inputs={'X': min_out, 'Y': max_out}, outputs={'Out': out}, attrs={'aixs': axis, 'use_mkldnn': False}) return out def empty_tensor(input, shape): if paddle.in_dynamic_mode(): return input.reshape(shape) raise ValueError("only support x is nonempty tensor in static mode") x_shape = list(x.shape) if not len(x_shape) >= 2: raise ValueError("input should be a matrix or batches of matrices, " + "but the dimention of received input is {}".format( len(x_shape))) if p == None: p = 2 x_size = 0 if (0 in x_shape) else 1 if p in ("fro", "nuc", 1, -1, np.inf, -np.inf): if x_shape[len(x_shape) - 1] == x_shape[len(x_shape) - 2]: if x_size == 0: return empty_tensor(x, x_shape[:-2]) x_inv = x.inverse() if p == "fro": return fro_norm(x) * fro_norm(x_inv) if p == "nuc": return svd_norm(x, p) * svd_norm(x_inv, p) if p in (1, -1): return mat_norm( x, 
porder=p, axis=[-2]) * mat_norm( x_inv, porder=p, axis=[-2]) if p in (np.inf, -np.inf): return mat_norm( x, porder=p, axis=[-1]) * mat_norm( x_inv, porder=p, axis=[-1]) else: raise ValueError("only support p is {} when input is a ".format(p) + "square matrix or batches of square matrices") elif p in (2, -2): if x_size == 0: return empty_tensor(x, x_shape[:-2]) return svd_norm(x, porder=p) else: raise ValueError( "unsupported {} for p, only supporting ('fro', 'nuc', ".format( p) + "1, -1, 2, -2, inf, -inf) or none") def dot(x, y, name=None): """ This operator calculates inner product for vectors. .. note:: Support 1-d and 2-d Tensor. When it is 2d, the first dimension of this matrix is the batch dimension, which means that the vectors of multiple batches are dotted. Parameters: x(Tensor): 1-D or 2-D ``Tensor``. Its dtype should be ``float32``, ``float64``, ``int32``, ``int64`` y(Tensor): 1-D or 2-D ``Tensor``. Its dtype soulde be ``float32``, ``float64``, ``int32``, ``int64`` name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. Details: :ref:`api_guide_Name` Returns: Tensor: the calculated result Tensor. Examples: .. code-block:: python import paddle import numpy as np x_data = np.random.uniform(0.1, 1, [10]).astype(np.float32) y_data = np.random.uniform(1, 3, [10]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.dot(x, y) print(z) """ op_type = 'dot' # skip var type check in dygraph mode to improve efficiency if paddle.in_dynamic_mode(): op = getattr(_C_ops, op_type) return op(x, y) assert x is not None, 'x cannot be None in {}'.format(op_type) assert y is not None, 'y cannot be None in {}'.format(op_type) check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], op_type) check_variable_and_dtype(y, 'y', ['float32', 'float64', 'int32', 'int64'], op_type) helper = LayerHelper(op_type, **locals()) if name is None: out = helper.create_variable_for_type_inference(dtype=x.dtype) else: out = helper.create_variable( name=name, dtype=x.dtype, persistable=False) helper.append_op( type="dot", inputs={'X': x, 'Y': y}, attrs={}, outputs={"Out": out}) return out def cov(x, rowvar=True, ddof=True, fweights=None, aweights=None, name=None): """ Estimate the covariance matrix of the input variables, given data and weights. A covariance matrix is a square matrix, indicate the covariance of each pair variables in the input matrix. For example, for an N-dimensional samples X=[x1,x2,…xN]T, then the covariance matrix element Cij is the covariance of xi and xj. The element Cii is the variance of xi itself. Parameters: x(Tensor): A N-D(N<=2) Tensor containing multiple variables and observations. By default, each row of x represents a variable. Also see rowvar below. rowvar(Bool, optional): If rowvar is True (default), then each row represents a variable, with observations in the columns. Default: True ddof(Bool, optional): If ddof=True will return the unbiased estimate, and ddof=False will return the simple average. Default: True fweights(Tensor, optional): 1-D Tensor of integer frequency weights; The number of times each observation vector should be repeated. Default: None aweights(Tensor, optional): 1-D Tensor of observation vector weights. How important of the observation vector, larger data means this element is more important. Default: None name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. 
Details: :ref:`api_guide_Name` Returns: Tensor: The covariance matrix Tensor of the variables. Examples: .. code-block:: python import paddle xt = paddle.rand((3,4)) paddle.linalg.cov(xt) ''' Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, [[0.07918842, 0.06127326, 0.01493049], [0.06127326, 0.06166256, 0.00302668], [0.01493049, 0.00302668, 0.01632146]]) ''' """ op_type = 'cov' if len(x.shape) > 2 or len(x.shape) < 1: raise ValueError( "Input(x) only support N-D (1<=N<=2) tensor in cov, but received " "length of Input(input) is %s." % len(x.shape)) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cov') nx = x if len(x.shape) == 1: nx = x.reshape((1, -1)) if not rowvar and nx.shape[0] != 1: nx = nx.t() w = None observation_num = nx.shape[1] if fweights is not None: w = fweights.astype(nx.dtype) if len(w.shape) > 1: raise ValueError( "Input(fweights) only support N-D (N<=1) tensor in cov, but received " "shape of Input(input) is %s." % len(fweights.shape)) if fweights.shape[0] != observation_num: raise ValueError( "The number of Input(fweights) should equal to x's dim[1]: {}, but received " "size of Input(fweights) is {}.".format(observation_num, fweights.shape[0])) if fweights.min() < 0: raise ValueError( "The value of Input(fweights) cannot be negtive, but received " "min of Input(fweights) is {}.".format(fweights.min())) if not paddle.all(fweights == paddle.round(fweights.astype('float64'))): raise ValueError("Input(fweights) must be integer ") if aweights is not None: aw = aweights.astype(nx.dtype) if len(aw.shape) > 1: raise ValueError( "Input(aweights) only support N-D (N<=1) tensor in cov, but received " "length of Input(input) is %s." % len(aweights.shape)) check_variable_and_dtype(aweights, 'dtype', ['float32', 'float64'], 'cov') if aweights.shape[0] != observation_num: raise ValueError( "The number of Input(aweights) should equal to x's dim[1]: {}, but received " "size of Input(aweights) is {}.".format(observation_num, aweights.shape[0])) if aweights.min() < 0: raise ValueError( "The value of Input(aweights) cannot be negtive, but received " "min of Input(aweights) is {}.".format(aweights.min())) if w is not None: w = w * aw else: w = aw w_sum = paddle.to_tensor(observation_num, dtype=nx.dtype) if fweights is not None or aweights is not None: w_sum = w.sum() if w_sum.item() == 0: raise ValueError("The sum of weights is zero, can't be normalized.") if w is not None: nx_w = nx * w avg = (nx_w).sum(axis=1) / w_sum else: avg = nx.sum(axis=1) / w_sum nx_w = nx if w is not None and aweights is not None and ddof == True: norm_factor = w_sum - (w * aweights).sum() / w_sum else: norm_factor = w_sum - ddof if norm_factor <= 0: norm_factor = paddle.to_tensor(0, dtype=nx.dtype) nx = nx - avg.unsqueeze(1) xxt = paddle.mm(nx, nx_w.t().conj()) cov = paddle.divide(xxt, norm_factor).squeeze() return cov def t(input, name=None): """ Transpose <=2-D tensor. 0-D and 1-D tensors are returned as it is and 2-D tensor is equal to the paddle.transpose function which perm dimensions set 0 and 1. Args: input (Tensor): The input Tensor. It is a N-D (N<=2) Tensor of data types float16, float32, float64, int32. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` Returns: Tensor: A transposed n-D Tensor, with data type being float16, float32, float64, int32, int64. For Example: .. 
code-block:: text # Example 1 (0-D tensor) x = tensor([0.79]) paddle.t(x) = tensor([0.79]) # Example 2 (1-D tensor) x = tensor([0.79, 0.84, 0.32]) paddle.t(x) = tensor([0.79, 0.84, 0.32]) # Example 3 (2-D tensor) x = tensor([0.79, 0.84, 0.32], [0.64, 0.14, 0.57]) paddle.t(x) = tensor([0.79, 0.64], [0.84, 0.14], [0.32, 0.57]) Examples: .. code-block:: python import paddle x = paddle.ones(shape=[2, 3], dtype='int32') x_transposed = paddle.t(x) print(x_transposed.shape) # [3, 2] """ if len(input.shape) > 2: raise ValueError( "Input(input) only support N-D (N<=2) tensor, but received " "length of Input(input) is %s. Perhaps you can use paddle." "tensor.transpose() instead." % len(input.shape)) if paddle.in_dynamic_mode(): if len(input.shape) == 1: return input # 2-D tensor perm = [1, 0] out, _ = _C_ops.transpose2(input, 'axis', perm) return out check_variable_and_dtype( input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'], 'transpose') helper = LayerHelper('t', **locals()) out = helper.create_variable_for_type_inference(input.dtype) input_shape = helper.create_variable_for_type_inference(input.dtype) if len(input.shape) == 1: out = input else: helper.append_op( type='transpose2', inputs={'X': [input]}, outputs={'Out': [out], 'XShape': [input_shape]}, attrs={'axis': [1, 0]}) return out def cross(x, y, axis=None, name=None): """ Computes the cross product between two tensors along an axis. Inputs must have the same shape, and the length of their axes should be equal to 3. If `axis` is not given, it defaults to the first axis found with the length 3. Args: x (Tensor): The first input tensor. y (Tensor): The second input tensor. axis (int, optional): The axis along which to compute the cross product. It defaults to the first axis found with the length 3. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor. A Tensor with same data type as `x`. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0], [3.0, 3.0, 3.0]]) y = paddle.to_tensor([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]) z1 = paddle.cross(x, y) # [[-1. -1. -1.] # [ 2. 2. 2.] # [-1. -1. -1.]] z2 = paddle.cross(x, y, axis=1) # [[0. 0. 0.] # [0. 0. 0.] # [0. 0. 0.]] """ if in_dygraph_mode(): return _C_ops.final_state_cross(x, y, axis) else: if _in_legacy_dygraph(): if axis is not None: return _C_ops.cross(x, y, 'dim', axis) else: return _C_ops.cross(x, y) else: helper = LayerHelper("cross", **locals()) out = helper.create_variable_for_type_inference(x.dtype) attrs = dict() attrs['dim'] = axis helper.append_op( type='cross', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs=attrs) return out def cholesky(x, upper=False, name=None): r""" Computes the Cholesky decomposition of one symmetric positive-definite matrix or batches of symmetric positive-definite matrice. If `upper` is `True`, the decomposition has the form :math:`A = U^{T}U` , and the returned matrix :math:`U` is upper-triangular. Otherwise, the decomposition has the form :math:`A = LL^{T}` , and the returned matrix :math:`L` is lower-triangular. Args: x (Tensor): The input tensor. Its shape should be `[*, M, M]`, where * is zero or more batch dimensions, and matrices on the inner-most 2 dimensions all should be symmetric positive-definite. Its data type should be float32 or float64. upper (bool): The flag indicating whether to return upper or lower triangular matrices. Default: False. 
Returns: Tensor: A Tensor with same shape and data type as `x`. It represents \ triangular matrices generated by Cholesky decomposition. Examples: .. code-block:: python import paddle import numpy as np a = np.random.rand(3, 3) a_t = np.transpose(a, [1, 0]) x_data = np.matmul(a, a_t) + 1e-03 x = paddle.to_tensor(x_data) out = paddle.linalg.cholesky(x, upper=False) print(out) # [[1.190523 0. 0. ] # [0.9906703 0.27676893 0. ] # [1.25450498 0.05600871 0.06400121]] """ if paddle.in_dynamic_mode(): return _C_ops.cholesky(x, "upper", upper) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cholesky') check_type(upper, 'upper', bool, 'cholesky') helper = LayerHelper('cholesky', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='cholesky', inputs={'X': [x]}, outputs={'Out': out}, attrs={'upper': upper}) return out def matrix_rank(x, tol=None, hermitian=False, name=None): r""" Computes the rank of a matrix. The rank of a matrix is the number of singular values that are greater than the specified `tol` threshold when hermitian=False, or the number of eigenvalues in absolute value that are greater than the specified `tol` threshold when hermitian=True. Args: x (Tensor): The input tensor. Its shape should be `[..., m, n]`, where `...` is zero or more batch dimensions. If `x` is a batch of matrices then the output has the same batch dimensions. The data type of `x` should be float32 or float64. tol (float,Tensor,optional): the tolerance value. Default: None. If `tol` is not specified, and `sigma` is the largest singular value (or eigenvalues in absolute value), and `eps` is the epsilon value for the dtype of `x`, then `tol` is computed with formula `tol=sigma * max(m,n) * eps`. Note that if `x` is a batch of matrices, `tol` is computed this way for every batch. hermitian (bool,optional): indicates whether `x` is Hermitian. Default: False. When hermitian=True, `x` is assumed to be Hermitian, enabling a more efficient method for finding eigenvalues, but `x` is not checked inside the function. Instead, We just use the lower triangular of the matrix to compute. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: Rank of tensor x. Examples: .. 
code-block:: python import paddle a = paddle.eye(10) b = paddle.linalg.matrix_rank(a) print(b) # b = [10] c = paddle.ones(shape=[3, 4, 5, 5]) d = paddle.linalg.matrix_rank(c, tol=0.01, hermitian=True) print(d) # d = [[1, 1, 1, 1], # [1, 1, 1, 1], # [1, 1, 1, 1]] """ if paddle.in_dynamic_mode(): if tol is None: tol_tensor = None tol_attr = 0.0 use_default_tol = True elif isinstance(tol, Variable): if tol.dtype != x.dtype: tol_tensor = cast(tol, x.dtype) else: tol_tensor = tol tol_attr = 0.0 use_default_tol = False else: tol_tensor = None tol_attr = float(tol) use_default_tol = False return _C_ops.matrix_rank(x, tol_tensor, "tol", tol_attr, 'hermitian', hermitian, 'use_default_tol', use_default_tol) inputs = {} attrs = {} check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'matrix_rank') inputs['X'] = x if tol is None: attrs['use_default_tol'] = True elif isinstance(tol, Variable): check_variable_and_dtype(tol, 'tol', ['float32'], 'matrix_rank') attrs['use_default_tol'] = False if tol.dtype != x.dtype: inputs['TolTensor'] = cast(tol, x.dtype) else: inputs['TolTensor'] = tol else: check_type(tol, 'tol', float, 'matrix_rank') attrs['use_default_tol'] = False attrs['tol'] = tol check_type(hermitian, 'hermitian', bool, 'matrix_rank') attrs['hermitian'] = hermitian helper = LayerHelper('matrix_rank', **locals()) out = helper.create_variable_for_type_inference(dtype='int32') helper.append_op( type='matrix_rank', inputs=inputs, outputs={'Out': out}, attrs=attrs) return out def bmm(x, y, name=None): """ Applies batched matrix multiplication to two tensors. Both of the two input tensors must be three-dementional and share the same batch size. if x is a (b, m, k) tensor, y is a (b, k, n) tensor, the output will be a (b, m, n) tensor. Args: x (Tensor): The input Tensor. y (Tensor): The input Tensor. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The product Tensor. Examples: .. code-block:: python import paddle # In imperative mode: # size x: (2, 2, 3) and y: (2, 3, 2) x = paddle.to_tensor([[[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]], [[3.0, 3.0, 3.0], [4.0, 4.0, 4.0]]]) y = paddle.to_tensor([[[1.0, 1.0],[2.0, 2.0],[3.0, 3.0]], [[4.0, 4.0],[5.0, 5.0],[6.0, 6.0]]]) out = paddle.bmm(x, y) #output size: (2, 2, 2) #output value: #[[[6.0, 6.0],[12.0, 12.0]],[[45.0, 45.0],[60.0, 60.0]]] out_np = out.numpy() """ x_shape = x.shape y_shape = y.shape if not len(x_shape) == len(y_shape) == 3: raise ValueError( "x and y should be 3-dimensional. But received x's dimention: {}, y's dimention: {}". format(x_shape, y_shape)) if x_shape[2] != y_shape[1]: raise ValueError( "x's width must be equal with y's height. But received x's shape: {}, y's shape: {}". format(x_shape, y_shape)) if x_shape[0] != y_shape[0]: raise ValueError( "x's batch (shape[0]) must be equal with y's batch (shape[0]). But received x's shape: {}, y's shape: {}". format(x_shape, y_shape)) if paddle.in_dynamic_mode(): return _C_ops.bmm(x, y) helper = LayerHelper('bmm', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type='bmm', inputs={'X': x, 'Y': y}, outputs={'Out': out}) return out def histogram(input, bins=100, min=0, max=0, name=None): """ Computes the histogram of a tensor. The elements are sorted into equal width bins between min and max. If min and max are both zero, the minimum and maximum values of the data are used. Args: input (Tensor): A Tensor(or LoDTensor) with shape :math:`[N_1, N_2,..., N_k]` . 
The data type of the input Tensor should be float32, float64, int32, int64. bins (int): number of histogram bins min (int): lower end of the range (inclusive) max (int): upper end of the range (inclusive) Returns: Tensor: data type is int64, shape is (nbins,). Examples: .. code-block:: python import paddle inputs = paddle.to_tensor([1, 2, 1]) result = paddle.histogram(inputs, bins=4, min=0, max=3) print(result) # [0, 2, 1, 0] """ if paddle.in_dynamic_mode(): return _C_ops.histogram(input, "bins", bins, "min", min, "max", max) helper = LayerHelper('histogram', **locals()) check_variable_and_dtype( input, 'X', ['int32', 'int64', 'float32', 'float64'], 'histogram') out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64) helper.append_op( type='histogram', inputs={'X': input}, outputs={'Out': out}, attrs={'bins': bins, 'min': min, 'max': max}) return out def bincount(x, weights=None, minlength=0, name=None): """ Computes frequency of each value in the input tensor. Args: x (Tensor): A Tensor with non-negative integer. Should be 1-D tensor. weights (Tensor, optional): Weight for each value in the input tensor. Should have the same shape as input. Default is None. minlength (int, optional): Minimum number of bins. Should be non-negative integer. Default is 0. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor of frequency. Examples: .. code-block:: python import paddle x = paddle.to_tensor([1, 2, 1, 4, 5]) result1 = paddle.bincount(x) print(result1) # [0, 2, 1, 0, 1, 1] w = paddle.to_tensor([2.1, 0.4, 0.1, 0.5, 0.5]) result2 = paddle.bincount(x, weights=w) print(result2) # [0., 2.19999981, 0.40000001, 0., 0.50000000, 0.50000000] """ if x.dtype not in [paddle.int32, paddle.int64]: raise TypeError("Elements in Input(x) should all be integers") if paddle.in_dynamic_mode(): return _C_ops.bincount(x, weights, "minlength", minlength) helper = LayerHelper('bincount', **locals()) check_variable_and_dtype(x, 'X', ['int32', 'int64'], 'bincount') if weights is not None: check_variable_and_dtype(weights, 'Weights', ['int32', 'int64', 'float32', 'float64'], 'bincount') out = helper.create_variable_for_type_inference(dtype=weights.dtype) else: out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='bincount', inputs={'X': x, 'Weights': weights}, outputs={'Out': out}, attrs={'minlength': minlength}) return out def mv(x, vec, name=None): """ Performs a matrix-vector product of the matrix x and the vector vec. Args: x (Tensor): A tensor with shape :math:`[M, N]` , The data type of the input Tensor x should be one of float32, float64. vec (Tensor): A tensor with shape :math:`[N]` , The data type of the input Tensor x should be one of float32, float64. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor which is producted by x and vec. Examples: .. 
code-block:: python # x: [M, N], vec: [N] # paddle.mv(x, vec) # out: [M] import numpy as np import paddle x_data = np.array([[2, 1, 3], [3, 0, 1]]).astype("float64") x = paddle.to_tensor(x_data) vec_data = np.array([3, 5, 1]) vec = paddle.to_tensor(vec_data).astype("float64") out = paddle.mv(x, vec) """ if in_dygraph_mode(): return _C_ops.final_state_mv(x, vec) else: if _in_legacy_dygraph(): out = _C_ops.mv(x, vec) return out else: def __check_input(x, vec): var_names = {'x': x, 'vec': vec} for name, val in var_names.items(): check_variable_and_dtype(val, name, ['float32', 'float64'], 'mv') x_shape = list(x.shape) vec_shape = list(vec.shape) if len(x_shape) != 2: raise ValueError( "x should be 2-dimensional. But received x's dimention: {}". format(x_shape)) if len(vec_shape) != 1: raise ValueError( "vec should be 1-dimensional. But received vec's dimention: {}". format(vec_shape)) __check_input(x, vec) helper = LayerHelper('mv', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='mv', inputs={'X': x, 'Vec': vec}, outputs={'Out': out}) return out def det(x, name=None): """ Calculates determinant value of a square matrix or batches of square matrices. Args: x (Tensor): input (Tensor): the input matrix of size `(n, n)` or the batch of matrices of size `(*, n, n)` where `*` is one or more batch dimensions. Returns: y (Tensor):the determinant value of a square matrix or batches of square matrices. Examples: .. code-block:: python import paddle x = paddle.randn([3,3,3]) A = paddle.linalg.det(x) print(A) # [ 0.02547996, 2.52317095, -6.15900707]) """ if paddle.in_dynamic_mode(): return _C_ops.determinant(x) check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'det') input_shape = list(x.shape) assert len(input_shape) >= 2, \ "The x must be at least 2-dimensional, " \ "but received Input x's dimensional: %s.\n" % \ len(input_shape) assert (input_shape[-1] == input_shape[-2]), \ "Expect squared input," \ "but received %s by %s matrix.\n" \ %(input_shape[-2], input_shape[-1]) \ helper = LayerHelper('determinant', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='determinant', inputs={'Input': [x]}, outputs={'Out': [out]}) return out def slogdet(x, name=None): """ Calculates the sign and natural logarithm of the absolute value of a square matrix's or batches square matrices' determinant. The determinant can be computed with ``sign * exp(logabsdet) Supports input of float, double Note that for matrices that have zero determinant, this returns ``(0, -inf)`` Args: x (Tensor): the batch of matrices of size :math:`(*, n, n)` where math:`*` is one or more batch dimensions. Returns: y (Tensor): A tensor containing the sign of the determinant and the natural logarithm of the absolute value of determinant, respectively. Examples: .. code-block:: python import paddle x = paddle.randn([3,3,3]) A = paddle.linalg.slogdet(x) print(A) # [[ 1. , 1. , -1. 
], # [-0.98610914, -0.43010661, -0.10872950]]) """ if paddle.in_dynamic_mode(): return _C_ops.slogdeterminant(x) check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'slogdet') input_shape = list(x.shape) assert len(input_shape) >= 2, \ "The x must be at least 2-dimensional, " \ "but received Input x's dimensional: %s.\n" % \ len(input_shape) assert (input_shape[-1] == input_shape[-2]), \ "Expect squared input," \ "but received %s by %s matrix.\n" \ %(input_shape[-2], input_shape[-1]) \ helper = LayerHelper('slogdeterminant', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='slogdeterminant', inputs={'Input': [x]}, outputs={'Out': [out]}) return out def svd(x, full_matrices=False, name=None): r""" Computes the singular value decomposition of one matrix or a batch of regular matrices. Let :math:`X` be the input matrix or a batch of input matrices, the output should satisfies: .. math:: X = U * diag(S) * VT Args: x (Tensor): The input tensor. Its shape should be `[..., N, M]`, where `...` is zero or more batch dimensions. N and M can be arbitraty positive number. Note that if x is sigular matrices, the grad is numerical instable. The data type of x should be float32 or float64. full_matrices (bool): A flag to control the behavor of svd. If full_matrices = True, svd op will compute full U and V matrics, which means shape of U is `[..., N, N]`, shape of V is `[..., M, M]`. K = min(M, N). If full_matrices = False, svd op will use a economic method to store U and V. which means shape of U is `[..., N, K]`, shape of V is `[..., M, K]`. K = min(M, N). name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tuple of 3 tensors: (U, S, VH). VH is the conjugate transpose of V. S is the singlar value vectors of matrics with shape `[..., K]` Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [1.0, 3.0], [4.0, 6.0]]).astype('float64') x = x.reshape([3, 2]) u, s, vh = paddle.linalg.svd(x) print (u) #U = [[ 0.27364809, -0.21695147 ], # [ 0.37892198, -0.87112408 ], # [ 0.8840446 , 0.44053933 ]] print (s) #S = [8.14753743, 0.78589688] print (vh) #VT= [[ 0.51411221, 0.85772294], # [ 0.85772294, -0.51411221]] # one can verify : U * S * VT == X # U * UH == I # V * VH == I """ if paddle.in_dynamic_mode(): return _C_ops.svd(x, 'full_matrices', full_matrices) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'svd') check_type(full_matrices, 'full_matrices', bool, 'svd') helper = LayerHelper('svd', **locals()) u = helper.create_variable_for_type_inference(dtype=x.dtype) vh = helper.create_variable_for_type_inference(dtype=x.dtype) s = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['full_matrices'] = full_matrices helper.append_op( type='svd', inputs={'X': [x]}, outputs={'U': u, 'VH': vh, 'S': s}, attrs=attrs, ) return u, s, vh def matrix_power(x, n, name=None): r""" Computes the n-th power of a square matrix or a batch of square matrices. Let :math:`X` be a sqaure matrix or a batch of square matrices, :math:`n` be an exponent, the equation should be: .. math:: Out = X ^ {n} Specifically, - If `n > 0`, it returns the matrix or a batch of matrices raised to the power of `n`. - If `n = 0`, it returns the identity matrix or a batch of identity matrices. - If `n < 0`, it returns the inverse of each matrix (if invertible) raised to the power of `abs(n)`. 
Args: x (Tensor): A square matrix or a batch of square matrices to be raised to power `n`. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. n (int): The exponent. It can be any positive, negative integer or zero. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The n-th power of the matrix (or the batch of matrices) `x`. Its data type should be the same as that of `x`. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1, 2, 3], [1, 4, 9], [1, 8, 27]], dtype='float64') print(paddle.linalg.matrix_power(x, 2)) # [[6. , 34. , 102.], # [14. , 90. , 282.], # [36. , 250., 804.]] print(paddle.linalg.matrix_power(x, 0)) # [[1., 0., 0.], # [0., 1., 0.], # [0., 0., 1.]] print(paddle.linalg.matrix_power(x, -2)) # [[ 12.91666667, -12.75000000, 2.83333333 ], # [-7.66666667 , 8. , -1.83333333 ], # [ 1.80555556 , -1.91666667 , 0.44444444 ]] """ if paddle.in_dynamic_mode(): return _C_ops.matrix_power(x, "n", n) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'matrix_power') check_type(n, 'n', int, 'matrix_power') helper = LayerHelper('matrix_power', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matrix_power', inputs={'X': x}, outputs={'Out': out}, attrs={'n': n}) return out def qr(x, mode="reduced", name=None): r""" Computes the QR decomposition of one matrix or batches of matrice (backward is unsupported now). Args: x (Tensor): The input tensor. Its shape should be `[..., M, N]`, where ... is zero or more batch dimensions. M and N can be arbitrary positive number. The data type of x should be float32 or float64. mode (str, optional): A flag to control the behavior of qr, the default is "reduced". Suppose x's shape is `[..., M, N]` and denoting `K = min(M, N)`: If mode = "reduced", qr op will return reduced Q and R matrices, which means Q's shape is `[..., M, K]` and R's shape is `[..., K, N]`. If mode = "complete", qr op will return complete Q and R matrices, which means Q's shape is `[..., M, M]` and R's shape is `[..., M, N]`. If mode = "r", qr op will only return reduced R matrix, which means R's shape is `[..., K, N]`. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: If mode = "reduced" or mode = "complete", qr will return a two tensor-tuple, which represents Q and R. If mode = "r", qr will return a tensor which represents R. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') q, r = paddle.linalg.qr(x) print (q) print (r) # Q = [[-0.16903085, 0.89708523], # [-0.50709255, 0.27602622], # [-0.84515425, -0.34503278]]) # R = [[-5.91607978, -7.43735744], # [ 0. 
, 0.82807867]]) # one can verify : X = Q * R ; """ if paddle.in_dynamic_mode(): q, r = _C_ops.qr(x, 'mode', mode) if mode == "r": return r else: return q, r check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'qr') check_type(mode, 'mode', str, 'qr') helper = LayerHelper('qr', **locals()) q = helper.create_variable_for_type_inference(dtype=x.dtype) r = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['mode'] = mode helper.append_op( type='qr', inputs={'X': [x]}, outputs={'Q': q, 'R': r}, attrs=attrs) if mode == "r": return r else: return q, r def lu(x, pivot=True, get_infos=False, name=None): r""" Computes the LU factorization of an N-D(N>=2) matrix x. Returns the LU factorization(inplace x) and Pivots. low triangular matrix L and upper triangular matrix U are combined to a single LU matrix. Pivoting is done if pivot is set to True. P mat can be get by pivots: # ones = eye(rows) #eye matrix of rank rows # for i in range(cols): # swap(ones[i], ones[pivots[i]]) # return ones Args: X (Tensor): the tensor to factor of N-dimensions(N>=2). pivot (bool, optional): controls whether pivoting is done. Default: True. get_infos (bool, optional): if set to True, returns an info IntTensor. Default: False. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: factorization (Tensor): LU matrix, the factorization of input X. pivots (IntTensor): the pivots of size(∗(N-2), min(m,n)). `pivots` stores all the intermediate transpositions of rows. The final permutation `perm` could be reconstructed by this, details refer to upper example. infos (IntTensor, optional): if `get_infos` is `True`, this is a tensor of size (∗(N-2)) where non-zero values indicate whether factorization for the matrix or each minibatch has succeeded or failed. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') lu,p,info = paddle.linalg.lu(x, get_infos=True) # >>> lu: # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0.20000000, 0.80000000], # [0.60000000, 0.50000000]]) # >>> p # Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # [3, 3]) # >>> info # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # 0) P,L,U = paddle.linalg.lu_unpack(lu,p) # >>> P # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[0., 1., 0.], # [0., 0., 1.], # [1., 0., 0.]]), # >>> L # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[1. , 0. ], # [0.20000000, 1. ], # [0.60000000, 0.50000000]]), # >>> U # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0. 
, 0.80000000]])) # one can verify : X = P @ L @ U ; """ if paddle.in_dynamic_mode(): LU, Piv, Info = _C_ops.lu(x, 'pivots', pivot) if get_infos: return LU, Piv, Info else: return LU, Piv check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu') helper = LayerHelper('lu', **locals()) lu = helper.create_variable_for_type_inference(dtype=x.dtype) p = helper.create_variable_for_type_inference(dtype='int') info = helper.create_variable_for_type_inference(dtype='int') attrs = dict() attrs['pivots'] = pivot helper.append_op( type='lu', inputs={'X': x}, outputs={'Out': lu, 'Pivots': p, 'Infos': info}, attrs=attrs) if get_infos: return lu, p, info else: return lu, p def lu_unpack(x, y, unpack_ludata=True, unpack_pivots=True, name=None): r""" Unpack L U and P to single matrix tensor . unpack L and U matrix from LU, unpack permutation matrix P from Pivtos . P mat can be get by pivots: # ones = eye(rows) #eye matrix of rank rows # for i in range(cols): # swap(ones[i], ones[pivots[i]]) Args: x (Tensor): The LU tensor get from paddle.linalg.lu, which is combined by L and U. y (Tensor): Pivots get from paddle.linalg.lu. unpack_ludata (bool,optional): whether to unpack L and U from x. Default: True. unpack_pivots (bool, optional): whether to unpack permutation matrix P from Pivtos. Default: True. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: P (Tensor): Permutation matrix P of lu factorization. L (Tensor): The lower triangular matrix tensor of lu factorization. U (Tensor): The upper triangular matrix tensor of lu factorization. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') lu,p,info = paddle.linalg.lu(x, get_infos=True) # >>> lu: # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0.20000000, 0.80000000], # [0.60000000, 0.50000000]]) # >>> p # Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # [3, 3]) # >>> info # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # 0) P,L,U = paddle.linalg.lu_unpack(lu,p) # >>> P # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[0., 1., 0.], # [0., 0., 1.], # [1., 0., 0.]]), # >>> L # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[1. , 0. ], # [0.20000000, 1. ], # [0.60000000, 0.50000000]]), # >>> U # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0. , 0.80000000]])) # one can verify : X = P @ L @ U ; """ if paddle.in_dynamic_mode(): P, L, U = _C_ops.lu_unpack(x, y, 'unpack_ludata', unpack_ludata, 'unpack_pivots', unpack_pivots) return P, L, U check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu_unpack') helper = LayerHelper('lu_unpack', **locals()) p = helper.create_variable_for_type_inference(dtype=x.dtype) l = helper.create_variable_for_type_inference(dtype=x.dtype) u = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['unpack_ludata'] = unpack_ludata attrs['unpack_pivots'] = unpack_pivots helper.append_op( type='lu_unpack', inputs={'X': x, 'Pivots': y}, outputs={'Pmat': p, 'L': l, 'U': u}, attrs=attrs) return p, l, u def eig(x, name=None): """ This API performs the eigenvalue decomposition of a square matrix or a batch of square matrices. .. 
note:: If the matrix is a Hermitian or a real symmetric matrix, please use :ref:`paddle.linalg.eigh` instead, which is much faster. If only eigenvalues is needed, please use :ref:`paddle.linalg.eigvals` instead. If the matrix is of any shape, please use :ref:`paddle.linalg.svd`. This API is only supported on CPU device. The output datatype is always complex for both real and complex input. Args: x (Tensor): A tensor with shape math:`[*, N, N]`, The data type of the x should be one of ``float32``, ``float64``, ``compplex64`` or ``complex128``. name (str, optional): The default value is `None`. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Eigenvalues(Tensors): A tensor with shape math:`[*, N]` refers to the eigen values. Eigenvectors(Tensors): A tensor with shape math:`[*, N, N]` refers to the eigen vectors. Examples: .. code-block:: python import paddle import numpy as np paddle.device.set_device("cpu") x_data = np.array([[1.6707249, 7.2249975, 6.5045543], [9.956216, 8.749598, 6.066444 ], [4.4251957, 1.7983172, 0.370647 ]]).astype("float32") x = paddle.to_tensor(x_data) w, v = paddle.linalg.eig(x) print(w) # Tensor(shape=[3, 3], dtype=complex128, place=CPUPlace, stop_gradient=False, # [[(-0.5061363550800655+0j) , (-0.7971760990842826+0j) , # (0.18518077798279986+0j)], # [(-0.8308237755993192+0j) , (0.3463813401919749+0j) , # (-0.6837005269141947+0j) ], # [(-0.23142567697893396+0j), (0.4944999840400175+0j) , # (0.7058765252952796+0j) ]]) print(v) # Tensor(shape=[3], dtype=complex128, place=CPUPlace, stop_gradient=False, # [ (16.50471283351188+0j) , (-5.5034820550763515+0j) , # (-0.21026087843552282+0j)]) """ if paddle.in_dynamic_mode(): w, v = _C_ops.eig(x) return w, v check_variable_and_dtype( x, 'X', ['float32', 'float64', 'complex64', 'complex128'], 'eig') helper = LayerHelper('eig', **locals()) w = helper.create_variable_for_type_inference(x.dtype) v = helper.create_variable_for_type_inference(x.dtype) inputs = {'X': x} outputs = {'Eigenvalues': w, 'Eigenvectors': v} helper.append_op(type='eig', inputs=inputs, outputs=outputs) return w, v def eigvals(x, name=None): """ Compute the eigenvalues of one or more general matrices. Warning: The gradient kernel of this operator does not yet developed. If you need back propagation through this operator, please replace it with paddle.linalg.eig. Args: x (Tensor): A square matrix or a batch of square matrices whose eigenvalues will be computed. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32, float64, complex64, or complex128. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: A tensor containing the unsorted eigenvalues which has the same batch dimensions with `x`. The eigenvalues are complex-valued even when `x` is real. Examples: .. 
code-block:: python import paddle paddle.set_device("cpu") paddle.seed(1234) x = paddle.rand(shape=[3, 3], dtype='float64') # [[0.02773777, 0.93004224, 0.06911496], # [0.24831591, 0.45733623, 0.07717843], # [0.48016702, 0.14235102, 0.42620817]]) print(paddle.linalg.eigvals(x)) # [(-0.27078833542132674+0j), (0.29962280156230725+0j), (0.8824477020120244+0j)] #complex128 """ check_variable_and_dtype(x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigvals') x_shape = list(x.shape) if len(x_shape) < 2: raise ValueError( "The dimension of Input(x) should be at least 2, but received x's dimention = {}, x's shape = {}". format(len(x_shape), x_shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The last two dimensions of Input(x) should be equal, but received x's shape = {}". format(x_shape)) if paddle.in_dynamic_mode(): return _C_ops.eigvals(x) helper = LayerHelper('eigvals', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type='eigvals', inputs={'X': x}, outputs={'Out': out}) return out def multi_dot(x, name=None): """ Multi_dot is an operator that calculates multiple matrix multiplications. Supports inputs of float16(only GPU support), float32 and float64 dtypes. This function does not support batched inputs. The input tensor in [x] must be 2-D except for the first and last can be 1-D. If the first tensor is a 1-D vector of shape(n, ) it is treated as row vector of shape(1, n), similarly if the last tensor is a 1D vector of shape(n, ), it is treated as a column vector of shape(n, 1). If the first and last tensor are 2-D matrix, then the output is also 2-D matrix, otherwise the output is a 1-D vector. Multi_dot will select the lowest cost multiplication order for calculation. The cost of multiplying two matrices with shapes (a, b) and (b, c) is a * b * c. Given matrices A, B, C with shapes (20, 5), (5, 100), (100, 10) respectively, we can calculate the cost of different multiplication orders as follows: - Cost((AB)C) = 20x5x100 + 20x100x10 = 30000 - Cost(A(BC)) = 5x100x10 + 20x5x10 = 6000 In this case, multiplying B and C first, then multiply A, which is 5 times faster than sequential calculation. Args: x ([Tensor]): The input tensors which is a list Tensor. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The output Tensor. Examples: .. 
code-block:: python import paddle import numpy as np # A * B A_data = np.random.random([3, 4]).astype(np.float32) B_data = np.random.random([4, 5]).astype(np.float32) A = paddle.to_tensor(A_data) B = paddle.to_tensor(B_data) out = paddle.linalg.multi_dot([A, B]) print(out.numpy().shape) # [3, 5] # A * B * C A_data = np.random.random([10, 5]).astype(np.float32) B_data = np.random.random([5, 8]).astype(np.float32) C_data = np.random.random([8, 7]).astype(np.float32) A = paddle.to_tensor(A_data) B = paddle.to_tensor(B_data) C = paddle.to_tensor(C_data) out = paddle.linalg.multi_dot([A, B, C]) print(out.numpy().shape) # [10, 7] """ if paddle.in_dynamic_mode(): return _C_ops.multi_dot(x) check_type(x, 'x', (list, tuple), 'multi_dot') for id, item in enumerate(x): check_variable_and_dtype(item, 'x[' + str(id) + ']', ['float16', 'float32', 'float64'], 'multi_dot') if item.dtype != x[0].dtype: raise TypeError( "All the Tensors in the input must have the same data type.") helper = LayerHelper('multi_dot', **locals()) dtype = helper.input_dtype(input_param_name='x') out = helper.create_variable_for_type_inference(dtype) helper.append_op(type='multi_dot', inputs={"X": x}, outputs={"Out": out}) return out def eigh(x, UPLO='L', name=None): """ Compute the eigenvalues and eigenvectors of a complex Hermitian (conjugate symmetric) or a real symmetric matrix. Args: x (Tensor): A tensor with shape :math:`[*, N, N]` , The data type of the input Tensor x should be one of float32, float64, complex64, complex128. UPLO(str, optional): (string, default 'L'), 'L' represents the lower triangular matrix, "'U' represents the upper triangular matrix.". name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: out_value(Tensor): A Tensor with shape [*, N] and data type of float32 and float64. The eigenvalues of eigh op. out_vector(Tensor): A Tensor with shape [*, N, N] and data type of float32,float64,complex64 and complex128. The eigenvectors of eigh op. Examples: .. code-block:: python import numpy as np import paddle x_data = np.array([[1, -2j], [2j, 5]]) x = paddle.to_tensor(x_data) out_value, out_vector = paddle.linalg.eigh(x, UPLO='L') print(out_value) #[0.17157288, 5.82842712] print(out_vector) #[(-0.9238795325112867+0j), (-0.3826834323650898+0j)], #[ 0.3826834323650898j , -0.9238795325112867j ]] """ if paddle.in_dynamic_mode(): return _C_ops.eigh(x, 'UPLO', UPLO) def __check_input(x, UPLO): x_shape = list(x.shape) if len(x.shape) < 2: raise ValueError( "Input(input) only support >=2 tensor, but received " "length of Input(input) is %s." % len(x.shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The input matrix must be batches of square matrices. But received x's dimention: {}". format(x_shape)) if UPLO != 'L' and UPLO != 'U': raise ValueError( "UPLO must be L or U. 
But received UPLO is: {}".format(UPLO)) __check_input(x, UPLO) helper = LayerHelper('eigh', **locals()) check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigh') out_value = helper.create_variable_for_type_inference(dtype=x.dtype) out_vector = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='eigh', inputs={'X': x}, outputs={'Eigenvalues': out_value, 'Eigenvectors': out_vector}, attrs={'UPLO': UPLO}) return out_value, out_vector def pinv(x, rcond=1e-15, hermitian=False, name=None): r""" Calculate pseudo inverse via SVD(singular value decomposition) of one matrix or batches of regular matrix. .. math:: if hermitian == False: x = u * s * vt (SVD) out = v * 1/s * ut else: x = u * s * ut (eigh) out = u * 1/s * u.conj().transpose(-2,-1) If x is hermitian or symmetric matrix, svd will be replaced with eigh. Args: x(Tensor): The input tensor. Its shape should be (*, m, n) where * is zero or more batch dimensions. m and n can be arbitraty positive number. The data type of x should be float32 or float64 or complex64 or complex128. When data type is complex64 or cpmplex128, hermitian should be set True. rcond(Tensor, optional): the tolerance value to determine when is a singular value zero. Defalut:1e-15. hermitian(bool, optional): indicates whether x is Hermitian if complex or symmetric if real. Default: False. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The tensor with same data type with x. it represents pseudo inverse of x. Its shape should be (*, n, m). Examples: .. code-block:: python import paddle x = paddle.arange(15).reshape((3, 5)).astype('float64') input = paddle.to_tensor(x) out = paddle.linalg.pinv(input) print(input) print(out) # input: # [[0. , 1. , 2. , 3. , 4. ], # [5. , 6. , 7. , 8. , 9. 
], # [10., 11., 12., 13., 14.]] # out: # [[-0.22666667, -0.06666667, 0.09333333], # [-0.12333333, -0.03333333, 0.05666667], # [-0.02000000, 0.00000000, 0.02000000], # [ 0.08333333, 0.03333333, -0.01666667], # [ 0.18666667, 0.06666667, -0.05333333]] # one can verify : x * out * x = x ; # or out * x * out = x ; """ if paddle.in_dynamic_mode(): if not hermitian: # combine svd and matmul op u, s, vt = _C_ops.svd(x, 'full_matrices', False) max_singular_val = _C_ops.reduce_max(s, 'dim', [-1], 'keep_dim', True, \ 'reduce_all', False) rcond = paddle.to_tensor(rcond, dtype=x.dtype) cutoff = rcond * max_singular_val y = float('inf') y = paddle.to_tensor(y, dtype=x.dtype) condition = s > cutoff cond_int = layers.cast(condition, s.dtype) cond_not_int = layers.cast(layers.logical_not(condition), s.dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st, _ = _C_ops.unsqueeze2(singular, 'axes', [-2]) dims = list(range(len(vt.shape))) perm = dims[:-2] + [dims[-1]] + [dims[-2]] v, _ = _C_ops.transpose2(vt, 'axis', perm) out_1 = v * st out_2 = _C_ops.matmul_v2(out_1, u, 'trans_x', False, 'trans_y', True) return out_2 else: # combine eigh and matmul op s, u = _C_ops.eigh(x, 'UPLO', 'L') s_abs = paddle.abs(s) max_singular_val = _C_ops.reduce_max(s_abs, 'dim', [-1], 'keep_dim', True, \ 'reduce_all', False) rcond = paddle.to_tensor(rcond, dtype=s.dtype) cutoff = rcond * max_singular_val y = float('inf') y = paddle.to_tensor(y, dtype=s.dtype) condition = s_abs > cutoff cond_int = layers.cast(condition, s.dtype) cond_not_int = layers.cast(layers.logical_not(condition), s.dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st, _ = _C_ops.unsqueeze2(singular, 'axes', [-2]) out_1 = u * st u_conj = _C_ops.conj(u) out_2 = _C_ops.matmul_v2(out_1, u_conj, 'trans_x', False, 'trans_y', True) return out_2 else: if not hermitian: helper = LayerHelper('pinv', **locals()) dtype = x.dtype check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'pinv') u = helper.create_variable_for_type_inference(dtype) s = helper.create_variable_for_type_inference(dtype) vt = helper.create_variable_for_type_inference(dtype) helper.append_op( type='svd', inputs={'X': [x]}, outputs={'U': u, 'VH': vt, 'S': s}, attrs={'full_matrices': False}, ) max_singular_val = helper.create_variable_for_type_inference(dtype) helper.append_op( type='reduce_max', inputs={'X': s}, outputs={'Out': max_singular_val}, attrs={'dim': [-1], 'keep_dim': True, 'reduce_all': False}) rcond = layers.fill_constant(shape=[1], value=rcond, dtype=dtype) cutoff = rcond * max_singular_val y = float('inf') y = layers.fill_constant(shape=[1], value=y, dtype=dtype) condition = s > cutoff cond_int = layers.cast(condition, dtype) cond_not_int = layers.cast(layers.logical_not(condition), dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st = helper.create_variable_for_type_inference(dtype=dtype) st_shape = helper.create_variable_for_type_inference(dtype=dtype) helper.append_op( type='unsqueeze2', inputs={'X': singular}, attrs={'axes': [-2]}, outputs={'Out': st, 'XShape': st_shape}) dims = list(range(len(vt.shape))) perm = dims[:-2] + [dims[-1]] + [dims[-2]] v = helper.create_variable_for_type_inference(dtype) v_shape = helper.create_variable_for_type_inference(dtype) helper.append_op( 
type='transpose2', inputs={'X': [vt]}, outputs={'Out': [v], 'XShape': [v_shape]}, attrs={'axis': perm}) out_1 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='elementwise_mul', inputs={'X': v, 'Y': st}, outputs={'Out': out_1}, attrs={'axis': -1, 'use_mkldnn': False}) out_1 = helper.append_activation(out_1) out_2 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='matmul_v2', inputs={'X': out_1, 'Y': u}, outputs={'Out': out_2}, attrs={'trans_x': False, 'trans_y': True}, ) return out_2 else: helper = LayerHelper('pinv', **locals()) dtype = x.dtype check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'pinv') if dtype == paddle.complex128: s_type = 'float64' elif dtype == paddle.complex64: s_type = 'float32' else: s_type = dtype u = helper.create_variable_for_type_inference(dtype) s = helper.create_variable_for_type_inference(s_type) helper.append_op( type='eigh', inputs={'X': x}, outputs={'Eigenvalues': s, 'Eigenvectors': u}, attrs={'UPLO': 'L'}) s_abs = helper.create_variable_for_type_inference(s_type) helper.append_op( type='abs', inputs={'X': s}, outputs={'Out': s_abs}) max_singular_val = helper.create_variable_for_type_inference(s_type) helper.append_op( type='reduce_max', inputs={'X': s_abs}, outputs={'Out': max_singular_val}, attrs={'dim': [-1], 'keep_dim': True, 'reduce_all': False}) rcond = layers.fill_constant(shape=[1], value=rcond, dtype=s_type) cutoff = rcond * max_singular_val y = float('inf') y = layers.fill_constant(shape=[1], value=y, dtype=s_type) condition = s_abs > cutoff cond_int = layers.cast(condition, s_type) cond_not_int = layers.cast(layers.logical_not(condition), s_type) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st = helper.create_variable_for_type_inference(dtype=s_type) st_shape = helper.create_variable_for_type_inference(dtype=s_type) helper.append_op( type='unsqueeze2', inputs={'X': singular}, attrs={'axes': [-2]}, outputs={'Out': st, 'XShape': st_shape}) out_1 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='elementwise_mul', inputs={'X': u, 'Y': st}, outputs={'Out': out_1}, attrs={'axis': -1, 'use_mkldnn': False}) out_1 = helper.append_activation(out_1) u_conj = helper.create_variable_for_type_inference(dtype) helper.append_op( type='conj', inputs={'X': u}, outputs={'Out': [u_conj]}) out_2 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='matmul_v2', inputs={'X': out_1, 'Y': u_conj}, outputs={'Out': out_2}, attrs={'trans_x': False, 'trans_y': True}, ) return out_2 def solve(x, y, name=None): r""" Computes the solution of a square system of linear equations with a unique solution for input 'X' and 'Y'. Let :math: `X` be a sqaure matrix or a batch of square matrices, :math:`Y` be a vector/matrix or a batch of vectors/matrices, the equation should be: .. math:: Out = X^-1 * Y Specifically, - This system of linear equations has one solution if and only if input 'X' is invertible. Args: x (Tensor): A square matrix or a batch of square matrices. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): A vector/matrix or a batch of vectors/matrices. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. name(str, optional): Name for the operation (optional, default is None). 
For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of a square system of linear equations with a unique solution for input 'x' and 'y'. Its data type should be the same as that of `x`. Examples: .. code-block:: python # a square system of linear equations: # 2*X0 + X1 = 9 # X0 + 2*X1 = 8 import paddle import numpy as np np_x = np.array([[3, 1],[1, 2]]) np_y = np.array([9, 8]) x = paddle.to_tensor(np_x, dtype="float64") y = paddle.to_tensor(np_y, dtype="float64") out = paddle.linalg.solve(x, y) print(out) # [2., 3.]) """ if paddle.in_dynamic_mode(): return _C_ops.solve(x, y) inputs = {"X": [x], "Y": [y]} helper = LayerHelper("solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type="solve", inputs={"X": x, "Y": y}, outputs={"Out": out}) return out def triangular_solve(x, y, upper=True, transpose=False, unitriangular=False, name=None): r""" Computes the solution of a system of equations with a triangular coefficient matrix `x` and multiple right-hand sides `y` . Input `x` and `y` is 2D matrices or batches of 2D matrices. If the inputs are batches, the outputs is also batches. Args: x (Tensor): The input triangular coefficient matrix. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. upper (bool, optional): Whether to solve the upper-triangular system of equations (default) or the lower-triangular system of equations. Default: True. transpose (bool, optional): whether `x` should be transposed before calculation. Default: False. unitriangular (bool, optional): whether `x` is unit triangular. If True, the diagonal elements of `x` are assumed to be 1 and not referenced from `x` . Default: False. name(str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of the system of equations. Its data type should be the same as that of `x`. Examples: .. code-block:: python # a square system of linear equations: # x1 + x2 + x3 = 0 # 2*x2 + x3 = -9 # -x3 = 5 import paddle import numpy as np x = paddle.to_tensor([[1, 1, 1], [0, 2, 1], [0, 0,-1]], dtype="float64") y = paddle.to_tensor([[0], [-9], [5]], dtype="float64") out = paddle.linalg.triangular_solve(x, y, upper=True) print(out) # [7, -2, -5] """ if paddle.in_dynamic_mode(): return _C_ops.triangular_solve(x, y, 'upper', upper, 'transpose', transpose, 'unitriangular', unitriangular) inputs = {"X": [x], "Y": [y]} helper = LayerHelper("triangular_solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'triangular_solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'triangular_solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='triangular_solve', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs={ 'upper': upper, 'transpose': transpose, 'unitriangular': unitriangular }) return out def cholesky_solve(x, y, upper=False, name=None): r""" Solves a linear system of equations A @ X = B, given A's Cholesky factor matrix u and matrix B. Input `x` and `y` is 2D matrices or batches of 2D matrices. 
If the inputs are batches, the outputs is also batches. Args: x (Tensor): The input matrix which is upper or lower triangular Cholesky factor of square matrix A. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. upper (bool, optional): whether to consider the Cholesky factor as a lower or upper triangular matrix. Default: False. name(str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of the system of equations. Its data type is the same as that of `x`. Examples: .. code-block:: python import paddle u = paddle.to_tensor([[1, 1, 1], [0, 2, 1], [0, 0,-1]], dtype="float64") b = paddle.to_tensor([[0], [-9], [5]], dtype="float64") out = paddle.linalg.cholesky_solve(b, u, upper=True) print(out) # [-2.5, -7, 9.5] """ if paddle.in_dynamic_mode(): return _C_ops.cholesky_solve(x, y, 'upper', upper) helper = LayerHelper("cholesky_solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'cholesky_solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'cholesky_solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='cholesky_solve', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs={'upper': upper}) return out def eigvalsh(x, UPLO='L', name=None): """ Computes the eigenvalues of a complex Hermitian (conjugate symmetric) or a real symmetric matrix. Args: x (Tensor): A tensor with shape :math:`[_, M, M]` , The data type of the input Tensor x should be one of float32, float64, complex64, complex128. UPLO(str, optional): Lower triangular part of a (‘L’, default) or the upper triangular part (‘U’). name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor eigenvalues in ascending order. Examples: .. code-block:: python import numpy as np import paddle x_data = np.array([[1, -2j], [2j, 5]]) x = paddle.to_tensor(x_data) out_value = paddle.eigvalsh(x, UPLO='L') print(out_value) #[0.17157288, 5.82842712] """ if paddle.in_dynamic_mode(): is_test = x.stop_gradient values, _ = _C_ops.eigvalsh(x, 'UPLO', UPLO, 'is_test', is_test) return values def __check_input(x, UPLO): x_shape = list(x.shape) if len(x.shape) < 2: raise ValueError( "Input(input) only support >=2 tensor, but received " "length of Input(input) is %s." % len(x.shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The input matrix must be batches of square matrices. But received x's dimention: {}". format(x_shape)) if UPLO != 'L' and UPLO != 'U': raise ValueError( "UPLO must be L or U. 
But received UPLO is: {}".format(UPLO)) __check_input(x, UPLO) helper = LayerHelper('eigvalsh', **locals()) check_variable_and_dtype(x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigvalsh') out_value = helper.create_variable_for_type_inference(dtype=x.dtype) out_vector = helper.create_variable_for_type_inference(dtype=x.dtype) is_test = x.stop_gradient helper.append_op( type='eigvalsh', inputs={'X': x}, outputs={'Eigenvalues': out_value, 'Eigenvectors': out_vector}, attrs={'UPLO': UPLO, 'is_test': is_test}) return out_value def lstsq(x, y, rcond=None, driver=None, name=None): """ Computes a solution to the least squares problem of a system of linear equations. Args: x (Tensor): A tensor with shape ``(*, M, N)`` , the data type of the input Tensor ``x`` should be one of float32, float64. y (Tensor): A tensor with shape ``(*, M, K)`` , the data type of the input Tensor ``y`` should be one of float32, float64. rcond(float, optional): The default value is None. A float pointing number used to determine the effective rank of ``x``. If ``rcond`` is None, it will be set to max(M, N) times the machine precision of x_dtype. driver(str, optional): The default value is None. The name of LAPACK method to be used. For CPU inputs the valid values are ‘gels’, ‘gelsy’, ‘gelsd, ‘gelss’. For CUDA input, the only valid driver is ‘gels’. If ``driver`` is None, ‘gelsy’ is used for CPU inputs and ‘gels’ for CUDA inputs. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tuple: A tuple of 4 Tensors which is (``solution``, ``residuals``, ``rank``, ``singular_values``). ``solution`` is a tensor with shape ``(*, N, K)``, meaning the least squares solution. ``residuals`` is a tensor with shape ``(*, K)``, meaning the squared residuals of the solutions, which is computed when M > N and every matrix in ``x`` is full-rank, otherwise return an empty tensor. ``rank`` is a tensor with shape ``(*)``, meaning the ranks of the matrices in ``x``, which is computed when ``driver`` in (‘gelsy’, ‘gelsd’, ‘gelss’), otherwise return an empty tensor. ``singular_values`` is a tensor with shape ``(*, min(M, N))``, meaning singular values of the matrices in ``x``, which is computed when ``driver`` in (‘gelsd’, ‘gelss’), otherwise return an empty tensor. Examples: .. code-block:: python import paddle paddle.set_device("cpu") x = paddle.to_tensor([[1, 3], [3, 2], [5, 6.]]) y = paddle.to_tensor([[3, 4, 6], [5, 3, 4], [1, 2, 1.]]) results = paddle.linalg.lstsq(x, y, driver="gelsd") print(results[0]) # [[ 0.78350395, -0.22165027, -0.62371236], # [-0.11340097, 0.78866047, 1.14948535]] print(results[1]) # [19.81443405, 10.43814468, 30.56185532]) print(results[2]) # 2 print(results[3]) # [9.03455734, 1.54167950] x = paddle.to_tensor([[10, 2, 3], [3, 10, 5], [5, 6, 12.]]) y = paddle.to_tensor([[4, 2, 9], [2, 0, 3], [2, 5, 3.]]) results = paddle.linalg.lstsq(x, y, driver="gels") print(results[0]) # [[ 0.39386186, 0.10230173, 0.93606132], # [ 0.10741687, -0.29028133, 0.11892585], # [-0.05115091, 0.51918161, -0.19948854]] print(results[1]) # [] """ device = paddle.get_device() if device == "cpu": if driver not in (None, "gels", "gelss", "gelsd", "gelsy"): raise ValueError( "Only support valid driver is 'gels', 'gelss', 'gelsd', 'gelsy' or None for CPU inputs. But got {}". 
format(driver)) driver = "gelsy" if driver is None else driver elif "gpu" in device: if driver not in (None, "gels"): raise ValueError( "Only support valid driver is 'gels' or None for CUDA inputs. But got {}". format(driver)) driver = "gels" if driver is None else driver else: raise RuntimeError("Only support lstsq api for CPU or CUDA device.") if x.dtype == y.dtype and x.dtype in (paddle.float32, paddle.float64): pass else: raise ValueError( "Only support x and y have the same dtype such as 'float32' and 'float64'." ) if rcond is None: if x.dtype == paddle.float32: rcond = 1e-7 * max(x.shape[-2], x.shape[-1]) elif x.dtype == paddle.float64: rcond = 1e-15 * max(x.shape[-2], x.shape[-1]) if paddle.in_dynamic_mode(): solution, rank, singular_values = _C_ops.lstsq(x, y, "rcond", rcond, "driver", driver) if x.shape[-2] > x.shape[-1]: matmul_out = _varbase_creator(dtype=x.dtype) _C_ops.matmul(x, solution, matmul_out, 'trans_x', False, 'trans_y', False) minus_out = _C_ops.elementwise_sub(matmul_out, y) pow_out = _C_ops.pow(minus_out, 'factor', 2) residuals = _C_ops.reduce_sum(pow_out, 'dim', [-2], 'keepdim', False, 'reduce_all', False) else: residuals = paddle.empty(shape=[0], dtype=x.dtype) if driver == "gels": rank = paddle.empty(shape=[0], dtype=paddle.int32) singular_values = paddle.empty(shape=[0], dtype=x.dtype) elif driver == "gelsy": singular_values = paddle.empty(shape=[0], dtype=x.dtype) return solution, residuals, rank, singular_values helper = LayerHelper('lstsq', **locals()) check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq') check_variable_and_dtype( y, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq') solution = helper.create_variable_for_type_inference(dtype=x.dtype) residuals = helper.create_variable_for_type_inference(dtype=x.dtype) rank = helper.create_variable_for_type_inference(dtype=paddle.int32) singular_values = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='lstsq', inputs={'X': x, 'Y': y}, outputs={ 'Solution': solution, 'Rank': rank, 'SingularValues': singular_values }, attrs={'rcond': rcond, 'driver': driver}) matmul_out = helper.create_variable_for_type_inference(dtype=x.dtype) minus_out = helper.create_variable_for_type_inference(dtype=x.dtype) pow_out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matmul_v2', inputs={'X': x, 'Y': solution}, outputs={'Out': matmul_out}, attrs={ 'trans_x': False, 'trans_y': False, }) helper.append_op( type='elementwise_sub', inputs={'X': matmul_out, 'Y': y}, outputs={'Out': minus_out}) helper.append_op( type='pow', inputs={'X': minus_out}, outputs={'Out': pow_out}, attrs={'factor': 2}) helper.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': residuals}, attrs={'dim': [-2], 'keep_dim': False, 'reduce_all': False}) if driver == "gels": rank = paddle.static.data(name='rank', shape=[0]) singular_values = paddle.static.data(name='singular_values', shape=[0]) elif driver == "gelsy": singular_values = paddle.static.data(name='singular_values', shape=[0]) return solution, residuals, rank, singular_values
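The linear-algebra routines defined in the source above can be exercised end to end in dynamic mode. Below is a minimal usage sketch, not part of the Paddle source itself, that solves a small square system with paddle.linalg.solve and checks the answer by substituting it back; it assumes an installed Paddle build with dygraph enabled, and the matrix values are illustrative only.

import paddle

# A small, well-conditioned 2x2 system: 3*x0 + x1 = 9, x0 + 2*x1 = 8.
A = paddle.to_tensor([[3.0, 1.0], [1.0, 2.0]], dtype="float64")
b = paddle.to_tensor([[9.0], [8.0]], dtype="float64")

x = paddle.linalg.solve(A, b)             # expected [[2.], [3.]]
residual = paddle.matmul(A, x) - b        # should be numerically close to zero
print(x.numpy())
print(float(paddle.abs(residual).max()))  # ~0.0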
histogram
Computes the histogram of a tensor. The elements are sorted into equal width bins between min and max.
    If min and max are both zero, the minimum and maximum values of the data are used.

    Args:
        input (Tensor): A Tensor(or LoDTensor) with shape :math:`[N_1, N_2,..., N_k]` .
            The data type of the input Tensor should be float32, float64, int32, int64.
        bins (int): number of histogram bins
        min (int): lower end of the range (inclusive)
        max (int): upper end of the range (inclusive)

    Returns:
        Tensor: data type is int64, shape is (nbins,).

    Examples:
        .. code-block:: python

            import paddle

            inputs = paddle.to_tensor([1, 2, 1])
            result = paddle.histogram(inputs, bins=4, min=0, max=3)
            print(result) # [0, 2, 1, 0]
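As a quick sanity check on the binning behaviour described in this docstring (illustrative only; it assumes ``numpy.histogram`` as a reference, which is not used anywhere in this row):

    .. code-block:: python

        import numpy as np

        # Same data as the docstring example: values 1, 2, 1 bucketed into
        # 4 equal-width bins over the inclusive range [0, 3].
        counts, edges = np.histogram(np.array([1, 2, 1]), bins=4, range=(0, 3))
        print(counts)  # [0 2 1 0], matching the expected output in the example above
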
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from ..fluid.layer_helper import LayerHelper from ..framework import _varbase_creator, _dygraph_tracer from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype from ..static import Variable from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode from ..fluid.layers import transpose, cast # noqa: F401 from ..fluid import layers import paddle from paddle.common_ops_import import core from paddle.common_ops_import import VarDesc from paddle import _C_ops __all__ = [] def matmul(x, y, transpose_x=False, transpose_y=False, name=None): """ Applies matrix multiplication to two tensors. `matmul` follows the complete broadcast rules, and its behavior is consistent with `np.matmul`. Currently, the input tensors' number of dimensions can be any, `matmul` can be used to achieve the `dot`, `matmul` and `batchmatmul`. The actual behavior depends on the shapes of :math:`x`, :math:`y` and the flag values of :attr:`transpose_x`, :attr:`transpose_y`. Specifically: - If a transpose flag is specified, the last two dimensions of the tensor are transposed. If the tensor is ndim-1 of shape, the transpose is invalid. If the tensor is ndim-1 of shape :math:`[D]`, then for :math:`x` it is treated as :math:`[1, D]`, whereas for :math:`y` it is the opposite: It is treated as :math:`[D, 1]`. The multiplication behavior depends on the dimensions of `x` and `y`. Specifically: - If both tensors are 1-dimensional, the dot product result is obtained. - If both tensors are 2-dimensional, the matrix-matrix product is obtained. - If the `x` is 1-dimensional and the `y` is 2-dimensional, a `1` is prepended to its dimension in order to conduct the matrix multiply. After the matrix multiply, the prepended dimension is removed. - If the `x` is 2-dimensional and `y` is 1-dimensional, the matrix-vector product is obtained. - If both arguments are at least 1-dimensional and at least one argument is N-dimensional (where N > 2), then a batched matrix multiply is obtained. If the first argument is 1-dimensional, a 1 is prepended to its dimension in order to conduct the batched matrix multiply and removed after. If the second argument is 1-dimensional, a 1 is appended to its dimension for the purpose of the batched matrix multiple and removed after. The non-matrix (exclude the last two dimensions) dimensions are broadcasted according the broadcast rule. For example, if input is a (j, 1, n, m) tensor and the other is a (k, m, p) tensor, out will be a (j, k, n, p) tensor. Args: x (Tensor): The input tensor which is a Tensor. y (Tensor): The input tensor which is a Tensor. transpose_x (bool): Whether to transpose :math:`x` before multiplication. transpose_y (bool): Whether to transpose :math:`y` before multiplication. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The output Tensor. Examples: .. 
code-block:: python import paddle import numpy as np # vector * vector x_data = np.random.random([10]).astype(np.float32) y_data = np.random.random([10]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [1] # matrix * vector x_data = np.random.random([10, 5]).astype(np.float32) y_data = np.random.random([5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10] # batched matrix * broadcasted vector x_data = np.random.random([10, 5, 2]).astype(np.float32) y_data = np.random.random([2]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 5] # batched matrix * batched matrix x_data = np.random.random([10, 5, 2]).astype(np.float32) y_data = np.random.random([10, 2, 5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 5, 5] # batched matrix * broadcasted matrix x_data = np.random.random([10, 1, 5, 2]).astype(np.float32) y_data = np.random.random([1, 3, 2, 5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 3, 5, 5] """ if in_dygraph_mode(): return _C_ops.final_state_matmul(x, y, transpose_x, transpose_y) if _in_legacy_dygraph(): op_type = 'matmul_v2' op = getattr(_C_ops, op_type) return op(x, y, 'trans_x', transpose_x, 'trans_y', transpose_y) attrs = { 'trans_x': transpose_x, 'trans_y': transpose_y, } def __check_input(x, y): var_names = {'x': x, 'y': y} for name, val in var_names.items(): check_variable_and_dtype( val, name, ['float16', 'float32', 'float64', 'complex64', 'complex128'], 'matmul') __check_input(x, y) helper = LayerHelper('matmul_v2', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matmul_v2', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs=attrs) return out def norm(x, p='fro', axis=None, keepdim=False, name=None): """ Returns the matrix norm (Frobenius) or vector norm (the 1-norm, the Euclidean or 2-norm, and in general the p-norm for p > 0) of a given tensor. .. note:: This norm API is different from `numpy.linalg.norm`. This api supports high-order input tensors (rank >= 3), and certain axis need to be pointed out to calculate the norm. But `numpy.linalg.norm` only supports 1-D vector or 2-D matrix as input tensor. For p-order matrix norm, this api actually treats matrix as a flattened vector to calculate the vector norm, NOT REAL MATRIX NORM. Args: x (Tensor): The input tensor could be N-D tensor, and the input data type could be float32 or float64. p (float|string, optional): Order of the norm. Supported values are `fro`, `0`, `1`, `2`, `inf`, `-inf` and any positive real number yielding the corresponding p-norm. Not supported: ord < 0 and nuclear norm. Default value is `fro`. axis (int|list|tuple, optional): The axis on which to apply norm operation. If axis is int or list(int)/tuple(int) with only one element, the vector norm is computed over the axis. If `axis < 0`, the dimension to norm operation is rank(input) + axis. If axis is a list(int)/tuple(int) with two elements, the matrix norm is computed over the axis. Defalut value is `None`. keepdim (bool, optional): Whether to reserve the reduced dimension in the output Tensor. 
The result tensor will have fewer dimension than the :attr:`input` unless :attr:`keepdim` is true, default value is False. name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: results of norm operation on the specified axis of input tensor, it's data type is the same as input's Tensor. Examples: .. code-block:: python import paddle import numpy as np shape=[2, 3, 4] np_input = np.arange(24).astype('float32') - 12 np_input = np_input.reshape(shape) x = paddle.to_tensor(np_input) #[[[-12. -11. -10. -9.] [ -8. -7. -6. -5.] [ -4. -3. -2. -1.]] # [[ 0. 1. 2. 3.] [ 4. 5. 6. 7.] [ 8. 9. 10. 11.]]] # compute frobenius norm along last two dimensions. out_fro = paddle.linalg.norm(x, p='fro', axis=[0,1]) # out_fro.numpy() [17.435596 16.911535 16.7332 16.911535] # compute 2-order vector norm along last dimension. out_pnorm = paddle.linalg.norm(x, p=2, axis=-1) #out_pnorm.numpy(): [[21.118711 13.190906 5.477226] # [ 3.7416575 11.224972 19.131126]] # compute 2-order norm along [0,1] dimension. out_pnorm = paddle.linalg.norm(x, p=2, axis=[0,1]) #out_pnorm.numpy(): [17.435596 16.911535 16.7332 16.911535] # compute inf-order norm out_pnorm = paddle.linalg.norm(x, p=np.inf) #out_pnorm.numpy() = [12.] out_pnorm = paddle.linalg.norm(x, p=np.inf, axis=0) #out_pnorm.numpy(): [[12. 11. 10. 9.] [8. 7. 6. 7.] [8. 9. 10. 11.]] # compute -inf-order norm out_pnorm = paddle.linalg.norm(x, p=-np.inf) #out_pnorm.numpy(): [0.] out_pnorm = paddle.linalg.norm(x, p=-np.inf, axis=0) #out_pnorm.numpy(): [[0. 1. 2. 3.] [4. 5. 6. 5.] [4. 3. 2. 1.]] """ def frobenius_norm(input, dim=None, keepdim=False, name=None): """ The frobenius norm OP is to calculate the frobenius norm of certain two dimensions of Tensor `input`. Args: input (Variable): Tensor, data type float32, float64. dim (list, optional): None for last two dimensions. keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False. """ if dim is not None and not (isinstance(dim, list) and len(dim) == 2): raise ValueError( "The dim of frobenius norm op should be None or two elements list!" ) if paddle.in_dynamic_mode(): if dim is None: return _C_ops.frobenius_norm(input, 'keep_dim', keepdim, 'reduce_all', True) return _C_ops.frobenius_norm(input, 'dim', dim, 'keep_dim', keepdim, 'reduce_all', False) attrs = {'dim': dim, 'keep_dim': keepdim, 'reduce_all': False} if dim is None: attrs['reduce_all'] = True check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'frobenius_norm') helper = LayerHelper('frobenius_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op( type='frobenius_norm', inputs={'X': input}, outputs={'Out': out}, attrs=attrs) return out def vector_norm(input, porder=None, axis=None, keepdim=False, asvector=False, name=None): """ Calculate the p-order vector norm for certain dimension of Tensor `input`. Args: input (Variable): Tensor, data type float32, float64. porder (float, optional): None for porder=2.0. axis (int, optional): None for last dimension. keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False. 
""" if paddle.in_dynamic_mode(): if axis is None: axis = -1 return _C_ops.p_norm(input, 'porder', porder, 'axis', axis, 'keepdim', keepdim, 'asvector', asvector) if porder is not None: check_type(porder, 'porder', (float, int), 'p_norm') if axis is not None: check_type(axis, 'axis', (int), 'p_norm') check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'p_norm') attrs = { 'axis': axis if axis is not None else -1, 'porder': float(porder) if porder is not None else 2.0, 'keepdim': keepdim, 'asvector': asvector, 'epsilon': 1e-12, } helper = LayerHelper('p_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op( type='p_norm', inputs={'X': input}, outputs={'Out': out}, attrs=attrs) return out def inf_norm(input, porder=None, axis=axis, keepdim=False, asvector=False, name=None): helper = LayerHelper('frobenius_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op(type='abs', inputs={'X': input}, outputs={'Out': out}) reduce_out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) reduce_all = True if axis == None or axis == [] or asvector == True else False axis = axis if axis != None and axis != [] else [0] reduce_type = 'reduce_max' if porder == np.float( 'inf') else 'reduce_min' helper.append_op( type=reduce_type, inputs={'X': out}, outputs={'Out': reduce_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) return reduce_out def p_matrix_norm(input, porder=1., axis=axis, keepdim=False, name=None): """ NOTE: This function actually treats the matrix as flattened vector to calculate vector norm instead of matrix norm. """ block = LayerHelper('norm', **locals()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) abs_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='abs', inputs={'X': input}, outputs={'Out': abs_out}) pow_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='pow', inputs={'X': abs_out}, outputs={'Out': pow_out}, attrs={'factor': porder}) sum_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': sum_out}, attrs={ 'dim': axis, 'keep_dim': keepdim, 'reduce_all': True if axis is None else False }) porder block.append_op( type='pow', inputs={'X': sum_out}, outputs={'Out': out}, attrs={'factor': float(1. / porder)}) return out if axis is None and p is not None: if isinstance(p, str): if p == "fro": return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name) else: raise ValueError( "only valid string values are 'fro', found {}".format(p)) elif isinstance(p, (int, float)): return vector_norm( x, porder=p, axis=axis, keepdim=keepdim, asvector=True, name=name) else: raise ValueError("only valid p type is string or float, found {}". 
format(type(p))) if isinstance(axis, tuple): axis = list(axis) if isinstance(axis, list) and len(axis) == 1: axis = axis[0] #calculate vector norm, where axis is int or list with only one integer if isinstance(axis, int): if isinstance(p, str): if p == "fro": return vector_norm( x, porder=2, axis=axis, keepdim=keepdim, asvector=False, name=name) else: raise ValueError( "only valid string values are 'fro', found {}".format(p)) elif isinstance(p, (int, float)): return vector_norm( x, axis=axis, porder=p, keepdim=keepdim, asvector=False, name=name) else: raise ValueError( "unspport p for p-order vector norm. except float, found {}". format(p)) #calculate matrix norm, where axis is list with two integers elif isinstance(axis, list) and len(axis) == 2: if p == "fro": return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name) elif p == np.inf or p == -np.inf: return inf_norm(x, porder=p, axis=axis, keepdim=keepdim, name=name) elif p == 0: raise ValueError( "just suport axis type int or list (length of list <=1) if p = 0, found {}". format(axis)) else: return p_matrix_norm( x, porder=p, axis=axis, keepdim=keepdim, name=name) else: raise ValueError( "except axis type int or list (length of list <=2), found {}". format(axis)) def dist(x, y, p=2, name=None): r""" This OP returns the p-norm of (x - y). It is not a norm in a strict sense, only as a measure of distance. The shapes of x and y must be broadcastable. The definition is as follows, for details, please refer to the `numpy's broadcasting <https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`_: - Each input has at least one dimension. - Match the two input dimensions from back to front, the dimension sizes must either be equal, one of them is 1, or one of them does not exist. Where, z = x - y, the shapes of x and y are broadcastable, then the shape of z can be obtained as follows: 1. If the number of dimensions of x and y are not equal, prepend 1 to the dimensions of the tensor with fewer dimensions. For example, The shape of x is [8, 1, 6, 1], the shape of y is [7, 1, 5], prepend 1 to the dimension of y. x (4-D Tensor): 8 x 1 x 6 x 1 y (4-D Tensor): 1 x 7 x 1 x 5 2. Determine the size of each dimension of the output z: choose the maximum value from the two input dimensions. z (4-D Tensor): 8 x 7 x 6 x 5 If the number of dimensions of the two inputs are the same, the size of the output can be directly determined in step 2. When p takes different values, the norm formula is as follows: When p = 0, defining $0^0=0$, the zero-norm of z is simply the number of non-zero elements of z. .. math:: ||z||_{0}=\lim_{p \\rightarrow 0}\sum_{i=1}^{m}|z_i|^{p} When p = inf, the inf-norm of z is the maximum element of z. .. math:: ||z||_\infty=\max_i |z_i| When p = -inf, the negative-inf-norm of z is the minimum element of z. .. math:: ||z||_{-\infty}=\min_i |z_i| Otherwise, the p-norm of z follows the formula, .. math:: ||z||_{p}=(\sum_{i=1}^{m}|z_i|^p)^{\\frac{1}{p}} Args: x (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64. y (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64. p (float, optional): The norm to be computed, its data type is float32 or float64. Default: 2. Returns: Tensor: Tensor that is the p-norm of (x - y). Examples: .. code-block:: python import paddle import numpy as np x = paddle.to_tensor(np.array([[3, 3],[3, 3]]), "float32") y = paddle.to_tensor(np.array([[3, 3],[3, 1]]), "float32") out = paddle.dist(x, y, 0) print(out) # out = [1.] out = paddle.dist(x, y, 2) print(out) # out = [2.] 
out = paddle.dist(x, y, float("inf")) print(out) # out = [2.] out = paddle.dist(x, y, float("-inf")) print(out) # out = [0.] """ check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'dist') check_variable_and_dtype(y, 'dtype', ['float32', 'float64'], 'dist') check_type(p, 'p', (float, int), 'dist') helper = LayerHelper("dist", **locals()) out = helper.create_variable_for_type_inference(x.dtype) inputs = {"X": [x], "Y": [y]} outputs = {'Out': [out]} attrs = {"p": float(p)} helper.append_op( type='dist', inputs=inputs, outputs={'Out': out}, attrs=attrs) return out def cond(x, p=None, name=None): """ Computes the condition number of a matrix or batches of matrices with respect to a matrix norm ``p``. Args: x (Tensor): The input tensor could be tensor of shape ``(*, m, n)`` where ``*`` is zero or more batch dimensions for ``p`` in ``(2, -2)``, or of shape ``(*, n, n)`` where every matrix is invertible for any supported ``p``. And the input data type could be ``float32`` or ``float64``. p (float|string, optional): Order of the norm. Supported values are `fro`, `nuc`, `1`, `-1`, `2`, `-2`, `inf`, `-inf`. Default value is `None`, meaning that the order of the norm is `2`. name (str, optional): The default value is `None`. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: computing results of condition number, its data type is the same as input Tensor ``x``. Examples: .. code-block:: python import paddle import numpy as np x = paddle.to_tensor([[1., 0, -1], [0, 1, 0], [1, 0, 1]]) # compute conditional number when p is None out = paddle.linalg.cond(x) # out.numpy() [1.4142135] # compute conditional number when order of the norm is 'fro' out_fro = paddle.linalg.cond(x, p='fro') # out_fro.numpy() [3.1622777] # compute conditional number when order of the norm is 'nuc' out_nuc = paddle.linalg.cond(x, p='nuc') # out_nuc.numpy() [9.2426405] # compute conditional number when order of the norm is 1 out_1 = paddle.linalg.cond(x, p=1) # out_1.numpy() [2.] # compute conditional number when order of the norm is -1 out_minus_1 = paddle.linalg.cond(x, p=-1) # out_minus_1.numpy() [1.] # compute conditional number when order of the norm is 2 out_2 = paddle.linalg.cond(x, p=2) # out_2.numpy() [1.4142135] # compute conditional number when order of the norm is -1 out_minus_2 = paddle.linalg.cond(x, p=-2) # out_minus_2.numpy() [0.70710677] # compute conditional number when order of the norm is inf out_inf = paddle.linalg.cond(x, p=np.inf) # out_inf.numpy() [2.] # compute conditional number when order of the norm is -inf out_minus_inf = paddle.linalg.cond(x, p=-np.inf) # out_minus_inf.numpy() [1.] 
a = paddle.to_tensor(np.random.randn(2, 4, 4).astype('float32')) # a.numpy() # [[[ 0.14063153 -0.996288 0.7996131 -0.02571543] # [-0.16303636 1.5534962 -0.49919784 -0.04402903] # [-1.1341571 -0.6022629 0.5445269 0.29154757] # [-0.16816919 -0.30972657 1.7521842 -0.5402487 ]] # [[-0.58081484 0.12402827 0.7229862 -0.55046535] # [-0.15178485 -1.1604939 0.75810957 0.30971205] # [-0.9669573 1.0940945 -0.27363303 -0.35416734] # [-1.216529 2.0018666 -0.7773689 -0.17556527]]] a_cond_fro = paddle.linalg.cond(a, p='fro') # a_cond_fro.numpy() [31.572273 28.120834] b = paddle.to_tensor(np.random.randn(2, 3, 4).astype('float64')) # b.numpy() # [[[ 1.61707487 0.46829144 0.38130416 0.82546736] # [-1.72710298 0.08866375 -0.62518804 0.16128892] # [-0.02822879 -1.67764516 0.11141444 0.3220113 ]] # [[ 0.22524372 0.62474921 -0.85503233 -1.03960523] # [-0.76620689 0.56673047 0.85064753 -0.45158196] # [ 1.47595418 2.23646462 1.5701758 0.10497519]]] b_cond_2 = paddle.linalg.cond(b, p=2) # b_cond_2.numpy() [3.30064451 2.51976252] """ def mat_norm(input, porder=1., axis=None): """ NOTE: Calculate the matrix norm of a square matrix or batches of square matrices, when porder is in (1, -1, inf, -inf) """ reduce_all = True if axis is None or axis == [] else False axis = axis if axis != None and axis != [] else [0] keepdim = False if paddle.in_dynamic_mode(): abs_out = _C_ops.abs(input) sum_out = _C_ops.reduce_sum(abs_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == 1 or porder == np.inf: return _C_ops.reduce_max(sum_out, 'dim', [-1], 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == -1 or porder == -np.inf: return _C_ops.reduce_min(sum_out, 'dim', [-1], 'keepdim', keepdim, 'reduce_all', reduce_all) block = LayerHelper('norm', **locals()) abs_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='abs', inputs={'X': input}, outputs={'Out': abs_out}) block.append_op( type='reduce_sum', inputs={'X': abs_out}, outputs={'Out': sum_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) if porder == 1 or porder == np.inf: block.append_op( type='reduce_max', inputs={'X': sum_out}, outputs={'Out': out}, attrs={ 'dim': [-1], 'keep_dim': keepdim, 'reduce_all': reduce_all }) if porder == -1 or porder == -np.inf: block.append_op( type='reduce_min', inputs={'X': sum_out}, outputs={'Out': out}, attrs={ 'dim': [-1], 'keep_dim': keepdim, 'reduce_all': reduce_all }) return out def fro_norm(input, porder=2, axis=[-1]): """ NOTE: Calculate the frobenius norm of a square matrix or batches of square matrices. """ reduce_all = True if axis is None or axis == [] else False keepdim = False if paddle.in_dynamic_mode(): pow_out = _C_ops.pow(input, 'factor', porder) sum_out_1 = _C_ops.reduce_sum(pow_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) sum_out_2 = _C_ops.reduce_sum(sum_out_1, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) return _C_ops.pow(sum_out_2, 'factor', float(1. 
/ porder)) block = LayerHelper('norm', **locals()) pow_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out_1 = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out_2 = block.create_variable_for_type_inference( dtype=block.input_dtype()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='pow', inputs={'X': input}, outputs={'Out': pow_out}, attrs={'factor': porder}) block.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': sum_out_1}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='reduce_sum', inputs={'X': sum_out_1}, outputs={'Out': sum_out_2}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='pow', inputs={'X': sum_out_2}, outputs={'Out': out}, attrs={'factor': float(1. / porder)}) return out def svd_norm(input, porder, axis=[-1]): """ NOTE: Calculate the matrix norm, which is related to singular values, of a matrix or batches of matrices, including nuclear norm, 2-norm and (-2)-norm. """ reduce_all = True if axis is None or axis == [] else False keepdim = False u, s, vh = svd(input, full_matrices=False) if paddle.in_dynamic_mode(): if porder == "nuc": return _C_ops.reduce_sum(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) max_out = _C_ops.reduce_max(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) min_out = _C_ops.reduce_min(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == 2: return _C_ops.elementwise_div(max_out, min_out, 'aixs', axis, 'use_mkldnn', False) if porder == -2: return _C_ops.elementwise_div(min_out, max_out, 'aixs', axis, 'use_mkldnn', False) block = LayerHelper('norm', **locals()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) if porder == "nuc": block.append_op( type='reduce_sum', inputs={'X': s}, outputs={'Out': out}, attrs={ 'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all }) return out max_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) min_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='reduce_max', inputs={'X': s}, outputs={'Out': max_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='reduce_min', inputs={'X': s}, outputs={'Out': min_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) if porder == 2: block.append_op( type='elementwise_div', inputs={'X': max_out, 'Y': min_out}, outputs={'Out': out}, attrs={'aixs': axis, 'use_mkldnn': False}) return out if porder == -2: block.append_op( type='elementwise_div', inputs={'X': min_out, 'Y': max_out}, outputs={'Out': out}, attrs={'aixs': axis, 'use_mkldnn': False}) return out def empty_tensor(input, shape): if paddle.in_dynamic_mode(): return input.reshape(shape) raise ValueError("only support x is nonempty tensor in static mode") x_shape = list(x.shape) if not len(x_shape) >= 2: raise ValueError("input should be a matrix or batches of matrices, " + "but the dimention of received input is {}".format( len(x_shape))) if p == None: p = 2 x_size = 0 if (0 in x_shape) else 1 if p in ("fro", "nuc", 1, -1, np.inf, -np.inf): if x_shape[len(x_shape) - 1] == x_shape[len(x_shape) - 2]: if x_size == 0: return empty_tensor(x, x_shape[:-2]) x_inv = x.inverse() if p == "fro": return fro_norm(x) * fro_norm(x_inv) if p == "nuc": return svd_norm(x, p) * svd_norm(x_inv, p) if p in (1, -1): return mat_norm( x, 
porder=p, axis=[-2]) * mat_norm( x_inv, porder=p, axis=[-2]) if p in (np.inf, -np.inf): return mat_norm( x, porder=p, axis=[-1]) * mat_norm( x_inv, porder=p, axis=[-1]) else: raise ValueError("only support p is {} when input is a ".format(p) + "square matrix or batches of square matrices") elif p in (2, -2): if x_size == 0: return empty_tensor(x, x_shape[:-2]) return svd_norm(x, porder=p) else: raise ValueError( "unsupported {} for p, only supporting ('fro', 'nuc', ".format( p) + "1, -1, 2, -2, inf, -inf) or none") def dot(x, y, name=None): """ This operator calculates inner product for vectors. .. note:: Support 1-d and 2-d Tensor. When it is 2d, the first dimension of this matrix is the batch dimension, which means that the vectors of multiple batches are dotted. Parameters: x(Tensor): 1-D or 2-D ``Tensor``. Its dtype should be ``float32``, ``float64``, ``int32``, ``int64`` y(Tensor): 1-D or 2-D ``Tensor``. Its dtype soulde be ``float32``, ``float64``, ``int32``, ``int64`` name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. Details: :ref:`api_guide_Name` Returns: Tensor: the calculated result Tensor. Examples: .. code-block:: python import paddle import numpy as np x_data = np.random.uniform(0.1, 1, [10]).astype(np.float32) y_data = np.random.uniform(1, 3, [10]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.dot(x, y) print(z) """ op_type = 'dot' # skip var type check in dygraph mode to improve efficiency if paddle.in_dynamic_mode(): op = getattr(_C_ops, op_type) return op(x, y) assert x is not None, 'x cannot be None in {}'.format(op_type) assert y is not None, 'y cannot be None in {}'.format(op_type) check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], op_type) check_variable_and_dtype(y, 'y', ['float32', 'float64', 'int32', 'int64'], op_type) helper = LayerHelper(op_type, **locals()) if name is None: out = helper.create_variable_for_type_inference(dtype=x.dtype) else: out = helper.create_variable( name=name, dtype=x.dtype, persistable=False) helper.append_op( type="dot", inputs={'X': x, 'Y': y}, attrs={}, outputs={"Out": out}) return out def cov(x, rowvar=True, ddof=True, fweights=None, aweights=None, name=None): """ Estimate the covariance matrix of the input variables, given data and weights. A covariance matrix is a square matrix, indicate the covariance of each pair variables in the input matrix. For example, for an N-dimensional samples X=[x1,x2,…xN]T, then the covariance matrix element Cij is the covariance of xi and xj. The element Cii is the variance of xi itself. Parameters: x(Tensor): A N-D(N<=2) Tensor containing multiple variables and observations. By default, each row of x represents a variable. Also see rowvar below. rowvar(Bool, optional): If rowvar is True (default), then each row represents a variable, with observations in the columns. Default: True ddof(Bool, optional): If ddof=True will return the unbiased estimate, and ddof=False will return the simple average. Default: True fweights(Tensor, optional): 1-D Tensor of integer frequency weights; The number of times each observation vector should be repeated. Default: None aweights(Tensor, optional): 1-D Tensor of observation vector weights. How important of the observation vector, larger data means this element is more important. Default: None name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. 
Details: :ref:`api_guide_Name` Returns: Tensor: The covariance matrix Tensor of the variables. Examples: .. code-block:: python import paddle xt = paddle.rand((3,4)) paddle.linalg.cov(xt) ''' Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, [[0.07918842, 0.06127326, 0.01493049], [0.06127326, 0.06166256, 0.00302668], [0.01493049, 0.00302668, 0.01632146]]) ''' """ op_type = 'cov' if len(x.shape) > 2 or len(x.shape) < 1: raise ValueError( "Input(x) only support N-D (1<=N<=2) tensor in cov, but received " "length of Input(input) is %s." % len(x.shape)) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cov') nx = x if len(x.shape) == 1: nx = x.reshape((1, -1)) if not rowvar and nx.shape[0] != 1: nx = nx.t() w = None observation_num = nx.shape[1] if fweights is not None: w = fweights.astype(nx.dtype) if len(w.shape) > 1: raise ValueError( "Input(fweights) only support N-D (N<=1) tensor in cov, but received " "shape of Input(input) is %s." % len(fweights.shape)) if fweights.shape[0] != observation_num: raise ValueError( "The number of Input(fweights) should equal to x's dim[1]: {}, but received " "size of Input(fweights) is {}.".format(observation_num, fweights.shape[0])) if fweights.min() < 0: raise ValueError( "The value of Input(fweights) cannot be negtive, but received " "min of Input(fweights) is {}.".format(fweights.min())) if not paddle.all(fweights == paddle.round(fweights.astype('float64'))): raise ValueError("Input(fweights) must be integer ") if aweights is not None: aw = aweights.astype(nx.dtype) if len(aw.shape) > 1: raise ValueError( "Input(aweights) only support N-D (N<=1) tensor in cov, but received " "length of Input(input) is %s." % len(aweights.shape)) check_variable_and_dtype(aweights, 'dtype', ['float32', 'float64'], 'cov') if aweights.shape[0] != observation_num: raise ValueError( "The number of Input(aweights) should equal to x's dim[1]: {}, but received " "size of Input(aweights) is {}.".format(observation_num, aweights.shape[0])) if aweights.min() < 0: raise ValueError( "The value of Input(aweights) cannot be negtive, but received " "min of Input(aweights) is {}.".format(aweights.min())) if w is not None: w = w * aw else: w = aw w_sum = paddle.to_tensor(observation_num, dtype=nx.dtype) if fweights is not None or aweights is not None: w_sum = w.sum() if w_sum.item() == 0: raise ValueError("The sum of weights is zero, can't be normalized.") if w is not None: nx_w = nx * w avg = (nx_w).sum(axis=1) / w_sum else: avg = nx.sum(axis=1) / w_sum nx_w = nx if w is not None and aweights is not None and ddof == True: norm_factor = w_sum - (w * aweights).sum() / w_sum else: norm_factor = w_sum - ddof if norm_factor <= 0: norm_factor = paddle.to_tensor(0, dtype=nx.dtype) nx = nx - avg.unsqueeze(1) xxt = paddle.mm(nx, nx_w.t().conj()) cov = paddle.divide(xxt, norm_factor).squeeze() return cov def t(input, name=None): """ Transpose <=2-D tensor. 0-D and 1-D tensors are returned as it is and 2-D tensor is equal to the paddle.transpose function which perm dimensions set 0 and 1. Args: input (Tensor): The input Tensor. It is a N-D (N<=2) Tensor of data types float16, float32, float64, int32. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` Returns: Tensor: A transposed n-D Tensor, with data type being float16, float32, float64, int32, int64. For Example: .. 
code-block:: text # Example 1 (0-D tensor) x = tensor([0.79]) paddle.t(x) = tensor([0.79]) # Example 2 (1-D tensor) x = tensor([0.79, 0.84, 0.32]) paddle.t(x) = tensor([0.79, 0.84, 0.32]) # Example 3 (2-D tensor) x = tensor([0.79, 0.84, 0.32], [0.64, 0.14, 0.57]) paddle.t(x) = tensor([0.79, 0.64], [0.84, 0.14], [0.32, 0.57]) Examples: .. code-block:: python import paddle x = paddle.ones(shape=[2, 3], dtype='int32') x_transposed = paddle.t(x) print(x_transposed.shape) # [3, 2] """ if len(input.shape) > 2: raise ValueError( "Input(input) only support N-D (N<=2) tensor, but received " "length of Input(input) is %s. Perhaps you can use paddle." "tensor.transpose() instead." % len(input.shape)) if paddle.in_dynamic_mode(): if len(input.shape) == 1: return input # 2-D tensor perm = [1, 0] out, _ = _C_ops.transpose2(input, 'axis', perm) return out check_variable_and_dtype( input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'], 'transpose') helper = LayerHelper('t', **locals()) out = helper.create_variable_for_type_inference(input.dtype) input_shape = helper.create_variable_for_type_inference(input.dtype) if len(input.shape) == 1: out = input else: helper.append_op( type='transpose2', inputs={'X': [input]}, outputs={'Out': [out], 'XShape': [input_shape]}, attrs={'axis': [1, 0]}) return out def cross(x, y, axis=None, name=None): """ Computes the cross product between two tensors along an axis. Inputs must have the same shape, and the length of their axes should be equal to 3. If `axis` is not given, it defaults to the first axis found with the length 3. Args: x (Tensor): The first input tensor. y (Tensor): The second input tensor. axis (int, optional): The axis along which to compute the cross product. It defaults to the first axis found with the length 3. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor. A Tensor with same data type as `x`. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0], [3.0, 3.0, 3.0]]) y = paddle.to_tensor([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]) z1 = paddle.cross(x, y) # [[-1. -1. -1.] # [ 2. 2. 2.] # [-1. -1. -1.]] z2 = paddle.cross(x, y, axis=1) # [[0. 0. 0.] # [0. 0. 0.] # [0. 0. 0.]] """ if in_dygraph_mode(): return _C_ops.final_state_cross(x, y, axis) else: if _in_legacy_dygraph(): if axis is not None: return _C_ops.cross(x, y, 'dim', axis) else: return _C_ops.cross(x, y) else: helper = LayerHelper("cross", **locals()) out = helper.create_variable_for_type_inference(x.dtype) attrs = dict() attrs['dim'] = axis helper.append_op( type='cross', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs=attrs) return out def cholesky(x, upper=False, name=None): r""" Computes the Cholesky decomposition of one symmetric positive-definite matrix or batches of symmetric positive-definite matrice. If `upper` is `True`, the decomposition has the form :math:`A = U^{T}U` , and the returned matrix :math:`U` is upper-triangular. Otherwise, the decomposition has the form :math:`A = LL^{T}` , and the returned matrix :math:`L` is lower-triangular. Args: x (Tensor): The input tensor. Its shape should be `[*, M, M]`, where * is zero or more batch dimensions, and matrices on the inner-most 2 dimensions all should be symmetric positive-definite. Its data type should be float32 or float64. upper (bool): The flag indicating whether to return upper or lower triangular matrices. Default: False. 
Returns: Tensor: A Tensor with same shape and data type as `x`. It represents \ triangular matrices generated by Cholesky decomposition. Examples: .. code-block:: python import paddle import numpy as np a = np.random.rand(3, 3) a_t = np.transpose(a, [1, 0]) x_data = np.matmul(a, a_t) + 1e-03 x = paddle.to_tensor(x_data) out = paddle.linalg.cholesky(x, upper=False) print(out) # [[1.190523 0. 0. ] # [0.9906703 0.27676893 0. ] # [1.25450498 0.05600871 0.06400121]] """ if paddle.in_dynamic_mode(): return _C_ops.cholesky(x, "upper", upper) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cholesky') check_type(upper, 'upper', bool, 'cholesky') helper = LayerHelper('cholesky', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='cholesky', inputs={'X': [x]}, outputs={'Out': out}, attrs={'upper': upper}) return out def matrix_rank(x, tol=None, hermitian=False, name=None): r""" Computes the rank of a matrix. The rank of a matrix is the number of singular values that are greater than the specified `tol` threshold when hermitian=False, or the number of eigenvalues in absolute value that are greater than the specified `tol` threshold when hermitian=True. Args: x (Tensor): The input tensor. Its shape should be `[..., m, n]`, where `...` is zero or more batch dimensions. If `x` is a batch of matrices then the output has the same batch dimensions. The data type of `x` should be float32 or float64. tol (float,Tensor,optional): the tolerance value. Default: None. If `tol` is not specified, and `sigma` is the largest singular value (or eigenvalues in absolute value), and `eps` is the epsilon value for the dtype of `x`, then `tol` is computed with formula `tol=sigma * max(m,n) * eps`. Note that if `x` is a batch of matrices, `tol` is computed this way for every batch. hermitian (bool,optional): indicates whether `x` is Hermitian. Default: False. When hermitian=True, `x` is assumed to be Hermitian, enabling a more efficient method for finding eigenvalues, but `x` is not checked inside the function. Instead, We just use the lower triangular of the matrix to compute. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: Rank of tensor x. Examples: .. 
code-block:: python import paddle a = paddle.eye(10) b = paddle.linalg.matrix_rank(a) print(b) # b = [10] c = paddle.ones(shape=[3, 4, 5, 5]) d = paddle.linalg.matrix_rank(c, tol=0.01, hermitian=True) print(d) # d = [[1, 1, 1, 1], # [1, 1, 1, 1], # [1, 1, 1, 1]] """ if paddle.in_dynamic_mode(): if tol is None: tol_tensor = None tol_attr = 0.0 use_default_tol = True elif isinstance(tol, Variable): if tol.dtype != x.dtype: tol_tensor = cast(tol, x.dtype) else: tol_tensor = tol tol_attr = 0.0 use_default_tol = False else: tol_tensor = None tol_attr = float(tol) use_default_tol = False return _C_ops.matrix_rank(x, tol_tensor, "tol", tol_attr, 'hermitian', hermitian, 'use_default_tol', use_default_tol) inputs = {} attrs = {} check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'matrix_rank') inputs['X'] = x if tol is None: attrs['use_default_tol'] = True elif isinstance(tol, Variable): check_variable_and_dtype(tol, 'tol', ['float32'], 'matrix_rank') attrs['use_default_tol'] = False if tol.dtype != x.dtype: inputs['TolTensor'] = cast(tol, x.dtype) else: inputs['TolTensor'] = tol else: check_type(tol, 'tol', float, 'matrix_rank') attrs['use_default_tol'] = False attrs['tol'] = tol check_type(hermitian, 'hermitian', bool, 'matrix_rank') attrs['hermitian'] = hermitian helper = LayerHelper('matrix_rank', **locals()) out = helper.create_variable_for_type_inference(dtype='int32') helper.append_op( type='matrix_rank', inputs=inputs, outputs={'Out': out}, attrs=attrs) return out def bmm(x, y, name=None): """ Applies batched matrix multiplication to two tensors. Both of the two input tensors must be three-dementional and share the same batch size. if x is a (b, m, k) tensor, y is a (b, k, n) tensor, the output will be a (b, m, n) tensor. Args: x (Tensor): The input Tensor. y (Tensor): The input Tensor. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The product Tensor. Examples: .. code-block:: python import paddle # In imperative mode: # size x: (2, 2, 3) and y: (2, 3, 2) x = paddle.to_tensor([[[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]], [[3.0, 3.0, 3.0], [4.0, 4.0, 4.0]]]) y = paddle.to_tensor([[[1.0, 1.0],[2.0, 2.0],[3.0, 3.0]], [[4.0, 4.0],[5.0, 5.0],[6.0, 6.0]]]) out = paddle.bmm(x, y) #output size: (2, 2, 2) #output value: #[[[6.0, 6.0],[12.0, 12.0]],[[45.0, 45.0],[60.0, 60.0]]] out_np = out.numpy() """ x_shape = x.shape y_shape = y.shape if not len(x_shape) == len(y_shape) == 3: raise ValueError( "x and y should be 3-dimensional. But received x's dimention: {}, y's dimention: {}". format(x_shape, y_shape)) if x_shape[2] != y_shape[1]: raise ValueError( "x's width must be equal with y's height. But received x's shape: {}, y's shape: {}". format(x_shape, y_shape)) if x_shape[0] != y_shape[0]: raise ValueError( "x's batch (shape[0]) must be equal with y's batch (shape[0]). But received x's shape: {}, y's shape: {}". format(x_shape, y_shape)) if paddle.in_dynamic_mode(): return _C_ops.bmm(x, y) helper = LayerHelper('bmm', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type='bmm', inputs={'X': x, 'Y': y}, outputs={'Out': out}) return out # MASKED: histogram function (lines 1376-1414) def bincount(x, weights=None, minlength=0, name=None): """ Computes frequency of each value in the input tensor. Args: x (Tensor): A Tensor with non-negative integer. Should be 1-D tensor. weights (Tensor, optional): Weight for each value in the input tensor. Should have the same shape as input. Default is None. 
minlength (int, optional): Minimum number of bins. Should be non-negative integer. Default is 0. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor of frequency. Examples: .. code-block:: python import paddle x = paddle.to_tensor([1, 2, 1, 4, 5]) result1 = paddle.bincount(x) print(result1) # [0, 2, 1, 0, 1, 1] w = paddle.to_tensor([2.1, 0.4, 0.1, 0.5, 0.5]) result2 = paddle.bincount(x, weights=w) print(result2) # [0., 2.19999981, 0.40000001, 0., 0.50000000, 0.50000000] """ if x.dtype not in [paddle.int32, paddle.int64]: raise TypeError("Elements in Input(x) should all be integers") if paddle.in_dynamic_mode(): return _C_ops.bincount(x, weights, "minlength", minlength) helper = LayerHelper('bincount', **locals()) check_variable_and_dtype(x, 'X', ['int32', 'int64'], 'bincount') if weights is not None: check_variable_and_dtype(weights, 'Weights', ['int32', 'int64', 'float32', 'float64'], 'bincount') out = helper.create_variable_for_type_inference(dtype=weights.dtype) else: out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='bincount', inputs={'X': x, 'Weights': weights}, outputs={'Out': out}, attrs={'minlength': minlength}) return out def mv(x, vec, name=None): """ Performs a matrix-vector product of the matrix x and the vector vec. Args: x (Tensor): A tensor with shape :math:`[M, N]` , The data type of the input Tensor x should be one of float32, float64. vec (Tensor): A tensor with shape :math:`[N]` , The data type of the input Tensor x should be one of float32, float64. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor which is producted by x and vec. Examples: .. code-block:: python # x: [M, N], vec: [N] # paddle.mv(x, vec) # out: [M] import numpy as np import paddle x_data = np.array([[2, 1, 3], [3, 0, 1]]).astype("float64") x = paddle.to_tensor(x_data) vec_data = np.array([3, 5, 1]) vec = paddle.to_tensor(vec_data).astype("float64") out = paddle.mv(x, vec) """ if in_dygraph_mode(): return _C_ops.final_state_mv(x, vec) else: if _in_legacy_dygraph(): out = _C_ops.mv(x, vec) return out else: def __check_input(x, vec): var_names = {'x': x, 'vec': vec} for name, val in var_names.items(): check_variable_and_dtype(val, name, ['float32', 'float64'], 'mv') x_shape = list(x.shape) vec_shape = list(vec.shape) if len(x_shape) != 2: raise ValueError( "x should be 2-dimensional. But received x's dimention: {}". format(x_shape)) if len(vec_shape) != 1: raise ValueError( "vec should be 1-dimensional. But received vec's dimention: {}". format(vec_shape)) __check_input(x, vec) helper = LayerHelper('mv', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='mv', inputs={'X': x, 'Vec': vec}, outputs={'Out': out}) return out def det(x, name=None): """ Calculates determinant value of a square matrix or batches of square matrices. Args: x (Tensor): input (Tensor): the input matrix of size `(n, n)` or the batch of matrices of size `(*, n, n)` where `*` is one or more batch dimensions. Returns: y (Tensor):the determinant value of a square matrix or batches of square matrices. Examples: .. 
code-block:: python import paddle x = paddle.randn([3,3,3]) A = paddle.linalg.det(x) print(A) # [ 0.02547996, 2.52317095, -6.15900707]) """ if paddle.in_dynamic_mode(): return _C_ops.determinant(x) check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'det') input_shape = list(x.shape) assert len(input_shape) >= 2, \ "The x must be at least 2-dimensional, " \ "but received Input x's dimensional: %s.\n" % \ len(input_shape) assert (input_shape[-1] == input_shape[-2]), \ "Expect squared input," \ "but received %s by %s matrix.\n" \ %(input_shape[-2], input_shape[-1]) \ helper = LayerHelper('determinant', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='determinant', inputs={'Input': [x]}, outputs={'Out': [out]}) return out def slogdet(x, name=None): """ Calculates the sign and natural logarithm of the absolute value of a square matrix's or batches square matrices' determinant. The determinant can be computed with ``sign * exp(logabsdet) Supports input of float, double Note that for matrices that have zero determinant, this returns ``(0, -inf)`` Args: x (Tensor): the batch of matrices of size :math:`(*, n, n)` where math:`*` is one or more batch dimensions. Returns: y (Tensor): A tensor containing the sign of the determinant and the natural logarithm of the absolute value of determinant, respectively. Examples: .. code-block:: python import paddle x = paddle.randn([3,3,3]) A = paddle.linalg.slogdet(x) print(A) # [[ 1. , 1. , -1. ], # [-0.98610914, -0.43010661, -0.10872950]]) """ if paddle.in_dynamic_mode(): return _C_ops.slogdeterminant(x) check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'slogdet') input_shape = list(x.shape) assert len(input_shape) >= 2, \ "The x must be at least 2-dimensional, " \ "but received Input x's dimensional: %s.\n" % \ len(input_shape) assert (input_shape[-1] == input_shape[-2]), \ "Expect squared input," \ "but received %s by %s matrix.\n" \ %(input_shape[-2], input_shape[-1]) \ helper = LayerHelper('slogdeterminant', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='slogdeterminant', inputs={'Input': [x]}, outputs={'Out': [out]}) return out def svd(x, full_matrices=False, name=None): r""" Computes the singular value decomposition of one matrix or a batch of regular matrices. Let :math:`X` be the input matrix or a batch of input matrices, the output should satisfies: .. math:: X = U * diag(S) * VT Args: x (Tensor): The input tensor. Its shape should be `[..., N, M]`, where `...` is zero or more batch dimensions. N and M can be arbitraty positive number. Note that if x is sigular matrices, the grad is numerical instable. The data type of x should be float32 or float64. full_matrices (bool): A flag to control the behavor of svd. If full_matrices = True, svd op will compute full U and V matrics, which means shape of U is `[..., N, N]`, shape of V is `[..., M, M]`. K = min(M, N). If full_matrices = False, svd op will use a economic method to store U and V. which means shape of U is `[..., N, K]`, shape of V is `[..., M, K]`. K = min(M, N). name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tuple of 3 tensors: (U, S, VH). VH is the conjugate transpose of V. S is the singlar value vectors of matrics with shape `[..., K]` Examples: .. 
code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [1.0, 3.0], [4.0, 6.0]]).astype('float64') x = x.reshape([3, 2]) u, s, vh = paddle.linalg.svd(x) print (u) #U = [[ 0.27364809, -0.21695147 ], # [ 0.37892198, -0.87112408 ], # [ 0.8840446 , 0.44053933 ]] print (s) #S = [8.14753743, 0.78589688] print (vh) #VT= [[ 0.51411221, 0.85772294], # [ 0.85772294, -0.51411221]] # one can verify : U * S * VT == X # U * UH == I # V * VH == I """ if paddle.in_dynamic_mode(): return _C_ops.svd(x, 'full_matrices', full_matrices) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'svd') check_type(full_matrices, 'full_matrices', bool, 'svd') helper = LayerHelper('svd', **locals()) u = helper.create_variable_for_type_inference(dtype=x.dtype) vh = helper.create_variable_for_type_inference(dtype=x.dtype) s = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['full_matrices'] = full_matrices helper.append_op( type='svd', inputs={'X': [x]}, outputs={'U': u, 'VH': vh, 'S': s}, attrs=attrs, ) return u, s, vh def matrix_power(x, n, name=None): r""" Computes the n-th power of a square matrix or a batch of square matrices. Let :math:`X` be a sqaure matrix or a batch of square matrices, :math:`n` be an exponent, the equation should be: .. math:: Out = X ^ {n} Specifically, - If `n > 0`, it returns the matrix or a batch of matrices raised to the power of `n`. - If `n = 0`, it returns the identity matrix or a batch of identity matrices. - If `n < 0`, it returns the inverse of each matrix (if invertible) raised to the power of `abs(n)`. Args: x (Tensor): A square matrix or a batch of square matrices to be raised to power `n`. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. n (int): The exponent. It can be any positive, negative integer or zero. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The n-th power of the matrix (or the batch of matrices) `x`. Its data type should be the same as that of `x`. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1, 2, 3], [1, 4, 9], [1, 8, 27]], dtype='float64') print(paddle.linalg.matrix_power(x, 2)) # [[6. , 34. , 102.], # [14. , 90. , 282.], # [36. , 250., 804.]] print(paddle.linalg.matrix_power(x, 0)) # [[1., 0., 0.], # [0., 1., 0.], # [0., 0., 1.]] print(paddle.linalg.matrix_power(x, -2)) # [[ 12.91666667, -12.75000000, 2.83333333 ], # [-7.66666667 , 8. , -1.83333333 ], # [ 1.80555556 , -1.91666667 , 0.44444444 ]] """ if paddle.in_dynamic_mode(): return _C_ops.matrix_power(x, "n", n) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'matrix_power') check_type(n, 'n', int, 'matrix_power') helper = LayerHelper('matrix_power', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matrix_power', inputs={'X': x}, outputs={'Out': out}, attrs={'n': n}) return out def qr(x, mode="reduced", name=None): r""" Computes the QR decomposition of one matrix or batches of matrice (backward is unsupported now). Args: x (Tensor): The input tensor. Its shape should be `[..., M, N]`, where ... is zero or more batch dimensions. M and N can be arbitrary positive number. The data type of x should be float32 or float64. mode (str, optional): A flag to control the behavior of qr, the default is "reduced". 
Suppose x's shape is `[..., M, N]` and denoting `K = min(M, N)`: If mode = "reduced", qr op will return reduced Q and R matrices, which means Q's shape is `[..., M, K]` and R's shape is `[..., K, N]`. If mode = "complete", qr op will return complete Q and R matrices, which means Q's shape is `[..., M, M]` and R's shape is `[..., M, N]`. If mode = "r", qr op will only return reduced R matrix, which means R's shape is `[..., K, N]`. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: If mode = "reduced" or mode = "complete", qr will return a two tensor-tuple, which represents Q and R. If mode = "r", qr will return a tensor which represents R. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') q, r = paddle.linalg.qr(x) print (q) print (r) # Q = [[-0.16903085, 0.89708523], # [-0.50709255, 0.27602622], # [-0.84515425, -0.34503278]]) # R = [[-5.91607978, -7.43735744], # [ 0. , 0.82807867]]) # one can verify : X = Q * R ; """ if paddle.in_dynamic_mode(): q, r = _C_ops.qr(x, 'mode', mode) if mode == "r": return r else: return q, r check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'qr') check_type(mode, 'mode', str, 'qr') helper = LayerHelper('qr', **locals()) q = helper.create_variable_for_type_inference(dtype=x.dtype) r = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['mode'] = mode helper.append_op( type='qr', inputs={'X': [x]}, outputs={'Q': q, 'R': r}, attrs=attrs) if mode == "r": return r else: return q, r def lu(x, pivot=True, get_infos=False, name=None): r""" Computes the LU factorization of an N-D(N>=2) matrix x. Returns the LU factorization(inplace x) and Pivots. low triangular matrix L and upper triangular matrix U are combined to a single LU matrix. Pivoting is done if pivot is set to True. P mat can be get by pivots: # ones = eye(rows) #eye matrix of rank rows # for i in range(cols): # swap(ones[i], ones[pivots[i]]) # return ones Args: X (Tensor): the tensor to factor of N-dimensions(N>=2). pivot (bool, optional): controls whether pivoting is done. Default: True. get_infos (bool, optional): if set to True, returns an info IntTensor. Default: False. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: factorization (Tensor): LU matrix, the factorization of input X. pivots (IntTensor): the pivots of size(∗(N-2), min(m,n)). `pivots` stores all the intermediate transpositions of rows. The final permutation `perm` could be reconstructed by this, details refer to upper example. infos (IntTensor, optional): if `get_infos` is `True`, this is a tensor of size (∗(N-2)) where non-zero values indicate whether factorization for the matrix or each minibatch has succeeded or failed. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') lu,p,info = paddle.linalg.lu(x, get_infos=True) # >>> lu: # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. 
], # [0.20000000, 0.80000000], # [0.60000000, 0.50000000]]) # >>> p # Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # [3, 3]) # >>> info # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # 0) P,L,U = paddle.linalg.lu_unpack(lu,p) # >>> P # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[0., 1., 0.], # [0., 0., 1.], # [1., 0., 0.]]), # >>> L # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[1. , 0. ], # [0.20000000, 1. ], # [0.60000000, 0.50000000]]), # >>> U # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0. , 0.80000000]])) # one can verify : X = P @ L @ U ; """ if paddle.in_dynamic_mode(): LU, Piv, Info = _C_ops.lu(x, 'pivots', pivot) if get_infos: return LU, Piv, Info else: return LU, Piv check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu') helper = LayerHelper('lu', **locals()) lu = helper.create_variable_for_type_inference(dtype=x.dtype) p = helper.create_variable_for_type_inference(dtype='int') info = helper.create_variable_for_type_inference(dtype='int') attrs = dict() attrs['pivots'] = pivot helper.append_op( type='lu', inputs={'X': x}, outputs={'Out': lu, 'Pivots': p, 'Infos': info}, attrs=attrs) if get_infos: return lu, p, info else: return lu, p def lu_unpack(x, y, unpack_ludata=True, unpack_pivots=True, name=None): r""" Unpack L U and P to single matrix tensor . unpack L and U matrix from LU, unpack permutation matrix P from Pivtos . P mat can be get by pivots: # ones = eye(rows) #eye matrix of rank rows # for i in range(cols): # swap(ones[i], ones[pivots[i]]) Args: x (Tensor): The LU tensor get from paddle.linalg.lu, which is combined by L and U. y (Tensor): Pivots get from paddle.linalg.lu. unpack_ludata (bool,optional): whether to unpack L and U from x. Default: True. unpack_pivots (bool, optional): whether to unpack permutation matrix P from Pivtos. Default: True. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: P (Tensor): Permutation matrix P of lu factorization. L (Tensor): The lower triangular matrix tensor of lu factorization. U (Tensor): The upper triangular matrix tensor of lu factorization. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') lu,p,info = paddle.linalg.lu(x, get_infos=True) # >>> lu: # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0.20000000, 0.80000000], # [0.60000000, 0.50000000]]) # >>> p # Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # [3, 3]) # >>> info # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # 0) P,L,U = paddle.linalg.lu_unpack(lu,p) # >>> P # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[0., 1., 0.], # [0., 0., 1.], # [1., 0., 0.]]), # >>> L # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[1. , 0. ], # [0.20000000, 1. ], # [0.60000000, 0.50000000]]), # >>> U # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0. 
, 0.80000000]])) # one can verify : X = P @ L @ U ; """ if paddle.in_dynamic_mode(): P, L, U = _C_ops.lu_unpack(x, y, 'unpack_ludata', unpack_ludata, 'unpack_pivots', unpack_pivots) return P, L, U check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu_unpack') helper = LayerHelper('lu_unpack', **locals()) p = helper.create_variable_for_type_inference(dtype=x.dtype) l = helper.create_variable_for_type_inference(dtype=x.dtype) u = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['unpack_ludata'] = unpack_ludata attrs['unpack_pivots'] = unpack_pivots helper.append_op( type='lu_unpack', inputs={'X': x, 'Pivots': y}, outputs={'Pmat': p, 'L': l, 'U': u}, attrs=attrs) return p, l, u def eig(x, name=None): """ This API performs the eigenvalue decomposition of a square matrix or a batch of square matrices. .. note:: If the matrix is a Hermitian or a real symmetric matrix, please use :ref:`paddle.linalg.eigh` instead, which is much faster. If only eigenvalues is needed, please use :ref:`paddle.linalg.eigvals` instead. If the matrix is of any shape, please use :ref:`paddle.linalg.svd`. This API is only supported on CPU device. The output datatype is always complex for both real and complex input. Args: x (Tensor): A tensor with shape math:`[*, N, N]`, The data type of the x should be one of ``float32``, ``float64``, ``compplex64`` or ``complex128``. name (str, optional): The default value is `None`. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Eigenvalues(Tensors): A tensor with shape math:`[*, N]` refers to the eigen values. Eigenvectors(Tensors): A tensor with shape math:`[*, N, N]` refers to the eigen vectors. Examples: .. code-block:: python import paddle import numpy as np paddle.device.set_device("cpu") x_data = np.array([[1.6707249, 7.2249975, 6.5045543], [9.956216, 8.749598, 6.066444 ], [4.4251957, 1.7983172, 0.370647 ]]).astype("float32") x = paddle.to_tensor(x_data) w, v = paddle.linalg.eig(x) print(w) # Tensor(shape=[3, 3], dtype=complex128, place=CPUPlace, stop_gradient=False, # [[(-0.5061363550800655+0j) , (-0.7971760990842826+0j) , # (0.18518077798279986+0j)], # [(-0.8308237755993192+0j) , (0.3463813401919749+0j) , # (-0.6837005269141947+0j) ], # [(-0.23142567697893396+0j), (0.4944999840400175+0j) , # (0.7058765252952796+0j) ]]) print(v) # Tensor(shape=[3], dtype=complex128, place=CPUPlace, stop_gradient=False, # [ (16.50471283351188+0j) , (-5.5034820550763515+0j) , # (-0.21026087843552282+0j)]) """ if paddle.in_dynamic_mode(): w, v = _C_ops.eig(x) return w, v check_variable_and_dtype( x, 'X', ['float32', 'float64', 'complex64', 'complex128'], 'eig') helper = LayerHelper('eig', **locals()) w = helper.create_variable_for_type_inference(x.dtype) v = helper.create_variable_for_type_inference(x.dtype) inputs = {'X': x} outputs = {'Eigenvalues': w, 'Eigenvectors': v} helper.append_op(type='eig', inputs=inputs, outputs=outputs) return w, v def eigvals(x, name=None): """ Compute the eigenvalues of one or more general matrices. Warning: The gradient kernel of this operator does not yet developed. If you need back propagation through this operator, please replace it with paddle.linalg.eig. Args: x (Tensor): A square matrix or a batch of square matrices whose eigenvalues will be computed. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32, float64, complex64, or complex128. 
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: A tensor containing the unsorted eigenvalues which has the same batch dimensions with `x`. The eigenvalues are complex-valued even when `x` is real. Examples: .. code-block:: python import paddle paddle.set_device("cpu") paddle.seed(1234) x = paddle.rand(shape=[3, 3], dtype='float64') # [[0.02773777, 0.93004224, 0.06911496], # [0.24831591, 0.45733623, 0.07717843], # [0.48016702, 0.14235102, 0.42620817]]) print(paddle.linalg.eigvals(x)) # [(-0.27078833542132674+0j), (0.29962280156230725+0j), (0.8824477020120244+0j)] #complex128 """ check_variable_and_dtype(x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigvals') x_shape = list(x.shape) if len(x_shape) < 2: raise ValueError( "The dimension of Input(x) should be at least 2, but received x's dimention = {}, x's shape = {}". format(len(x_shape), x_shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The last two dimensions of Input(x) should be equal, but received x's shape = {}". format(x_shape)) if paddle.in_dynamic_mode(): return _C_ops.eigvals(x) helper = LayerHelper('eigvals', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type='eigvals', inputs={'X': x}, outputs={'Out': out}) return out def multi_dot(x, name=None): """ Multi_dot is an operator that calculates multiple matrix multiplications. Supports inputs of float16(only GPU support), float32 and float64 dtypes. This function does not support batched inputs. The input tensor in [x] must be 2-D except for the first and last can be 1-D. If the first tensor is a 1-D vector of shape(n, ) it is treated as row vector of shape(1, n), similarly if the last tensor is a 1D vector of shape(n, ), it is treated as a column vector of shape(n, 1). If the first and last tensor are 2-D matrix, then the output is also 2-D matrix, otherwise the output is a 1-D vector. Multi_dot will select the lowest cost multiplication order for calculation. The cost of multiplying two matrices with shapes (a, b) and (b, c) is a * b * c. Given matrices A, B, C with shapes (20, 5), (5, 100), (100, 10) respectively, we can calculate the cost of different multiplication orders as follows: - Cost((AB)C) = 20x5x100 + 20x100x10 = 30000 - Cost(A(BC)) = 5x100x10 + 20x5x10 = 6000 In this case, multiplying B and C first, then multiply A, which is 5 times faster than sequential calculation. Args: x ([Tensor]): The input tensors which is a list Tensor. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The output Tensor. Examples: .. 
code-block:: python import paddle import numpy as np # A * B A_data = np.random.random([3, 4]).astype(np.float32) B_data = np.random.random([4, 5]).astype(np.float32) A = paddle.to_tensor(A_data) B = paddle.to_tensor(B_data) out = paddle.linalg.multi_dot([A, B]) print(out.numpy().shape) # [3, 5] # A * B * C A_data = np.random.random([10, 5]).astype(np.float32) B_data = np.random.random([5, 8]).astype(np.float32) C_data = np.random.random([8, 7]).astype(np.float32) A = paddle.to_tensor(A_data) B = paddle.to_tensor(B_data) C = paddle.to_tensor(C_data) out = paddle.linalg.multi_dot([A, B, C]) print(out.numpy().shape) # [10, 7] """ if paddle.in_dynamic_mode(): return _C_ops.multi_dot(x) check_type(x, 'x', (list, tuple), 'multi_dot') for id, item in enumerate(x): check_variable_and_dtype(item, 'x[' + str(id) + ']', ['float16', 'float32', 'float64'], 'multi_dot') if item.dtype != x[0].dtype: raise TypeError( "All the Tensors in the input must have the same data type.") helper = LayerHelper('multi_dot', **locals()) dtype = helper.input_dtype(input_param_name='x') out = helper.create_variable_for_type_inference(dtype) helper.append_op(type='multi_dot', inputs={"X": x}, outputs={"Out": out}) return out def eigh(x, UPLO='L', name=None): """ Compute the eigenvalues and eigenvectors of a complex Hermitian (conjugate symmetric) or a real symmetric matrix. Args: x (Tensor): A tensor with shape :math:`[*, N, N]` , The data type of the input Tensor x should be one of float32, float64, complex64, complex128. UPLO(str, optional): (string, default 'L'), 'L' represents the lower triangular matrix, "'U' represents the upper triangular matrix.". name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: out_value(Tensor): A Tensor with shape [*, N] and data type of float32 and float64. The eigenvalues of eigh op. out_vector(Tensor): A Tensor with shape [*, N, N] and data type of float32,float64,complex64 and complex128. The eigenvectors of eigh op. Examples: .. code-block:: python import numpy as np import paddle x_data = np.array([[1, -2j], [2j, 5]]) x = paddle.to_tensor(x_data) out_value, out_vector = paddle.linalg.eigh(x, UPLO='L') print(out_value) #[0.17157288, 5.82842712] print(out_vector) #[(-0.9238795325112867+0j), (-0.3826834323650898+0j)], #[ 0.3826834323650898j , -0.9238795325112867j ]] """ if paddle.in_dynamic_mode(): return _C_ops.eigh(x, 'UPLO', UPLO) def __check_input(x, UPLO): x_shape = list(x.shape) if len(x.shape) < 2: raise ValueError( "Input(input) only support >=2 tensor, but received " "length of Input(input) is %s." % len(x.shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The input matrix must be batches of square matrices. But received x's dimention: {}". format(x_shape)) if UPLO != 'L' and UPLO != 'U': raise ValueError( "UPLO must be L or U. 
But received UPLO is: {}".format(UPLO)) __check_input(x, UPLO) helper = LayerHelper('eigh', **locals()) check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigh') out_value = helper.create_variable_for_type_inference(dtype=x.dtype) out_vector = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='eigh', inputs={'X': x}, outputs={'Eigenvalues': out_value, 'Eigenvectors': out_vector}, attrs={'UPLO': UPLO}) return out_value, out_vector def pinv(x, rcond=1e-15, hermitian=False, name=None): r""" Calculate pseudo inverse via SVD(singular value decomposition) of one matrix or batches of regular matrix. .. math:: if hermitian == False: x = u * s * vt (SVD) out = v * 1/s * ut else: x = u * s * ut (eigh) out = u * 1/s * u.conj().transpose(-2,-1) If x is hermitian or symmetric matrix, svd will be replaced with eigh. Args: x(Tensor): The input tensor. Its shape should be (*, m, n) where * is zero or more batch dimensions. m and n can be arbitraty positive number. The data type of x should be float32 or float64 or complex64 or complex128. When data type is complex64 or cpmplex128, hermitian should be set True. rcond(Tensor, optional): the tolerance value to determine when is a singular value zero. Defalut:1e-15. hermitian(bool, optional): indicates whether x is Hermitian if complex or symmetric if real. Default: False. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The tensor with same data type with x. it represents pseudo inverse of x. Its shape should be (*, n, m). Examples: .. code-block:: python import paddle x = paddle.arange(15).reshape((3, 5)).astype('float64') input = paddle.to_tensor(x) out = paddle.linalg.pinv(input) print(input) print(out) # input: # [[0. , 1. , 2. , 3. , 4. ], # [5. , 6. , 7. , 8. , 9. 
], # [10., 11., 12., 13., 14.]] # out: # [[-0.22666667, -0.06666667, 0.09333333], # [-0.12333333, -0.03333333, 0.05666667], # [-0.02000000, 0.00000000, 0.02000000], # [ 0.08333333, 0.03333333, -0.01666667], # [ 0.18666667, 0.06666667, -0.05333333]] # one can verify : x * out * x = x ; # or out * x * out = x ; """ if paddle.in_dynamic_mode(): if not hermitian: # combine svd and matmul op u, s, vt = _C_ops.svd(x, 'full_matrices', False) max_singular_val = _C_ops.reduce_max(s, 'dim', [-1], 'keep_dim', True, \ 'reduce_all', False) rcond = paddle.to_tensor(rcond, dtype=x.dtype) cutoff = rcond * max_singular_val y = float('inf') y = paddle.to_tensor(y, dtype=x.dtype) condition = s > cutoff cond_int = layers.cast(condition, s.dtype) cond_not_int = layers.cast(layers.logical_not(condition), s.dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st, _ = _C_ops.unsqueeze2(singular, 'axes', [-2]) dims = list(range(len(vt.shape))) perm = dims[:-2] + [dims[-1]] + [dims[-2]] v, _ = _C_ops.transpose2(vt, 'axis', perm) out_1 = v * st out_2 = _C_ops.matmul_v2(out_1, u, 'trans_x', False, 'trans_y', True) return out_2 else: # combine eigh and matmul op s, u = _C_ops.eigh(x, 'UPLO', 'L') s_abs = paddle.abs(s) max_singular_val = _C_ops.reduce_max(s_abs, 'dim', [-1], 'keep_dim', True, \ 'reduce_all', False) rcond = paddle.to_tensor(rcond, dtype=s.dtype) cutoff = rcond * max_singular_val y = float('inf') y = paddle.to_tensor(y, dtype=s.dtype) condition = s_abs > cutoff cond_int = layers.cast(condition, s.dtype) cond_not_int = layers.cast(layers.logical_not(condition), s.dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st, _ = _C_ops.unsqueeze2(singular, 'axes', [-2]) out_1 = u * st u_conj = _C_ops.conj(u) out_2 = _C_ops.matmul_v2(out_1, u_conj, 'trans_x', False, 'trans_y', True) return out_2 else: if not hermitian: helper = LayerHelper('pinv', **locals()) dtype = x.dtype check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'pinv') u = helper.create_variable_for_type_inference(dtype) s = helper.create_variable_for_type_inference(dtype) vt = helper.create_variable_for_type_inference(dtype) helper.append_op( type='svd', inputs={'X': [x]}, outputs={'U': u, 'VH': vt, 'S': s}, attrs={'full_matrices': False}, ) max_singular_val = helper.create_variable_for_type_inference(dtype) helper.append_op( type='reduce_max', inputs={'X': s}, outputs={'Out': max_singular_val}, attrs={'dim': [-1], 'keep_dim': True, 'reduce_all': False}) rcond = layers.fill_constant(shape=[1], value=rcond, dtype=dtype) cutoff = rcond * max_singular_val y = float('inf') y = layers.fill_constant(shape=[1], value=y, dtype=dtype) condition = s > cutoff cond_int = layers.cast(condition, dtype) cond_not_int = layers.cast(layers.logical_not(condition), dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st = helper.create_variable_for_type_inference(dtype=dtype) st_shape = helper.create_variable_for_type_inference(dtype=dtype) helper.append_op( type='unsqueeze2', inputs={'X': singular}, attrs={'axes': [-2]}, outputs={'Out': st, 'XShape': st_shape}) dims = list(range(len(vt.shape))) perm = dims[:-2] + [dims[-1]] + [dims[-2]] v = helper.create_variable_for_type_inference(dtype) v_shape = helper.create_variable_for_type_inference(dtype) helper.append_op( 
type='transpose2', inputs={'X': [vt]}, outputs={'Out': [v], 'XShape': [v_shape]}, attrs={'axis': perm}) out_1 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='elementwise_mul', inputs={'X': v, 'Y': st}, outputs={'Out': out_1}, attrs={'axis': -1, 'use_mkldnn': False}) out_1 = helper.append_activation(out_1) out_2 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='matmul_v2', inputs={'X': out_1, 'Y': u}, outputs={'Out': out_2}, attrs={'trans_x': False, 'trans_y': True}, ) return out_2 else: helper = LayerHelper('pinv', **locals()) dtype = x.dtype check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'pinv') if dtype == paddle.complex128: s_type = 'float64' elif dtype == paddle.complex64: s_type = 'float32' else: s_type = dtype u = helper.create_variable_for_type_inference(dtype) s = helper.create_variable_for_type_inference(s_type) helper.append_op( type='eigh', inputs={'X': x}, outputs={'Eigenvalues': s, 'Eigenvectors': u}, attrs={'UPLO': 'L'}) s_abs = helper.create_variable_for_type_inference(s_type) helper.append_op( type='abs', inputs={'X': s}, outputs={'Out': s_abs}) max_singular_val = helper.create_variable_for_type_inference(s_type) helper.append_op( type='reduce_max', inputs={'X': s_abs}, outputs={'Out': max_singular_val}, attrs={'dim': [-1], 'keep_dim': True, 'reduce_all': False}) rcond = layers.fill_constant(shape=[1], value=rcond, dtype=s_type) cutoff = rcond * max_singular_val y = float('inf') y = layers.fill_constant(shape=[1], value=y, dtype=s_type) condition = s_abs > cutoff cond_int = layers.cast(condition, s_type) cond_not_int = layers.cast(layers.logical_not(condition), s_type) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st = helper.create_variable_for_type_inference(dtype=s_type) st_shape = helper.create_variable_for_type_inference(dtype=s_type) helper.append_op( type='unsqueeze2', inputs={'X': singular}, attrs={'axes': [-2]}, outputs={'Out': st, 'XShape': st_shape}) out_1 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='elementwise_mul', inputs={'X': u, 'Y': st}, outputs={'Out': out_1}, attrs={'axis': -1, 'use_mkldnn': False}) out_1 = helper.append_activation(out_1) u_conj = helper.create_variable_for_type_inference(dtype) helper.append_op( type='conj', inputs={'X': u}, outputs={'Out': [u_conj]}) out_2 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='matmul_v2', inputs={'X': out_1, 'Y': u_conj}, outputs={'Out': out_2}, attrs={'trans_x': False, 'trans_y': True}, ) return out_2 def solve(x, y, name=None): r""" Computes the solution of a square system of linear equations with a unique solution for input 'X' and 'Y'. Let :math: `X` be a sqaure matrix or a batch of square matrices, :math:`Y` be a vector/matrix or a batch of vectors/matrices, the equation should be: .. math:: Out = X^-1 * Y Specifically, - This system of linear equations has one solution if and only if input 'X' is invertible. Args: x (Tensor): A square matrix or a batch of square matrices. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): A vector/matrix or a batch of vectors/matrices. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. name(str, optional): Name for the operation (optional, default is None). 
For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of a square system of linear equations with a unique solution for input 'x' and 'y'. Its data type should be the same as that of `x`. Examples: .. code-block:: python # a square system of linear equations: # 2*X0 + X1 = 9 # X0 + 2*X1 = 8 import paddle import numpy as np np_x = np.array([[3, 1],[1, 2]]) np_y = np.array([9, 8]) x = paddle.to_tensor(np_x, dtype="float64") y = paddle.to_tensor(np_y, dtype="float64") out = paddle.linalg.solve(x, y) print(out) # [2., 3.]) """ if paddle.in_dynamic_mode(): return _C_ops.solve(x, y) inputs = {"X": [x], "Y": [y]} helper = LayerHelper("solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type="solve", inputs={"X": x, "Y": y}, outputs={"Out": out}) return out def triangular_solve(x, y, upper=True, transpose=False, unitriangular=False, name=None): r""" Computes the solution of a system of equations with a triangular coefficient matrix `x` and multiple right-hand sides `y` . Input `x` and `y` is 2D matrices or batches of 2D matrices. If the inputs are batches, the outputs is also batches. Args: x (Tensor): The input triangular coefficient matrix. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. upper (bool, optional): Whether to solve the upper-triangular system of equations (default) or the lower-triangular system of equations. Default: True. transpose (bool, optional): whether `x` should be transposed before calculation. Default: False. unitriangular (bool, optional): whether `x` is unit triangular. If True, the diagonal elements of `x` are assumed to be 1 and not referenced from `x` . Default: False. name(str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of the system of equations. Its data type should be the same as that of `x`. Examples: .. code-block:: python # a square system of linear equations: # x1 + x2 + x3 = 0 # 2*x2 + x3 = -9 # -x3 = 5 import paddle import numpy as np x = paddle.to_tensor([[1, 1, 1], [0, 2, 1], [0, 0,-1]], dtype="float64") y = paddle.to_tensor([[0], [-9], [5]], dtype="float64") out = paddle.linalg.triangular_solve(x, y, upper=True) print(out) # [7, -2, -5] """ if paddle.in_dynamic_mode(): return _C_ops.triangular_solve(x, y, 'upper', upper, 'transpose', transpose, 'unitriangular', unitriangular) inputs = {"X": [x], "Y": [y]} helper = LayerHelper("triangular_solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'triangular_solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'triangular_solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='triangular_solve', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs={ 'upper': upper, 'transpose': transpose, 'unitriangular': unitriangular }) return out def cholesky_solve(x, y, upper=False, name=None): r""" Solves a linear system of equations A @ X = B, given A's Cholesky factor matrix u and matrix B. Input `x` and `y` is 2D matrices or batches of 2D matrices. 
If the inputs are batches, the outputs is also batches. Args: x (Tensor): The input matrix which is upper or lower triangular Cholesky factor of square matrix A. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. upper (bool, optional): whether to consider the Cholesky factor as a lower or upper triangular matrix. Default: False. name(str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of the system of equations. Its data type is the same as that of `x`. Examples: .. code-block:: python import paddle u = paddle.to_tensor([[1, 1, 1], [0, 2, 1], [0, 0,-1]], dtype="float64") b = paddle.to_tensor([[0], [-9], [5]], dtype="float64") out = paddle.linalg.cholesky_solve(b, u, upper=True) print(out) # [-2.5, -7, 9.5] """ if paddle.in_dynamic_mode(): return _C_ops.cholesky_solve(x, y, 'upper', upper) helper = LayerHelper("cholesky_solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'cholesky_solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'cholesky_solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='cholesky_solve', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs={'upper': upper}) return out def eigvalsh(x, UPLO='L', name=None): """ Computes the eigenvalues of a complex Hermitian (conjugate symmetric) or a real symmetric matrix. Args: x (Tensor): A tensor with shape :math:`[_, M, M]` , The data type of the input Tensor x should be one of float32, float64, complex64, complex128. UPLO(str, optional): Lower triangular part of a (‘L’, default) or the upper triangular part (‘U’). name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor eigenvalues in ascending order. Examples: .. code-block:: python import numpy as np import paddle x_data = np.array([[1, -2j], [2j, 5]]) x = paddle.to_tensor(x_data) out_value = paddle.eigvalsh(x, UPLO='L') print(out_value) #[0.17157288, 5.82842712] """ if paddle.in_dynamic_mode(): is_test = x.stop_gradient values, _ = _C_ops.eigvalsh(x, 'UPLO', UPLO, 'is_test', is_test) return values def __check_input(x, UPLO): x_shape = list(x.shape) if len(x.shape) < 2: raise ValueError( "Input(input) only support >=2 tensor, but received " "length of Input(input) is %s." % len(x.shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The input matrix must be batches of square matrices. But received x's dimention: {}". format(x_shape)) if UPLO != 'L' and UPLO != 'U': raise ValueError( "UPLO must be L or U. 
But received UPLO is: {}".format(UPLO)) __check_input(x, UPLO) helper = LayerHelper('eigvalsh', **locals()) check_variable_and_dtype(x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigvalsh') out_value = helper.create_variable_for_type_inference(dtype=x.dtype) out_vector = helper.create_variable_for_type_inference(dtype=x.dtype) is_test = x.stop_gradient helper.append_op( type='eigvalsh', inputs={'X': x}, outputs={'Eigenvalues': out_value, 'Eigenvectors': out_vector}, attrs={'UPLO': UPLO, 'is_test': is_test}) return out_value def lstsq(x, y, rcond=None, driver=None, name=None): """ Computes a solution to the least squares problem of a system of linear equations. Args: x (Tensor): A tensor with shape ``(*, M, N)`` , the data type of the input Tensor ``x`` should be one of float32, float64. y (Tensor): A tensor with shape ``(*, M, K)`` , the data type of the input Tensor ``y`` should be one of float32, float64. rcond(float, optional): The default value is None. A float pointing number used to determine the effective rank of ``x``. If ``rcond`` is None, it will be set to max(M, N) times the machine precision of x_dtype. driver(str, optional): The default value is None. The name of LAPACK method to be used. For CPU inputs the valid values are ‘gels’, ‘gelsy’, ‘gelsd, ‘gelss’. For CUDA input, the only valid driver is ‘gels’. If ``driver`` is None, ‘gelsy’ is used for CPU inputs and ‘gels’ for CUDA inputs. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tuple: A tuple of 4 Tensors which is (``solution``, ``residuals``, ``rank``, ``singular_values``). ``solution`` is a tensor with shape ``(*, N, K)``, meaning the least squares solution. ``residuals`` is a tensor with shape ``(*, K)``, meaning the squared residuals of the solutions, which is computed when M > N and every matrix in ``x`` is full-rank, otherwise return an empty tensor. ``rank`` is a tensor with shape ``(*)``, meaning the ranks of the matrices in ``x``, which is computed when ``driver`` in (‘gelsy’, ‘gelsd’, ‘gelss’), otherwise return an empty tensor. ``singular_values`` is a tensor with shape ``(*, min(M, N))``, meaning singular values of the matrices in ``x``, which is computed when ``driver`` in (‘gelsd’, ‘gelss’), otherwise return an empty tensor. Examples: .. code-block:: python import paddle paddle.set_device("cpu") x = paddle.to_tensor([[1, 3], [3, 2], [5, 6.]]) y = paddle.to_tensor([[3, 4, 6], [5, 3, 4], [1, 2, 1.]]) results = paddle.linalg.lstsq(x, y, driver="gelsd") print(results[0]) # [[ 0.78350395, -0.22165027, -0.62371236], # [-0.11340097, 0.78866047, 1.14948535]] print(results[1]) # [19.81443405, 10.43814468, 30.56185532]) print(results[2]) # 2 print(results[3]) # [9.03455734, 1.54167950] x = paddle.to_tensor([[10, 2, 3], [3, 10, 5], [5, 6, 12.]]) y = paddle.to_tensor([[4, 2, 9], [2, 0, 3], [2, 5, 3.]]) results = paddle.linalg.lstsq(x, y, driver="gels") print(results[0]) # [[ 0.39386186, 0.10230173, 0.93606132], # [ 0.10741687, -0.29028133, 0.11892585], # [-0.05115091, 0.51918161, -0.19948854]] print(results[1]) # [] """ device = paddle.get_device() if device == "cpu": if driver not in (None, "gels", "gelss", "gelsd", "gelsy"): raise ValueError( "Only support valid driver is 'gels', 'gelss', 'gelsd', 'gelsy' or None for CPU inputs. But got {}". 
format(driver)) driver = "gelsy" if driver is None else driver elif "gpu" in device: if driver not in (None, "gels"): raise ValueError( "Only support valid driver is 'gels' or None for CUDA inputs. But got {}". format(driver)) driver = "gels" if driver is None else driver else: raise RuntimeError("Only support lstsq api for CPU or CUDA device.") if x.dtype == y.dtype and x.dtype in (paddle.float32, paddle.float64): pass else: raise ValueError( "Only support x and y have the same dtype such as 'float32' and 'float64'." ) if rcond is None: if x.dtype == paddle.float32: rcond = 1e-7 * max(x.shape[-2], x.shape[-1]) elif x.dtype == paddle.float64: rcond = 1e-15 * max(x.shape[-2], x.shape[-1]) if paddle.in_dynamic_mode(): solution, rank, singular_values = _C_ops.lstsq(x, y, "rcond", rcond, "driver", driver) if x.shape[-2] > x.shape[-1]: matmul_out = _varbase_creator(dtype=x.dtype) _C_ops.matmul(x, solution, matmul_out, 'trans_x', False, 'trans_y', False) minus_out = _C_ops.elementwise_sub(matmul_out, y) pow_out = _C_ops.pow(minus_out, 'factor', 2) residuals = _C_ops.reduce_sum(pow_out, 'dim', [-2], 'keepdim', False, 'reduce_all', False) else: residuals = paddle.empty(shape=[0], dtype=x.dtype) if driver == "gels": rank = paddle.empty(shape=[0], dtype=paddle.int32) singular_values = paddle.empty(shape=[0], dtype=x.dtype) elif driver == "gelsy": singular_values = paddle.empty(shape=[0], dtype=x.dtype) return solution, residuals, rank, singular_values helper = LayerHelper('lstsq', **locals()) check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq') check_variable_and_dtype( y, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq') solution = helper.create_variable_for_type_inference(dtype=x.dtype) residuals = helper.create_variable_for_type_inference(dtype=x.dtype) rank = helper.create_variable_for_type_inference(dtype=paddle.int32) singular_values = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='lstsq', inputs={'X': x, 'Y': y}, outputs={ 'Solution': solution, 'Rank': rank, 'SingularValues': singular_values }, attrs={'rcond': rcond, 'driver': driver}) matmul_out = helper.create_variable_for_type_inference(dtype=x.dtype) minus_out = helper.create_variable_for_type_inference(dtype=x.dtype) pow_out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matmul_v2', inputs={'X': x, 'Y': solution}, outputs={'Out': matmul_out}, attrs={ 'trans_x': False, 'trans_y': False, }) helper.append_op( type='elementwise_sub', inputs={'X': matmul_out, 'Y': y}, outputs={'Out': minus_out}) helper.append_op( type='pow', inputs={'X': minus_out}, outputs={'Out': pow_out}, attrs={'factor': 2}) helper.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': residuals}, attrs={'dim': [-2], 'keep_dim': False, 'reduce_all': False}) if driver == "gels": rank = paddle.static.data(name='rank', shape=[0]) singular_values = paddle.static.data(name='singular_values', shape=[0]) elif driver == "gelsy": singular_values = paddle.static.data(name='singular_values', shape=[0]) return solution, residuals, rank, singular_values
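# Hedged sketch (illustrative only, not part of the module's public API): one
# way to sanity-check paddle.linalg.lstsq is to compare its solution against
# numpy.linalg.lstsq on a small overdetermined system. The helper below is a
# hypothetical, self-contained example assuming CPU execution, float64 inputs
# and the 'gelsd' driver; the comparison tolerance is illustrative only.
def _lstsq_numpy_crosscheck_example():  # pragma: no cover (example only)
    import numpy as np
    paddle.set_device("cpu")
    a = paddle.to_tensor([[1.0, 3.0], [3.0, 2.0], [5.0, 6.0]], dtype="float64")
    b = paddle.to_tensor([[3.0], [5.0], [1.0]], dtype="float64")
    # lstsq returns (solution, residuals, rank, singular_values)
    solution, residuals, rank, singular_values = lstsq(a, b, driver="gelsd")
    reference = np.linalg.lstsq(a.numpy(), b.numpy(), rcond=None)[0]
    assert np.allclose(solution.numpy(), reference, atol=1e-6)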
def histogram(input, bins=100, min=0, max=0, name=None): """ Computes the histogram of a tensor. The elements are sorted into equal width bins between min and max. If min and max are both zero, the minimum and maximum values of the data are used. Args: input (Tensor): A Tensor(or LoDTensor) with shape :math:`[N_1, N_2,..., N_k]` . The data type of the input Tensor should be float32, float64, int32, int64. bins (int): number of histogram bins min (int): lower end of the range (inclusive) max (int): upper end of the range (inclusive) Returns: Tensor: data type is int64, shape is (nbins,). Examples: .. code-block:: python import paddle inputs = paddle.to_tensor([1, 2, 1]) result = paddle.histogram(inputs, bins=4, min=0, max=3) print(result) # [0, 2, 1, 0] """ if paddle.in_dynamic_mode(): return _C_ops.histogram(input, "bins", bins, "min", min, "max", max) helper = LayerHelper('histogram', **locals()) check_variable_and_dtype( input, 'X', ['int32', 'int64', 'float32', 'float64'], 'histogram') out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64) helper.append_op( type='histogram', inputs={'X': input}, outputs={'Out': out}, attrs={'bins': bins, 'min': min, 'max': max}) return out
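# Hedged sketch (illustrative only, not part of the module): paddle.histogram
# sorts values into `bins` equal-width buckets over [min, max]. The hypothetical
# helper below cross-checks the docstring example against numpy.histogram under
# the assumption that both apply the same equal-width binning of [0, 3].
def _histogram_numpy_crosscheck_example():  # pragma: no cover (example only)
    import numpy as np
    data = paddle.to_tensor([1, 2, 1])
    pd_counts = histogram(data, bins=4, min=0, max=3).numpy()
    np_counts, _ = np.histogram(data.numpy(), bins=4, range=(0, 3))
    assert (pd_counts == np_counts).all()  # both yield [0, 2, 1, 0]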
1376
1414
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from ..fluid.layer_helper import LayerHelper from ..framework import _varbase_creator, _dygraph_tracer from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype from ..static import Variable from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode from ..fluid.layers import transpose, cast # noqa: F401 from ..fluid import layers import paddle from paddle.common_ops_import import core from paddle.common_ops_import import VarDesc from paddle import _C_ops __all__ = [] def matmul(x, y, transpose_x=False, transpose_y=False, name=None): """ Applies matrix multiplication to two tensors. `matmul` follows the complete broadcast rules, and its behavior is consistent with `np.matmul`. Currently, the input tensors' number of dimensions can be any, `matmul` can be used to achieve the `dot`, `matmul` and `batchmatmul`. The actual behavior depends on the shapes of :math:`x`, :math:`y` and the flag values of :attr:`transpose_x`, :attr:`transpose_y`. Specifically: - If a transpose flag is specified, the last two dimensions of the tensor are transposed. If the tensor is ndim-1 of shape, the transpose is invalid. If the tensor is ndim-1 of shape :math:`[D]`, then for :math:`x` it is treated as :math:`[1, D]`, whereas for :math:`y` it is the opposite: It is treated as :math:`[D, 1]`. The multiplication behavior depends on the dimensions of `x` and `y`. Specifically: - If both tensors are 1-dimensional, the dot product result is obtained. - If both tensors are 2-dimensional, the matrix-matrix product is obtained. - If the `x` is 1-dimensional and the `y` is 2-dimensional, a `1` is prepended to its dimension in order to conduct the matrix multiply. After the matrix multiply, the prepended dimension is removed. - If the `x` is 2-dimensional and `y` is 1-dimensional, the matrix-vector product is obtained. - If both arguments are at least 1-dimensional and at least one argument is N-dimensional (where N > 2), then a batched matrix multiply is obtained. If the first argument is 1-dimensional, a 1 is prepended to its dimension in order to conduct the batched matrix multiply and removed after. If the second argument is 1-dimensional, a 1 is appended to its dimension for the purpose of the batched matrix multiple and removed after. The non-matrix (exclude the last two dimensions) dimensions are broadcasted according the broadcast rule. For example, if input is a (j, 1, n, m) tensor and the other is a (k, m, p) tensor, out will be a (j, k, n, p) tensor. Args: x (Tensor): The input tensor which is a Tensor. y (Tensor): The input tensor which is a Tensor. transpose_x (bool): Whether to transpose :math:`x` before multiplication. transpose_y (bool): Whether to transpose :math:`y` before multiplication. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The output Tensor. Examples: .. 
code-block:: python import paddle import numpy as np # vector * vector x_data = np.random.random([10]).astype(np.float32) y_data = np.random.random([10]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [1] # matrix * vector x_data = np.random.random([10, 5]).astype(np.float32) y_data = np.random.random([5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10] # batched matrix * broadcasted vector x_data = np.random.random([10, 5, 2]).astype(np.float32) y_data = np.random.random([2]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 5] # batched matrix * batched matrix x_data = np.random.random([10, 5, 2]).astype(np.float32) y_data = np.random.random([10, 2, 5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 5, 5] # batched matrix * broadcasted matrix x_data = np.random.random([10, 1, 5, 2]).astype(np.float32) y_data = np.random.random([1, 3, 2, 5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 3, 5, 5] """ if in_dygraph_mode(): return _C_ops.final_state_matmul(x, y, transpose_x, transpose_y) if _in_legacy_dygraph(): op_type = 'matmul_v2' op = getattr(_C_ops, op_type) return op(x, y, 'trans_x', transpose_x, 'trans_y', transpose_y) attrs = { 'trans_x': transpose_x, 'trans_y': transpose_y, } def __check_input(x, y): var_names = {'x': x, 'y': y} for name, val in var_names.items(): check_variable_and_dtype( val, name, ['float16', 'float32', 'float64', 'complex64', 'complex128'], 'matmul') __check_input(x, y) helper = LayerHelper('matmul_v2', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matmul_v2', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs=attrs) return out def norm(x, p='fro', axis=None, keepdim=False, name=None): """ Returns the matrix norm (Frobenius) or vector norm (the 1-norm, the Euclidean or 2-norm, and in general the p-norm for p > 0) of a given tensor. .. note:: This norm API is different from `numpy.linalg.norm`. This api supports high-order input tensors (rank >= 3), and certain axis need to be pointed out to calculate the norm. But `numpy.linalg.norm` only supports 1-D vector or 2-D matrix as input tensor. For p-order matrix norm, this api actually treats matrix as a flattened vector to calculate the vector norm, NOT REAL MATRIX NORM. Args: x (Tensor): The input tensor could be N-D tensor, and the input data type could be float32 or float64. p (float|string, optional): Order of the norm. Supported values are `fro`, `0`, `1`, `2`, `inf`, `-inf` and any positive real number yielding the corresponding p-norm. Not supported: ord < 0 and nuclear norm. Default value is `fro`. axis (int|list|tuple, optional): The axis on which to apply norm operation. If axis is int or list(int)/tuple(int) with only one element, the vector norm is computed over the axis. If `axis < 0`, the dimension to norm operation is rank(input) + axis. If axis is a list(int)/tuple(int) with two elements, the matrix norm is computed over the axis. Defalut value is `None`. keepdim (bool, optional): Whether to reserve the reduced dimension in the output Tensor. 
The result tensor will have fewer dimension than the :attr:`input` unless :attr:`keepdim` is true, default value is False. name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: results of norm operation on the specified axis of input tensor, it's data type is the same as input's Tensor. Examples: .. code-block:: python import paddle import numpy as np shape=[2, 3, 4] np_input = np.arange(24).astype('float32') - 12 np_input = np_input.reshape(shape) x = paddle.to_tensor(np_input) #[[[-12. -11. -10. -9.] [ -8. -7. -6. -5.] [ -4. -3. -2. -1.]] # [[ 0. 1. 2. 3.] [ 4. 5. 6. 7.] [ 8. 9. 10. 11.]]] # compute frobenius norm along last two dimensions. out_fro = paddle.linalg.norm(x, p='fro', axis=[0,1]) # out_fro.numpy() [17.435596 16.911535 16.7332 16.911535] # compute 2-order vector norm along last dimension. out_pnorm = paddle.linalg.norm(x, p=2, axis=-1) #out_pnorm.numpy(): [[21.118711 13.190906 5.477226] # [ 3.7416575 11.224972 19.131126]] # compute 2-order norm along [0,1] dimension. out_pnorm = paddle.linalg.norm(x, p=2, axis=[0,1]) #out_pnorm.numpy(): [17.435596 16.911535 16.7332 16.911535] # compute inf-order norm out_pnorm = paddle.linalg.norm(x, p=np.inf) #out_pnorm.numpy() = [12.] out_pnorm = paddle.linalg.norm(x, p=np.inf, axis=0) #out_pnorm.numpy(): [[12. 11. 10. 9.] [8. 7. 6. 7.] [8. 9. 10. 11.]] # compute -inf-order norm out_pnorm = paddle.linalg.norm(x, p=-np.inf) #out_pnorm.numpy(): [0.] out_pnorm = paddle.linalg.norm(x, p=-np.inf, axis=0) #out_pnorm.numpy(): [[0. 1. 2. 3.] [4. 5. 6. 5.] [4. 3. 2. 1.]] """ def frobenius_norm(input, dim=None, keepdim=False, name=None): """ The frobenius norm OP is to calculate the frobenius norm of certain two dimensions of Tensor `input`. Args: input (Variable): Tensor, data type float32, float64. dim (list, optional): None for last two dimensions. keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False. """ if dim is not None and not (isinstance(dim, list) and len(dim) == 2): raise ValueError( "The dim of frobenius norm op should be None or two elements list!" ) if paddle.in_dynamic_mode(): if dim is None: return _C_ops.frobenius_norm(input, 'keep_dim', keepdim, 'reduce_all', True) return _C_ops.frobenius_norm(input, 'dim', dim, 'keep_dim', keepdim, 'reduce_all', False) attrs = {'dim': dim, 'keep_dim': keepdim, 'reduce_all': False} if dim is None: attrs['reduce_all'] = True check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'frobenius_norm') helper = LayerHelper('frobenius_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op( type='frobenius_norm', inputs={'X': input}, outputs={'Out': out}, attrs=attrs) return out def vector_norm(input, porder=None, axis=None, keepdim=False, asvector=False, name=None): """ Calculate the p-order vector norm for certain dimension of Tensor `input`. Args: input (Variable): Tensor, data type float32, float64. porder (float, optional): None for porder=2.0. axis (int, optional): None for last dimension. keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False. 
""" if paddle.in_dynamic_mode(): if axis is None: axis = -1 return _C_ops.p_norm(input, 'porder', porder, 'axis', axis, 'keepdim', keepdim, 'asvector', asvector) if porder is not None: check_type(porder, 'porder', (float, int), 'p_norm') if axis is not None: check_type(axis, 'axis', (int), 'p_norm') check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'p_norm') attrs = { 'axis': axis if axis is not None else -1, 'porder': float(porder) if porder is not None else 2.0, 'keepdim': keepdim, 'asvector': asvector, 'epsilon': 1e-12, } helper = LayerHelper('p_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op( type='p_norm', inputs={'X': input}, outputs={'Out': out}, attrs=attrs) return out def inf_norm(input, porder=None, axis=axis, keepdim=False, asvector=False, name=None): helper = LayerHelper('frobenius_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op(type='abs', inputs={'X': input}, outputs={'Out': out}) reduce_out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) reduce_all = True if axis == None or axis == [] or asvector == True else False axis = axis if axis != None and axis != [] else [0] reduce_type = 'reduce_max' if porder == np.float( 'inf') else 'reduce_min' helper.append_op( type=reduce_type, inputs={'X': out}, outputs={'Out': reduce_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) return reduce_out def p_matrix_norm(input, porder=1., axis=axis, keepdim=False, name=None): """ NOTE: This function actually treats the matrix as flattened vector to calculate vector norm instead of matrix norm. """ block = LayerHelper('norm', **locals()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) abs_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='abs', inputs={'X': input}, outputs={'Out': abs_out}) pow_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='pow', inputs={'X': abs_out}, outputs={'Out': pow_out}, attrs={'factor': porder}) sum_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': sum_out}, attrs={ 'dim': axis, 'keep_dim': keepdim, 'reduce_all': True if axis is None else False }) porder block.append_op( type='pow', inputs={'X': sum_out}, outputs={'Out': out}, attrs={'factor': float(1. / porder)}) return out if axis is None and p is not None: if isinstance(p, str): if p == "fro": return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name) else: raise ValueError( "only valid string values are 'fro', found {}".format(p)) elif isinstance(p, (int, float)): return vector_norm( x, porder=p, axis=axis, keepdim=keepdim, asvector=True, name=name) else: raise ValueError("only valid p type is string or float, found {}". 
format(type(p))) if isinstance(axis, tuple): axis = list(axis) if isinstance(axis, list) and len(axis) == 1: axis = axis[0] #calculate vector norm, where axis is int or list with only one integer if isinstance(axis, int): if isinstance(p, str): if p == "fro": return vector_norm( x, porder=2, axis=axis, keepdim=keepdim, asvector=False, name=name) else: raise ValueError( "only valid string values are 'fro', found {}".format(p)) elif isinstance(p, (int, float)): return vector_norm( x, axis=axis, porder=p, keepdim=keepdim, asvector=False, name=name) else: raise ValueError( "unspport p for p-order vector norm. except float, found {}". format(p)) #calculate matrix norm, where axis is list with two integers elif isinstance(axis, list) and len(axis) == 2: if p == "fro": return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name) elif p == np.inf or p == -np.inf: return inf_norm(x, porder=p, axis=axis, keepdim=keepdim, name=name) elif p == 0: raise ValueError( "just suport axis type int or list (length of list <=1) if p = 0, found {}". format(axis)) else: return p_matrix_norm( x, porder=p, axis=axis, keepdim=keepdim, name=name) else: raise ValueError( "except axis type int or list (length of list <=2), found {}". format(axis)) def dist(x, y, p=2, name=None): r""" This OP returns the p-norm of (x - y). It is not a norm in a strict sense, only as a measure of distance. The shapes of x and y must be broadcastable. The definition is as follows, for details, please refer to the `numpy's broadcasting <https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`_: - Each input has at least one dimension. - Match the two input dimensions from back to front, the dimension sizes must either be equal, one of them is 1, or one of them does not exist. Where, z = x - y, the shapes of x and y are broadcastable, then the shape of z can be obtained as follows: 1. If the number of dimensions of x and y are not equal, prepend 1 to the dimensions of the tensor with fewer dimensions. For example, The shape of x is [8, 1, 6, 1], the shape of y is [7, 1, 5], prepend 1 to the dimension of y. x (4-D Tensor): 8 x 1 x 6 x 1 y (4-D Tensor): 1 x 7 x 1 x 5 2. Determine the size of each dimension of the output z: choose the maximum value from the two input dimensions. z (4-D Tensor): 8 x 7 x 6 x 5 If the number of dimensions of the two inputs are the same, the size of the output can be directly determined in step 2. When p takes different values, the norm formula is as follows: When p = 0, defining $0^0=0$, the zero-norm of z is simply the number of non-zero elements of z. .. math:: ||z||_{0}=\lim_{p \\rightarrow 0}\sum_{i=1}^{m}|z_i|^{p} When p = inf, the inf-norm of z is the maximum element of z. .. math:: ||z||_\infty=\max_i |z_i| When p = -inf, the negative-inf-norm of z is the minimum element of z. .. math:: ||z||_{-\infty}=\min_i |z_i| Otherwise, the p-norm of z follows the formula, .. math:: ||z||_{p}=(\sum_{i=1}^{m}|z_i|^p)^{\\frac{1}{p}} Args: x (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64. y (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64. p (float, optional): The norm to be computed, its data type is float32 or float64. Default: 2. Returns: Tensor: Tensor that is the p-norm of (x - y). Examples: .. code-block:: python import paddle import numpy as np x = paddle.to_tensor(np.array([[3, 3],[3, 3]]), "float32") y = paddle.to_tensor(np.array([[3, 3],[3, 1]]), "float32") out = paddle.dist(x, y, 0) print(out) # out = [1.] out = paddle.dist(x, y, 2) print(out) # out = [2.] 
out = paddle.dist(x, y, float("inf")) print(out) # out = [2.] out = paddle.dist(x, y, float("-inf")) print(out) # out = [0.] """ check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'dist') check_variable_and_dtype(y, 'dtype', ['float32', 'float64'], 'dist') check_type(p, 'p', (float, int), 'dist') helper = LayerHelper("dist", **locals()) out = helper.create_variable_for_type_inference(x.dtype) inputs = {"X": [x], "Y": [y]} outputs = {'Out': [out]} attrs = {"p": float(p)} helper.append_op( type='dist', inputs=inputs, outputs={'Out': out}, attrs=attrs) return out def cond(x, p=None, name=None): """ Computes the condition number of a matrix or batches of matrices with respect to a matrix norm ``p``. Args: x (Tensor): The input tensor could be tensor of shape ``(*, m, n)`` where ``*`` is zero or more batch dimensions for ``p`` in ``(2, -2)``, or of shape ``(*, n, n)`` where every matrix is invertible for any supported ``p``. And the input data type could be ``float32`` or ``float64``. p (float|string, optional): Order of the norm. Supported values are `fro`, `nuc`, `1`, `-1`, `2`, `-2`, `inf`, `-inf`. Default value is `None`, meaning that the order of the norm is `2`. name (str, optional): The default value is `None`. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: computing results of condition number, its data type is the same as input Tensor ``x``. Examples: .. code-block:: python import paddle import numpy as np x = paddle.to_tensor([[1., 0, -1], [0, 1, 0], [1, 0, 1]]) # compute conditional number when p is None out = paddle.linalg.cond(x) # out.numpy() [1.4142135] # compute conditional number when order of the norm is 'fro' out_fro = paddle.linalg.cond(x, p='fro') # out_fro.numpy() [3.1622777] # compute conditional number when order of the norm is 'nuc' out_nuc = paddle.linalg.cond(x, p='nuc') # out_nuc.numpy() [9.2426405] # compute conditional number when order of the norm is 1 out_1 = paddle.linalg.cond(x, p=1) # out_1.numpy() [2.] # compute conditional number when order of the norm is -1 out_minus_1 = paddle.linalg.cond(x, p=-1) # out_minus_1.numpy() [1.] # compute conditional number when order of the norm is 2 out_2 = paddle.linalg.cond(x, p=2) # out_2.numpy() [1.4142135] # compute conditional number when order of the norm is -1 out_minus_2 = paddle.linalg.cond(x, p=-2) # out_minus_2.numpy() [0.70710677] # compute conditional number when order of the norm is inf out_inf = paddle.linalg.cond(x, p=np.inf) # out_inf.numpy() [2.] # compute conditional number when order of the norm is -inf out_minus_inf = paddle.linalg.cond(x, p=-np.inf) # out_minus_inf.numpy() [1.] 
a = paddle.to_tensor(np.random.randn(2, 4, 4).astype('float32')) # a.numpy() # [[[ 0.14063153 -0.996288 0.7996131 -0.02571543] # [-0.16303636 1.5534962 -0.49919784 -0.04402903] # [-1.1341571 -0.6022629 0.5445269 0.29154757] # [-0.16816919 -0.30972657 1.7521842 -0.5402487 ]] # [[-0.58081484 0.12402827 0.7229862 -0.55046535] # [-0.15178485 -1.1604939 0.75810957 0.30971205] # [-0.9669573 1.0940945 -0.27363303 -0.35416734] # [-1.216529 2.0018666 -0.7773689 -0.17556527]]] a_cond_fro = paddle.linalg.cond(a, p='fro') # a_cond_fro.numpy() [31.572273 28.120834] b = paddle.to_tensor(np.random.randn(2, 3, 4).astype('float64')) # b.numpy() # [[[ 1.61707487 0.46829144 0.38130416 0.82546736] # [-1.72710298 0.08866375 -0.62518804 0.16128892] # [-0.02822879 -1.67764516 0.11141444 0.3220113 ]] # [[ 0.22524372 0.62474921 -0.85503233 -1.03960523] # [-0.76620689 0.56673047 0.85064753 -0.45158196] # [ 1.47595418 2.23646462 1.5701758 0.10497519]]] b_cond_2 = paddle.linalg.cond(b, p=2) # b_cond_2.numpy() [3.30064451 2.51976252] """ def mat_norm(input, porder=1., axis=None): """ NOTE: Calculate the matrix norm of a square matrix or batches of square matrices, when porder is in (1, -1, inf, -inf) """ reduce_all = True if axis is None or axis == [] else False axis = axis if axis != None and axis != [] else [0] keepdim = False if paddle.in_dynamic_mode(): abs_out = _C_ops.abs(input) sum_out = _C_ops.reduce_sum(abs_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == 1 or porder == np.inf: return _C_ops.reduce_max(sum_out, 'dim', [-1], 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == -1 or porder == -np.inf: return _C_ops.reduce_min(sum_out, 'dim', [-1], 'keepdim', keepdim, 'reduce_all', reduce_all) block = LayerHelper('norm', **locals()) abs_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='abs', inputs={'X': input}, outputs={'Out': abs_out}) block.append_op( type='reduce_sum', inputs={'X': abs_out}, outputs={'Out': sum_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) if porder == 1 or porder == np.inf: block.append_op( type='reduce_max', inputs={'X': sum_out}, outputs={'Out': out}, attrs={ 'dim': [-1], 'keep_dim': keepdim, 'reduce_all': reduce_all }) if porder == -1 or porder == -np.inf: block.append_op( type='reduce_min', inputs={'X': sum_out}, outputs={'Out': out}, attrs={ 'dim': [-1], 'keep_dim': keepdim, 'reduce_all': reduce_all }) return out def fro_norm(input, porder=2, axis=[-1]): """ NOTE: Calculate the frobenius norm of a square matrix or batches of square matrices. """ reduce_all = True if axis is None or axis == [] else False keepdim = False if paddle.in_dynamic_mode(): pow_out = _C_ops.pow(input, 'factor', porder) sum_out_1 = _C_ops.reduce_sum(pow_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) sum_out_2 = _C_ops.reduce_sum(sum_out_1, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) return _C_ops.pow(sum_out_2, 'factor', float(1. 
/ porder)) block = LayerHelper('norm', **locals()) pow_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out_1 = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out_2 = block.create_variable_for_type_inference( dtype=block.input_dtype()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='pow', inputs={'X': input}, outputs={'Out': pow_out}, attrs={'factor': porder}) block.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': sum_out_1}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='reduce_sum', inputs={'X': sum_out_1}, outputs={'Out': sum_out_2}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='pow', inputs={'X': sum_out_2}, outputs={'Out': out}, attrs={'factor': float(1. / porder)}) return out def svd_norm(input, porder, axis=[-1]): """ NOTE: Calculate the matrix norm, which is related to singular values, of a matrix or batches of matrices, including nuclear norm, 2-norm and (-2)-norm. """ reduce_all = True if axis is None or axis == [] else False keepdim = False u, s, vh = svd(input, full_matrices=False) if paddle.in_dynamic_mode(): if porder == "nuc": return _C_ops.reduce_sum(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) max_out = _C_ops.reduce_max(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) min_out = _C_ops.reduce_min(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == 2: return _C_ops.elementwise_div(max_out, min_out, 'aixs', axis, 'use_mkldnn', False) if porder == -2: return _C_ops.elementwise_div(min_out, max_out, 'aixs', axis, 'use_mkldnn', False) block = LayerHelper('norm', **locals()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) if porder == "nuc": block.append_op( type='reduce_sum', inputs={'X': s}, outputs={'Out': out}, attrs={ 'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all }) return out max_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) min_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='reduce_max', inputs={'X': s}, outputs={'Out': max_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='reduce_min', inputs={'X': s}, outputs={'Out': min_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) if porder == 2: block.append_op( type='elementwise_div', inputs={'X': max_out, 'Y': min_out}, outputs={'Out': out}, attrs={'aixs': axis, 'use_mkldnn': False}) return out if porder == -2: block.append_op( type='elementwise_div', inputs={'X': min_out, 'Y': max_out}, outputs={'Out': out}, attrs={'aixs': axis, 'use_mkldnn': False}) return out def empty_tensor(input, shape): if paddle.in_dynamic_mode(): return input.reshape(shape) raise ValueError("only support x is nonempty tensor in static mode") x_shape = list(x.shape) if not len(x_shape) >= 2: raise ValueError("input should be a matrix or batches of matrices, " + "but the dimention of received input is {}".format( len(x_shape))) if p == None: p = 2 x_size = 0 if (0 in x_shape) else 1 if p in ("fro", "nuc", 1, -1, np.inf, -np.inf): if x_shape[len(x_shape) - 1] == x_shape[len(x_shape) - 2]: if x_size == 0: return empty_tensor(x, x_shape[:-2]) x_inv = x.inverse() if p == "fro": return fro_norm(x) * fro_norm(x_inv) if p == "nuc": return svd_norm(x, p) * svd_norm(x_inv, p) if p in (1, -1): return mat_norm( x, 
porder=p, axis=[-2]) * mat_norm( x_inv, porder=p, axis=[-2]) if p in (np.inf, -np.inf): return mat_norm( x, porder=p, axis=[-1]) * mat_norm( x_inv, porder=p, axis=[-1]) else: raise ValueError("only support p is {} when input is a ".format(p) + "square matrix or batches of square matrices") elif p in (2, -2): if x_size == 0: return empty_tensor(x, x_shape[:-2]) return svd_norm(x, porder=p) else: raise ValueError( "unsupported {} for p, only supporting ('fro', 'nuc', ".format( p) + "1, -1, 2, -2, inf, -inf) or none") def dot(x, y, name=None): """ This operator calculates inner product for vectors. .. note:: Support 1-d and 2-d Tensor. When it is 2d, the first dimension of this matrix is the batch dimension, which means that the vectors of multiple batches are dotted. Parameters: x(Tensor): 1-D or 2-D ``Tensor``. Its dtype should be ``float32``, ``float64``, ``int32``, ``int64`` y(Tensor): 1-D or 2-D ``Tensor``. Its dtype soulde be ``float32``, ``float64``, ``int32``, ``int64`` name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. Details: :ref:`api_guide_Name` Returns: Tensor: the calculated result Tensor. Examples: .. code-block:: python import paddle import numpy as np x_data = np.random.uniform(0.1, 1, [10]).astype(np.float32) y_data = np.random.uniform(1, 3, [10]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.dot(x, y) print(z) """ op_type = 'dot' # skip var type check in dygraph mode to improve efficiency if paddle.in_dynamic_mode(): op = getattr(_C_ops, op_type) return op(x, y) assert x is not None, 'x cannot be None in {}'.format(op_type) assert y is not None, 'y cannot be None in {}'.format(op_type) check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], op_type) check_variable_and_dtype(y, 'y', ['float32', 'float64', 'int32', 'int64'], op_type) helper = LayerHelper(op_type, **locals()) if name is None: out = helper.create_variable_for_type_inference(dtype=x.dtype) else: out = helper.create_variable( name=name, dtype=x.dtype, persistable=False) helper.append_op( type="dot", inputs={'X': x, 'Y': y}, attrs={}, outputs={"Out": out}) return out def cov(x, rowvar=True, ddof=True, fweights=None, aweights=None, name=None): """ Estimate the covariance matrix of the input variables, given data and weights. A covariance matrix is a square matrix, indicate the covariance of each pair variables in the input matrix. For example, for an N-dimensional samples X=[x1,x2,…xN]T, then the covariance matrix element Cij is the covariance of xi and xj. The element Cii is the variance of xi itself. Parameters: x(Tensor): A N-D(N<=2) Tensor containing multiple variables and observations. By default, each row of x represents a variable. Also see rowvar below. rowvar(Bool, optional): If rowvar is True (default), then each row represents a variable, with observations in the columns. Default: True ddof(Bool, optional): If ddof=True will return the unbiased estimate, and ddof=False will return the simple average. Default: True fweights(Tensor, optional): 1-D Tensor of integer frequency weights; The number of times each observation vector should be repeated. Default: None aweights(Tensor, optional): 1-D Tensor of observation vector weights. How important of the observation vector, larger data means this element is more important. Default: None name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. 
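# Illustrative sketch (not part of the library source): fweights act as
# per-column repetition counts, so cov(x, fweights=w) is expected to agree
# with cov computed on data whose columns are repeated w[i] times. The values
# below are a usage assumption for clarification, not an official test.
import paddle
obs = paddle.to_tensor([[1., 2., 3.], [2., 1., 0.]])
freq = paddle.to_tensor([1, 2, 1], dtype='int32')
c_weighted = paddle.linalg.cov(obs, fweights=freq)
obs_repeated = paddle.concat(
    [obs[:, 0:1], obs[:, 1:2], obs[:, 1:2], obs[:, 2:3]], axis=1)
c_repeated = paddle.linalg.cov(obs_repeated)
# c_weighted and c_repeated should match elementwise.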
Details: :ref:`api_guide_Name` Returns: Tensor: The covariance matrix Tensor of the variables. Examples: .. code-block:: python import paddle xt = paddle.rand((3,4)) paddle.linalg.cov(xt) ''' Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, [[0.07918842, 0.06127326, 0.01493049], [0.06127326, 0.06166256, 0.00302668], [0.01493049, 0.00302668, 0.01632146]]) ''' """ op_type = 'cov' if len(x.shape) > 2 or len(x.shape) < 1: raise ValueError( "Input(x) only support N-D (1<=N<=2) tensor in cov, but received " "length of Input(input) is %s." % len(x.shape)) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cov') nx = x if len(x.shape) == 1: nx = x.reshape((1, -1)) if not rowvar and nx.shape[0] != 1: nx = nx.t() w = None observation_num = nx.shape[1] if fweights is not None: w = fweights.astype(nx.dtype) if len(w.shape) > 1: raise ValueError( "Input(fweights) only support N-D (N<=1) tensor in cov, but received " "shape of Input(input) is %s." % len(fweights.shape)) if fweights.shape[0] != observation_num: raise ValueError( "The number of Input(fweights) should equal to x's dim[1]: {}, but received " "size of Input(fweights) is {}.".format(observation_num, fweights.shape[0])) if fweights.min() < 0: raise ValueError( "The value of Input(fweights) cannot be negtive, but received " "min of Input(fweights) is {}.".format(fweights.min())) if not paddle.all(fweights == paddle.round(fweights.astype('float64'))): raise ValueError("Input(fweights) must be integer ") if aweights is not None: aw = aweights.astype(nx.dtype) if len(aw.shape) > 1: raise ValueError( "Input(aweights) only support N-D (N<=1) tensor in cov, but received " "length of Input(input) is %s." % len(aweights.shape)) check_variable_and_dtype(aweights, 'dtype', ['float32', 'float64'], 'cov') if aweights.shape[0] != observation_num: raise ValueError( "The number of Input(aweights) should equal to x's dim[1]: {}, but received " "size of Input(aweights) is {}.".format(observation_num, aweights.shape[0])) if aweights.min() < 0: raise ValueError( "The value of Input(aweights) cannot be negtive, but received " "min of Input(aweights) is {}.".format(aweights.min())) if w is not None: w = w * aw else: w = aw w_sum = paddle.to_tensor(observation_num, dtype=nx.dtype) if fweights is not None or aweights is not None: w_sum = w.sum() if w_sum.item() == 0: raise ValueError("The sum of weights is zero, can't be normalized.") if w is not None: nx_w = nx * w avg = (nx_w).sum(axis=1) / w_sum else: avg = nx.sum(axis=1) / w_sum nx_w = nx if w is not None and aweights is not None and ddof == True: norm_factor = w_sum - (w * aweights).sum() / w_sum else: norm_factor = w_sum - ddof if norm_factor <= 0: norm_factor = paddle.to_tensor(0, dtype=nx.dtype) nx = nx - avg.unsqueeze(1) xxt = paddle.mm(nx, nx_w.t().conj()) cov = paddle.divide(xxt, norm_factor).squeeze() return cov def t(input, name=None): """ Transpose <=2-D tensor. 0-D and 1-D tensors are returned as it is and 2-D tensor is equal to the paddle.transpose function which perm dimensions set 0 and 1. Args: input (Tensor): The input Tensor. It is a N-D (N<=2) Tensor of data types float16, float32, float64, int32. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` Returns: Tensor: A transposed n-D Tensor, with data type being float16, float32, float64, int32, int64. For Example: .. 
code-block:: text # Example 1 (0-D tensor) x = tensor([0.79]) paddle.t(x) = tensor([0.79]) # Example 2 (1-D tensor) x = tensor([0.79, 0.84, 0.32]) paddle.t(x) = tensor([0.79, 0.84, 0.32]) # Example 3 (2-D tensor) x = tensor([0.79, 0.84, 0.32], [0.64, 0.14, 0.57]) paddle.t(x) = tensor([0.79, 0.64], [0.84, 0.14], [0.32, 0.57]) Examples: .. code-block:: python import paddle x = paddle.ones(shape=[2, 3], dtype='int32') x_transposed = paddle.t(x) print(x_transposed.shape) # [3, 2] """ if len(input.shape) > 2: raise ValueError( "Input(input) only support N-D (N<=2) tensor, but received " "length of Input(input) is %s. Perhaps you can use paddle." "tensor.transpose() instead." % len(input.shape)) if paddle.in_dynamic_mode(): if len(input.shape) == 1: return input # 2-D tensor perm = [1, 0] out, _ = _C_ops.transpose2(input, 'axis', perm) return out check_variable_and_dtype( input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'], 'transpose') helper = LayerHelper('t', **locals()) out = helper.create_variable_for_type_inference(input.dtype) input_shape = helper.create_variable_for_type_inference(input.dtype) if len(input.shape) == 1: out = input else: helper.append_op( type='transpose2', inputs={'X': [input]}, outputs={'Out': [out], 'XShape': [input_shape]}, attrs={'axis': [1, 0]}) return out def cross(x, y, axis=None, name=None): """ Computes the cross product between two tensors along an axis. Inputs must have the same shape, and the length of their axes should be equal to 3. If `axis` is not given, it defaults to the first axis found with the length 3. Args: x (Tensor): The first input tensor. y (Tensor): The second input tensor. axis (int, optional): The axis along which to compute the cross product. It defaults to the first axis found with the length 3. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor. A Tensor with same data type as `x`. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0], [3.0, 3.0, 3.0]]) y = paddle.to_tensor([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]) z1 = paddle.cross(x, y) # [[-1. -1. -1.] # [ 2. 2. 2.] # [-1. -1. -1.]] z2 = paddle.cross(x, y, axis=1) # [[0. 0. 0.] # [0. 0. 0.] # [0. 0. 0.]] """ if in_dygraph_mode(): return _C_ops.final_state_cross(x, y, axis) else: if _in_legacy_dygraph(): if axis is not None: return _C_ops.cross(x, y, 'dim', axis) else: return _C_ops.cross(x, y) else: helper = LayerHelper("cross", **locals()) out = helper.create_variable_for_type_inference(x.dtype) attrs = dict() attrs['dim'] = axis helper.append_op( type='cross', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs=attrs) return out def cholesky(x, upper=False, name=None): r""" Computes the Cholesky decomposition of one symmetric positive-definite matrix or batches of symmetric positive-definite matrice. If `upper` is `True`, the decomposition has the form :math:`A = U^{T}U` , and the returned matrix :math:`U` is upper-triangular. Otherwise, the decomposition has the form :math:`A = LL^{T}` , and the returned matrix :math:`L` is lower-triangular. Args: x (Tensor): The input tensor. Its shape should be `[*, M, M]`, where * is zero or more batch dimensions, and matrices on the inner-most 2 dimensions all should be symmetric positive-definite. Its data type should be float32 or float64. upper (bool): The flag indicating whether to return upper or lower triangular matrices. Default: False. 
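# Illustrative sketch (not part of the library source): for a symmetric
# positive-definite input, the returned lower factor L should reconstruct the
# input as L @ L.T. The values below are a usage assumption shown for
# clarification only.
import paddle
spd = paddle.to_tensor([[4., 2.], [2., 3.]])
factor = paddle.linalg.cholesky(spd, upper=False)
reconstructed = paddle.matmul(factor, factor, transpose_y=True)
# reconstructed should match spd up to floating-point error.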
Returns: Tensor: A Tensor with same shape and data type as `x`. It represents \ triangular matrices generated by Cholesky decomposition. Examples: .. code-block:: python import paddle import numpy as np a = np.random.rand(3, 3) a_t = np.transpose(a, [1, 0]) x_data = np.matmul(a, a_t) + 1e-03 x = paddle.to_tensor(x_data) out = paddle.linalg.cholesky(x, upper=False) print(out) # [[1.190523 0. 0. ] # [0.9906703 0.27676893 0. ] # [1.25450498 0.05600871 0.06400121]] """ if paddle.in_dynamic_mode(): return _C_ops.cholesky(x, "upper", upper) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cholesky') check_type(upper, 'upper', bool, 'cholesky') helper = LayerHelper('cholesky', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='cholesky', inputs={'X': [x]}, outputs={'Out': out}, attrs={'upper': upper}) return out def matrix_rank(x, tol=None, hermitian=False, name=None): r""" Computes the rank of a matrix. The rank of a matrix is the number of singular values that are greater than the specified `tol` threshold when hermitian=False, or the number of eigenvalues in absolute value that are greater than the specified `tol` threshold when hermitian=True. Args: x (Tensor): The input tensor. Its shape should be `[..., m, n]`, where `...` is zero or more batch dimensions. If `x` is a batch of matrices then the output has the same batch dimensions. The data type of `x` should be float32 or float64. tol (float,Tensor,optional): the tolerance value. Default: None. If `tol` is not specified, and `sigma` is the largest singular value (or eigenvalues in absolute value), and `eps` is the epsilon value for the dtype of `x`, then `tol` is computed with formula `tol=sigma * max(m,n) * eps`. Note that if `x` is a batch of matrices, `tol` is computed this way for every batch. hermitian (bool,optional): indicates whether `x` is Hermitian. Default: False. When hermitian=True, `x` is assumed to be Hermitian, enabling a more efficient method for finding eigenvalues, but `x` is not checked inside the function. Instead, We just use the lower triangular of the matrix to compute. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: Rank of tensor x. Examples: .. 
code-block:: python import paddle a = paddle.eye(10) b = paddle.linalg.matrix_rank(a) print(b) # b = [10] c = paddle.ones(shape=[3, 4, 5, 5]) d = paddle.linalg.matrix_rank(c, tol=0.01, hermitian=True) print(d) # d = [[1, 1, 1, 1], # [1, 1, 1, 1], # [1, 1, 1, 1]] """ if paddle.in_dynamic_mode(): if tol is None: tol_tensor = None tol_attr = 0.0 use_default_tol = True elif isinstance(tol, Variable): if tol.dtype != x.dtype: tol_tensor = cast(tol, x.dtype) else: tol_tensor = tol tol_attr = 0.0 use_default_tol = False else: tol_tensor = None tol_attr = float(tol) use_default_tol = False return _C_ops.matrix_rank(x, tol_tensor, "tol", tol_attr, 'hermitian', hermitian, 'use_default_tol', use_default_tol) inputs = {} attrs = {} check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'matrix_rank') inputs['X'] = x if tol is None: attrs['use_default_tol'] = True elif isinstance(tol, Variable): check_variable_and_dtype(tol, 'tol', ['float32'], 'matrix_rank') attrs['use_default_tol'] = False if tol.dtype != x.dtype: inputs['TolTensor'] = cast(tol, x.dtype) else: inputs['TolTensor'] = tol else: check_type(tol, 'tol', float, 'matrix_rank') attrs['use_default_tol'] = False attrs['tol'] = tol check_type(hermitian, 'hermitian', bool, 'matrix_rank') attrs['hermitian'] = hermitian helper = LayerHelper('matrix_rank', **locals()) out = helper.create_variable_for_type_inference(dtype='int32') helper.append_op( type='matrix_rank', inputs=inputs, outputs={'Out': out}, attrs=attrs) return out def bmm(x, y, name=None): """ Applies batched matrix multiplication to two tensors. Both of the two input tensors must be three-dementional and share the same batch size. if x is a (b, m, k) tensor, y is a (b, k, n) tensor, the output will be a (b, m, n) tensor. Args: x (Tensor): The input Tensor. y (Tensor): The input Tensor. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The product Tensor. Examples: .. code-block:: python import paddle # In imperative mode: # size x: (2, 2, 3) and y: (2, 3, 2) x = paddle.to_tensor([[[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]], [[3.0, 3.0, 3.0], [4.0, 4.0, 4.0]]]) y = paddle.to_tensor([[[1.0, 1.0],[2.0, 2.0],[3.0, 3.0]], [[4.0, 4.0],[5.0, 5.0],[6.0, 6.0]]]) out = paddle.bmm(x, y) #output size: (2, 2, 2) #output value: #[[[6.0, 6.0],[12.0, 12.0]],[[45.0, 45.0],[60.0, 60.0]]] out_np = out.numpy() """ x_shape = x.shape y_shape = y.shape if not len(x_shape) == len(y_shape) == 3: raise ValueError( "x and y should be 3-dimensional. But received x's dimention: {}, y's dimention: {}". format(x_shape, y_shape)) if x_shape[2] != y_shape[1]: raise ValueError( "x's width must be equal with y's height. But received x's shape: {}, y's shape: {}". format(x_shape, y_shape)) if x_shape[0] != y_shape[0]: raise ValueError( "x's batch (shape[0]) must be equal with y's batch (shape[0]). But received x's shape: {}, y's shape: {}". format(x_shape, y_shape)) if paddle.in_dynamic_mode(): return _C_ops.bmm(x, y) helper = LayerHelper('bmm', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type='bmm', inputs={'X': x, 'Y': y}, outputs={'Out': out}) return out def histogram(input, bins=100, min=0, max=0, name=None): """ Computes the histogram of a tensor. The elements are sorted into equal width bins between min and max. If min and max are both zero, the minimum and maximum values of the data are used. Args: input (Tensor): A Tensor(or LoDTensor) with shape :math:`[N_1, N_2,..., N_k]` . 
The data type of the input Tensor should be float32, float64, int32, int64. bins (int): number of histogram bins min (int): lower end of the range (inclusive) max (int): upper end of the range (inclusive) Returns: Tensor: data type is int64, shape is (nbins,). Examples: .. code-block:: python import paddle inputs = paddle.to_tensor([1, 2, 1]) result = paddle.histogram(inputs, bins=4, min=0, max=3) print(result) # [0, 2, 1, 0] """ if paddle.in_dynamic_mode(): return _C_ops.histogram(input, "bins", bins, "min", min, "max", max) helper = LayerHelper('histogram', **locals()) check_variable_and_dtype( input, 'X', ['int32', 'int64', 'float32', 'float64'], 'histogram') out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64) helper.append_op( type='histogram', inputs={'X': input}, outputs={'Out': out}, attrs={'bins': bins, 'min': min, 'max': max}) return out def bincount(x, weights=None, minlength=0, name=None): """ Computes frequency of each value in the input tensor. Args: x (Tensor): A Tensor with non-negative integer. Should be 1-D tensor. weights (Tensor, optional): Weight for each value in the input tensor. Should have the same shape as input. Default is None. minlength (int, optional): Minimum number of bins. Should be non-negative integer. Default is 0. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor of frequency. Examples: .. code-block:: python import paddle x = paddle.to_tensor([1, 2, 1, 4, 5]) result1 = paddle.bincount(x) print(result1) # [0, 2, 1, 0, 1, 1] w = paddle.to_tensor([2.1, 0.4, 0.1, 0.5, 0.5]) result2 = paddle.bincount(x, weights=w) print(result2) # [0., 2.19999981, 0.40000001, 0., 0.50000000, 0.50000000] """ if x.dtype not in [paddle.int32, paddle.int64]: raise TypeError("Elements in Input(x) should all be integers") if paddle.in_dynamic_mode(): return _C_ops.bincount(x, weights, "minlength", minlength) helper = LayerHelper('bincount', **locals()) check_variable_and_dtype(x, 'X', ['int32', 'int64'], 'bincount') if weights is not None: check_variable_and_dtype(weights, 'Weights', ['int32', 'int64', 'float32', 'float64'], 'bincount') out = helper.create_variable_for_type_inference(dtype=weights.dtype) else: out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='bincount', inputs={'X': x, 'Weights': weights}, outputs={'Out': out}, attrs={'minlength': minlength}) return out def mv(x, vec, name=None): """ Performs a matrix-vector product of the matrix x and the vector vec. Args: x (Tensor): A tensor with shape :math:`[M, N]` , The data type of the input Tensor x should be one of float32, float64. vec (Tensor): A tensor with shape :math:`[N]` , The data type of the input Tensor x should be one of float32, float64. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor which is producted by x and vec. Examples: .. 
code-block:: python # x: [M, N], vec: [N] # paddle.mv(x, vec) # out: [M] import numpy as np import paddle x_data = np.array([[2, 1, 3], [3, 0, 1]]).astype("float64") x = paddle.to_tensor(x_data) vec_data = np.array([3, 5, 1]) vec = paddle.to_tensor(vec_data).astype("float64") out = paddle.mv(x, vec) """ if in_dygraph_mode(): return _C_ops.final_state_mv(x, vec) else: if _in_legacy_dygraph(): out = _C_ops.mv(x, vec) return out else: def __check_input(x, vec): var_names = {'x': x, 'vec': vec} for name, val in var_names.items(): check_variable_and_dtype(val, name, ['float32', 'float64'], 'mv') x_shape = list(x.shape) vec_shape = list(vec.shape) if len(x_shape) != 2: raise ValueError( "x should be 2-dimensional. But received x's dimention: {}". format(x_shape)) if len(vec_shape) != 1: raise ValueError( "vec should be 1-dimensional. But received vec's dimention: {}". format(vec_shape)) __check_input(x, vec) helper = LayerHelper('mv', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='mv', inputs={'X': x, 'Vec': vec}, outputs={'Out': out}) return out def det(x, name=None): """ Calculates determinant value of a square matrix or batches of square matrices. Args: x (Tensor): input (Tensor): the input matrix of size `(n, n)` or the batch of matrices of size `(*, n, n)` where `*` is one or more batch dimensions. Returns: y (Tensor):the determinant value of a square matrix or batches of square matrices. Examples: .. code-block:: python import paddle x = paddle.randn([3,3,3]) A = paddle.linalg.det(x) print(A) # [ 0.02547996, 2.52317095, -6.15900707]) """ if paddle.in_dynamic_mode(): return _C_ops.determinant(x) check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'det') input_shape = list(x.shape) assert len(input_shape) >= 2, \ "The x must be at least 2-dimensional, " \ "but received Input x's dimensional: %s.\n" % \ len(input_shape) assert (input_shape[-1] == input_shape[-2]), \ "Expect squared input," \ "but received %s by %s matrix.\n" \ %(input_shape[-2], input_shape[-1]) \ helper = LayerHelper('determinant', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='determinant', inputs={'Input': [x]}, outputs={'Out': [out]}) return out def slogdet(x, name=None): """ Calculates the sign and natural logarithm of the absolute value of a square matrix's or batches square matrices' determinant. The determinant can be computed with ``sign * exp(logabsdet) Supports input of float, double Note that for matrices that have zero determinant, this returns ``(0, -inf)`` Args: x (Tensor): the batch of matrices of size :math:`(*, n, n)` where math:`*` is one or more batch dimensions. Returns: y (Tensor): A tensor containing the sign of the determinant and the natural logarithm of the absolute value of determinant, respectively. Examples: .. code-block:: python import paddle x = paddle.randn([3,3,3]) A = paddle.linalg.slogdet(x) print(A) # [[ 1. , 1. , -1. 
], # [-0.98610914, -0.43010661, -0.10872950]]) """ if paddle.in_dynamic_mode(): return _C_ops.slogdeterminant(x) check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'slogdet') input_shape = list(x.shape) assert len(input_shape) >= 2, \ "The x must be at least 2-dimensional, " \ "but received Input x's dimension: %s.\n" % \ len(input_shape) assert (input_shape[-1] == input_shape[-2]), \ "Expect squared input," \ "but received %s by %s matrix.\n" \ %(input_shape[-2], input_shape[-1]) \ helper = LayerHelper('slogdeterminant', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='slogdeterminant', inputs={'Input': [x]}, outputs={'Out': [out]}) return out def svd(x, full_matrices=False, name=None): r""" Computes the singular value decomposition of one matrix or a batch of regular matrices. Let :math:`X` be the input matrix or a batch of input matrices, the output should satisfy: .. math:: X = U * diag(S) * VT Args: x (Tensor): The input tensor. Its shape should be `[..., N, M]`, where `...` is zero or more batch dimensions. N and M can be arbitrary positive numbers. Note that if x contains singular matrices, the gradient is numerically unstable. The data type of x should be float32 or float64. full_matrices (bool): A flag to control the behavior of svd. If full_matrices = True, svd op will compute full U and V matrices, which means shape of U is `[..., N, N]`, shape of V is `[..., M, M]`. K = min(M, N). If full_matrices = False, svd op will use an economical method to store U and V, which means shape of U is `[..., N, K]`, shape of V is `[..., M, K]`. K = min(M, N). name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tuple of 3 tensors: (U, S, VH). VH is the conjugate transpose of V. S is the singular value vector of the matrices with shape `[..., K]` Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [1.0, 3.0], [4.0, 6.0]]).astype('float64') x = x.reshape([3, 2]) u, s, vh = paddle.linalg.svd(x) print (u) #U = [[ 0.27364809, -0.21695147 ], # [ 0.37892198, -0.87112408 ], # [ 0.8840446 , 0.44053933 ]] print (s) #S = [8.14753743, 0.78589688] print (vh) #VT= [[ 0.51411221, 0.85772294], # [ 0.85772294, -0.51411221]] # one can verify : U * S * VT == X # U * UH == I # V * VH == I """ if paddle.in_dynamic_mode(): return _C_ops.svd(x, 'full_matrices', full_matrices) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'svd') check_type(full_matrices, 'full_matrices', bool, 'svd') helper = LayerHelper('svd', **locals()) u = helper.create_variable_for_type_inference(dtype=x.dtype) vh = helper.create_variable_for_type_inference(dtype=x.dtype) s = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['full_matrices'] = full_matrices helper.append_op( type='svd', inputs={'X': [x]}, outputs={'U': u, 'VH': vh, 'S': s}, attrs=attrs, ) return u, s, vh def matrix_power(x, n, name=None): r""" Computes the n-th power of a square matrix or a batch of square matrices. Let :math:`X` be a square matrix or a batch of square matrices, :math:`n` be an exponent, the equation should be: .. math:: Out = X ^ {n} Specifically, - If `n > 0`, it returns the matrix or a batch of matrices raised to the power of `n`. - If `n = 0`, it returns the identity matrix or a batch of identity matrices. - If `n < 0`, it returns the inverse of each matrix (if invertible) raised to the power of `abs(n)`.
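# Illustrative sketch (not part of the library source): the three cases above
# can be checked numerically, e.g. matrix_power(m, 3) should equal m @ m @ m
# and matrix_power(m, -1) should equal paddle.inverse(m). The values below are
# a usage assumption for clarification only.
import paddle
m = paddle.to_tensor([[2., 1.], [1., 2.]])
p3 = paddle.linalg.matrix_power(m, 3)
pm1 = paddle.linalg.matrix_power(m, -1)
# p3 should match m @ m @ m, and pm1 should match paddle.inverse(m).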
Args: x (Tensor): A square matrix or a batch of square matrices to be raised to power `n`. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. n (int): The exponent. It can be any positive, negative integer or zero. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The n-th power of the matrix (or the batch of matrices) `x`. Its data type should be the same as that of `x`. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1, 2, 3], [1, 4, 9], [1, 8, 27]], dtype='float64') print(paddle.linalg.matrix_power(x, 2)) # [[6. , 34. , 102.], # [14. , 90. , 282.], # [36. , 250., 804.]] print(paddle.linalg.matrix_power(x, 0)) # [[1., 0., 0.], # [0., 1., 0.], # [0., 0., 1.]] print(paddle.linalg.matrix_power(x, -2)) # [[ 12.91666667, -12.75000000, 2.83333333 ], # [-7.66666667 , 8. , -1.83333333 ], # [ 1.80555556 , -1.91666667 , 0.44444444 ]] """ if paddle.in_dynamic_mode(): return _C_ops.matrix_power(x, "n", n) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'matrix_power') check_type(n, 'n', int, 'matrix_power') helper = LayerHelper('matrix_power', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matrix_power', inputs={'X': x}, outputs={'Out': out}, attrs={'n': n}) return out def qr(x, mode="reduced", name=None): r""" Computes the QR decomposition of one matrix or batches of matrice (backward is unsupported now). Args: x (Tensor): The input tensor. Its shape should be `[..., M, N]`, where ... is zero or more batch dimensions. M and N can be arbitrary positive number. The data type of x should be float32 or float64. mode (str, optional): A flag to control the behavior of qr, the default is "reduced". Suppose x's shape is `[..., M, N]` and denoting `K = min(M, N)`: If mode = "reduced", qr op will return reduced Q and R matrices, which means Q's shape is `[..., M, K]` and R's shape is `[..., K, N]`. If mode = "complete", qr op will return complete Q and R matrices, which means Q's shape is `[..., M, M]` and R's shape is `[..., M, N]`. If mode = "r", qr op will only return reduced R matrix, which means R's shape is `[..., K, N]`. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: If mode = "reduced" or mode = "complete", qr will return a two tensor-tuple, which represents Q and R. If mode = "r", qr will return a tensor which represents R. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') q, r = paddle.linalg.qr(x) print (q) print (r) # Q = [[-0.16903085, 0.89708523], # [-0.50709255, 0.27602622], # [-0.84515425, -0.34503278]]) # R = [[-5.91607978, -7.43735744], # [ 0. 
, 0.82807867]]) # one can verify : X = Q * R ; """ if paddle.in_dynamic_mode(): q, r = _C_ops.qr(x, 'mode', mode) if mode == "r": return r else: return q, r check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'qr') check_type(mode, 'mode', str, 'qr') helper = LayerHelper('qr', **locals()) q = helper.create_variable_for_type_inference(dtype=x.dtype) r = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['mode'] = mode helper.append_op( type='qr', inputs={'X': [x]}, outputs={'Q': q, 'R': r}, attrs=attrs) if mode == "r": return r else: return q, r def lu(x, pivot=True, get_infos=False, name=None): r""" Computes the LU factorization of an N-D(N>=2) matrix x. Returns the LU factorization(inplace x) and Pivots. low triangular matrix L and upper triangular matrix U are combined to a single LU matrix. Pivoting is done if pivot is set to True. P mat can be get by pivots: # ones = eye(rows) #eye matrix of rank rows # for i in range(cols): # swap(ones[i], ones[pivots[i]]) # return ones Args: X (Tensor): the tensor to factor of N-dimensions(N>=2). pivot (bool, optional): controls whether pivoting is done. Default: True. get_infos (bool, optional): if set to True, returns an info IntTensor. Default: False. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: factorization (Tensor): LU matrix, the factorization of input X. pivots (IntTensor): the pivots of size(∗(N-2), min(m,n)). `pivots` stores all the intermediate transpositions of rows. The final permutation `perm` could be reconstructed by this, details refer to upper example. infos (IntTensor, optional): if `get_infos` is `True`, this is a tensor of size (∗(N-2)) where non-zero values indicate whether factorization for the matrix or each minibatch has succeeded or failed. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') lu,p,info = paddle.linalg.lu(x, get_infos=True) # >>> lu: # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0.20000000, 0.80000000], # [0.60000000, 0.50000000]]) # >>> p # Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # [3, 3]) # >>> info # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # 0) P,L,U = paddle.linalg.lu_unpack(lu,p) # >>> P # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[0., 1., 0.], # [0., 0., 1.], # [1., 0., 0.]]), # >>> L # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[1. , 0. ], # [0.20000000, 1. ], # [0.60000000, 0.50000000]]), # >>> U # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0. 
, 0.80000000]])) # one can verify : X = P @ L @ U ; """ if paddle.in_dynamic_mode(): LU, Piv, Info = _C_ops.lu(x, 'pivots', pivot) if get_infos: return LU, Piv, Info else: return LU, Piv check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu') helper = LayerHelper('lu', **locals()) lu = helper.create_variable_for_type_inference(dtype=x.dtype) p = helper.create_variable_for_type_inference(dtype='int') info = helper.create_variable_for_type_inference(dtype='int') attrs = dict() attrs['pivots'] = pivot helper.append_op( type='lu', inputs={'X': x}, outputs={'Out': lu, 'Pivots': p, 'Infos': info}, attrs=attrs) if get_infos: return lu, p, info else: return lu, p def lu_unpack(x, y, unpack_ludata=True, unpack_pivots=True, name=None): r""" Unpack L U and P to single matrix tensor . unpack L and U matrix from LU, unpack permutation matrix P from Pivtos . P mat can be get by pivots: # ones = eye(rows) #eye matrix of rank rows # for i in range(cols): # swap(ones[i], ones[pivots[i]]) Args: x (Tensor): The LU tensor get from paddle.linalg.lu, which is combined by L and U. y (Tensor): Pivots get from paddle.linalg.lu. unpack_ludata (bool,optional): whether to unpack L and U from x. Default: True. unpack_pivots (bool, optional): whether to unpack permutation matrix P from Pivtos. Default: True. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: P (Tensor): Permutation matrix P of lu factorization. L (Tensor): The lower triangular matrix tensor of lu factorization. U (Tensor): The upper triangular matrix tensor of lu factorization. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') lu,p,info = paddle.linalg.lu(x, get_infos=True) # >>> lu: # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0.20000000, 0.80000000], # [0.60000000, 0.50000000]]) # >>> p # Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # [3, 3]) # >>> info # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # 0) P,L,U = paddle.linalg.lu_unpack(lu,p) # >>> P # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[0., 1., 0.], # [0., 0., 1.], # [1., 0., 0.]]), # >>> L # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[1. , 0. ], # [0.20000000, 1. ], # [0.60000000, 0.50000000]]), # >>> U # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0. , 0.80000000]])) # one can verify : X = P @ L @ U ; """ if paddle.in_dynamic_mode(): P, L, U = _C_ops.lu_unpack(x, y, 'unpack_ludata', unpack_ludata, 'unpack_pivots', unpack_pivots) return P, L, U check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu_unpack') helper = LayerHelper('lu_unpack', **locals()) p = helper.create_variable_for_type_inference(dtype=x.dtype) l = helper.create_variable_for_type_inference(dtype=x.dtype) u = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['unpack_ludata'] = unpack_ludata attrs['unpack_pivots'] = unpack_pivots helper.append_op( type='lu_unpack', inputs={'X': x, 'Pivots': y}, outputs={'Pmat': p, 'L': l, 'U': u}, attrs=attrs) return p, l, u def eig(x, name=None): """ This API performs the eigenvalue decomposition of a square matrix or a batch of square matrices. .. 
note:: If the matrix is a Hermitian or a real symmetric matrix, please use :ref:`paddle.linalg.eigh` instead, which is much faster. If only eigenvalues is needed, please use :ref:`paddle.linalg.eigvals` instead. If the matrix is of any shape, please use :ref:`paddle.linalg.svd`. This API is only supported on CPU device. The output datatype is always complex for both real and complex input. Args: x (Tensor): A tensor with shape math:`[*, N, N]`, The data type of the x should be one of ``float32``, ``float64``, ``compplex64`` or ``complex128``. name (str, optional): The default value is `None`. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Eigenvalues(Tensors): A tensor with shape math:`[*, N]` refers to the eigen values. Eigenvectors(Tensors): A tensor with shape math:`[*, N, N]` refers to the eigen vectors. Examples: .. code-block:: python import paddle import numpy as np paddle.device.set_device("cpu") x_data = np.array([[1.6707249, 7.2249975, 6.5045543], [9.956216, 8.749598, 6.066444 ], [4.4251957, 1.7983172, 0.370647 ]]).astype("float32") x = paddle.to_tensor(x_data) w, v = paddle.linalg.eig(x) print(w) # Tensor(shape=[3, 3], dtype=complex128, place=CPUPlace, stop_gradient=False, # [[(-0.5061363550800655+0j) , (-0.7971760990842826+0j) , # (0.18518077798279986+0j)], # [(-0.8308237755993192+0j) , (0.3463813401919749+0j) , # (-0.6837005269141947+0j) ], # [(-0.23142567697893396+0j), (0.4944999840400175+0j) , # (0.7058765252952796+0j) ]]) print(v) # Tensor(shape=[3], dtype=complex128, place=CPUPlace, stop_gradient=False, # [ (16.50471283351188+0j) , (-5.5034820550763515+0j) , # (-0.21026087843552282+0j)]) """ if paddle.in_dynamic_mode(): w, v = _C_ops.eig(x) return w, v check_variable_and_dtype( x, 'X', ['float32', 'float64', 'complex64', 'complex128'], 'eig') helper = LayerHelper('eig', **locals()) w = helper.create_variable_for_type_inference(x.dtype) v = helper.create_variable_for_type_inference(x.dtype) inputs = {'X': x} outputs = {'Eigenvalues': w, 'Eigenvectors': v} helper.append_op(type='eig', inputs=inputs, outputs=outputs) return w, v def eigvals(x, name=None): """ Compute the eigenvalues of one or more general matrices. Warning: The gradient kernel of this operator does not yet developed. If you need back propagation through this operator, please replace it with paddle.linalg.eig. Args: x (Tensor): A square matrix or a batch of square matrices whose eigenvalues will be computed. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32, float64, complex64, or complex128. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: A tensor containing the unsorted eigenvalues which has the same batch dimensions with `x`. The eigenvalues are complex-valued even when `x` is real. Examples: .. 
code-block:: python import paddle paddle.set_device("cpu") paddle.seed(1234) x = paddle.rand(shape=[3, 3], dtype='float64') # [[0.02773777, 0.93004224, 0.06911496], # [0.24831591, 0.45733623, 0.07717843], # [0.48016702, 0.14235102, 0.42620817]]) print(paddle.linalg.eigvals(x)) # [(-0.27078833542132674+0j), (0.29962280156230725+0j), (0.8824477020120244+0j)] #complex128 """ check_variable_and_dtype(x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigvals') x_shape = list(x.shape) if len(x_shape) < 2: raise ValueError( "The dimension of Input(x) should be at least 2, but received x's dimention = {}, x's shape = {}". format(len(x_shape), x_shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The last two dimensions of Input(x) should be equal, but received x's shape = {}". format(x_shape)) if paddle.in_dynamic_mode(): return _C_ops.eigvals(x) helper = LayerHelper('eigvals', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type='eigvals', inputs={'X': x}, outputs={'Out': out}) return out def multi_dot(x, name=None): """ Multi_dot is an operator that calculates multiple matrix multiplications. Supports inputs of float16(only GPU support), float32 and float64 dtypes. This function does not support batched inputs. The input tensor in [x] must be 2-D except for the first and last can be 1-D. If the first tensor is a 1-D vector of shape(n, ) it is treated as row vector of shape(1, n), similarly if the last tensor is a 1D vector of shape(n, ), it is treated as a column vector of shape(n, 1). If the first and last tensor are 2-D matrix, then the output is also 2-D matrix, otherwise the output is a 1-D vector. Multi_dot will select the lowest cost multiplication order for calculation. The cost of multiplying two matrices with shapes (a, b) and (b, c) is a * b * c. Given matrices A, B, C with shapes (20, 5), (5, 100), (100, 10) respectively, we can calculate the cost of different multiplication orders as follows: - Cost((AB)C) = 20x5x100 + 20x100x10 = 30000 - Cost(A(BC)) = 5x100x10 + 20x5x10 = 6000 In this case, multiplying B and C first, then multiply A, which is 5 times faster than sequential calculation. Args: x ([Tensor]): The input tensors which is a list Tensor. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The output Tensor. Examples: .. 
code-block:: python import paddle import numpy as np # A * B A_data = np.random.random([3, 4]).astype(np.float32) B_data = np.random.random([4, 5]).astype(np.float32) A = paddle.to_tensor(A_data) B = paddle.to_tensor(B_data) out = paddle.linalg.multi_dot([A, B]) print(out.numpy().shape) # [3, 5] # A * B * C A_data = np.random.random([10, 5]).astype(np.float32) B_data = np.random.random([5, 8]).astype(np.float32) C_data = np.random.random([8, 7]).astype(np.float32) A = paddle.to_tensor(A_data) B = paddle.to_tensor(B_data) C = paddle.to_tensor(C_data) out = paddle.linalg.multi_dot([A, B, C]) print(out.numpy().shape) # [10, 7] """ if paddle.in_dynamic_mode(): return _C_ops.multi_dot(x) check_type(x, 'x', (list, tuple), 'multi_dot') for id, item in enumerate(x): check_variable_and_dtype(item, 'x[' + str(id) + ']', ['float16', 'float32', 'float64'], 'multi_dot') if item.dtype != x[0].dtype: raise TypeError( "All the Tensors in the input must have the same data type.") helper = LayerHelper('multi_dot', **locals()) dtype = helper.input_dtype(input_param_name='x') out = helper.create_variable_for_type_inference(dtype) helper.append_op(type='multi_dot', inputs={"X": x}, outputs={"Out": out}) return out def eigh(x, UPLO='L', name=None): """ Compute the eigenvalues and eigenvectors of a complex Hermitian (conjugate symmetric) or a real symmetric matrix. Args: x (Tensor): A tensor with shape :math:`[*, N, N]` , The data type of the input Tensor x should be one of float32, float64, complex64, complex128. UPLO(str, optional): (string, default 'L'), 'L' represents the lower triangular matrix, "'U' represents the upper triangular matrix.". name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: out_value(Tensor): A Tensor with shape [*, N] and data type of float32 and float64. The eigenvalues of eigh op. out_vector(Tensor): A Tensor with shape [*, N, N] and data type of float32,float64,complex64 and complex128. The eigenvectors of eigh op. Examples: .. code-block:: python import numpy as np import paddle x_data = np.array([[1, -2j], [2j, 5]]) x = paddle.to_tensor(x_data) out_value, out_vector = paddle.linalg.eigh(x, UPLO='L') print(out_value) #[0.17157288, 5.82842712] print(out_vector) #[(-0.9238795325112867+0j), (-0.3826834323650898+0j)], #[ 0.3826834323650898j , -0.9238795325112867j ]] """ if paddle.in_dynamic_mode(): return _C_ops.eigh(x, 'UPLO', UPLO) def __check_input(x, UPLO): x_shape = list(x.shape) if len(x.shape) < 2: raise ValueError( "Input(input) only support >=2 tensor, but received " "length of Input(input) is %s." % len(x.shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The input matrix must be batches of square matrices. But received x's dimention: {}". format(x_shape)) if UPLO != 'L' and UPLO != 'U': raise ValueError( "UPLO must be L or U. 
But received UPLO is: {}".format(UPLO)) __check_input(x, UPLO) helper = LayerHelper('eigh', **locals()) check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigh') out_value = helper.create_variable_for_type_inference(dtype=x.dtype) out_vector = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='eigh', inputs={'X': x}, outputs={'Eigenvalues': out_value, 'Eigenvectors': out_vector}, attrs={'UPLO': UPLO}) return out_value, out_vector def pinv(x, rcond=1e-15, hermitian=False, name=None): r""" Calculate pseudo inverse via SVD(singular value decomposition) of one matrix or batches of regular matrix. .. math:: if hermitian == False: x = u * s * vt (SVD) out = v * 1/s * ut else: x = u * s * ut (eigh) out = u * 1/s * u.conj().transpose(-2,-1) If x is hermitian or symmetric matrix, svd will be replaced with eigh. Args: x(Tensor): The input tensor. Its shape should be (*, m, n) where * is zero or more batch dimensions. m and n can be arbitraty positive number. The data type of x should be float32 or float64 or complex64 or complex128. When data type is complex64 or cpmplex128, hermitian should be set True. rcond(Tensor, optional): the tolerance value to determine when is a singular value zero. Defalut:1e-15. hermitian(bool, optional): indicates whether x is Hermitian if complex or symmetric if real. Default: False. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The tensor with same data type with x. it represents pseudo inverse of x. Its shape should be (*, n, m). Examples: .. code-block:: python import paddle x = paddle.arange(15).reshape((3, 5)).astype('float64') input = paddle.to_tensor(x) out = paddle.linalg.pinv(input) print(input) print(out) # input: # [[0. , 1. , 2. , 3. , 4. ], # [5. , 6. , 7. , 8. , 9. 
], # [10., 11., 12., 13., 14.]] # out: # [[-0.22666667, -0.06666667, 0.09333333], # [-0.12333333, -0.03333333, 0.05666667], # [-0.02000000, 0.00000000, 0.02000000], # [ 0.08333333, 0.03333333, -0.01666667], # [ 0.18666667, 0.06666667, -0.05333333]] # one can verify : x * out * x = x ; # or out * x * out = x ; """ if paddle.in_dynamic_mode(): if not hermitian: # combine svd and matmul op u, s, vt = _C_ops.svd(x, 'full_matrices', False) max_singular_val = _C_ops.reduce_max(s, 'dim', [-1], 'keep_dim', True, \ 'reduce_all', False) rcond = paddle.to_tensor(rcond, dtype=x.dtype) cutoff = rcond * max_singular_val y = float('inf') y = paddle.to_tensor(y, dtype=x.dtype) condition = s > cutoff cond_int = layers.cast(condition, s.dtype) cond_not_int = layers.cast(layers.logical_not(condition), s.dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st, _ = _C_ops.unsqueeze2(singular, 'axes', [-2]) dims = list(range(len(vt.shape))) perm = dims[:-2] + [dims[-1]] + [dims[-2]] v, _ = _C_ops.transpose2(vt, 'axis', perm) out_1 = v * st out_2 = _C_ops.matmul_v2(out_1, u, 'trans_x', False, 'trans_y', True) return out_2 else: # combine eigh and matmul op s, u = _C_ops.eigh(x, 'UPLO', 'L') s_abs = paddle.abs(s) max_singular_val = _C_ops.reduce_max(s_abs, 'dim', [-1], 'keep_dim', True, \ 'reduce_all', False) rcond = paddle.to_tensor(rcond, dtype=s.dtype) cutoff = rcond * max_singular_val y = float('inf') y = paddle.to_tensor(y, dtype=s.dtype) condition = s_abs > cutoff cond_int = layers.cast(condition, s.dtype) cond_not_int = layers.cast(layers.logical_not(condition), s.dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st, _ = _C_ops.unsqueeze2(singular, 'axes', [-2]) out_1 = u * st u_conj = _C_ops.conj(u) out_2 = _C_ops.matmul_v2(out_1, u_conj, 'trans_x', False, 'trans_y', True) return out_2 else: if not hermitian: helper = LayerHelper('pinv', **locals()) dtype = x.dtype check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'pinv') u = helper.create_variable_for_type_inference(dtype) s = helper.create_variable_for_type_inference(dtype) vt = helper.create_variable_for_type_inference(dtype) helper.append_op( type='svd', inputs={'X': [x]}, outputs={'U': u, 'VH': vt, 'S': s}, attrs={'full_matrices': False}, ) max_singular_val = helper.create_variable_for_type_inference(dtype) helper.append_op( type='reduce_max', inputs={'X': s}, outputs={'Out': max_singular_val}, attrs={'dim': [-1], 'keep_dim': True, 'reduce_all': False}) rcond = layers.fill_constant(shape=[1], value=rcond, dtype=dtype) cutoff = rcond * max_singular_val y = float('inf') y = layers.fill_constant(shape=[1], value=y, dtype=dtype) condition = s > cutoff cond_int = layers.cast(condition, dtype) cond_not_int = layers.cast(layers.logical_not(condition), dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st = helper.create_variable_for_type_inference(dtype=dtype) st_shape = helper.create_variable_for_type_inference(dtype=dtype) helper.append_op( type='unsqueeze2', inputs={'X': singular}, attrs={'axes': [-2]}, outputs={'Out': st, 'XShape': st_shape}) dims = list(range(len(vt.shape))) perm = dims[:-2] + [dims[-1]] + [dims[-2]] v = helper.create_variable_for_type_inference(dtype) v_shape = helper.create_variable_for_type_inference(dtype) helper.append_op( 
type='transpose2', inputs={'X': [vt]}, outputs={'Out': [v], 'XShape': [v_shape]}, attrs={'axis': perm}) out_1 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='elementwise_mul', inputs={'X': v, 'Y': st}, outputs={'Out': out_1}, attrs={'axis': -1, 'use_mkldnn': False}) out_1 = helper.append_activation(out_1) out_2 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='matmul_v2', inputs={'X': out_1, 'Y': u}, outputs={'Out': out_2}, attrs={'trans_x': False, 'trans_y': True}, ) return out_2 else: helper = LayerHelper('pinv', **locals()) dtype = x.dtype check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'pinv') if dtype == paddle.complex128: s_type = 'float64' elif dtype == paddle.complex64: s_type = 'float32' else: s_type = dtype u = helper.create_variable_for_type_inference(dtype) s = helper.create_variable_for_type_inference(s_type) helper.append_op( type='eigh', inputs={'X': x}, outputs={'Eigenvalues': s, 'Eigenvectors': u}, attrs={'UPLO': 'L'}) s_abs = helper.create_variable_for_type_inference(s_type) helper.append_op( type='abs', inputs={'X': s}, outputs={'Out': s_abs}) max_singular_val = helper.create_variable_for_type_inference(s_type) helper.append_op( type='reduce_max', inputs={'X': s_abs}, outputs={'Out': max_singular_val}, attrs={'dim': [-1], 'keep_dim': True, 'reduce_all': False}) rcond = layers.fill_constant(shape=[1], value=rcond, dtype=s_type) cutoff = rcond * max_singular_val y = float('inf') y = layers.fill_constant(shape=[1], value=y, dtype=s_type) condition = s_abs > cutoff cond_int = layers.cast(condition, s_type) cond_not_int = layers.cast(layers.logical_not(condition), s_type) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st = helper.create_variable_for_type_inference(dtype=s_type) st_shape = helper.create_variable_for_type_inference(dtype=s_type) helper.append_op( type='unsqueeze2', inputs={'X': singular}, attrs={'axes': [-2]}, outputs={'Out': st, 'XShape': st_shape}) out_1 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='elementwise_mul', inputs={'X': u, 'Y': st}, outputs={'Out': out_1}, attrs={'axis': -1, 'use_mkldnn': False}) out_1 = helper.append_activation(out_1) u_conj = helper.create_variable_for_type_inference(dtype) helper.append_op( type='conj', inputs={'X': u}, outputs={'Out': [u_conj]}) out_2 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='matmul_v2', inputs={'X': out_1, 'Y': u_conj}, outputs={'Out': out_2}, attrs={'trans_x': False, 'trans_y': True}, ) return out_2 def solve(x, y, name=None): r""" Computes the solution of a square system of linear equations with a unique solution for input 'X' and 'Y'. Let :math: `X` be a sqaure matrix or a batch of square matrices, :math:`Y` be a vector/matrix or a batch of vectors/matrices, the equation should be: .. math:: Out = X^-1 * Y Specifically, - This system of linear equations has one solution if and only if input 'X' is invertible. Args: x (Tensor): A square matrix or a batch of square matrices. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): A vector/matrix or a batch of vectors/matrices. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. name(str, optional): Name for the operation (optional, default is None). 
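# Illustrative sketch (not part of the library source): per the formula above,
# for an invertible coefficient matrix the solution should agree with
# paddle.matmul(paddle.inverse(x), y). The values below are a usage assumption
# for clarification only.
import paddle
coeff = paddle.to_tensor([[2., 1.], [1., 3.]])
rhs = paddle.to_tensor([[5.], [10.]])
sol = paddle.linalg.solve(coeff, rhs)
ref = paddle.matmul(paddle.inverse(coeff), rhs)
# sol and ref should match up to numerical precision.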
For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of a square system of linear equations with a unique solution for input 'x' and 'y'. Its data type should be the same as that of `x`. Examples: .. code-block:: python # a square system of linear equations: # 2*X0 + X1 = 9 # X0 + 2*X1 = 8 import paddle import numpy as np np_x = np.array([[3, 1],[1, 2]]) np_y = np.array([9, 8]) x = paddle.to_tensor(np_x, dtype="float64") y = paddle.to_tensor(np_y, dtype="float64") out = paddle.linalg.solve(x, y) print(out) # [2., 3.]) """ if paddle.in_dynamic_mode(): return _C_ops.solve(x, y) inputs = {"X": [x], "Y": [y]} helper = LayerHelper("solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type="solve", inputs={"X": x, "Y": y}, outputs={"Out": out}) return out def triangular_solve(x, y, upper=True, transpose=False, unitriangular=False, name=None): r""" Computes the solution of a system of equations with a triangular coefficient matrix `x` and multiple right-hand sides `y` . Input `x` and `y` is 2D matrices or batches of 2D matrices. If the inputs are batches, the outputs is also batches. Args: x (Tensor): The input triangular coefficient matrix. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. upper (bool, optional): Whether to solve the upper-triangular system of equations (default) or the lower-triangular system of equations. Default: True. transpose (bool, optional): whether `x` should be transposed before calculation. Default: False. unitriangular (bool, optional): whether `x` is unit triangular. If True, the diagonal elements of `x` are assumed to be 1 and not referenced from `x` . Default: False. name(str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of the system of equations. Its data type should be the same as that of `x`. Examples: .. code-block:: python # a square system of linear equations: # x1 + x2 + x3 = 0 # 2*x2 + x3 = -9 # -x3 = 5 import paddle import numpy as np x = paddle.to_tensor([[1, 1, 1], [0, 2, 1], [0, 0,-1]], dtype="float64") y = paddle.to_tensor([[0], [-9], [5]], dtype="float64") out = paddle.linalg.triangular_solve(x, y, upper=True) print(out) # [7, -2, -5] """ if paddle.in_dynamic_mode(): return _C_ops.triangular_solve(x, y, 'upper', upper, 'transpose', transpose, 'unitriangular', unitriangular) inputs = {"X": [x], "Y": [y]} helper = LayerHelper("triangular_solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'triangular_solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'triangular_solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='triangular_solve', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs={ 'upper': upper, 'transpose': transpose, 'unitriangular': unitriangular }) return out def cholesky_solve(x, y, upper=False, name=None): r""" Solves a linear system of equations A @ X = B, given A's Cholesky factor matrix u and matrix B. Input `x` and `y` is 2D matrices or batches of 2D matrices. 
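# Illustrative sketch (not part of the library source), following the call
# convention of the example further below (right-hand side first, factor
# second): factor a symmetric positive-definite A with paddle.linalg.cholesky,
# then solve A @ X = B and compare against paddle.linalg.solve. The values are
# a usage assumption for clarification only.
import paddle
A = paddle.to_tensor([[4., 1.], [1., 3.]])
B = paddle.to_tensor([[1.], [2.]])
L = paddle.linalg.cholesky(A, upper=False)
X = paddle.linalg.cholesky_solve(B, L, upper=False)
# X should match paddle.linalg.solve(A, B) up to numerical precision.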
If the inputs are batches, the outputs is also batches. Args: x (Tensor): The input matrix which is upper or lower triangular Cholesky factor of square matrix A. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. upper (bool, optional): whether to consider the Cholesky factor as a lower or upper triangular matrix. Default: False. name(str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of the system of equations. Its data type is the same as that of `x`. Examples: .. code-block:: python import paddle u = paddle.to_tensor([[1, 1, 1], [0, 2, 1], [0, 0,-1]], dtype="float64") b = paddle.to_tensor([[0], [-9], [5]], dtype="float64") out = paddle.linalg.cholesky_solve(b, u, upper=True) print(out) # [-2.5, -7, 9.5] """ if paddle.in_dynamic_mode(): return _C_ops.cholesky_solve(x, y, 'upper', upper) helper = LayerHelper("cholesky_solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'cholesky_solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'cholesky_solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='cholesky_solve', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs={'upper': upper}) return out def eigvalsh(x, UPLO='L', name=None): """ Computes the eigenvalues of a complex Hermitian (conjugate symmetric) or a real symmetric matrix. Args: x (Tensor): A tensor with shape :math:`[_, M, M]` , The data type of the input Tensor x should be one of float32, float64, complex64, complex128. UPLO(str, optional): Lower triangular part of a (‘L’, default) or the upper triangular part (‘U’). name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor eigenvalues in ascending order. Examples: .. code-block:: python import numpy as np import paddle x_data = np.array([[1, -2j], [2j, 5]]) x = paddle.to_tensor(x_data) out_value = paddle.eigvalsh(x, UPLO='L') print(out_value) #[0.17157288, 5.82842712] """ if paddle.in_dynamic_mode(): is_test = x.stop_gradient values, _ = _C_ops.eigvalsh(x, 'UPLO', UPLO, 'is_test', is_test) return values def __check_input(x, UPLO): x_shape = list(x.shape) if len(x.shape) < 2: raise ValueError( "Input(input) only support >=2 tensor, but received " "length of Input(input) is %s." % len(x.shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The input matrix must be batches of square matrices. But received x's dimention: {}". format(x_shape)) if UPLO != 'L' and UPLO != 'U': raise ValueError( "UPLO must be L or U. 
But received UPLO is: {}".format(UPLO)) __check_input(x, UPLO) helper = LayerHelper('eigvalsh', **locals()) check_variable_and_dtype(x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigvalsh') out_value = helper.create_variable_for_type_inference(dtype=x.dtype) out_vector = helper.create_variable_for_type_inference(dtype=x.dtype) is_test = x.stop_gradient helper.append_op( type='eigvalsh', inputs={'X': x}, outputs={'Eigenvalues': out_value, 'Eigenvectors': out_vector}, attrs={'UPLO': UPLO, 'is_test': is_test}) return out_value def lstsq(x, y, rcond=None, driver=None, name=None): """ Computes a solution to the least squares problem of a system of linear equations. Args: x (Tensor): A tensor with shape ``(*, M, N)`` , the data type of the input Tensor ``x`` should be one of float32, float64. y (Tensor): A tensor with shape ``(*, M, K)`` , the data type of the input Tensor ``y`` should be one of float32, float64. rcond(float, optional): The default value is None. A float pointing number used to determine the effective rank of ``x``. If ``rcond`` is None, it will be set to max(M, N) times the machine precision of x_dtype. driver(str, optional): The default value is None. The name of LAPACK method to be used. For CPU inputs the valid values are ‘gels’, ‘gelsy’, ‘gelsd, ‘gelss’. For CUDA input, the only valid driver is ‘gels’. If ``driver`` is None, ‘gelsy’ is used for CPU inputs and ‘gels’ for CUDA inputs. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tuple: A tuple of 4 Tensors which is (``solution``, ``residuals``, ``rank``, ``singular_values``). ``solution`` is a tensor with shape ``(*, N, K)``, meaning the least squares solution. ``residuals`` is a tensor with shape ``(*, K)``, meaning the squared residuals of the solutions, which is computed when M > N and every matrix in ``x`` is full-rank, otherwise return an empty tensor. ``rank`` is a tensor with shape ``(*)``, meaning the ranks of the matrices in ``x``, which is computed when ``driver`` in (‘gelsy’, ‘gelsd’, ‘gelss’), otherwise return an empty tensor. ``singular_values`` is a tensor with shape ``(*, min(M, N))``, meaning singular values of the matrices in ``x``, which is computed when ``driver`` in (‘gelsd’, ‘gelss’), otherwise return an empty tensor. Examples: .. code-block:: python import paddle paddle.set_device("cpu") x = paddle.to_tensor([[1, 3], [3, 2], [5, 6.]]) y = paddle.to_tensor([[3, 4, 6], [5, 3, 4], [1, 2, 1.]]) results = paddle.linalg.lstsq(x, y, driver="gelsd") print(results[0]) # [[ 0.78350395, -0.22165027, -0.62371236], # [-0.11340097, 0.78866047, 1.14948535]] print(results[1]) # [19.81443405, 10.43814468, 30.56185532]) print(results[2]) # 2 print(results[3]) # [9.03455734, 1.54167950] x = paddle.to_tensor([[10, 2, 3], [3, 10, 5], [5, 6, 12.]]) y = paddle.to_tensor([[4, 2, 9], [2, 0, 3], [2, 5, 3.]]) results = paddle.linalg.lstsq(x, y, driver="gels") print(results[0]) # [[ 0.39386186, 0.10230173, 0.93606132], # [ 0.10741687, -0.29028133, 0.11892585], # [-0.05115091, 0.51918161, -0.19948854]] print(results[1]) # [] """ device = paddle.get_device() if device == "cpu": if driver not in (None, "gels", "gelss", "gelsd", "gelsy"): raise ValueError( "Only support valid driver is 'gels', 'gelss', 'gelsd', 'gelsy' or None for CPU inputs. But got {}". 
format(driver)) driver = "gelsy" if driver is None else driver elif "gpu" in device: if driver not in (None, "gels"): raise ValueError( "Only support valid driver is 'gels' or None for CUDA inputs. But got {}". format(driver)) driver = "gels" if driver is None else driver else: raise RuntimeError("Only support lstsq api for CPU or CUDA device.") if x.dtype == y.dtype and x.dtype in (paddle.float32, paddle.float64): pass else: raise ValueError( "Only support x and y have the same dtype such as 'float32' and 'float64'." ) if rcond is None: if x.dtype == paddle.float32: rcond = 1e-7 * max(x.shape[-2], x.shape[-1]) elif x.dtype == paddle.float64: rcond = 1e-15 * max(x.shape[-2], x.shape[-1]) if paddle.in_dynamic_mode(): solution, rank, singular_values = _C_ops.lstsq(x, y, "rcond", rcond, "driver", driver) if x.shape[-2] > x.shape[-1]: matmul_out = _varbase_creator(dtype=x.dtype) _C_ops.matmul(x, solution, matmul_out, 'trans_x', False, 'trans_y', False) minus_out = _C_ops.elementwise_sub(matmul_out, y) pow_out = _C_ops.pow(minus_out, 'factor', 2) residuals = _C_ops.reduce_sum(pow_out, 'dim', [-2], 'keepdim', False, 'reduce_all', False) else: residuals = paddle.empty(shape=[0], dtype=x.dtype) if driver == "gels": rank = paddle.empty(shape=[0], dtype=paddle.int32) singular_values = paddle.empty(shape=[0], dtype=x.dtype) elif driver == "gelsy": singular_values = paddle.empty(shape=[0], dtype=x.dtype) return solution, residuals, rank, singular_values helper = LayerHelper('lstsq', **locals()) check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq') check_variable_and_dtype( y, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq') solution = helper.create_variable_for_type_inference(dtype=x.dtype) residuals = helper.create_variable_for_type_inference(dtype=x.dtype) rank = helper.create_variable_for_type_inference(dtype=paddle.int32) singular_values = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='lstsq', inputs={'X': x, 'Y': y}, outputs={ 'Solution': solution, 'Rank': rank, 'SingularValues': singular_values }, attrs={'rcond': rcond, 'driver': driver}) matmul_out = helper.create_variable_for_type_inference(dtype=x.dtype) minus_out = helper.create_variable_for_type_inference(dtype=x.dtype) pow_out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matmul_v2', inputs={'X': x, 'Y': solution}, outputs={'Out': matmul_out}, attrs={ 'trans_x': False, 'trans_y': False, }) helper.append_op( type='elementwise_sub', inputs={'X': matmul_out, 'Y': y}, outputs={'Out': minus_out}) helper.append_op( type='pow', inputs={'X': minus_out}, outputs={'Out': pow_out}, attrs={'factor': 2}) helper.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': residuals}, attrs={'dim': [-2], 'keep_dim': False, 'reduce_all': False}) if driver == "gels": rank = paddle.static.data(name='rank', shape=[0]) singular_values = paddle.static.data(name='singular_values', shape=[0]) elif driver == "gelsy": singular_values = paddle.static.data(name='singular_values', shape=[0]) return solution, residuals, rank, singular_values
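# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): the residuals returned
# by paddle.linalg.lstsq in the overdetermined, full-rank case are the sums of
# squared errors sum((x @ solution - y) ** 2) over the rows, matching the
# dynamic-mode branch above. The tensors, the 'gelsd' driver choice and the
# tolerance below are assumptions made for this sketch.
# ---------------------------------------------------------------------------
import paddle

paddle.set_device("cpu")
x = paddle.to_tensor([[1., 3.], [3., 2.], [5., 6.], [7., 1.]])
y = paddle.to_tensor([[3.], [5.], [1.], [2.]])

solution, residuals, rank, singular_values = paddle.linalg.lstsq(x, y, driver="gelsd")

# Recompute the squared residuals by hand and compare with what lstsq reported.
manual = ((paddle.matmul(x, solution) - y) ** 2).sum(axis=-2)
assert paddle.allclose(residuals, manual, atol=1e-5)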
bincount
Computes the frequency of each value in the input tensor.

Args:
    x (Tensor): A Tensor of non-negative integers. Should be a 1-D tensor.
    weights (Tensor, optional): Weight for each value in the input tensor. Should have the same shape as the input. Default is None.
    minlength (int, optional): The minimum number of bins. Should be a non-negative integer. Default is 0.
    name (str, optional): The default value is None. Normally there is no need for the user to set this
        property. For more information, please refer to :ref:`api_guide_Name`.

Returns:
    Tensor: The tensor of frequency.

Examples:
    .. code-block:: python

        import paddle

        x = paddle.to_tensor([1, 2, 1, 4, 5])
        result1 = paddle.bincount(x)
        print(result1) # [0, 2, 1, 0, 1, 1]

        w = paddle.to_tensor([2.1, 0.4, 0.1, 0.5, 0.5])
        result2 = paddle.bincount(x, weights=w)
        print(result2) # [0., 2.19999981, 0.40000001, 0., 0.50000000, 0.50000000]
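# ---------------------------------------------------------------------------
# Hedged reference sketch -- NOT the masked bincount implementation that
# follows below. It only reproduces the documented semantics in plain Python
# so the docstring example values can be sanity-checked; the helper name
# `bincount_reference` is made up for this illustration.
# ---------------------------------------------------------------------------
import paddle

def bincount_reference(x, weights=None, minlength=0):
    values = [int(v) for v in x.numpy()]
    nbins = max(max(values) + 1, minlength)
    out = [0.0] * nbins if weights is not None else [0] * nbins
    for i, v in enumerate(values):
        out[v] += float(weights.numpy()[i]) if weights is not None else 1
    return paddle.to_tensor(out)

x = paddle.to_tensor([1, 2, 1, 4, 5])
print(bincount_reference(x))             # [0, 2, 1, 0, 1, 1]

w = paddle.to_tensor([2.1, 0.4, 0.1, 0.5, 0.5])
print(bincount_reference(x, weights=w))  # approximately [0., 2.2, 0.4, 0., 0.5, 0.5]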
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from ..fluid.layer_helper import LayerHelper from ..framework import _varbase_creator, _dygraph_tracer from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype from ..static import Variable from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode from ..fluid.layers import transpose, cast # noqa: F401 from ..fluid import layers import paddle from paddle.common_ops_import import core from paddle.common_ops_import import VarDesc from paddle import _C_ops __all__ = [] def matmul(x, y, transpose_x=False, transpose_y=False, name=None): """ Applies matrix multiplication to two tensors. `matmul` follows the complete broadcast rules, and its behavior is consistent with `np.matmul`. Currently, the input tensors' number of dimensions can be any, `matmul` can be used to achieve the `dot`, `matmul` and `batchmatmul`. The actual behavior depends on the shapes of :math:`x`, :math:`y` and the flag values of :attr:`transpose_x`, :attr:`transpose_y`. Specifically: - If a transpose flag is specified, the last two dimensions of the tensor are transposed. If the tensor is ndim-1 of shape, the transpose is invalid. If the tensor is ndim-1 of shape :math:`[D]`, then for :math:`x` it is treated as :math:`[1, D]`, whereas for :math:`y` it is the opposite: It is treated as :math:`[D, 1]`. The multiplication behavior depends on the dimensions of `x` and `y`. Specifically: - If both tensors are 1-dimensional, the dot product result is obtained. - If both tensors are 2-dimensional, the matrix-matrix product is obtained. - If the `x` is 1-dimensional and the `y` is 2-dimensional, a `1` is prepended to its dimension in order to conduct the matrix multiply. After the matrix multiply, the prepended dimension is removed. - If the `x` is 2-dimensional and `y` is 1-dimensional, the matrix-vector product is obtained. - If both arguments are at least 1-dimensional and at least one argument is N-dimensional (where N > 2), then a batched matrix multiply is obtained. If the first argument is 1-dimensional, a 1 is prepended to its dimension in order to conduct the batched matrix multiply and removed after. If the second argument is 1-dimensional, a 1 is appended to its dimension for the purpose of the batched matrix multiple and removed after. The non-matrix (exclude the last two dimensions) dimensions are broadcasted according the broadcast rule. For example, if input is a (j, 1, n, m) tensor and the other is a (k, m, p) tensor, out will be a (j, k, n, p) tensor. Args: x (Tensor): The input tensor which is a Tensor. y (Tensor): The input tensor which is a Tensor. transpose_x (bool): Whether to transpose :math:`x` before multiplication. transpose_y (bool): Whether to transpose :math:`y` before multiplication. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The output Tensor. Examples: .. 
code-block:: python import paddle import numpy as np # vector * vector x_data = np.random.random([10]).astype(np.float32) y_data = np.random.random([10]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [1] # matrix * vector x_data = np.random.random([10, 5]).astype(np.float32) y_data = np.random.random([5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10] # batched matrix * broadcasted vector x_data = np.random.random([10, 5, 2]).astype(np.float32) y_data = np.random.random([2]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 5] # batched matrix * batched matrix x_data = np.random.random([10, 5, 2]).astype(np.float32) y_data = np.random.random([10, 2, 5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 5, 5] # batched matrix * broadcasted matrix x_data = np.random.random([10, 1, 5, 2]).astype(np.float32) y_data = np.random.random([1, 3, 2, 5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 3, 5, 5] """ if in_dygraph_mode(): return _C_ops.final_state_matmul(x, y, transpose_x, transpose_y) if _in_legacy_dygraph(): op_type = 'matmul_v2' op = getattr(_C_ops, op_type) return op(x, y, 'trans_x', transpose_x, 'trans_y', transpose_y) attrs = { 'trans_x': transpose_x, 'trans_y': transpose_y, } def __check_input(x, y): var_names = {'x': x, 'y': y} for name, val in var_names.items(): check_variable_and_dtype( val, name, ['float16', 'float32', 'float64', 'complex64', 'complex128'], 'matmul') __check_input(x, y) helper = LayerHelper('matmul_v2', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matmul_v2', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs=attrs) return out def norm(x, p='fro', axis=None, keepdim=False, name=None): """ Returns the matrix norm (Frobenius) or vector norm (the 1-norm, the Euclidean or 2-norm, and in general the p-norm for p > 0) of a given tensor. .. note:: This norm API is different from `numpy.linalg.norm`. This api supports high-order input tensors (rank >= 3), and certain axis need to be pointed out to calculate the norm. But `numpy.linalg.norm` only supports 1-D vector or 2-D matrix as input tensor. For p-order matrix norm, this api actually treats matrix as a flattened vector to calculate the vector norm, NOT REAL MATRIX NORM. Args: x (Tensor): The input tensor could be N-D tensor, and the input data type could be float32 or float64. p (float|string, optional): Order of the norm. Supported values are `fro`, `0`, `1`, `2`, `inf`, `-inf` and any positive real number yielding the corresponding p-norm. Not supported: ord < 0 and nuclear norm. Default value is `fro`. axis (int|list|tuple, optional): The axis on which to apply norm operation. If axis is int or list(int)/tuple(int) with only one element, the vector norm is computed over the axis. If `axis < 0`, the dimension to norm operation is rank(input) + axis. If axis is a list(int)/tuple(int) with two elements, the matrix norm is computed over the axis. Defalut value is `None`. keepdim (bool, optional): Whether to reserve the reduced dimension in the output Tensor. 
The result tensor will have fewer dimension than the :attr:`input` unless :attr:`keepdim` is true, default value is False. name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: results of norm operation on the specified axis of input tensor, it's data type is the same as input's Tensor. Examples: .. code-block:: python import paddle import numpy as np shape=[2, 3, 4] np_input = np.arange(24).astype('float32') - 12 np_input = np_input.reshape(shape) x = paddle.to_tensor(np_input) #[[[-12. -11. -10. -9.] [ -8. -7. -6. -5.] [ -4. -3. -2. -1.]] # [[ 0. 1. 2. 3.] [ 4. 5. 6. 7.] [ 8. 9. 10. 11.]]] # compute frobenius norm along last two dimensions. out_fro = paddle.linalg.norm(x, p='fro', axis=[0,1]) # out_fro.numpy() [17.435596 16.911535 16.7332 16.911535] # compute 2-order vector norm along last dimension. out_pnorm = paddle.linalg.norm(x, p=2, axis=-1) #out_pnorm.numpy(): [[21.118711 13.190906 5.477226] # [ 3.7416575 11.224972 19.131126]] # compute 2-order norm along [0,1] dimension. out_pnorm = paddle.linalg.norm(x, p=2, axis=[0,1]) #out_pnorm.numpy(): [17.435596 16.911535 16.7332 16.911535] # compute inf-order norm out_pnorm = paddle.linalg.norm(x, p=np.inf) #out_pnorm.numpy() = [12.] out_pnorm = paddle.linalg.norm(x, p=np.inf, axis=0) #out_pnorm.numpy(): [[12. 11. 10. 9.] [8. 7. 6. 7.] [8. 9. 10. 11.]] # compute -inf-order norm out_pnorm = paddle.linalg.norm(x, p=-np.inf) #out_pnorm.numpy(): [0.] out_pnorm = paddle.linalg.norm(x, p=-np.inf, axis=0) #out_pnorm.numpy(): [[0. 1. 2. 3.] [4. 5. 6. 5.] [4. 3. 2. 1.]] """ def frobenius_norm(input, dim=None, keepdim=False, name=None): """ The frobenius norm OP is to calculate the frobenius norm of certain two dimensions of Tensor `input`. Args: input (Variable): Tensor, data type float32, float64. dim (list, optional): None for last two dimensions. keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False. """ if dim is not None and not (isinstance(dim, list) and len(dim) == 2): raise ValueError( "The dim of frobenius norm op should be None or two elements list!" ) if paddle.in_dynamic_mode(): if dim is None: return _C_ops.frobenius_norm(input, 'keep_dim', keepdim, 'reduce_all', True) return _C_ops.frobenius_norm(input, 'dim', dim, 'keep_dim', keepdim, 'reduce_all', False) attrs = {'dim': dim, 'keep_dim': keepdim, 'reduce_all': False} if dim is None: attrs['reduce_all'] = True check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'frobenius_norm') helper = LayerHelper('frobenius_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op( type='frobenius_norm', inputs={'X': input}, outputs={'Out': out}, attrs=attrs) return out def vector_norm(input, porder=None, axis=None, keepdim=False, asvector=False, name=None): """ Calculate the p-order vector norm for certain dimension of Tensor `input`. Args: input (Variable): Tensor, data type float32, float64. porder (float, optional): None for porder=2.0. axis (int, optional): None for last dimension. keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False. 
""" if paddle.in_dynamic_mode(): if axis is None: axis = -1 return _C_ops.p_norm(input, 'porder', porder, 'axis', axis, 'keepdim', keepdim, 'asvector', asvector) if porder is not None: check_type(porder, 'porder', (float, int), 'p_norm') if axis is not None: check_type(axis, 'axis', (int), 'p_norm') check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'p_norm') attrs = { 'axis': axis if axis is not None else -1, 'porder': float(porder) if porder is not None else 2.0, 'keepdim': keepdim, 'asvector': asvector, 'epsilon': 1e-12, } helper = LayerHelper('p_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op( type='p_norm', inputs={'X': input}, outputs={'Out': out}, attrs=attrs) return out def inf_norm(input, porder=None, axis=axis, keepdim=False, asvector=False, name=None): helper = LayerHelper('frobenius_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op(type='abs', inputs={'X': input}, outputs={'Out': out}) reduce_out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) reduce_all = True if axis == None or axis == [] or asvector == True else False axis = axis if axis != None and axis != [] else [0] reduce_type = 'reduce_max' if porder == np.float( 'inf') else 'reduce_min' helper.append_op( type=reduce_type, inputs={'X': out}, outputs={'Out': reduce_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) return reduce_out def p_matrix_norm(input, porder=1., axis=axis, keepdim=False, name=None): """ NOTE: This function actually treats the matrix as flattened vector to calculate vector norm instead of matrix norm. """ block = LayerHelper('norm', **locals()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) abs_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='abs', inputs={'X': input}, outputs={'Out': abs_out}) pow_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='pow', inputs={'X': abs_out}, outputs={'Out': pow_out}, attrs={'factor': porder}) sum_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': sum_out}, attrs={ 'dim': axis, 'keep_dim': keepdim, 'reduce_all': True if axis is None else False }) porder block.append_op( type='pow', inputs={'X': sum_out}, outputs={'Out': out}, attrs={'factor': float(1. / porder)}) return out if axis is None and p is not None: if isinstance(p, str): if p == "fro": return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name) else: raise ValueError( "only valid string values are 'fro', found {}".format(p)) elif isinstance(p, (int, float)): return vector_norm( x, porder=p, axis=axis, keepdim=keepdim, asvector=True, name=name) else: raise ValueError("only valid p type is string or float, found {}". 
format(type(p))) if isinstance(axis, tuple): axis = list(axis) if isinstance(axis, list) and len(axis) == 1: axis = axis[0] #calculate vector norm, where axis is int or list with only one integer if isinstance(axis, int): if isinstance(p, str): if p == "fro": return vector_norm( x, porder=2, axis=axis, keepdim=keepdim, asvector=False, name=name) else: raise ValueError( "only valid string values are 'fro', found {}".format(p)) elif isinstance(p, (int, float)): return vector_norm( x, axis=axis, porder=p, keepdim=keepdim, asvector=False, name=name) else: raise ValueError( "unspport p for p-order vector norm. except float, found {}". format(p)) #calculate matrix norm, where axis is list with two integers elif isinstance(axis, list) and len(axis) == 2: if p == "fro": return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name) elif p == np.inf or p == -np.inf: return inf_norm(x, porder=p, axis=axis, keepdim=keepdim, name=name) elif p == 0: raise ValueError( "just suport axis type int or list (length of list <=1) if p = 0, found {}". format(axis)) else: return p_matrix_norm( x, porder=p, axis=axis, keepdim=keepdim, name=name) else: raise ValueError( "except axis type int or list (length of list <=2), found {}". format(axis)) def dist(x, y, p=2, name=None): r""" This OP returns the p-norm of (x - y). It is not a norm in a strict sense, only as a measure of distance. The shapes of x and y must be broadcastable. The definition is as follows, for details, please refer to the `numpy's broadcasting <https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`_: - Each input has at least one dimension. - Match the two input dimensions from back to front, the dimension sizes must either be equal, one of them is 1, or one of them does not exist. Where, z = x - y, the shapes of x and y are broadcastable, then the shape of z can be obtained as follows: 1. If the number of dimensions of x and y are not equal, prepend 1 to the dimensions of the tensor with fewer dimensions. For example, The shape of x is [8, 1, 6, 1], the shape of y is [7, 1, 5], prepend 1 to the dimension of y. x (4-D Tensor): 8 x 1 x 6 x 1 y (4-D Tensor): 1 x 7 x 1 x 5 2. Determine the size of each dimension of the output z: choose the maximum value from the two input dimensions. z (4-D Tensor): 8 x 7 x 6 x 5 If the number of dimensions of the two inputs are the same, the size of the output can be directly determined in step 2. When p takes different values, the norm formula is as follows: When p = 0, defining $0^0=0$, the zero-norm of z is simply the number of non-zero elements of z. .. math:: ||z||_{0}=\lim_{p \\rightarrow 0}\sum_{i=1}^{m}|z_i|^{p} When p = inf, the inf-norm of z is the maximum element of z. .. math:: ||z||_\infty=\max_i |z_i| When p = -inf, the negative-inf-norm of z is the minimum element of z. .. math:: ||z||_{-\infty}=\min_i |z_i| Otherwise, the p-norm of z follows the formula, .. math:: ||z||_{p}=(\sum_{i=1}^{m}|z_i|^p)^{\\frac{1}{p}} Args: x (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64. y (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64. p (float, optional): The norm to be computed, its data type is float32 or float64. Default: 2. Returns: Tensor: Tensor that is the p-norm of (x - y). Examples: .. code-block:: python import paddle import numpy as np x = paddle.to_tensor(np.array([[3, 3],[3, 3]]), "float32") y = paddle.to_tensor(np.array([[3, 3],[3, 1]]), "float32") out = paddle.dist(x, y, 0) print(out) # out = [1.] out = paddle.dist(x, y, 2) print(out) # out = [2.] 
out = paddle.dist(x, y, float("inf")) print(out) # out = [2.] out = paddle.dist(x, y, float("-inf")) print(out) # out = [0.] """ check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'dist') check_variable_and_dtype(y, 'dtype', ['float32', 'float64'], 'dist') check_type(p, 'p', (float, int), 'dist') helper = LayerHelper("dist", **locals()) out = helper.create_variable_for_type_inference(x.dtype) inputs = {"X": [x], "Y": [y]} outputs = {'Out': [out]} attrs = {"p": float(p)} helper.append_op( type='dist', inputs=inputs, outputs={'Out': out}, attrs=attrs) return out def cond(x, p=None, name=None): """ Computes the condition number of a matrix or batches of matrices with respect to a matrix norm ``p``. Args: x (Tensor): The input tensor could be tensor of shape ``(*, m, n)`` where ``*`` is zero or more batch dimensions for ``p`` in ``(2, -2)``, or of shape ``(*, n, n)`` where every matrix is invertible for any supported ``p``. And the input data type could be ``float32`` or ``float64``. p (float|string, optional): Order of the norm. Supported values are `fro`, `nuc`, `1`, `-1`, `2`, `-2`, `inf`, `-inf`. Default value is `None`, meaning that the order of the norm is `2`. name (str, optional): The default value is `None`. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: computing results of condition number, its data type is the same as input Tensor ``x``. Examples: .. code-block:: python import paddle import numpy as np x = paddle.to_tensor([[1., 0, -1], [0, 1, 0], [1, 0, 1]]) # compute conditional number when p is None out = paddle.linalg.cond(x) # out.numpy() [1.4142135] # compute conditional number when order of the norm is 'fro' out_fro = paddle.linalg.cond(x, p='fro') # out_fro.numpy() [3.1622777] # compute conditional number when order of the norm is 'nuc' out_nuc = paddle.linalg.cond(x, p='nuc') # out_nuc.numpy() [9.2426405] # compute conditional number when order of the norm is 1 out_1 = paddle.linalg.cond(x, p=1) # out_1.numpy() [2.] # compute conditional number when order of the norm is -1 out_minus_1 = paddle.linalg.cond(x, p=-1) # out_minus_1.numpy() [1.] # compute conditional number when order of the norm is 2 out_2 = paddle.linalg.cond(x, p=2) # out_2.numpy() [1.4142135] # compute conditional number when order of the norm is -1 out_minus_2 = paddle.linalg.cond(x, p=-2) # out_minus_2.numpy() [0.70710677] # compute conditional number when order of the norm is inf out_inf = paddle.linalg.cond(x, p=np.inf) # out_inf.numpy() [2.] # compute conditional number when order of the norm is -inf out_minus_inf = paddle.linalg.cond(x, p=-np.inf) # out_minus_inf.numpy() [1.] 
a = paddle.to_tensor(np.random.randn(2, 4, 4).astype('float32')) # a.numpy() # [[[ 0.14063153 -0.996288 0.7996131 -0.02571543] # [-0.16303636 1.5534962 -0.49919784 -0.04402903] # [-1.1341571 -0.6022629 0.5445269 0.29154757] # [-0.16816919 -0.30972657 1.7521842 -0.5402487 ]] # [[-0.58081484 0.12402827 0.7229862 -0.55046535] # [-0.15178485 -1.1604939 0.75810957 0.30971205] # [-0.9669573 1.0940945 -0.27363303 -0.35416734] # [-1.216529 2.0018666 -0.7773689 -0.17556527]]] a_cond_fro = paddle.linalg.cond(a, p='fro') # a_cond_fro.numpy() [31.572273 28.120834] b = paddle.to_tensor(np.random.randn(2, 3, 4).astype('float64')) # b.numpy() # [[[ 1.61707487 0.46829144 0.38130416 0.82546736] # [-1.72710298 0.08866375 -0.62518804 0.16128892] # [-0.02822879 -1.67764516 0.11141444 0.3220113 ]] # [[ 0.22524372 0.62474921 -0.85503233 -1.03960523] # [-0.76620689 0.56673047 0.85064753 -0.45158196] # [ 1.47595418 2.23646462 1.5701758 0.10497519]]] b_cond_2 = paddle.linalg.cond(b, p=2) # b_cond_2.numpy() [3.30064451 2.51976252] """ def mat_norm(input, porder=1., axis=None): """ NOTE: Calculate the matrix norm of a square matrix or batches of square matrices, when porder is in (1, -1, inf, -inf) """ reduce_all = True if axis is None or axis == [] else False axis = axis if axis != None and axis != [] else [0] keepdim = False if paddle.in_dynamic_mode(): abs_out = _C_ops.abs(input) sum_out = _C_ops.reduce_sum(abs_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == 1 or porder == np.inf: return _C_ops.reduce_max(sum_out, 'dim', [-1], 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == -1 or porder == -np.inf: return _C_ops.reduce_min(sum_out, 'dim', [-1], 'keepdim', keepdim, 'reduce_all', reduce_all) block = LayerHelper('norm', **locals()) abs_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='abs', inputs={'X': input}, outputs={'Out': abs_out}) block.append_op( type='reduce_sum', inputs={'X': abs_out}, outputs={'Out': sum_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) if porder == 1 or porder == np.inf: block.append_op( type='reduce_max', inputs={'X': sum_out}, outputs={'Out': out}, attrs={ 'dim': [-1], 'keep_dim': keepdim, 'reduce_all': reduce_all }) if porder == -1 or porder == -np.inf: block.append_op( type='reduce_min', inputs={'X': sum_out}, outputs={'Out': out}, attrs={ 'dim': [-1], 'keep_dim': keepdim, 'reduce_all': reduce_all }) return out def fro_norm(input, porder=2, axis=[-1]): """ NOTE: Calculate the frobenius norm of a square matrix or batches of square matrices. """ reduce_all = True if axis is None or axis == [] else False keepdim = False if paddle.in_dynamic_mode(): pow_out = _C_ops.pow(input, 'factor', porder) sum_out_1 = _C_ops.reduce_sum(pow_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) sum_out_2 = _C_ops.reduce_sum(sum_out_1, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) return _C_ops.pow(sum_out_2, 'factor', float(1. 
/ porder)) block = LayerHelper('norm', **locals()) pow_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out_1 = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out_2 = block.create_variable_for_type_inference( dtype=block.input_dtype()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='pow', inputs={'X': input}, outputs={'Out': pow_out}, attrs={'factor': porder}) block.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': sum_out_1}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='reduce_sum', inputs={'X': sum_out_1}, outputs={'Out': sum_out_2}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='pow', inputs={'X': sum_out_2}, outputs={'Out': out}, attrs={'factor': float(1. / porder)}) return out def svd_norm(input, porder, axis=[-1]): """ NOTE: Calculate the matrix norm, which is related to singular values, of a matrix or batches of matrices, including nuclear norm, 2-norm and (-2)-norm. """ reduce_all = True if axis is None or axis == [] else False keepdim = False u, s, vh = svd(input, full_matrices=False) if paddle.in_dynamic_mode(): if porder == "nuc": return _C_ops.reduce_sum(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) max_out = _C_ops.reduce_max(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) min_out = _C_ops.reduce_min(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == 2: return _C_ops.elementwise_div(max_out, min_out, 'aixs', axis, 'use_mkldnn', False) if porder == -2: return _C_ops.elementwise_div(min_out, max_out, 'aixs', axis, 'use_mkldnn', False) block = LayerHelper('norm', **locals()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) if porder == "nuc": block.append_op( type='reduce_sum', inputs={'X': s}, outputs={'Out': out}, attrs={ 'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all }) return out max_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) min_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='reduce_max', inputs={'X': s}, outputs={'Out': max_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='reduce_min', inputs={'X': s}, outputs={'Out': min_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) if porder == 2: block.append_op( type='elementwise_div', inputs={'X': max_out, 'Y': min_out}, outputs={'Out': out}, attrs={'aixs': axis, 'use_mkldnn': False}) return out if porder == -2: block.append_op( type='elementwise_div', inputs={'X': min_out, 'Y': max_out}, outputs={'Out': out}, attrs={'aixs': axis, 'use_mkldnn': False}) return out def empty_tensor(input, shape): if paddle.in_dynamic_mode(): return input.reshape(shape) raise ValueError("only support x is nonempty tensor in static mode") x_shape = list(x.shape) if not len(x_shape) >= 2: raise ValueError("input should be a matrix or batches of matrices, " + "but the dimention of received input is {}".format( len(x_shape))) if p == None: p = 2 x_size = 0 if (0 in x_shape) else 1 if p in ("fro", "nuc", 1, -1, np.inf, -np.inf): if x_shape[len(x_shape) - 1] == x_shape[len(x_shape) - 2]: if x_size == 0: return empty_tensor(x, x_shape[:-2]) x_inv = x.inverse() if p == "fro": return fro_norm(x) * fro_norm(x_inv) if p == "nuc": return svd_norm(x, p) * svd_norm(x_inv, p) if p in (1, -1): return mat_norm( x, 
porder=p, axis=[-2]) * mat_norm( x_inv, porder=p, axis=[-2]) if p in (np.inf, -np.inf): return mat_norm( x, porder=p, axis=[-1]) * mat_norm( x_inv, porder=p, axis=[-1]) else: raise ValueError("only support p is {} when input is a ".format(p) + "square matrix or batches of square matrices") elif p in (2, -2): if x_size == 0: return empty_tensor(x, x_shape[:-2]) return svd_norm(x, porder=p) else: raise ValueError( "unsupported {} for p, only supporting ('fro', 'nuc', ".format( p) + "1, -1, 2, -2, inf, -inf) or none") def dot(x, y, name=None): """ This operator calculates inner product for vectors. .. note:: Support 1-d and 2-d Tensor. When it is 2d, the first dimension of this matrix is the batch dimension, which means that the vectors of multiple batches are dotted. Parameters: x(Tensor): 1-D or 2-D ``Tensor``. Its dtype should be ``float32``, ``float64``, ``int32``, ``int64`` y(Tensor): 1-D or 2-D ``Tensor``. Its dtype soulde be ``float32``, ``float64``, ``int32``, ``int64`` name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. Details: :ref:`api_guide_Name` Returns: Tensor: the calculated result Tensor. Examples: .. code-block:: python import paddle import numpy as np x_data = np.random.uniform(0.1, 1, [10]).astype(np.float32) y_data = np.random.uniform(1, 3, [10]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.dot(x, y) print(z) """ op_type = 'dot' # skip var type check in dygraph mode to improve efficiency if paddle.in_dynamic_mode(): op = getattr(_C_ops, op_type) return op(x, y) assert x is not None, 'x cannot be None in {}'.format(op_type) assert y is not None, 'y cannot be None in {}'.format(op_type) check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], op_type) check_variable_and_dtype(y, 'y', ['float32', 'float64', 'int32', 'int64'], op_type) helper = LayerHelper(op_type, **locals()) if name is None: out = helper.create_variable_for_type_inference(dtype=x.dtype) else: out = helper.create_variable( name=name, dtype=x.dtype, persistable=False) helper.append_op( type="dot", inputs={'X': x, 'Y': y}, attrs={}, outputs={"Out": out}) return out def cov(x, rowvar=True, ddof=True, fweights=None, aweights=None, name=None): """ Estimate the covariance matrix of the input variables, given data and weights. A covariance matrix is a square matrix, indicate the covariance of each pair variables in the input matrix. For example, for an N-dimensional samples X=[x1,x2,…xN]T, then the covariance matrix element Cij is the covariance of xi and xj. The element Cii is the variance of xi itself. Parameters: x(Tensor): A N-D(N<=2) Tensor containing multiple variables and observations. By default, each row of x represents a variable. Also see rowvar below. rowvar(Bool, optional): If rowvar is True (default), then each row represents a variable, with observations in the columns. Default: True ddof(Bool, optional): If ddof=True will return the unbiased estimate, and ddof=False will return the simple average. Default: True fweights(Tensor, optional): 1-D Tensor of integer frequency weights; The number of times each observation vector should be repeated. Default: None aweights(Tensor, optional): 1-D Tensor of observation vector weights. How important of the observation vector, larger data means this element is more important. Default: None name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. 
Details: :ref:`api_guide_Name` Returns: Tensor: The covariance matrix Tensor of the variables. Examples: .. code-block:: python import paddle xt = paddle.rand((3,4)) paddle.linalg.cov(xt) ''' Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, [[0.07918842, 0.06127326, 0.01493049], [0.06127326, 0.06166256, 0.00302668], [0.01493049, 0.00302668, 0.01632146]]) ''' """ op_type = 'cov' if len(x.shape) > 2 or len(x.shape) < 1: raise ValueError( "Input(x) only support N-D (1<=N<=2) tensor in cov, but received " "length of Input(input) is %s." % len(x.shape)) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cov') nx = x if len(x.shape) == 1: nx = x.reshape((1, -1)) if not rowvar and nx.shape[0] != 1: nx = nx.t() w = None observation_num = nx.shape[1] if fweights is not None: w = fweights.astype(nx.dtype) if len(w.shape) > 1: raise ValueError( "Input(fweights) only support N-D (N<=1) tensor in cov, but received " "shape of Input(input) is %s." % len(fweights.shape)) if fweights.shape[0] != observation_num: raise ValueError( "The number of Input(fweights) should equal to x's dim[1]: {}, but received " "size of Input(fweights) is {}.".format(observation_num, fweights.shape[0])) if fweights.min() < 0: raise ValueError( "The value of Input(fweights) cannot be negtive, but received " "min of Input(fweights) is {}.".format(fweights.min())) if not paddle.all(fweights == paddle.round(fweights.astype('float64'))): raise ValueError("Input(fweights) must be integer ") if aweights is not None: aw = aweights.astype(nx.dtype) if len(aw.shape) > 1: raise ValueError( "Input(aweights) only support N-D (N<=1) tensor in cov, but received " "length of Input(input) is %s." % len(aweights.shape)) check_variable_and_dtype(aweights, 'dtype', ['float32', 'float64'], 'cov') if aweights.shape[0] != observation_num: raise ValueError( "The number of Input(aweights) should equal to x's dim[1]: {}, but received " "size of Input(aweights) is {}.".format(observation_num, aweights.shape[0])) if aweights.min() < 0: raise ValueError( "The value of Input(aweights) cannot be negtive, but received " "min of Input(aweights) is {}.".format(aweights.min())) if w is not None: w = w * aw else: w = aw w_sum = paddle.to_tensor(observation_num, dtype=nx.dtype) if fweights is not None or aweights is not None: w_sum = w.sum() if w_sum.item() == 0: raise ValueError("The sum of weights is zero, can't be normalized.") if w is not None: nx_w = nx * w avg = (nx_w).sum(axis=1) / w_sum else: avg = nx.sum(axis=1) / w_sum nx_w = nx if w is not None and aweights is not None and ddof == True: norm_factor = w_sum - (w * aweights).sum() / w_sum else: norm_factor = w_sum - ddof if norm_factor <= 0: norm_factor = paddle.to_tensor(0, dtype=nx.dtype) nx = nx - avg.unsqueeze(1) xxt = paddle.mm(nx, nx_w.t().conj()) cov = paddle.divide(xxt, norm_factor).squeeze() return cov def t(input, name=None): """ Transpose <=2-D tensor. 0-D and 1-D tensors are returned as it is and 2-D tensor is equal to the paddle.transpose function which perm dimensions set 0 and 1. Args: input (Tensor): The input Tensor. It is a N-D (N<=2) Tensor of data types float16, float32, float64, int32. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` Returns: Tensor: A transposed n-D Tensor, with data type being float16, float32, float64, int32, int64. For Example: .. 
code-block:: text # Example 1 (0-D tensor) x = tensor([0.79]) paddle.t(x) = tensor([0.79]) # Example 2 (1-D tensor) x = tensor([0.79, 0.84, 0.32]) paddle.t(x) = tensor([0.79, 0.84, 0.32]) # Example 3 (2-D tensor) x = tensor([0.79, 0.84, 0.32], [0.64, 0.14, 0.57]) paddle.t(x) = tensor([0.79, 0.64], [0.84, 0.14], [0.32, 0.57]) Examples: .. code-block:: python import paddle x = paddle.ones(shape=[2, 3], dtype='int32') x_transposed = paddle.t(x) print(x_transposed.shape) # [3, 2] """ if len(input.shape) > 2: raise ValueError( "Input(input) only support N-D (N<=2) tensor, but received " "length of Input(input) is %s. Perhaps you can use paddle." "tensor.transpose() instead." % len(input.shape)) if paddle.in_dynamic_mode(): if len(input.shape) == 1: return input # 2-D tensor perm = [1, 0] out, _ = _C_ops.transpose2(input, 'axis', perm) return out check_variable_and_dtype( input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'], 'transpose') helper = LayerHelper('t', **locals()) out = helper.create_variable_for_type_inference(input.dtype) input_shape = helper.create_variable_for_type_inference(input.dtype) if len(input.shape) == 1: out = input else: helper.append_op( type='transpose2', inputs={'X': [input]}, outputs={'Out': [out], 'XShape': [input_shape]}, attrs={'axis': [1, 0]}) return out def cross(x, y, axis=None, name=None): """ Computes the cross product between two tensors along an axis. Inputs must have the same shape, and the length of their axes should be equal to 3. If `axis` is not given, it defaults to the first axis found with the length 3. Args: x (Tensor): The first input tensor. y (Tensor): The second input tensor. axis (int, optional): The axis along which to compute the cross product. It defaults to the first axis found with the length 3. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor. A Tensor with same data type as `x`. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0], [3.0, 3.0, 3.0]]) y = paddle.to_tensor([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]) z1 = paddle.cross(x, y) # [[-1. -1. -1.] # [ 2. 2. 2.] # [-1. -1. -1.]] z2 = paddle.cross(x, y, axis=1) # [[0. 0. 0.] # [0. 0. 0.] # [0. 0. 0.]] """ if in_dygraph_mode(): return _C_ops.final_state_cross(x, y, axis) else: if _in_legacy_dygraph(): if axis is not None: return _C_ops.cross(x, y, 'dim', axis) else: return _C_ops.cross(x, y) else: helper = LayerHelper("cross", **locals()) out = helper.create_variable_for_type_inference(x.dtype) attrs = dict() attrs['dim'] = axis helper.append_op( type='cross', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs=attrs) return out def cholesky(x, upper=False, name=None): r""" Computes the Cholesky decomposition of one symmetric positive-definite matrix or batches of symmetric positive-definite matrice. If `upper` is `True`, the decomposition has the form :math:`A = U^{T}U` , and the returned matrix :math:`U` is upper-triangular. Otherwise, the decomposition has the form :math:`A = LL^{T}` , and the returned matrix :math:`L` is lower-triangular. Args: x (Tensor): The input tensor. Its shape should be `[*, M, M]`, where * is zero or more batch dimensions, and matrices on the inner-most 2 dimensions all should be symmetric positive-definite. Its data type should be float32 or float64. upper (bool): The flag indicating whether to return upper or lower triangular matrices. Default: False. 
Returns: Tensor: A Tensor with same shape and data type as `x`. It represents \ triangular matrices generated by Cholesky decomposition. Examples: .. code-block:: python import paddle import numpy as np a = np.random.rand(3, 3) a_t = np.transpose(a, [1, 0]) x_data = np.matmul(a, a_t) + 1e-03 x = paddle.to_tensor(x_data) out = paddle.linalg.cholesky(x, upper=False) print(out) # [[1.190523 0. 0. ] # [0.9906703 0.27676893 0. ] # [1.25450498 0.05600871 0.06400121]] """ if paddle.in_dynamic_mode(): return _C_ops.cholesky(x, "upper", upper) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cholesky') check_type(upper, 'upper', bool, 'cholesky') helper = LayerHelper('cholesky', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='cholesky', inputs={'X': [x]}, outputs={'Out': out}, attrs={'upper': upper}) return out def matrix_rank(x, tol=None, hermitian=False, name=None): r""" Computes the rank of a matrix. The rank of a matrix is the number of singular values that are greater than the specified `tol` threshold when hermitian=False, or the number of eigenvalues in absolute value that are greater than the specified `tol` threshold when hermitian=True. Args: x (Tensor): The input tensor. Its shape should be `[..., m, n]`, where `...` is zero or more batch dimensions. If `x` is a batch of matrices then the output has the same batch dimensions. The data type of `x` should be float32 or float64. tol (float,Tensor,optional): the tolerance value. Default: None. If `tol` is not specified, and `sigma` is the largest singular value (or eigenvalues in absolute value), and `eps` is the epsilon value for the dtype of `x`, then `tol` is computed with formula `tol=sigma * max(m,n) * eps`. Note that if `x` is a batch of matrices, `tol` is computed this way for every batch. hermitian (bool,optional): indicates whether `x` is Hermitian. Default: False. When hermitian=True, `x` is assumed to be Hermitian, enabling a more efficient method for finding eigenvalues, but `x` is not checked inside the function. Instead, We just use the lower triangular of the matrix to compute. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: Rank of tensor x. Examples: .. 
code-block:: python import paddle a = paddle.eye(10) b = paddle.linalg.matrix_rank(a) print(b) # b = [10] c = paddle.ones(shape=[3, 4, 5, 5]) d = paddle.linalg.matrix_rank(c, tol=0.01, hermitian=True) print(d) # d = [[1, 1, 1, 1], # [1, 1, 1, 1], # [1, 1, 1, 1]] """ if paddle.in_dynamic_mode(): if tol is None: tol_tensor = None tol_attr = 0.0 use_default_tol = True elif isinstance(tol, Variable): if tol.dtype != x.dtype: tol_tensor = cast(tol, x.dtype) else: tol_tensor = tol tol_attr = 0.0 use_default_tol = False else: tol_tensor = None tol_attr = float(tol) use_default_tol = False return _C_ops.matrix_rank(x, tol_tensor, "tol", tol_attr, 'hermitian', hermitian, 'use_default_tol', use_default_tol) inputs = {} attrs = {} check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'matrix_rank') inputs['X'] = x if tol is None: attrs['use_default_tol'] = True elif isinstance(tol, Variable): check_variable_and_dtype(tol, 'tol', ['float32'], 'matrix_rank') attrs['use_default_tol'] = False if tol.dtype != x.dtype: inputs['TolTensor'] = cast(tol, x.dtype) else: inputs['TolTensor'] = tol else: check_type(tol, 'tol', float, 'matrix_rank') attrs['use_default_tol'] = False attrs['tol'] = tol check_type(hermitian, 'hermitian', bool, 'matrix_rank') attrs['hermitian'] = hermitian helper = LayerHelper('matrix_rank', **locals()) out = helper.create_variable_for_type_inference(dtype='int32') helper.append_op( type='matrix_rank', inputs=inputs, outputs={'Out': out}, attrs=attrs) return out def bmm(x, y, name=None): """ Applies batched matrix multiplication to two tensors. Both of the two input tensors must be three-dementional and share the same batch size. if x is a (b, m, k) tensor, y is a (b, k, n) tensor, the output will be a (b, m, n) tensor. Args: x (Tensor): The input Tensor. y (Tensor): The input Tensor. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The product Tensor. Examples: .. code-block:: python import paddle # In imperative mode: # size x: (2, 2, 3) and y: (2, 3, 2) x = paddle.to_tensor([[[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]], [[3.0, 3.0, 3.0], [4.0, 4.0, 4.0]]]) y = paddle.to_tensor([[[1.0, 1.0],[2.0, 2.0],[3.0, 3.0]], [[4.0, 4.0],[5.0, 5.0],[6.0, 6.0]]]) out = paddle.bmm(x, y) #output size: (2, 2, 2) #output value: #[[[6.0, 6.0],[12.0, 12.0]],[[45.0, 45.0],[60.0, 60.0]]] out_np = out.numpy() """ x_shape = x.shape y_shape = y.shape if not len(x_shape) == len(y_shape) == 3: raise ValueError( "x and y should be 3-dimensional. But received x's dimention: {}, y's dimention: {}". format(x_shape, y_shape)) if x_shape[2] != y_shape[1]: raise ValueError( "x's width must be equal with y's height. But received x's shape: {}, y's shape: {}". format(x_shape, y_shape)) if x_shape[0] != y_shape[0]: raise ValueError( "x's batch (shape[0]) must be equal with y's batch (shape[0]). But received x's shape: {}, y's shape: {}". format(x_shape, y_shape)) if paddle.in_dynamic_mode(): return _C_ops.bmm(x, y) helper = LayerHelper('bmm', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type='bmm', inputs={'X': x, 'Y': y}, outputs={'Out': out}) return out def histogram(input, bins=100, min=0, max=0, name=None): """ Computes the histogram of a tensor. The elements are sorted into equal width bins between min and max. If min and max are both zero, the minimum and maximum values of the data are used. Args: input (Tensor): A Tensor(or LoDTensor) with shape :math:`[N_1, N_2,..., N_k]` . 
The data type of the input Tensor should be float32, float64, int32, int64. bins (int): number of histogram bins min (int): lower end of the range (inclusive) max (int): upper end of the range (inclusive) Returns: Tensor: data type is int64, shape is (nbins,). Examples: .. code-block:: python import paddle inputs = paddle.to_tensor([1, 2, 1]) result = paddle.histogram(inputs, bins=4, min=0, max=3) print(result) # [0, 2, 1, 0] """ if paddle.in_dynamic_mode(): return _C_ops.histogram(input, "bins", bins, "min", min, "max", max) helper = LayerHelper('histogram', **locals()) check_variable_and_dtype( input, 'X', ['int32', 'int64', 'float32', 'float64'], 'histogram') out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64) helper.append_op( type='histogram', inputs={'X': input}, outputs={'Out': out}, attrs={'bins': bins, 'min': min, 'max': max}) return out # MASKED: bincount function (lines 1417-1467) def mv(x, vec, name=None): """ Performs a matrix-vector product of the matrix x and the vector vec. Args: x (Tensor): A tensor with shape :math:`[M, N]` , The data type of the input Tensor x should be one of float32, float64. vec (Tensor): A tensor with shape :math:`[N]` , The data type of the input Tensor x should be one of float32, float64. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor which is producted by x and vec. Examples: .. code-block:: python # x: [M, N], vec: [N] # paddle.mv(x, vec) # out: [M] import numpy as np import paddle x_data = np.array([[2, 1, 3], [3, 0, 1]]).astype("float64") x = paddle.to_tensor(x_data) vec_data = np.array([3, 5, 1]) vec = paddle.to_tensor(vec_data).astype("float64") out = paddle.mv(x, vec) """ if in_dygraph_mode(): return _C_ops.final_state_mv(x, vec) else: if _in_legacy_dygraph(): out = _C_ops.mv(x, vec) return out else: def __check_input(x, vec): var_names = {'x': x, 'vec': vec} for name, val in var_names.items(): check_variable_and_dtype(val, name, ['float32', 'float64'], 'mv') x_shape = list(x.shape) vec_shape = list(vec.shape) if len(x_shape) != 2: raise ValueError( "x should be 2-dimensional. But received x's dimention: {}". format(x_shape)) if len(vec_shape) != 1: raise ValueError( "vec should be 1-dimensional. But received vec's dimention: {}". format(vec_shape)) __check_input(x, vec) helper = LayerHelper('mv', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='mv', inputs={'X': x, 'Vec': vec}, outputs={'Out': out}) return out def det(x, name=None): """ Calculates determinant value of a square matrix or batches of square matrices. Args: x (Tensor): input (Tensor): the input matrix of size `(n, n)` or the batch of matrices of size `(*, n, n)` where `*` is one or more batch dimensions. Returns: y (Tensor):the determinant value of a square matrix or batches of square matrices. Examples: .. 
code-block:: python import paddle x = paddle.randn([3,3,3]) A = paddle.linalg.det(x) print(A) # [ 0.02547996, 2.52317095, -6.15900707]) """ if paddle.in_dynamic_mode(): return _C_ops.determinant(x) check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'det') input_shape = list(x.shape) assert len(input_shape) >= 2, \ "The x must be at least 2-dimensional, " \ "but received Input x's dimensional: %s.\n" % \ len(input_shape) assert (input_shape[-1] == input_shape[-2]), \ "Expect squared input," \ "but received %s by %s matrix.\n" \ %(input_shape[-2], input_shape[-1]) \ helper = LayerHelper('determinant', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='determinant', inputs={'Input': [x]}, outputs={'Out': [out]}) return out def slogdet(x, name=None): """ Calculates the sign and natural logarithm of the absolute value of a square matrix's or batches square matrices' determinant. The determinant can be computed with ``sign * exp(logabsdet) Supports input of float, double Note that for matrices that have zero determinant, this returns ``(0, -inf)`` Args: x (Tensor): the batch of matrices of size :math:`(*, n, n)` where math:`*` is one or more batch dimensions. Returns: y (Tensor): A tensor containing the sign of the determinant and the natural logarithm of the absolute value of determinant, respectively. Examples: .. code-block:: python import paddle x = paddle.randn([3,3,3]) A = paddle.linalg.slogdet(x) print(A) # [[ 1. , 1. , -1. ], # [-0.98610914, -0.43010661, -0.10872950]]) """ if paddle.in_dynamic_mode(): return _C_ops.slogdeterminant(x) check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'slogdet') input_shape = list(x.shape) assert len(input_shape) >= 2, \ "The x must be at least 2-dimensional, " \ "but received Input x's dimensional: %s.\n" % \ len(input_shape) assert (input_shape[-1] == input_shape[-2]), \ "Expect squared input," \ "but received %s by %s matrix.\n" \ %(input_shape[-2], input_shape[-1]) \ helper = LayerHelper('slogdeterminant', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='slogdeterminant', inputs={'Input': [x]}, outputs={'Out': [out]}) return out def svd(x, full_matrices=False, name=None): r""" Computes the singular value decomposition of one matrix or a batch of regular matrices. Let :math:`X` be the input matrix or a batch of input matrices, the output should satisfies: .. math:: X = U * diag(S) * VT Args: x (Tensor): The input tensor. Its shape should be `[..., N, M]`, where `...` is zero or more batch dimensions. N and M can be arbitraty positive number. Note that if x is sigular matrices, the grad is numerical instable. The data type of x should be float32 or float64. full_matrices (bool): A flag to control the behavor of svd. If full_matrices = True, svd op will compute full U and V matrics, which means shape of U is `[..., N, N]`, shape of V is `[..., M, M]`. K = min(M, N). If full_matrices = False, svd op will use a economic method to store U and V. which means shape of U is `[..., N, K]`, shape of V is `[..., M, K]`. K = min(M, N). name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tuple of 3 tensors: (U, S, VH). VH is the conjugate transpose of V. S is the singlar value vectors of matrics with shape `[..., K]` Examples: .. 
code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [1.0, 3.0], [4.0, 6.0]]).astype('float64') x = x.reshape([3, 2]) u, s, vh = paddle.linalg.svd(x) print (u) #U = [[ 0.27364809, -0.21695147 ], # [ 0.37892198, -0.87112408 ], # [ 0.8840446 , 0.44053933 ]] print (s) #S = [8.14753743, 0.78589688] print (vh) #VT= [[ 0.51411221, 0.85772294], # [ 0.85772294, -0.51411221]] # one can verify : U * S * VT == X # U * UH == I # V * VH == I """ if paddle.in_dynamic_mode(): return _C_ops.svd(x, 'full_matrices', full_matrices) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'svd') check_type(full_matrices, 'full_matrices', bool, 'svd') helper = LayerHelper('svd', **locals()) u = helper.create_variable_for_type_inference(dtype=x.dtype) vh = helper.create_variable_for_type_inference(dtype=x.dtype) s = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['full_matrices'] = full_matrices helper.append_op( type='svd', inputs={'X': [x]}, outputs={'U': u, 'VH': vh, 'S': s}, attrs=attrs, ) return u, s, vh def matrix_power(x, n, name=None): r""" Computes the n-th power of a square matrix or a batch of square matrices. Let :math:`X` be a sqaure matrix or a batch of square matrices, :math:`n` be an exponent, the equation should be: .. math:: Out = X ^ {n} Specifically, - If `n > 0`, it returns the matrix or a batch of matrices raised to the power of `n`. - If `n = 0`, it returns the identity matrix or a batch of identity matrices. - If `n < 0`, it returns the inverse of each matrix (if invertible) raised to the power of `abs(n)`. Args: x (Tensor): A square matrix or a batch of square matrices to be raised to power `n`. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. n (int): The exponent. It can be any positive, negative integer or zero. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The n-th power of the matrix (or the batch of matrices) `x`. Its data type should be the same as that of `x`. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1, 2, 3], [1, 4, 9], [1, 8, 27]], dtype='float64') print(paddle.linalg.matrix_power(x, 2)) # [[6. , 34. , 102.], # [14. , 90. , 282.], # [36. , 250., 804.]] print(paddle.linalg.matrix_power(x, 0)) # [[1., 0., 0.], # [0., 1., 0.], # [0., 0., 1.]] print(paddle.linalg.matrix_power(x, -2)) # [[ 12.91666667, -12.75000000, 2.83333333 ], # [-7.66666667 , 8. , -1.83333333 ], # [ 1.80555556 , -1.91666667 , 0.44444444 ]] """ if paddle.in_dynamic_mode(): return _C_ops.matrix_power(x, "n", n) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'matrix_power') check_type(n, 'n', int, 'matrix_power') helper = LayerHelper('matrix_power', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matrix_power', inputs={'X': x}, outputs={'Out': out}, attrs={'n': n}) return out def qr(x, mode="reduced", name=None): r""" Computes the QR decomposition of one matrix or batches of matrice (backward is unsupported now). Args: x (Tensor): The input tensor. Its shape should be `[..., M, N]`, where ... is zero or more batch dimensions. M and N can be arbitrary positive number. The data type of x should be float32 or float64. mode (str, optional): A flag to control the behavior of qr, the default is "reduced". 
Suppose x's shape is `[..., M, N]` and denoting `K = min(M, N)`: If mode = "reduced", qr op will return reduced Q and R matrices, which means Q's shape is `[..., M, K]` and R's shape is `[..., K, N]`. If mode = "complete", qr op will return complete Q and R matrices, which means Q's shape is `[..., M, M]` and R's shape is `[..., M, N]`. If mode = "r", qr op will only return reduced R matrix, which means R's shape is `[..., K, N]`. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: If mode = "reduced" or mode = "complete", qr will return a two tensor-tuple, which represents Q and R. If mode = "r", qr will return a tensor which represents R. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') q, r = paddle.linalg.qr(x) print (q) print (r) # Q = [[-0.16903085, 0.89708523], # [-0.50709255, 0.27602622], # [-0.84515425, -0.34503278]]) # R = [[-5.91607978, -7.43735744], # [ 0. , 0.82807867]]) # one can verify : X = Q * R ; """ if paddle.in_dynamic_mode(): q, r = _C_ops.qr(x, 'mode', mode) if mode == "r": return r else: return q, r check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'qr') check_type(mode, 'mode', str, 'qr') helper = LayerHelper('qr', **locals()) q = helper.create_variable_for_type_inference(dtype=x.dtype) r = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['mode'] = mode helper.append_op( type='qr', inputs={'X': [x]}, outputs={'Q': q, 'R': r}, attrs=attrs) if mode == "r": return r else: return q, r def lu(x, pivot=True, get_infos=False, name=None): r""" Computes the LU factorization of an N-D(N>=2) matrix x. Returns the LU factorization(inplace x) and Pivots. low triangular matrix L and upper triangular matrix U are combined to a single LU matrix. Pivoting is done if pivot is set to True. P mat can be get by pivots: # ones = eye(rows) #eye matrix of rank rows # for i in range(cols): # swap(ones[i], ones[pivots[i]]) # return ones Args: X (Tensor): the tensor to factor of N-dimensions(N>=2). pivot (bool, optional): controls whether pivoting is done. Default: True. get_infos (bool, optional): if set to True, returns an info IntTensor. Default: False. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: factorization (Tensor): LU matrix, the factorization of input X. pivots (IntTensor): the pivots of size(∗(N-2), min(m,n)). `pivots` stores all the intermediate transpositions of rows. The final permutation `perm` could be reconstructed by this, details refer to upper example. infos (IntTensor, optional): if `get_infos` is `True`, this is a tensor of size (∗(N-2)) where non-zero values indicate whether factorization for the matrix or each minibatch has succeeded or failed. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') lu,p,info = paddle.linalg.lu(x, get_infos=True) # >>> lu: # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. 
], # [0.20000000, 0.80000000], # [0.60000000, 0.50000000]]) # >>> p # Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # [3, 3]) # >>> info # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # 0) P,L,U = paddle.linalg.lu_unpack(lu,p) # >>> P # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[0., 1., 0.], # [0., 0., 1.], # [1., 0., 0.]]), # >>> L # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[1. , 0. ], # [0.20000000, 1. ], # [0.60000000, 0.50000000]]), # >>> U # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0. , 0.80000000]])) # one can verify : X = P @ L @ U ; """ if paddle.in_dynamic_mode(): LU, Piv, Info = _C_ops.lu(x, 'pivots', pivot) if get_infos: return LU, Piv, Info else: return LU, Piv check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu') helper = LayerHelper('lu', **locals()) lu = helper.create_variable_for_type_inference(dtype=x.dtype) p = helper.create_variable_for_type_inference(dtype='int') info = helper.create_variable_for_type_inference(dtype='int') attrs = dict() attrs['pivots'] = pivot helper.append_op( type='lu', inputs={'X': x}, outputs={'Out': lu, 'Pivots': p, 'Infos': info}, attrs=attrs) if get_infos: return lu, p, info else: return lu, p def lu_unpack(x, y, unpack_ludata=True, unpack_pivots=True, name=None): r""" Unpack L U and P to single matrix tensor . unpack L and U matrix from LU, unpack permutation matrix P from Pivtos . P mat can be get by pivots: # ones = eye(rows) #eye matrix of rank rows # for i in range(cols): # swap(ones[i], ones[pivots[i]]) Args: x (Tensor): The LU tensor get from paddle.linalg.lu, which is combined by L and U. y (Tensor): Pivots get from paddle.linalg.lu. unpack_ludata (bool,optional): whether to unpack L and U from x. Default: True. unpack_pivots (bool, optional): whether to unpack permutation matrix P from Pivtos. Default: True. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: P (Tensor): Permutation matrix P of lu factorization. L (Tensor): The lower triangular matrix tensor of lu factorization. U (Tensor): The upper triangular matrix tensor of lu factorization. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') lu,p,info = paddle.linalg.lu(x, get_infos=True) # >>> lu: # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0.20000000, 0.80000000], # [0.60000000, 0.50000000]]) # >>> p # Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # [3, 3]) # >>> info # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # 0) P,L,U = paddle.linalg.lu_unpack(lu,p) # >>> P # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[0., 1., 0.], # [0., 0., 1.], # [1., 0., 0.]]), # >>> L # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[1. , 0. ], # [0.20000000, 1. ], # [0.60000000, 0.50000000]]), # >>> U # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0. 
, 0.80000000]])) # one can verify : X = P @ L @ U ; """ if paddle.in_dynamic_mode(): P, L, U = _C_ops.lu_unpack(x, y, 'unpack_ludata', unpack_ludata, 'unpack_pivots', unpack_pivots) return P, L, U check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu_unpack') helper = LayerHelper('lu_unpack', **locals()) p = helper.create_variable_for_type_inference(dtype=x.dtype) l = helper.create_variable_for_type_inference(dtype=x.dtype) u = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['unpack_ludata'] = unpack_ludata attrs['unpack_pivots'] = unpack_pivots helper.append_op( type='lu_unpack', inputs={'X': x, 'Pivots': y}, outputs={'Pmat': p, 'L': l, 'U': u}, attrs=attrs) return p, l, u def eig(x, name=None): """ This API performs the eigenvalue decomposition of a square matrix or a batch of square matrices. .. note:: If the matrix is a Hermitian or a real symmetric matrix, please use :ref:`paddle.linalg.eigh` instead, which is much faster. If only eigenvalues is needed, please use :ref:`paddle.linalg.eigvals` instead. If the matrix is of any shape, please use :ref:`paddle.linalg.svd`. This API is only supported on CPU device. The output datatype is always complex for both real and complex input. Args: x (Tensor): A tensor with shape math:`[*, N, N]`, The data type of the x should be one of ``float32``, ``float64``, ``compplex64`` or ``complex128``. name (str, optional): The default value is `None`. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Eigenvalues(Tensors): A tensor with shape math:`[*, N]` refers to the eigen values. Eigenvectors(Tensors): A tensor with shape math:`[*, N, N]` refers to the eigen vectors. Examples: .. code-block:: python import paddle import numpy as np paddle.device.set_device("cpu") x_data = np.array([[1.6707249, 7.2249975, 6.5045543], [9.956216, 8.749598, 6.066444 ], [4.4251957, 1.7983172, 0.370647 ]]).astype("float32") x = paddle.to_tensor(x_data) w, v = paddle.linalg.eig(x) print(w) # Tensor(shape=[3, 3], dtype=complex128, place=CPUPlace, stop_gradient=False, # [[(-0.5061363550800655+0j) , (-0.7971760990842826+0j) , # (0.18518077798279986+0j)], # [(-0.8308237755993192+0j) , (0.3463813401919749+0j) , # (-0.6837005269141947+0j) ], # [(-0.23142567697893396+0j), (0.4944999840400175+0j) , # (0.7058765252952796+0j) ]]) print(v) # Tensor(shape=[3], dtype=complex128, place=CPUPlace, stop_gradient=False, # [ (16.50471283351188+0j) , (-5.5034820550763515+0j) , # (-0.21026087843552282+0j)]) """ if paddle.in_dynamic_mode(): w, v = _C_ops.eig(x) return w, v check_variable_and_dtype( x, 'X', ['float32', 'float64', 'complex64', 'complex128'], 'eig') helper = LayerHelper('eig', **locals()) w = helper.create_variable_for_type_inference(x.dtype) v = helper.create_variable_for_type_inference(x.dtype) inputs = {'X': x} outputs = {'Eigenvalues': w, 'Eigenvectors': v} helper.append_op(type='eig', inputs=inputs, outputs=outputs) return w, v def eigvals(x, name=None): """ Compute the eigenvalues of one or more general matrices. Warning: The gradient kernel of this operator does not yet developed. If you need back propagation through this operator, please replace it with paddle.linalg.eig. Args: x (Tensor): A square matrix or a batch of square matrices whose eigenvalues will be computed. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32, float64, complex64, or complex128. 
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: A tensor containing the unsorted eigenvalues which has the same batch dimensions with `x`. The eigenvalues are complex-valued even when `x` is real. Examples: .. code-block:: python import paddle paddle.set_device("cpu") paddle.seed(1234) x = paddle.rand(shape=[3, 3], dtype='float64') # [[0.02773777, 0.93004224, 0.06911496], # [0.24831591, 0.45733623, 0.07717843], # [0.48016702, 0.14235102, 0.42620817]]) print(paddle.linalg.eigvals(x)) # [(-0.27078833542132674+0j), (0.29962280156230725+0j), (0.8824477020120244+0j)] #complex128 """ check_variable_and_dtype(x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigvals') x_shape = list(x.shape) if len(x_shape) < 2: raise ValueError( "The dimension of Input(x) should be at least 2, but received x's dimention = {}, x's shape = {}". format(len(x_shape), x_shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The last two dimensions of Input(x) should be equal, but received x's shape = {}". format(x_shape)) if paddle.in_dynamic_mode(): return _C_ops.eigvals(x) helper = LayerHelper('eigvals', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type='eigvals', inputs={'X': x}, outputs={'Out': out}) return out def multi_dot(x, name=None): """ Multi_dot is an operator that calculates multiple matrix multiplications. Supports inputs of float16(only GPU support), float32 and float64 dtypes. This function does not support batched inputs. The input tensor in [x] must be 2-D except for the first and last can be 1-D. If the first tensor is a 1-D vector of shape(n, ) it is treated as row vector of shape(1, n), similarly if the last tensor is a 1D vector of shape(n, ), it is treated as a column vector of shape(n, 1). If the first and last tensor are 2-D matrix, then the output is also 2-D matrix, otherwise the output is a 1-D vector. Multi_dot will select the lowest cost multiplication order for calculation. The cost of multiplying two matrices with shapes (a, b) and (b, c) is a * b * c. Given matrices A, B, C with shapes (20, 5), (5, 100), (100, 10) respectively, we can calculate the cost of different multiplication orders as follows: - Cost((AB)C) = 20x5x100 + 20x100x10 = 30000 - Cost(A(BC)) = 5x100x10 + 20x5x10 = 6000 In this case, multiplying B and C first, then multiply A, which is 5 times faster than sequential calculation. Args: x ([Tensor]): The input tensors which is a list Tensor. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The output Tensor. Examples: .. 
code-block:: python import paddle import numpy as np # A * B A_data = np.random.random([3, 4]).astype(np.float32) B_data = np.random.random([4, 5]).astype(np.float32) A = paddle.to_tensor(A_data) B = paddle.to_tensor(B_data) out = paddle.linalg.multi_dot([A, B]) print(out.numpy().shape) # [3, 5] # A * B * C A_data = np.random.random([10, 5]).astype(np.float32) B_data = np.random.random([5, 8]).astype(np.float32) C_data = np.random.random([8, 7]).astype(np.float32) A = paddle.to_tensor(A_data) B = paddle.to_tensor(B_data) C = paddle.to_tensor(C_data) out = paddle.linalg.multi_dot([A, B, C]) print(out.numpy().shape) # [10, 7] """ if paddle.in_dynamic_mode(): return _C_ops.multi_dot(x) check_type(x, 'x', (list, tuple), 'multi_dot') for id, item in enumerate(x): check_variable_and_dtype(item, 'x[' + str(id) + ']', ['float16', 'float32', 'float64'], 'multi_dot') if item.dtype != x[0].dtype: raise TypeError( "All the Tensors in the input must have the same data type.") helper = LayerHelper('multi_dot', **locals()) dtype = helper.input_dtype(input_param_name='x') out = helper.create_variable_for_type_inference(dtype) helper.append_op(type='multi_dot', inputs={"X": x}, outputs={"Out": out}) return out def eigh(x, UPLO='L', name=None): """ Compute the eigenvalues and eigenvectors of a complex Hermitian (conjugate symmetric) or a real symmetric matrix. Args: x (Tensor): A tensor with shape :math:`[*, N, N]` , The data type of the input Tensor x should be one of float32, float64, complex64, complex128. UPLO(str, optional): (string, default 'L'), 'L' represents the lower triangular matrix, "'U' represents the upper triangular matrix.". name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: out_value(Tensor): A Tensor with shape [*, N] and data type of float32 and float64. The eigenvalues of eigh op. out_vector(Tensor): A Tensor with shape [*, N, N] and data type of float32,float64,complex64 and complex128. The eigenvectors of eigh op. Examples: .. code-block:: python import numpy as np import paddle x_data = np.array([[1, -2j], [2j, 5]]) x = paddle.to_tensor(x_data) out_value, out_vector = paddle.linalg.eigh(x, UPLO='L') print(out_value) #[0.17157288, 5.82842712] print(out_vector) #[(-0.9238795325112867+0j), (-0.3826834323650898+0j)], #[ 0.3826834323650898j , -0.9238795325112867j ]] """ if paddle.in_dynamic_mode(): return _C_ops.eigh(x, 'UPLO', UPLO) def __check_input(x, UPLO): x_shape = list(x.shape) if len(x.shape) < 2: raise ValueError( "Input(input) only support >=2 tensor, but received " "length of Input(input) is %s." % len(x.shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The input matrix must be batches of square matrices. But received x's dimention: {}". format(x_shape)) if UPLO != 'L' and UPLO != 'U': raise ValueError( "UPLO must be L or U. 
But received UPLO is: {}".format(UPLO)) __check_input(x, UPLO) helper = LayerHelper('eigh', **locals()) check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigh') out_value = helper.create_variable_for_type_inference(dtype=x.dtype) out_vector = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='eigh', inputs={'X': x}, outputs={'Eigenvalues': out_value, 'Eigenvectors': out_vector}, attrs={'UPLO': UPLO}) return out_value, out_vector def pinv(x, rcond=1e-15, hermitian=False, name=None): r""" Calculate pseudo inverse via SVD(singular value decomposition) of one matrix or batches of regular matrix. .. math:: if hermitian == False: x = u * s * vt (SVD) out = v * 1/s * ut else: x = u * s * ut (eigh) out = u * 1/s * u.conj().transpose(-2,-1) If x is hermitian or symmetric matrix, svd will be replaced with eigh. Args: x(Tensor): The input tensor. Its shape should be (*, m, n) where * is zero or more batch dimensions. m and n can be arbitraty positive number. The data type of x should be float32 or float64 or complex64 or complex128. When data type is complex64 or cpmplex128, hermitian should be set True. rcond(Tensor, optional): the tolerance value to determine when is a singular value zero. Defalut:1e-15. hermitian(bool, optional): indicates whether x is Hermitian if complex or symmetric if real. Default: False. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The tensor with same data type with x. it represents pseudo inverse of x. Its shape should be (*, n, m). Examples: .. code-block:: python import paddle x = paddle.arange(15).reshape((3, 5)).astype('float64') input = paddle.to_tensor(x) out = paddle.linalg.pinv(input) print(input) print(out) # input: # [[0. , 1. , 2. , 3. , 4. ], # [5. , 6. , 7. , 8. , 9. 
], # [10., 11., 12., 13., 14.]] # out: # [[-0.22666667, -0.06666667, 0.09333333], # [-0.12333333, -0.03333333, 0.05666667], # [-0.02000000, 0.00000000, 0.02000000], # [ 0.08333333, 0.03333333, -0.01666667], # [ 0.18666667, 0.06666667, -0.05333333]] # one can verify : x * out * x = x ; # or out * x * out = x ; """ if paddle.in_dynamic_mode(): if not hermitian: # combine svd and matmul op u, s, vt = _C_ops.svd(x, 'full_matrices', False) max_singular_val = _C_ops.reduce_max(s, 'dim', [-1], 'keep_dim', True, \ 'reduce_all', False) rcond = paddle.to_tensor(rcond, dtype=x.dtype) cutoff = rcond * max_singular_val y = float('inf') y = paddle.to_tensor(y, dtype=x.dtype) condition = s > cutoff cond_int = layers.cast(condition, s.dtype) cond_not_int = layers.cast(layers.logical_not(condition), s.dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st, _ = _C_ops.unsqueeze2(singular, 'axes', [-2]) dims = list(range(len(vt.shape))) perm = dims[:-2] + [dims[-1]] + [dims[-2]] v, _ = _C_ops.transpose2(vt, 'axis', perm) out_1 = v * st out_2 = _C_ops.matmul_v2(out_1, u, 'trans_x', False, 'trans_y', True) return out_2 else: # combine eigh and matmul op s, u = _C_ops.eigh(x, 'UPLO', 'L') s_abs = paddle.abs(s) max_singular_val = _C_ops.reduce_max(s_abs, 'dim', [-1], 'keep_dim', True, \ 'reduce_all', False) rcond = paddle.to_tensor(rcond, dtype=s.dtype) cutoff = rcond * max_singular_val y = float('inf') y = paddle.to_tensor(y, dtype=s.dtype) condition = s_abs > cutoff cond_int = layers.cast(condition, s.dtype) cond_not_int = layers.cast(layers.logical_not(condition), s.dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st, _ = _C_ops.unsqueeze2(singular, 'axes', [-2]) out_1 = u * st u_conj = _C_ops.conj(u) out_2 = _C_ops.matmul_v2(out_1, u_conj, 'trans_x', False, 'trans_y', True) return out_2 else: if not hermitian: helper = LayerHelper('pinv', **locals()) dtype = x.dtype check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'pinv') u = helper.create_variable_for_type_inference(dtype) s = helper.create_variable_for_type_inference(dtype) vt = helper.create_variable_for_type_inference(dtype) helper.append_op( type='svd', inputs={'X': [x]}, outputs={'U': u, 'VH': vt, 'S': s}, attrs={'full_matrices': False}, ) max_singular_val = helper.create_variable_for_type_inference(dtype) helper.append_op( type='reduce_max', inputs={'X': s}, outputs={'Out': max_singular_val}, attrs={'dim': [-1], 'keep_dim': True, 'reduce_all': False}) rcond = layers.fill_constant(shape=[1], value=rcond, dtype=dtype) cutoff = rcond * max_singular_val y = float('inf') y = layers.fill_constant(shape=[1], value=y, dtype=dtype) condition = s > cutoff cond_int = layers.cast(condition, dtype) cond_not_int = layers.cast(layers.logical_not(condition), dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st = helper.create_variable_for_type_inference(dtype=dtype) st_shape = helper.create_variable_for_type_inference(dtype=dtype) helper.append_op( type='unsqueeze2', inputs={'X': singular}, attrs={'axes': [-2]}, outputs={'Out': st, 'XShape': st_shape}) dims = list(range(len(vt.shape))) perm = dims[:-2] + [dims[-1]] + [dims[-2]] v = helper.create_variable_for_type_inference(dtype) v_shape = helper.create_variable_for_type_inference(dtype) helper.append_op( 
type='transpose2', inputs={'X': [vt]}, outputs={'Out': [v], 'XShape': [v_shape]}, attrs={'axis': perm}) out_1 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='elementwise_mul', inputs={'X': v, 'Y': st}, outputs={'Out': out_1}, attrs={'axis': -1, 'use_mkldnn': False}) out_1 = helper.append_activation(out_1) out_2 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='matmul_v2', inputs={'X': out_1, 'Y': u}, outputs={'Out': out_2}, attrs={'trans_x': False, 'trans_y': True}, ) return out_2 else: helper = LayerHelper('pinv', **locals()) dtype = x.dtype check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'pinv') if dtype == paddle.complex128: s_type = 'float64' elif dtype == paddle.complex64: s_type = 'float32' else: s_type = dtype u = helper.create_variable_for_type_inference(dtype) s = helper.create_variable_for_type_inference(s_type) helper.append_op( type='eigh', inputs={'X': x}, outputs={'Eigenvalues': s, 'Eigenvectors': u}, attrs={'UPLO': 'L'}) s_abs = helper.create_variable_for_type_inference(s_type) helper.append_op( type='abs', inputs={'X': s}, outputs={'Out': s_abs}) max_singular_val = helper.create_variable_for_type_inference(s_type) helper.append_op( type='reduce_max', inputs={'X': s_abs}, outputs={'Out': max_singular_val}, attrs={'dim': [-1], 'keep_dim': True, 'reduce_all': False}) rcond = layers.fill_constant(shape=[1], value=rcond, dtype=s_type) cutoff = rcond * max_singular_val y = float('inf') y = layers.fill_constant(shape=[1], value=y, dtype=s_type) condition = s_abs > cutoff cond_int = layers.cast(condition, s_type) cond_not_int = layers.cast(layers.logical_not(condition), s_type) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st = helper.create_variable_for_type_inference(dtype=s_type) st_shape = helper.create_variable_for_type_inference(dtype=s_type) helper.append_op( type='unsqueeze2', inputs={'X': singular}, attrs={'axes': [-2]}, outputs={'Out': st, 'XShape': st_shape}) out_1 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='elementwise_mul', inputs={'X': u, 'Y': st}, outputs={'Out': out_1}, attrs={'axis': -1, 'use_mkldnn': False}) out_1 = helper.append_activation(out_1) u_conj = helper.create_variable_for_type_inference(dtype) helper.append_op( type='conj', inputs={'X': u}, outputs={'Out': [u_conj]}) out_2 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='matmul_v2', inputs={'X': out_1, 'Y': u_conj}, outputs={'Out': out_2}, attrs={'trans_x': False, 'trans_y': True}, ) return out_2 def solve(x, y, name=None): r""" Computes the solution of a square system of linear equations with a unique solution for input 'X' and 'Y'. Let :math: `X` be a sqaure matrix or a batch of square matrices, :math:`Y` be a vector/matrix or a batch of vectors/matrices, the equation should be: .. math:: Out = X^-1 * Y Specifically, - This system of linear equations has one solution if and only if input 'X' is invertible. Args: x (Tensor): A square matrix or a batch of square matrices. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): A vector/matrix or a batch of vectors/matrices. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. name(str, optional): Name for the operation (optional, default is None). 
For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of a square system of linear equations with a unique solution for input 'x' and 'y'. Its data type should be the same as that of `x`. Examples: .. code-block:: python # a square system of linear equations: # 2*X0 + X1 = 9 # X0 + 2*X1 = 8 import paddle import numpy as np np_x = np.array([[3, 1],[1, 2]]) np_y = np.array([9, 8]) x = paddle.to_tensor(np_x, dtype="float64") y = paddle.to_tensor(np_y, dtype="float64") out = paddle.linalg.solve(x, y) print(out) # [2., 3.]) """ if paddle.in_dynamic_mode(): return _C_ops.solve(x, y) inputs = {"X": [x], "Y": [y]} helper = LayerHelper("solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type="solve", inputs={"X": x, "Y": y}, outputs={"Out": out}) return out def triangular_solve(x, y, upper=True, transpose=False, unitriangular=False, name=None): r""" Computes the solution of a system of equations with a triangular coefficient matrix `x` and multiple right-hand sides `y` . Input `x` and `y` is 2D matrices or batches of 2D matrices. If the inputs are batches, the outputs is also batches. Args: x (Tensor): The input triangular coefficient matrix. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. upper (bool, optional): Whether to solve the upper-triangular system of equations (default) or the lower-triangular system of equations. Default: True. transpose (bool, optional): whether `x` should be transposed before calculation. Default: False. unitriangular (bool, optional): whether `x` is unit triangular. If True, the diagonal elements of `x` are assumed to be 1 and not referenced from `x` . Default: False. name(str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of the system of equations. Its data type should be the same as that of `x`. Examples: .. code-block:: python # a square system of linear equations: # x1 + x2 + x3 = 0 # 2*x2 + x3 = -9 # -x3 = 5 import paddle import numpy as np x = paddle.to_tensor([[1, 1, 1], [0, 2, 1], [0, 0,-1]], dtype="float64") y = paddle.to_tensor([[0], [-9], [5]], dtype="float64") out = paddle.linalg.triangular_solve(x, y, upper=True) print(out) # [7, -2, -5] """ if paddle.in_dynamic_mode(): return _C_ops.triangular_solve(x, y, 'upper', upper, 'transpose', transpose, 'unitriangular', unitriangular) inputs = {"X": [x], "Y": [y]} helper = LayerHelper("triangular_solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'triangular_solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'triangular_solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='triangular_solve', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs={ 'upper': upper, 'transpose': transpose, 'unitriangular': unitriangular }) return out def cholesky_solve(x, y, upper=False, name=None): r""" Solves a linear system of equations A @ X = B, given A's Cholesky factor matrix u and matrix B. Input `x` and `y` is 2D matrices or batches of 2D matrices. 
If the inputs are batches, the outputs is also batches. Args: x (Tensor): The input matrix which is upper or lower triangular Cholesky factor of square matrix A. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. upper (bool, optional): whether to consider the Cholesky factor as a lower or upper triangular matrix. Default: False. name(str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of the system of equations. Its data type is the same as that of `x`. Examples: .. code-block:: python import paddle u = paddle.to_tensor([[1, 1, 1], [0, 2, 1], [0, 0,-1]], dtype="float64") b = paddle.to_tensor([[0], [-9], [5]], dtype="float64") out = paddle.linalg.cholesky_solve(b, u, upper=True) print(out) # [-2.5, -7, 9.5] """ if paddle.in_dynamic_mode(): return _C_ops.cholesky_solve(x, y, 'upper', upper) helper = LayerHelper("cholesky_solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'cholesky_solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'cholesky_solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='cholesky_solve', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs={'upper': upper}) return out def eigvalsh(x, UPLO='L', name=None): """ Computes the eigenvalues of a complex Hermitian (conjugate symmetric) or a real symmetric matrix. Args: x (Tensor): A tensor with shape :math:`[_, M, M]` , The data type of the input Tensor x should be one of float32, float64, complex64, complex128. UPLO(str, optional): Lower triangular part of a (‘L’, default) or the upper triangular part (‘U’). name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor eigenvalues in ascending order. Examples: .. code-block:: python import numpy as np import paddle x_data = np.array([[1, -2j], [2j, 5]]) x = paddle.to_tensor(x_data) out_value = paddle.eigvalsh(x, UPLO='L') print(out_value) #[0.17157288, 5.82842712] """ if paddle.in_dynamic_mode(): is_test = x.stop_gradient values, _ = _C_ops.eigvalsh(x, 'UPLO', UPLO, 'is_test', is_test) return values def __check_input(x, UPLO): x_shape = list(x.shape) if len(x.shape) < 2: raise ValueError( "Input(input) only support >=2 tensor, but received " "length of Input(input) is %s." % len(x.shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The input matrix must be batches of square matrices. But received x's dimention: {}". format(x_shape)) if UPLO != 'L' and UPLO != 'U': raise ValueError( "UPLO must be L or U. 
But received UPLO is: {}".format(UPLO)) __check_input(x, UPLO) helper = LayerHelper('eigvalsh', **locals()) check_variable_and_dtype(x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigvalsh') out_value = helper.create_variable_for_type_inference(dtype=x.dtype) out_vector = helper.create_variable_for_type_inference(dtype=x.dtype) is_test = x.stop_gradient helper.append_op( type='eigvalsh', inputs={'X': x}, outputs={'Eigenvalues': out_value, 'Eigenvectors': out_vector}, attrs={'UPLO': UPLO, 'is_test': is_test}) return out_value def lstsq(x, y, rcond=None, driver=None, name=None): """ Computes a solution to the least squares problem of a system of linear equations. Args: x (Tensor): A tensor with shape ``(*, M, N)`` , the data type of the input Tensor ``x`` should be one of float32, float64. y (Tensor): A tensor with shape ``(*, M, K)`` , the data type of the input Tensor ``y`` should be one of float32, float64. rcond(float, optional): The default value is None. A float pointing number used to determine the effective rank of ``x``. If ``rcond`` is None, it will be set to max(M, N) times the machine precision of x_dtype. driver(str, optional): The default value is None. The name of LAPACK method to be used. For CPU inputs the valid values are ‘gels’, ‘gelsy’, ‘gelsd, ‘gelss’. For CUDA input, the only valid driver is ‘gels’. If ``driver`` is None, ‘gelsy’ is used for CPU inputs and ‘gels’ for CUDA inputs. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tuple: A tuple of 4 Tensors which is (``solution``, ``residuals``, ``rank``, ``singular_values``). ``solution`` is a tensor with shape ``(*, N, K)``, meaning the least squares solution. ``residuals`` is a tensor with shape ``(*, K)``, meaning the squared residuals of the solutions, which is computed when M > N and every matrix in ``x`` is full-rank, otherwise return an empty tensor. ``rank`` is a tensor with shape ``(*)``, meaning the ranks of the matrices in ``x``, which is computed when ``driver`` in (‘gelsy’, ‘gelsd’, ‘gelss’), otherwise return an empty tensor. ``singular_values`` is a tensor with shape ``(*, min(M, N))``, meaning singular values of the matrices in ``x``, which is computed when ``driver`` in (‘gelsd’, ‘gelss’), otherwise return an empty tensor. Examples: .. code-block:: python import paddle paddle.set_device("cpu") x = paddle.to_tensor([[1, 3], [3, 2], [5, 6.]]) y = paddle.to_tensor([[3, 4, 6], [5, 3, 4], [1, 2, 1.]]) results = paddle.linalg.lstsq(x, y, driver="gelsd") print(results[0]) # [[ 0.78350395, -0.22165027, -0.62371236], # [-0.11340097, 0.78866047, 1.14948535]] print(results[1]) # [19.81443405, 10.43814468, 30.56185532]) print(results[2]) # 2 print(results[3]) # [9.03455734, 1.54167950] x = paddle.to_tensor([[10, 2, 3], [3, 10, 5], [5, 6, 12.]]) y = paddle.to_tensor([[4, 2, 9], [2, 0, 3], [2, 5, 3.]]) results = paddle.linalg.lstsq(x, y, driver="gels") print(results[0]) # [[ 0.39386186, 0.10230173, 0.93606132], # [ 0.10741687, -0.29028133, 0.11892585], # [-0.05115091, 0.51918161, -0.19948854]] print(results[1]) # [] """ device = paddle.get_device() if device == "cpu": if driver not in (None, "gels", "gelss", "gelsd", "gelsy"): raise ValueError( "Only support valid driver is 'gels', 'gelss', 'gelsd', 'gelsy' or None for CPU inputs. But got {}". 
format(driver)) driver = "gelsy" if driver is None else driver elif "gpu" in device: if driver not in (None, "gels"): raise ValueError( "Only support valid driver is 'gels' or None for CUDA inputs. But got {}". format(driver)) driver = "gels" if driver is None else driver else: raise RuntimeError("Only support lstsq api for CPU or CUDA device.") if x.dtype == y.dtype and x.dtype in (paddle.float32, paddle.float64): pass else: raise ValueError( "Only support x and y have the same dtype such as 'float32' and 'float64'." ) if rcond is None: if x.dtype == paddle.float32: rcond = 1e-7 * max(x.shape[-2], x.shape[-1]) elif x.dtype == paddle.float64: rcond = 1e-15 * max(x.shape[-2], x.shape[-1]) if paddle.in_dynamic_mode(): solution, rank, singular_values = _C_ops.lstsq(x, y, "rcond", rcond, "driver", driver) if x.shape[-2] > x.shape[-1]: matmul_out = _varbase_creator(dtype=x.dtype) _C_ops.matmul(x, solution, matmul_out, 'trans_x', False, 'trans_y', False) minus_out = _C_ops.elementwise_sub(matmul_out, y) pow_out = _C_ops.pow(minus_out, 'factor', 2) residuals = _C_ops.reduce_sum(pow_out, 'dim', [-2], 'keepdim', False, 'reduce_all', False) else: residuals = paddle.empty(shape=[0], dtype=x.dtype) if driver == "gels": rank = paddle.empty(shape=[0], dtype=paddle.int32) singular_values = paddle.empty(shape=[0], dtype=x.dtype) elif driver == "gelsy": singular_values = paddle.empty(shape=[0], dtype=x.dtype) return solution, residuals, rank, singular_values helper = LayerHelper('lstsq', **locals()) check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq') check_variable_and_dtype( y, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq') solution = helper.create_variable_for_type_inference(dtype=x.dtype) residuals = helper.create_variable_for_type_inference(dtype=x.dtype) rank = helper.create_variable_for_type_inference(dtype=paddle.int32) singular_values = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='lstsq', inputs={'X': x, 'Y': y}, outputs={ 'Solution': solution, 'Rank': rank, 'SingularValues': singular_values }, attrs={'rcond': rcond, 'driver': driver}) matmul_out = helper.create_variable_for_type_inference(dtype=x.dtype) minus_out = helper.create_variable_for_type_inference(dtype=x.dtype) pow_out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matmul_v2', inputs={'X': x, 'Y': solution}, outputs={'Out': matmul_out}, attrs={ 'trans_x': False, 'trans_y': False, }) helper.append_op( type='elementwise_sub', inputs={'X': matmul_out, 'Y': y}, outputs={'Out': minus_out}) helper.append_op( type='pow', inputs={'X': minus_out}, outputs={'Out': pow_out}, attrs={'factor': 2}) helper.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': residuals}, attrs={'dim': [-2], 'keep_dim': False, 'reduce_all': False}) if driver == "gels": rank = paddle.static.data(name='rank', shape=[0]) singular_values = paddle.static.data(name='singular_values', shape=[0]) elif driver == "gelsy": singular_values = paddle.static.data(name='singular_values', shape=[0]) return solution, residuals, rank, singular_values
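# NOTE (editor addition, illustrative only -- not part of the original Paddle source file):
# a minimal usage sketch of the ``lstsq`` API defined above, assuming a CPU device so the
# 'gelsd' driver is valid. The helper name ``_lstsq_usage_sketch`` is hypothetical.
def _lstsq_usage_sketch():
    import paddle
    paddle.set_device("cpu")
    # M=3 rows, N=2 unknowns, K=1 right-hand side; M > N so residuals are computed.
    x = paddle.to_tensor([[1., 3.], [3., 2.], [5., 6.]])
    y = paddle.to_tensor([[3.], [5.], [1.]])
    # 'gelsd' also reports rank and singular values; with driver="gels" those two
    # outputs would come back as empty tensors, as the docstring above describes.
    solution, residuals, rank, singular_values = paddle.linalg.lstsq(x, y, driver="gelsd")
    return solution, residuals, rank, singular_values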
def bincount(x, weights=None, minlength=0, name=None):
    """
    Computes frequency of each value in the input tensor.

    Args:
        x (Tensor): A Tensor with non-negative integer. Should be 1-D tensor.
        weights (Tensor, optional): Weight for each value in the input tensor.
            Should have the same shape as input. Default is None.
        minlength (int, optional): Minimum number of bins.
            Should be non-negative integer. Default is 0.
        name(str, optional): The default value is None. Normally there is no need for user to set this
            property. For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: The tensor of frequency.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([1, 2, 1, 4, 5])
            result1 = paddle.bincount(x)
            print(result1) # [0, 2, 1, 0, 1, 1]

            w = paddle.to_tensor([2.1, 0.4, 0.1, 0.5, 0.5])
            result2 = paddle.bincount(x, weights=w)
            print(result2) # [0., 2.19999981, 0.40000001, 0., 0.50000000, 0.50000000]
    """
    if x.dtype not in [paddle.int32, paddle.int64]:
        raise TypeError("Elements in Input(x) should all be integers")

    if paddle.in_dynamic_mode():
        return _C_ops.bincount(x, weights, "minlength", minlength)

    helper = LayerHelper('bincount', **locals())

    check_variable_and_dtype(x, 'X', ['int32', 'int64'], 'bincount')

    if weights is not None:
        check_variable_and_dtype(weights, 'Weights',
                                 ['int32', 'int64', 'float32', 'float64'],
                                 'bincount')
        out = helper.create_variable_for_type_inference(dtype=weights.dtype)
    else:
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='bincount',
        inputs={'X': x, 'Weights': weights},
        outputs={'Out': out},
        attrs={'minlength': minlength})
    return out
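# NOTE (editor addition, illustrative only -- not part of the original Paddle source file):
# a minimal sketch of the ``bincount`` API defined above, showing how ``minlength`` pads the
# histogram and how ``weights`` switches the output dtype. The helper name is hypothetical.
def _bincount_usage_sketch():
    import paddle
    x = paddle.to_tensor([1, 2, 1, 4, 5])
    # minlength=8 pads the result to 8 bins: [0, 2, 1, 0, 1, 1, 0, 0]
    counts = paddle.bincount(x, minlength=8)
    # with float weights, each bin holds the sum of the weights that fall into it
    w = paddle.to_tensor([0.5, 1.0, 0.5, 2.0, 3.0])
    weighted = paddle.bincount(x, weights=w)
    return counts, weighted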
1,417
1,467
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from ..fluid.layer_helper import LayerHelper from ..framework import _varbase_creator, _dygraph_tracer from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype from ..static import Variable from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode from ..fluid.layers import transpose, cast # noqa: F401 from ..fluid import layers import paddle from paddle.common_ops_import import core from paddle.common_ops_import import VarDesc from paddle import _C_ops __all__ = [] def matmul(x, y, transpose_x=False, transpose_y=False, name=None): """ Applies matrix multiplication to two tensors. `matmul` follows the complete broadcast rules, and its behavior is consistent with `np.matmul`. Currently, the input tensors' number of dimensions can be any, `matmul` can be used to achieve the `dot`, `matmul` and `batchmatmul`. The actual behavior depends on the shapes of :math:`x`, :math:`y` and the flag values of :attr:`transpose_x`, :attr:`transpose_y`. Specifically: - If a transpose flag is specified, the last two dimensions of the tensor are transposed. If the tensor is ndim-1 of shape, the transpose is invalid. If the tensor is ndim-1 of shape :math:`[D]`, then for :math:`x` it is treated as :math:`[1, D]`, whereas for :math:`y` it is the opposite: It is treated as :math:`[D, 1]`. The multiplication behavior depends on the dimensions of `x` and `y`. Specifically: - If both tensors are 1-dimensional, the dot product result is obtained. - If both tensors are 2-dimensional, the matrix-matrix product is obtained. - If the `x` is 1-dimensional and the `y` is 2-dimensional, a `1` is prepended to its dimension in order to conduct the matrix multiply. After the matrix multiply, the prepended dimension is removed. - If the `x` is 2-dimensional and `y` is 1-dimensional, the matrix-vector product is obtained. - If both arguments are at least 1-dimensional and at least one argument is N-dimensional (where N > 2), then a batched matrix multiply is obtained. If the first argument is 1-dimensional, a 1 is prepended to its dimension in order to conduct the batched matrix multiply and removed after. If the second argument is 1-dimensional, a 1 is appended to its dimension for the purpose of the batched matrix multiple and removed after. The non-matrix (exclude the last two dimensions) dimensions are broadcasted according the broadcast rule. For example, if input is a (j, 1, n, m) tensor and the other is a (k, m, p) tensor, out will be a (j, k, n, p) tensor. Args: x (Tensor): The input tensor which is a Tensor. y (Tensor): The input tensor which is a Tensor. transpose_x (bool): Whether to transpose :math:`x` before multiplication. transpose_y (bool): Whether to transpose :math:`y` before multiplication. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The output Tensor. Examples: .. 
code-block:: python import paddle import numpy as np # vector * vector x_data = np.random.random([10]).astype(np.float32) y_data = np.random.random([10]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [1] # matrix * vector x_data = np.random.random([10, 5]).astype(np.float32) y_data = np.random.random([5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10] # batched matrix * broadcasted vector x_data = np.random.random([10, 5, 2]).astype(np.float32) y_data = np.random.random([2]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 5] # batched matrix * batched matrix x_data = np.random.random([10, 5, 2]).astype(np.float32) y_data = np.random.random([10, 2, 5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 5, 5] # batched matrix * broadcasted matrix x_data = np.random.random([10, 1, 5, 2]).astype(np.float32) y_data = np.random.random([1, 3, 2, 5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 3, 5, 5] """ if in_dygraph_mode(): return _C_ops.final_state_matmul(x, y, transpose_x, transpose_y) if _in_legacy_dygraph(): op_type = 'matmul_v2' op = getattr(_C_ops, op_type) return op(x, y, 'trans_x', transpose_x, 'trans_y', transpose_y) attrs = { 'trans_x': transpose_x, 'trans_y': transpose_y, } def __check_input(x, y): var_names = {'x': x, 'y': y} for name, val in var_names.items(): check_variable_and_dtype( val, name, ['float16', 'float32', 'float64', 'complex64', 'complex128'], 'matmul') __check_input(x, y) helper = LayerHelper('matmul_v2', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matmul_v2', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs=attrs) return out def norm(x, p='fro', axis=None, keepdim=False, name=None): """ Returns the matrix norm (Frobenius) or vector norm (the 1-norm, the Euclidean or 2-norm, and in general the p-norm for p > 0) of a given tensor. .. note:: This norm API is different from `numpy.linalg.norm`. This api supports high-order input tensors (rank >= 3), and certain axis need to be pointed out to calculate the norm. But `numpy.linalg.norm` only supports 1-D vector or 2-D matrix as input tensor. For p-order matrix norm, this api actually treats matrix as a flattened vector to calculate the vector norm, NOT REAL MATRIX NORM. Args: x (Tensor): The input tensor could be N-D tensor, and the input data type could be float32 or float64. p (float|string, optional): Order of the norm. Supported values are `fro`, `0`, `1`, `2`, `inf`, `-inf` and any positive real number yielding the corresponding p-norm. Not supported: ord < 0 and nuclear norm. Default value is `fro`. axis (int|list|tuple, optional): The axis on which to apply norm operation. If axis is int or list(int)/tuple(int) with only one element, the vector norm is computed over the axis. If `axis < 0`, the dimension to norm operation is rank(input) + axis. If axis is a list(int)/tuple(int) with two elements, the matrix norm is computed over the axis. Defalut value is `None`. keepdim (bool, optional): Whether to reserve the reduced dimension in the output Tensor. 
The result tensor will have fewer dimension than the :attr:`input` unless :attr:`keepdim` is true, default value is False. name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: results of norm operation on the specified axis of input tensor, it's data type is the same as input's Tensor. Examples: .. code-block:: python import paddle import numpy as np shape=[2, 3, 4] np_input = np.arange(24).astype('float32') - 12 np_input = np_input.reshape(shape) x = paddle.to_tensor(np_input) #[[[-12. -11. -10. -9.] [ -8. -7. -6. -5.] [ -4. -3. -2. -1.]] # [[ 0. 1. 2. 3.] [ 4. 5. 6. 7.] [ 8. 9. 10. 11.]]] # compute frobenius norm along last two dimensions. out_fro = paddle.linalg.norm(x, p='fro', axis=[0,1]) # out_fro.numpy() [17.435596 16.911535 16.7332 16.911535] # compute 2-order vector norm along last dimension. out_pnorm = paddle.linalg.norm(x, p=2, axis=-1) #out_pnorm.numpy(): [[21.118711 13.190906 5.477226] # [ 3.7416575 11.224972 19.131126]] # compute 2-order norm along [0,1] dimension. out_pnorm = paddle.linalg.norm(x, p=2, axis=[0,1]) #out_pnorm.numpy(): [17.435596 16.911535 16.7332 16.911535] # compute inf-order norm out_pnorm = paddle.linalg.norm(x, p=np.inf) #out_pnorm.numpy() = [12.] out_pnorm = paddle.linalg.norm(x, p=np.inf, axis=0) #out_pnorm.numpy(): [[12. 11. 10. 9.] [8. 7. 6. 7.] [8. 9. 10. 11.]] # compute -inf-order norm out_pnorm = paddle.linalg.norm(x, p=-np.inf) #out_pnorm.numpy(): [0.] out_pnorm = paddle.linalg.norm(x, p=-np.inf, axis=0) #out_pnorm.numpy(): [[0. 1. 2. 3.] [4. 5. 6. 5.] [4. 3. 2. 1.]] """ def frobenius_norm(input, dim=None, keepdim=False, name=None): """ The frobenius norm OP is to calculate the frobenius norm of certain two dimensions of Tensor `input`. Args: input (Variable): Tensor, data type float32, float64. dim (list, optional): None for last two dimensions. keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False. """ if dim is not None and not (isinstance(dim, list) and len(dim) == 2): raise ValueError( "The dim of frobenius norm op should be None or two elements list!" ) if paddle.in_dynamic_mode(): if dim is None: return _C_ops.frobenius_norm(input, 'keep_dim', keepdim, 'reduce_all', True) return _C_ops.frobenius_norm(input, 'dim', dim, 'keep_dim', keepdim, 'reduce_all', False) attrs = {'dim': dim, 'keep_dim': keepdim, 'reduce_all': False} if dim is None: attrs['reduce_all'] = True check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'frobenius_norm') helper = LayerHelper('frobenius_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op( type='frobenius_norm', inputs={'X': input}, outputs={'Out': out}, attrs=attrs) return out def vector_norm(input, porder=None, axis=None, keepdim=False, asvector=False, name=None): """ Calculate the p-order vector norm for certain dimension of Tensor `input`. Args: input (Variable): Tensor, data type float32, float64. porder (float, optional): None for porder=2.0. axis (int, optional): None for last dimension. keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False. 
""" if paddle.in_dynamic_mode(): if axis is None: axis = -1 return _C_ops.p_norm(input, 'porder', porder, 'axis', axis, 'keepdim', keepdim, 'asvector', asvector) if porder is not None: check_type(porder, 'porder', (float, int), 'p_norm') if axis is not None: check_type(axis, 'axis', (int), 'p_norm') check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'p_norm') attrs = { 'axis': axis if axis is not None else -1, 'porder': float(porder) if porder is not None else 2.0, 'keepdim': keepdim, 'asvector': asvector, 'epsilon': 1e-12, } helper = LayerHelper('p_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op( type='p_norm', inputs={'X': input}, outputs={'Out': out}, attrs=attrs) return out def inf_norm(input, porder=None, axis=axis, keepdim=False, asvector=False, name=None): helper = LayerHelper('frobenius_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op(type='abs', inputs={'X': input}, outputs={'Out': out}) reduce_out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) reduce_all = True if axis == None or axis == [] or asvector == True else False axis = axis if axis != None and axis != [] else [0] reduce_type = 'reduce_max' if porder == np.float( 'inf') else 'reduce_min' helper.append_op( type=reduce_type, inputs={'X': out}, outputs={'Out': reduce_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) return reduce_out def p_matrix_norm(input, porder=1., axis=axis, keepdim=False, name=None): """ NOTE: This function actually treats the matrix as flattened vector to calculate vector norm instead of matrix norm. """ block = LayerHelper('norm', **locals()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) abs_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='abs', inputs={'X': input}, outputs={'Out': abs_out}) pow_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='pow', inputs={'X': abs_out}, outputs={'Out': pow_out}, attrs={'factor': porder}) sum_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': sum_out}, attrs={ 'dim': axis, 'keep_dim': keepdim, 'reduce_all': True if axis is None else False }) porder block.append_op( type='pow', inputs={'X': sum_out}, outputs={'Out': out}, attrs={'factor': float(1. / porder)}) return out if axis is None and p is not None: if isinstance(p, str): if p == "fro": return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name) else: raise ValueError( "only valid string values are 'fro', found {}".format(p)) elif isinstance(p, (int, float)): return vector_norm( x, porder=p, axis=axis, keepdim=keepdim, asvector=True, name=name) else: raise ValueError("only valid p type is string or float, found {}". 
format(type(p))) if isinstance(axis, tuple): axis = list(axis) if isinstance(axis, list) and len(axis) == 1: axis = axis[0] #calculate vector norm, where axis is int or list with only one integer if isinstance(axis, int): if isinstance(p, str): if p == "fro": return vector_norm( x, porder=2, axis=axis, keepdim=keepdim, asvector=False, name=name) else: raise ValueError( "only valid string values are 'fro', found {}".format(p)) elif isinstance(p, (int, float)): return vector_norm( x, axis=axis, porder=p, keepdim=keepdim, asvector=False, name=name) else: raise ValueError( "unspport p for p-order vector norm. except float, found {}". format(p)) #calculate matrix norm, where axis is list with two integers elif isinstance(axis, list) and len(axis) == 2: if p == "fro": return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name) elif p == np.inf or p == -np.inf: return inf_norm(x, porder=p, axis=axis, keepdim=keepdim, name=name) elif p == 0: raise ValueError( "just suport axis type int or list (length of list <=1) if p = 0, found {}". format(axis)) else: return p_matrix_norm( x, porder=p, axis=axis, keepdim=keepdim, name=name) else: raise ValueError( "except axis type int or list (length of list <=2), found {}". format(axis)) def dist(x, y, p=2, name=None): r""" This OP returns the p-norm of (x - y). It is not a norm in a strict sense, only as a measure of distance. The shapes of x and y must be broadcastable. The definition is as follows, for details, please refer to the `numpy's broadcasting <https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`_: - Each input has at least one dimension. - Match the two input dimensions from back to front, the dimension sizes must either be equal, one of them is 1, or one of them does not exist. Where, z = x - y, the shapes of x and y are broadcastable, then the shape of z can be obtained as follows: 1. If the number of dimensions of x and y are not equal, prepend 1 to the dimensions of the tensor with fewer dimensions. For example, The shape of x is [8, 1, 6, 1], the shape of y is [7, 1, 5], prepend 1 to the dimension of y. x (4-D Tensor): 8 x 1 x 6 x 1 y (4-D Tensor): 1 x 7 x 1 x 5 2. Determine the size of each dimension of the output z: choose the maximum value from the two input dimensions. z (4-D Tensor): 8 x 7 x 6 x 5 If the number of dimensions of the two inputs are the same, the size of the output can be directly determined in step 2. When p takes different values, the norm formula is as follows: When p = 0, defining $0^0=0$, the zero-norm of z is simply the number of non-zero elements of z. .. math:: ||z||_{0}=\lim_{p \\rightarrow 0}\sum_{i=1}^{m}|z_i|^{p} When p = inf, the inf-norm of z is the maximum element of z. .. math:: ||z||_\infty=\max_i |z_i| When p = -inf, the negative-inf-norm of z is the minimum element of z. .. math:: ||z||_{-\infty}=\min_i |z_i| Otherwise, the p-norm of z follows the formula, .. math:: ||z||_{p}=(\sum_{i=1}^{m}|z_i|^p)^{\\frac{1}{p}} Args: x (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64. y (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64. p (float, optional): The norm to be computed, its data type is float32 or float64. Default: 2. Returns: Tensor: Tensor that is the p-norm of (x - y). Examples: .. code-block:: python import paddle import numpy as np x = paddle.to_tensor(np.array([[3, 3],[3, 3]]), "float32") y = paddle.to_tensor(np.array([[3, 3],[3, 1]]), "float32") out = paddle.dist(x, y, 0) print(out) # out = [1.] out = paddle.dist(x, y, 2) print(out) # out = [2.] 
out = paddle.dist(x, y, float("inf")) print(out) # out = [2.] out = paddle.dist(x, y, float("-inf")) print(out) # out = [0.] """ check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'dist') check_variable_and_dtype(y, 'dtype', ['float32', 'float64'], 'dist') check_type(p, 'p', (float, int), 'dist') helper = LayerHelper("dist", **locals()) out = helper.create_variable_for_type_inference(x.dtype) inputs = {"X": [x], "Y": [y]} outputs = {'Out': [out]} attrs = {"p": float(p)} helper.append_op( type='dist', inputs=inputs, outputs={'Out': out}, attrs=attrs) return out def cond(x, p=None, name=None): """ Computes the condition number of a matrix or batches of matrices with respect to a matrix norm ``p``. Args: x (Tensor): The input tensor could be tensor of shape ``(*, m, n)`` where ``*`` is zero or more batch dimensions for ``p`` in ``(2, -2)``, or of shape ``(*, n, n)`` where every matrix is invertible for any supported ``p``. And the input data type could be ``float32`` or ``float64``. p (float|string, optional): Order of the norm. Supported values are `fro`, `nuc`, `1`, `-1`, `2`, `-2`, `inf`, `-inf`. Default value is `None`, meaning that the order of the norm is `2`. name (str, optional): The default value is `None`. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: computing results of condition number, its data type is the same as input Tensor ``x``. Examples: .. code-block:: python import paddle import numpy as np x = paddle.to_tensor([[1., 0, -1], [0, 1, 0], [1, 0, 1]]) # compute conditional number when p is None out = paddle.linalg.cond(x) # out.numpy() [1.4142135] # compute conditional number when order of the norm is 'fro' out_fro = paddle.linalg.cond(x, p='fro') # out_fro.numpy() [3.1622777] # compute conditional number when order of the norm is 'nuc' out_nuc = paddle.linalg.cond(x, p='nuc') # out_nuc.numpy() [9.2426405] # compute conditional number when order of the norm is 1 out_1 = paddle.linalg.cond(x, p=1) # out_1.numpy() [2.] # compute conditional number when order of the norm is -1 out_minus_1 = paddle.linalg.cond(x, p=-1) # out_minus_1.numpy() [1.] # compute conditional number when order of the norm is 2 out_2 = paddle.linalg.cond(x, p=2) # out_2.numpy() [1.4142135] # compute conditional number when order of the norm is -1 out_minus_2 = paddle.linalg.cond(x, p=-2) # out_minus_2.numpy() [0.70710677] # compute conditional number when order of the norm is inf out_inf = paddle.linalg.cond(x, p=np.inf) # out_inf.numpy() [2.] # compute conditional number when order of the norm is -inf out_minus_inf = paddle.linalg.cond(x, p=-np.inf) # out_minus_inf.numpy() [1.] 
a = paddle.to_tensor(np.random.randn(2, 4, 4).astype('float32')) # a.numpy() # [[[ 0.14063153 -0.996288 0.7996131 -0.02571543] # [-0.16303636 1.5534962 -0.49919784 -0.04402903] # [-1.1341571 -0.6022629 0.5445269 0.29154757] # [-0.16816919 -0.30972657 1.7521842 -0.5402487 ]] # [[-0.58081484 0.12402827 0.7229862 -0.55046535] # [-0.15178485 -1.1604939 0.75810957 0.30971205] # [-0.9669573 1.0940945 -0.27363303 -0.35416734] # [-1.216529 2.0018666 -0.7773689 -0.17556527]]] a_cond_fro = paddle.linalg.cond(a, p='fro') # a_cond_fro.numpy() [31.572273 28.120834] b = paddle.to_tensor(np.random.randn(2, 3, 4).astype('float64')) # b.numpy() # [[[ 1.61707487 0.46829144 0.38130416 0.82546736] # [-1.72710298 0.08866375 -0.62518804 0.16128892] # [-0.02822879 -1.67764516 0.11141444 0.3220113 ]] # [[ 0.22524372 0.62474921 -0.85503233 -1.03960523] # [-0.76620689 0.56673047 0.85064753 -0.45158196] # [ 1.47595418 2.23646462 1.5701758 0.10497519]]] b_cond_2 = paddle.linalg.cond(b, p=2) # b_cond_2.numpy() [3.30064451 2.51976252] """ def mat_norm(input, porder=1., axis=None): """ NOTE: Calculate the matrix norm of a square matrix or batches of square matrices, when porder is in (1, -1, inf, -inf) """ reduce_all = True if axis is None or axis == [] else False axis = axis if axis != None and axis != [] else [0] keepdim = False if paddle.in_dynamic_mode(): abs_out = _C_ops.abs(input) sum_out = _C_ops.reduce_sum(abs_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == 1 or porder == np.inf: return _C_ops.reduce_max(sum_out, 'dim', [-1], 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == -1 or porder == -np.inf: return _C_ops.reduce_min(sum_out, 'dim', [-1], 'keepdim', keepdim, 'reduce_all', reduce_all) block = LayerHelper('norm', **locals()) abs_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='abs', inputs={'X': input}, outputs={'Out': abs_out}) block.append_op( type='reduce_sum', inputs={'X': abs_out}, outputs={'Out': sum_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) if porder == 1 or porder == np.inf: block.append_op( type='reduce_max', inputs={'X': sum_out}, outputs={'Out': out}, attrs={ 'dim': [-1], 'keep_dim': keepdim, 'reduce_all': reduce_all }) if porder == -1 or porder == -np.inf: block.append_op( type='reduce_min', inputs={'X': sum_out}, outputs={'Out': out}, attrs={ 'dim': [-1], 'keep_dim': keepdim, 'reduce_all': reduce_all }) return out def fro_norm(input, porder=2, axis=[-1]): """ NOTE: Calculate the frobenius norm of a square matrix or batches of square matrices. """ reduce_all = True if axis is None or axis == [] else False keepdim = False if paddle.in_dynamic_mode(): pow_out = _C_ops.pow(input, 'factor', porder) sum_out_1 = _C_ops.reduce_sum(pow_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) sum_out_2 = _C_ops.reduce_sum(sum_out_1, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) return _C_ops.pow(sum_out_2, 'factor', float(1. 
/ porder)) block = LayerHelper('norm', **locals()) pow_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out_1 = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out_2 = block.create_variable_for_type_inference( dtype=block.input_dtype()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='pow', inputs={'X': input}, outputs={'Out': pow_out}, attrs={'factor': porder}) block.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': sum_out_1}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='reduce_sum', inputs={'X': sum_out_1}, outputs={'Out': sum_out_2}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='pow', inputs={'X': sum_out_2}, outputs={'Out': out}, attrs={'factor': float(1. / porder)}) return out def svd_norm(input, porder, axis=[-1]): """ NOTE: Calculate the matrix norm, which is related to singular values, of a matrix or batches of matrices, including nuclear norm, 2-norm and (-2)-norm. """ reduce_all = True if axis is None or axis == [] else False keepdim = False u, s, vh = svd(input, full_matrices=False) if paddle.in_dynamic_mode(): if porder == "nuc": return _C_ops.reduce_sum(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) max_out = _C_ops.reduce_max(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) min_out = _C_ops.reduce_min(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == 2: return _C_ops.elementwise_div(max_out, min_out, 'aixs', axis, 'use_mkldnn', False) if porder == -2: return _C_ops.elementwise_div(min_out, max_out, 'aixs', axis, 'use_mkldnn', False) block = LayerHelper('norm', **locals()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) if porder == "nuc": block.append_op( type='reduce_sum', inputs={'X': s}, outputs={'Out': out}, attrs={ 'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all }) return out max_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) min_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='reduce_max', inputs={'X': s}, outputs={'Out': max_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='reduce_min', inputs={'X': s}, outputs={'Out': min_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) if porder == 2: block.append_op( type='elementwise_div', inputs={'X': max_out, 'Y': min_out}, outputs={'Out': out}, attrs={'aixs': axis, 'use_mkldnn': False}) return out if porder == -2: block.append_op( type='elementwise_div', inputs={'X': min_out, 'Y': max_out}, outputs={'Out': out}, attrs={'aixs': axis, 'use_mkldnn': False}) return out def empty_tensor(input, shape): if paddle.in_dynamic_mode(): return input.reshape(shape) raise ValueError("only support x is nonempty tensor in static mode") x_shape = list(x.shape) if not len(x_shape) >= 2: raise ValueError("input should be a matrix or batches of matrices, " + "but the dimention of received input is {}".format( len(x_shape))) if p == None: p = 2 x_size = 0 if (0 in x_shape) else 1 if p in ("fro", "nuc", 1, -1, np.inf, -np.inf): if x_shape[len(x_shape) - 1] == x_shape[len(x_shape) - 2]: if x_size == 0: return empty_tensor(x, x_shape[:-2]) x_inv = x.inverse() if p == "fro": return fro_norm(x) * fro_norm(x_inv) if p == "nuc": return svd_norm(x, p) * svd_norm(x_inv, p) if p in (1, -1): return mat_norm( x, 
porder=p, axis=[-2]) * mat_norm( x_inv, porder=p, axis=[-2]) if p in (np.inf, -np.inf): return mat_norm( x, porder=p, axis=[-1]) * mat_norm( x_inv, porder=p, axis=[-1]) else: raise ValueError("only support p is {} when input is a ".format(p) + "square matrix or batches of square matrices") elif p in (2, -2): if x_size == 0: return empty_tensor(x, x_shape[:-2]) return svd_norm(x, porder=p) else: raise ValueError( "unsupported {} for p, only supporting ('fro', 'nuc', ".format( p) + "1, -1, 2, -2, inf, -inf) or none") def dot(x, y, name=None): """ This operator calculates inner product for vectors. .. note:: Support 1-d and 2-d Tensor. When it is 2d, the first dimension of this matrix is the batch dimension, which means that the vectors of multiple batches are dotted. Parameters: x(Tensor): 1-D or 2-D ``Tensor``. Its dtype should be ``float32``, ``float64``, ``int32``, ``int64`` y(Tensor): 1-D or 2-D ``Tensor``. Its dtype soulde be ``float32``, ``float64``, ``int32``, ``int64`` name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. Details: :ref:`api_guide_Name` Returns: Tensor: the calculated result Tensor. Examples: .. code-block:: python import paddle import numpy as np x_data = np.random.uniform(0.1, 1, [10]).astype(np.float32) y_data = np.random.uniform(1, 3, [10]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.dot(x, y) print(z) """ op_type = 'dot' # skip var type check in dygraph mode to improve efficiency if paddle.in_dynamic_mode(): op = getattr(_C_ops, op_type) return op(x, y) assert x is not None, 'x cannot be None in {}'.format(op_type) assert y is not None, 'y cannot be None in {}'.format(op_type) check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], op_type) check_variable_and_dtype(y, 'y', ['float32', 'float64', 'int32', 'int64'], op_type) helper = LayerHelper(op_type, **locals()) if name is None: out = helper.create_variable_for_type_inference(dtype=x.dtype) else: out = helper.create_variable( name=name, dtype=x.dtype, persistable=False) helper.append_op( type="dot", inputs={'X': x, 'Y': y}, attrs={}, outputs={"Out": out}) return out def cov(x, rowvar=True, ddof=True, fweights=None, aweights=None, name=None): """ Estimate the covariance matrix of the input variables, given data and weights. A covariance matrix is a square matrix, indicate the covariance of each pair variables in the input matrix. For example, for an N-dimensional samples X=[x1,x2,…xN]T, then the covariance matrix element Cij is the covariance of xi and xj. The element Cii is the variance of xi itself. Parameters: x(Tensor): A N-D(N<=2) Tensor containing multiple variables and observations. By default, each row of x represents a variable. Also see rowvar below. rowvar(Bool, optional): If rowvar is True (default), then each row represents a variable, with observations in the columns. Default: True ddof(Bool, optional): If ddof=True will return the unbiased estimate, and ddof=False will return the simple average. Default: True fweights(Tensor, optional): 1-D Tensor of integer frequency weights; The number of times each observation vector should be repeated. Default: None aweights(Tensor, optional): 1-D Tensor of observation vector weights. How important of the observation vector, larger data means this element is more important. Default: None name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. 
Details: :ref:`api_guide_Name` Returns: Tensor: The covariance matrix Tensor of the variables. Examples: .. code-block:: python import paddle xt = paddle.rand((3,4)) paddle.linalg.cov(xt) ''' Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, [[0.07918842, 0.06127326, 0.01493049], [0.06127326, 0.06166256, 0.00302668], [0.01493049, 0.00302668, 0.01632146]]) ''' """ op_type = 'cov' if len(x.shape) > 2 or len(x.shape) < 1: raise ValueError( "Input(x) only support N-D (1<=N<=2) tensor in cov, but received " "length of Input(input) is %s." % len(x.shape)) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cov') nx = x if len(x.shape) == 1: nx = x.reshape((1, -1)) if not rowvar and nx.shape[0] != 1: nx = nx.t() w = None observation_num = nx.shape[1] if fweights is not None: w = fweights.astype(nx.dtype) if len(w.shape) > 1: raise ValueError( "Input(fweights) only support N-D (N<=1) tensor in cov, but received " "shape of Input(input) is %s." % len(fweights.shape)) if fweights.shape[0] != observation_num: raise ValueError( "The number of Input(fweights) should equal to x's dim[1]: {}, but received " "size of Input(fweights) is {}.".format(observation_num, fweights.shape[0])) if fweights.min() < 0: raise ValueError( "The value of Input(fweights) cannot be negtive, but received " "min of Input(fweights) is {}.".format(fweights.min())) if not paddle.all(fweights == paddle.round(fweights.astype('float64'))): raise ValueError("Input(fweights) must be integer ") if aweights is not None: aw = aweights.astype(nx.dtype) if len(aw.shape) > 1: raise ValueError( "Input(aweights) only support N-D (N<=1) tensor in cov, but received " "length of Input(input) is %s." % len(aweights.shape)) check_variable_and_dtype(aweights, 'dtype', ['float32', 'float64'], 'cov') if aweights.shape[0] != observation_num: raise ValueError( "The number of Input(aweights) should equal to x's dim[1]: {}, but received " "size of Input(aweights) is {}.".format(observation_num, aweights.shape[0])) if aweights.min() < 0: raise ValueError( "The value of Input(aweights) cannot be negtive, but received " "min of Input(aweights) is {}.".format(aweights.min())) if w is not None: w = w * aw else: w = aw w_sum = paddle.to_tensor(observation_num, dtype=nx.dtype) if fweights is not None or aweights is not None: w_sum = w.sum() if w_sum.item() == 0: raise ValueError("The sum of weights is zero, can't be normalized.") if w is not None: nx_w = nx * w avg = (nx_w).sum(axis=1) / w_sum else: avg = nx.sum(axis=1) / w_sum nx_w = nx if w is not None and aweights is not None and ddof == True: norm_factor = w_sum - (w * aweights).sum() / w_sum else: norm_factor = w_sum - ddof if norm_factor <= 0: norm_factor = paddle.to_tensor(0, dtype=nx.dtype) nx = nx - avg.unsqueeze(1) xxt = paddle.mm(nx, nx_w.t().conj()) cov = paddle.divide(xxt, norm_factor).squeeze() return cov def t(input, name=None): """ Transpose <=2-D tensor. 0-D and 1-D tensors are returned as it is and 2-D tensor is equal to the paddle.transpose function which perm dimensions set 0 and 1. Args: input (Tensor): The input Tensor. It is a N-D (N<=2) Tensor of data types float16, float32, float64, int32. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` Returns: Tensor: A transposed n-D Tensor, with data type being float16, float32, float64, int32, int64. For Example: .. 
code-block:: text # Example 1 (0-D tensor) x = tensor([0.79]) paddle.t(x) = tensor([0.79]) # Example 2 (1-D tensor) x = tensor([0.79, 0.84, 0.32]) paddle.t(x) = tensor([0.79, 0.84, 0.32]) # Example 3 (2-D tensor) x = tensor([0.79, 0.84, 0.32], [0.64, 0.14, 0.57]) paddle.t(x) = tensor([0.79, 0.64], [0.84, 0.14], [0.32, 0.57]) Examples: .. code-block:: python import paddle x = paddle.ones(shape=[2, 3], dtype='int32') x_transposed = paddle.t(x) print(x_transposed.shape) # [3, 2] """ if len(input.shape) > 2: raise ValueError( "Input(input) only support N-D (N<=2) tensor, but received " "length of Input(input) is %s. Perhaps you can use paddle." "tensor.transpose() instead." % len(input.shape)) if paddle.in_dynamic_mode(): if len(input.shape) == 1: return input # 2-D tensor perm = [1, 0] out, _ = _C_ops.transpose2(input, 'axis', perm) return out check_variable_and_dtype( input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'], 'transpose') helper = LayerHelper('t', **locals()) out = helper.create_variable_for_type_inference(input.dtype) input_shape = helper.create_variable_for_type_inference(input.dtype) if len(input.shape) == 1: out = input else: helper.append_op( type='transpose2', inputs={'X': [input]}, outputs={'Out': [out], 'XShape': [input_shape]}, attrs={'axis': [1, 0]}) return out def cross(x, y, axis=None, name=None): """ Computes the cross product between two tensors along an axis. Inputs must have the same shape, and the length of their axes should be equal to 3. If `axis` is not given, it defaults to the first axis found with the length 3. Args: x (Tensor): The first input tensor. y (Tensor): The second input tensor. axis (int, optional): The axis along which to compute the cross product. It defaults to the first axis found with the length 3. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor. A Tensor with same data type as `x`. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0], [3.0, 3.0, 3.0]]) y = paddle.to_tensor([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]) z1 = paddle.cross(x, y) # [[-1. -1. -1.] # [ 2. 2. 2.] # [-1. -1. -1.]] z2 = paddle.cross(x, y, axis=1) # [[0. 0. 0.] # [0. 0. 0.] # [0. 0. 0.]] """ if in_dygraph_mode(): return _C_ops.final_state_cross(x, y, axis) else: if _in_legacy_dygraph(): if axis is not None: return _C_ops.cross(x, y, 'dim', axis) else: return _C_ops.cross(x, y) else: helper = LayerHelper("cross", **locals()) out = helper.create_variable_for_type_inference(x.dtype) attrs = dict() attrs['dim'] = axis helper.append_op( type='cross', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs=attrs) return out def cholesky(x, upper=False, name=None): r""" Computes the Cholesky decomposition of one symmetric positive-definite matrix or batches of symmetric positive-definite matrice. If `upper` is `True`, the decomposition has the form :math:`A = U^{T}U` , and the returned matrix :math:`U` is upper-triangular. Otherwise, the decomposition has the form :math:`A = LL^{T}` , and the returned matrix :math:`L` is lower-triangular. Args: x (Tensor): The input tensor. Its shape should be `[*, M, M]`, where * is zero or more batch dimensions, and matrices on the inner-most 2 dimensions all should be symmetric positive-definite. Its data type should be float32 or float64. upper (bool): The flag indicating whether to return upper or lower triangular matrices. Default: False. 
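# Added note (hedged, not from the original docstring): with the default
# ``upper=False`` the returned factor L satisfies x == L @ L.T, while
# ``upper=True`` returns U with x == U.T @ U. A minimal self-check, assuming
# paddle.matmul with transpose_y:
#
# L = paddle.linalg.cholesky(x, upper=False)
# print(paddle.matmul(L, L, transpose_y=True))  # reproduces x up to rounding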
Returns: Tensor: A Tensor with same shape and data type as `x`. It represents \ triangular matrices generated by Cholesky decomposition. Examples: .. code-block:: python import paddle import numpy as np a = np.random.rand(3, 3) a_t = np.transpose(a, [1, 0]) x_data = np.matmul(a, a_t) + 1e-03 x = paddle.to_tensor(x_data) out = paddle.linalg.cholesky(x, upper=False) print(out) # [[1.190523 0. 0. ] # [0.9906703 0.27676893 0. ] # [1.25450498 0.05600871 0.06400121]] """ if paddle.in_dynamic_mode(): return _C_ops.cholesky(x, "upper", upper) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cholesky') check_type(upper, 'upper', bool, 'cholesky') helper = LayerHelper('cholesky', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='cholesky', inputs={'X': [x]}, outputs={'Out': out}, attrs={'upper': upper}) return out def matrix_rank(x, tol=None, hermitian=False, name=None): r""" Computes the rank of a matrix. The rank of a matrix is the number of singular values that are greater than the specified `tol` threshold when hermitian=False, or the number of eigenvalues in absolute value that are greater than the specified `tol` threshold when hermitian=True. Args: x (Tensor): The input tensor. Its shape should be `[..., m, n]`, where `...` is zero or more batch dimensions. If `x` is a batch of matrices then the output has the same batch dimensions. The data type of `x` should be float32 or float64. tol (float,Tensor,optional): the tolerance value. Default: None. If `tol` is not specified, and `sigma` is the largest singular value (or eigenvalues in absolute value), and `eps` is the epsilon value for the dtype of `x`, then `tol` is computed with formula `tol=sigma * max(m,n) * eps`. Note that if `x` is a batch of matrices, `tol` is computed this way for every batch. hermitian (bool,optional): indicates whether `x` is Hermitian. Default: False. When hermitian=True, `x` is assumed to be Hermitian, enabling a more efficient method for finding eigenvalues, but `x` is not checked inside the function. Instead, We just use the lower triangular of the matrix to compute. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: Rank of tensor x. Examples: .. 
code-block:: python import paddle a = paddle.eye(10) b = paddle.linalg.matrix_rank(a) print(b) # b = [10] c = paddle.ones(shape=[3, 4, 5, 5]) d = paddle.linalg.matrix_rank(c, tol=0.01, hermitian=True) print(d) # d = [[1, 1, 1, 1], # [1, 1, 1, 1], # [1, 1, 1, 1]] """ if paddle.in_dynamic_mode(): if tol is None: tol_tensor = None tol_attr = 0.0 use_default_tol = True elif isinstance(tol, Variable): if tol.dtype != x.dtype: tol_tensor = cast(tol, x.dtype) else: tol_tensor = tol tol_attr = 0.0 use_default_tol = False else: tol_tensor = None tol_attr = float(tol) use_default_tol = False return _C_ops.matrix_rank(x, tol_tensor, "tol", tol_attr, 'hermitian', hermitian, 'use_default_tol', use_default_tol) inputs = {} attrs = {} check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'matrix_rank') inputs['X'] = x if tol is None: attrs['use_default_tol'] = True elif isinstance(tol, Variable): check_variable_and_dtype(tol, 'tol', ['float32'], 'matrix_rank') attrs['use_default_tol'] = False if tol.dtype != x.dtype: inputs['TolTensor'] = cast(tol, x.dtype) else: inputs['TolTensor'] = tol else: check_type(tol, 'tol', float, 'matrix_rank') attrs['use_default_tol'] = False attrs['tol'] = tol check_type(hermitian, 'hermitian', bool, 'matrix_rank') attrs['hermitian'] = hermitian helper = LayerHelper('matrix_rank', **locals()) out = helper.create_variable_for_type_inference(dtype='int32') helper.append_op( type='matrix_rank', inputs=inputs, outputs={'Out': out}, attrs=attrs) return out def bmm(x, y, name=None): """ Applies batched matrix multiplication to two tensors. Both of the two input tensors must be three-dementional and share the same batch size. if x is a (b, m, k) tensor, y is a (b, k, n) tensor, the output will be a (b, m, n) tensor. Args: x (Tensor): The input Tensor. y (Tensor): The input Tensor. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The product Tensor. Examples: .. code-block:: python import paddle # In imperative mode: # size x: (2, 2, 3) and y: (2, 3, 2) x = paddle.to_tensor([[[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]], [[3.0, 3.0, 3.0], [4.0, 4.0, 4.0]]]) y = paddle.to_tensor([[[1.0, 1.0],[2.0, 2.0],[3.0, 3.0]], [[4.0, 4.0],[5.0, 5.0],[6.0, 6.0]]]) out = paddle.bmm(x, y) #output size: (2, 2, 2) #output value: #[[[6.0, 6.0],[12.0, 12.0]],[[45.0, 45.0],[60.0, 60.0]]] out_np = out.numpy() """ x_shape = x.shape y_shape = y.shape if not len(x_shape) == len(y_shape) == 3: raise ValueError( "x and y should be 3-dimensional. But received x's dimention: {}, y's dimention: {}". format(x_shape, y_shape)) if x_shape[2] != y_shape[1]: raise ValueError( "x's width must be equal with y's height. But received x's shape: {}, y's shape: {}". format(x_shape, y_shape)) if x_shape[0] != y_shape[0]: raise ValueError( "x's batch (shape[0]) must be equal with y's batch (shape[0]). But received x's shape: {}, y's shape: {}". format(x_shape, y_shape)) if paddle.in_dynamic_mode(): return _C_ops.bmm(x, y) helper = LayerHelper('bmm', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type='bmm', inputs={'X': x, 'Y': y}, outputs={'Out': out}) return out def histogram(input, bins=100, min=0, max=0, name=None): """ Computes the histogram of a tensor. The elements are sorted into equal width bins between min and max. If min and max are both zero, the minimum and maximum values of the data are used. Args: input (Tensor): A Tensor(or LoDTensor) with shape :math:`[N_1, N_2,..., N_k]` . 
The data type of the input Tensor should be float32, float64, int32, int64. bins (int): number of histogram bins min (int): lower end of the range (inclusive) max (int): upper end of the range (inclusive) Returns: Tensor: data type is int64, shape is (nbins,). Examples: .. code-block:: python import paddle inputs = paddle.to_tensor([1, 2, 1]) result = paddle.histogram(inputs, bins=4, min=0, max=3) print(result) # [0, 2, 1, 0] """ if paddle.in_dynamic_mode(): return _C_ops.histogram(input, "bins", bins, "min", min, "max", max) helper = LayerHelper('histogram', **locals()) check_variable_and_dtype( input, 'X', ['int32', 'int64', 'float32', 'float64'], 'histogram') out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64) helper.append_op( type='histogram', inputs={'X': input}, outputs={'Out': out}, attrs={'bins': bins, 'min': min, 'max': max}) return out def bincount(x, weights=None, minlength=0, name=None): """ Computes frequency of each value in the input tensor. Args: x (Tensor): A Tensor with non-negative integer. Should be 1-D tensor. weights (Tensor, optional): Weight for each value in the input tensor. Should have the same shape as input. Default is None. minlength (int, optional): Minimum number of bins. Should be non-negative integer. Default is 0. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor of frequency. Examples: .. code-block:: python import paddle x = paddle.to_tensor([1, 2, 1, 4, 5]) result1 = paddle.bincount(x) print(result1) # [0, 2, 1, 0, 1, 1] w = paddle.to_tensor([2.1, 0.4, 0.1, 0.5, 0.5]) result2 = paddle.bincount(x, weights=w) print(result2) # [0., 2.19999981, 0.40000001, 0., 0.50000000, 0.50000000] """ if x.dtype not in [paddle.int32, paddle.int64]: raise TypeError("Elements in Input(x) should all be integers") if paddle.in_dynamic_mode(): return _C_ops.bincount(x, weights, "minlength", minlength) helper = LayerHelper('bincount', **locals()) check_variable_and_dtype(x, 'X', ['int32', 'int64'], 'bincount') if weights is not None: check_variable_and_dtype(weights, 'Weights', ['int32', 'int64', 'float32', 'float64'], 'bincount') out = helper.create_variable_for_type_inference(dtype=weights.dtype) else: out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='bincount', inputs={'X': x, 'Weights': weights}, outputs={'Out': out}, attrs={'minlength': minlength}) return out def mv(x, vec, name=None): """ Performs a matrix-vector product of the matrix x and the vector vec. Args: x (Tensor): A tensor with shape :math:`[M, N]` , The data type of the input Tensor x should be one of float32, float64. vec (Tensor): A tensor with shape :math:`[N]` , The data type of the input Tensor x should be one of float32, float64. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor which is producted by x and vec. Examples: .. 
code-block:: python # x: [M, N], vec: [N] # paddle.mv(x, vec) # out: [M] import numpy as np import paddle x_data = np.array([[2, 1, 3], [3, 0, 1]]).astype("float64") x = paddle.to_tensor(x_data) vec_data = np.array([3, 5, 1]) vec = paddle.to_tensor(vec_data).astype("float64") out = paddle.mv(x, vec) """ if in_dygraph_mode(): return _C_ops.final_state_mv(x, vec) else: if _in_legacy_dygraph(): out = _C_ops.mv(x, vec) return out else: def __check_input(x, vec): var_names = {'x': x, 'vec': vec} for name, val in var_names.items(): check_variable_and_dtype(val, name, ['float32', 'float64'], 'mv') x_shape = list(x.shape) vec_shape = list(vec.shape) if len(x_shape) != 2: raise ValueError( "x should be 2-dimensional. But received x's dimention: {}". format(x_shape)) if len(vec_shape) != 1: raise ValueError( "vec should be 1-dimensional. But received vec's dimention: {}". format(vec_shape)) __check_input(x, vec) helper = LayerHelper('mv', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='mv', inputs={'X': x, 'Vec': vec}, outputs={'Out': out}) return out def det(x, name=None): """ Calculates determinant value of a square matrix or batches of square matrices. Args: x (Tensor): input (Tensor): the input matrix of size `(n, n)` or the batch of matrices of size `(*, n, n)` where `*` is one or more batch dimensions. Returns: y (Tensor):the determinant value of a square matrix or batches of square matrices. Examples: .. code-block:: python import paddle x = paddle.randn([3,3,3]) A = paddle.linalg.det(x) print(A) # [ 0.02547996, 2.52317095, -6.15900707]) """ if paddle.in_dynamic_mode(): return _C_ops.determinant(x) check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'det') input_shape = list(x.shape) assert len(input_shape) >= 2, \ "The x must be at least 2-dimensional, " \ "but received Input x's dimensional: %s.\n" % \ len(input_shape) assert (input_shape[-1] == input_shape[-2]), \ "Expect squared input," \ "but received %s by %s matrix.\n" \ %(input_shape[-2], input_shape[-1]) \ helper = LayerHelper('determinant', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='determinant', inputs={'Input': [x]}, outputs={'Out': [out]}) return out def slogdet(x, name=None): """ Calculates the sign and natural logarithm of the absolute value of a square matrix's or batches square matrices' determinant. The determinant can be computed with ``sign * exp(logabsdet) Supports input of float, double Note that for matrices that have zero determinant, this returns ``(0, -inf)`` Args: x (Tensor): the batch of matrices of size :math:`(*, n, n)` where math:`*` is one or more batch dimensions. Returns: y (Tensor): A tensor containing the sign of the determinant and the natural logarithm of the absolute value of determinant, respectively. Examples: .. code-block:: python import paddle x = paddle.randn([3,3,3]) A = paddle.linalg.slogdet(x) print(A) # [[ 1. , 1. , -1. 
], # [-0.98610914, -0.43010661, -0.10872950]]) """ if paddle.in_dynamic_mode(): return _C_ops.slogdeterminant(x) check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'slogdet') input_shape = list(x.shape) assert len(input_shape) >= 2, \ "The x must be at least 2-dimensional, " \ "but received Input x's dimensional: %s.\n" % \ len(input_shape) assert (input_shape[-1] == input_shape[-2]), \ "Expect squared input," \ "but received %s by %s matrix.\n" \ %(input_shape[-2], input_shape[-1]) \ helper = LayerHelper('slogdeterminant', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='slogdeterminant', inputs={'Input': [x]}, outputs={'Out': [out]}) return out def svd(x, full_matrices=False, name=None): r""" Computes the singular value decomposition of one matrix or a batch of regular matrices. Let :math:`X` be the input matrix or a batch of input matrices, the output should satisfies: .. math:: X = U * diag(S) * VT Args: x (Tensor): The input tensor. Its shape should be `[..., N, M]`, where `...` is zero or more batch dimensions. N and M can be arbitraty positive number. Note that if x is sigular matrices, the grad is numerical instable. The data type of x should be float32 or float64. full_matrices (bool): A flag to control the behavor of svd. If full_matrices = True, svd op will compute full U and V matrics, which means shape of U is `[..., N, N]`, shape of V is `[..., M, M]`. K = min(M, N). If full_matrices = False, svd op will use a economic method to store U and V. which means shape of U is `[..., N, K]`, shape of V is `[..., M, K]`. K = min(M, N). name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tuple of 3 tensors: (U, S, VH). VH is the conjugate transpose of V. S is the singlar value vectors of matrics with shape `[..., K]` Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [1.0, 3.0], [4.0, 6.0]]).astype('float64') x = x.reshape([3, 2]) u, s, vh = paddle.linalg.svd(x) print (u) #U = [[ 0.27364809, -0.21695147 ], # [ 0.37892198, -0.87112408 ], # [ 0.8840446 , 0.44053933 ]] print (s) #S = [8.14753743, 0.78589688] print (vh) #VT= [[ 0.51411221, 0.85772294], # [ 0.85772294, -0.51411221]] # one can verify : U * S * VT == X # U * UH == I # V * VH == I """ if paddle.in_dynamic_mode(): return _C_ops.svd(x, 'full_matrices', full_matrices) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'svd') check_type(full_matrices, 'full_matrices', bool, 'svd') helper = LayerHelper('svd', **locals()) u = helper.create_variable_for_type_inference(dtype=x.dtype) vh = helper.create_variable_for_type_inference(dtype=x.dtype) s = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['full_matrices'] = full_matrices helper.append_op( type='svd', inputs={'X': [x]}, outputs={'U': u, 'VH': vh, 'S': s}, attrs=attrs, ) return u, s, vh def matrix_power(x, n, name=None): r""" Computes the n-th power of a square matrix or a batch of square matrices. Let :math:`X` be a sqaure matrix or a batch of square matrices, :math:`n` be an exponent, the equation should be: .. math:: Out = X ^ {n} Specifically, - If `n > 0`, it returns the matrix or a batch of matrices raised to the power of `n`. - If `n = 0`, it returns the identity matrix or a batch of identity matrices. - If `n < 0`, it returns the inverse of each matrix (if invertible) raised to the power of `abs(n)`. 
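# Added illustration (a sketch, not part of the original docstring): the n < 0
# case means matrix_power(x, -n) equals matrix_power(paddle.inverse(x), n) for
# an invertible x; in particular matrix_power(x, -1) matches paddle.inverse(x).
#
# x = paddle.to_tensor([[2., 0.], [0., 4.]])
# print(paddle.linalg.matrix_power(x, -1))  # [[0.50, 0.], [0., 0.25]]
# print(paddle.inverse(x))                  # same values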
Args: x (Tensor): A square matrix or a batch of square matrices to be raised to power `n`. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. n (int): The exponent. It can be any positive, negative integer or zero. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The n-th power of the matrix (or the batch of matrices) `x`. Its data type should be the same as that of `x`. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1, 2, 3], [1, 4, 9], [1, 8, 27]], dtype='float64') print(paddle.linalg.matrix_power(x, 2)) # [[6. , 34. , 102.], # [14. , 90. , 282.], # [36. , 250., 804.]] print(paddle.linalg.matrix_power(x, 0)) # [[1., 0., 0.], # [0., 1., 0.], # [0., 0., 1.]] print(paddle.linalg.matrix_power(x, -2)) # [[ 12.91666667, -12.75000000, 2.83333333 ], # [-7.66666667 , 8. , -1.83333333 ], # [ 1.80555556 , -1.91666667 , 0.44444444 ]] """ if paddle.in_dynamic_mode(): return _C_ops.matrix_power(x, "n", n) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'matrix_power') check_type(n, 'n', int, 'matrix_power') helper = LayerHelper('matrix_power', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matrix_power', inputs={'X': x}, outputs={'Out': out}, attrs={'n': n}) return out def qr(x, mode="reduced", name=None): r""" Computes the QR decomposition of one matrix or batches of matrice (backward is unsupported now). Args: x (Tensor): The input tensor. Its shape should be `[..., M, N]`, where ... is zero or more batch dimensions. M and N can be arbitrary positive number. The data type of x should be float32 or float64. mode (str, optional): A flag to control the behavior of qr, the default is "reduced". Suppose x's shape is `[..., M, N]` and denoting `K = min(M, N)`: If mode = "reduced", qr op will return reduced Q and R matrices, which means Q's shape is `[..., M, K]` and R's shape is `[..., K, N]`. If mode = "complete", qr op will return complete Q and R matrices, which means Q's shape is `[..., M, M]` and R's shape is `[..., M, N]`. If mode = "r", qr op will only return reduced R matrix, which means R's shape is `[..., K, N]`. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: If mode = "reduced" or mode = "complete", qr will return a two tensor-tuple, which represents Q and R. If mode = "r", qr will return a tensor which represents R. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') q, r = paddle.linalg.qr(x) print (q) print (r) # Q = [[-0.16903085, 0.89708523], # [-0.50709255, 0.27602622], # [-0.84515425, -0.34503278]]) # R = [[-5.91607978, -7.43735744], # [ 0. 
, 0.82807867]]) # one can verify : X = Q * R ; """ if paddle.in_dynamic_mode(): q, r = _C_ops.qr(x, 'mode', mode) if mode == "r": return r else: return q, r check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'qr') check_type(mode, 'mode', str, 'qr') helper = LayerHelper('qr', **locals()) q = helper.create_variable_for_type_inference(dtype=x.dtype) r = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['mode'] = mode helper.append_op( type='qr', inputs={'X': [x]}, outputs={'Q': q, 'R': r}, attrs=attrs) if mode == "r": return r else: return q, r def lu(x, pivot=True, get_infos=False, name=None): r""" Computes the LU factorization of an N-D(N>=2) matrix x. Returns the LU factorization(inplace x) and Pivots. low triangular matrix L and upper triangular matrix U are combined to a single LU matrix. Pivoting is done if pivot is set to True. P mat can be get by pivots: # ones = eye(rows) #eye matrix of rank rows # for i in range(cols): # swap(ones[i], ones[pivots[i]]) # return ones Args: X (Tensor): the tensor to factor of N-dimensions(N>=2). pivot (bool, optional): controls whether pivoting is done. Default: True. get_infos (bool, optional): if set to True, returns an info IntTensor. Default: False. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: factorization (Tensor): LU matrix, the factorization of input X. pivots (IntTensor): the pivots of size(∗(N-2), min(m,n)). `pivots` stores all the intermediate transpositions of rows. The final permutation `perm` could be reconstructed by this, details refer to upper example. infos (IntTensor, optional): if `get_infos` is `True`, this is a tensor of size (∗(N-2)) where non-zero values indicate whether factorization for the matrix or each minibatch has succeeded or failed. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') lu,p,info = paddle.linalg.lu(x, get_infos=True) # >>> lu: # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0.20000000, 0.80000000], # [0.60000000, 0.50000000]]) # >>> p # Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # [3, 3]) # >>> info # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # 0) P,L,U = paddle.linalg.lu_unpack(lu,p) # >>> P # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[0., 1., 0.], # [0., 0., 1.], # [1., 0., 0.]]), # >>> L # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[1. , 0. ], # [0.20000000, 1. ], # [0.60000000, 0.50000000]]), # >>> U # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0. 
, 0.80000000]])) # one can verify : X = P @ L @ U ; """ if paddle.in_dynamic_mode(): LU, Piv, Info = _C_ops.lu(x, 'pivots', pivot) if get_infos: return LU, Piv, Info else: return LU, Piv check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu') helper = LayerHelper('lu', **locals()) lu = helper.create_variable_for_type_inference(dtype=x.dtype) p = helper.create_variable_for_type_inference(dtype='int') info = helper.create_variable_for_type_inference(dtype='int') attrs = dict() attrs['pivots'] = pivot helper.append_op( type='lu', inputs={'X': x}, outputs={'Out': lu, 'Pivots': p, 'Infos': info}, attrs=attrs) if get_infos: return lu, p, info else: return lu, p def lu_unpack(x, y, unpack_ludata=True, unpack_pivots=True, name=None): r""" Unpack L U and P to single matrix tensor . unpack L and U matrix from LU, unpack permutation matrix P from Pivtos . P mat can be get by pivots: # ones = eye(rows) #eye matrix of rank rows # for i in range(cols): # swap(ones[i], ones[pivots[i]]) Args: x (Tensor): The LU tensor get from paddle.linalg.lu, which is combined by L and U. y (Tensor): Pivots get from paddle.linalg.lu. unpack_ludata (bool,optional): whether to unpack L and U from x. Default: True. unpack_pivots (bool, optional): whether to unpack permutation matrix P from Pivtos. Default: True. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: P (Tensor): Permutation matrix P of lu factorization. L (Tensor): The lower triangular matrix tensor of lu factorization. U (Tensor): The upper triangular matrix tensor of lu factorization. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') lu,p,info = paddle.linalg.lu(x, get_infos=True) # >>> lu: # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0.20000000, 0.80000000], # [0.60000000, 0.50000000]]) # >>> p # Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # [3, 3]) # >>> info # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # 0) P,L,U = paddle.linalg.lu_unpack(lu,p) # >>> P # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[0., 1., 0.], # [0., 0., 1.], # [1., 0., 0.]]), # >>> L # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[1. , 0. ], # [0.20000000, 1. ], # [0.60000000, 0.50000000]]), # >>> U # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0. , 0.80000000]])) # one can verify : X = P @ L @ U ; """ if paddle.in_dynamic_mode(): P, L, U = _C_ops.lu_unpack(x, y, 'unpack_ludata', unpack_ludata, 'unpack_pivots', unpack_pivots) return P, L, U check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu_unpack') helper = LayerHelper('lu_unpack', **locals()) p = helper.create_variable_for_type_inference(dtype=x.dtype) l = helper.create_variable_for_type_inference(dtype=x.dtype) u = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['unpack_ludata'] = unpack_ludata attrs['unpack_pivots'] = unpack_pivots helper.append_op( type='lu_unpack', inputs={'X': x, 'Pivots': y}, outputs={'Pmat': p, 'L': l, 'U': u}, attrs=attrs) return p, l, u def eig(x, name=None): """ This API performs the eigenvalue decomposition of a square matrix or a batch of square matrices. .. 
note:: If the matrix is a Hermitian or a real symmetric matrix, please use :ref:`paddle.linalg.eigh` instead, which is much faster. If only eigenvalues is needed, please use :ref:`paddle.linalg.eigvals` instead. If the matrix is of any shape, please use :ref:`paddle.linalg.svd`. This API is only supported on CPU device. The output datatype is always complex for both real and complex input. Args: x (Tensor): A tensor with shape math:`[*, N, N]`, The data type of the x should be one of ``float32``, ``float64``, ``compplex64`` or ``complex128``. name (str, optional): The default value is `None`. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Eigenvalues(Tensors): A tensor with shape math:`[*, N]` refers to the eigen values. Eigenvectors(Tensors): A tensor with shape math:`[*, N, N]` refers to the eigen vectors. Examples: .. code-block:: python import paddle import numpy as np paddle.device.set_device("cpu") x_data = np.array([[1.6707249, 7.2249975, 6.5045543], [9.956216, 8.749598, 6.066444 ], [4.4251957, 1.7983172, 0.370647 ]]).astype("float32") x = paddle.to_tensor(x_data) w, v = paddle.linalg.eig(x) print(w) # Tensor(shape=[3, 3], dtype=complex128, place=CPUPlace, stop_gradient=False, # [[(-0.5061363550800655+0j) , (-0.7971760990842826+0j) , # (0.18518077798279986+0j)], # [(-0.8308237755993192+0j) , (0.3463813401919749+0j) , # (-0.6837005269141947+0j) ], # [(-0.23142567697893396+0j), (0.4944999840400175+0j) , # (0.7058765252952796+0j) ]]) print(v) # Tensor(shape=[3], dtype=complex128, place=CPUPlace, stop_gradient=False, # [ (16.50471283351188+0j) , (-5.5034820550763515+0j) , # (-0.21026087843552282+0j)]) """ if paddle.in_dynamic_mode(): w, v = _C_ops.eig(x) return w, v check_variable_and_dtype( x, 'X', ['float32', 'float64', 'complex64', 'complex128'], 'eig') helper = LayerHelper('eig', **locals()) w = helper.create_variable_for_type_inference(x.dtype) v = helper.create_variable_for_type_inference(x.dtype) inputs = {'X': x} outputs = {'Eigenvalues': w, 'Eigenvectors': v} helper.append_op(type='eig', inputs=inputs, outputs=outputs) return w, v def eigvals(x, name=None): """ Compute the eigenvalues of one or more general matrices. Warning: The gradient kernel of this operator does not yet developed. If you need back propagation through this operator, please replace it with paddle.linalg.eig. Args: x (Tensor): A square matrix or a batch of square matrices whose eigenvalues will be computed. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32, float64, complex64, or complex128. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: A tensor containing the unsorted eigenvalues which has the same batch dimensions with `x`. The eigenvalues are complex-valued even when `x` is real. Examples: .. 
code-block:: python import paddle paddle.set_device("cpu") paddle.seed(1234) x = paddle.rand(shape=[3, 3], dtype='float64') # [[0.02773777, 0.93004224, 0.06911496], # [0.24831591, 0.45733623, 0.07717843], # [0.48016702, 0.14235102, 0.42620817]]) print(paddle.linalg.eigvals(x)) # [(-0.27078833542132674+0j), (0.29962280156230725+0j), (0.8824477020120244+0j)] #complex128 """ check_variable_and_dtype(x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigvals') x_shape = list(x.shape) if len(x_shape) < 2: raise ValueError( "The dimension of Input(x) should be at least 2, but received x's dimention = {}, x's shape = {}". format(len(x_shape), x_shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The last two dimensions of Input(x) should be equal, but received x's shape = {}". format(x_shape)) if paddle.in_dynamic_mode(): return _C_ops.eigvals(x) helper = LayerHelper('eigvals', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type='eigvals', inputs={'X': x}, outputs={'Out': out}) return out def multi_dot(x, name=None): """ Multi_dot is an operator that calculates multiple matrix multiplications. Supports inputs of float16(only GPU support), float32 and float64 dtypes. This function does not support batched inputs. The input tensor in [x] must be 2-D except for the first and last can be 1-D. If the first tensor is a 1-D vector of shape(n, ) it is treated as row vector of shape(1, n), similarly if the last tensor is a 1D vector of shape(n, ), it is treated as a column vector of shape(n, 1). If the first and last tensor are 2-D matrix, then the output is also 2-D matrix, otherwise the output is a 1-D vector. Multi_dot will select the lowest cost multiplication order for calculation. The cost of multiplying two matrices with shapes (a, b) and (b, c) is a * b * c. Given matrices A, B, C with shapes (20, 5), (5, 100), (100, 10) respectively, we can calculate the cost of different multiplication orders as follows: - Cost((AB)C) = 20x5x100 + 20x100x10 = 30000 - Cost(A(BC)) = 5x100x10 + 20x5x10 = 6000 In this case, multiplying B and C first, then multiply A, which is 5 times faster than sequential calculation. Args: x ([Tensor]): The input tensors which is a list Tensor. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The output Tensor. Examples: .. 
code-block:: python import paddle import numpy as np # A * B A_data = np.random.random([3, 4]).astype(np.float32) B_data = np.random.random([4, 5]).astype(np.float32) A = paddle.to_tensor(A_data) B = paddle.to_tensor(B_data) out = paddle.linalg.multi_dot([A, B]) print(out.numpy().shape) # [3, 5] # A * B * C A_data = np.random.random([10, 5]).astype(np.float32) B_data = np.random.random([5, 8]).astype(np.float32) C_data = np.random.random([8, 7]).astype(np.float32) A = paddle.to_tensor(A_data) B = paddle.to_tensor(B_data) C = paddle.to_tensor(C_data) out = paddle.linalg.multi_dot([A, B, C]) print(out.numpy().shape) # [10, 7] """ if paddle.in_dynamic_mode(): return _C_ops.multi_dot(x) check_type(x, 'x', (list, tuple), 'multi_dot') for id, item in enumerate(x): check_variable_and_dtype(item, 'x[' + str(id) + ']', ['float16', 'float32', 'float64'], 'multi_dot') if item.dtype != x[0].dtype: raise TypeError( "All the Tensors in the input must have the same data type.") helper = LayerHelper('multi_dot', **locals()) dtype = helper.input_dtype(input_param_name='x') out = helper.create_variable_for_type_inference(dtype) helper.append_op(type='multi_dot', inputs={"X": x}, outputs={"Out": out}) return out def eigh(x, UPLO='L', name=None): """ Compute the eigenvalues and eigenvectors of a complex Hermitian (conjugate symmetric) or a real symmetric matrix. Args: x (Tensor): A tensor with shape :math:`[*, N, N]` , The data type of the input Tensor x should be one of float32, float64, complex64, complex128. UPLO(str, optional): (string, default 'L'), 'L' represents the lower triangular matrix, "'U' represents the upper triangular matrix.". name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: out_value(Tensor): A Tensor with shape [*, N] and data type of float32 and float64. The eigenvalues of eigh op. out_vector(Tensor): A Tensor with shape [*, N, N] and data type of float32,float64,complex64 and complex128. The eigenvectors of eigh op. Examples: .. code-block:: python import numpy as np import paddle x_data = np.array([[1, -2j], [2j, 5]]) x = paddle.to_tensor(x_data) out_value, out_vector = paddle.linalg.eigh(x, UPLO='L') print(out_value) #[0.17157288, 5.82842712] print(out_vector) #[(-0.9238795325112867+0j), (-0.3826834323650898+0j)], #[ 0.3826834323650898j , -0.9238795325112867j ]] """ if paddle.in_dynamic_mode(): return _C_ops.eigh(x, 'UPLO', UPLO) def __check_input(x, UPLO): x_shape = list(x.shape) if len(x.shape) < 2: raise ValueError( "Input(input) only support >=2 tensor, but received " "length of Input(input) is %s." % len(x.shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The input matrix must be batches of square matrices. But received x's dimention: {}". format(x_shape)) if UPLO != 'L' and UPLO != 'U': raise ValueError( "UPLO must be L or U. 
But received UPLO is: {}".format(UPLO)) __check_input(x, UPLO) helper = LayerHelper('eigh', **locals()) check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigh') out_value = helper.create_variable_for_type_inference(dtype=x.dtype) out_vector = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='eigh', inputs={'X': x}, outputs={'Eigenvalues': out_value, 'Eigenvectors': out_vector}, attrs={'UPLO': UPLO}) return out_value, out_vector def pinv(x, rcond=1e-15, hermitian=False, name=None): r""" Calculate pseudo inverse via SVD(singular value decomposition) of one matrix or batches of regular matrix. .. math:: if hermitian == False: x = u * s * vt (SVD) out = v * 1/s * ut else: x = u * s * ut (eigh) out = u * 1/s * u.conj().transpose(-2,-1) If x is hermitian or symmetric matrix, svd will be replaced with eigh. Args: x(Tensor): The input tensor. Its shape should be (*, m, n) where * is zero or more batch dimensions. m and n can be arbitraty positive number. The data type of x should be float32 or float64 or complex64 or complex128. When data type is complex64 or cpmplex128, hermitian should be set True. rcond(Tensor, optional): the tolerance value to determine when is a singular value zero. Defalut:1e-15. hermitian(bool, optional): indicates whether x is Hermitian if complex or symmetric if real. Default: False. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The tensor with same data type with x. it represents pseudo inverse of x. Its shape should be (*, n, m). Examples: .. code-block:: python import paddle x = paddle.arange(15).reshape((3, 5)).astype('float64') input = paddle.to_tensor(x) out = paddle.linalg.pinv(input) print(input) print(out) # input: # [[0. , 1. , 2. , 3. , 4. ], # [5. , 6. , 7. , 8. , 9. 
], # [10., 11., 12., 13., 14.]] # out: # [[-0.22666667, -0.06666667, 0.09333333], # [-0.12333333, -0.03333333, 0.05666667], # [-0.02000000, 0.00000000, 0.02000000], # [ 0.08333333, 0.03333333, -0.01666667], # [ 0.18666667, 0.06666667, -0.05333333]] # one can verify : x * out * x = x ; # or out * x * out = x ; """ if paddle.in_dynamic_mode(): if not hermitian: # combine svd and matmul op u, s, vt = _C_ops.svd(x, 'full_matrices', False) max_singular_val = _C_ops.reduce_max(s, 'dim', [-1], 'keep_dim', True, \ 'reduce_all', False) rcond = paddle.to_tensor(rcond, dtype=x.dtype) cutoff = rcond * max_singular_val y = float('inf') y = paddle.to_tensor(y, dtype=x.dtype) condition = s > cutoff cond_int = layers.cast(condition, s.dtype) cond_not_int = layers.cast(layers.logical_not(condition), s.dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st, _ = _C_ops.unsqueeze2(singular, 'axes', [-2]) dims = list(range(len(vt.shape))) perm = dims[:-2] + [dims[-1]] + [dims[-2]] v, _ = _C_ops.transpose2(vt, 'axis', perm) out_1 = v * st out_2 = _C_ops.matmul_v2(out_1, u, 'trans_x', False, 'trans_y', True) return out_2 else: # combine eigh and matmul op s, u = _C_ops.eigh(x, 'UPLO', 'L') s_abs = paddle.abs(s) max_singular_val = _C_ops.reduce_max(s_abs, 'dim', [-1], 'keep_dim', True, \ 'reduce_all', False) rcond = paddle.to_tensor(rcond, dtype=s.dtype) cutoff = rcond * max_singular_val y = float('inf') y = paddle.to_tensor(y, dtype=s.dtype) condition = s_abs > cutoff cond_int = layers.cast(condition, s.dtype) cond_not_int = layers.cast(layers.logical_not(condition), s.dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st, _ = _C_ops.unsqueeze2(singular, 'axes', [-2]) out_1 = u * st u_conj = _C_ops.conj(u) out_2 = _C_ops.matmul_v2(out_1, u_conj, 'trans_x', False, 'trans_y', True) return out_2 else: if not hermitian: helper = LayerHelper('pinv', **locals()) dtype = x.dtype check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'pinv') u = helper.create_variable_for_type_inference(dtype) s = helper.create_variable_for_type_inference(dtype) vt = helper.create_variable_for_type_inference(dtype) helper.append_op( type='svd', inputs={'X': [x]}, outputs={'U': u, 'VH': vt, 'S': s}, attrs={'full_matrices': False}, ) max_singular_val = helper.create_variable_for_type_inference(dtype) helper.append_op( type='reduce_max', inputs={'X': s}, outputs={'Out': max_singular_val}, attrs={'dim': [-1], 'keep_dim': True, 'reduce_all': False}) rcond = layers.fill_constant(shape=[1], value=rcond, dtype=dtype) cutoff = rcond * max_singular_val y = float('inf') y = layers.fill_constant(shape=[1], value=y, dtype=dtype) condition = s > cutoff cond_int = layers.cast(condition, dtype) cond_not_int = layers.cast(layers.logical_not(condition), dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st = helper.create_variable_for_type_inference(dtype=dtype) st_shape = helper.create_variable_for_type_inference(dtype=dtype) helper.append_op( type='unsqueeze2', inputs={'X': singular}, attrs={'axes': [-2]}, outputs={'Out': st, 'XShape': st_shape}) dims = list(range(len(vt.shape))) perm = dims[:-2] + [dims[-1]] + [dims[-2]] v = helper.create_variable_for_type_inference(dtype) v_shape = helper.create_variable_for_type_inference(dtype) helper.append_op( 
type='transpose2', inputs={'X': [vt]}, outputs={'Out': [v], 'XShape': [v_shape]}, attrs={'axis': perm}) out_1 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='elementwise_mul', inputs={'X': v, 'Y': st}, outputs={'Out': out_1}, attrs={'axis': -1, 'use_mkldnn': False}) out_1 = helper.append_activation(out_1) out_2 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='matmul_v2', inputs={'X': out_1, 'Y': u}, outputs={'Out': out_2}, attrs={'trans_x': False, 'trans_y': True}, ) return out_2 else: helper = LayerHelper('pinv', **locals()) dtype = x.dtype check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'pinv') if dtype == paddle.complex128: s_type = 'float64' elif dtype == paddle.complex64: s_type = 'float32' else: s_type = dtype u = helper.create_variable_for_type_inference(dtype) s = helper.create_variable_for_type_inference(s_type) helper.append_op( type='eigh', inputs={'X': x}, outputs={'Eigenvalues': s, 'Eigenvectors': u}, attrs={'UPLO': 'L'}) s_abs = helper.create_variable_for_type_inference(s_type) helper.append_op( type='abs', inputs={'X': s}, outputs={'Out': s_abs}) max_singular_val = helper.create_variable_for_type_inference(s_type) helper.append_op( type='reduce_max', inputs={'X': s_abs}, outputs={'Out': max_singular_val}, attrs={'dim': [-1], 'keep_dim': True, 'reduce_all': False}) rcond = layers.fill_constant(shape=[1], value=rcond, dtype=s_type) cutoff = rcond * max_singular_val y = float('inf') y = layers.fill_constant(shape=[1], value=y, dtype=s_type) condition = s_abs > cutoff cond_int = layers.cast(condition, s_type) cond_not_int = layers.cast(layers.logical_not(condition), s_type) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st = helper.create_variable_for_type_inference(dtype=s_type) st_shape = helper.create_variable_for_type_inference(dtype=s_type) helper.append_op( type='unsqueeze2', inputs={'X': singular}, attrs={'axes': [-2]}, outputs={'Out': st, 'XShape': st_shape}) out_1 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='elementwise_mul', inputs={'X': u, 'Y': st}, outputs={'Out': out_1}, attrs={'axis': -1, 'use_mkldnn': False}) out_1 = helper.append_activation(out_1) u_conj = helper.create_variable_for_type_inference(dtype) helper.append_op( type='conj', inputs={'X': u}, outputs={'Out': [u_conj]}) out_2 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='matmul_v2', inputs={'X': out_1, 'Y': u_conj}, outputs={'Out': out_2}, attrs={'trans_x': False, 'trans_y': True}, ) return out_2 def solve(x, y, name=None): r""" Computes the solution of a square system of linear equations with a unique solution for input 'X' and 'Y'. Let :math: `X` be a sqaure matrix or a batch of square matrices, :math:`Y` be a vector/matrix or a batch of vectors/matrices, the equation should be: .. math:: Out = X^-1 * Y Specifically, - This system of linear equations has one solution if and only if input 'X' is invertible. Args: x (Tensor): A square matrix or a batch of square matrices. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): A vector/matrix or a batch of vectors/matrices. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. name(str, optional): Name for the operation (optional, default is None). 
For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of a square system of linear equations with a unique solution for input 'x' and 'y'. Its data type should be the same as that of `x`. Examples: .. code-block:: python # a square system of linear equations: # 2*X0 + X1 = 9 # X0 + 2*X1 = 8 import paddle import numpy as np np_x = np.array([[3, 1],[1, 2]]) np_y = np.array([9, 8]) x = paddle.to_tensor(np_x, dtype="float64") y = paddle.to_tensor(np_y, dtype="float64") out = paddle.linalg.solve(x, y) print(out) # [2., 3.]) """ if paddle.in_dynamic_mode(): return _C_ops.solve(x, y) inputs = {"X": [x], "Y": [y]} helper = LayerHelper("solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type="solve", inputs={"X": x, "Y": y}, outputs={"Out": out}) return out def triangular_solve(x, y, upper=True, transpose=False, unitriangular=False, name=None): r""" Computes the solution of a system of equations with a triangular coefficient matrix `x` and multiple right-hand sides `y` . Input `x` and `y` is 2D matrices or batches of 2D matrices. If the inputs are batches, the outputs is also batches. Args: x (Tensor): The input triangular coefficient matrix. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. upper (bool, optional): Whether to solve the upper-triangular system of equations (default) or the lower-triangular system of equations. Default: True. transpose (bool, optional): whether `x` should be transposed before calculation. Default: False. unitriangular (bool, optional): whether `x` is unit triangular. If True, the diagonal elements of `x` are assumed to be 1 and not referenced from `x` . Default: False. name(str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of the system of equations. Its data type should be the same as that of `x`. Examples: .. code-block:: python # a square system of linear equations: # x1 + x2 + x3 = 0 # 2*x2 + x3 = -9 # -x3 = 5 import paddle import numpy as np x = paddle.to_tensor([[1, 1, 1], [0, 2, 1], [0, 0,-1]], dtype="float64") y = paddle.to_tensor([[0], [-9], [5]], dtype="float64") out = paddle.linalg.triangular_solve(x, y, upper=True) print(out) # [7, -2, -5] """ if paddle.in_dynamic_mode(): return _C_ops.triangular_solve(x, y, 'upper', upper, 'transpose', transpose, 'unitriangular', unitriangular) inputs = {"X": [x], "Y": [y]} helper = LayerHelper("triangular_solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'triangular_solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'triangular_solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='triangular_solve', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs={ 'upper': upper, 'transpose': transpose, 'unitriangular': unitriangular }) return out def cholesky_solve(x, y, upper=False, name=None): r""" Solves a linear system of equations A @ X = B, given A's Cholesky factor matrix u and matrix B. Input `x` and `y` is 2D matrices or batches of 2D matrices. 
If the inputs are batches, the outputs is also batches. Args: x (Tensor): The input matrix which is upper or lower triangular Cholesky factor of square matrix A. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. upper (bool, optional): whether to consider the Cholesky factor as a lower or upper triangular matrix. Default: False. name(str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of the system of equations. Its data type is the same as that of `x`. Examples: .. code-block:: python import paddle u = paddle.to_tensor([[1, 1, 1], [0, 2, 1], [0, 0,-1]], dtype="float64") b = paddle.to_tensor([[0], [-9], [5]], dtype="float64") out = paddle.linalg.cholesky_solve(b, u, upper=True) print(out) # [-2.5, -7, 9.5] """ if paddle.in_dynamic_mode(): return _C_ops.cholesky_solve(x, y, 'upper', upper) helper = LayerHelper("cholesky_solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'cholesky_solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'cholesky_solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='cholesky_solve', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs={'upper': upper}) return out def eigvalsh(x, UPLO='L', name=None): """ Computes the eigenvalues of a complex Hermitian (conjugate symmetric) or a real symmetric matrix. Args: x (Tensor): A tensor with shape :math:`[_, M, M]` , The data type of the input Tensor x should be one of float32, float64, complex64, complex128. UPLO(str, optional): Lower triangular part of a (‘L’, default) or the upper triangular part (‘U’). name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor eigenvalues in ascending order. Examples: .. code-block:: python import numpy as np import paddle x_data = np.array([[1, -2j], [2j, 5]]) x = paddle.to_tensor(x_data) out_value = paddle.eigvalsh(x, UPLO='L') print(out_value) #[0.17157288, 5.82842712] """ if paddle.in_dynamic_mode(): is_test = x.stop_gradient values, _ = _C_ops.eigvalsh(x, 'UPLO', UPLO, 'is_test', is_test) return values def __check_input(x, UPLO): x_shape = list(x.shape) if len(x.shape) < 2: raise ValueError( "Input(input) only support >=2 tensor, but received " "length of Input(input) is %s." % len(x.shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The input matrix must be batches of square matrices. But received x's dimention: {}". format(x_shape)) if UPLO != 'L' and UPLO != 'U': raise ValueError( "UPLO must be L or U. 
But received UPLO is: {}".format(UPLO)) __check_input(x, UPLO) helper = LayerHelper('eigvalsh', **locals()) check_variable_and_dtype(x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigvalsh') out_value = helper.create_variable_for_type_inference(dtype=x.dtype) out_vector = helper.create_variable_for_type_inference(dtype=x.dtype) is_test = x.stop_gradient helper.append_op( type='eigvalsh', inputs={'X': x}, outputs={'Eigenvalues': out_value, 'Eigenvectors': out_vector}, attrs={'UPLO': UPLO, 'is_test': is_test}) return out_value def lstsq(x, y, rcond=None, driver=None, name=None): """ Computes a solution to the least squares problem of a system of linear equations. Args: x (Tensor): A tensor with shape ``(*, M, N)`` , the data type of the input Tensor ``x`` should be one of float32, float64. y (Tensor): A tensor with shape ``(*, M, K)`` , the data type of the input Tensor ``y`` should be one of float32, float64. rcond(float, optional): The default value is None. A float pointing number used to determine the effective rank of ``x``. If ``rcond`` is None, it will be set to max(M, N) times the machine precision of x_dtype. driver(str, optional): The default value is None. The name of LAPACK method to be used. For CPU inputs the valid values are ‘gels’, ‘gelsy’, ‘gelsd, ‘gelss’. For CUDA input, the only valid driver is ‘gels’. If ``driver`` is None, ‘gelsy’ is used for CPU inputs and ‘gels’ for CUDA inputs. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tuple: A tuple of 4 Tensors which is (``solution``, ``residuals``, ``rank``, ``singular_values``). ``solution`` is a tensor with shape ``(*, N, K)``, meaning the least squares solution. ``residuals`` is a tensor with shape ``(*, K)``, meaning the squared residuals of the solutions, which is computed when M > N and every matrix in ``x`` is full-rank, otherwise return an empty tensor. ``rank`` is a tensor with shape ``(*)``, meaning the ranks of the matrices in ``x``, which is computed when ``driver`` in (‘gelsy’, ‘gelsd’, ‘gelss’), otherwise return an empty tensor. ``singular_values`` is a tensor with shape ``(*, min(M, N))``, meaning singular values of the matrices in ``x``, which is computed when ``driver`` in (‘gelsd’, ‘gelss’), otherwise return an empty tensor. Examples: .. code-block:: python import paddle paddle.set_device("cpu") x = paddle.to_tensor([[1, 3], [3, 2], [5, 6.]]) y = paddle.to_tensor([[3, 4, 6], [5, 3, 4], [1, 2, 1.]]) results = paddle.linalg.lstsq(x, y, driver="gelsd") print(results[0]) # [[ 0.78350395, -0.22165027, -0.62371236], # [-0.11340097, 0.78866047, 1.14948535]] print(results[1]) # [19.81443405, 10.43814468, 30.56185532]) print(results[2]) # 2 print(results[3]) # [9.03455734, 1.54167950] x = paddle.to_tensor([[10, 2, 3], [3, 10, 5], [5, 6, 12.]]) y = paddle.to_tensor([[4, 2, 9], [2, 0, 3], [2, 5, 3.]]) results = paddle.linalg.lstsq(x, y, driver="gels") print(results[0]) # [[ 0.39386186, 0.10230173, 0.93606132], # [ 0.10741687, -0.29028133, 0.11892585], # [-0.05115091, 0.51918161, -0.19948854]] print(results[1]) # [] """ device = paddle.get_device() if device == "cpu": if driver not in (None, "gels", "gelss", "gelsd", "gelsy"): raise ValueError( "Only support valid driver is 'gels', 'gelss', 'gelsd', 'gelsy' or None for CPU inputs. But got {}". 
format(driver)) driver = "gelsy" if driver is None else driver elif "gpu" in device: if driver not in (None, "gels"): raise ValueError( "Only support valid driver is 'gels' or None for CUDA inputs. But got {}". format(driver)) driver = "gels" if driver is None else driver else: raise RuntimeError("Only support lstsq api for CPU or CUDA device.") if x.dtype == y.dtype and x.dtype in (paddle.float32, paddle.float64): pass else: raise ValueError( "Only support x and y have the same dtype such as 'float32' and 'float64'." ) if rcond is None: if x.dtype == paddle.float32: rcond = 1e-7 * max(x.shape[-2], x.shape[-1]) elif x.dtype == paddle.float64: rcond = 1e-15 * max(x.shape[-2], x.shape[-1]) if paddle.in_dynamic_mode(): solution, rank, singular_values = _C_ops.lstsq(x, y, "rcond", rcond, "driver", driver) if x.shape[-2] > x.shape[-1]: matmul_out = _varbase_creator(dtype=x.dtype) _C_ops.matmul(x, solution, matmul_out, 'trans_x', False, 'trans_y', False) minus_out = _C_ops.elementwise_sub(matmul_out, y) pow_out = _C_ops.pow(minus_out, 'factor', 2) residuals = _C_ops.reduce_sum(pow_out, 'dim', [-2], 'keepdim', False, 'reduce_all', False) else: residuals = paddle.empty(shape=[0], dtype=x.dtype) if driver == "gels": rank = paddle.empty(shape=[0], dtype=paddle.int32) singular_values = paddle.empty(shape=[0], dtype=x.dtype) elif driver == "gelsy": singular_values = paddle.empty(shape=[0], dtype=x.dtype) return solution, residuals, rank, singular_values helper = LayerHelper('lstsq', **locals()) check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq') check_variable_and_dtype( y, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq') solution = helper.create_variable_for_type_inference(dtype=x.dtype) residuals = helper.create_variable_for_type_inference(dtype=x.dtype) rank = helper.create_variable_for_type_inference(dtype=paddle.int32) singular_values = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='lstsq', inputs={'X': x, 'Y': y}, outputs={ 'Solution': solution, 'Rank': rank, 'SingularValues': singular_values }, attrs={'rcond': rcond, 'driver': driver}) matmul_out = helper.create_variable_for_type_inference(dtype=x.dtype) minus_out = helper.create_variable_for_type_inference(dtype=x.dtype) pow_out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matmul_v2', inputs={'X': x, 'Y': solution}, outputs={'Out': matmul_out}, attrs={ 'trans_x': False, 'trans_y': False, }) helper.append_op( type='elementwise_sub', inputs={'X': matmul_out, 'Y': y}, outputs={'Out': minus_out}) helper.append_op( type='pow', inputs={'X': minus_out}, outputs={'Out': pow_out}, attrs={'factor': 2}) helper.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': residuals}, attrs={'dim': [-2], 'keep_dim': False, 'reduce_all': False}) if driver == "gels": rank = paddle.static.data(name='rank', shape=[0]) singular_values = paddle.static.data(name='singular_values', shape=[0]) elif driver == "gelsy": singular_values = paddle.static.data(name='singular_values', shape=[0]) return solution, residuals, rank, singular_values
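# --- Hedged usage sketch (not part of the original source) ----------------
# The lstsq implementation above computes the squared residuals as
# sum((x @ solution - y) ** 2) over the row axis whenever M > N and the
# matrices are full rank. The snippet below re-derives that quantity to
# illustrate the relationship; the tensor values are illustrative
# assumptions, not taken from the original file.
import paddle

paddle.set_device("cpu")
x = paddle.to_tensor([[1.0, 3.0], [3.0, 2.0], [5.0, 6.0]])                 # shape (3, 2), M > N
y = paddle.to_tensor([[3.0, 4.0, 6.0], [5.0, 3.0, 4.0], [1.0, 2.0, 1.0]])  # shape (3, 3)
solution, residuals, rank, singular_values = paddle.linalg.lstsq(x, y, driver="gelsd")
# Recompute the residuals the same way the dynamic-mode branch does.
manual = ((paddle.matmul(x, solution) - y) ** 2).sum(axis=-2)
print(paddle.allclose(residuals, manual))  # expected: True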
eig
    This API performs the eigenvalue decomposition of a square matrix or a batch of square matrices.

    .. note::
        If the matrix is a Hermitian or a real symmetric matrix, please use :ref:`paddle.linalg.eigh` instead, which is much faster.
        If only eigenvalues are needed, please use :ref:`paddle.linalg.eigvals` instead.
        If the matrix is of any shape, please use :ref:`paddle.linalg.svd`.
        This API is only supported on CPU device.
        The output datatype is always complex for both real and complex input.

    Args:
        x (Tensor): A tensor with shape :math:`[*, N, N]`. The data type of x should be one of ``float32``, ``float64``, ``complex64`` or ``complex128``.
        name (str, optional): The default value is `None`. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Eigenvalues(Tensor): A tensor with shape :math:`[*, N]` containing the eigenvalues.
        Eigenvectors(Tensor): A tensor with shape :math:`[*, N, N]` containing the eigenvectors.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            paddle.device.set_device("cpu")

            x_data = np.array([[1.6707249, 7.2249975, 6.5045543],
                               [9.956216,  8.749598,  6.066444 ],
                               [4.4251957, 1.7983172, 0.370647 ]]).astype("float32")
            x = paddle.to_tensor(x_data)
            w, v = paddle.linalg.eig(x)
            print(w)
            # Tensor(shape=[3], dtype=complex128, place=CPUPlace, stop_gradient=False,
            #        [ (16.50471283351188+0j)   , (-5.5034820550763515+0j) ,
            #          (-0.21026087843552282+0j)])
            print(v)
            # Tensor(shape=[3, 3], dtype=complex128, place=CPUPlace, stop_gradient=False,
            #        [[(-0.5061363550800655+0j) , (-0.7971760990842826+0j) ,
            #          (0.18518077798279986+0j) ],
            #         [(-0.8308237755993192+0j) , (0.3463813401919749+0j)  ,
            #          (-0.6837005269141947+0j) ],
            #         [(-0.23142567697893396+0j), (0.4944999840400175+0j)  ,
            #          (0.7058765252952796+0j)  ]])
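# --- Hedged verification sketch (not part of the original source) ---------
# The eig docstring above returns eigenvalues w and eigenvectors v. A quick
# way to sanity-check the decomposition is the identity A @ V = V @ diag(w),
# where V @ diag(w) can be written as v * w via broadcasting. The input
# matrix below is an illustrative assumption; eig only runs on CPU.
import paddle

paddle.device.set_device("cpu")
a = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]])
w, v = paddle.linalg.eig(a)                       # complex outputs
lhs = paddle.matmul(a.astype(v.dtype), v)         # A @ V
rhs = v * w                                       # V @ diag(w), column j scaled by w[j]
print(float((lhs - rhs).abs().max()))             # expected: close to 0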
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from ..fluid.layer_helper import LayerHelper from ..framework import _varbase_creator, _dygraph_tracer from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype from ..static import Variable from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode from ..fluid.layers import transpose, cast # noqa: F401 from ..fluid import layers import paddle from paddle.common_ops_import import core from paddle.common_ops_import import VarDesc from paddle import _C_ops __all__ = [] def matmul(x, y, transpose_x=False, transpose_y=False, name=None): """ Applies matrix multiplication to two tensors. `matmul` follows the complete broadcast rules, and its behavior is consistent with `np.matmul`. Currently, the input tensors' number of dimensions can be any, `matmul` can be used to achieve the `dot`, `matmul` and `batchmatmul`. The actual behavior depends on the shapes of :math:`x`, :math:`y` and the flag values of :attr:`transpose_x`, :attr:`transpose_y`. Specifically: - If a transpose flag is specified, the last two dimensions of the tensor are transposed. If the tensor is ndim-1 of shape, the transpose is invalid. If the tensor is ndim-1 of shape :math:`[D]`, then for :math:`x` it is treated as :math:`[1, D]`, whereas for :math:`y` it is the opposite: It is treated as :math:`[D, 1]`. The multiplication behavior depends on the dimensions of `x` and `y`. Specifically: - If both tensors are 1-dimensional, the dot product result is obtained. - If both tensors are 2-dimensional, the matrix-matrix product is obtained. - If the `x` is 1-dimensional and the `y` is 2-dimensional, a `1` is prepended to its dimension in order to conduct the matrix multiply. After the matrix multiply, the prepended dimension is removed. - If the `x` is 2-dimensional and `y` is 1-dimensional, the matrix-vector product is obtained. - If both arguments are at least 1-dimensional and at least one argument is N-dimensional (where N > 2), then a batched matrix multiply is obtained. If the first argument is 1-dimensional, a 1 is prepended to its dimension in order to conduct the batched matrix multiply and removed after. If the second argument is 1-dimensional, a 1 is appended to its dimension for the purpose of the batched matrix multiple and removed after. The non-matrix (exclude the last two dimensions) dimensions are broadcasted according the broadcast rule. For example, if input is a (j, 1, n, m) tensor and the other is a (k, m, p) tensor, out will be a (j, k, n, p) tensor. Args: x (Tensor): The input tensor which is a Tensor. y (Tensor): The input tensor which is a Tensor. transpose_x (bool): Whether to transpose :math:`x` before multiplication. transpose_y (bool): Whether to transpose :math:`y` before multiplication. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The output Tensor. Examples: .. 
code-block:: python import paddle import numpy as np # vector * vector x_data = np.random.random([10]).astype(np.float32) y_data = np.random.random([10]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [1] # matrix * vector x_data = np.random.random([10, 5]).astype(np.float32) y_data = np.random.random([5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10] # batched matrix * broadcasted vector x_data = np.random.random([10, 5, 2]).astype(np.float32) y_data = np.random.random([2]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 5] # batched matrix * batched matrix x_data = np.random.random([10, 5, 2]).astype(np.float32) y_data = np.random.random([10, 2, 5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 5, 5] # batched matrix * broadcasted matrix x_data = np.random.random([10, 1, 5, 2]).astype(np.float32) y_data = np.random.random([1, 3, 2, 5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 3, 5, 5] """ if in_dygraph_mode(): return _C_ops.final_state_matmul(x, y, transpose_x, transpose_y) if _in_legacy_dygraph(): op_type = 'matmul_v2' op = getattr(_C_ops, op_type) return op(x, y, 'trans_x', transpose_x, 'trans_y', transpose_y) attrs = { 'trans_x': transpose_x, 'trans_y': transpose_y, } def __check_input(x, y): var_names = {'x': x, 'y': y} for name, val in var_names.items(): check_variable_and_dtype( val, name, ['float16', 'float32', 'float64', 'complex64', 'complex128'], 'matmul') __check_input(x, y) helper = LayerHelper('matmul_v2', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matmul_v2', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs=attrs) return out def norm(x, p='fro', axis=None, keepdim=False, name=None): """ Returns the matrix norm (Frobenius) or vector norm (the 1-norm, the Euclidean or 2-norm, and in general the p-norm for p > 0) of a given tensor. .. note:: This norm API is different from `numpy.linalg.norm`. This api supports high-order input tensors (rank >= 3), and certain axis need to be pointed out to calculate the norm. But `numpy.linalg.norm` only supports 1-D vector or 2-D matrix as input tensor. For p-order matrix norm, this api actually treats matrix as a flattened vector to calculate the vector norm, NOT REAL MATRIX NORM. Args: x (Tensor): The input tensor could be N-D tensor, and the input data type could be float32 or float64. p (float|string, optional): Order of the norm. Supported values are `fro`, `0`, `1`, `2`, `inf`, `-inf` and any positive real number yielding the corresponding p-norm. Not supported: ord < 0 and nuclear norm. Default value is `fro`. axis (int|list|tuple, optional): The axis on which to apply norm operation. If axis is int or list(int)/tuple(int) with only one element, the vector norm is computed over the axis. If `axis < 0`, the dimension to norm operation is rank(input) + axis. If axis is a list(int)/tuple(int) with two elements, the matrix norm is computed over the axis. Defalut value is `None`. keepdim (bool, optional): Whether to reserve the reduced dimension in the output Tensor. 
The result tensor will have fewer dimension than the :attr:`input` unless :attr:`keepdim` is true, default value is False. name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: results of norm operation on the specified axis of input tensor, it's data type is the same as input's Tensor. Examples: .. code-block:: python import paddle import numpy as np shape=[2, 3, 4] np_input = np.arange(24).astype('float32') - 12 np_input = np_input.reshape(shape) x = paddle.to_tensor(np_input) #[[[-12. -11. -10. -9.] [ -8. -7. -6. -5.] [ -4. -3. -2. -1.]] # [[ 0. 1. 2. 3.] [ 4. 5. 6. 7.] [ 8. 9. 10. 11.]]] # compute frobenius norm along last two dimensions. out_fro = paddle.linalg.norm(x, p='fro', axis=[0,1]) # out_fro.numpy() [17.435596 16.911535 16.7332 16.911535] # compute 2-order vector norm along last dimension. out_pnorm = paddle.linalg.norm(x, p=2, axis=-1) #out_pnorm.numpy(): [[21.118711 13.190906 5.477226] # [ 3.7416575 11.224972 19.131126]] # compute 2-order norm along [0,1] dimension. out_pnorm = paddle.linalg.norm(x, p=2, axis=[0,1]) #out_pnorm.numpy(): [17.435596 16.911535 16.7332 16.911535] # compute inf-order norm out_pnorm = paddle.linalg.norm(x, p=np.inf) #out_pnorm.numpy() = [12.] out_pnorm = paddle.linalg.norm(x, p=np.inf, axis=0) #out_pnorm.numpy(): [[12. 11. 10. 9.] [8. 7. 6. 7.] [8. 9. 10. 11.]] # compute -inf-order norm out_pnorm = paddle.linalg.norm(x, p=-np.inf) #out_pnorm.numpy(): [0.] out_pnorm = paddle.linalg.norm(x, p=-np.inf, axis=0) #out_pnorm.numpy(): [[0. 1. 2. 3.] [4. 5. 6. 5.] [4. 3. 2. 1.]] """ def frobenius_norm(input, dim=None, keepdim=False, name=None): """ The frobenius norm OP is to calculate the frobenius norm of certain two dimensions of Tensor `input`. Args: input (Variable): Tensor, data type float32, float64. dim (list, optional): None for last two dimensions. keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False. """ if dim is not None and not (isinstance(dim, list) and len(dim) == 2): raise ValueError( "The dim of frobenius norm op should be None or two elements list!" ) if paddle.in_dynamic_mode(): if dim is None: return _C_ops.frobenius_norm(input, 'keep_dim', keepdim, 'reduce_all', True) return _C_ops.frobenius_norm(input, 'dim', dim, 'keep_dim', keepdim, 'reduce_all', False) attrs = {'dim': dim, 'keep_dim': keepdim, 'reduce_all': False} if dim is None: attrs['reduce_all'] = True check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'frobenius_norm') helper = LayerHelper('frobenius_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op( type='frobenius_norm', inputs={'X': input}, outputs={'Out': out}, attrs=attrs) return out def vector_norm(input, porder=None, axis=None, keepdim=False, asvector=False, name=None): """ Calculate the p-order vector norm for certain dimension of Tensor `input`. Args: input (Variable): Tensor, data type float32, float64. porder (float, optional): None for porder=2.0. axis (int, optional): None for last dimension. keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False. 
""" if paddle.in_dynamic_mode(): if axis is None: axis = -1 return _C_ops.p_norm(input, 'porder', porder, 'axis', axis, 'keepdim', keepdim, 'asvector', asvector) if porder is not None: check_type(porder, 'porder', (float, int), 'p_norm') if axis is not None: check_type(axis, 'axis', (int), 'p_norm') check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'p_norm') attrs = { 'axis': axis if axis is not None else -1, 'porder': float(porder) if porder is not None else 2.0, 'keepdim': keepdim, 'asvector': asvector, 'epsilon': 1e-12, } helper = LayerHelper('p_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op( type='p_norm', inputs={'X': input}, outputs={'Out': out}, attrs=attrs) return out def inf_norm(input, porder=None, axis=axis, keepdim=False, asvector=False, name=None): helper = LayerHelper('frobenius_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op(type='abs', inputs={'X': input}, outputs={'Out': out}) reduce_out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) reduce_all = True if axis == None or axis == [] or asvector == True else False axis = axis if axis != None and axis != [] else [0] reduce_type = 'reduce_max' if porder == np.float( 'inf') else 'reduce_min' helper.append_op( type=reduce_type, inputs={'X': out}, outputs={'Out': reduce_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) return reduce_out def p_matrix_norm(input, porder=1., axis=axis, keepdim=False, name=None): """ NOTE: This function actually treats the matrix as flattened vector to calculate vector norm instead of matrix norm. """ block = LayerHelper('norm', **locals()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) abs_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='abs', inputs={'X': input}, outputs={'Out': abs_out}) pow_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='pow', inputs={'X': abs_out}, outputs={'Out': pow_out}, attrs={'factor': porder}) sum_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': sum_out}, attrs={ 'dim': axis, 'keep_dim': keepdim, 'reduce_all': True if axis is None else False }) porder block.append_op( type='pow', inputs={'X': sum_out}, outputs={'Out': out}, attrs={'factor': float(1. / porder)}) return out if axis is None and p is not None: if isinstance(p, str): if p == "fro": return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name) else: raise ValueError( "only valid string values are 'fro', found {}".format(p)) elif isinstance(p, (int, float)): return vector_norm( x, porder=p, axis=axis, keepdim=keepdim, asvector=True, name=name) else: raise ValueError("only valid p type is string or float, found {}". 
format(type(p))) if isinstance(axis, tuple): axis = list(axis) if isinstance(axis, list) and len(axis) == 1: axis = axis[0] #calculate vector norm, where axis is int or list with only one integer if isinstance(axis, int): if isinstance(p, str): if p == "fro": return vector_norm( x, porder=2, axis=axis, keepdim=keepdim, asvector=False, name=name) else: raise ValueError( "only valid string values are 'fro', found {}".format(p)) elif isinstance(p, (int, float)): return vector_norm( x, axis=axis, porder=p, keepdim=keepdim, asvector=False, name=name) else: raise ValueError( "unspport p for p-order vector norm. except float, found {}". format(p)) #calculate matrix norm, where axis is list with two integers elif isinstance(axis, list) and len(axis) == 2: if p == "fro": return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name) elif p == np.inf or p == -np.inf: return inf_norm(x, porder=p, axis=axis, keepdim=keepdim, name=name) elif p == 0: raise ValueError( "just suport axis type int or list (length of list <=1) if p = 0, found {}". format(axis)) else: return p_matrix_norm( x, porder=p, axis=axis, keepdim=keepdim, name=name) else: raise ValueError( "except axis type int or list (length of list <=2), found {}". format(axis)) def dist(x, y, p=2, name=None): r""" This OP returns the p-norm of (x - y). It is not a norm in a strict sense, only as a measure of distance. The shapes of x and y must be broadcastable. The definition is as follows, for details, please refer to the `numpy's broadcasting <https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`_: - Each input has at least one dimension. - Match the two input dimensions from back to front, the dimension sizes must either be equal, one of them is 1, or one of them does not exist. Where, z = x - y, the shapes of x and y are broadcastable, then the shape of z can be obtained as follows: 1. If the number of dimensions of x and y are not equal, prepend 1 to the dimensions of the tensor with fewer dimensions. For example, The shape of x is [8, 1, 6, 1], the shape of y is [7, 1, 5], prepend 1 to the dimension of y. x (4-D Tensor): 8 x 1 x 6 x 1 y (4-D Tensor): 1 x 7 x 1 x 5 2. Determine the size of each dimension of the output z: choose the maximum value from the two input dimensions. z (4-D Tensor): 8 x 7 x 6 x 5 If the number of dimensions of the two inputs are the same, the size of the output can be directly determined in step 2. When p takes different values, the norm formula is as follows: When p = 0, defining $0^0=0$, the zero-norm of z is simply the number of non-zero elements of z. .. math:: ||z||_{0}=\lim_{p \\rightarrow 0}\sum_{i=1}^{m}|z_i|^{p} When p = inf, the inf-norm of z is the maximum element of z. .. math:: ||z||_\infty=\max_i |z_i| When p = -inf, the negative-inf-norm of z is the minimum element of z. .. math:: ||z||_{-\infty}=\min_i |z_i| Otherwise, the p-norm of z follows the formula, .. math:: ||z||_{p}=(\sum_{i=1}^{m}|z_i|^p)^{\\frac{1}{p}} Args: x (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64. y (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64. p (float, optional): The norm to be computed, its data type is float32 or float64. Default: 2. Returns: Tensor: Tensor that is the p-norm of (x - y). Examples: .. code-block:: python import paddle import numpy as np x = paddle.to_tensor(np.array([[3, 3],[3, 3]]), "float32") y = paddle.to_tensor(np.array([[3, 3],[3, 1]]), "float32") out = paddle.dist(x, y, 0) print(out) # out = [1.] out = paddle.dist(x, y, 2) print(out) # out = [2.] 
out = paddle.dist(x, y, float("inf")) print(out) # out = [2.] out = paddle.dist(x, y, float("-inf")) print(out) # out = [0.] """ check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'dist') check_variable_and_dtype(y, 'dtype', ['float32', 'float64'], 'dist') check_type(p, 'p', (float, int), 'dist') helper = LayerHelper("dist", **locals()) out = helper.create_variable_for_type_inference(x.dtype) inputs = {"X": [x], "Y": [y]} outputs = {'Out': [out]} attrs = {"p": float(p)} helper.append_op( type='dist', inputs=inputs, outputs={'Out': out}, attrs=attrs) return out def cond(x, p=None, name=None): """ Computes the condition number of a matrix or batches of matrices with respect to a matrix norm ``p``. Args: x (Tensor): The input tensor could be tensor of shape ``(*, m, n)`` where ``*`` is zero or more batch dimensions for ``p`` in ``(2, -2)``, or of shape ``(*, n, n)`` where every matrix is invertible for any supported ``p``. And the input data type could be ``float32`` or ``float64``. p (float|string, optional): Order of the norm. Supported values are `fro`, `nuc`, `1`, `-1`, `2`, `-2`, `inf`, `-inf`. Default value is `None`, meaning that the order of the norm is `2`. name (str, optional): The default value is `None`. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: computing results of condition number, its data type is the same as input Tensor ``x``. Examples: .. code-block:: python import paddle import numpy as np x = paddle.to_tensor([[1., 0, -1], [0, 1, 0], [1, 0, 1]]) # compute conditional number when p is None out = paddle.linalg.cond(x) # out.numpy() [1.4142135] # compute conditional number when order of the norm is 'fro' out_fro = paddle.linalg.cond(x, p='fro') # out_fro.numpy() [3.1622777] # compute conditional number when order of the norm is 'nuc' out_nuc = paddle.linalg.cond(x, p='nuc') # out_nuc.numpy() [9.2426405] # compute conditional number when order of the norm is 1 out_1 = paddle.linalg.cond(x, p=1) # out_1.numpy() [2.] # compute conditional number when order of the norm is -1 out_minus_1 = paddle.linalg.cond(x, p=-1) # out_minus_1.numpy() [1.] # compute conditional number when order of the norm is 2 out_2 = paddle.linalg.cond(x, p=2) # out_2.numpy() [1.4142135] # compute conditional number when order of the norm is -1 out_minus_2 = paddle.linalg.cond(x, p=-2) # out_minus_2.numpy() [0.70710677] # compute conditional number when order of the norm is inf out_inf = paddle.linalg.cond(x, p=np.inf) # out_inf.numpy() [2.] # compute conditional number when order of the norm is -inf out_minus_inf = paddle.linalg.cond(x, p=-np.inf) # out_minus_inf.numpy() [1.] 
a = paddle.to_tensor(np.random.randn(2, 4, 4).astype('float32')) # a.numpy() # [[[ 0.14063153 -0.996288 0.7996131 -0.02571543] # [-0.16303636 1.5534962 -0.49919784 -0.04402903] # [-1.1341571 -0.6022629 0.5445269 0.29154757] # [-0.16816919 -0.30972657 1.7521842 -0.5402487 ]] # [[-0.58081484 0.12402827 0.7229862 -0.55046535] # [-0.15178485 -1.1604939 0.75810957 0.30971205] # [-0.9669573 1.0940945 -0.27363303 -0.35416734] # [-1.216529 2.0018666 -0.7773689 -0.17556527]]] a_cond_fro = paddle.linalg.cond(a, p='fro') # a_cond_fro.numpy() [31.572273 28.120834] b = paddle.to_tensor(np.random.randn(2, 3, 4).astype('float64')) # b.numpy() # [[[ 1.61707487 0.46829144 0.38130416 0.82546736] # [-1.72710298 0.08866375 -0.62518804 0.16128892] # [-0.02822879 -1.67764516 0.11141444 0.3220113 ]] # [[ 0.22524372 0.62474921 -0.85503233 -1.03960523] # [-0.76620689 0.56673047 0.85064753 -0.45158196] # [ 1.47595418 2.23646462 1.5701758 0.10497519]]] b_cond_2 = paddle.linalg.cond(b, p=2) # b_cond_2.numpy() [3.30064451 2.51976252] """ def mat_norm(input, porder=1., axis=None): """ NOTE: Calculate the matrix norm of a square matrix or batches of square matrices, when porder is in (1, -1, inf, -inf) """ reduce_all = True if axis is None or axis == [] else False axis = axis if axis != None and axis != [] else [0] keepdim = False if paddle.in_dynamic_mode(): abs_out = _C_ops.abs(input) sum_out = _C_ops.reduce_sum(abs_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == 1 or porder == np.inf: return _C_ops.reduce_max(sum_out, 'dim', [-1], 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == -1 or porder == -np.inf: return _C_ops.reduce_min(sum_out, 'dim', [-1], 'keepdim', keepdim, 'reduce_all', reduce_all) block = LayerHelper('norm', **locals()) abs_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='abs', inputs={'X': input}, outputs={'Out': abs_out}) block.append_op( type='reduce_sum', inputs={'X': abs_out}, outputs={'Out': sum_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) if porder == 1 or porder == np.inf: block.append_op( type='reduce_max', inputs={'X': sum_out}, outputs={'Out': out}, attrs={ 'dim': [-1], 'keep_dim': keepdim, 'reduce_all': reduce_all }) if porder == -1 or porder == -np.inf: block.append_op( type='reduce_min', inputs={'X': sum_out}, outputs={'Out': out}, attrs={ 'dim': [-1], 'keep_dim': keepdim, 'reduce_all': reduce_all }) return out def fro_norm(input, porder=2, axis=[-1]): """ NOTE: Calculate the frobenius norm of a square matrix or batches of square matrices. """ reduce_all = True if axis is None or axis == [] else False keepdim = False if paddle.in_dynamic_mode(): pow_out = _C_ops.pow(input, 'factor', porder) sum_out_1 = _C_ops.reduce_sum(pow_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) sum_out_2 = _C_ops.reduce_sum(sum_out_1, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) return _C_ops.pow(sum_out_2, 'factor', float(1. 
/ porder)) block = LayerHelper('norm', **locals()) pow_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out_1 = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out_2 = block.create_variable_for_type_inference( dtype=block.input_dtype()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='pow', inputs={'X': input}, outputs={'Out': pow_out}, attrs={'factor': porder}) block.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': sum_out_1}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='reduce_sum', inputs={'X': sum_out_1}, outputs={'Out': sum_out_2}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='pow', inputs={'X': sum_out_2}, outputs={'Out': out}, attrs={'factor': float(1. / porder)}) return out def svd_norm(input, porder, axis=[-1]): """ NOTE: Calculate the matrix norm, which is related to singular values, of a matrix or batches of matrices, including nuclear norm, 2-norm and (-2)-norm. """ reduce_all = True if axis is None or axis == [] else False keepdim = False u, s, vh = svd(input, full_matrices=False) if paddle.in_dynamic_mode(): if porder == "nuc": return _C_ops.reduce_sum(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) max_out = _C_ops.reduce_max(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) min_out = _C_ops.reduce_min(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == 2: return _C_ops.elementwise_div(max_out, min_out, 'aixs', axis, 'use_mkldnn', False) if porder == -2: return _C_ops.elementwise_div(min_out, max_out, 'aixs', axis, 'use_mkldnn', False) block = LayerHelper('norm', **locals()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) if porder == "nuc": block.append_op( type='reduce_sum', inputs={'X': s}, outputs={'Out': out}, attrs={ 'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all }) return out max_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) min_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='reduce_max', inputs={'X': s}, outputs={'Out': max_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='reduce_min', inputs={'X': s}, outputs={'Out': min_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) if porder == 2: block.append_op( type='elementwise_div', inputs={'X': max_out, 'Y': min_out}, outputs={'Out': out}, attrs={'aixs': axis, 'use_mkldnn': False}) return out if porder == -2: block.append_op( type='elementwise_div', inputs={'X': min_out, 'Y': max_out}, outputs={'Out': out}, attrs={'aixs': axis, 'use_mkldnn': False}) return out def empty_tensor(input, shape): if paddle.in_dynamic_mode(): return input.reshape(shape) raise ValueError("only support x is nonempty tensor in static mode") x_shape = list(x.shape) if not len(x_shape) >= 2: raise ValueError("input should be a matrix or batches of matrices, " + "but the dimention of received input is {}".format( len(x_shape))) if p == None: p = 2 x_size = 0 if (0 in x_shape) else 1 if p in ("fro", "nuc", 1, -1, np.inf, -np.inf): if x_shape[len(x_shape) - 1] == x_shape[len(x_shape) - 2]: if x_size == 0: return empty_tensor(x, x_shape[:-2]) x_inv = x.inverse() if p == "fro": return fro_norm(x) * fro_norm(x_inv) if p == "nuc": return svd_norm(x, p) * svd_norm(x_inv, p) if p in (1, -1): return mat_norm( x, 
porder=p, axis=[-2]) * mat_norm( x_inv, porder=p, axis=[-2]) if p in (np.inf, -np.inf): return mat_norm( x, porder=p, axis=[-1]) * mat_norm( x_inv, porder=p, axis=[-1]) else: raise ValueError("only support p is {} when input is a ".format(p) + "square matrix or batches of square matrices") elif p in (2, -2): if x_size == 0: return empty_tensor(x, x_shape[:-2]) return svd_norm(x, porder=p) else: raise ValueError( "unsupported {} for p, only supporting ('fro', 'nuc', ".format( p) + "1, -1, 2, -2, inf, -inf) or none") def dot(x, y, name=None): """ This operator calculates inner product for vectors. .. note:: Support 1-d and 2-d Tensor. When it is 2d, the first dimension of this matrix is the batch dimension, which means that the vectors of multiple batches are dotted. Parameters: x(Tensor): 1-D or 2-D ``Tensor``. Its dtype should be ``float32``, ``float64``, ``int32``, ``int64`` y(Tensor): 1-D or 2-D ``Tensor``. Its dtype soulde be ``float32``, ``float64``, ``int32``, ``int64`` name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. Details: :ref:`api_guide_Name` Returns: Tensor: the calculated result Tensor. Examples: .. code-block:: python import paddle import numpy as np x_data = np.random.uniform(0.1, 1, [10]).astype(np.float32) y_data = np.random.uniform(1, 3, [10]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.dot(x, y) print(z) """ op_type = 'dot' # skip var type check in dygraph mode to improve efficiency if paddle.in_dynamic_mode(): op = getattr(_C_ops, op_type) return op(x, y) assert x is not None, 'x cannot be None in {}'.format(op_type) assert y is not None, 'y cannot be None in {}'.format(op_type) check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], op_type) check_variable_and_dtype(y, 'y', ['float32', 'float64', 'int32', 'int64'], op_type) helper = LayerHelper(op_type, **locals()) if name is None: out = helper.create_variable_for_type_inference(dtype=x.dtype) else: out = helper.create_variable( name=name, dtype=x.dtype, persistable=False) helper.append_op( type="dot", inputs={'X': x, 'Y': y}, attrs={}, outputs={"Out": out}) return out def cov(x, rowvar=True, ddof=True, fweights=None, aweights=None, name=None): """ Estimate the covariance matrix of the input variables, given data and weights. A covariance matrix is a square matrix, indicate the covariance of each pair variables in the input matrix. For example, for an N-dimensional samples X=[x1,x2,…xN]T, then the covariance matrix element Cij is the covariance of xi and xj. The element Cii is the variance of xi itself. Parameters: x(Tensor): A N-D(N<=2) Tensor containing multiple variables and observations. By default, each row of x represents a variable. Also see rowvar below. rowvar(Bool, optional): If rowvar is True (default), then each row represents a variable, with observations in the columns. Default: True ddof(Bool, optional): If ddof=True will return the unbiased estimate, and ddof=False will return the simple average. Default: True fweights(Tensor, optional): 1-D Tensor of integer frequency weights; The number of times each observation vector should be repeated. Default: None aweights(Tensor, optional): 1-D Tensor of observation vector weights. How important of the observation vector, larger data means this element is more important. Default: None name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. 
Details: :ref:`api_guide_Name` Returns: Tensor: The covariance matrix Tensor of the variables. Examples: .. code-block:: python import paddle xt = paddle.rand((3,4)) paddle.linalg.cov(xt) ''' Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, [[0.07918842, 0.06127326, 0.01493049], [0.06127326, 0.06166256, 0.00302668], [0.01493049, 0.00302668, 0.01632146]]) ''' """ op_type = 'cov' if len(x.shape) > 2 or len(x.shape) < 1: raise ValueError( "Input(x) only support N-D (1<=N<=2) tensor in cov, but received " "length of Input(input) is %s." % len(x.shape)) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cov') nx = x if len(x.shape) == 1: nx = x.reshape((1, -1)) if not rowvar and nx.shape[0] != 1: nx = nx.t() w = None observation_num = nx.shape[1] if fweights is not None: w = fweights.astype(nx.dtype) if len(w.shape) > 1: raise ValueError( "Input(fweights) only support N-D (N<=1) tensor in cov, but received " "shape of Input(input) is %s." % len(fweights.shape)) if fweights.shape[0] != observation_num: raise ValueError( "The number of Input(fweights) should equal to x's dim[1]: {}, but received " "size of Input(fweights) is {}.".format(observation_num, fweights.shape[0])) if fweights.min() < 0: raise ValueError( "The value of Input(fweights) cannot be negtive, but received " "min of Input(fweights) is {}.".format(fweights.min())) if not paddle.all(fweights == paddle.round(fweights.astype('float64'))): raise ValueError("Input(fweights) must be integer ") if aweights is not None: aw = aweights.astype(nx.dtype) if len(aw.shape) > 1: raise ValueError( "Input(aweights) only support N-D (N<=1) tensor in cov, but received " "length of Input(input) is %s." % len(aweights.shape)) check_variable_and_dtype(aweights, 'dtype', ['float32', 'float64'], 'cov') if aweights.shape[0] != observation_num: raise ValueError( "The number of Input(aweights) should equal to x's dim[1]: {}, but received " "size of Input(aweights) is {}.".format(observation_num, aweights.shape[0])) if aweights.min() < 0: raise ValueError( "The value of Input(aweights) cannot be negtive, but received " "min of Input(aweights) is {}.".format(aweights.min())) if w is not None: w = w * aw else: w = aw w_sum = paddle.to_tensor(observation_num, dtype=nx.dtype) if fweights is not None or aweights is not None: w_sum = w.sum() if w_sum.item() == 0: raise ValueError("The sum of weights is zero, can't be normalized.") if w is not None: nx_w = nx * w avg = (nx_w).sum(axis=1) / w_sum else: avg = nx.sum(axis=1) / w_sum nx_w = nx if w is not None and aweights is not None and ddof == True: norm_factor = w_sum - (w * aweights).sum() / w_sum else: norm_factor = w_sum - ddof if norm_factor <= 0: norm_factor = paddle.to_tensor(0, dtype=nx.dtype) nx = nx - avg.unsqueeze(1) xxt = paddle.mm(nx, nx_w.t().conj()) cov = paddle.divide(xxt, norm_factor).squeeze() return cov def t(input, name=None): """ Transpose <=2-D tensor. 0-D and 1-D tensors are returned as it is and 2-D tensor is equal to the paddle.transpose function which perm dimensions set 0 and 1. Args: input (Tensor): The input Tensor. It is a N-D (N<=2) Tensor of data types float16, float32, float64, int32. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` Returns: Tensor: A transposed n-D Tensor, with data type being float16, float32, float64, int32, int64. For Example: .. 
code-block:: text # Example 1 (0-D tensor) x = tensor([0.79]) paddle.t(x) = tensor([0.79]) # Example 2 (1-D tensor) x = tensor([0.79, 0.84, 0.32]) paddle.t(x) = tensor([0.79, 0.84, 0.32]) # Example 3 (2-D tensor) x = tensor([0.79, 0.84, 0.32], [0.64, 0.14, 0.57]) paddle.t(x) = tensor([0.79, 0.64], [0.84, 0.14], [0.32, 0.57]) Examples: .. code-block:: python import paddle x = paddle.ones(shape=[2, 3], dtype='int32') x_transposed = paddle.t(x) print(x_transposed.shape) # [3, 2] """ if len(input.shape) > 2: raise ValueError( "Input(input) only support N-D (N<=2) tensor, but received " "length of Input(input) is %s. Perhaps you can use paddle." "tensor.transpose() instead." % len(input.shape)) if paddle.in_dynamic_mode(): if len(input.shape) == 1: return input # 2-D tensor perm = [1, 0] out, _ = _C_ops.transpose2(input, 'axis', perm) return out check_variable_and_dtype( input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'], 'transpose') helper = LayerHelper('t', **locals()) out = helper.create_variable_for_type_inference(input.dtype) input_shape = helper.create_variable_for_type_inference(input.dtype) if len(input.shape) == 1: out = input else: helper.append_op( type='transpose2', inputs={'X': [input]}, outputs={'Out': [out], 'XShape': [input_shape]}, attrs={'axis': [1, 0]}) return out def cross(x, y, axis=None, name=None): """ Computes the cross product between two tensors along an axis. Inputs must have the same shape, and the length of their axes should be equal to 3. If `axis` is not given, it defaults to the first axis found with the length 3. Args: x (Tensor): The first input tensor. y (Tensor): The second input tensor. axis (int, optional): The axis along which to compute the cross product. It defaults to the first axis found with the length 3. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor. A Tensor with same data type as `x`. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0], [3.0, 3.0, 3.0]]) y = paddle.to_tensor([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]) z1 = paddle.cross(x, y) # [[-1. -1. -1.] # [ 2. 2. 2.] # [-1. -1. -1.]] z2 = paddle.cross(x, y, axis=1) # [[0. 0. 0.] # [0. 0. 0.] # [0. 0. 0.]] """ if in_dygraph_mode(): return _C_ops.final_state_cross(x, y, axis) else: if _in_legacy_dygraph(): if axis is not None: return _C_ops.cross(x, y, 'dim', axis) else: return _C_ops.cross(x, y) else: helper = LayerHelper("cross", **locals()) out = helper.create_variable_for_type_inference(x.dtype) attrs = dict() attrs['dim'] = axis helper.append_op( type='cross', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs=attrs) return out def cholesky(x, upper=False, name=None): r""" Computes the Cholesky decomposition of one symmetric positive-definite matrix or batches of symmetric positive-definite matrice. If `upper` is `True`, the decomposition has the form :math:`A = U^{T}U` , and the returned matrix :math:`U` is upper-triangular. Otherwise, the decomposition has the form :math:`A = LL^{T}` , and the returned matrix :math:`L` is lower-triangular. Args: x (Tensor): The input tensor. Its shape should be `[*, M, M]`, where * is zero or more batch dimensions, and matrices on the inner-most 2 dimensions all should be symmetric positive-definite. Its data type should be float32 or float64. upper (bool): The flag indicating whether to return upper or lower triangular matrices. Default: False. 
Returns: Tensor: A Tensor with same shape and data type as `x`. It represents \ triangular matrices generated by Cholesky decomposition. Examples: .. code-block:: python import paddle import numpy as np a = np.random.rand(3, 3) a_t = np.transpose(a, [1, 0]) x_data = np.matmul(a, a_t) + 1e-03 x = paddle.to_tensor(x_data) out = paddle.linalg.cholesky(x, upper=False) print(out) # [[1.190523 0. 0. ] # [0.9906703 0.27676893 0. ] # [1.25450498 0.05600871 0.06400121]] """ if paddle.in_dynamic_mode(): return _C_ops.cholesky(x, "upper", upper) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cholesky') check_type(upper, 'upper', bool, 'cholesky') helper = LayerHelper('cholesky', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='cholesky', inputs={'X': [x]}, outputs={'Out': out}, attrs={'upper': upper}) return out def matrix_rank(x, tol=None, hermitian=False, name=None): r""" Computes the rank of a matrix. The rank of a matrix is the number of singular values that are greater than the specified `tol` threshold when hermitian=False, or the number of eigenvalues in absolute value that are greater than the specified `tol` threshold when hermitian=True. Args: x (Tensor): The input tensor. Its shape should be `[..., m, n]`, where `...` is zero or more batch dimensions. If `x` is a batch of matrices then the output has the same batch dimensions. The data type of `x` should be float32 or float64. tol (float,Tensor,optional): the tolerance value. Default: None. If `tol` is not specified, and `sigma` is the largest singular value (or eigenvalues in absolute value), and `eps` is the epsilon value for the dtype of `x`, then `tol` is computed with formula `tol=sigma * max(m,n) * eps`. Note that if `x` is a batch of matrices, `tol` is computed this way for every batch. hermitian (bool,optional): indicates whether `x` is Hermitian. Default: False. When hermitian=True, `x` is assumed to be Hermitian, enabling a more efficient method for finding eigenvalues, but `x` is not checked inside the function. Instead, We just use the lower triangular of the matrix to compute. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: Rank of tensor x. Examples: .. 
code-block:: python import paddle a = paddle.eye(10) b = paddle.linalg.matrix_rank(a) print(b) # b = [10] c = paddle.ones(shape=[3, 4, 5, 5]) d = paddle.linalg.matrix_rank(c, tol=0.01, hermitian=True) print(d) # d = [[1, 1, 1, 1], # [1, 1, 1, 1], # [1, 1, 1, 1]] """ if paddle.in_dynamic_mode(): if tol is None: tol_tensor = None tol_attr = 0.0 use_default_tol = True elif isinstance(tol, Variable): if tol.dtype != x.dtype: tol_tensor = cast(tol, x.dtype) else: tol_tensor = tol tol_attr = 0.0 use_default_tol = False else: tol_tensor = None tol_attr = float(tol) use_default_tol = False return _C_ops.matrix_rank(x, tol_tensor, "tol", tol_attr, 'hermitian', hermitian, 'use_default_tol', use_default_tol) inputs = {} attrs = {} check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'matrix_rank') inputs['X'] = x if tol is None: attrs['use_default_tol'] = True elif isinstance(tol, Variable): check_variable_and_dtype(tol, 'tol', ['float32'], 'matrix_rank') attrs['use_default_tol'] = False if tol.dtype != x.dtype: inputs['TolTensor'] = cast(tol, x.dtype) else: inputs['TolTensor'] = tol else: check_type(tol, 'tol', float, 'matrix_rank') attrs['use_default_tol'] = False attrs['tol'] = tol check_type(hermitian, 'hermitian', bool, 'matrix_rank') attrs['hermitian'] = hermitian helper = LayerHelper('matrix_rank', **locals()) out = helper.create_variable_for_type_inference(dtype='int32') helper.append_op( type='matrix_rank', inputs=inputs, outputs={'Out': out}, attrs=attrs) return out def bmm(x, y, name=None): """ Applies batched matrix multiplication to two tensors. Both of the two input tensors must be three-dementional and share the same batch size. if x is a (b, m, k) tensor, y is a (b, k, n) tensor, the output will be a (b, m, n) tensor. Args: x (Tensor): The input Tensor. y (Tensor): The input Tensor. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The product Tensor. Examples: .. code-block:: python import paddle # In imperative mode: # size x: (2, 2, 3) and y: (2, 3, 2) x = paddle.to_tensor([[[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]], [[3.0, 3.0, 3.0], [4.0, 4.0, 4.0]]]) y = paddle.to_tensor([[[1.0, 1.0],[2.0, 2.0],[3.0, 3.0]], [[4.0, 4.0],[5.0, 5.0],[6.0, 6.0]]]) out = paddle.bmm(x, y) #output size: (2, 2, 2) #output value: #[[[6.0, 6.0],[12.0, 12.0]],[[45.0, 45.0],[60.0, 60.0]]] out_np = out.numpy() """ x_shape = x.shape y_shape = y.shape if not len(x_shape) == len(y_shape) == 3: raise ValueError( "x and y should be 3-dimensional. But received x's dimention: {}, y's dimention: {}". format(x_shape, y_shape)) if x_shape[2] != y_shape[1]: raise ValueError( "x's width must be equal with y's height. But received x's shape: {}, y's shape: {}". format(x_shape, y_shape)) if x_shape[0] != y_shape[0]: raise ValueError( "x's batch (shape[0]) must be equal with y's batch (shape[0]). But received x's shape: {}, y's shape: {}". format(x_shape, y_shape)) if paddle.in_dynamic_mode(): return _C_ops.bmm(x, y) helper = LayerHelper('bmm', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type='bmm', inputs={'X': x, 'Y': y}, outputs={'Out': out}) return out def histogram(input, bins=100, min=0, max=0, name=None): """ Computes the histogram of a tensor. The elements are sorted into equal width bins between min and max. If min and max are both zero, the minimum and maximum values of the data are used. Args: input (Tensor): A Tensor(or LoDTensor) with shape :math:`[N_1, N_2,..., N_k]` . 
The data type of the input Tensor should be float32, float64, int32, int64. bins (int): number of histogram bins min (int): lower end of the range (inclusive) max (int): upper end of the range (inclusive) Returns: Tensor: data type is int64, shape is (nbins,). Examples: .. code-block:: python import paddle inputs = paddle.to_tensor([1, 2, 1]) result = paddle.histogram(inputs, bins=4, min=0, max=3) print(result) # [0, 2, 1, 0] """ if paddle.in_dynamic_mode(): return _C_ops.histogram(input, "bins", bins, "min", min, "max", max) helper = LayerHelper('histogram', **locals()) check_variable_and_dtype( input, 'X', ['int32', 'int64', 'float32', 'float64'], 'histogram') out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64) helper.append_op( type='histogram', inputs={'X': input}, outputs={'Out': out}, attrs={'bins': bins, 'min': min, 'max': max}) return out def bincount(x, weights=None, minlength=0, name=None): """ Computes frequency of each value in the input tensor. Args: x (Tensor): A Tensor with non-negative integer. Should be 1-D tensor. weights (Tensor, optional): Weight for each value in the input tensor. Should have the same shape as input. Default is None. minlength (int, optional): Minimum number of bins. Should be non-negative integer. Default is 0. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor of frequency. Examples: .. code-block:: python import paddle x = paddle.to_tensor([1, 2, 1, 4, 5]) result1 = paddle.bincount(x) print(result1) # [0, 2, 1, 0, 1, 1] w = paddle.to_tensor([2.1, 0.4, 0.1, 0.5, 0.5]) result2 = paddle.bincount(x, weights=w) print(result2) # [0., 2.19999981, 0.40000001, 0., 0.50000000, 0.50000000] """ if x.dtype not in [paddle.int32, paddle.int64]: raise TypeError("Elements in Input(x) should all be integers") if paddle.in_dynamic_mode(): return _C_ops.bincount(x, weights, "minlength", minlength) helper = LayerHelper('bincount', **locals()) check_variable_and_dtype(x, 'X', ['int32', 'int64'], 'bincount') if weights is not None: check_variable_and_dtype(weights, 'Weights', ['int32', 'int64', 'float32', 'float64'], 'bincount') out = helper.create_variable_for_type_inference(dtype=weights.dtype) else: out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='bincount', inputs={'X': x, 'Weights': weights}, outputs={'Out': out}, attrs={'minlength': minlength}) return out def mv(x, vec, name=None): """ Performs a matrix-vector product of the matrix x and the vector vec. Args: x (Tensor): A tensor with shape :math:`[M, N]` , The data type of the input Tensor x should be one of float32, float64. vec (Tensor): A tensor with shape :math:`[N]` , The data type of the input Tensor x should be one of float32, float64. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor which is producted by x and vec. Examples: .. 
code-block:: python # x: [M, N], vec: [N] # paddle.mv(x, vec) # out: [M] import numpy as np import paddle x_data = np.array([[2, 1, 3], [3, 0, 1]]).astype("float64") x = paddle.to_tensor(x_data) vec_data = np.array([3, 5, 1]) vec = paddle.to_tensor(vec_data).astype("float64") out = paddle.mv(x, vec) """ if in_dygraph_mode(): return _C_ops.final_state_mv(x, vec) else: if _in_legacy_dygraph(): out = _C_ops.mv(x, vec) return out else: def __check_input(x, vec): var_names = {'x': x, 'vec': vec} for name, val in var_names.items(): check_variable_and_dtype(val, name, ['float32', 'float64'], 'mv') x_shape = list(x.shape) vec_shape = list(vec.shape) if len(x_shape) != 2: raise ValueError( "x should be 2-dimensional. But received x's dimention: {}". format(x_shape)) if len(vec_shape) != 1: raise ValueError( "vec should be 1-dimensional. But received vec's dimention: {}". format(vec_shape)) __check_input(x, vec) helper = LayerHelper('mv', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='mv', inputs={'X': x, 'Vec': vec}, outputs={'Out': out}) return out def det(x, name=None): """ Calculates determinant value of a square matrix or batches of square matrices. Args: x (Tensor): input (Tensor): the input matrix of size `(n, n)` or the batch of matrices of size `(*, n, n)` where `*` is one or more batch dimensions. Returns: y (Tensor):the determinant value of a square matrix or batches of square matrices. Examples: .. code-block:: python import paddle x = paddle.randn([3,3,3]) A = paddle.linalg.det(x) print(A) # [ 0.02547996, 2.52317095, -6.15900707]) """ if paddle.in_dynamic_mode(): return _C_ops.determinant(x) check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'det') input_shape = list(x.shape) assert len(input_shape) >= 2, \ "The x must be at least 2-dimensional, " \ "but received Input x's dimensional: %s.\n" % \ len(input_shape) assert (input_shape[-1] == input_shape[-2]), \ "Expect squared input," \ "but received %s by %s matrix.\n" \ %(input_shape[-2], input_shape[-1]) \ helper = LayerHelper('determinant', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='determinant', inputs={'Input': [x]}, outputs={'Out': [out]}) return out def slogdet(x, name=None): """ Calculates the sign and natural logarithm of the absolute value of a square matrix's or batches square matrices' determinant. The determinant can be computed with ``sign * exp(logabsdet) Supports input of float, double Note that for matrices that have zero determinant, this returns ``(0, -inf)`` Args: x (Tensor): the batch of matrices of size :math:`(*, n, n)` where math:`*` is one or more batch dimensions. Returns: y (Tensor): A tensor containing the sign of the determinant and the natural logarithm of the absolute value of determinant, respectively. Examples: .. code-block:: python import paddle x = paddle.randn([3,3,3]) A = paddle.linalg.slogdet(x) print(A) # [[ 1. , 1. , -1. 
], # [-0.98610914, -0.43010661, -0.10872950]]) """ if paddle.in_dynamic_mode(): return _C_ops.slogdeterminant(x) check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'slogdet') input_shape = list(x.shape) assert len(input_shape) >= 2, \ "The x must be at least 2-dimensional, " \ "but received Input x's dimensional: %s.\n" % \ len(input_shape) assert (input_shape[-1] == input_shape[-2]), \ "Expect squared input," \ "but received %s by %s matrix.\n" \ %(input_shape[-2], input_shape[-1]) \ helper = LayerHelper('slogdeterminant', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='slogdeterminant', inputs={'Input': [x]}, outputs={'Out': [out]}) return out def svd(x, full_matrices=False, name=None): r""" Computes the singular value decomposition of one matrix or a batch of regular matrices. Let :math:`X` be the input matrix or a batch of input matrices, the output should satisfies: .. math:: X = U * diag(S) * VT Args: x (Tensor): The input tensor. Its shape should be `[..., N, M]`, where `...` is zero or more batch dimensions. N and M can be arbitraty positive number. Note that if x is sigular matrices, the grad is numerical instable. The data type of x should be float32 or float64. full_matrices (bool): A flag to control the behavor of svd. If full_matrices = True, svd op will compute full U and V matrics, which means shape of U is `[..., N, N]`, shape of V is `[..., M, M]`. K = min(M, N). If full_matrices = False, svd op will use a economic method to store U and V. which means shape of U is `[..., N, K]`, shape of V is `[..., M, K]`. K = min(M, N). name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tuple of 3 tensors: (U, S, VH). VH is the conjugate transpose of V. S is the singlar value vectors of matrics with shape `[..., K]` Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [1.0, 3.0], [4.0, 6.0]]).astype('float64') x = x.reshape([3, 2]) u, s, vh = paddle.linalg.svd(x) print (u) #U = [[ 0.27364809, -0.21695147 ], # [ 0.37892198, -0.87112408 ], # [ 0.8840446 , 0.44053933 ]] print (s) #S = [8.14753743, 0.78589688] print (vh) #VT= [[ 0.51411221, 0.85772294], # [ 0.85772294, -0.51411221]] # one can verify : U * S * VT == X # U * UH == I # V * VH == I """ if paddle.in_dynamic_mode(): return _C_ops.svd(x, 'full_matrices', full_matrices) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'svd') check_type(full_matrices, 'full_matrices', bool, 'svd') helper = LayerHelper('svd', **locals()) u = helper.create_variable_for_type_inference(dtype=x.dtype) vh = helper.create_variable_for_type_inference(dtype=x.dtype) s = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['full_matrices'] = full_matrices helper.append_op( type='svd', inputs={'X': [x]}, outputs={'U': u, 'VH': vh, 'S': s}, attrs=attrs, ) return u, s, vh def matrix_power(x, n, name=None): r""" Computes the n-th power of a square matrix or a batch of square matrices. Let :math:`X` be a sqaure matrix or a batch of square matrices, :math:`n` be an exponent, the equation should be: .. math:: Out = X ^ {n} Specifically, - If `n > 0`, it returns the matrix or a batch of matrices raised to the power of `n`. - If `n = 0`, it returns the identity matrix or a batch of identity matrices. - If `n < 0`, it returns the inverse of each matrix (if invertible) raised to the power of `abs(n)`. 
Args: x (Tensor): A square matrix or a batch of square matrices to be raised to power `n`. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. n (int): The exponent. It can be any positive, negative integer or zero. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The n-th power of the matrix (or the batch of matrices) `x`. Its data type should be the same as that of `x`. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1, 2, 3], [1, 4, 9], [1, 8, 27]], dtype='float64') print(paddle.linalg.matrix_power(x, 2)) # [[6. , 34. , 102.], # [14. , 90. , 282.], # [36. , 250., 804.]] print(paddle.linalg.matrix_power(x, 0)) # [[1., 0., 0.], # [0., 1., 0.], # [0., 0., 1.]] print(paddle.linalg.matrix_power(x, -2)) # [[ 12.91666667, -12.75000000, 2.83333333 ], # [-7.66666667 , 8. , -1.83333333 ], # [ 1.80555556 , -1.91666667 , 0.44444444 ]] """ if paddle.in_dynamic_mode(): return _C_ops.matrix_power(x, "n", n) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'matrix_power') check_type(n, 'n', int, 'matrix_power') helper = LayerHelper('matrix_power', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matrix_power', inputs={'X': x}, outputs={'Out': out}, attrs={'n': n}) return out def qr(x, mode="reduced", name=None): r""" Computes the QR decomposition of one matrix or batches of matrice (backward is unsupported now). Args: x (Tensor): The input tensor. Its shape should be `[..., M, N]`, where ... is zero or more batch dimensions. M and N can be arbitrary positive number. The data type of x should be float32 or float64. mode (str, optional): A flag to control the behavior of qr, the default is "reduced". Suppose x's shape is `[..., M, N]` and denoting `K = min(M, N)`: If mode = "reduced", qr op will return reduced Q and R matrices, which means Q's shape is `[..., M, K]` and R's shape is `[..., K, N]`. If mode = "complete", qr op will return complete Q and R matrices, which means Q's shape is `[..., M, M]` and R's shape is `[..., M, N]`. If mode = "r", qr op will only return reduced R matrix, which means R's shape is `[..., K, N]`. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: If mode = "reduced" or mode = "complete", qr will return a two tensor-tuple, which represents Q and R. If mode = "r", qr will return a tensor which represents R. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') q, r = paddle.linalg.qr(x) print (q) print (r) # Q = [[-0.16903085, 0.89708523], # [-0.50709255, 0.27602622], # [-0.84515425, -0.34503278]]) # R = [[-5.91607978, -7.43735744], # [ 0. 
, 0.82807867]]) # one can verify : X = Q * R ; """ if paddle.in_dynamic_mode(): q, r = _C_ops.qr(x, 'mode', mode) if mode == "r": return r else: return q, r check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'qr') check_type(mode, 'mode', str, 'qr') helper = LayerHelper('qr', **locals()) q = helper.create_variable_for_type_inference(dtype=x.dtype) r = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['mode'] = mode helper.append_op( type='qr', inputs={'X': [x]}, outputs={'Q': q, 'R': r}, attrs=attrs) if mode == "r": return r else: return q, r def lu(x, pivot=True, get_infos=False, name=None): r""" Computes the LU factorization of an N-D(N>=2) matrix x. Returns the LU factorization(inplace x) and Pivots. low triangular matrix L and upper triangular matrix U are combined to a single LU matrix. Pivoting is done if pivot is set to True. P mat can be get by pivots: # ones = eye(rows) #eye matrix of rank rows # for i in range(cols): # swap(ones[i], ones[pivots[i]]) # return ones Args: X (Tensor): the tensor to factor of N-dimensions(N>=2). pivot (bool, optional): controls whether pivoting is done. Default: True. get_infos (bool, optional): if set to True, returns an info IntTensor. Default: False. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: factorization (Tensor): LU matrix, the factorization of input X. pivots (IntTensor): the pivots of size(∗(N-2), min(m,n)). `pivots` stores all the intermediate transpositions of rows. The final permutation `perm` could be reconstructed by this, details refer to upper example. infos (IntTensor, optional): if `get_infos` is `True`, this is a tensor of size (∗(N-2)) where non-zero values indicate whether factorization for the matrix or each minibatch has succeeded or failed. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') lu,p,info = paddle.linalg.lu(x, get_infos=True) # >>> lu: # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0.20000000, 0.80000000], # [0.60000000, 0.50000000]]) # >>> p # Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # [3, 3]) # >>> info # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # 0) P,L,U = paddle.linalg.lu_unpack(lu,p) # >>> P # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[0., 1., 0.], # [0., 0., 1.], # [1., 0., 0.]]), # >>> L # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[1. , 0. ], # [0.20000000, 1. ], # [0.60000000, 0.50000000]]), # >>> U # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0. 
, 0.80000000]])) # one can verify : X = P @ L @ U ; """ if paddle.in_dynamic_mode(): LU, Piv, Info = _C_ops.lu(x, 'pivots', pivot) if get_infos: return LU, Piv, Info else: return LU, Piv check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu') helper = LayerHelper('lu', **locals()) lu = helper.create_variable_for_type_inference(dtype=x.dtype) p = helper.create_variable_for_type_inference(dtype='int') info = helper.create_variable_for_type_inference(dtype='int') attrs = dict() attrs['pivots'] = pivot helper.append_op( type='lu', inputs={'X': x}, outputs={'Out': lu, 'Pivots': p, 'Infos': info}, attrs=attrs) if get_infos: return lu, p, info else: return lu, p def lu_unpack(x, y, unpack_ludata=True, unpack_pivots=True, name=None): r""" Unpack L U and P to single matrix tensor . unpack L and U matrix from LU, unpack permutation matrix P from Pivtos . P mat can be get by pivots: # ones = eye(rows) #eye matrix of rank rows # for i in range(cols): # swap(ones[i], ones[pivots[i]]) Args: x (Tensor): The LU tensor get from paddle.linalg.lu, which is combined by L and U. y (Tensor): Pivots get from paddle.linalg.lu. unpack_ludata (bool,optional): whether to unpack L and U from x. Default: True. unpack_pivots (bool, optional): whether to unpack permutation matrix P from Pivtos. Default: True. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: P (Tensor): Permutation matrix P of lu factorization. L (Tensor): The lower triangular matrix tensor of lu factorization. U (Tensor): The upper triangular matrix tensor of lu factorization. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') lu,p,info = paddle.linalg.lu(x, get_infos=True) # >>> lu: # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0.20000000, 0.80000000], # [0.60000000, 0.50000000]]) # >>> p # Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # [3, 3]) # >>> info # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # 0) P,L,U = paddle.linalg.lu_unpack(lu,p) # >>> P # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[0., 1., 0.], # [0., 0., 1.], # [1., 0., 0.]]), # >>> L # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[1. , 0. ], # [0.20000000, 1. ], # [0.60000000, 0.50000000]]), # >>> U # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0. , 0.80000000]])) # one can verify : X = P @ L @ U ; """ if paddle.in_dynamic_mode(): P, L, U = _C_ops.lu_unpack(x, y, 'unpack_ludata', unpack_ludata, 'unpack_pivots', unpack_pivots) return P, L, U check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu_unpack') helper = LayerHelper('lu_unpack', **locals()) p = helper.create_variable_for_type_inference(dtype=x.dtype) l = helper.create_variable_for_type_inference(dtype=x.dtype) u = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['unpack_ludata'] = unpack_ludata attrs['unpack_pivots'] = unpack_pivots helper.append_op( type='lu_unpack', inputs={'X': x, 'Pivots': y}, outputs={'Pmat': p, 'L': l, 'U': u}, attrs=attrs) return p, l, u # MASKED: eig function (lines 2039-2102) def eigvals(x, name=None): """ Compute the eigenvalues of one or more general matrices. Warning: The gradient kernel of this operator does not yet developed. 
If you need back propagation through this operator, please replace it with paddle.linalg.eig. Args: x (Tensor): A square matrix or a batch of square matrices whose eigenvalues will be computed. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32, float64, complex64, or complex128. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: A tensor containing the unsorted eigenvalues which has the same batch dimensions with `x`. The eigenvalues are complex-valued even when `x` is real. Examples: .. code-block:: python import paddle paddle.set_device("cpu") paddle.seed(1234) x = paddle.rand(shape=[3, 3], dtype='float64') # [[0.02773777, 0.93004224, 0.06911496], # [0.24831591, 0.45733623, 0.07717843], # [0.48016702, 0.14235102, 0.42620817]]) print(paddle.linalg.eigvals(x)) # [(-0.27078833542132674+0j), (0.29962280156230725+0j), (0.8824477020120244+0j)] #complex128 """ check_variable_and_dtype(x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigvals') x_shape = list(x.shape) if len(x_shape) < 2: raise ValueError( "The dimension of Input(x) should be at least 2, but received x's dimention = {}, x's shape = {}". format(len(x_shape), x_shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The last two dimensions of Input(x) should be equal, but received x's shape = {}". format(x_shape)) if paddle.in_dynamic_mode(): return _C_ops.eigvals(x) helper = LayerHelper('eigvals', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type='eigvals', inputs={'X': x}, outputs={'Out': out}) return out def multi_dot(x, name=None): """ Multi_dot is an operator that calculates multiple matrix multiplications. Supports inputs of float16(only GPU support), float32 and float64 dtypes. This function does not support batched inputs. The input tensor in [x] must be 2-D except for the first and last can be 1-D. If the first tensor is a 1-D vector of shape(n, ) it is treated as row vector of shape(1, n), similarly if the last tensor is a 1D vector of shape(n, ), it is treated as a column vector of shape(n, 1). If the first and last tensor are 2-D matrix, then the output is also 2-D matrix, otherwise the output is a 1-D vector. Multi_dot will select the lowest cost multiplication order for calculation. The cost of multiplying two matrices with shapes (a, b) and (b, c) is a * b * c. Given matrices A, B, C with shapes (20, 5), (5, 100), (100, 10) respectively, we can calculate the cost of different multiplication orders as follows: - Cost((AB)C) = 20x5x100 + 20x100x10 = 30000 - Cost(A(BC)) = 5x100x10 + 20x5x10 = 6000 In this case, multiplying B and C first, then multiply A, which is 5 times faster than sequential calculation. Args: x ([Tensor]): The input tensors which is a list Tensor. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The output Tensor. Examples: .. 
code-block:: python import paddle import numpy as np # A * B A_data = np.random.random([3, 4]).astype(np.float32) B_data = np.random.random([4, 5]).astype(np.float32) A = paddle.to_tensor(A_data) B = paddle.to_tensor(B_data) out = paddle.linalg.multi_dot([A, B]) print(out.numpy().shape) # [3, 5] # A * B * C A_data = np.random.random([10, 5]).astype(np.float32) B_data = np.random.random([5, 8]).astype(np.float32) C_data = np.random.random([8, 7]).astype(np.float32) A = paddle.to_tensor(A_data) B = paddle.to_tensor(B_data) C = paddle.to_tensor(C_data) out = paddle.linalg.multi_dot([A, B, C]) print(out.numpy().shape) # [10, 7] """ if paddle.in_dynamic_mode(): return _C_ops.multi_dot(x) check_type(x, 'x', (list, tuple), 'multi_dot') for id, item in enumerate(x): check_variable_and_dtype(item, 'x[' + str(id) + ']', ['float16', 'float32', 'float64'], 'multi_dot') if item.dtype != x[0].dtype: raise TypeError( "All the Tensors in the input must have the same data type.") helper = LayerHelper('multi_dot', **locals()) dtype = helper.input_dtype(input_param_name='x') out = helper.create_variable_for_type_inference(dtype) helper.append_op(type='multi_dot', inputs={"X": x}, outputs={"Out": out}) return out def eigh(x, UPLO='L', name=None): """ Compute the eigenvalues and eigenvectors of a complex Hermitian (conjugate symmetric) or a real symmetric matrix. Args: x (Tensor): A tensor with shape :math:`[*, N, N]` , The data type of the input Tensor x should be one of float32, float64, complex64, complex128. UPLO(str, optional): (string, default 'L'), 'L' represents the lower triangular matrix, "'U' represents the upper triangular matrix.". name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: out_value(Tensor): A Tensor with shape [*, N] and data type of float32 and float64. The eigenvalues of eigh op. out_vector(Tensor): A Tensor with shape [*, N, N] and data type of float32,float64,complex64 and complex128. The eigenvectors of eigh op. Examples: .. code-block:: python import numpy as np import paddle x_data = np.array([[1, -2j], [2j, 5]]) x = paddle.to_tensor(x_data) out_value, out_vector = paddle.linalg.eigh(x, UPLO='L') print(out_value) #[0.17157288, 5.82842712] print(out_vector) #[(-0.9238795325112867+0j), (-0.3826834323650898+0j)], #[ 0.3826834323650898j , -0.9238795325112867j ]] """ if paddle.in_dynamic_mode(): return _C_ops.eigh(x, 'UPLO', UPLO) def __check_input(x, UPLO): x_shape = list(x.shape) if len(x.shape) < 2: raise ValueError( "Input(input) only support >=2 tensor, but received " "length of Input(input) is %s." % len(x.shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The input matrix must be batches of square matrices. But received x's dimention: {}". format(x_shape)) if UPLO != 'L' and UPLO != 'U': raise ValueError( "UPLO must be L or U. 
But received UPLO is: {}".format(UPLO)) __check_input(x, UPLO) helper = LayerHelper('eigh', **locals()) check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigh') out_value = helper.create_variable_for_type_inference(dtype=x.dtype) out_vector = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='eigh', inputs={'X': x}, outputs={'Eigenvalues': out_value, 'Eigenvectors': out_vector}, attrs={'UPLO': UPLO}) return out_value, out_vector def pinv(x, rcond=1e-15, hermitian=False, name=None): r""" Calculate pseudo inverse via SVD(singular value decomposition) of one matrix or batches of regular matrix. .. math:: if hermitian == False: x = u * s * vt (SVD) out = v * 1/s * ut else: x = u * s * ut (eigh) out = u * 1/s * u.conj().transpose(-2,-1) If x is hermitian or symmetric matrix, svd will be replaced with eigh. Args: x(Tensor): The input tensor. Its shape should be (*, m, n) where * is zero or more batch dimensions. m and n can be arbitraty positive number. The data type of x should be float32 or float64 or complex64 or complex128. When data type is complex64 or cpmplex128, hermitian should be set True. rcond(Tensor, optional): the tolerance value to determine when is a singular value zero. Defalut:1e-15. hermitian(bool, optional): indicates whether x is Hermitian if complex or symmetric if real. Default: False. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The tensor with same data type with x. it represents pseudo inverse of x. Its shape should be (*, n, m). Examples: .. code-block:: python import paddle x = paddle.arange(15).reshape((3, 5)).astype('float64') input = paddle.to_tensor(x) out = paddle.linalg.pinv(input) print(input) print(out) # input: # [[0. , 1. , 2. , 3. , 4. ], # [5. , 6. , 7. , 8. , 9. 
], # [10., 11., 12., 13., 14.]] # out: # [[-0.22666667, -0.06666667, 0.09333333], # [-0.12333333, -0.03333333, 0.05666667], # [-0.02000000, 0.00000000, 0.02000000], # [ 0.08333333, 0.03333333, -0.01666667], # [ 0.18666667, 0.06666667, -0.05333333]] # one can verify : x * out * x = x ; # or out * x * out = x ; """ if paddle.in_dynamic_mode(): if not hermitian: # combine svd and matmul op u, s, vt = _C_ops.svd(x, 'full_matrices', False) max_singular_val = _C_ops.reduce_max(s, 'dim', [-1], 'keep_dim', True, \ 'reduce_all', False) rcond = paddle.to_tensor(rcond, dtype=x.dtype) cutoff = rcond * max_singular_val y = float('inf') y = paddle.to_tensor(y, dtype=x.dtype) condition = s > cutoff cond_int = layers.cast(condition, s.dtype) cond_not_int = layers.cast(layers.logical_not(condition), s.dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st, _ = _C_ops.unsqueeze2(singular, 'axes', [-2]) dims = list(range(len(vt.shape))) perm = dims[:-2] + [dims[-1]] + [dims[-2]] v, _ = _C_ops.transpose2(vt, 'axis', perm) out_1 = v * st out_2 = _C_ops.matmul_v2(out_1, u, 'trans_x', False, 'trans_y', True) return out_2 else: # combine eigh and matmul op s, u = _C_ops.eigh(x, 'UPLO', 'L') s_abs = paddle.abs(s) max_singular_val = _C_ops.reduce_max(s_abs, 'dim', [-1], 'keep_dim', True, \ 'reduce_all', False) rcond = paddle.to_tensor(rcond, dtype=s.dtype) cutoff = rcond * max_singular_val y = float('inf') y = paddle.to_tensor(y, dtype=s.dtype) condition = s_abs > cutoff cond_int = layers.cast(condition, s.dtype) cond_not_int = layers.cast(layers.logical_not(condition), s.dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st, _ = _C_ops.unsqueeze2(singular, 'axes', [-2]) out_1 = u * st u_conj = _C_ops.conj(u) out_2 = _C_ops.matmul_v2(out_1, u_conj, 'trans_x', False, 'trans_y', True) return out_2 else: if not hermitian: helper = LayerHelper('pinv', **locals()) dtype = x.dtype check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'pinv') u = helper.create_variable_for_type_inference(dtype) s = helper.create_variable_for_type_inference(dtype) vt = helper.create_variable_for_type_inference(dtype) helper.append_op( type='svd', inputs={'X': [x]}, outputs={'U': u, 'VH': vt, 'S': s}, attrs={'full_matrices': False}, ) max_singular_val = helper.create_variable_for_type_inference(dtype) helper.append_op( type='reduce_max', inputs={'X': s}, outputs={'Out': max_singular_val}, attrs={'dim': [-1], 'keep_dim': True, 'reduce_all': False}) rcond = layers.fill_constant(shape=[1], value=rcond, dtype=dtype) cutoff = rcond * max_singular_val y = float('inf') y = layers.fill_constant(shape=[1], value=y, dtype=dtype) condition = s > cutoff cond_int = layers.cast(condition, dtype) cond_not_int = layers.cast(layers.logical_not(condition), dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st = helper.create_variable_for_type_inference(dtype=dtype) st_shape = helper.create_variable_for_type_inference(dtype=dtype) helper.append_op( type='unsqueeze2', inputs={'X': singular}, attrs={'axes': [-2]}, outputs={'Out': st, 'XShape': st_shape}) dims = list(range(len(vt.shape))) perm = dims[:-2] + [dims[-1]] + [dims[-2]] v = helper.create_variable_for_type_inference(dtype) v_shape = helper.create_variable_for_type_inference(dtype) helper.append_op( 
type='transpose2', inputs={'X': [vt]}, outputs={'Out': [v], 'XShape': [v_shape]}, attrs={'axis': perm}) out_1 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='elementwise_mul', inputs={'X': v, 'Y': st}, outputs={'Out': out_1}, attrs={'axis': -1, 'use_mkldnn': False}) out_1 = helper.append_activation(out_1) out_2 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='matmul_v2', inputs={'X': out_1, 'Y': u}, outputs={'Out': out_2}, attrs={'trans_x': False, 'trans_y': True}, ) return out_2 else: helper = LayerHelper('pinv', **locals()) dtype = x.dtype check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'pinv') if dtype == paddle.complex128: s_type = 'float64' elif dtype == paddle.complex64: s_type = 'float32' else: s_type = dtype u = helper.create_variable_for_type_inference(dtype) s = helper.create_variable_for_type_inference(s_type) helper.append_op( type='eigh', inputs={'X': x}, outputs={'Eigenvalues': s, 'Eigenvectors': u}, attrs={'UPLO': 'L'}) s_abs = helper.create_variable_for_type_inference(s_type) helper.append_op( type='abs', inputs={'X': s}, outputs={'Out': s_abs}) max_singular_val = helper.create_variable_for_type_inference(s_type) helper.append_op( type='reduce_max', inputs={'X': s_abs}, outputs={'Out': max_singular_val}, attrs={'dim': [-1], 'keep_dim': True, 'reduce_all': False}) rcond = layers.fill_constant(shape=[1], value=rcond, dtype=s_type) cutoff = rcond * max_singular_val y = float('inf') y = layers.fill_constant(shape=[1], value=y, dtype=s_type) condition = s_abs > cutoff cond_int = layers.cast(condition, s_type) cond_not_int = layers.cast(layers.logical_not(condition), s_type) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st = helper.create_variable_for_type_inference(dtype=s_type) st_shape = helper.create_variable_for_type_inference(dtype=s_type) helper.append_op( type='unsqueeze2', inputs={'X': singular}, attrs={'axes': [-2]}, outputs={'Out': st, 'XShape': st_shape}) out_1 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='elementwise_mul', inputs={'X': u, 'Y': st}, outputs={'Out': out_1}, attrs={'axis': -1, 'use_mkldnn': False}) out_1 = helper.append_activation(out_1) u_conj = helper.create_variable_for_type_inference(dtype) helper.append_op( type='conj', inputs={'X': u}, outputs={'Out': [u_conj]}) out_2 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='matmul_v2', inputs={'X': out_1, 'Y': u_conj}, outputs={'Out': out_2}, attrs={'trans_x': False, 'trans_y': True}, ) return out_2 def solve(x, y, name=None): r""" Computes the solution of a square system of linear equations with a unique solution for input 'X' and 'Y'. Let :math: `X` be a sqaure matrix or a batch of square matrices, :math:`Y` be a vector/matrix or a batch of vectors/matrices, the equation should be: .. math:: Out = X^-1 * Y Specifically, - This system of linear equations has one solution if and only if input 'X' is invertible. Args: x (Tensor): A square matrix or a batch of square matrices. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): A vector/matrix or a batch of vectors/matrices. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. name(str, optional): Name for the operation (optional, default is None). 
For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of a square system of linear equations with a unique solution for input 'x' and 'y'. Its data type should be the same as that of `x`. Examples: .. code-block:: python # a square system of linear equations: # 2*X0 + X1 = 9 # X0 + 2*X1 = 8 import paddle import numpy as np np_x = np.array([[3, 1],[1, 2]]) np_y = np.array([9, 8]) x = paddle.to_tensor(np_x, dtype="float64") y = paddle.to_tensor(np_y, dtype="float64") out = paddle.linalg.solve(x, y) print(out) # [2., 3.]) """ if paddle.in_dynamic_mode(): return _C_ops.solve(x, y) inputs = {"X": [x], "Y": [y]} helper = LayerHelper("solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type="solve", inputs={"X": x, "Y": y}, outputs={"Out": out}) return out def triangular_solve(x, y, upper=True, transpose=False, unitriangular=False, name=None): r""" Computes the solution of a system of equations with a triangular coefficient matrix `x` and multiple right-hand sides `y` . Input `x` and `y` is 2D matrices or batches of 2D matrices. If the inputs are batches, the outputs is also batches. Args: x (Tensor): The input triangular coefficient matrix. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. upper (bool, optional): Whether to solve the upper-triangular system of equations (default) or the lower-triangular system of equations. Default: True. transpose (bool, optional): whether `x` should be transposed before calculation. Default: False. unitriangular (bool, optional): whether `x` is unit triangular. If True, the diagonal elements of `x` are assumed to be 1 and not referenced from `x` . Default: False. name(str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of the system of equations. Its data type should be the same as that of `x`. Examples: .. code-block:: python # a square system of linear equations: # x1 + x2 + x3 = 0 # 2*x2 + x3 = -9 # -x3 = 5 import paddle import numpy as np x = paddle.to_tensor([[1, 1, 1], [0, 2, 1], [0, 0,-1]], dtype="float64") y = paddle.to_tensor([[0], [-9], [5]], dtype="float64") out = paddle.linalg.triangular_solve(x, y, upper=True) print(out) # [7, -2, -5] """ if paddle.in_dynamic_mode(): return _C_ops.triangular_solve(x, y, 'upper', upper, 'transpose', transpose, 'unitriangular', unitriangular) inputs = {"X": [x], "Y": [y]} helper = LayerHelper("triangular_solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'triangular_solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'triangular_solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='triangular_solve', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs={ 'upper': upper, 'transpose': transpose, 'unitriangular': unitriangular }) return out def cholesky_solve(x, y, upper=False, name=None): r""" Solves a linear system of equations A @ X = B, given A's Cholesky factor matrix u and matrix B. Input `x` and `y` is 2D matrices or batches of 2D matrices. 
If the inputs are batches, the outputs is also batches. Args: x (Tensor): The input matrix which is upper or lower triangular Cholesky factor of square matrix A. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. upper (bool, optional): whether to consider the Cholesky factor as a lower or upper triangular matrix. Default: False. name(str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of the system of equations. Its data type is the same as that of `x`. Examples: .. code-block:: python import paddle u = paddle.to_tensor([[1, 1, 1], [0, 2, 1], [0, 0,-1]], dtype="float64") b = paddle.to_tensor([[0], [-9], [5]], dtype="float64") out = paddle.linalg.cholesky_solve(b, u, upper=True) print(out) # [-2.5, -7, 9.5] """ if paddle.in_dynamic_mode(): return _C_ops.cholesky_solve(x, y, 'upper', upper) helper = LayerHelper("cholesky_solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'cholesky_solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'cholesky_solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='cholesky_solve', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs={'upper': upper}) return out def eigvalsh(x, UPLO='L', name=None): """ Computes the eigenvalues of a complex Hermitian (conjugate symmetric) or a real symmetric matrix. Args: x (Tensor): A tensor with shape :math:`[_, M, M]` , The data type of the input Tensor x should be one of float32, float64, complex64, complex128. UPLO(str, optional): Lower triangular part of a (‘L’, default) or the upper triangular part (‘U’). name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor eigenvalues in ascending order. Examples: .. code-block:: python import numpy as np import paddle x_data = np.array([[1, -2j], [2j, 5]]) x = paddle.to_tensor(x_data) out_value = paddle.eigvalsh(x, UPLO='L') print(out_value) #[0.17157288, 5.82842712] """ if paddle.in_dynamic_mode(): is_test = x.stop_gradient values, _ = _C_ops.eigvalsh(x, 'UPLO', UPLO, 'is_test', is_test) return values def __check_input(x, UPLO): x_shape = list(x.shape) if len(x.shape) < 2: raise ValueError( "Input(input) only support >=2 tensor, but received " "length of Input(input) is %s." % len(x.shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The input matrix must be batches of square matrices. But received x's dimention: {}". format(x_shape)) if UPLO != 'L' and UPLO != 'U': raise ValueError( "UPLO must be L or U. 
But received UPLO is: {}".format(UPLO)) __check_input(x, UPLO) helper = LayerHelper('eigvalsh', **locals()) check_variable_and_dtype(x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigvalsh') out_value = helper.create_variable_for_type_inference(dtype=x.dtype) out_vector = helper.create_variable_for_type_inference(dtype=x.dtype) is_test = x.stop_gradient helper.append_op( type='eigvalsh', inputs={'X': x}, outputs={'Eigenvalues': out_value, 'Eigenvectors': out_vector}, attrs={'UPLO': UPLO, 'is_test': is_test}) return out_value def lstsq(x, y, rcond=None, driver=None, name=None): """ Computes a solution to the least squares problem of a system of linear equations. Args: x (Tensor): A tensor with shape ``(*, M, N)`` , the data type of the input Tensor ``x`` should be one of float32, float64. y (Tensor): A tensor with shape ``(*, M, K)`` , the data type of the input Tensor ``y`` should be one of float32, float64. rcond(float, optional): The default value is None. A float pointing number used to determine the effective rank of ``x``. If ``rcond`` is None, it will be set to max(M, N) times the machine precision of x_dtype. driver(str, optional): The default value is None. The name of LAPACK method to be used. For CPU inputs the valid values are ‘gels’, ‘gelsy’, ‘gelsd, ‘gelss’. For CUDA input, the only valid driver is ‘gels’. If ``driver`` is None, ‘gelsy’ is used for CPU inputs and ‘gels’ for CUDA inputs. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tuple: A tuple of 4 Tensors which is (``solution``, ``residuals``, ``rank``, ``singular_values``). ``solution`` is a tensor with shape ``(*, N, K)``, meaning the least squares solution. ``residuals`` is a tensor with shape ``(*, K)``, meaning the squared residuals of the solutions, which is computed when M > N and every matrix in ``x`` is full-rank, otherwise return an empty tensor. ``rank`` is a tensor with shape ``(*)``, meaning the ranks of the matrices in ``x``, which is computed when ``driver`` in (‘gelsy’, ‘gelsd’, ‘gelss’), otherwise return an empty tensor. ``singular_values`` is a tensor with shape ``(*, min(M, N))``, meaning singular values of the matrices in ``x``, which is computed when ``driver`` in (‘gelsd’, ‘gelss’), otherwise return an empty tensor. Examples: .. code-block:: python import paddle paddle.set_device("cpu") x = paddle.to_tensor([[1, 3], [3, 2], [5, 6.]]) y = paddle.to_tensor([[3, 4, 6], [5, 3, 4], [1, 2, 1.]]) results = paddle.linalg.lstsq(x, y, driver="gelsd") print(results[0]) # [[ 0.78350395, -0.22165027, -0.62371236], # [-0.11340097, 0.78866047, 1.14948535]] print(results[1]) # [19.81443405, 10.43814468, 30.56185532]) print(results[2]) # 2 print(results[3]) # [9.03455734, 1.54167950] x = paddle.to_tensor([[10, 2, 3], [3, 10, 5], [5, 6, 12.]]) y = paddle.to_tensor([[4, 2, 9], [2, 0, 3], [2, 5, 3.]]) results = paddle.linalg.lstsq(x, y, driver="gels") print(results[0]) # [[ 0.39386186, 0.10230173, 0.93606132], # [ 0.10741687, -0.29028133, 0.11892585], # [-0.05115091, 0.51918161, -0.19948854]] print(results[1]) # [] """ device = paddle.get_device() if device == "cpu": if driver not in (None, "gels", "gelss", "gelsd", "gelsy"): raise ValueError( "Only support valid driver is 'gels', 'gelss', 'gelsd', 'gelsy' or None for CPU inputs. But got {}". 
format(driver)) driver = "gelsy" if driver is None else driver elif "gpu" in device: if driver not in (None, "gels"): raise ValueError( "Only support valid driver is 'gels' or None for CUDA inputs. But got {}". format(driver)) driver = "gels" if driver is None else driver else: raise RuntimeError("Only support lstsq api for CPU or CUDA device.") if x.dtype == y.dtype and x.dtype in (paddle.float32, paddle.float64): pass else: raise ValueError( "Only support x and y have the same dtype such as 'float32' and 'float64'." ) if rcond is None: if x.dtype == paddle.float32: rcond = 1e-7 * max(x.shape[-2], x.shape[-1]) elif x.dtype == paddle.float64: rcond = 1e-15 * max(x.shape[-2], x.shape[-1]) if paddle.in_dynamic_mode(): solution, rank, singular_values = _C_ops.lstsq(x, y, "rcond", rcond, "driver", driver) if x.shape[-2] > x.shape[-1]: matmul_out = _varbase_creator(dtype=x.dtype) _C_ops.matmul(x, solution, matmul_out, 'trans_x', False, 'trans_y', False) minus_out = _C_ops.elementwise_sub(matmul_out, y) pow_out = _C_ops.pow(minus_out, 'factor', 2) residuals = _C_ops.reduce_sum(pow_out, 'dim', [-2], 'keepdim', False, 'reduce_all', False) else: residuals = paddle.empty(shape=[0], dtype=x.dtype) if driver == "gels": rank = paddle.empty(shape=[0], dtype=paddle.int32) singular_values = paddle.empty(shape=[0], dtype=x.dtype) elif driver == "gelsy": singular_values = paddle.empty(shape=[0], dtype=x.dtype) return solution, residuals, rank, singular_values helper = LayerHelper('lstsq', **locals()) check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq') check_variable_and_dtype( y, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq') solution = helper.create_variable_for_type_inference(dtype=x.dtype) residuals = helper.create_variable_for_type_inference(dtype=x.dtype) rank = helper.create_variable_for_type_inference(dtype=paddle.int32) singular_values = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='lstsq', inputs={'X': x, 'Y': y}, outputs={ 'Solution': solution, 'Rank': rank, 'SingularValues': singular_values }, attrs={'rcond': rcond, 'driver': driver}) matmul_out = helper.create_variable_for_type_inference(dtype=x.dtype) minus_out = helper.create_variable_for_type_inference(dtype=x.dtype) pow_out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matmul_v2', inputs={'X': x, 'Y': solution}, outputs={'Out': matmul_out}, attrs={ 'trans_x': False, 'trans_y': False, }) helper.append_op( type='elementwise_sub', inputs={'X': matmul_out, 'Y': y}, outputs={'Out': minus_out}) helper.append_op( type='pow', inputs={'X': minus_out}, outputs={'Out': pow_out}, attrs={'factor': 2}) helper.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': residuals}, attrs={'dim': [-2], 'keep_dim': False, 'reduce_all': False}) if driver == "gels": rank = paddle.static.data(name='rank', shape=[0]) singular_values = paddle.static.data(name='singular_values', shape=[0]) elif driver == "gelsy": singular_values = paddle.static.data(name='singular_values', shape=[0]) return solution, residuals, rank, singular_values
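# Illustrative sketch (not part of the original module): the residuals that the
# lstsq paths above assemble via matmul_v2 / elementwise_sub / pow / reduce_sum
# are simply sum((x @ solution - y) ** 2) over the row axis, and are only
# populated when the system is overdetermined (M > N). The matrices below are
# made up for demonstration; the driver/device choice mirrors the docstring's
# own CPU example.
import paddle

paddle.set_device("cpu")
x = paddle.to_tensor([[1., 3.], [3., 2.], [5., 6.]])
y = paddle.to_tensor([[3., 4.], [5., 3.], [1., 2.]])
solution, residuals, rank, singular_values = paddle.linalg.lstsq(x, y, driver="gelsd")

# Recompute the squared residuals by hand and compare with the returned tensor.
manual = paddle.sum((paddle.matmul(x, solution) - y) ** 2, axis=-2)
print(paddle.allclose(manual, residuals))  # expected: True for this overdetermined case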
def eig(x, name=None): """ This API performs the eigenvalue decomposition of a square matrix or a batch of square matrices. .. note:: If the matrix is a Hermitian or a real symmetric matrix, please use :ref:`paddle.linalg.eigh` instead, which is much faster. If only eigenvalues is needed, please use :ref:`paddle.linalg.eigvals` instead. If the matrix is of any shape, please use :ref:`paddle.linalg.svd`. This API is only supported on CPU device. The output datatype is always complex for both real and complex input. Args: x (Tensor): A tensor with shape math:`[*, N, N]`, The data type of the x should be one of ``float32``, ``float64``, ``compplex64`` or ``complex128``. name (str, optional): The default value is `None`. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Eigenvalues(Tensors): A tensor with shape math:`[*, N]` refers to the eigen values. Eigenvectors(Tensors): A tensor with shape math:`[*, N, N]` refers to the eigen vectors. Examples: .. code-block:: python import paddle import numpy as np paddle.device.set_device("cpu") x_data = np.array([[1.6707249, 7.2249975, 6.5045543], [9.956216, 8.749598, 6.066444 ], [4.4251957, 1.7983172, 0.370647 ]]).astype("float32") x = paddle.to_tensor(x_data) w, v = paddle.linalg.eig(x) print(w) # Tensor(shape=[3, 3], dtype=complex128, place=CPUPlace, stop_gradient=False, # [[(-0.5061363550800655+0j) , (-0.7971760990842826+0j) , # (0.18518077798279986+0j)], # [(-0.8308237755993192+0j) , (0.3463813401919749+0j) , # (-0.6837005269141947+0j) ], # [(-0.23142567697893396+0j), (0.4944999840400175+0j) , # (0.7058765252952796+0j) ]]) print(v) # Tensor(shape=[3], dtype=complex128, place=CPUPlace, stop_gradient=False, # [ (16.50471283351188+0j) , (-5.5034820550763515+0j) , # (-0.21026087843552282+0j)]) """ if paddle.in_dynamic_mode(): w, v = _C_ops.eig(x) return w, v check_variable_and_dtype( x, 'X', ['float32', 'float64', 'complex64', 'complex128'], 'eig') helper = LayerHelper('eig', **locals()) w = helper.create_variable_for_type_inference(x.dtype) v = helper.create_variable_for_type_inference(x.dtype) inputs = {'X': x} outputs = {'Eigenvalues': w, 'Eigenvectors': v} helper.append_op(type='eig', inputs=inputs, outputs=outputs) return w, v
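# Illustrative sketch (not part of the original module): per the op wiring above
# (w -> 'Eigenvalues', v -> 'Eigenvectors'), paddle.linalg.eig returns the
# eigenvalues first and the eigenvectors second; the printed sample outputs in
# the docstring above appear to label the two the other way around. The check
# below verifies A @ v[:, i] == w[i] * v[:, i] on a small made-up matrix
# (eig only runs on CPU, hence the explicit device).
import numpy as np
import paddle

paddle.device.set_device("cpu")
A = paddle.to_tensor(np.array([[2., 1.], [1., 3.]], dtype="float32"))
w, v = paddle.linalg.eig(A)  # w: eigenvalues, shape [2]; v: eigenvectors, shape [2, 2]

A_np, w_np, v_np = A.numpy(), w.numpy(), v.numpy()
# Each eigenvector column should be scaled by its eigenvalue under A.
print(np.allclose(A_np @ v_np, v_np * w_np))  # expected: True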
2039
2102
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from ..fluid.layer_helper import LayerHelper from ..framework import _varbase_creator, _dygraph_tracer from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype from ..static import Variable from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode from ..fluid.layers import transpose, cast # noqa: F401 from ..fluid import layers import paddle from paddle.common_ops_import import core from paddle.common_ops_import import VarDesc from paddle import _C_ops __all__ = [] def matmul(x, y, transpose_x=False, transpose_y=False, name=None): """ Applies matrix multiplication to two tensors. `matmul` follows the complete broadcast rules, and its behavior is consistent with `np.matmul`. Currently, the input tensors' number of dimensions can be any, `matmul` can be used to achieve the `dot`, `matmul` and `batchmatmul`. The actual behavior depends on the shapes of :math:`x`, :math:`y` and the flag values of :attr:`transpose_x`, :attr:`transpose_y`. Specifically: - If a transpose flag is specified, the last two dimensions of the tensor are transposed. If the tensor is ndim-1 of shape, the transpose is invalid. If the tensor is ndim-1 of shape :math:`[D]`, then for :math:`x` it is treated as :math:`[1, D]`, whereas for :math:`y` it is the opposite: It is treated as :math:`[D, 1]`. The multiplication behavior depends on the dimensions of `x` and `y`. Specifically: - If both tensors are 1-dimensional, the dot product result is obtained. - If both tensors are 2-dimensional, the matrix-matrix product is obtained. - If the `x` is 1-dimensional and the `y` is 2-dimensional, a `1` is prepended to its dimension in order to conduct the matrix multiply. After the matrix multiply, the prepended dimension is removed. - If the `x` is 2-dimensional and `y` is 1-dimensional, the matrix-vector product is obtained. - If both arguments are at least 1-dimensional and at least one argument is N-dimensional (where N > 2), then a batched matrix multiply is obtained. If the first argument is 1-dimensional, a 1 is prepended to its dimension in order to conduct the batched matrix multiply and removed after. If the second argument is 1-dimensional, a 1 is appended to its dimension for the purpose of the batched matrix multiple and removed after. The non-matrix (exclude the last two dimensions) dimensions are broadcasted according the broadcast rule. For example, if input is a (j, 1, n, m) tensor and the other is a (k, m, p) tensor, out will be a (j, k, n, p) tensor. Args: x (Tensor): The input tensor which is a Tensor. y (Tensor): The input tensor which is a Tensor. transpose_x (bool): Whether to transpose :math:`x` before multiplication. transpose_y (bool): Whether to transpose :math:`y` before multiplication. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The output Tensor. Examples: .. 
code-block:: python import paddle import numpy as np # vector * vector x_data = np.random.random([10]).astype(np.float32) y_data = np.random.random([10]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [1] # matrix * vector x_data = np.random.random([10, 5]).astype(np.float32) y_data = np.random.random([5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10] # batched matrix * broadcasted vector x_data = np.random.random([10, 5, 2]).astype(np.float32) y_data = np.random.random([2]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 5] # batched matrix * batched matrix x_data = np.random.random([10, 5, 2]).astype(np.float32) y_data = np.random.random([10, 2, 5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 5, 5] # batched matrix * broadcasted matrix x_data = np.random.random([10, 1, 5, 2]).astype(np.float32) y_data = np.random.random([1, 3, 2, 5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 3, 5, 5] """ if in_dygraph_mode(): return _C_ops.final_state_matmul(x, y, transpose_x, transpose_y) if _in_legacy_dygraph(): op_type = 'matmul_v2' op = getattr(_C_ops, op_type) return op(x, y, 'trans_x', transpose_x, 'trans_y', transpose_y) attrs = { 'trans_x': transpose_x, 'trans_y': transpose_y, } def __check_input(x, y): var_names = {'x': x, 'y': y} for name, val in var_names.items(): check_variable_and_dtype( val, name, ['float16', 'float32', 'float64', 'complex64', 'complex128'], 'matmul') __check_input(x, y) helper = LayerHelper('matmul_v2', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matmul_v2', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs=attrs) return out def norm(x, p='fro', axis=None, keepdim=False, name=None): """ Returns the matrix norm (Frobenius) or vector norm (the 1-norm, the Euclidean or 2-norm, and in general the p-norm for p > 0) of a given tensor. .. note:: This norm API is different from `numpy.linalg.norm`. This api supports high-order input tensors (rank >= 3), and certain axis need to be pointed out to calculate the norm. But `numpy.linalg.norm` only supports 1-D vector or 2-D matrix as input tensor. For p-order matrix norm, this api actually treats matrix as a flattened vector to calculate the vector norm, NOT REAL MATRIX NORM. Args: x (Tensor): The input tensor could be N-D tensor, and the input data type could be float32 or float64. p (float|string, optional): Order of the norm. Supported values are `fro`, `0`, `1`, `2`, `inf`, `-inf` and any positive real number yielding the corresponding p-norm. Not supported: ord < 0 and nuclear norm. Default value is `fro`. axis (int|list|tuple, optional): The axis on which to apply norm operation. If axis is int or list(int)/tuple(int) with only one element, the vector norm is computed over the axis. If `axis < 0`, the dimension to norm operation is rank(input) + axis. If axis is a list(int)/tuple(int) with two elements, the matrix norm is computed over the axis. Defalut value is `None`. keepdim (bool, optional): Whether to reserve the reduced dimension in the output Tensor. 
The result tensor will have fewer dimension than the :attr:`input` unless :attr:`keepdim` is true, default value is False. name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: results of norm operation on the specified axis of input tensor, it's data type is the same as input's Tensor. Examples: .. code-block:: python import paddle import numpy as np shape=[2, 3, 4] np_input = np.arange(24).astype('float32') - 12 np_input = np_input.reshape(shape) x = paddle.to_tensor(np_input) #[[[-12. -11. -10. -9.] [ -8. -7. -6. -5.] [ -4. -3. -2. -1.]] # [[ 0. 1. 2. 3.] [ 4. 5. 6. 7.] [ 8. 9. 10. 11.]]] # compute frobenius norm along last two dimensions. out_fro = paddle.linalg.norm(x, p='fro', axis=[0,1]) # out_fro.numpy() [17.435596 16.911535 16.7332 16.911535] # compute 2-order vector norm along last dimension. out_pnorm = paddle.linalg.norm(x, p=2, axis=-1) #out_pnorm.numpy(): [[21.118711 13.190906 5.477226] # [ 3.7416575 11.224972 19.131126]] # compute 2-order norm along [0,1] dimension. out_pnorm = paddle.linalg.norm(x, p=2, axis=[0,1]) #out_pnorm.numpy(): [17.435596 16.911535 16.7332 16.911535] # compute inf-order norm out_pnorm = paddle.linalg.norm(x, p=np.inf) #out_pnorm.numpy() = [12.] out_pnorm = paddle.linalg.norm(x, p=np.inf, axis=0) #out_pnorm.numpy(): [[12. 11. 10. 9.] [8. 7. 6. 7.] [8. 9. 10. 11.]] # compute -inf-order norm out_pnorm = paddle.linalg.norm(x, p=-np.inf) #out_pnorm.numpy(): [0.] out_pnorm = paddle.linalg.norm(x, p=-np.inf, axis=0) #out_pnorm.numpy(): [[0. 1. 2. 3.] [4. 5. 6. 5.] [4. 3. 2. 1.]] """ def frobenius_norm(input, dim=None, keepdim=False, name=None): """ The frobenius norm OP is to calculate the frobenius norm of certain two dimensions of Tensor `input`. Args: input (Variable): Tensor, data type float32, float64. dim (list, optional): None for last two dimensions. keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False. """ if dim is not None and not (isinstance(dim, list) and len(dim) == 2): raise ValueError( "The dim of frobenius norm op should be None or two elements list!" ) if paddle.in_dynamic_mode(): if dim is None: return _C_ops.frobenius_norm(input, 'keep_dim', keepdim, 'reduce_all', True) return _C_ops.frobenius_norm(input, 'dim', dim, 'keep_dim', keepdim, 'reduce_all', False) attrs = {'dim': dim, 'keep_dim': keepdim, 'reduce_all': False} if dim is None: attrs['reduce_all'] = True check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'frobenius_norm') helper = LayerHelper('frobenius_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op( type='frobenius_norm', inputs={'X': input}, outputs={'Out': out}, attrs=attrs) return out def vector_norm(input, porder=None, axis=None, keepdim=False, asvector=False, name=None): """ Calculate the p-order vector norm for certain dimension of Tensor `input`. Args: input (Variable): Tensor, data type float32, float64. porder (float, optional): None for porder=2.0. axis (int, optional): None for last dimension. keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False. 
""" if paddle.in_dynamic_mode(): if axis is None: axis = -1 return _C_ops.p_norm(input, 'porder', porder, 'axis', axis, 'keepdim', keepdim, 'asvector', asvector) if porder is not None: check_type(porder, 'porder', (float, int), 'p_norm') if axis is not None: check_type(axis, 'axis', (int), 'p_norm') check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'p_norm') attrs = { 'axis': axis if axis is not None else -1, 'porder': float(porder) if porder is not None else 2.0, 'keepdim': keepdim, 'asvector': asvector, 'epsilon': 1e-12, } helper = LayerHelper('p_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op( type='p_norm', inputs={'X': input}, outputs={'Out': out}, attrs=attrs) return out def inf_norm(input, porder=None, axis=axis, keepdim=False, asvector=False, name=None): helper = LayerHelper('frobenius_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op(type='abs', inputs={'X': input}, outputs={'Out': out}) reduce_out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) reduce_all = True if axis == None or axis == [] or asvector == True else False axis = axis if axis != None and axis != [] else [0] reduce_type = 'reduce_max' if porder == np.float( 'inf') else 'reduce_min' helper.append_op( type=reduce_type, inputs={'X': out}, outputs={'Out': reduce_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) return reduce_out def p_matrix_norm(input, porder=1., axis=axis, keepdim=False, name=None): """ NOTE: This function actually treats the matrix as flattened vector to calculate vector norm instead of matrix norm. """ block = LayerHelper('norm', **locals()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) abs_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='abs', inputs={'X': input}, outputs={'Out': abs_out}) pow_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='pow', inputs={'X': abs_out}, outputs={'Out': pow_out}, attrs={'factor': porder}) sum_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': sum_out}, attrs={ 'dim': axis, 'keep_dim': keepdim, 'reduce_all': True if axis is None else False }) porder block.append_op( type='pow', inputs={'X': sum_out}, outputs={'Out': out}, attrs={'factor': float(1. / porder)}) return out if axis is None and p is not None: if isinstance(p, str): if p == "fro": return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name) else: raise ValueError( "only valid string values are 'fro', found {}".format(p)) elif isinstance(p, (int, float)): return vector_norm( x, porder=p, axis=axis, keepdim=keepdim, asvector=True, name=name) else: raise ValueError("only valid p type is string or float, found {}". 
format(type(p))) if isinstance(axis, tuple): axis = list(axis) if isinstance(axis, list) and len(axis) == 1: axis = axis[0] #calculate vector norm, where axis is int or list with only one integer if isinstance(axis, int): if isinstance(p, str): if p == "fro": return vector_norm( x, porder=2, axis=axis, keepdim=keepdim, asvector=False, name=name) else: raise ValueError( "only valid string values are 'fro', found {}".format(p)) elif isinstance(p, (int, float)): return vector_norm( x, axis=axis, porder=p, keepdim=keepdim, asvector=False, name=name) else: raise ValueError( "unspport p for p-order vector norm. except float, found {}". format(p)) #calculate matrix norm, where axis is list with two integers elif isinstance(axis, list) and len(axis) == 2: if p == "fro": return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name) elif p == np.inf or p == -np.inf: return inf_norm(x, porder=p, axis=axis, keepdim=keepdim, name=name) elif p == 0: raise ValueError( "just suport axis type int or list (length of list <=1) if p = 0, found {}". format(axis)) else: return p_matrix_norm( x, porder=p, axis=axis, keepdim=keepdim, name=name) else: raise ValueError( "except axis type int or list (length of list <=2), found {}". format(axis)) def dist(x, y, p=2, name=None): r""" This OP returns the p-norm of (x - y). It is not a norm in a strict sense, only as a measure of distance. The shapes of x and y must be broadcastable. The definition is as follows, for details, please refer to the `numpy's broadcasting <https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`_: - Each input has at least one dimension. - Match the two input dimensions from back to front, the dimension sizes must either be equal, one of them is 1, or one of them does not exist. Where, z = x - y, the shapes of x and y are broadcastable, then the shape of z can be obtained as follows: 1. If the number of dimensions of x and y are not equal, prepend 1 to the dimensions of the tensor with fewer dimensions. For example, The shape of x is [8, 1, 6, 1], the shape of y is [7, 1, 5], prepend 1 to the dimension of y. x (4-D Tensor): 8 x 1 x 6 x 1 y (4-D Tensor): 1 x 7 x 1 x 5 2. Determine the size of each dimension of the output z: choose the maximum value from the two input dimensions. z (4-D Tensor): 8 x 7 x 6 x 5 If the number of dimensions of the two inputs are the same, the size of the output can be directly determined in step 2. When p takes different values, the norm formula is as follows: When p = 0, defining $0^0=0$, the zero-norm of z is simply the number of non-zero elements of z. .. math:: ||z||_{0}=\lim_{p \\rightarrow 0}\sum_{i=1}^{m}|z_i|^{p} When p = inf, the inf-norm of z is the maximum element of z. .. math:: ||z||_\infty=\max_i |z_i| When p = -inf, the negative-inf-norm of z is the minimum element of z. .. math:: ||z||_{-\infty}=\min_i |z_i| Otherwise, the p-norm of z follows the formula, .. math:: ||z||_{p}=(\sum_{i=1}^{m}|z_i|^p)^{\\frac{1}{p}} Args: x (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64. y (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64. p (float, optional): The norm to be computed, its data type is float32 or float64. Default: 2. Returns: Tensor: Tensor that is the p-norm of (x - y). Examples: .. code-block:: python import paddle import numpy as np x = paddle.to_tensor(np.array([[3, 3],[3, 3]]), "float32") y = paddle.to_tensor(np.array([[3, 3],[3, 1]]), "float32") out = paddle.dist(x, y, 0) print(out) # out = [1.] out = paddle.dist(x, y, 2) print(out) # out = [2.] 
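            # (Illustrative addition, not in the original example; uses the same x and y.)
            # With p=1 the result is the sum of absolute element-wise differences:
            # |x - y| = [[0, 0], [0, 2]], so the 1-norm is 2.
            out = paddle.dist(x, y, 1)
            print(out)               # out = [2.]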
out = paddle.dist(x, y, float("inf")) print(out) # out = [2.] out = paddle.dist(x, y, float("-inf")) print(out) # out = [0.] """ check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'dist') check_variable_and_dtype(y, 'dtype', ['float32', 'float64'], 'dist') check_type(p, 'p', (float, int), 'dist') helper = LayerHelper("dist", **locals()) out = helper.create_variable_for_type_inference(x.dtype) inputs = {"X": [x], "Y": [y]} outputs = {'Out': [out]} attrs = {"p": float(p)} helper.append_op( type='dist', inputs=inputs, outputs={'Out': out}, attrs=attrs) return out def cond(x, p=None, name=None): """ Computes the condition number of a matrix or batches of matrices with respect to a matrix norm ``p``. Args: x (Tensor): The input tensor could be tensor of shape ``(*, m, n)`` where ``*`` is zero or more batch dimensions for ``p`` in ``(2, -2)``, or of shape ``(*, n, n)`` where every matrix is invertible for any supported ``p``. And the input data type could be ``float32`` or ``float64``. p (float|string, optional): Order of the norm. Supported values are `fro`, `nuc`, `1`, `-1`, `2`, `-2`, `inf`, `-inf`. Default value is `None`, meaning that the order of the norm is `2`. name (str, optional): The default value is `None`. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: computing results of condition number, its data type is the same as input Tensor ``x``. Examples: .. code-block:: python import paddle import numpy as np x = paddle.to_tensor([[1., 0, -1], [0, 1, 0], [1, 0, 1]]) # compute conditional number when p is None out = paddle.linalg.cond(x) # out.numpy() [1.4142135] # compute conditional number when order of the norm is 'fro' out_fro = paddle.linalg.cond(x, p='fro') # out_fro.numpy() [3.1622777] # compute conditional number when order of the norm is 'nuc' out_nuc = paddle.linalg.cond(x, p='nuc') # out_nuc.numpy() [9.2426405] # compute conditional number when order of the norm is 1 out_1 = paddle.linalg.cond(x, p=1) # out_1.numpy() [2.] # compute conditional number when order of the norm is -1 out_minus_1 = paddle.linalg.cond(x, p=-1) # out_minus_1.numpy() [1.] # compute conditional number when order of the norm is 2 out_2 = paddle.linalg.cond(x, p=2) # out_2.numpy() [1.4142135] # compute conditional number when order of the norm is -1 out_minus_2 = paddle.linalg.cond(x, p=-2) # out_minus_2.numpy() [0.70710677] # compute conditional number when order of the norm is inf out_inf = paddle.linalg.cond(x, p=np.inf) # out_inf.numpy() [2.] # compute conditional number when order of the norm is -inf out_minus_inf = paddle.linalg.cond(x, p=-np.inf) # out_minus_inf.numpy() [1.] 
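            # (Illustrative cross-check, not part of the original example.) For p=2 the
            # condition number equals the ratio of the largest to the smallest singular
            # value; paddle.linalg.svd is used here only for verification.
            u, s, vh = paddle.linalg.svd(x)
            print(s.max() / s.min())
            # ~1.4142135, matching out_2 above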
a = paddle.to_tensor(np.random.randn(2, 4, 4).astype('float32')) # a.numpy() # [[[ 0.14063153 -0.996288 0.7996131 -0.02571543] # [-0.16303636 1.5534962 -0.49919784 -0.04402903] # [-1.1341571 -0.6022629 0.5445269 0.29154757] # [-0.16816919 -0.30972657 1.7521842 -0.5402487 ]] # [[-0.58081484 0.12402827 0.7229862 -0.55046535] # [-0.15178485 -1.1604939 0.75810957 0.30971205] # [-0.9669573 1.0940945 -0.27363303 -0.35416734] # [-1.216529 2.0018666 -0.7773689 -0.17556527]]] a_cond_fro = paddle.linalg.cond(a, p='fro') # a_cond_fro.numpy() [31.572273 28.120834] b = paddle.to_tensor(np.random.randn(2, 3, 4).astype('float64')) # b.numpy() # [[[ 1.61707487 0.46829144 0.38130416 0.82546736] # [-1.72710298 0.08866375 -0.62518804 0.16128892] # [-0.02822879 -1.67764516 0.11141444 0.3220113 ]] # [[ 0.22524372 0.62474921 -0.85503233 -1.03960523] # [-0.76620689 0.56673047 0.85064753 -0.45158196] # [ 1.47595418 2.23646462 1.5701758 0.10497519]]] b_cond_2 = paddle.linalg.cond(b, p=2) # b_cond_2.numpy() [3.30064451 2.51976252] """ def mat_norm(input, porder=1., axis=None): """ NOTE: Calculate the matrix norm of a square matrix or batches of square matrices, when porder is in (1, -1, inf, -inf) """ reduce_all = True if axis is None or axis == [] else False axis = axis if axis != None and axis != [] else [0] keepdim = False if paddle.in_dynamic_mode(): abs_out = _C_ops.abs(input) sum_out = _C_ops.reduce_sum(abs_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == 1 or porder == np.inf: return _C_ops.reduce_max(sum_out, 'dim', [-1], 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == -1 or porder == -np.inf: return _C_ops.reduce_min(sum_out, 'dim', [-1], 'keepdim', keepdim, 'reduce_all', reduce_all) block = LayerHelper('norm', **locals()) abs_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='abs', inputs={'X': input}, outputs={'Out': abs_out}) block.append_op( type='reduce_sum', inputs={'X': abs_out}, outputs={'Out': sum_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) if porder == 1 or porder == np.inf: block.append_op( type='reduce_max', inputs={'X': sum_out}, outputs={'Out': out}, attrs={ 'dim': [-1], 'keep_dim': keepdim, 'reduce_all': reduce_all }) if porder == -1 or porder == -np.inf: block.append_op( type='reduce_min', inputs={'X': sum_out}, outputs={'Out': out}, attrs={ 'dim': [-1], 'keep_dim': keepdim, 'reduce_all': reduce_all }) return out def fro_norm(input, porder=2, axis=[-1]): """ NOTE: Calculate the frobenius norm of a square matrix or batches of square matrices. """ reduce_all = True if axis is None or axis == [] else False keepdim = False if paddle.in_dynamic_mode(): pow_out = _C_ops.pow(input, 'factor', porder) sum_out_1 = _C_ops.reduce_sum(pow_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) sum_out_2 = _C_ops.reduce_sum(sum_out_1, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) return _C_ops.pow(sum_out_2, 'factor', float(1. 
/ porder)) block = LayerHelper('norm', **locals()) pow_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out_1 = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out_2 = block.create_variable_for_type_inference( dtype=block.input_dtype()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='pow', inputs={'X': input}, outputs={'Out': pow_out}, attrs={'factor': porder}) block.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': sum_out_1}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='reduce_sum', inputs={'X': sum_out_1}, outputs={'Out': sum_out_2}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='pow', inputs={'X': sum_out_2}, outputs={'Out': out}, attrs={'factor': float(1. / porder)}) return out def svd_norm(input, porder, axis=[-1]): """ NOTE: Calculate the matrix norm, which is related to singular values, of a matrix or batches of matrices, including nuclear norm, 2-norm and (-2)-norm. """ reduce_all = True if axis is None or axis == [] else False keepdim = False u, s, vh = svd(input, full_matrices=False) if paddle.in_dynamic_mode(): if porder == "nuc": return _C_ops.reduce_sum(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) max_out = _C_ops.reduce_max(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) min_out = _C_ops.reduce_min(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == 2: return _C_ops.elementwise_div(max_out, min_out, 'aixs', axis, 'use_mkldnn', False) if porder == -2: return _C_ops.elementwise_div(min_out, max_out, 'aixs', axis, 'use_mkldnn', False) block = LayerHelper('norm', **locals()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) if porder == "nuc": block.append_op( type='reduce_sum', inputs={'X': s}, outputs={'Out': out}, attrs={ 'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all }) return out max_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) min_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='reduce_max', inputs={'X': s}, outputs={'Out': max_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='reduce_min', inputs={'X': s}, outputs={'Out': min_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) if porder == 2: block.append_op( type='elementwise_div', inputs={'X': max_out, 'Y': min_out}, outputs={'Out': out}, attrs={'aixs': axis, 'use_mkldnn': False}) return out if porder == -2: block.append_op( type='elementwise_div', inputs={'X': min_out, 'Y': max_out}, outputs={'Out': out}, attrs={'aixs': axis, 'use_mkldnn': False}) return out def empty_tensor(input, shape): if paddle.in_dynamic_mode(): return input.reshape(shape) raise ValueError("only support x is nonempty tensor in static mode") x_shape = list(x.shape) if not len(x_shape) >= 2: raise ValueError("input should be a matrix or batches of matrices, " + "but the dimention of received input is {}".format( len(x_shape))) if p == None: p = 2 x_size = 0 if (0 in x_shape) else 1 if p in ("fro", "nuc", 1, -1, np.inf, -np.inf): if x_shape[len(x_shape) - 1] == x_shape[len(x_shape) - 2]: if x_size == 0: return empty_tensor(x, x_shape[:-2]) x_inv = x.inverse() if p == "fro": return fro_norm(x) * fro_norm(x_inv) if p == "nuc": return svd_norm(x, p) * svd_norm(x_inv, p) if p in (1, -1): return mat_norm( x, 
porder=p, axis=[-2]) * mat_norm( x_inv, porder=p, axis=[-2]) if p in (np.inf, -np.inf): return mat_norm( x, porder=p, axis=[-1]) * mat_norm( x_inv, porder=p, axis=[-1]) else: raise ValueError("only support p is {} when input is a ".format(p) + "square matrix or batches of square matrices") elif p in (2, -2): if x_size == 0: return empty_tensor(x, x_shape[:-2]) return svd_norm(x, porder=p) else: raise ValueError( "unsupported {} for p, only supporting ('fro', 'nuc', ".format( p) + "1, -1, 2, -2, inf, -inf) or none") def dot(x, y, name=None): """ This operator calculates inner product for vectors. .. note:: Support 1-d and 2-d Tensor. When it is 2d, the first dimension of this matrix is the batch dimension, which means that the vectors of multiple batches are dotted. Parameters: x(Tensor): 1-D or 2-D ``Tensor``. Its dtype should be ``float32``, ``float64``, ``int32``, ``int64`` y(Tensor): 1-D or 2-D ``Tensor``. Its dtype soulde be ``float32``, ``float64``, ``int32``, ``int64`` name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. Details: :ref:`api_guide_Name` Returns: Tensor: the calculated result Tensor. Examples: .. code-block:: python import paddle import numpy as np x_data = np.random.uniform(0.1, 1, [10]).astype(np.float32) y_data = np.random.uniform(1, 3, [10]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.dot(x, y) print(z) """ op_type = 'dot' # skip var type check in dygraph mode to improve efficiency if paddle.in_dynamic_mode(): op = getattr(_C_ops, op_type) return op(x, y) assert x is not None, 'x cannot be None in {}'.format(op_type) assert y is not None, 'y cannot be None in {}'.format(op_type) check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], op_type) check_variable_and_dtype(y, 'y', ['float32', 'float64', 'int32', 'int64'], op_type) helper = LayerHelper(op_type, **locals()) if name is None: out = helper.create_variable_for_type_inference(dtype=x.dtype) else: out = helper.create_variable( name=name, dtype=x.dtype, persistable=False) helper.append_op( type="dot", inputs={'X': x, 'Y': y}, attrs={}, outputs={"Out": out}) return out def cov(x, rowvar=True, ddof=True, fweights=None, aweights=None, name=None): """ Estimate the covariance matrix of the input variables, given data and weights. A covariance matrix is a square matrix, indicate the covariance of each pair variables in the input matrix. For example, for an N-dimensional samples X=[x1,x2,…xN]T, then the covariance matrix element Cij is the covariance of xi and xj. The element Cii is the variance of xi itself. Parameters: x(Tensor): A N-D(N<=2) Tensor containing multiple variables and observations. By default, each row of x represents a variable. Also see rowvar below. rowvar(Bool, optional): If rowvar is True (default), then each row represents a variable, with observations in the columns. Default: True ddof(Bool, optional): If ddof=True will return the unbiased estimate, and ddof=False will return the simple average. Default: True fweights(Tensor, optional): 1-D Tensor of integer frequency weights; The number of times each observation vector should be repeated. Default: None aweights(Tensor, optional): 1-D Tensor of observation vector weights. How important of the observation vector, larger data means this element is more important. Default: None name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. 
Details: :ref:`api_guide_Name` Returns: Tensor: The covariance matrix Tensor of the variables. Examples: .. code-block:: python import paddle xt = paddle.rand((3,4)) paddle.linalg.cov(xt) ''' Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, [[0.07918842, 0.06127326, 0.01493049], [0.06127326, 0.06166256, 0.00302668], [0.01493049, 0.00302668, 0.01632146]]) ''' """ op_type = 'cov' if len(x.shape) > 2 or len(x.shape) < 1: raise ValueError( "Input(x) only support N-D (1<=N<=2) tensor in cov, but received " "length of Input(input) is %s." % len(x.shape)) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cov') nx = x if len(x.shape) == 1: nx = x.reshape((1, -1)) if not rowvar and nx.shape[0] != 1: nx = nx.t() w = None observation_num = nx.shape[1] if fweights is not None: w = fweights.astype(nx.dtype) if len(w.shape) > 1: raise ValueError( "Input(fweights) only support N-D (N<=1) tensor in cov, but received " "shape of Input(input) is %s." % len(fweights.shape)) if fweights.shape[0] != observation_num: raise ValueError( "The number of Input(fweights) should equal to x's dim[1]: {}, but received " "size of Input(fweights) is {}.".format(observation_num, fweights.shape[0])) if fweights.min() < 0: raise ValueError( "The value of Input(fweights) cannot be negtive, but received " "min of Input(fweights) is {}.".format(fweights.min())) if not paddle.all(fweights == paddle.round(fweights.astype('float64'))): raise ValueError("Input(fweights) must be integer ") if aweights is not None: aw = aweights.astype(nx.dtype) if len(aw.shape) > 1: raise ValueError( "Input(aweights) only support N-D (N<=1) tensor in cov, but received " "length of Input(input) is %s." % len(aweights.shape)) check_variable_and_dtype(aweights, 'dtype', ['float32', 'float64'], 'cov') if aweights.shape[0] != observation_num: raise ValueError( "The number of Input(aweights) should equal to x's dim[1]: {}, but received " "size of Input(aweights) is {}.".format(observation_num, aweights.shape[0])) if aweights.min() < 0: raise ValueError( "The value of Input(aweights) cannot be negtive, but received " "min of Input(aweights) is {}.".format(aweights.min())) if w is not None: w = w * aw else: w = aw w_sum = paddle.to_tensor(observation_num, dtype=nx.dtype) if fweights is not None or aweights is not None: w_sum = w.sum() if w_sum.item() == 0: raise ValueError("The sum of weights is zero, can't be normalized.") if w is not None: nx_w = nx * w avg = (nx_w).sum(axis=1) / w_sum else: avg = nx.sum(axis=1) / w_sum nx_w = nx if w is not None and aweights is not None and ddof == True: norm_factor = w_sum - (w * aweights).sum() / w_sum else: norm_factor = w_sum - ddof if norm_factor <= 0: norm_factor = paddle.to_tensor(0, dtype=nx.dtype) nx = nx - avg.unsqueeze(1) xxt = paddle.mm(nx, nx_w.t().conj()) cov = paddle.divide(xxt, norm_factor).squeeze() return cov def t(input, name=None): """ Transpose <=2-D tensor. 0-D and 1-D tensors are returned as it is and 2-D tensor is equal to the paddle.transpose function which perm dimensions set 0 and 1. Args: input (Tensor): The input Tensor. It is a N-D (N<=2) Tensor of data types float16, float32, float64, int32. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` Returns: Tensor: A transposed n-D Tensor, with data type being float16, float32, float64, int32, int64. For Example: .. 
code-block:: text # Example 1 (0-D tensor) x = tensor([0.79]) paddle.t(x) = tensor([0.79]) # Example 2 (1-D tensor) x = tensor([0.79, 0.84, 0.32]) paddle.t(x) = tensor([0.79, 0.84, 0.32]) # Example 3 (2-D tensor) x = tensor([0.79, 0.84, 0.32], [0.64, 0.14, 0.57]) paddle.t(x) = tensor([0.79, 0.64], [0.84, 0.14], [0.32, 0.57]) Examples: .. code-block:: python import paddle x = paddle.ones(shape=[2, 3], dtype='int32') x_transposed = paddle.t(x) print(x_transposed.shape) # [3, 2] """ if len(input.shape) > 2: raise ValueError( "Input(input) only support N-D (N<=2) tensor, but received " "length of Input(input) is %s. Perhaps you can use paddle." "tensor.transpose() instead." % len(input.shape)) if paddle.in_dynamic_mode(): if len(input.shape) == 1: return input # 2-D tensor perm = [1, 0] out, _ = _C_ops.transpose2(input, 'axis', perm) return out check_variable_and_dtype( input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'], 'transpose') helper = LayerHelper('t', **locals()) out = helper.create_variable_for_type_inference(input.dtype) input_shape = helper.create_variable_for_type_inference(input.dtype) if len(input.shape) == 1: out = input else: helper.append_op( type='transpose2', inputs={'X': [input]}, outputs={'Out': [out], 'XShape': [input_shape]}, attrs={'axis': [1, 0]}) return out def cross(x, y, axis=None, name=None): """ Computes the cross product between two tensors along an axis. Inputs must have the same shape, and the length of their axes should be equal to 3. If `axis` is not given, it defaults to the first axis found with the length 3. Args: x (Tensor): The first input tensor. y (Tensor): The second input tensor. axis (int, optional): The axis along which to compute the cross product. It defaults to the first axis found with the length 3. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor. A Tensor with same data type as `x`. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0], [3.0, 3.0, 3.0]]) y = paddle.to_tensor([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]) z1 = paddle.cross(x, y) # [[-1. -1. -1.] # [ 2. 2. 2.] # [-1. -1. -1.]] z2 = paddle.cross(x, y, axis=1) # [[0. 0. 0.] # [0. 0. 0.] # [0. 0. 0.]] """ if in_dygraph_mode(): return _C_ops.final_state_cross(x, y, axis) else: if _in_legacy_dygraph(): if axis is not None: return _C_ops.cross(x, y, 'dim', axis) else: return _C_ops.cross(x, y) else: helper = LayerHelper("cross", **locals()) out = helper.create_variable_for_type_inference(x.dtype) attrs = dict() attrs['dim'] = axis helper.append_op( type='cross', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs=attrs) return out def cholesky(x, upper=False, name=None): r""" Computes the Cholesky decomposition of one symmetric positive-definite matrix or batches of symmetric positive-definite matrice. If `upper` is `True`, the decomposition has the form :math:`A = U^{T}U` , and the returned matrix :math:`U` is upper-triangular. Otherwise, the decomposition has the form :math:`A = LL^{T}` , and the returned matrix :math:`L` is lower-triangular. Args: x (Tensor): The input tensor. Its shape should be `[*, M, M]`, where * is zero or more batch dimensions, and matrices on the inner-most 2 dimensions all should be symmetric positive-definite. Its data type should be float32 or float64. upper (bool): The flag indicating whether to return upper or lower triangular matrices. Default: False. 
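        A quick sanity check of the result (illustrative only, built from existing paddle ops;
        `out` and `x` refer to the tensors in the Examples below):

        .. code-block:: python

            # for upper=False, L @ L.T should reproduce x up to numerical error
            recon = paddle.matmul(out, paddle.transpose(out, [1, 0]))
            print(recon)  # approximately equal to x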
    Returns:
        Tensor: A Tensor with the same shape and data type as `x`. It represents
        triangular matrices generated by Cholesky decomposition.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            a = np.random.rand(3, 3)
            a_t = np.transpose(a, [1, 0])
            x_data = np.matmul(a, a_t) + 1e-03
            x = paddle.to_tensor(x_data)
            out = paddle.linalg.cholesky(x, upper=False)
            print(out)
            # [[1.190523   0.         0.        ]
            #  [0.9906703  0.27676893 0.        ]
            #  [1.25450498 0.05600871 0.06400121]]
    """
    if paddle.in_dynamic_mode():
        return _C_ops.cholesky(x, "upper", upper)
    check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cholesky')
    check_type(upper, 'upper', bool, 'cholesky')
    helper = LayerHelper('cholesky', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='cholesky',
        inputs={'X': [x]},
        outputs={'Out': out},
        attrs={'upper': upper})
    return out


def matrix_rank(x, tol=None, hermitian=False, name=None):
    r"""
    Computes the rank of a matrix.

    The rank of a matrix is the number of singular values that are greater than the specified
    `tol` threshold when hermitian=False, or the number of eigenvalues in absolute value that
    are greater than the specified `tol` threshold when hermitian=True.

    Args:
        x (Tensor): The input tensor. Its shape should be `[..., m, n]`, where `...` is zero or
            more batch dimensions. If `x` is a batch of matrices then the output has the same
            batch dimensions. The data type of `x` should be float32 or float64.
        tol (float|Tensor, optional): the tolerance value. Default: None. If `tol` is not
            specified, and `sigma` is the largest singular value (or eigenvalues in absolute
            value), and `eps` is the epsilon value for the dtype of `x`, then `tol` is computed
            with the formula `tol = sigma * max(m, n) * eps`. Note that if `x` is a batch of
            matrices, `tol` is computed this way for every batch.
        hermitian (bool, optional): indicates whether `x` is Hermitian. Default: False. When
            hermitian=True, `x` is assumed to be Hermitian, enabling a more efficient method for
            finding eigenvalues, but `x` is not checked inside the function. Instead, only the
            lower triangular part of the matrix is used in the computation.
        name (str, optional): Name for the operation (optional, default is None). For more
            information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: Rank of tensor x.

    Examples:
        ..
code-block:: python import paddle a = paddle.eye(10) b = paddle.linalg.matrix_rank(a) print(b) # b = [10] c = paddle.ones(shape=[3, 4, 5, 5]) d = paddle.linalg.matrix_rank(c, tol=0.01, hermitian=True) print(d) # d = [[1, 1, 1, 1], # [1, 1, 1, 1], # [1, 1, 1, 1]] """ if paddle.in_dynamic_mode(): if tol is None: tol_tensor = None tol_attr = 0.0 use_default_tol = True elif isinstance(tol, Variable): if tol.dtype != x.dtype: tol_tensor = cast(tol, x.dtype) else: tol_tensor = tol tol_attr = 0.0 use_default_tol = False else: tol_tensor = None tol_attr = float(tol) use_default_tol = False return _C_ops.matrix_rank(x, tol_tensor, "tol", tol_attr, 'hermitian', hermitian, 'use_default_tol', use_default_tol) inputs = {} attrs = {} check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'matrix_rank') inputs['X'] = x if tol is None: attrs['use_default_tol'] = True elif isinstance(tol, Variable): check_variable_and_dtype(tol, 'tol', ['float32'], 'matrix_rank') attrs['use_default_tol'] = False if tol.dtype != x.dtype: inputs['TolTensor'] = cast(tol, x.dtype) else: inputs['TolTensor'] = tol else: check_type(tol, 'tol', float, 'matrix_rank') attrs['use_default_tol'] = False attrs['tol'] = tol check_type(hermitian, 'hermitian', bool, 'matrix_rank') attrs['hermitian'] = hermitian helper = LayerHelper('matrix_rank', **locals()) out = helper.create_variable_for_type_inference(dtype='int32') helper.append_op( type='matrix_rank', inputs=inputs, outputs={'Out': out}, attrs=attrs) return out def bmm(x, y, name=None): """ Applies batched matrix multiplication to two tensors. Both of the two input tensors must be three-dementional and share the same batch size. if x is a (b, m, k) tensor, y is a (b, k, n) tensor, the output will be a (b, m, n) tensor. Args: x (Tensor): The input Tensor. y (Tensor): The input Tensor. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The product Tensor. Examples: .. code-block:: python import paddle # In imperative mode: # size x: (2, 2, 3) and y: (2, 3, 2) x = paddle.to_tensor([[[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]], [[3.0, 3.0, 3.0], [4.0, 4.0, 4.0]]]) y = paddle.to_tensor([[[1.0, 1.0],[2.0, 2.0],[3.0, 3.0]], [[4.0, 4.0],[5.0, 5.0],[6.0, 6.0]]]) out = paddle.bmm(x, y) #output size: (2, 2, 2) #output value: #[[[6.0, 6.0],[12.0, 12.0]],[[45.0, 45.0],[60.0, 60.0]]] out_np = out.numpy() """ x_shape = x.shape y_shape = y.shape if not len(x_shape) == len(y_shape) == 3: raise ValueError( "x and y should be 3-dimensional. But received x's dimention: {}, y's dimention: {}". format(x_shape, y_shape)) if x_shape[2] != y_shape[1]: raise ValueError( "x's width must be equal with y's height. But received x's shape: {}, y's shape: {}". format(x_shape, y_shape)) if x_shape[0] != y_shape[0]: raise ValueError( "x's batch (shape[0]) must be equal with y's batch (shape[0]). But received x's shape: {}, y's shape: {}". format(x_shape, y_shape)) if paddle.in_dynamic_mode(): return _C_ops.bmm(x, y) helper = LayerHelper('bmm', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type='bmm', inputs={'X': x, 'Y': y}, outputs={'Out': out}) return out def histogram(input, bins=100, min=0, max=0, name=None): """ Computes the histogram of a tensor. The elements are sorted into equal width bins between min and max. If min and max are both zero, the minimum and maximum values of the data are used. Args: input (Tensor): A Tensor(or LoDTensor) with shape :math:`[N_1, N_2,..., N_k]` . 
The data type of the input Tensor should be float32, float64, int32, int64. bins (int): number of histogram bins min (int): lower end of the range (inclusive) max (int): upper end of the range (inclusive) Returns: Tensor: data type is int64, shape is (nbins,). Examples: .. code-block:: python import paddle inputs = paddle.to_tensor([1, 2, 1]) result = paddle.histogram(inputs, bins=4, min=0, max=3) print(result) # [0, 2, 1, 0] """ if paddle.in_dynamic_mode(): return _C_ops.histogram(input, "bins", bins, "min", min, "max", max) helper = LayerHelper('histogram', **locals()) check_variable_and_dtype( input, 'X', ['int32', 'int64', 'float32', 'float64'], 'histogram') out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64) helper.append_op( type='histogram', inputs={'X': input}, outputs={'Out': out}, attrs={'bins': bins, 'min': min, 'max': max}) return out def bincount(x, weights=None, minlength=0, name=None): """ Computes frequency of each value in the input tensor. Args: x (Tensor): A Tensor with non-negative integer. Should be 1-D tensor. weights (Tensor, optional): Weight for each value in the input tensor. Should have the same shape as input. Default is None. minlength (int, optional): Minimum number of bins. Should be non-negative integer. Default is 0. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor of frequency. Examples: .. code-block:: python import paddle x = paddle.to_tensor([1, 2, 1, 4, 5]) result1 = paddle.bincount(x) print(result1) # [0, 2, 1, 0, 1, 1] w = paddle.to_tensor([2.1, 0.4, 0.1, 0.5, 0.5]) result2 = paddle.bincount(x, weights=w) print(result2) # [0., 2.19999981, 0.40000001, 0., 0.50000000, 0.50000000] """ if x.dtype not in [paddle.int32, paddle.int64]: raise TypeError("Elements in Input(x) should all be integers") if paddle.in_dynamic_mode(): return _C_ops.bincount(x, weights, "minlength", minlength) helper = LayerHelper('bincount', **locals()) check_variable_and_dtype(x, 'X', ['int32', 'int64'], 'bincount') if weights is not None: check_variable_and_dtype(weights, 'Weights', ['int32', 'int64', 'float32', 'float64'], 'bincount') out = helper.create_variable_for_type_inference(dtype=weights.dtype) else: out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='bincount', inputs={'X': x, 'Weights': weights}, outputs={'Out': out}, attrs={'minlength': minlength}) return out def mv(x, vec, name=None): """ Performs a matrix-vector product of the matrix x and the vector vec. Args: x (Tensor): A tensor with shape :math:`[M, N]` , The data type of the input Tensor x should be one of float32, float64. vec (Tensor): A tensor with shape :math:`[N]` , The data type of the input Tensor x should be one of float32, float64. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor which is producted by x and vec. Examples: .. 
code-block:: python # x: [M, N], vec: [N] # paddle.mv(x, vec) # out: [M] import numpy as np import paddle x_data = np.array([[2, 1, 3], [3, 0, 1]]).astype("float64") x = paddle.to_tensor(x_data) vec_data = np.array([3, 5, 1]) vec = paddle.to_tensor(vec_data).astype("float64") out = paddle.mv(x, vec) """ if in_dygraph_mode(): return _C_ops.final_state_mv(x, vec) else: if _in_legacy_dygraph(): out = _C_ops.mv(x, vec) return out else: def __check_input(x, vec): var_names = {'x': x, 'vec': vec} for name, val in var_names.items(): check_variable_and_dtype(val, name, ['float32', 'float64'], 'mv') x_shape = list(x.shape) vec_shape = list(vec.shape) if len(x_shape) != 2: raise ValueError( "x should be 2-dimensional. But received x's dimention: {}". format(x_shape)) if len(vec_shape) != 1: raise ValueError( "vec should be 1-dimensional. But received vec's dimention: {}". format(vec_shape)) __check_input(x, vec) helper = LayerHelper('mv', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='mv', inputs={'X': x, 'Vec': vec}, outputs={'Out': out}) return out def det(x, name=None): """ Calculates determinant value of a square matrix or batches of square matrices. Args: x (Tensor): input (Tensor): the input matrix of size `(n, n)` or the batch of matrices of size `(*, n, n)` where `*` is one or more batch dimensions. Returns: y (Tensor):the determinant value of a square matrix or batches of square matrices. Examples: .. code-block:: python import paddle x = paddle.randn([3,3,3]) A = paddle.linalg.det(x) print(A) # [ 0.02547996, 2.52317095, -6.15900707]) """ if paddle.in_dynamic_mode(): return _C_ops.determinant(x) check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'det') input_shape = list(x.shape) assert len(input_shape) >= 2, \ "The x must be at least 2-dimensional, " \ "but received Input x's dimensional: %s.\n" % \ len(input_shape) assert (input_shape[-1] == input_shape[-2]), \ "Expect squared input," \ "but received %s by %s matrix.\n" \ %(input_shape[-2], input_shape[-1]) \ helper = LayerHelper('determinant', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='determinant', inputs={'Input': [x]}, outputs={'Out': [out]}) return out def slogdet(x, name=None): """ Calculates the sign and natural logarithm of the absolute value of a square matrix's or batches square matrices' determinant. The determinant can be computed with ``sign * exp(logabsdet) Supports input of float, double Note that for matrices that have zero determinant, this returns ``(0, -inf)`` Args: x (Tensor): the batch of matrices of size :math:`(*, n, n)` where math:`*` is one or more batch dimensions. Returns: y (Tensor): A tensor containing the sign of the determinant and the natural logarithm of the absolute value of determinant, respectively. Examples: .. code-block:: python import paddle x = paddle.randn([3,3,3]) A = paddle.linalg.slogdet(x) print(A) # [[ 1. , 1. , -1. 
], # [-0.98610914, -0.43010661, -0.10872950]]) """ if paddle.in_dynamic_mode(): return _C_ops.slogdeterminant(x) check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'slogdet') input_shape = list(x.shape) assert len(input_shape) >= 2, \ "The x must be at least 2-dimensional, " \ "but received Input x's dimensional: %s.\n" % \ len(input_shape) assert (input_shape[-1] == input_shape[-2]), \ "Expect squared input," \ "but received %s by %s matrix.\n" \ %(input_shape[-2], input_shape[-1]) \ helper = LayerHelper('slogdeterminant', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='slogdeterminant', inputs={'Input': [x]}, outputs={'Out': [out]}) return out def svd(x, full_matrices=False, name=None): r""" Computes the singular value decomposition of one matrix or a batch of regular matrices. Let :math:`X` be the input matrix or a batch of input matrices, the output should satisfies: .. math:: X = U * diag(S) * VT Args: x (Tensor): The input tensor. Its shape should be `[..., N, M]`, where `...` is zero or more batch dimensions. N and M can be arbitraty positive number. Note that if x is sigular matrices, the grad is numerical instable. The data type of x should be float32 or float64. full_matrices (bool): A flag to control the behavor of svd. If full_matrices = True, svd op will compute full U and V matrics, which means shape of U is `[..., N, N]`, shape of V is `[..., M, M]`. K = min(M, N). If full_matrices = False, svd op will use a economic method to store U and V. which means shape of U is `[..., N, K]`, shape of V is `[..., M, K]`. K = min(M, N). name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tuple of 3 tensors: (U, S, VH). VH is the conjugate transpose of V. S is the singlar value vectors of matrics with shape `[..., K]` Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [1.0, 3.0], [4.0, 6.0]]).astype('float64') x = x.reshape([3, 2]) u, s, vh = paddle.linalg.svd(x) print (u) #U = [[ 0.27364809, -0.21695147 ], # [ 0.37892198, -0.87112408 ], # [ 0.8840446 , 0.44053933 ]] print (s) #S = [8.14753743, 0.78589688] print (vh) #VT= [[ 0.51411221, 0.85772294], # [ 0.85772294, -0.51411221]] # one can verify : U * S * VT == X # U * UH == I # V * VH == I """ if paddle.in_dynamic_mode(): return _C_ops.svd(x, 'full_matrices', full_matrices) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'svd') check_type(full_matrices, 'full_matrices', bool, 'svd') helper = LayerHelper('svd', **locals()) u = helper.create_variable_for_type_inference(dtype=x.dtype) vh = helper.create_variable_for_type_inference(dtype=x.dtype) s = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['full_matrices'] = full_matrices helper.append_op( type='svd', inputs={'X': [x]}, outputs={'U': u, 'VH': vh, 'S': s}, attrs=attrs, ) return u, s, vh def matrix_power(x, n, name=None): r""" Computes the n-th power of a square matrix or a batch of square matrices. Let :math:`X` be a sqaure matrix or a batch of square matrices, :math:`n` be an exponent, the equation should be: .. math:: Out = X ^ {n} Specifically, - If `n > 0`, it returns the matrix or a batch of matrices raised to the power of `n`. - If `n = 0`, it returns the identity matrix or a batch of identity matrices. - If `n < 0`, it returns the inverse of each matrix (if invertible) raised to the power of `abs(n)`. 
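    For example (an illustrative identity only; it assumes ``paddle.linalg.inv`` is available
    and is not how this op computes the result internally):

    .. code-block:: python

        # with the same `x` as in the Examples below, a negative exponent is the
        # inverse raised to abs(n), up to floating-point error
        inv_x = paddle.linalg.inv(x)
        # should match paddle.linalg.matrix_power(x, -2)
        print(paddle.matmul(inv_x, inv_x))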
Args: x (Tensor): A square matrix or a batch of square matrices to be raised to power `n`. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. n (int): The exponent. It can be any positive, negative integer or zero. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The n-th power of the matrix (or the batch of matrices) `x`. Its data type should be the same as that of `x`. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1, 2, 3], [1, 4, 9], [1, 8, 27]], dtype='float64') print(paddle.linalg.matrix_power(x, 2)) # [[6. , 34. , 102.], # [14. , 90. , 282.], # [36. , 250., 804.]] print(paddle.linalg.matrix_power(x, 0)) # [[1., 0., 0.], # [0., 1., 0.], # [0., 0., 1.]] print(paddle.linalg.matrix_power(x, -2)) # [[ 12.91666667, -12.75000000, 2.83333333 ], # [-7.66666667 , 8. , -1.83333333 ], # [ 1.80555556 , -1.91666667 , 0.44444444 ]] """ if paddle.in_dynamic_mode(): return _C_ops.matrix_power(x, "n", n) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'matrix_power') check_type(n, 'n', int, 'matrix_power') helper = LayerHelper('matrix_power', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matrix_power', inputs={'X': x}, outputs={'Out': out}, attrs={'n': n}) return out def qr(x, mode="reduced", name=None): r""" Computes the QR decomposition of one matrix or batches of matrice (backward is unsupported now). Args: x (Tensor): The input tensor. Its shape should be `[..., M, N]`, where ... is zero or more batch dimensions. M and N can be arbitrary positive number. The data type of x should be float32 or float64. mode (str, optional): A flag to control the behavior of qr, the default is "reduced". Suppose x's shape is `[..., M, N]` and denoting `K = min(M, N)`: If mode = "reduced", qr op will return reduced Q and R matrices, which means Q's shape is `[..., M, K]` and R's shape is `[..., K, N]`. If mode = "complete", qr op will return complete Q and R matrices, which means Q's shape is `[..., M, M]` and R's shape is `[..., M, N]`. If mode = "r", qr op will only return reduced R matrix, which means R's shape is `[..., K, N]`. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: If mode = "reduced" or mode = "complete", qr will return a two tensor-tuple, which represents Q and R. If mode = "r", qr will return a tensor which represents R. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') q, r = paddle.linalg.qr(x) print (q) print (r) # Q = [[-0.16903085, 0.89708523], # [-0.50709255, 0.27602622], # [-0.84515425, -0.34503278]]) # R = [[-5.91607978, -7.43735744], # [ 0. 
, 0.82807867]]) # one can verify : X = Q * R ; """ if paddle.in_dynamic_mode(): q, r = _C_ops.qr(x, 'mode', mode) if mode == "r": return r else: return q, r check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'qr') check_type(mode, 'mode', str, 'qr') helper = LayerHelper('qr', **locals()) q = helper.create_variable_for_type_inference(dtype=x.dtype) r = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['mode'] = mode helper.append_op( type='qr', inputs={'X': [x]}, outputs={'Q': q, 'R': r}, attrs=attrs) if mode == "r": return r else: return q, r def lu(x, pivot=True, get_infos=False, name=None): r""" Computes the LU factorization of an N-D(N>=2) matrix x. Returns the LU factorization(inplace x) and Pivots. low triangular matrix L and upper triangular matrix U are combined to a single LU matrix. Pivoting is done if pivot is set to True. P mat can be get by pivots: # ones = eye(rows) #eye matrix of rank rows # for i in range(cols): # swap(ones[i], ones[pivots[i]]) # return ones Args: X (Tensor): the tensor to factor of N-dimensions(N>=2). pivot (bool, optional): controls whether pivoting is done. Default: True. get_infos (bool, optional): if set to True, returns an info IntTensor. Default: False. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: factorization (Tensor): LU matrix, the factorization of input X. pivots (IntTensor): the pivots of size(∗(N-2), min(m,n)). `pivots` stores all the intermediate transpositions of rows. The final permutation `perm` could be reconstructed by this, details refer to upper example. infos (IntTensor, optional): if `get_infos` is `True`, this is a tensor of size (∗(N-2)) where non-zero values indicate whether factorization for the matrix or each minibatch has succeeded or failed. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') lu,p,info = paddle.linalg.lu(x, get_infos=True) # >>> lu: # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0.20000000, 0.80000000], # [0.60000000, 0.50000000]]) # >>> p # Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # [3, 3]) # >>> info # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # 0) P,L,U = paddle.linalg.lu_unpack(lu,p) # >>> P # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[0., 1., 0.], # [0., 0., 1.], # [1., 0., 0.]]), # >>> L # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[1. , 0. ], # [0.20000000, 1. ], # [0.60000000, 0.50000000]]), # >>> U # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0. 
, 0.80000000]])) # one can verify : X = P @ L @ U ; """ if paddle.in_dynamic_mode(): LU, Piv, Info = _C_ops.lu(x, 'pivots', pivot) if get_infos: return LU, Piv, Info else: return LU, Piv check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu') helper = LayerHelper('lu', **locals()) lu = helper.create_variable_for_type_inference(dtype=x.dtype) p = helper.create_variable_for_type_inference(dtype='int') info = helper.create_variable_for_type_inference(dtype='int') attrs = dict() attrs['pivots'] = pivot helper.append_op( type='lu', inputs={'X': x}, outputs={'Out': lu, 'Pivots': p, 'Infos': info}, attrs=attrs) if get_infos: return lu, p, info else: return lu, p def lu_unpack(x, y, unpack_ludata=True, unpack_pivots=True, name=None): r""" Unpack L U and P to single matrix tensor . unpack L and U matrix from LU, unpack permutation matrix P from Pivtos . P mat can be get by pivots: # ones = eye(rows) #eye matrix of rank rows # for i in range(cols): # swap(ones[i], ones[pivots[i]]) Args: x (Tensor): The LU tensor get from paddle.linalg.lu, which is combined by L and U. y (Tensor): Pivots get from paddle.linalg.lu. unpack_ludata (bool,optional): whether to unpack L and U from x. Default: True. unpack_pivots (bool, optional): whether to unpack permutation matrix P from Pivtos. Default: True. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: P (Tensor): Permutation matrix P of lu factorization. L (Tensor): The lower triangular matrix tensor of lu factorization. U (Tensor): The upper triangular matrix tensor of lu factorization. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') lu,p,info = paddle.linalg.lu(x, get_infos=True) # >>> lu: # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0.20000000, 0.80000000], # [0.60000000, 0.50000000]]) # >>> p # Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # [3, 3]) # >>> info # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # 0) P,L,U = paddle.linalg.lu_unpack(lu,p) # >>> P # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[0., 1., 0.], # [0., 0., 1.], # [1., 0., 0.]]), # >>> L # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[1. , 0. ], # [0.20000000, 1. ], # [0.60000000, 0.50000000]]), # >>> U # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0. , 0.80000000]])) # one can verify : X = P @ L @ U ; """ if paddle.in_dynamic_mode(): P, L, U = _C_ops.lu_unpack(x, y, 'unpack_ludata', unpack_ludata, 'unpack_pivots', unpack_pivots) return P, L, U check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu_unpack') helper = LayerHelper('lu_unpack', **locals()) p = helper.create_variable_for_type_inference(dtype=x.dtype) l = helper.create_variable_for_type_inference(dtype=x.dtype) u = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['unpack_ludata'] = unpack_ludata attrs['unpack_pivots'] = unpack_pivots helper.append_op( type='lu_unpack', inputs={'X': x, 'Pivots': y}, outputs={'Pmat': p, 'L': l, 'U': u}, attrs=attrs) return p, l, u def eig(x, name=None): """ This API performs the eigenvalue decomposition of a square matrix or a batch of square matrices. .. 
note:: If the matrix is a Hermitian or a real symmetric matrix, please use :ref:`paddle.linalg.eigh` instead, which is much faster. If only eigenvalues is needed, please use :ref:`paddle.linalg.eigvals` instead. If the matrix is of any shape, please use :ref:`paddle.linalg.svd`. This API is only supported on CPU device. The output datatype is always complex for both real and complex input. Args: x (Tensor): A tensor with shape math:`[*, N, N]`, The data type of the x should be one of ``float32``, ``float64``, ``compplex64`` or ``complex128``. name (str, optional): The default value is `None`. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Eigenvalues(Tensors): A tensor with shape math:`[*, N]` refers to the eigen values. Eigenvectors(Tensors): A tensor with shape math:`[*, N, N]` refers to the eigen vectors. Examples: .. code-block:: python import paddle import numpy as np paddle.device.set_device("cpu") x_data = np.array([[1.6707249, 7.2249975, 6.5045543], [9.956216, 8.749598, 6.066444 ], [4.4251957, 1.7983172, 0.370647 ]]).astype("float32") x = paddle.to_tensor(x_data) w, v = paddle.linalg.eig(x) print(w) # Tensor(shape=[3, 3], dtype=complex128, place=CPUPlace, stop_gradient=False, # [[(-0.5061363550800655+0j) , (-0.7971760990842826+0j) , # (0.18518077798279986+0j)], # [(-0.8308237755993192+0j) , (0.3463813401919749+0j) , # (-0.6837005269141947+0j) ], # [(-0.23142567697893396+0j), (0.4944999840400175+0j) , # (0.7058765252952796+0j) ]]) print(v) # Tensor(shape=[3], dtype=complex128, place=CPUPlace, stop_gradient=False, # [ (16.50471283351188+0j) , (-5.5034820550763515+0j) , # (-0.21026087843552282+0j)]) """ if paddle.in_dynamic_mode(): w, v = _C_ops.eig(x) return w, v check_variable_and_dtype( x, 'X', ['float32', 'float64', 'complex64', 'complex128'], 'eig') helper = LayerHelper('eig', **locals()) w = helper.create_variable_for_type_inference(x.dtype) v = helper.create_variable_for_type_inference(x.dtype) inputs = {'X': x} outputs = {'Eigenvalues': w, 'Eigenvectors': v} helper.append_op(type='eig', inputs=inputs, outputs=outputs) return w, v def eigvals(x, name=None): """ Compute the eigenvalues of one or more general matrices. Warning: The gradient kernel of this operator does not yet developed. If you need back propagation through this operator, please replace it with paddle.linalg.eig. Args: x (Tensor): A square matrix or a batch of square matrices whose eigenvalues will be computed. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32, float64, complex64, or complex128. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: A tensor containing the unsorted eigenvalues which has the same batch dimensions with `x`. The eigenvalues are complex-valued even when `x` is real. Examples: .. 
code-block:: python import paddle paddle.set_device("cpu") paddle.seed(1234) x = paddle.rand(shape=[3, 3], dtype='float64') # [[0.02773777, 0.93004224, 0.06911496], # [0.24831591, 0.45733623, 0.07717843], # [0.48016702, 0.14235102, 0.42620817]]) print(paddle.linalg.eigvals(x)) # [(-0.27078833542132674+0j), (0.29962280156230725+0j), (0.8824477020120244+0j)] #complex128 """ check_variable_and_dtype(x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigvals') x_shape = list(x.shape) if len(x_shape) < 2: raise ValueError( "The dimension of Input(x) should be at least 2, but received x's dimention = {}, x's shape = {}". format(len(x_shape), x_shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The last two dimensions of Input(x) should be equal, but received x's shape = {}". format(x_shape)) if paddle.in_dynamic_mode(): return _C_ops.eigvals(x) helper = LayerHelper('eigvals', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type='eigvals', inputs={'X': x}, outputs={'Out': out}) return out def multi_dot(x, name=None): """ Multi_dot is an operator that calculates multiple matrix multiplications. Supports inputs of float16(only GPU support), float32 and float64 dtypes. This function does not support batched inputs. The input tensor in [x] must be 2-D except for the first and last can be 1-D. If the first tensor is a 1-D vector of shape(n, ) it is treated as row vector of shape(1, n), similarly if the last tensor is a 1D vector of shape(n, ), it is treated as a column vector of shape(n, 1). If the first and last tensor are 2-D matrix, then the output is also 2-D matrix, otherwise the output is a 1-D vector. Multi_dot will select the lowest cost multiplication order for calculation. The cost of multiplying two matrices with shapes (a, b) and (b, c) is a * b * c. Given matrices A, B, C with shapes (20, 5), (5, 100), (100, 10) respectively, we can calculate the cost of different multiplication orders as follows: - Cost((AB)C) = 20x5x100 + 20x100x10 = 30000 - Cost(A(BC)) = 5x100x10 + 20x5x10 = 6000 In this case, multiplying B and C first, then multiply A, which is 5 times faster than sequential calculation. Args: x ([Tensor]): The input tensors which is a list Tensor. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The output Tensor. Examples: .. 
code-block:: python import paddle import numpy as np # A * B A_data = np.random.random([3, 4]).astype(np.float32) B_data = np.random.random([4, 5]).astype(np.float32) A = paddle.to_tensor(A_data) B = paddle.to_tensor(B_data) out = paddle.linalg.multi_dot([A, B]) print(out.numpy().shape) # [3, 5] # A * B * C A_data = np.random.random([10, 5]).astype(np.float32) B_data = np.random.random([5, 8]).astype(np.float32) C_data = np.random.random([8, 7]).astype(np.float32) A = paddle.to_tensor(A_data) B = paddle.to_tensor(B_data) C = paddle.to_tensor(C_data) out = paddle.linalg.multi_dot([A, B, C]) print(out.numpy().shape) # [10, 7] """ if paddle.in_dynamic_mode(): return _C_ops.multi_dot(x) check_type(x, 'x', (list, tuple), 'multi_dot') for id, item in enumerate(x): check_variable_and_dtype(item, 'x[' + str(id) + ']', ['float16', 'float32', 'float64'], 'multi_dot') if item.dtype != x[0].dtype: raise TypeError( "All the Tensors in the input must have the same data type.") helper = LayerHelper('multi_dot', **locals()) dtype = helper.input_dtype(input_param_name='x') out = helper.create_variable_for_type_inference(dtype) helper.append_op(type='multi_dot', inputs={"X": x}, outputs={"Out": out}) return out def eigh(x, UPLO='L', name=None): """ Compute the eigenvalues and eigenvectors of a complex Hermitian (conjugate symmetric) or a real symmetric matrix. Args: x (Tensor): A tensor with shape :math:`[*, N, N]` , The data type of the input Tensor x should be one of float32, float64, complex64, complex128. UPLO(str, optional): (string, default 'L'), 'L' represents the lower triangular matrix, "'U' represents the upper triangular matrix.". name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: out_value(Tensor): A Tensor with shape [*, N] and data type of float32 and float64. The eigenvalues of eigh op. out_vector(Tensor): A Tensor with shape [*, N, N] and data type of float32,float64,complex64 and complex128. The eigenvectors of eigh op. Examples: .. code-block:: python import numpy as np import paddle x_data = np.array([[1, -2j], [2j, 5]]) x = paddle.to_tensor(x_data) out_value, out_vector = paddle.linalg.eigh(x, UPLO='L') print(out_value) #[0.17157288, 5.82842712] print(out_vector) #[(-0.9238795325112867+0j), (-0.3826834323650898+0j)], #[ 0.3826834323650898j , -0.9238795325112867j ]] """ if paddle.in_dynamic_mode(): return _C_ops.eigh(x, 'UPLO', UPLO) def __check_input(x, UPLO): x_shape = list(x.shape) if len(x.shape) < 2: raise ValueError( "Input(input) only support >=2 tensor, but received " "length of Input(input) is %s." % len(x.shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The input matrix must be batches of square matrices. But received x's dimention: {}". format(x_shape)) if UPLO != 'L' and UPLO != 'U': raise ValueError( "UPLO must be L or U. 
But received UPLO is: {}".format(UPLO)) __check_input(x, UPLO) helper = LayerHelper('eigh', **locals()) check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigh') out_value = helper.create_variable_for_type_inference(dtype=x.dtype) out_vector = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='eigh', inputs={'X': x}, outputs={'Eigenvalues': out_value, 'Eigenvectors': out_vector}, attrs={'UPLO': UPLO}) return out_value, out_vector def pinv(x, rcond=1e-15, hermitian=False, name=None): r""" Calculate pseudo inverse via SVD(singular value decomposition) of one matrix or batches of regular matrix. .. math:: if hermitian == False: x = u * s * vt (SVD) out = v * 1/s * ut else: x = u * s * ut (eigh) out = u * 1/s * u.conj().transpose(-2,-1) If x is hermitian or symmetric matrix, svd will be replaced with eigh. Args: x(Tensor): The input tensor. Its shape should be (*, m, n) where * is zero or more batch dimensions. m and n can be arbitraty positive number. The data type of x should be float32 or float64 or complex64 or complex128. When data type is complex64 or cpmplex128, hermitian should be set True. rcond(Tensor, optional): the tolerance value to determine when is a singular value zero. Defalut:1e-15. hermitian(bool, optional): indicates whether x is Hermitian if complex or symmetric if real. Default: False. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The tensor with same data type with x. it represents pseudo inverse of x. Its shape should be (*, n, m). Examples: .. code-block:: python import paddle x = paddle.arange(15).reshape((3, 5)).astype('float64') input = paddle.to_tensor(x) out = paddle.linalg.pinv(input) print(input) print(out) # input: # [[0. , 1. , 2. , 3. , 4. ], # [5. , 6. , 7. , 8. , 9. 
], # [10., 11., 12., 13., 14.]] # out: # [[-0.22666667, -0.06666667, 0.09333333], # [-0.12333333, -0.03333333, 0.05666667], # [-0.02000000, 0.00000000, 0.02000000], # [ 0.08333333, 0.03333333, -0.01666667], # [ 0.18666667, 0.06666667, -0.05333333]] # one can verify : x * out * x = x ; # or out * x * out = x ; """ if paddle.in_dynamic_mode(): if not hermitian: # combine svd and matmul op u, s, vt = _C_ops.svd(x, 'full_matrices', False) max_singular_val = _C_ops.reduce_max(s, 'dim', [-1], 'keep_dim', True, \ 'reduce_all', False) rcond = paddle.to_tensor(rcond, dtype=x.dtype) cutoff = rcond * max_singular_val y = float('inf') y = paddle.to_tensor(y, dtype=x.dtype) condition = s > cutoff cond_int = layers.cast(condition, s.dtype) cond_not_int = layers.cast(layers.logical_not(condition), s.dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st, _ = _C_ops.unsqueeze2(singular, 'axes', [-2]) dims = list(range(len(vt.shape))) perm = dims[:-2] + [dims[-1]] + [dims[-2]] v, _ = _C_ops.transpose2(vt, 'axis', perm) out_1 = v * st out_2 = _C_ops.matmul_v2(out_1, u, 'trans_x', False, 'trans_y', True) return out_2 else: # combine eigh and matmul op s, u = _C_ops.eigh(x, 'UPLO', 'L') s_abs = paddle.abs(s) max_singular_val = _C_ops.reduce_max(s_abs, 'dim', [-1], 'keep_dim', True, \ 'reduce_all', False) rcond = paddle.to_tensor(rcond, dtype=s.dtype) cutoff = rcond * max_singular_val y = float('inf') y = paddle.to_tensor(y, dtype=s.dtype) condition = s_abs > cutoff cond_int = layers.cast(condition, s.dtype) cond_not_int = layers.cast(layers.logical_not(condition), s.dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st, _ = _C_ops.unsqueeze2(singular, 'axes', [-2]) out_1 = u * st u_conj = _C_ops.conj(u) out_2 = _C_ops.matmul_v2(out_1, u_conj, 'trans_x', False, 'trans_y', True) return out_2 else: if not hermitian: helper = LayerHelper('pinv', **locals()) dtype = x.dtype check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'pinv') u = helper.create_variable_for_type_inference(dtype) s = helper.create_variable_for_type_inference(dtype) vt = helper.create_variable_for_type_inference(dtype) helper.append_op( type='svd', inputs={'X': [x]}, outputs={'U': u, 'VH': vt, 'S': s}, attrs={'full_matrices': False}, ) max_singular_val = helper.create_variable_for_type_inference(dtype) helper.append_op( type='reduce_max', inputs={'X': s}, outputs={'Out': max_singular_val}, attrs={'dim': [-1], 'keep_dim': True, 'reduce_all': False}) rcond = layers.fill_constant(shape=[1], value=rcond, dtype=dtype) cutoff = rcond * max_singular_val y = float('inf') y = layers.fill_constant(shape=[1], value=y, dtype=dtype) condition = s > cutoff cond_int = layers.cast(condition, dtype) cond_not_int = layers.cast(layers.logical_not(condition), dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st = helper.create_variable_for_type_inference(dtype=dtype) st_shape = helper.create_variable_for_type_inference(dtype=dtype) helper.append_op( type='unsqueeze2', inputs={'X': singular}, attrs={'axes': [-2]}, outputs={'Out': st, 'XShape': st_shape}) dims = list(range(len(vt.shape))) perm = dims[:-2] + [dims[-1]] + [dims[-2]] v = helper.create_variable_for_type_inference(dtype) v_shape = helper.create_variable_for_type_inference(dtype) helper.append_op( 
type='transpose2', inputs={'X': [vt]}, outputs={'Out': [v], 'XShape': [v_shape]}, attrs={'axis': perm}) out_1 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='elementwise_mul', inputs={'X': v, 'Y': st}, outputs={'Out': out_1}, attrs={'axis': -1, 'use_mkldnn': False}) out_1 = helper.append_activation(out_1) out_2 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='matmul_v2', inputs={'X': out_1, 'Y': u}, outputs={'Out': out_2}, attrs={'trans_x': False, 'trans_y': True}, ) return out_2 else: helper = LayerHelper('pinv', **locals()) dtype = x.dtype check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'pinv') if dtype == paddle.complex128: s_type = 'float64' elif dtype == paddle.complex64: s_type = 'float32' else: s_type = dtype u = helper.create_variable_for_type_inference(dtype) s = helper.create_variable_for_type_inference(s_type) helper.append_op( type='eigh', inputs={'X': x}, outputs={'Eigenvalues': s, 'Eigenvectors': u}, attrs={'UPLO': 'L'}) s_abs = helper.create_variable_for_type_inference(s_type) helper.append_op( type='abs', inputs={'X': s}, outputs={'Out': s_abs}) max_singular_val = helper.create_variable_for_type_inference(s_type) helper.append_op( type='reduce_max', inputs={'X': s_abs}, outputs={'Out': max_singular_val}, attrs={'dim': [-1], 'keep_dim': True, 'reduce_all': False}) rcond = layers.fill_constant(shape=[1], value=rcond, dtype=s_type) cutoff = rcond * max_singular_val y = float('inf') y = layers.fill_constant(shape=[1], value=y, dtype=s_type) condition = s_abs > cutoff cond_int = layers.cast(condition, s_type) cond_not_int = layers.cast(layers.logical_not(condition), s_type) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st = helper.create_variable_for_type_inference(dtype=s_type) st_shape = helper.create_variable_for_type_inference(dtype=s_type) helper.append_op( type='unsqueeze2', inputs={'X': singular}, attrs={'axes': [-2]}, outputs={'Out': st, 'XShape': st_shape}) out_1 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='elementwise_mul', inputs={'X': u, 'Y': st}, outputs={'Out': out_1}, attrs={'axis': -1, 'use_mkldnn': False}) out_1 = helper.append_activation(out_1) u_conj = helper.create_variable_for_type_inference(dtype) helper.append_op( type='conj', inputs={'X': u}, outputs={'Out': [u_conj]}) out_2 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='matmul_v2', inputs={'X': out_1, 'Y': u_conj}, outputs={'Out': out_2}, attrs={'trans_x': False, 'trans_y': True}, ) return out_2 def solve(x, y, name=None): r""" Computes the solution of a square system of linear equations with a unique solution for input 'X' and 'Y'. Let :math: `X` be a sqaure matrix or a batch of square matrices, :math:`Y` be a vector/matrix or a batch of vectors/matrices, the equation should be: .. math:: Out = X^-1 * Y Specifically, - This system of linear equations has one solution if and only if input 'X' is invertible. Args: x (Tensor): A square matrix or a batch of square matrices. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): A vector/matrix or a batch of vectors/matrices. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. name(str, optional): Name for the operation (optional, default is None). 
For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of a square system of linear equations with a unique solution for input 'x' and 'y'. Its data type should be the same as that of `x`. Examples: .. code-block:: python # a square system of linear equations: # 2*X0 + X1 = 9 # X0 + 2*X1 = 8 import paddle import numpy as np np_x = np.array([[3, 1],[1, 2]]) np_y = np.array([9, 8]) x = paddle.to_tensor(np_x, dtype="float64") y = paddle.to_tensor(np_y, dtype="float64") out = paddle.linalg.solve(x, y) print(out) # [2., 3.]) """ if paddle.in_dynamic_mode(): return _C_ops.solve(x, y) inputs = {"X": [x], "Y": [y]} helper = LayerHelper("solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type="solve", inputs={"X": x, "Y": y}, outputs={"Out": out}) return out def triangular_solve(x, y, upper=True, transpose=False, unitriangular=False, name=None): r""" Computes the solution of a system of equations with a triangular coefficient matrix `x` and multiple right-hand sides `y` . Input `x` and `y` is 2D matrices or batches of 2D matrices. If the inputs are batches, the outputs is also batches. Args: x (Tensor): The input triangular coefficient matrix. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. upper (bool, optional): Whether to solve the upper-triangular system of equations (default) or the lower-triangular system of equations. Default: True. transpose (bool, optional): whether `x` should be transposed before calculation. Default: False. unitriangular (bool, optional): whether `x` is unit triangular. If True, the diagonal elements of `x` are assumed to be 1 and not referenced from `x` . Default: False. name(str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of the system of equations. Its data type should be the same as that of `x`. Examples: .. code-block:: python # a square system of linear equations: # x1 + x2 + x3 = 0 # 2*x2 + x3 = -9 # -x3 = 5 import paddle import numpy as np x = paddle.to_tensor([[1, 1, 1], [0, 2, 1], [0, 0,-1]], dtype="float64") y = paddle.to_tensor([[0], [-9], [5]], dtype="float64") out = paddle.linalg.triangular_solve(x, y, upper=True) print(out) # [7, -2, -5] """ if paddle.in_dynamic_mode(): return _C_ops.triangular_solve(x, y, 'upper', upper, 'transpose', transpose, 'unitriangular', unitriangular) inputs = {"X": [x], "Y": [y]} helper = LayerHelper("triangular_solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'triangular_solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'triangular_solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='triangular_solve', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs={ 'upper': upper, 'transpose': transpose, 'unitriangular': unitriangular }) return out def cholesky_solve(x, y, upper=False, name=None): r""" Solves a linear system of equations A @ X = B, given A's Cholesky factor matrix u and matrix B. Input `x` and `y` is 2D matrices or batches of 2D matrices. 
If the inputs are batches, the outputs is also batches. Args: x (Tensor): The input matrix which is upper or lower triangular Cholesky factor of square matrix A. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. upper (bool, optional): whether to consider the Cholesky factor as a lower or upper triangular matrix. Default: False. name(str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of the system of equations. Its data type is the same as that of `x`. Examples: .. code-block:: python import paddle u = paddle.to_tensor([[1, 1, 1], [0, 2, 1], [0, 0,-1]], dtype="float64") b = paddle.to_tensor([[0], [-9], [5]], dtype="float64") out = paddle.linalg.cholesky_solve(b, u, upper=True) print(out) # [-2.5, -7, 9.5] """ if paddle.in_dynamic_mode(): return _C_ops.cholesky_solve(x, y, 'upper', upper) helper = LayerHelper("cholesky_solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'cholesky_solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'cholesky_solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='cholesky_solve', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs={'upper': upper}) return out def eigvalsh(x, UPLO='L', name=None): """ Computes the eigenvalues of a complex Hermitian (conjugate symmetric) or a real symmetric matrix. Args: x (Tensor): A tensor with shape :math:`[_, M, M]` , The data type of the input Tensor x should be one of float32, float64, complex64, complex128. UPLO(str, optional): Lower triangular part of a (‘L’, default) or the upper triangular part (‘U’). name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor eigenvalues in ascending order. Examples: .. code-block:: python import numpy as np import paddle x_data = np.array([[1, -2j], [2j, 5]]) x = paddle.to_tensor(x_data) out_value = paddle.eigvalsh(x, UPLO='L') print(out_value) #[0.17157288, 5.82842712] """ if paddle.in_dynamic_mode(): is_test = x.stop_gradient values, _ = _C_ops.eigvalsh(x, 'UPLO', UPLO, 'is_test', is_test) return values def __check_input(x, UPLO): x_shape = list(x.shape) if len(x.shape) < 2: raise ValueError( "Input(input) only support >=2 tensor, but received " "length of Input(input) is %s." % len(x.shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The input matrix must be batches of square matrices. But received x's dimention: {}". format(x_shape)) if UPLO != 'L' and UPLO != 'U': raise ValueError( "UPLO must be L or U. 
But received UPLO is: {}".format(UPLO)) __check_input(x, UPLO) helper = LayerHelper('eigvalsh', **locals()) check_variable_and_dtype(x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigvalsh') out_value = helper.create_variable_for_type_inference(dtype=x.dtype) out_vector = helper.create_variable_for_type_inference(dtype=x.dtype) is_test = x.stop_gradient helper.append_op( type='eigvalsh', inputs={'X': x}, outputs={'Eigenvalues': out_value, 'Eigenvectors': out_vector}, attrs={'UPLO': UPLO, 'is_test': is_test}) return out_value def lstsq(x, y, rcond=None, driver=None, name=None): """ Computes a solution to the least squares problem of a system of linear equations. Args: x (Tensor): A tensor with shape ``(*, M, N)`` , the data type of the input Tensor ``x`` should be one of float32, float64. y (Tensor): A tensor with shape ``(*, M, K)`` , the data type of the input Tensor ``y`` should be one of float32, float64. rcond(float, optional): The default value is None. A float pointing number used to determine the effective rank of ``x``. If ``rcond`` is None, it will be set to max(M, N) times the machine precision of x_dtype. driver(str, optional): The default value is None. The name of LAPACK method to be used. For CPU inputs the valid values are ‘gels’, ‘gelsy’, ‘gelsd, ‘gelss’. For CUDA input, the only valid driver is ‘gels’. If ``driver`` is None, ‘gelsy’ is used for CPU inputs and ‘gels’ for CUDA inputs. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tuple: A tuple of 4 Tensors which is (``solution``, ``residuals``, ``rank``, ``singular_values``). ``solution`` is a tensor with shape ``(*, N, K)``, meaning the least squares solution. ``residuals`` is a tensor with shape ``(*, K)``, meaning the squared residuals of the solutions, which is computed when M > N and every matrix in ``x`` is full-rank, otherwise return an empty tensor. ``rank`` is a tensor with shape ``(*)``, meaning the ranks of the matrices in ``x``, which is computed when ``driver`` in (‘gelsy’, ‘gelsd’, ‘gelss’), otherwise return an empty tensor. ``singular_values`` is a tensor with shape ``(*, min(M, N))``, meaning singular values of the matrices in ``x``, which is computed when ``driver`` in (‘gelsd’, ‘gelss’), otherwise return an empty tensor. Examples: .. code-block:: python import paddle paddle.set_device("cpu") x = paddle.to_tensor([[1, 3], [3, 2], [5, 6.]]) y = paddle.to_tensor([[3, 4, 6], [5, 3, 4], [1, 2, 1.]]) results = paddle.linalg.lstsq(x, y, driver="gelsd") print(results[0]) # [[ 0.78350395, -0.22165027, -0.62371236], # [-0.11340097, 0.78866047, 1.14948535]] print(results[1]) # [19.81443405, 10.43814468, 30.56185532]) print(results[2]) # 2 print(results[3]) # [9.03455734, 1.54167950] x = paddle.to_tensor([[10, 2, 3], [3, 10, 5], [5, 6, 12.]]) y = paddle.to_tensor([[4, 2, 9], [2, 0, 3], [2, 5, 3.]]) results = paddle.linalg.lstsq(x, y, driver="gels") print(results[0]) # [[ 0.39386186, 0.10230173, 0.93606132], # [ 0.10741687, -0.29028133, 0.11892585], # [-0.05115091, 0.51918161, -0.19948854]] print(results[1]) # [] """ device = paddle.get_device() if device == "cpu": if driver not in (None, "gels", "gelss", "gelsd", "gelsy"): raise ValueError( "Only support valid driver is 'gels', 'gelss', 'gelsd', 'gelsy' or None for CPU inputs. But got {}". 
format(driver)) driver = "gelsy" if driver is None else driver elif "gpu" in device: if driver not in (None, "gels"): raise ValueError( "Only support valid driver is 'gels' or None for CUDA inputs. But got {}". format(driver)) driver = "gels" if driver is None else driver else: raise RuntimeError("Only support lstsq api for CPU or CUDA device.") if x.dtype == y.dtype and x.dtype in (paddle.float32, paddle.float64): pass else: raise ValueError( "Only support x and y have the same dtype such as 'float32' and 'float64'." ) if rcond is None: if x.dtype == paddle.float32: rcond = 1e-7 * max(x.shape[-2], x.shape[-1]) elif x.dtype == paddle.float64: rcond = 1e-15 * max(x.shape[-2], x.shape[-1]) if paddle.in_dynamic_mode(): solution, rank, singular_values = _C_ops.lstsq(x, y, "rcond", rcond, "driver", driver) if x.shape[-2] > x.shape[-1]: matmul_out = _varbase_creator(dtype=x.dtype) _C_ops.matmul(x, solution, matmul_out, 'trans_x', False, 'trans_y', False) minus_out = _C_ops.elementwise_sub(matmul_out, y) pow_out = _C_ops.pow(minus_out, 'factor', 2) residuals = _C_ops.reduce_sum(pow_out, 'dim', [-2], 'keepdim', False, 'reduce_all', False) else: residuals = paddle.empty(shape=[0], dtype=x.dtype) if driver == "gels": rank = paddle.empty(shape=[0], dtype=paddle.int32) singular_values = paddle.empty(shape=[0], dtype=x.dtype) elif driver == "gelsy": singular_values = paddle.empty(shape=[0], dtype=x.dtype) return solution, residuals, rank, singular_values helper = LayerHelper('lstsq', **locals()) check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq') check_variable_and_dtype( y, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq') solution = helper.create_variable_for_type_inference(dtype=x.dtype) residuals = helper.create_variable_for_type_inference(dtype=x.dtype) rank = helper.create_variable_for_type_inference(dtype=paddle.int32) singular_values = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='lstsq', inputs={'X': x, 'Y': y}, outputs={ 'Solution': solution, 'Rank': rank, 'SingularValues': singular_values }, attrs={'rcond': rcond, 'driver': driver}) matmul_out = helper.create_variable_for_type_inference(dtype=x.dtype) minus_out = helper.create_variable_for_type_inference(dtype=x.dtype) pow_out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matmul_v2', inputs={'X': x, 'Y': solution}, outputs={'Out': matmul_out}, attrs={ 'trans_x': False, 'trans_y': False, }) helper.append_op( type='elementwise_sub', inputs={'X': matmul_out, 'Y': y}, outputs={'Out': minus_out}) helper.append_op( type='pow', inputs={'X': minus_out}, outputs={'Out': pow_out}, attrs={'factor': 2}) helper.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': residuals}, attrs={'dim': [-2], 'keep_dim': False, 'reduce_all': False}) if driver == "gels": rank = paddle.static.data(name='rank', shape=[0]) singular_values = paddle.static.data(name='singular_values', shape=[0]) elif driver == "gelsy": singular_values = paddle.static.data(name='singular_values', shape=[0]) return solution, residuals, rank, singular_values
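A minimal cross-check sketch, not part of the original module: it exercises the dynamic-mode branch of `lstsq` above and compares the solution against `numpy.linalg.lstsq`; the shapes and the `atol` tolerance are illustrative assumptions.

    import numpy as np
    import paddle

    paddle.set_device("cpu")
    x_np = np.random.rand(6, 3).astype("float64")   # over-determined system: M > N
    y_np = np.random.rand(6, 2).astype("float64")

    sol, res, rank, sv = paddle.linalg.lstsq(
        paddle.to_tensor(x_np), paddle.to_tensor(y_np), driver="gelsd")
    ref = np.linalg.lstsq(x_np, y_np, rcond=None)[0]

    # for a full-rank over-determined system the least squares solution is unique,
    # so the two results should agree up to numerical precision
    assert np.allclose(sol.numpy(), ref, atol=1e-6)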
eigh
Compute the eigenvalues and eigenvectors of a complex Hermitian (conjugate symmetric) or a real symmetric matrix. Args: x (Tensor): A tensor with shape :math:`[*, N, N]`. The data type of the input Tensor x should be one of float32, float64, complex64, complex128. UPLO(str, optional): 'L' (default) uses the lower triangular part of the matrix, 'U' uses the upper triangular part. name(str, optional): The default value is None. Normally there is no need for the user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: out_value(Tensor): A Tensor with shape [*, N] and data type of float32 or float64. The eigenvalues of eigh op. out_vector(Tensor): A Tensor with shape [*, N, N] and data type of float32, float64, complex64 or complex128. The eigenvectors of eigh op. Examples: .. code-block:: python import numpy as np import paddle x_data = np.array([[1, -2j], [2j, 5]]) x = paddle.to_tensor(x_data) out_value, out_vector = paddle.linalg.eigh(x, UPLO='L') print(out_value) #[0.17157288, 5.82842712] print(out_vector) #[(-0.9238795325112867+0j), (-0.3826834323650898+0j)], #[ 0.3826834323650898j , -0.9238795325112867j ]]
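A hedged verification sketch, not part of the original docstring: it checks the defining property of the decomposition returned by `eigh` (A @ V equals V @ diag(w) for a Hermitian input); the comparison is done in numpy to avoid casting the real eigenvalues to a complex dtype.

    import numpy as np
    import paddle

    a = paddle.to_tensor(np.array([[1, -2j], [2j, 5]]))
    w, v = paddle.linalg.eigh(a, UPLO='L')

    # for a Hermitian matrix, A @ V == V @ diag(w) up to numerical precision
    assert np.allclose(a.numpy() @ v.numpy(), v.numpy() @ np.diag(w.numpy()))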
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from ..fluid.layer_helper import LayerHelper from ..framework import _varbase_creator, _dygraph_tracer from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype from ..static import Variable from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode from ..fluid.layers import transpose, cast # noqa: F401 from ..fluid import layers import paddle from paddle.common_ops_import import core from paddle.common_ops_import import VarDesc from paddle import _C_ops __all__ = [] def matmul(x, y, transpose_x=False, transpose_y=False, name=None): """ Applies matrix multiplication to two tensors. `matmul` follows the complete broadcast rules, and its behavior is consistent with `np.matmul`. Currently, the input tensors' number of dimensions can be any, `matmul` can be used to achieve the `dot`, `matmul` and `batchmatmul`. The actual behavior depends on the shapes of :math:`x`, :math:`y` and the flag values of :attr:`transpose_x`, :attr:`transpose_y`. Specifically: - If a transpose flag is specified, the last two dimensions of the tensor are transposed. If the tensor is ndim-1 of shape, the transpose is invalid. If the tensor is ndim-1 of shape :math:`[D]`, then for :math:`x` it is treated as :math:`[1, D]`, whereas for :math:`y` it is the opposite: It is treated as :math:`[D, 1]`. The multiplication behavior depends on the dimensions of `x` and `y`. Specifically: - If both tensors are 1-dimensional, the dot product result is obtained. - If both tensors are 2-dimensional, the matrix-matrix product is obtained. - If the `x` is 1-dimensional and the `y` is 2-dimensional, a `1` is prepended to its dimension in order to conduct the matrix multiply. After the matrix multiply, the prepended dimension is removed. - If the `x` is 2-dimensional and `y` is 1-dimensional, the matrix-vector product is obtained. - If both arguments are at least 1-dimensional and at least one argument is N-dimensional (where N > 2), then a batched matrix multiply is obtained. If the first argument is 1-dimensional, a 1 is prepended to its dimension in order to conduct the batched matrix multiply and removed after. If the second argument is 1-dimensional, a 1 is appended to its dimension for the purpose of the batched matrix multiple and removed after. The non-matrix (exclude the last two dimensions) dimensions are broadcasted according the broadcast rule. For example, if input is a (j, 1, n, m) tensor and the other is a (k, m, p) tensor, out will be a (j, k, n, p) tensor. Args: x (Tensor): The input tensor which is a Tensor. y (Tensor): The input tensor which is a Tensor. transpose_x (bool): Whether to transpose :math:`x` before multiplication. transpose_y (bool): Whether to transpose :math:`y` before multiplication. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The output Tensor. Examples: .. 
code-block:: python import paddle import numpy as np # vector * vector x_data = np.random.random([10]).astype(np.float32) y_data = np.random.random([10]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [1] # matrix * vector x_data = np.random.random([10, 5]).astype(np.float32) y_data = np.random.random([5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10] # batched matrix * broadcasted vector x_data = np.random.random([10, 5, 2]).astype(np.float32) y_data = np.random.random([2]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 5] # batched matrix * batched matrix x_data = np.random.random([10, 5, 2]).astype(np.float32) y_data = np.random.random([10, 2, 5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 5, 5] # batched matrix * broadcasted matrix x_data = np.random.random([10, 1, 5, 2]).astype(np.float32) y_data = np.random.random([1, 3, 2, 5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 3, 5, 5] """ if in_dygraph_mode(): return _C_ops.final_state_matmul(x, y, transpose_x, transpose_y) if _in_legacy_dygraph(): op_type = 'matmul_v2' op = getattr(_C_ops, op_type) return op(x, y, 'trans_x', transpose_x, 'trans_y', transpose_y) attrs = { 'trans_x': transpose_x, 'trans_y': transpose_y, } def __check_input(x, y): var_names = {'x': x, 'y': y} for name, val in var_names.items(): check_variable_and_dtype( val, name, ['float16', 'float32', 'float64', 'complex64', 'complex128'], 'matmul') __check_input(x, y) helper = LayerHelper('matmul_v2', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matmul_v2', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs=attrs) return out def norm(x, p='fro', axis=None, keepdim=False, name=None): """ Returns the matrix norm (Frobenius) or vector norm (the 1-norm, the Euclidean or 2-norm, and in general the p-norm for p > 0) of a given tensor. .. note:: This norm API is different from `numpy.linalg.norm`. This api supports high-order input tensors (rank >= 3), and certain axis need to be pointed out to calculate the norm. But `numpy.linalg.norm` only supports 1-D vector or 2-D matrix as input tensor. For p-order matrix norm, this api actually treats matrix as a flattened vector to calculate the vector norm, NOT REAL MATRIX NORM. Args: x (Tensor): The input tensor could be N-D tensor, and the input data type could be float32 or float64. p (float|string, optional): Order of the norm. Supported values are `fro`, `0`, `1`, `2`, `inf`, `-inf` and any positive real number yielding the corresponding p-norm. Not supported: ord < 0 and nuclear norm. Default value is `fro`. axis (int|list|tuple, optional): The axis on which to apply norm operation. If axis is int or list(int)/tuple(int) with only one element, the vector norm is computed over the axis. If `axis < 0`, the dimension to norm operation is rank(input) + axis. If axis is a list(int)/tuple(int) with two elements, the matrix norm is computed over the axis. Defalut value is `None`. keepdim (bool, optional): Whether to reserve the reduced dimension in the output Tensor. 
The result tensor will have fewer dimension than the :attr:`input` unless :attr:`keepdim` is true, default value is False. name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: results of norm operation on the specified axis of input tensor, it's data type is the same as input's Tensor. Examples: .. code-block:: python import paddle import numpy as np shape=[2, 3, 4] np_input = np.arange(24).astype('float32') - 12 np_input = np_input.reshape(shape) x = paddle.to_tensor(np_input) #[[[-12. -11. -10. -9.] [ -8. -7. -6. -5.] [ -4. -3. -2. -1.]] # [[ 0. 1. 2. 3.] [ 4. 5. 6. 7.] [ 8. 9. 10. 11.]]] # compute frobenius norm along last two dimensions. out_fro = paddle.linalg.norm(x, p='fro', axis=[0,1]) # out_fro.numpy() [17.435596 16.911535 16.7332 16.911535] # compute 2-order vector norm along last dimension. out_pnorm = paddle.linalg.norm(x, p=2, axis=-1) #out_pnorm.numpy(): [[21.118711 13.190906 5.477226] # [ 3.7416575 11.224972 19.131126]] # compute 2-order norm along [0,1] dimension. out_pnorm = paddle.linalg.norm(x, p=2, axis=[0,1]) #out_pnorm.numpy(): [17.435596 16.911535 16.7332 16.911535] # compute inf-order norm out_pnorm = paddle.linalg.norm(x, p=np.inf) #out_pnorm.numpy() = [12.] out_pnorm = paddle.linalg.norm(x, p=np.inf, axis=0) #out_pnorm.numpy(): [[12. 11. 10. 9.] [8. 7. 6. 7.] [8. 9. 10. 11.]] # compute -inf-order norm out_pnorm = paddle.linalg.norm(x, p=-np.inf) #out_pnorm.numpy(): [0.] out_pnorm = paddle.linalg.norm(x, p=-np.inf, axis=0) #out_pnorm.numpy(): [[0. 1. 2. 3.] [4. 5. 6. 5.] [4. 3. 2. 1.]] """ def frobenius_norm(input, dim=None, keepdim=False, name=None): """ The frobenius norm OP is to calculate the frobenius norm of certain two dimensions of Tensor `input`. Args: input (Variable): Tensor, data type float32, float64. dim (list, optional): None for last two dimensions. keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False. """ if dim is not None and not (isinstance(dim, list) and len(dim) == 2): raise ValueError( "The dim of frobenius norm op should be None or two elements list!" ) if paddle.in_dynamic_mode(): if dim is None: return _C_ops.frobenius_norm(input, 'keep_dim', keepdim, 'reduce_all', True) return _C_ops.frobenius_norm(input, 'dim', dim, 'keep_dim', keepdim, 'reduce_all', False) attrs = {'dim': dim, 'keep_dim': keepdim, 'reduce_all': False} if dim is None: attrs['reduce_all'] = True check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'frobenius_norm') helper = LayerHelper('frobenius_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op( type='frobenius_norm', inputs={'X': input}, outputs={'Out': out}, attrs=attrs) return out def vector_norm(input, porder=None, axis=None, keepdim=False, asvector=False, name=None): """ Calculate the p-order vector norm for certain dimension of Tensor `input`. Args: input (Variable): Tensor, data type float32, float64. porder (float, optional): None for porder=2.0. axis (int, optional): None for last dimension. keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False. 
""" if paddle.in_dynamic_mode(): if axis is None: axis = -1 return _C_ops.p_norm(input, 'porder', porder, 'axis', axis, 'keepdim', keepdim, 'asvector', asvector) if porder is not None: check_type(porder, 'porder', (float, int), 'p_norm') if axis is not None: check_type(axis, 'axis', (int), 'p_norm') check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'p_norm') attrs = { 'axis': axis if axis is not None else -1, 'porder': float(porder) if porder is not None else 2.0, 'keepdim': keepdim, 'asvector': asvector, 'epsilon': 1e-12, } helper = LayerHelper('p_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op( type='p_norm', inputs={'X': input}, outputs={'Out': out}, attrs=attrs) return out def inf_norm(input, porder=None, axis=axis, keepdim=False, asvector=False, name=None): helper = LayerHelper('frobenius_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op(type='abs', inputs={'X': input}, outputs={'Out': out}) reduce_out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) reduce_all = True if axis == None or axis == [] or asvector == True else False axis = axis if axis != None and axis != [] else [0] reduce_type = 'reduce_max' if porder == np.float( 'inf') else 'reduce_min' helper.append_op( type=reduce_type, inputs={'X': out}, outputs={'Out': reduce_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) return reduce_out def p_matrix_norm(input, porder=1., axis=axis, keepdim=False, name=None): """ NOTE: This function actually treats the matrix as flattened vector to calculate vector norm instead of matrix norm. """ block = LayerHelper('norm', **locals()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) abs_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='abs', inputs={'X': input}, outputs={'Out': abs_out}) pow_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='pow', inputs={'X': abs_out}, outputs={'Out': pow_out}, attrs={'factor': porder}) sum_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': sum_out}, attrs={ 'dim': axis, 'keep_dim': keepdim, 'reduce_all': True if axis is None else False }) porder block.append_op( type='pow', inputs={'X': sum_out}, outputs={'Out': out}, attrs={'factor': float(1. / porder)}) return out if axis is None and p is not None: if isinstance(p, str): if p == "fro": return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name) else: raise ValueError( "only valid string values are 'fro', found {}".format(p)) elif isinstance(p, (int, float)): return vector_norm( x, porder=p, axis=axis, keepdim=keepdim, asvector=True, name=name) else: raise ValueError("only valid p type is string or float, found {}". 
format(type(p))) if isinstance(axis, tuple): axis = list(axis) if isinstance(axis, list) and len(axis) == 1: axis = axis[0] #calculate vector norm, where axis is int or list with only one integer if isinstance(axis, int): if isinstance(p, str): if p == "fro": return vector_norm( x, porder=2, axis=axis, keepdim=keepdim, asvector=False, name=name) else: raise ValueError( "only valid string values are 'fro', found {}".format(p)) elif isinstance(p, (int, float)): return vector_norm( x, axis=axis, porder=p, keepdim=keepdim, asvector=False, name=name) else: raise ValueError( "unspport p for p-order vector norm. except float, found {}". format(p)) #calculate matrix norm, where axis is list with two integers elif isinstance(axis, list) and len(axis) == 2: if p == "fro": return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name) elif p == np.inf or p == -np.inf: return inf_norm(x, porder=p, axis=axis, keepdim=keepdim, name=name) elif p == 0: raise ValueError( "just suport axis type int or list (length of list <=1) if p = 0, found {}". format(axis)) else: return p_matrix_norm( x, porder=p, axis=axis, keepdim=keepdim, name=name) else: raise ValueError( "except axis type int or list (length of list <=2), found {}". format(axis)) def dist(x, y, p=2, name=None): r""" This OP returns the p-norm of (x - y). It is not a norm in a strict sense, only as a measure of distance. The shapes of x and y must be broadcastable. The definition is as follows, for details, please refer to the `numpy's broadcasting <https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`_: - Each input has at least one dimension. - Match the two input dimensions from back to front, the dimension sizes must either be equal, one of them is 1, or one of them does not exist. Where, z = x - y, the shapes of x and y are broadcastable, then the shape of z can be obtained as follows: 1. If the number of dimensions of x and y are not equal, prepend 1 to the dimensions of the tensor with fewer dimensions. For example, The shape of x is [8, 1, 6, 1], the shape of y is [7, 1, 5], prepend 1 to the dimension of y. x (4-D Tensor): 8 x 1 x 6 x 1 y (4-D Tensor): 1 x 7 x 1 x 5 2. Determine the size of each dimension of the output z: choose the maximum value from the two input dimensions. z (4-D Tensor): 8 x 7 x 6 x 5 If the number of dimensions of the two inputs are the same, the size of the output can be directly determined in step 2. When p takes different values, the norm formula is as follows: When p = 0, defining $0^0=0$, the zero-norm of z is simply the number of non-zero elements of z. .. math:: ||z||_{0}=\lim_{p \\rightarrow 0}\sum_{i=1}^{m}|z_i|^{p} When p = inf, the inf-norm of z is the maximum element of z. .. math:: ||z||_\infty=\max_i |z_i| When p = -inf, the negative-inf-norm of z is the minimum element of z. .. math:: ||z||_{-\infty}=\min_i |z_i| Otherwise, the p-norm of z follows the formula, .. math:: ||z||_{p}=(\sum_{i=1}^{m}|z_i|^p)^{\\frac{1}{p}} Args: x (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64. y (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64. p (float, optional): The norm to be computed, its data type is float32 or float64. Default: 2. Returns: Tensor: Tensor that is the p-norm of (x - y). Examples: .. code-block:: python import paddle import numpy as np x = paddle.to_tensor(np.array([[3, 3],[3, 3]]), "float32") y = paddle.to_tensor(np.array([[3, 3],[3, 1]]), "float32") out = paddle.dist(x, y, 0) print(out) # out = [1.] out = paddle.dist(x, y, 2) print(out) # out = [2.] 
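# (added sketch, not part of the original example) p=1 sums the absolute
# element-wise differences: |3-3| + |3-3| + |3-3| + |3-1| = 2
out = paddle.dist(x, y, 1)
print(out) # out = [2.]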
out = paddle.dist(x, y, float("inf")) print(out) # out = [2.] out = paddle.dist(x, y, float("-inf")) print(out) # out = [0.] """ check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'dist') check_variable_and_dtype(y, 'dtype', ['float32', 'float64'], 'dist') check_type(p, 'p', (float, int), 'dist') helper = LayerHelper("dist", **locals()) out = helper.create_variable_for_type_inference(x.dtype) inputs = {"X": [x], "Y": [y]} outputs = {'Out': [out]} attrs = {"p": float(p)} helper.append_op( type='dist', inputs=inputs, outputs={'Out': out}, attrs=attrs) return out def cond(x, p=None, name=None): """ Computes the condition number of a matrix or batches of matrices with respect to a matrix norm ``p``. Args: x (Tensor): The input tensor could be tensor of shape ``(*, m, n)`` where ``*`` is zero or more batch dimensions for ``p`` in ``(2, -2)``, or of shape ``(*, n, n)`` where every matrix is invertible for any supported ``p``. And the input data type could be ``float32`` or ``float64``. p (float|string, optional): Order of the norm. Supported values are `fro`, `nuc`, `1`, `-1`, `2`, `-2`, `inf`, `-inf`. Default value is `None`, meaning that the order of the norm is `2`. name (str, optional): The default value is `None`. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: computing results of condition number, its data type is the same as input Tensor ``x``. Examples: .. code-block:: python import paddle import numpy as np x = paddle.to_tensor([[1., 0, -1], [0, 1, 0], [1, 0, 1]]) # compute conditional number when p is None out = paddle.linalg.cond(x) # out.numpy() [1.4142135] # compute conditional number when order of the norm is 'fro' out_fro = paddle.linalg.cond(x, p='fro') # out_fro.numpy() [3.1622777] # compute conditional number when order of the norm is 'nuc' out_nuc = paddle.linalg.cond(x, p='nuc') # out_nuc.numpy() [9.2426405] # compute conditional number when order of the norm is 1 out_1 = paddle.linalg.cond(x, p=1) # out_1.numpy() [2.] # compute conditional number when order of the norm is -1 out_minus_1 = paddle.linalg.cond(x, p=-1) # out_minus_1.numpy() [1.] # compute conditional number when order of the norm is 2 out_2 = paddle.linalg.cond(x, p=2) # out_2.numpy() [1.4142135] # compute conditional number when order of the norm is -1 out_minus_2 = paddle.linalg.cond(x, p=-2) # out_minus_2.numpy() [0.70710677] # compute conditional number when order of the norm is inf out_inf = paddle.linalg.cond(x, p=np.inf) # out_inf.numpy() [2.] # compute conditional number when order of the norm is -inf out_minus_inf = paddle.linalg.cond(x, p=-np.inf) # out_minus_inf.numpy() [1.] 
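# (added sketch, not part of the original example) for p=2 the condition
# number is the ratio of the largest to the smallest singular value
s = paddle.linalg.svd(x)[1]
print(s.max() / s.min()) # approximately [1.4142135], matching paddle.linalg.cond(x, p=2)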
a = paddle.to_tensor(np.random.randn(2, 4, 4).astype('float32')) # a.numpy() # [[[ 0.14063153 -0.996288 0.7996131 -0.02571543] # [-0.16303636 1.5534962 -0.49919784 -0.04402903] # [-1.1341571 -0.6022629 0.5445269 0.29154757] # [-0.16816919 -0.30972657 1.7521842 -0.5402487 ]] # [[-0.58081484 0.12402827 0.7229862 -0.55046535] # [-0.15178485 -1.1604939 0.75810957 0.30971205] # [-0.9669573 1.0940945 -0.27363303 -0.35416734] # [-1.216529 2.0018666 -0.7773689 -0.17556527]]] a_cond_fro = paddle.linalg.cond(a, p='fro') # a_cond_fro.numpy() [31.572273 28.120834] b = paddle.to_tensor(np.random.randn(2, 3, 4).astype('float64')) # b.numpy() # [[[ 1.61707487 0.46829144 0.38130416 0.82546736] # [-1.72710298 0.08866375 -0.62518804 0.16128892] # [-0.02822879 -1.67764516 0.11141444 0.3220113 ]] # [[ 0.22524372 0.62474921 -0.85503233 -1.03960523] # [-0.76620689 0.56673047 0.85064753 -0.45158196] # [ 1.47595418 2.23646462 1.5701758 0.10497519]]] b_cond_2 = paddle.linalg.cond(b, p=2) # b_cond_2.numpy() [3.30064451 2.51976252] """ def mat_norm(input, porder=1., axis=None): """ NOTE: Calculate the matrix norm of a square matrix or batches of square matrices, when porder is in (1, -1, inf, -inf) """ reduce_all = True if axis is None or axis == [] else False axis = axis if axis != None and axis != [] else [0] keepdim = False if paddle.in_dynamic_mode(): abs_out = _C_ops.abs(input) sum_out = _C_ops.reduce_sum(abs_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == 1 or porder == np.inf: return _C_ops.reduce_max(sum_out, 'dim', [-1], 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == -1 or porder == -np.inf: return _C_ops.reduce_min(sum_out, 'dim', [-1], 'keepdim', keepdim, 'reduce_all', reduce_all) block = LayerHelper('norm', **locals()) abs_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='abs', inputs={'X': input}, outputs={'Out': abs_out}) block.append_op( type='reduce_sum', inputs={'X': abs_out}, outputs={'Out': sum_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) if porder == 1 or porder == np.inf: block.append_op( type='reduce_max', inputs={'X': sum_out}, outputs={'Out': out}, attrs={ 'dim': [-1], 'keep_dim': keepdim, 'reduce_all': reduce_all }) if porder == -1 or porder == -np.inf: block.append_op( type='reduce_min', inputs={'X': sum_out}, outputs={'Out': out}, attrs={ 'dim': [-1], 'keep_dim': keepdim, 'reduce_all': reduce_all }) return out def fro_norm(input, porder=2, axis=[-1]): """ NOTE: Calculate the frobenius norm of a square matrix or batches of square matrices. """ reduce_all = True if axis is None or axis == [] else False keepdim = False if paddle.in_dynamic_mode(): pow_out = _C_ops.pow(input, 'factor', porder) sum_out_1 = _C_ops.reduce_sum(pow_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) sum_out_2 = _C_ops.reduce_sum(sum_out_1, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) return _C_ops.pow(sum_out_2, 'factor', float(1. 
/ porder)) block = LayerHelper('norm', **locals()) pow_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out_1 = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out_2 = block.create_variable_for_type_inference( dtype=block.input_dtype()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='pow', inputs={'X': input}, outputs={'Out': pow_out}, attrs={'factor': porder}) block.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': sum_out_1}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='reduce_sum', inputs={'X': sum_out_1}, outputs={'Out': sum_out_2}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='pow', inputs={'X': sum_out_2}, outputs={'Out': out}, attrs={'factor': float(1. / porder)}) return out def svd_norm(input, porder, axis=[-1]): """ NOTE: Calculate the matrix norm, which is related to singular values, of a matrix or batches of matrices, including nuclear norm, 2-norm and (-2)-norm. """ reduce_all = True if axis is None or axis == [] else False keepdim = False u, s, vh = svd(input, full_matrices=False) if paddle.in_dynamic_mode(): if porder == "nuc": return _C_ops.reduce_sum(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) max_out = _C_ops.reduce_max(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) min_out = _C_ops.reduce_min(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == 2: return _C_ops.elementwise_div(max_out, min_out, 'aixs', axis, 'use_mkldnn', False) if porder == -2: return _C_ops.elementwise_div(min_out, max_out, 'aixs', axis, 'use_mkldnn', False) block = LayerHelper('norm', **locals()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) if porder == "nuc": block.append_op( type='reduce_sum', inputs={'X': s}, outputs={'Out': out}, attrs={ 'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all }) return out max_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) min_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='reduce_max', inputs={'X': s}, outputs={'Out': max_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='reduce_min', inputs={'X': s}, outputs={'Out': min_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) if porder == 2: block.append_op( type='elementwise_div', inputs={'X': max_out, 'Y': min_out}, outputs={'Out': out}, attrs={'aixs': axis, 'use_mkldnn': False}) return out if porder == -2: block.append_op( type='elementwise_div', inputs={'X': min_out, 'Y': max_out}, outputs={'Out': out}, attrs={'aixs': axis, 'use_mkldnn': False}) return out def empty_tensor(input, shape): if paddle.in_dynamic_mode(): return input.reshape(shape) raise ValueError("only support x is nonempty tensor in static mode") x_shape = list(x.shape) if not len(x_shape) >= 2: raise ValueError("input should be a matrix or batches of matrices, " + "but the dimention of received input is {}".format( len(x_shape))) if p == None: p = 2 x_size = 0 if (0 in x_shape) else 1 if p in ("fro", "nuc", 1, -1, np.inf, -np.inf): if x_shape[len(x_shape) - 1] == x_shape[len(x_shape) - 2]: if x_size == 0: return empty_tensor(x, x_shape[:-2]) x_inv = x.inverse() if p == "fro": return fro_norm(x) * fro_norm(x_inv) if p == "nuc": return svd_norm(x, p) * svd_norm(x_inv, p) if p in (1, -1): return mat_norm( x, 
porder=p, axis=[-2]) * mat_norm( x_inv, porder=p, axis=[-2]) if p in (np.inf, -np.inf): return mat_norm( x, porder=p, axis=[-1]) * mat_norm( x_inv, porder=p, axis=[-1]) else: raise ValueError("only support p is {} when input is a ".format(p) + "square matrix or batches of square matrices") elif p in (2, -2): if x_size == 0: return empty_tensor(x, x_shape[:-2]) return svd_norm(x, porder=p) else: raise ValueError( "unsupported {} for p, only supporting ('fro', 'nuc', ".format( p) + "1, -1, 2, -2, inf, -inf) or none") def dot(x, y, name=None): """ This operator calculates inner product for vectors. .. note:: Support 1-d and 2-d Tensor. When it is 2d, the first dimension of this matrix is the batch dimension, which means that the vectors of multiple batches are dotted. Parameters: x(Tensor): 1-D or 2-D ``Tensor``. Its dtype should be ``float32``, ``float64``, ``int32``, ``int64`` y(Tensor): 1-D or 2-D ``Tensor``. Its dtype soulde be ``float32``, ``float64``, ``int32``, ``int64`` name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. Details: :ref:`api_guide_Name` Returns: Tensor: the calculated result Tensor. Examples: .. code-block:: python import paddle import numpy as np x_data = np.random.uniform(0.1, 1, [10]).astype(np.float32) y_data = np.random.uniform(1, 3, [10]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.dot(x, y) print(z) """ op_type = 'dot' # skip var type check in dygraph mode to improve efficiency if paddle.in_dynamic_mode(): op = getattr(_C_ops, op_type) return op(x, y) assert x is not None, 'x cannot be None in {}'.format(op_type) assert y is not None, 'y cannot be None in {}'.format(op_type) check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], op_type) check_variable_and_dtype(y, 'y', ['float32', 'float64', 'int32', 'int64'], op_type) helper = LayerHelper(op_type, **locals()) if name is None: out = helper.create_variable_for_type_inference(dtype=x.dtype) else: out = helper.create_variable( name=name, dtype=x.dtype, persistable=False) helper.append_op( type="dot", inputs={'X': x, 'Y': y}, attrs={}, outputs={"Out": out}) return out def cov(x, rowvar=True, ddof=True, fweights=None, aweights=None, name=None): """ Estimate the covariance matrix of the input variables, given data and weights. A covariance matrix is a square matrix, indicate the covariance of each pair variables in the input matrix. For example, for an N-dimensional samples X=[x1,x2,…xN]T, then the covariance matrix element Cij is the covariance of xi and xj. The element Cii is the variance of xi itself. Parameters: x(Tensor): A N-D(N<=2) Tensor containing multiple variables and observations. By default, each row of x represents a variable. Also see rowvar below. rowvar(Bool, optional): If rowvar is True (default), then each row represents a variable, with observations in the columns. Default: True ddof(Bool, optional): If ddof=True will return the unbiased estimate, and ddof=False will return the simple average. Default: True fweights(Tensor, optional): 1-D Tensor of integer frequency weights; The number of times each observation vector should be repeated. Default: None aweights(Tensor, optional): 1-D Tensor of observation vector weights. How important of the observation vector, larger data means this element is more important. Default: None name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. 
Details: :ref:`api_guide_Name` Returns: Tensor: The covariance matrix Tensor of the variables. Examples: .. code-block:: python import paddle xt = paddle.rand((3,4)) paddle.linalg.cov(xt) ''' Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, [[0.07918842, 0.06127326, 0.01493049], [0.06127326, 0.06166256, 0.00302668], [0.01493049, 0.00302668, 0.01632146]]) ''' """ op_type = 'cov' if len(x.shape) > 2 or len(x.shape) < 1: raise ValueError( "Input(x) only support N-D (1<=N<=2) tensor in cov, but received " "length of Input(input) is %s." % len(x.shape)) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cov') nx = x if len(x.shape) == 1: nx = x.reshape((1, -1)) if not rowvar and nx.shape[0] != 1: nx = nx.t() w = None observation_num = nx.shape[1] if fweights is not None: w = fweights.astype(nx.dtype) if len(w.shape) > 1: raise ValueError( "Input(fweights) only support N-D (N<=1) tensor in cov, but received " "shape of Input(input) is %s." % len(fweights.shape)) if fweights.shape[0] != observation_num: raise ValueError( "The number of Input(fweights) should equal to x's dim[1]: {}, but received " "size of Input(fweights) is {}.".format(observation_num, fweights.shape[0])) if fweights.min() < 0: raise ValueError( "The value of Input(fweights) cannot be negtive, but received " "min of Input(fweights) is {}.".format(fweights.min())) if not paddle.all(fweights == paddle.round(fweights.astype('float64'))): raise ValueError("Input(fweights) must be integer ") if aweights is not None: aw = aweights.astype(nx.dtype) if len(aw.shape) > 1: raise ValueError( "Input(aweights) only support N-D (N<=1) tensor in cov, but received " "length of Input(input) is %s." % len(aweights.shape)) check_variable_and_dtype(aweights, 'dtype', ['float32', 'float64'], 'cov') if aweights.shape[0] != observation_num: raise ValueError( "The number of Input(aweights) should equal to x's dim[1]: {}, but received " "size of Input(aweights) is {}.".format(observation_num, aweights.shape[0])) if aweights.min() < 0: raise ValueError( "The value of Input(aweights) cannot be negtive, but received " "min of Input(aweights) is {}.".format(aweights.min())) if w is not None: w = w * aw else: w = aw w_sum = paddle.to_tensor(observation_num, dtype=nx.dtype) if fweights is not None or aweights is not None: w_sum = w.sum() if w_sum.item() == 0: raise ValueError("The sum of weights is zero, can't be normalized.") if w is not None: nx_w = nx * w avg = (nx_w).sum(axis=1) / w_sum else: avg = nx.sum(axis=1) / w_sum nx_w = nx if w is not None and aweights is not None and ddof == True: norm_factor = w_sum - (w * aweights).sum() / w_sum else: norm_factor = w_sum - ddof if norm_factor <= 0: norm_factor = paddle.to_tensor(0, dtype=nx.dtype) nx = nx - avg.unsqueeze(1) xxt = paddle.mm(nx, nx_w.t().conj()) cov = paddle.divide(xxt, norm_factor).squeeze() return cov def t(input, name=None): """ Transpose <=2-D tensor. 0-D and 1-D tensors are returned as it is and 2-D tensor is equal to the paddle.transpose function which perm dimensions set 0 and 1. Args: input (Tensor): The input Tensor. It is a N-D (N<=2) Tensor of data types float16, float32, float64, int32. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` Returns: Tensor: A transposed n-D Tensor, with data type being float16, float32, float64, int32, int64. For Example: .. 
code-block:: text # Example 1 (0-D tensor) x = tensor([0.79]) paddle.t(x) = tensor([0.79]) # Example 2 (1-D tensor) x = tensor([0.79, 0.84, 0.32]) paddle.t(x) = tensor([0.79, 0.84, 0.32]) # Example 3 (2-D tensor) x = tensor([0.79, 0.84, 0.32], [0.64, 0.14, 0.57]) paddle.t(x) = tensor([0.79, 0.64], [0.84, 0.14], [0.32, 0.57]) Examples: .. code-block:: python import paddle x = paddle.ones(shape=[2, 3], dtype='int32') x_transposed = paddle.t(x) print(x_transposed.shape) # [3, 2] """ if len(input.shape) > 2: raise ValueError( "Input(input) only support N-D (N<=2) tensor, but received " "length of Input(input) is %s. Perhaps you can use paddle." "tensor.transpose() instead." % len(input.shape)) if paddle.in_dynamic_mode(): if len(input.shape) == 1: return input # 2-D tensor perm = [1, 0] out, _ = _C_ops.transpose2(input, 'axis', perm) return out check_variable_and_dtype( input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'], 'transpose') helper = LayerHelper('t', **locals()) out = helper.create_variable_for_type_inference(input.dtype) input_shape = helper.create_variable_for_type_inference(input.dtype) if len(input.shape) == 1: out = input else: helper.append_op( type='transpose2', inputs={'X': [input]}, outputs={'Out': [out], 'XShape': [input_shape]}, attrs={'axis': [1, 0]}) return out def cross(x, y, axis=None, name=None): """ Computes the cross product between two tensors along an axis. Inputs must have the same shape, and the length of their axes should be equal to 3. If `axis` is not given, it defaults to the first axis found with the length 3. Args: x (Tensor): The first input tensor. y (Tensor): The second input tensor. axis (int, optional): The axis along which to compute the cross product. It defaults to the first axis found with the length 3. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor. A Tensor with same data type as `x`. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0], [3.0, 3.0, 3.0]]) y = paddle.to_tensor([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]) z1 = paddle.cross(x, y) # [[-1. -1. -1.] # [ 2. 2. 2.] # [-1. -1. -1.]] z2 = paddle.cross(x, y, axis=1) # [[0. 0. 0.] # [0. 0. 0.] # [0. 0. 0.]] """ if in_dygraph_mode(): return _C_ops.final_state_cross(x, y, axis) else: if _in_legacy_dygraph(): if axis is not None: return _C_ops.cross(x, y, 'dim', axis) else: return _C_ops.cross(x, y) else: helper = LayerHelper("cross", **locals()) out = helper.create_variable_for_type_inference(x.dtype) attrs = dict() attrs['dim'] = axis helper.append_op( type='cross', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs=attrs) return out def cholesky(x, upper=False, name=None): r""" Computes the Cholesky decomposition of one symmetric positive-definite matrix or batches of symmetric positive-definite matrice. If `upper` is `True`, the decomposition has the form :math:`A = U^{T}U` , and the returned matrix :math:`U` is upper-triangular. Otherwise, the decomposition has the form :math:`A = LL^{T}` , and the returned matrix :math:`L` is lower-triangular. Args: x (Tensor): The input tensor. Its shape should be `[*, M, M]`, where * is zero or more batch dimensions, and matrices on the inner-most 2 dimensions all should be symmetric positive-definite. Its data type should be float32 or float64. upper (bool): The flag indicating whether to return upper or lower triangular matrices. Default: False. 
Returns: Tensor: A Tensor with same shape and data type as `x`. It represents \ triangular matrices generated by Cholesky decomposition. Examples: .. code-block:: python import paddle import numpy as np a = np.random.rand(3, 3) a_t = np.transpose(a, [1, 0]) x_data = np.matmul(a, a_t) + 1e-03 x = paddle.to_tensor(x_data) out = paddle.linalg.cholesky(x, upper=False) print(out) # [[1.190523 0. 0. ] # [0.9906703 0.27676893 0. ] # [1.25450498 0.05600871 0.06400121]] """ if paddle.in_dynamic_mode(): return _C_ops.cholesky(x, "upper", upper) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cholesky') check_type(upper, 'upper', bool, 'cholesky') helper = LayerHelper('cholesky', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='cholesky', inputs={'X': [x]}, outputs={'Out': out}, attrs={'upper': upper}) return out def matrix_rank(x, tol=None, hermitian=False, name=None): r""" Computes the rank of a matrix. The rank of a matrix is the number of singular values that are greater than the specified `tol` threshold when hermitian=False, or the number of eigenvalues in absolute value that are greater than the specified `tol` threshold when hermitian=True. Args: x (Tensor): The input tensor. Its shape should be `[..., m, n]`, where `...` is zero or more batch dimensions. If `x` is a batch of matrices then the output has the same batch dimensions. The data type of `x` should be float32 or float64. tol (float,Tensor,optional): the tolerance value. Default: None. If `tol` is not specified, and `sigma` is the largest singular value (or eigenvalues in absolute value), and `eps` is the epsilon value for the dtype of `x`, then `tol` is computed with formula `tol=sigma * max(m,n) * eps`. Note that if `x` is a batch of matrices, `tol` is computed this way for every batch. hermitian (bool,optional): indicates whether `x` is Hermitian. Default: False. When hermitian=True, `x` is assumed to be Hermitian, enabling a more efficient method for finding eigenvalues, but `x` is not checked inside the function. Instead, We just use the lower triangular of the matrix to compute. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: Rank of tensor x. Examples: .. 
code-block:: python import paddle a = paddle.eye(10) b = paddle.linalg.matrix_rank(a) print(b) # b = [10] c = paddle.ones(shape=[3, 4, 5, 5]) d = paddle.linalg.matrix_rank(c, tol=0.01, hermitian=True) print(d) # d = [[1, 1, 1, 1], # [1, 1, 1, 1], # [1, 1, 1, 1]] """ if paddle.in_dynamic_mode(): if tol is None: tol_tensor = None tol_attr = 0.0 use_default_tol = True elif isinstance(tol, Variable): if tol.dtype != x.dtype: tol_tensor = cast(tol, x.dtype) else: tol_tensor = tol tol_attr = 0.0 use_default_tol = False else: tol_tensor = None tol_attr = float(tol) use_default_tol = False return _C_ops.matrix_rank(x, tol_tensor, "tol", tol_attr, 'hermitian', hermitian, 'use_default_tol', use_default_tol) inputs = {} attrs = {} check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'matrix_rank') inputs['X'] = x if tol is None: attrs['use_default_tol'] = True elif isinstance(tol, Variable): check_variable_and_dtype(tol, 'tol', ['float32'], 'matrix_rank') attrs['use_default_tol'] = False if tol.dtype != x.dtype: inputs['TolTensor'] = cast(tol, x.dtype) else: inputs['TolTensor'] = tol else: check_type(tol, 'tol', float, 'matrix_rank') attrs['use_default_tol'] = False attrs['tol'] = tol check_type(hermitian, 'hermitian', bool, 'matrix_rank') attrs['hermitian'] = hermitian helper = LayerHelper('matrix_rank', **locals()) out = helper.create_variable_for_type_inference(dtype='int32') helper.append_op( type='matrix_rank', inputs=inputs, outputs={'Out': out}, attrs=attrs) return out def bmm(x, y, name=None): """ Applies batched matrix multiplication to two tensors. Both of the two input tensors must be three-dementional and share the same batch size. if x is a (b, m, k) tensor, y is a (b, k, n) tensor, the output will be a (b, m, n) tensor. Args: x (Tensor): The input Tensor. y (Tensor): The input Tensor. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The product Tensor. Examples: .. code-block:: python import paddle # In imperative mode: # size x: (2, 2, 3) and y: (2, 3, 2) x = paddle.to_tensor([[[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]], [[3.0, 3.0, 3.0], [4.0, 4.0, 4.0]]]) y = paddle.to_tensor([[[1.0, 1.0],[2.0, 2.0],[3.0, 3.0]], [[4.0, 4.0],[5.0, 5.0],[6.0, 6.0]]]) out = paddle.bmm(x, y) #output size: (2, 2, 2) #output value: #[[[6.0, 6.0],[12.0, 12.0]],[[45.0, 45.0],[60.0, 60.0]]] out_np = out.numpy() """ x_shape = x.shape y_shape = y.shape if not len(x_shape) == len(y_shape) == 3: raise ValueError( "x and y should be 3-dimensional. But received x's dimention: {}, y's dimention: {}". format(x_shape, y_shape)) if x_shape[2] != y_shape[1]: raise ValueError( "x's width must be equal with y's height. But received x's shape: {}, y's shape: {}". format(x_shape, y_shape)) if x_shape[0] != y_shape[0]: raise ValueError( "x's batch (shape[0]) must be equal with y's batch (shape[0]). But received x's shape: {}, y's shape: {}". format(x_shape, y_shape)) if paddle.in_dynamic_mode(): return _C_ops.bmm(x, y) helper = LayerHelper('bmm', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type='bmm', inputs={'X': x, 'Y': y}, outputs={'Out': out}) return out def histogram(input, bins=100, min=0, max=0, name=None): """ Computes the histogram of a tensor. The elements are sorted into equal width bins between min and max. If min and max are both zero, the minimum and maximum values of the data are used. Args: input (Tensor): A Tensor(or LoDTensor) with shape :math:`[N_1, N_2,..., N_k]` . 
The data type of the input Tensor should be float32, float64, int32, int64. bins (int): number of histogram bins min (int): lower end of the range (inclusive) max (int): upper end of the range (inclusive) Returns: Tensor: data type is int64, shape is (nbins,). Examples: .. code-block:: python import paddle inputs = paddle.to_tensor([1, 2, 1]) result = paddle.histogram(inputs, bins=4, min=0, max=3) print(result) # [0, 2, 1, 0] """ if paddle.in_dynamic_mode(): return _C_ops.histogram(input, "bins", bins, "min", min, "max", max) helper = LayerHelper('histogram', **locals()) check_variable_and_dtype( input, 'X', ['int32', 'int64', 'float32', 'float64'], 'histogram') out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64) helper.append_op( type='histogram', inputs={'X': input}, outputs={'Out': out}, attrs={'bins': bins, 'min': min, 'max': max}) return out def bincount(x, weights=None, minlength=0, name=None): """ Computes frequency of each value in the input tensor. Args: x (Tensor): A Tensor with non-negative integer. Should be 1-D tensor. weights (Tensor, optional): Weight for each value in the input tensor. Should have the same shape as input. Default is None. minlength (int, optional): Minimum number of bins. Should be non-negative integer. Default is 0. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor of frequency. Examples: .. code-block:: python import paddle x = paddle.to_tensor([1, 2, 1, 4, 5]) result1 = paddle.bincount(x) print(result1) # [0, 2, 1, 0, 1, 1] w = paddle.to_tensor([2.1, 0.4, 0.1, 0.5, 0.5]) result2 = paddle.bincount(x, weights=w) print(result2) # [0., 2.19999981, 0.40000001, 0., 0.50000000, 0.50000000] """ if x.dtype not in [paddle.int32, paddle.int64]: raise TypeError("Elements in Input(x) should all be integers") if paddle.in_dynamic_mode(): return _C_ops.bincount(x, weights, "minlength", minlength) helper = LayerHelper('bincount', **locals()) check_variable_and_dtype(x, 'X', ['int32', 'int64'], 'bincount') if weights is not None: check_variable_and_dtype(weights, 'Weights', ['int32', 'int64', 'float32', 'float64'], 'bincount') out = helper.create_variable_for_type_inference(dtype=weights.dtype) else: out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='bincount', inputs={'X': x, 'Weights': weights}, outputs={'Out': out}, attrs={'minlength': minlength}) return out def mv(x, vec, name=None): """ Performs a matrix-vector product of the matrix x and the vector vec. Args: x (Tensor): A tensor with shape :math:`[M, N]` , The data type of the input Tensor x should be one of float32, float64. vec (Tensor): A tensor with shape :math:`[N]` , The data type of the input Tensor x should be one of float32, float64. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor which is producted by x and vec. Examples: .. 
code-block:: python # x: [M, N], vec: [N] # paddle.mv(x, vec) # out: [M] import numpy as np import paddle x_data = np.array([[2, 1, 3], [3, 0, 1]]).astype("float64") x = paddle.to_tensor(x_data) vec_data = np.array([3, 5, 1]) vec = paddle.to_tensor(vec_data).astype("float64") out = paddle.mv(x, vec) """ if in_dygraph_mode(): return _C_ops.final_state_mv(x, vec) else: if _in_legacy_dygraph(): out = _C_ops.mv(x, vec) return out else: def __check_input(x, vec): var_names = {'x': x, 'vec': vec} for name, val in var_names.items(): check_variable_and_dtype(val, name, ['float32', 'float64'], 'mv') x_shape = list(x.shape) vec_shape = list(vec.shape) if len(x_shape) != 2: raise ValueError( "x should be 2-dimensional. But received x's dimention: {}". format(x_shape)) if len(vec_shape) != 1: raise ValueError( "vec should be 1-dimensional. But received vec's dimention: {}". format(vec_shape)) __check_input(x, vec) helper = LayerHelper('mv', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='mv', inputs={'X': x, 'Vec': vec}, outputs={'Out': out}) return out def det(x, name=None): """ Calculates determinant value of a square matrix or batches of square matrices. Args: x (Tensor): input (Tensor): the input matrix of size `(n, n)` or the batch of matrices of size `(*, n, n)` where `*` is one or more batch dimensions. Returns: y (Tensor):the determinant value of a square matrix or batches of square matrices. Examples: .. code-block:: python import paddle x = paddle.randn([3,3,3]) A = paddle.linalg.det(x) print(A) # [ 0.02547996, 2.52317095, -6.15900707]) """ if paddle.in_dynamic_mode(): return _C_ops.determinant(x) check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'det') input_shape = list(x.shape) assert len(input_shape) >= 2, \ "The x must be at least 2-dimensional, " \ "but received Input x's dimensional: %s.\n" % \ len(input_shape) assert (input_shape[-1] == input_shape[-2]), \ "Expect squared input," \ "but received %s by %s matrix.\n" \ %(input_shape[-2], input_shape[-1]) \ helper = LayerHelper('determinant', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='determinant', inputs={'Input': [x]}, outputs={'Out': [out]}) return out def slogdet(x, name=None): """ Calculates the sign and natural logarithm of the absolute value of a square matrix's or batches square matrices' determinant. The determinant can be computed with ``sign * exp(logabsdet) Supports input of float, double Note that for matrices that have zero determinant, this returns ``(0, -inf)`` Args: x (Tensor): the batch of matrices of size :math:`(*, n, n)` where math:`*` is one or more batch dimensions. Returns: y (Tensor): A tensor containing the sign of the determinant and the natural logarithm of the absolute value of determinant, respectively. Examples: .. code-block:: python import paddle x = paddle.randn([3,3,3]) A = paddle.linalg.slogdet(x) print(A) # [[ 1. , 1. , -1. 
], # [-0.98610914, -0.43010661, -0.10872950]]) """ if paddle.in_dynamic_mode(): return _C_ops.slogdeterminant(x) check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'slogdet') input_shape = list(x.shape) assert len(input_shape) >= 2, \ "The x must be at least 2-dimensional, " \ "but received Input x's dimensional: %s.\n" % \ len(input_shape) assert (input_shape[-1] == input_shape[-2]), \ "Expect squared input," \ "but received %s by %s matrix.\n" \ %(input_shape[-2], input_shape[-1]) \ helper = LayerHelper('slogdeterminant', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='slogdeterminant', inputs={'Input': [x]}, outputs={'Out': [out]}) return out def svd(x, full_matrices=False, name=None): r""" Computes the singular value decomposition of one matrix or a batch of regular matrices. Let :math:`X` be the input matrix or a batch of input matrices, the output should satisfies: .. math:: X = U * diag(S) * VT Args: x (Tensor): The input tensor. Its shape should be `[..., N, M]`, where `...` is zero or more batch dimensions. N and M can be arbitraty positive number. Note that if x is sigular matrices, the grad is numerical instable. The data type of x should be float32 or float64. full_matrices (bool): A flag to control the behavor of svd. If full_matrices = True, svd op will compute full U and V matrics, which means shape of U is `[..., N, N]`, shape of V is `[..., M, M]`. K = min(M, N). If full_matrices = False, svd op will use a economic method to store U and V. which means shape of U is `[..., N, K]`, shape of V is `[..., M, K]`. K = min(M, N). name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tuple of 3 tensors: (U, S, VH). VH is the conjugate transpose of V. S is the singlar value vectors of matrics with shape `[..., K]` Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [1.0, 3.0], [4.0, 6.0]]).astype('float64') x = x.reshape([3, 2]) u, s, vh = paddle.linalg.svd(x) print (u) #U = [[ 0.27364809, -0.21695147 ], # [ 0.37892198, -0.87112408 ], # [ 0.8840446 , 0.44053933 ]] print (s) #S = [8.14753743, 0.78589688] print (vh) #VT= [[ 0.51411221, 0.85772294], # [ 0.85772294, -0.51411221]] # one can verify : U * S * VT == X # U * UH == I # V * VH == I """ if paddle.in_dynamic_mode(): return _C_ops.svd(x, 'full_matrices', full_matrices) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'svd') check_type(full_matrices, 'full_matrices', bool, 'svd') helper = LayerHelper('svd', **locals()) u = helper.create_variable_for_type_inference(dtype=x.dtype) vh = helper.create_variable_for_type_inference(dtype=x.dtype) s = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['full_matrices'] = full_matrices helper.append_op( type='svd', inputs={'X': [x]}, outputs={'U': u, 'VH': vh, 'S': s}, attrs=attrs, ) return u, s, vh def matrix_power(x, n, name=None): r""" Computes the n-th power of a square matrix or a batch of square matrices. Let :math:`X` be a sqaure matrix or a batch of square matrices, :math:`n` be an exponent, the equation should be: .. math:: Out = X ^ {n} Specifically, - If `n > 0`, it returns the matrix or a batch of matrices raised to the power of `n`. - If `n = 0`, it returns the identity matrix or a batch of identity matrices. - If `n < 0`, it returns the inverse of each matrix (if invertible) raised to the power of `abs(n)`. 
Args: x (Tensor): A square matrix or a batch of square matrices to be raised to power `n`. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. n (int): The exponent. It can be any positive, negative integer or zero. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The n-th power of the matrix (or the batch of matrices) `x`. Its data type should be the same as that of `x`. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1, 2, 3], [1, 4, 9], [1, 8, 27]], dtype='float64') print(paddle.linalg.matrix_power(x, 2)) # [[6. , 34. , 102.], # [14. , 90. , 282.], # [36. , 250., 804.]] print(paddle.linalg.matrix_power(x, 0)) # [[1., 0., 0.], # [0., 1., 0.], # [0., 0., 1.]] print(paddle.linalg.matrix_power(x, -2)) # [[ 12.91666667, -12.75000000, 2.83333333 ], # [-7.66666667 , 8. , -1.83333333 ], # [ 1.80555556 , -1.91666667 , 0.44444444 ]] """ if paddle.in_dynamic_mode(): return _C_ops.matrix_power(x, "n", n) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'matrix_power') check_type(n, 'n', int, 'matrix_power') helper = LayerHelper('matrix_power', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matrix_power', inputs={'X': x}, outputs={'Out': out}, attrs={'n': n}) return out def qr(x, mode="reduced", name=None): r""" Computes the QR decomposition of one matrix or batches of matrice (backward is unsupported now). Args: x (Tensor): The input tensor. Its shape should be `[..., M, N]`, where ... is zero or more batch dimensions. M and N can be arbitrary positive number. The data type of x should be float32 or float64. mode (str, optional): A flag to control the behavior of qr, the default is "reduced". Suppose x's shape is `[..., M, N]` and denoting `K = min(M, N)`: If mode = "reduced", qr op will return reduced Q and R matrices, which means Q's shape is `[..., M, K]` and R's shape is `[..., K, N]`. If mode = "complete", qr op will return complete Q and R matrices, which means Q's shape is `[..., M, M]` and R's shape is `[..., M, N]`. If mode = "r", qr op will only return reduced R matrix, which means R's shape is `[..., K, N]`. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: If mode = "reduced" or mode = "complete", qr will return a two tensor-tuple, which represents Q and R. If mode = "r", qr will return a tensor which represents R. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') q, r = paddle.linalg.qr(x) print (q) print (r) # Q = [[-0.16903085, 0.89708523], # [-0.50709255, 0.27602622], # [-0.84515425, -0.34503278]]) # R = [[-5.91607978, -7.43735744], # [ 0. 
, 0.82807867]]) # one can verify : X = Q * R ; """ if paddle.in_dynamic_mode(): q, r = _C_ops.qr(x, 'mode', mode) if mode == "r": return r else: return q, r check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'qr') check_type(mode, 'mode', str, 'qr') helper = LayerHelper('qr', **locals()) q = helper.create_variable_for_type_inference(dtype=x.dtype) r = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['mode'] = mode helper.append_op( type='qr', inputs={'X': [x]}, outputs={'Q': q, 'R': r}, attrs=attrs) if mode == "r": return r else: return q, r def lu(x, pivot=True, get_infos=False, name=None): r""" Computes the LU factorization of an N-D(N>=2) matrix x. Returns the LU factorization(inplace x) and Pivots. low triangular matrix L and upper triangular matrix U are combined to a single LU matrix. Pivoting is done if pivot is set to True. P mat can be get by pivots: # ones = eye(rows) #eye matrix of rank rows # for i in range(cols): # swap(ones[i], ones[pivots[i]]) # return ones Args: X (Tensor): the tensor to factor of N-dimensions(N>=2). pivot (bool, optional): controls whether pivoting is done. Default: True. get_infos (bool, optional): if set to True, returns an info IntTensor. Default: False. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: factorization (Tensor): LU matrix, the factorization of input X. pivots (IntTensor): the pivots of size(∗(N-2), min(m,n)). `pivots` stores all the intermediate transpositions of rows. The final permutation `perm` could be reconstructed by this, details refer to upper example. infos (IntTensor, optional): if `get_infos` is `True`, this is a tensor of size (∗(N-2)) where non-zero values indicate whether factorization for the matrix or each minibatch has succeeded or failed. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') lu,p,info = paddle.linalg.lu(x, get_infos=True) # >>> lu: # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0.20000000, 0.80000000], # [0.60000000, 0.50000000]]) # >>> p # Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # [3, 3]) # >>> info # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # 0) P,L,U = paddle.linalg.lu_unpack(lu,p) # >>> P # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[0., 1., 0.], # [0., 0., 1.], # [1., 0., 0.]]), # >>> L # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[1. , 0. ], # [0.20000000, 1. ], # [0.60000000, 0.50000000]]), # >>> U # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0. 
, 0.80000000]])) # one can verify : X = P @ L @ U ; """ if paddle.in_dynamic_mode(): LU, Piv, Info = _C_ops.lu(x, 'pivots', pivot) if get_infos: return LU, Piv, Info else: return LU, Piv check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu') helper = LayerHelper('lu', **locals()) lu = helper.create_variable_for_type_inference(dtype=x.dtype) p = helper.create_variable_for_type_inference(dtype='int') info = helper.create_variable_for_type_inference(dtype='int') attrs = dict() attrs['pivots'] = pivot helper.append_op( type='lu', inputs={'X': x}, outputs={'Out': lu, 'Pivots': p, 'Infos': info}, attrs=attrs) if get_infos: return lu, p, info else: return lu, p def lu_unpack(x, y, unpack_ludata=True, unpack_pivots=True, name=None): r""" Unpack L U and P to single matrix tensor . unpack L and U matrix from LU, unpack permutation matrix P from Pivtos . P mat can be get by pivots: # ones = eye(rows) #eye matrix of rank rows # for i in range(cols): # swap(ones[i], ones[pivots[i]]) Args: x (Tensor): The LU tensor get from paddle.linalg.lu, which is combined by L and U. y (Tensor): Pivots get from paddle.linalg.lu. unpack_ludata (bool,optional): whether to unpack L and U from x. Default: True. unpack_pivots (bool, optional): whether to unpack permutation matrix P from Pivtos. Default: True. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: P (Tensor): Permutation matrix P of lu factorization. L (Tensor): The lower triangular matrix tensor of lu factorization. U (Tensor): The upper triangular matrix tensor of lu factorization. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') lu,p,info = paddle.linalg.lu(x, get_infos=True) # >>> lu: # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0.20000000, 0.80000000], # [0.60000000, 0.50000000]]) # >>> p # Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # [3, 3]) # >>> info # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # 0) P,L,U = paddle.linalg.lu_unpack(lu,p) # >>> P # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[0., 1., 0.], # [0., 0., 1.], # [1., 0., 0.]]), # >>> L # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[1. , 0. ], # [0.20000000, 1. ], # [0.60000000, 0.50000000]]), # >>> U # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0. , 0.80000000]])) # one can verify : X = P @ L @ U ; """ if paddle.in_dynamic_mode(): P, L, U = _C_ops.lu_unpack(x, y, 'unpack_ludata', unpack_ludata, 'unpack_pivots', unpack_pivots) return P, L, U check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu_unpack') helper = LayerHelper('lu_unpack', **locals()) p = helper.create_variable_for_type_inference(dtype=x.dtype) l = helper.create_variable_for_type_inference(dtype=x.dtype) u = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['unpack_ludata'] = unpack_ludata attrs['unpack_pivots'] = unpack_pivots helper.append_op( type='lu_unpack', inputs={'X': x, 'Pivots': y}, outputs={'Pmat': p, 'L': l, 'U': u}, attrs=attrs) return p, l, u def eig(x, name=None): """ This API performs the eigenvalue decomposition of a square matrix or a batch of square matrices. .. 
note:: If the matrix is a Hermitian or a real symmetric matrix, please use :ref:`paddle.linalg.eigh` instead, which is much faster. If only eigenvalues is needed, please use :ref:`paddle.linalg.eigvals` instead. If the matrix is of any shape, please use :ref:`paddle.linalg.svd`. This API is only supported on CPU device. The output datatype is always complex for both real and complex input. Args: x (Tensor): A tensor with shape math:`[*, N, N]`, The data type of the x should be one of ``float32``, ``float64``, ``compplex64`` or ``complex128``. name (str, optional): The default value is `None`. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Eigenvalues(Tensors): A tensor with shape math:`[*, N]` refers to the eigen values. Eigenvectors(Tensors): A tensor with shape math:`[*, N, N]` refers to the eigen vectors. Examples: .. code-block:: python import paddle import numpy as np paddle.device.set_device("cpu") x_data = np.array([[1.6707249, 7.2249975, 6.5045543], [9.956216, 8.749598, 6.066444 ], [4.4251957, 1.7983172, 0.370647 ]]).astype("float32") x = paddle.to_tensor(x_data) w, v = paddle.linalg.eig(x) print(w) # Tensor(shape=[3, 3], dtype=complex128, place=CPUPlace, stop_gradient=False, # [[(-0.5061363550800655+0j) , (-0.7971760990842826+0j) , # (0.18518077798279986+0j)], # [(-0.8308237755993192+0j) , (0.3463813401919749+0j) , # (-0.6837005269141947+0j) ], # [(-0.23142567697893396+0j), (0.4944999840400175+0j) , # (0.7058765252952796+0j) ]]) print(v) # Tensor(shape=[3], dtype=complex128, place=CPUPlace, stop_gradient=False, # [ (16.50471283351188+0j) , (-5.5034820550763515+0j) , # (-0.21026087843552282+0j)]) """ if paddle.in_dynamic_mode(): w, v = _C_ops.eig(x) return w, v check_variable_and_dtype( x, 'X', ['float32', 'float64', 'complex64', 'complex128'], 'eig') helper = LayerHelper('eig', **locals()) w = helper.create_variable_for_type_inference(x.dtype) v = helper.create_variable_for_type_inference(x.dtype) inputs = {'X': x} outputs = {'Eigenvalues': w, 'Eigenvectors': v} helper.append_op(type='eig', inputs=inputs, outputs=outputs) return w, v def eigvals(x, name=None): """ Compute the eigenvalues of one or more general matrices. Warning: The gradient kernel of this operator does not yet developed. If you need back propagation through this operator, please replace it with paddle.linalg.eig. Args: x (Tensor): A square matrix or a batch of square matrices whose eigenvalues will be computed. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32, float64, complex64, or complex128. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: A tensor containing the unsorted eigenvalues which has the same batch dimensions with `x`. The eigenvalues are complex-valued even when `x` is real. Examples: .. 
code-block:: python import paddle paddle.set_device("cpu") paddle.seed(1234) x = paddle.rand(shape=[3, 3], dtype='float64') # [[0.02773777, 0.93004224, 0.06911496], # [0.24831591, 0.45733623, 0.07717843], # [0.48016702, 0.14235102, 0.42620817]]) print(paddle.linalg.eigvals(x)) # [(-0.27078833542132674+0j), (0.29962280156230725+0j), (0.8824477020120244+0j)] #complex128 """ check_variable_and_dtype(x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigvals') x_shape = list(x.shape) if len(x_shape) < 2: raise ValueError( "The dimension of Input(x) should be at least 2, but received x's dimention = {}, x's shape = {}". format(len(x_shape), x_shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The last two dimensions of Input(x) should be equal, but received x's shape = {}". format(x_shape)) if paddle.in_dynamic_mode(): return _C_ops.eigvals(x) helper = LayerHelper('eigvals', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type='eigvals', inputs={'X': x}, outputs={'Out': out}) return out def multi_dot(x, name=None): """ Multi_dot is an operator that calculates multiple matrix multiplications. Supports inputs of float16(only GPU support), float32 and float64 dtypes. This function does not support batched inputs. The input tensor in [x] must be 2-D except for the first and last can be 1-D. If the first tensor is a 1-D vector of shape(n, ) it is treated as row vector of shape(1, n), similarly if the last tensor is a 1D vector of shape(n, ), it is treated as a column vector of shape(n, 1). If the first and last tensor are 2-D matrix, then the output is also 2-D matrix, otherwise the output is a 1-D vector. Multi_dot will select the lowest cost multiplication order for calculation. The cost of multiplying two matrices with shapes (a, b) and (b, c) is a * b * c. Given matrices A, B, C with shapes (20, 5), (5, 100), (100, 10) respectively, we can calculate the cost of different multiplication orders as follows: - Cost((AB)C) = 20x5x100 + 20x100x10 = 30000 - Cost(A(BC)) = 5x100x10 + 20x5x10 = 6000 In this case, multiplying B and C first, then multiply A, which is 5 times faster than sequential calculation. Args: x ([Tensor]): The input tensors which is a list Tensor. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The output Tensor. Examples: .. 
code-block:: python import paddle import numpy as np # A * B A_data = np.random.random([3, 4]).astype(np.float32) B_data = np.random.random([4, 5]).astype(np.float32) A = paddle.to_tensor(A_data) B = paddle.to_tensor(B_data) out = paddle.linalg.multi_dot([A, B]) print(out.numpy().shape) # [3, 5] # A * B * C A_data = np.random.random([10, 5]).astype(np.float32) B_data = np.random.random([5, 8]).astype(np.float32) C_data = np.random.random([8, 7]).astype(np.float32) A = paddle.to_tensor(A_data) B = paddle.to_tensor(B_data) C = paddle.to_tensor(C_data) out = paddle.linalg.multi_dot([A, B, C]) print(out.numpy().shape) # [10, 7] """ if paddle.in_dynamic_mode(): return _C_ops.multi_dot(x) check_type(x, 'x', (list, tuple), 'multi_dot') for id, item in enumerate(x): check_variable_and_dtype(item, 'x[' + str(id) + ']', ['float16', 'float32', 'float64'], 'multi_dot') if item.dtype != x[0].dtype: raise TypeError( "All the Tensors in the input must have the same data type.") helper = LayerHelper('multi_dot', **locals()) dtype = helper.input_dtype(input_param_name='x') out = helper.create_variable_for_type_inference(dtype) helper.append_op(type='multi_dot', inputs={"X": x}, outputs={"Out": out}) return out # MASKED: eigh function (lines 2245-2311) def pinv(x, rcond=1e-15, hermitian=False, name=None): r""" Calculate pseudo inverse via SVD(singular value decomposition) of one matrix or batches of regular matrix. .. math:: if hermitian == False: x = u * s * vt (SVD) out = v * 1/s * ut else: x = u * s * ut (eigh) out = u * 1/s * u.conj().transpose(-2,-1) If x is hermitian or symmetric matrix, svd will be replaced with eigh. Args: x(Tensor): The input tensor. Its shape should be (*, m, n) where * is zero or more batch dimensions. m and n can be arbitraty positive number. The data type of x should be float32 or float64 or complex64 or complex128. When data type is complex64 or cpmplex128, hermitian should be set True. rcond(Tensor, optional): the tolerance value to determine when is a singular value zero. Defalut:1e-15. hermitian(bool, optional): indicates whether x is Hermitian if complex or symmetric if real. Default: False. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The tensor with same data type with x. it represents pseudo inverse of x. Its shape should be (*, n, m). Examples: .. code-block:: python import paddle x = paddle.arange(15).reshape((3, 5)).astype('float64') input = paddle.to_tensor(x) out = paddle.linalg.pinv(input) print(input) print(out) # input: # [[0. , 1. , 2. , 3. , 4. ], # [5. , 6. , 7. , 8. , 9. 
], # [10., 11., 12., 13., 14.]] # out: # [[-0.22666667, -0.06666667, 0.09333333], # [-0.12333333, -0.03333333, 0.05666667], # [-0.02000000, 0.00000000, 0.02000000], # [ 0.08333333, 0.03333333, -0.01666667], # [ 0.18666667, 0.06666667, -0.05333333]] # one can verify : x * out * x = x ; # or out * x * out = x ; """ if paddle.in_dynamic_mode(): if not hermitian: # combine svd and matmul op u, s, vt = _C_ops.svd(x, 'full_matrices', False) max_singular_val = _C_ops.reduce_max(s, 'dim', [-1], 'keep_dim', True, \ 'reduce_all', False) rcond = paddle.to_tensor(rcond, dtype=x.dtype) cutoff = rcond * max_singular_val y = float('inf') y = paddle.to_tensor(y, dtype=x.dtype) condition = s > cutoff cond_int = layers.cast(condition, s.dtype) cond_not_int = layers.cast(layers.logical_not(condition), s.dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st, _ = _C_ops.unsqueeze2(singular, 'axes', [-2]) dims = list(range(len(vt.shape))) perm = dims[:-2] + [dims[-1]] + [dims[-2]] v, _ = _C_ops.transpose2(vt, 'axis', perm) out_1 = v * st out_2 = _C_ops.matmul_v2(out_1, u, 'trans_x', False, 'trans_y', True) return out_2 else: # combine eigh and matmul op s, u = _C_ops.eigh(x, 'UPLO', 'L') s_abs = paddle.abs(s) max_singular_val = _C_ops.reduce_max(s_abs, 'dim', [-1], 'keep_dim', True, \ 'reduce_all', False) rcond = paddle.to_tensor(rcond, dtype=s.dtype) cutoff = rcond * max_singular_val y = float('inf') y = paddle.to_tensor(y, dtype=s.dtype) condition = s_abs > cutoff cond_int = layers.cast(condition, s.dtype) cond_not_int = layers.cast(layers.logical_not(condition), s.dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st, _ = _C_ops.unsqueeze2(singular, 'axes', [-2]) out_1 = u * st u_conj = _C_ops.conj(u) out_2 = _C_ops.matmul_v2(out_1, u_conj, 'trans_x', False, 'trans_y', True) return out_2 else: if not hermitian: helper = LayerHelper('pinv', **locals()) dtype = x.dtype check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'pinv') u = helper.create_variable_for_type_inference(dtype) s = helper.create_variable_for_type_inference(dtype) vt = helper.create_variable_for_type_inference(dtype) helper.append_op( type='svd', inputs={'X': [x]}, outputs={'U': u, 'VH': vt, 'S': s}, attrs={'full_matrices': False}, ) max_singular_val = helper.create_variable_for_type_inference(dtype) helper.append_op( type='reduce_max', inputs={'X': s}, outputs={'Out': max_singular_val}, attrs={'dim': [-1], 'keep_dim': True, 'reduce_all': False}) rcond = layers.fill_constant(shape=[1], value=rcond, dtype=dtype) cutoff = rcond * max_singular_val y = float('inf') y = layers.fill_constant(shape=[1], value=y, dtype=dtype) condition = s > cutoff cond_int = layers.cast(condition, dtype) cond_not_int = layers.cast(layers.logical_not(condition), dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st = helper.create_variable_for_type_inference(dtype=dtype) st_shape = helper.create_variable_for_type_inference(dtype=dtype) helper.append_op( type='unsqueeze2', inputs={'X': singular}, attrs={'axes': [-2]}, outputs={'Out': st, 'XShape': st_shape}) dims = list(range(len(vt.shape))) perm = dims[:-2] + [dims[-1]] + [dims[-2]] v = helper.create_variable_for_type_inference(dtype) v_shape = helper.create_variable_for_type_inference(dtype) helper.append_op( 
type='transpose2', inputs={'X': [vt]}, outputs={'Out': [v], 'XShape': [v_shape]}, attrs={'axis': perm}) out_1 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='elementwise_mul', inputs={'X': v, 'Y': st}, outputs={'Out': out_1}, attrs={'axis': -1, 'use_mkldnn': False}) out_1 = helper.append_activation(out_1) out_2 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='matmul_v2', inputs={'X': out_1, 'Y': u}, outputs={'Out': out_2}, attrs={'trans_x': False, 'trans_y': True}, ) return out_2 else: helper = LayerHelper('pinv', **locals()) dtype = x.dtype check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'pinv') if dtype == paddle.complex128: s_type = 'float64' elif dtype == paddle.complex64: s_type = 'float32' else: s_type = dtype u = helper.create_variable_for_type_inference(dtype) s = helper.create_variable_for_type_inference(s_type) helper.append_op( type='eigh', inputs={'X': x}, outputs={'Eigenvalues': s, 'Eigenvectors': u}, attrs={'UPLO': 'L'}) s_abs = helper.create_variable_for_type_inference(s_type) helper.append_op( type='abs', inputs={'X': s}, outputs={'Out': s_abs}) max_singular_val = helper.create_variable_for_type_inference(s_type) helper.append_op( type='reduce_max', inputs={'X': s_abs}, outputs={'Out': max_singular_val}, attrs={'dim': [-1], 'keep_dim': True, 'reduce_all': False}) rcond = layers.fill_constant(shape=[1], value=rcond, dtype=s_type) cutoff = rcond * max_singular_val y = float('inf') y = layers.fill_constant(shape=[1], value=y, dtype=s_type) condition = s_abs > cutoff cond_int = layers.cast(condition, s_type) cond_not_int = layers.cast(layers.logical_not(condition), s_type) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st = helper.create_variable_for_type_inference(dtype=s_type) st_shape = helper.create_variable_for_type_inference(dtype=s_type) helper.append_op( type='unsqueeze2', inputs={'X': singular}, attrs={'axes': [-2]}, outputs={'Out': st, 'XShape': st_shape}) out_1 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='elementwise_mul', inputs={'X': u, 'Y': st}, outputs={'Out': out_1}, attrs={'axis': -1, 'use_mkldnn': False}) out_1 = helper.append_activation(out_1) u_conj = helper.create_variable_for_type_inference(dtype) helper.append_op( type='conj', inputs={'X': u}, outputs={'Out': [u_conj]}) out_2 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='matmul_v2', inputs={'X': out_1, 'Y': u_conj}, outputs={'Out': out_2}, attrs={'trans_x': False, 'trans_y': True}, ) return out_2 def solve(x, y, name=None): r""" Computes the solution of a square system of linear equations with a unique solution for input 'X' and 'Y'. Let :math: `X` be a sqaure matrix or a batch of square matrices, :math:`Y` be a vector/matrix or a batch of vectors/matrices, the equation should be: .. math:: Out = X^-1 * Y Specifically, - This system of linear equations has one solution if and only if input 'X' is invertible. Args: x (Tensor): A square matrix or a batch of square matrices. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): A vector/matrix or a batch of vectors/matrices. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. name(str, optional): Name for the operation (optional, default is None). 
For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of a square system of linear equations with a unique solution for input 'x' and 'y'. Its data type should be the same as that of `x`. Examples: .. code-block:: python # a square system of linear equations: # 2*X0 + X1 = 9 # X0 + 2*X1 = 8 import paddle import numpy as np np_x = np.array([[3, 1],[1, 2]]) np_y = np.array([9, 8]) x = paddle.to_tensor(np_x, dtype="float64") y = paddle.to_tensor(np_y, dtype="float64") out = paddle.linalg.solve(x, y) print(out) # [2., 3.]) """ if paddle.in_dynamic_mode(): return _C_ops.solve(x, y) inputs = {"X": [x], "Y": [y]} helper = LayerHelper("solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type="solve", inputs={"X": x, "Y": y}, outputs={"Out": out}) return out def triangular_solve(x, y, upper=True, transpose=False, unitriangular=False, name=None): r""" Computes the solution of a system of equations with a triangular coefficient matrix `x` and multiple right-hand sides `y` . Input `x` and `y` is 2D matrices or batches of 2D matrices. If the inputs are batches, the outputs is also batches. Args: x (Tensor): The input triangular coefficient matrix. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. upper (bool, optional): Whether to solve the upper-triangular system of equations (default) or the lower-triangular system of equations. Default: True. transpose (bool, optional): whether `x` should be transposed before calculation. Default: False. unitriangular (bool, optional): whether `x` is unit triangular. If True, the diagonal elements of `x` are assumed to be 1 and not referenced from `x` . Default: False. name(str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of the system of equations. Its data type should be the same as that of `x`. Examples: .. code-block:: python # a square system of linear equations: # x1 + x2 + x3 = 0 # 2*x2 + x3 = -9 # -x3 = 5 import paddle import numpy as np x = paddle.to_tensor([[1, 1, 1], [0, 2, 1], [0, 0,-1]], dtype="float64") y = paddle.to_tensor([[0], [-9], [5]], dtype="float64") out = paddle.linalg.triangular_solve(x, y, upper=True) print(out) # [7, -2, -5] """ if paddle.in_dynamic_mode(): return _C_ops.triangular_solve(x, y, 'upper', upper, 'transpose', transpose, 'unitriangular', unitriangular) inputs = {"X": [x], "Y": [y]} helper = LayerHelper("triangular_solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'triangular_solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'triangular_solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='triangular_solve', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs={ 'upper': upper, 'transpose': transpose, 'unitriangular': unitriangular }) return out def cholesky_solve(x, y, upper=False, name=None): r""" Solves a linear system of equations A @ X = B, given A's Cholesky factor matrix u and matrix B. Input `x` and `y` is 2D matrices or batches of 2D matrices. 
If the inputs are batches, the outputs is also batches. Args: x (Tensor): The input matrix which is upper or lower triangular Cholesky factor of square matrix A. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. upper (bool, optional): whether to consider the Cholesky factor as a lower or upper triangular matrix. Default: False. name(str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of the system of equations. Its data type is the same as that of `x`. Examples: .. code-block:: python import paddle u = paddle.to_tensor([[1, 1, 1], [0, 2, 1], [0, 0,-1]], dtype="float64") b = paddle.to_tensor([[0], [-9], [5]], dtype="float64") out = paddle.linalg.cholesky_solve(b, u, upper=True) print(out) # [-2.5, -7, 9.5] """ if paddle.in_dynamic_mode(): return _C_ops.cholesky_solve(x, y, 'upper', upper) helper = LayerHelper("cholesky_solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'cholesky_solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'cholesky_solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='cholesky_solve', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs={'upper': upper}) return out def eigvalsh(x, UPLO='L', name=None): """ Computes the eigenvalues of a complex Hermitian (conjugate symmetric) or a real symmetric matrix. Args: x (Tensor): A tensor with shape :math:`[_, M, M]` , The data type of the input Tensor x should be one of float32, float64, complex64, complex128. UPLO(str, optional): Lower triangular part of a (‘L’, default) or the upper triangular part (‘U’). name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor eigenvalues in ascending order. Examples: .. code-block:: python import numpy as np import paddle x_data = np.array([[1, -2j], [2j, 5]]) x = paddle.to_tensor(x_data) out_value = paddle.eigvalsh(x, UPLO='L') print(out_value) #[0.17157288, 5.82842712] """ if paddle.in_dynamic_mode(): is_test = x.stop_gradient values, _ = _C_ops.eigvalsh(x, 'UPLO', UPLO, 'is_test', is_test) return values def __check_input(x, UPLO): x_shape = list(x.shape) if len(x.shape) < 2: raise ValueError( "Input(input) only support >=2 tensor, but received " "length of Input(input) is %s." % len(x.shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The input matrix must be batches of square matrices. But received x's dimention: {}". format(x_shape)) if UPLO != 'L' and UPLO != 'U': raise ValueError( "UPLO must be L or U. 
But received UPLO is: {}".format(UPLO)) __check_input(x, UPLO) helper = LayerHelper('eigvalsh', **locals()) check_variable_and_dtype(x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigvalsh') out_value = helper.create_variable_for_type_inference(dtype=x.dtype) out_vector = helper.create_variable_for_type_inference(dtype=x.dtype) is_test = x.stop_gradient helper.append_op( type='eigvalsh', inputs={'X': x}, outputs={'Eigenvalues': out_value, 'Eigenvectors': out_vector}, attrs={'UPLO': UPLO, 'is_test': is_test}) return out_value def lstsq(x, y, rcond=None, driver=None, name=None): """ Computes a solution to the least squares problem of a system of linear equations. Args: x (Tensor): A tensor with shape ``(*, M, N)`` , the data type of the input Tensor ``x`` should be one of float32, float64. y (Tensor): A tensor with shape ``(*, M, K)`` , the data type of the input Tensor ``y`` should be one of float32, float64. rcond(float, optional): The default value is None. A float pointing number used to determine the effective rank of ``x``. If ``rcond`` is None, it will be set to max(M, N) times the machine precision of x_dtype. driver(str, optional): The default value is None. The name of LAPACK method to be used. For CPU inputs the valid values are ‘gels’, ‘gelsy’, ‘gelsd, ‘gelss’. For CUDA input, the only valid driver is ‘gels’. If ``driver`` is None, ‘gelsy’ is used for CPU inputs and ‘gels’ for CUDA inputs. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tuple: A tuple of 4 Tensors which is (``solution``, ``residuals``, ``rank``, ``singular_values``). ``solution`` is a tensor with shape ``(*, N, K)``, meaning the least squares solution. ``residuals`` is a tensor with shape ``(*, K)``, meaning the squared residuals of the solutions, which is computed when M > N and every matrix in ``x`` is full-rank, otherwise return an empty tensor. ``rank`` is a tensor with shape ``(*)``, meaning the ranks of the matrices in ``x``, which is computed when ``driver`` in (‘gelsy’, ‘gelsd’, ‘gelss’), otherwise return an empty tensor. ``singular_values`` is a tensor with shape ``(*, min(M, N))``, meaning singular values of the matrices in ``x``, which is computed when ``driver`` in (‘gelsd’, ‘gelss’), otherwise return an empty tensor. Examples: .. code-block:: python import paddle paddle.set_device("cpu") x = paddle.to_tensor([[1, 3], [3, 2], [5, 6.]]) y = paddle.to_tensor([[3, 4, 6], [5, 3, 4], [1, 2, 1.]]) results = paddle.linalg.lstsq(x, y, driver="gelsd") print(results[0]) # [[ 0.78350395, -0.22165027, -0.62371236], # [-0.11340097, 0.78866047, 1.14948535]] print(results[1]) # [19.81443405, 10.43814468, 30.56185532]) print(results[2]) # 2 print(results[3]) # [9.03455734, 1.54167950] x = paddle.to_tensor([[10, 2, 3], [3, 10, 5], [5, 6, 12.]]) y = paddle.to_tensor([[4, 2, 9], [2, 0, 3], [2, 5, 3.]]) results = paddle.linalg.lstsq(x, y, driver="gels") print(results[0]) # [[ 0.39386186, 0.10230173, 0.93606132], # [ 0.10741687, -0.29028133, 0.11892585], # [-0.05115091, 0.51918161, -0.19948854]] print(results[1]) # [] """ device = paddle.get_device() if device == "cpu": if driver not in (None, "gels", "gelss", "gelsd", "gelsy"): raise ValueError( "Only support valid driver is 'gels', 'gelss', 'gelsd', 'gelsy' or None for CPU inputs. But got {}". 
format(driver)) driver = "gelsy" if driver is None else driver elif "gpu" in device: if driver not in (None, "gels"): raise ValueError( "Only support valid driver is 'gels' or None for CUDA inputs. But got {}". format(driver)) driver = "gels" if driver is None else driver else: raise RuntimeError("Only support lstsq api for CPU or CUDA device.") if x.dtype == y.dtype and x.dtype in (paddle.float32, paddle.float64): pass else: raise ValueError( "Only support x and y have the same dtype such as 'float32' and 'float64'." ) if rcond is None: if x.dtype == paddle.float32: rcond = 1e-7 * max(x.shape[-2], x.shape[-1]) elif x.dtype == paddle.float64: rcond = 1e-15 * max(x.shape[-2], x.shape[-1]) if paddle.in_dynamic_mode(): solution, rank, singular_values = _C_ops.lstsq(x, y, "rcond", rcond, "driver", driver) if x.shape[-2] > x.shape[-1]: matmul_out = _varbase_creator(dtype=x.dtype) _C_ops.matmul(x, solution, matmul_out, 'trans_x', False, 'trans_y', False) minus_out = _C_ops.elementwise_sub(matmul_out, y) pow_out = _C_ops.pow(minus_out, 'factor', 2) residuals = _C_ops.reduce_sum(pow_out, 'dim', [-2], 'keepdim', False, 'reduce_all', False) else: residuals = paddle.empty(shape=[0], dtype=x.dtype) if driver == "gels": rank = paddle.empty(shape=[0], dtype=paddle.int32) singular_values = paddle.empty(shape=[0], dtype=x.dtype) elif driver == "gelsy": singular_values = paddle.empty(shape=[0], dtype=x.dtype) return solution, residuals, rank, singular_values helper = LayerHelper('lstsq', **locals()) check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq') check_variable_and_dtype( y, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq') solution = helper.create_variable_for_type_inference(dtype=x.dtype) residuals = helper.create_variable_for_type_inference(dtype=x.dtype) rank = helper.create_variable_for_type_inference(dtype=paddle.int32) singular_values = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='lstsq', inputs={'X': x, 'Y': y}, outputs={ 'Solution': solution, 'Rank': rank, 'SingularValues': singular_values }, attrs={'rcond': rcond, 'driver': driver}) matmul_out = helper.create_variable_for_type_inference(dtype=x.dtype) minus_out = helper.create_variable_for_type_inference(dtype=x.dtype) pow_out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matmul_v2', inputs={'X': x, 'Y': solution}, outputs={'Out': matmul_out}, attrs={ 'trans_x': False, 'trans_y': False, }) helper.append_op( type='elementwise_sub', inputs={'X': matmul_out, 'Y': y}, outputs={'Out': minus_out}) helper.append_op( type='pow', inputs={'X': minus_out}, outputs={'Out': pow_out}, attrs={'factor': 2}) helper.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': residuals}, attrs={'dim': [-2], 'keep_dim': False, 'reduce_all': False}) if driver == "gels": rank = paddle.static.data(name='rank', shape=[0]) singular_values = paddle.static.data(name='singular_values', shape=[0]) elif driver == "gelsy": singular_values = paddle.static.data(name='singular_values', shape=[0]) return solution, residuals, rank, singular_values
def eigh(x, UPLO='L', name=None):
    """
    Compute the eigenvalues and eigenvectors of a complex Hermitian
    (conjugate symmetric) or a real symmetric matrix.

    Args:
        x (Tensor): A tensor with shape :math:`[*, N, N]`. The data type of the
            input Tensor x should be one of float32, float64, complex64, complex128.
        UPLO (str, optional): 'L' uses the lower triangular part of the matrix
            (default), 'U' uses the upper triangular part.
        name (str, optional): The default value is None. Normally there is no need
            for user to set this property. For more information, please refer to
            :ref:`api_guide_Name`.

    Returns:
        out_value(Tensor): A Tensor with shape [*, N] and data type of float32 or
            float64. The eigenvalues of eigh op.
        out_vector(Tensor): A Tensor with shape [*, N, N] and data type of float32,
            float64, complex64 or complex128. The eigenvectors of eigh op.

    Examples:
        .. code-block:: python

            import numpy as np
            import paddle

            x_data = np.array([[1, -2j], [2j, 5]])
            x = paddle.to_tensor(x_data)
            out_value, out_vector = paddle.linalg.eigh(x, UPLO='L')
            print(out_value)
            #[0.17157288, 5.82842712]
            print(out_vector)
            #[(-0.9238795325112867+0j), (-0.3826834323650898+0j)],
            #[ 0.3826834323650898j    , -0.9238795325112867j    ]]

    """
    if paddle.in_dynamic_mode():
        return _C_ops.eigh(x, 'UPLO', UPLO)

    def __check_input(x, UPLO):
        x_shape = list(x.shape)
        if len(x.shape) < 2:
            raise ValueError(
                "Input(input) only support >=2 tensor, but received "
                "length of Input(input) is %s." % len(x.shape))
        if x_shape[-1] != x_shape[-2]:
            raise ValueError(
                "The input matrix must be batches of square matrices. But received x's dimension: {}".
                format(x_shape))
        if UPLO != 'L' and UPLO != 'U':
            raise ValueError(
                "UPLO must be L or U. But received UPLO is: {}".format(UPLO))

    __check_input(x, UPLO)

    helper = LayerHelper('eigh', **locals())
    check_variable_and_dtype(
        x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigh')

    out_value = helper.create_variable_for_type_inference(dtype=x.dtype)
    out_vector = helper.create_variable_for_type_inference(dtype=x.dtype)

    helper.append_op(
        type='eigh',
        inputs={'X': x},
        outputs={'Eigenvalues': out_value,
                 'Eigenvectors': out_vector},
        attrs={'UPLO': UPLO})
    return out_value, out_vector
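An illustrative usage sketch (not part of the original record): it exercises the `eigh` implementation above in dynamic mode and verifies the decomposition. The 2x2 symmetric matrix and the reconstruction check are hypothetical additions.

    import paddle

    # hypothetical 2x2 real symmetric input
    a = paddle.to_tensor([[2.0, 1.0], [1.0, 2.0]])
    w, v = paddle.linalg.eigh(a, UPLO='L')  # w: eigenvalues in ascending order, v: eigenvectors as columns
    # sanity check: A should be recovered as V @ diag(w) @ V^T
    recon = paddle.matmul(v * w, v, transpose_y=True)
    print(w)      # expected roughly [1., 3.]
    print(recon)  # expected roughly [[2., 1.], [1., 2.]]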
2245
2311
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from ..fluid.layer_helper import LayerHelper from ..framework import _varbase_creator, _dygraph_tracer from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype from ..static import Variable from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode from ..fluid.layers import transpose, cast # noqa: F401 from ..fluid import layers import paddle from paddle.common_ops_import import core from paddle.common_ops_import import VarDesc from paddle import _C_ops __all__ = [] def matmul(x, y, transpose_x=False, transpose_y=False, name=None): """ Applies matrix multiplication to two tensors. `matmul` follows the complete broadcast rules, and its behavior is consistent with `np.matmul`. Currently, the input tensors' number of dimensions can be any, `matmul` can be used to achieve the `dot`, `matmul` and `batchmatmul`. The actual behavior depends on the shapes of :math:`x`, :math:`y` and the flag values of :attr:`transpose_x`, :attr:`transpose_y`. Specifically: - If a transpose flag is specified, the last two dimensions of the tensor are transposed. If the tensor is ndim-1 of shape, the transpose is invalid. If the tensor is ndim-1 of shape :math:`[D]`, then for :math:`x` it is treated as :math:`[1, D]`, whereas for :math:`y` it is the opposite: It is treated as :math:`[D, 1]`. The multiplication behavior depends on the dimensions of `x` and `y`. Specifically: - If both tensors are 1-dimensional, the dot product result is obtained. - If both tensors are 2-dimensional, the matrix-matrix product is obtained. - If the `x` is 1-dimensional and the `y` is 2-dimensional, a `1` is prepended to its dimension in order to conduct the matrix multiply. After the matrix multiply, the prepended dimension is removed. - If the `x` is 2-dimensional and `y` is 1-dimensional, the matrix-vector product is obtained. - If both arguments are at least 1-dimensional and at least one argument is N-dimensional (where N > 2), then a batched matrix multiply is obtained. If the first argument is 1-dimensional, a 1 is prepended to its dimension in order to conduct the batched matrix multiply and removed after. If the second argument is 1-dimensional, a 1 is appended to its dimension for the purpose of the batched matrix multiple and removed after. The non-matrix (exclude the last two dimensions) dimensions are broadcasted according the broadcast rule. For example, if input is a (j, 1, n, m) tensor and the other is a (k, m, p) tensor, out will be a (j, k, n, p) tensor. Args: x (Tensor): The input tensor which is a Tensor. y (Tensor): The input tensor which is a Tensor. transpose_x (bool): Whether to transpose :math:`x` before multiplication. transpose_y (bool): Whether to transpose :math:`y` before multiplication. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The output Tensor. Examples: .. 
code-block:: python import paddle import numpy as np # vector * vector x_data = np.random.random([10]).astype(np.float32) y_data = np.random.random([10]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [1] # matrix * vector x_data = np.random.random([10, 5]).astype(np.float32) y_data = np.random.random([5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10] # batched matrix * broadcasted vector x_data = np.random.random([10, 5, 2]).astype(np.float32) y_data = np.random.random([2]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 5] # batched matrix * batched matrix x_data = np.random.random([10, 5, 2]).astype(np.float32) y_data = np.random.random([10, 2, 5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 5, 5] # batched matrix * broadcasted matrix x_data = np.random.random([10, 1, 5, 2]).astype(np.float32) y_data = np.random.random([1, 3, 2, 5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 3, 5, 5] """ if in_dygraph_mode(): return _C_ops.final_state_matmul(x, y, transpose_x, transpose_y) if _in_legacy_dygraph(): op_type = 'matmul_v2' op = getattr(_C_ops, op_type) return op(x, y, 'trans_x', transpose_x, 'trans_y', transpose_y) attrs = { 'trans_x': transpose_x, 'trans_y': transpose_y, } def __check_input(x, y): var_names = {'x': x, 'y': y} for name, val in var_names.items(): check_variable_and_dtype( val, name, ['float16', 'float32', 'float64', 'complex64', 'complex128'], 'matmul') __check_input(x, y) helper = LayerHelper('matmul_v2', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matmul_v2', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs=attrs) return out def norm(x, p='fro', axis=None, keepdim=False, name=None): """ Returns the matrix norm (Frobenius) or vector norm (the 1-norm, the Euclidean or 2-norm, and in general the p-norm for p > 0) of a given tensor. .. note:: This norm API is different from `numpy.linalg.norm`. This api supports high-order input tensors (rank >= 3), and certain axis need to be pointed out to calculate the norm. But `numpy.linalg.norm` only supports 1-D vector or 2-D matrix as input tensor. For p-order matrix norm, this api actually treats matrix as a flattened vector to calculate the vector norm, NOT REAL MATRIX NORM. Args: x (Tensor): The input tensor could be N-D tensor, and the input data type could be float32 or float64. p (float|string, optional): Order of the norm. Supported values are `fro`, `0`, `1`, `2`, `inf`, `-inf` and any positive real number yielding the corresponding p-norm. Not supported: ord < 0 and nuclear norm. Default value is `fro`. axis (int|list|tuple, optional): The axis on which to apply norm operation. If axis is int or list(int)/tuple(int) with only one element, the vector norm is computed over the axis. If `axis < 0`, the dimension to norm operation is rank(input) + axis. If axis is a list(int)/tuple(int) with two elements, the matrix norm is computed over the axis. Defalut value is `None`. keepdim (bool, optional): Whether to reserve the reduced dimension in the output Tensor. 
The result tensor will have fewer dimension than the :attr:`input` unless :attr:`keepdim` is true, default value is False. name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: results of norm operation on the specified axis of input tensor, it's data type is the same as input's Tensor. Examples: .. code-block:: python import paddle import numpy as np shape=[2, 3, 4] np_input = np.arange(24).astype('float32') - 12 np_input = np_input.reshape(shape) x = paddle.to_tensor(np_input) #[[[-12. -11. -10. -9.] [ -8. -7. -6. -5.] [ -4. -3. -2. -1.]] # [[ 0. 1. 2. 3.] [ 4. 5. 6. 7.] [ 8. 9. 10. 11.]]] # compute frobenius norm along last two dimensions. out_fro = paddle.linalg.norm(x, p='fro', axis=[0,1]) # out_fro.numpy() [17.435596 16.911535 16.7332 16.911535] # compute 2-order vector norm along last dimension. out_pnorm = paddle.linalg.norm(x, p=2, axis=-1) #out_pnorm.numpy(): [[21.118711 13.190906 5.477226] # [ 3.7416575 11.224972 19.131126]] # compute 2-order norm along [0,1] dimension. out_pnorm = paddle.linalg.norm(x, p=2, axis=[0,1]) #out_pnorm.numpy(): [17.435596 16.911535 16.7332 16.911535] # compute inf-order norm out_pnorm = paddle.linalg.norm(x, p=np.inf) #out_pnorm.numpy() = [12.] out_pnorm = paddle.linalg.norm(x, p=np.inf, axis=0) #out_pnorm.numpy(): [[12. 11. 10. 9.] [8. 7. 6. 7.] [8. 9. 10. 11.]] # compute -inf-order norm out_pnorm = paddle.linalg.norm(x, p=-np.inf) #out_pnorm.numpy(): [0.] out_pnorm = paddle.linalg.norm(x, p=-np.inf, axis=0) #out_pnorm.numpy(): [[0. 1. 2. 3.] [4. 5. 6. 5.] [4. 3. 2. 1.]] """ def frobenius_norm(input, dim=None, keepdim=False, name=None): """ The frobenius norm OP is to calculate the frobenius norm of certain two dimensions of Tensor `input`. Args: input (Variable): Tensor, data type float32, float64. dim (list, optional): None for last two dimensions. keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False. """ if dim is not None and not (isinstance(dim, list) and len(dim) == 2): raise ValueError( "The dim of frobenius norm op should be None or two elements list!" ) if paddle.in_dynamic_mode(): if dim is None: return _C_ops.frobenius_norm(input, 'keep_dim', keepdim, 'reduce_all', True) return _C_ops.frobenius_norm(input, 'dim', dim, 'keep_dim', keepdim, 'reduce_all', False) attrs = {'dim': dim, 'keep_dim': keepdim, 'reduce_all': False} if dim is None: attrs['reduce_all'] = True check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'frobenius_norm') helper = LayerHelper('frobenius_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op( type='frobenius_norm', inputs={'X': input}, outputs={'Out': out}, attrs=attrs) return out def vector_norm(input, porder=None, axis=None, keepdim=False, asvector=False, name=None): """ Calculate the p-order vector norm for certain dimension of Tensor `input`. Args: input (Variable): Tensor, data type float32, float64. porder (float, optional): None for porder=2.0. axis (int, optional): None for last dimension. keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False. 
""" if paddle.in_dynamic_mode(): if axis is None: axis = -1 return _C_ops.p_norm(input, 'porder', porder, 'axis', axis, 'keepdim', keepdim, 'asvector', asvector) if porder is not None: check_type(porder, 'porder', (float, int), 'p_norm') if axis is not None: check_type(axis, 'axis', (int), 'p_norm') check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'p_norm') attrs = { 'axis': axis if axis is not None else -1, 'porder': float(porder) if porder is not None else 2.0, 'keepdim': keepdim, 'asvector': asvector, 'epsilon': 1e-12, } helper = LayerHelper('p_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op( type='p_norm', inputs={'X': input}, outputs={'Out': out}, attrs=attrs) return out def inf_norm(input, porder=None, axis=axis, keepdim=False, asvector=False, name=None): helper = LayerHelper('frobenius_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op(type='abs', inputs={'X': input}, outputs={'Out': out}) reduce_out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) reduce_all = True if axis == None or axis == [] or asvector == True else False axis = axis if axis != None and axis != [] else [0] reduce_type = 'reduce_max' if porder == np.float( 'inf') else 'reduce_min' helper.append_op( type=reduce_type, inputs={'X': out}, outputs={'Out': reduce_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) return reduce_out def p_matrix_norm(input, porder=1., axis=axis, keepdim=False, name=None): """ NOTE: This function actually treats the matrix as flattened vector to calculate vector norm instead of matrix norm. """ block = LayerHelper('norm', **locals()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) abs_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='abs', inputs={'X': input}, outputs={'Out': abs_out}) pow_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='pow', inputs={'X': abs_out}, outputs={'Out': pow_out}, attrs={'factor': porder}) sum_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': sum_out}, attrs={ 'dim': axis, 'keep_dim': keepdim, 'reduce_all': True if axis is None else False }) porder block.append_op( type='pow', inputs={'X': sum_out}, outputs={'Out': out}, attrs={'factor': float(1. / porder)}) return out if axis is None and p is not None: if isinstance(p, str): if p == "fro": return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name) else: raise ValueError( "only valid string values are 'fro', found {}".format(p)) elif isinstance(p, (int, float)): return vector_norm( x, porder=p, axis=axis, keepdim=keepdim, asvector=True, name=name) else: raise ValueError("only valid p type is string or float, found {}". 
format(type(p))) if isinstance(axis, tuple): axis = list(axis) if isinstance(axis, list) and len(axis) == 1: axis = axis[0] #calculate vector norm, where axis is int or list with only one integer if isinstance(axis, int): if isinstance(p, str): if p == "fro": return vector_norm( x, porder=2, axis=axis, keepdim=keepdim, asvector=False, name=name) else: raise ValueError( "only valid string values are 'fro', found {}".format(p)) elif isinstance(p, (int, float)): return vector_norm( x, axis=axis, porder=p, keepdim=keepdim, asvector=False, name=name) else: raise ValueError( "unspport p for p-order vector norm. except float, found {}". format(p)) #calculate matrix norm, where axis is list with two integers elif isinstance(axis, list) and len(axis) == 2: if p == "fro": return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name) elif p == np.inf or p == -np.inf: return inf_norm(x, porder=p, axis=axis, keepdim=keepdim, name=name) elif p == 0: raise ValueError( "just suport axis type int or list (length of list <=1) if p = 0, found {}". format(axis)) else: return p_matrix_norm( x, porder=p, axis=axis, keepdim=keepdim, name=name) else: raise ValueError( "except axis type int or list (length of list <=2), found {}". format(axis)) def dist(x, y, p=2, name=None): r""" This OP returns the p-norm of (x - y). It is not a norm in a strict sense, only as a measure of distance. The shapes of x and y must be broadcastable. The definition is as follows, for details, please refer to the `numpy's broadcasting <https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`_: - Each input has at least one dimension. - Match the two input dimensions from back to front, the dimension sizes must either be equal, one of them is 1, or one of them does not exist. Where, z = x - y, the shapes of x and y are broadcastable, then the shape of z can be obtained as follows: 1. If the number of dimensions of x and y are not equal, prepend 1 to the dimensions of the tensor with fewer dimensions. For example, The shape of x is [8, 1, 6, 1], the shape of y is [7, 1, 5], prepend 1 to the dimension of y. x (4-D Tensor): 8 x 1 x 6 x 1 y (4-D Tensor): 1 x 7 x 1 x 5 2. Determine the size of each dimension of the output z: choose the maximum value from the two input dimensions. z (4-D Tensor): 8 x 7 x 6 x 5 If the number of dimensions of the two inputs are the same, the size of the output can be directly determined in step 2. When p takes different values, the norm formula is as follows: When p = 0, defining $0^0=0$, the zero-norm of z is simply the number of non-zero elements of z. .. math:: ||z||_{0}=\lim_{p \\rightarrow 0}\sum_{i=1}^{m}|z_i|^{p} When p = inf, the inf-norm of z is the maximum element of z. .. math:: ||z||_\infty=\max_i |z_i| When p = -inf, the negative-inf-norm of z is the minimum element of z. .. math:: ||z||_{-\infty}=\min_i |z_i| Otherwise, the p-norm of z follows the formula, .. math:: ||z||_{p}=(\sum_{i=1}^{m}|z_i|^p)^{\\frac{1}{p}} Args: x (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64. y (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64. p (float, optional): The norm to be computed, its data type is float32 or float64. Default: 2. Returns: Tensor: Tensor that is the p-norm of (x - y). Examples: .. code-block:: python import paddle import numpy as np x = paddle.to_tensor(np.array([[3, 3],[3, 3]]), "float32") y = paddle.to_tensor(np.array([[3, 3],[3, 1]]), "float32") out = paddle.dist(x, y, 0) print(out) # out = [1.] out = paddle.dist(x, y, 2) print(out) # out = [2.] 
out = paddle.dist(x, y, float("inf")) print(out) # out = [2.] out = paddle.dist(x, y, float("-inf")) print(out) # out = [0.] """ check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'dist') check_variable_and_dtype(y, 'dtype', ['float32', 'float64'], 'dist') check_type(p, 'p', (float, int), 'dist') helper = LayerHelper("dist", **locals()) out = helper.create_variable_for_type_inference(x.dtype) inputs = {"X": [x], "Y": [y]} outputs = {'Out': [out]} attrs = {"p": float(p)} helper.append_op( type='dist', inputs=inputs, outputs={'Out': out}, attrs=attrs) return out def cond(x, p=None, name=None): """ Computes the condition number of a matrix or batches of matrices with respect to a matrix norm ``p``. Args: x (Tensor): The input tensor could be tensor of shape ``(*, m, n)`` where ``*`` is zero or more batch dimensions for ``p`` in ``(2, -2)``, or of shape ``(*, n, n)`` where every matrix is invertible for any supported ``p``. And the input data type could be ``float32`` or ``float64``. p (float|string, optional): Order of the norm. Supported values are `fro`, `nuc`, `1`, `-1`, `2`, `-2`, `inf`, `-inf`. Default value is `None`, meaning that the order of the norm is `2`. name (str, optional): The default value is `None`. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: computing results of condition number, its data type is the same as input Tensor ``x``. Examples: .. code-block:: python import paddle import numpy as np x = paddle.to_tensor([[1., 0, -1], [0, 1, 0], [1, 0, 1]]) # compute conditional number when p is None out = paddle.linalg.cond(x) # out.numpy() [1.4142135] # compute conditional number when order of the norm is 'fro' out_fro = paddle.linalg.cond(x, p='fro') # out_fro.numpy() [3.1622777] # compute conditional number when order of the norm is 'nuc' out_nuc = paddle.linalg.cond(x, p='nuc') # out_nuc.numpy() [9.2426405] # compute conditional number when order of the norm is 1 out_1 = paddle.linalg.cond(x, p=1) # out_1.numpy() [2.] # compute conditional number when order of the norm is -1 out_minus_1 = paddle.linalg.cond(x, p=-1) # out_minus_1.numpy() [1.] # compute conditional number when order of the norm is 2 out_2 = paddle.linalg.cond(x, p=2) # out_2.numpy() [1.4142135] # compute conditional number when order of the norm is -1 out_minus_2 = paddle.linalg.cond(x, p=-2) # out_minus_2.numpy() [0.70710677] # compute conditional number when order of the norm is inf out_inf = paddle.linalg.cond(x, p=np.inf) # out_inf.numpy() [2.] # compute conditional number when order of the norm is -inf out_minus_inf = paddle.linalg.cond(x, p=-np.inf) # out_minus_inf.numpy() [1.] 
a = paddle.to_tensor(np.random.randn(2, 4, 4).astype('float32')) # a.numpy() # [[[ 0.14063153 -0.996288 0.7996131 -0.02571543] # [-0.16303636 1.5534962 -0.49919784 -0.04402903] # [-1.1341571 -0.6022629 0.5445269 0.29154757] # [-0.16816919 -0.30972657 1.7521842 -0.5402487 ]] # [[-0.58081484 0.12402827 0.7229862 -0.55046535] # [-0.15178485 -1.1604939 0.75810957 0.30971205] # [-0.9669573 1.0940945 -0.27363303 -0.35416734] # [-1.216529 2.0018666 -0.7773689 -0.17556527]]] a_cond_fro = paddle.linalg.cond(a, p='fro') # a_cond_fro.numpy() [31.572273 28.120834] b = paddle.to_tensor(np.random.randn(2, 3, 4).astype('float64')) # b.numpy() # [[[ 1.61707487 0.46829144 0.38130416 0.82546736] # [-1.72710298 0.08866375 -0.62518804 0.16128892] # [-0.02822879 -1.67764516 0.11141444 0.3220113 ]] # [[ 0.22524372 0.62474921 -0.85503233 -1.03960523] # [-0.76620689 0.56673047 0.85064753 -0.45158196] # [ 1.47595418 2.23646462 1.5701758 0.10497519]]] b_cond_2 = paddle.linalg.cond(b, p=2) # b_cond_2.numpy() [3.30064451 2.51976252] """ def mat_norm(input, porder=1., axis=None): """ NOTE: Calculate the matrix norm of a square matrix or batches of square matrices, when porder is in (1, -1, inf, -inf) """ reduce_all = True if axis is None or axis == [] else False axis = axis if axis != None and axis != [] else [0] keepdim = False if paddle.in_dynamic_mode(): abs_out = _C_ops.abs(input) sum_out = _C_ops.reduce_sum(abs_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == 1 or porder == np.inf: return _C_ops.reduce_max(sum_out, 'dim', [-1], 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == -1 or porder == -np.inf: return _C_ops.reduce_min(sum_out, 'dim', [-1], 'keepdim', keepdim, 'reduce_all', reduce_all) block = LayerHelper('norm', **locals()) abs_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='abs', inputs={'X': input}, outputs={'Out': abs_out}) block.append_op( type='reduce_sum', inputs={'X': abs_out}, outputs={'Out': sum_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) if porder == 1 or porder == np.inf: block.append_op( type='reduce_max', inputs={'X': sum_out}, outputs={'Out': out}, attrs={ 'dim': [-1], 'keep_dim': keepdim, 'reduce_all': reduce_all }) if porder == -1 or porder == -np.inf: block.append_op( type='reduce_min', inputs={'X': sum_out}, outputs={'Out': out}, attrs={ 'dim': [-1], 'keep_dim': keepdim, 'reduce_all': reduce_all }) return out def fro_norm(input, porder=2, axis=[-1]): """ NOTE: Calculate the frobenius norm of a square matrix or batches of square matrices. """ reduce_all = True if axis is None or axis == [] else False keepdim = False if paddle.in_dynamic_mode(): pow_out = _C_ops.pow(input, 'factor', porder) sum_out_1 = _C_ops.reduce_sum(pow_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) sum_out_2 = _C_ops.reduce_sum(sum_out_1, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) return _C_ops.pow(sum_out_2, 'factor', float(1. 
/ porder)) block = LayerHelper('norm', **locals()) pow_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out_1 = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out_2 = block.create_variable_for_type_inference( dtype=block.input_dtype()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='pow', inputs={'X': input}, outputs={'Out': pow_out}, attrs={'factor': porder}) block.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': sum_out_1}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='reduce_sum', inputs={'X': sum_out_1}, outputs={'Out': sum_out_2}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='pow', inputs={'X': sum_out_2}, outputs={'Out': out}, attrs={'factor': float(1. / porder)}) return out def svd_norm(input, porder, axis=[-1]): """ NOTE: Calculate the matrix norm, which is related to singular values, of a matrix or batches of matrices, including nuclear norm, 2-norm and (-2)-norm. """ reduce_all = True if axis is None or axis == [] else False keepdim = False u, s, vh = svd(input, full_matrices=False) if paddle.in_dynamic_mode(): if porder == "nuc": return _C_ops.reduce_sum(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) max_out = _C_ops.reduce_max(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) min_out = _C_ops.reduce_min(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == 2: return _C_ops.elementwise_div(max_out, min_out, 'aixs', axis, 'use_mkldnn', False) if porder == -2: return _C_ops.elementwise_div(min_out, max_out, 'aixs', axis, 'use_mkldnn', False) block = LayerHelper('norm', **locals()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) if porder == "nuc": block.append_op( type='reduce_sum', inputs={'X': s}, outputs={'Out': out}, attrs={ 'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all }) return out max_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) min_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='reduce_max', inputs={'X': s}, outputs={'Out': max_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='reduce_min', inputs={'X': s}, outputs={'Out': min_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) if porder == 2: block.append_op( type='elementwise_div', inputs={'X': max_out, 'Y': min_out}, outputs={'Out': out}, attrs={'aixs': axis, 'use_mkldnn': False}) return out if porder == -2: block.append_op( type='elementwise_div', inputs={'X': min_out, 'Y': max_out}, outputs={'Out': out}, attrs={'aixs': axis, 'use_mkldnn': False}) return out def empty_tensor(input, shape): if paddle.in_dynamic_mode(): return input.reshape(shape) raise ValueError("only support x is nonempty tensor in static mode") x_shape = list(x.shape) if not len(x_shape) >= 2: raise ValueError("input should be a matrix or batches of matrices, " + "but the dimention of received input is {}".format( len(x_shape))) if p == None: p = 2 x_size = 0 if (0 in x_shape) else 1 if p in ("fro", "nuc", 1, -1, np.inf, -np.inf): if x_shape[len(x_shape) - 1] == x_shape[len(x_shape) - 2]: if x_size == 0: return empty_tensor(x, x_shape[:-2]) x_inv = x.inverse() if p == "fro": return fro_norm(x) * fro_norm(x_inv) if p == "nuc": return svd_norm(x, p) * svd_norm(x_inv, p) if p in (1, -1): return mat_norm( x, 
porder=p, axis=[-2]) * mat_norm( x_inv, porder=p, axis=[-2]) if p in (np.inf, -np.inf): return mat_norm( x, porder=p, axis=[-1]) * mat_norm( x_inv, porder=p, axis=[-1]) else: raise ValueError("only support p is {} when input is a ".format(p) + "square matrix or batches of square matrices") elif p in (2, -2): if x_size == 0: return empty_tensor(x, x_shape[:-2]) return svd_norm(x, porder=p) else: raise ValueError( "unsupported {} for p, only supporting ('fro', 'nuc', ".format( p) + "1, -1, 2, -2, inf, -inf) or none") def dot(x, y, name=None): """ This operator calculates inner product for vectors. .. note:: Support 1-d and 2-d Tensor. When it is 2d, the first dimension of this matrix is the batch dimension, which means that the vectors of multiple batches are dotted. Parameters: x(Tensor): 1-D or 2-D ``Tensor``. Its dtype should be ``float32``, ``float64``, ``int32``, ``int64`` y(Tensor): 1-D or 2-D ``Tensor``. Its dtype soulde be ``float32``, ``float64``, ``int32``, ``int64`` name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. Details: :ref:`api_guide_Name` Returns: Tensor: the calculated result Tensor. Examples: .. code-block:: python import paddle import numpy as np x_data = np.random.uniform(0.1, 1, [10]).astype(np.float32) y_data = np.random.uniform(1, 3, [10]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.dot(x, y) print(z) """ op_type = 'dot' # skip var type check in dygraph mode to improve efficiency if paddle.in_dynamic_mode(): op = getattr(_C_ops, op_type) return op(x, y) assert x is not None, 'x cannot be None in {}'.format(op_type) assert y is not None, 'y cannot be None in {}'.format(op_type) check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], op_type) check_variable_and_dtype(y, 'y', ['float32', 'float64', 'int32', 'int64'], op_type) helper = LayerHelper(op_type, **locals()) if name is None: out = helper.create_variable_for_type_inference(dtype=x.dtype) else: out = helper.create_variable( name=name, dtype=x.dtype, persistable=False) helper.append_op( type="dot", inputs={'X': x, 'Y': y}, attrs={}, outputs={"Out": out}) return out def cov(x, rowvar=True, ddof=True, fweights=None, aweights=None, name=None): """ Estimate the covariance matrix of the input variables, given data and weights. A covariance matrix is a square matrix, indicate the covariance of each pair variables in the input matrix. For example, for an N-dimensional samples X=[x1,x2,…xN]T, then the covariance matrix element Cij is the covariance of xi and xj. The element Cii is the variance of xi itself. Parameters: x(Tensor): A N-D(N<=2) Tensor containing multiple variables and observations. By default, each row of x represents a variable. Also see rowvar below. rowvar(Bool, optional): If rowvar is True (default), then each row represents a variable, with observations in the columns. Default: True ddof(Bool, optional): If ddof=True will return the unbiased estimate, and ddof=False will return the simple average. Default: True fweights(Tensor, optional): 1-D Tensor of integer frequency weights; The number of times each observation vector should be repeated. Default: None aweights(Tensor, optional): 1-D Tensor of observation vector weights. How important of the observation vector, larger data means this element is more important. Default: None name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. 
Details: :ref:`api_guide_Name` Returns: Tensor: The covariance matrix Tensor of the variables. Examples: .. code-block:: python import paddle xt = paddle.rand((3,4)) paddle.linalg.cov(xt) ''' Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, [[0.07918842, 0.06127326, 0.01493049], [0.06127326, 0.06166256, 0.00302668], [0.01493049, 0.00302668, 0.01632146]]) ''' """ op_type = 'cov' if len(x.shape) > 2 or len(x.shape) < 1: raise ValueError( "Input(x) only support N-D (1<=N<=2) tensor in cov, but received " "length of Input(input) is %s." % len(x.shape)) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cov') nx = x if len(x.shape) == 1: nx = x.reshape((1, -1)) if not rowvar and nx.shape[0] != 1: nx = nx.t() w = None observation_num = nx.shape[1] if fweights is not None: w = fweights.astype(nx.dtype) if len(w.shape) > 1: raise ValueError( "Input(fweights) only support N-D (N<=1) tensor in cov, but received " "shape of Input(input) is %s." % len(fweights.shape)) if fweights.shape[0] != observation_num: raise ValueError( "The number of Input(fweights) should equal to x's dim[1]: {}, but received " "size of Input(fweights) is {}.".format(observation_num, fweights.shape[0])) if fweights.min() < 0: raise ValueError( "The value of Input(fweights) cannot be negtive, but received " "min of Input(fweights) is {}.".format(fweights.min())) if not paddle.all(fweights == paddle.round(fweights.astype('float64'))): raise ValueError("Input(fweights) must be integer ") if aweights is not None: aw = aweights.astype(nx.dtype) if len(aw.shape) > 1: raise ValueError( "Input(aweights) only support N-D (N<=1) tensor in cov, but received " "length of Input(input) is %s." % len(aweights.shape)) check_variable_and_dtype(aweights, 'dtype', ['float32', 'float64'], 'cov') if aweights.shape[0] != observation_num: raise ValueError( "The number of Input(aweights) should equal to x's dim[1]: {}, but received " "size of Input(aweights) is {}.".format(observation_num, aweights.shape[0])) if aweights.min() < 0: raise ValueError( "The value of Input(aweights) cannot be negtive, but received " "min of Input(aweights) is {}.".format(aweights.min())) if w is not None: w = w * aw else: w = aw w_sum = paddle.to_tensor(observation_num, dtype=nx.dtype) if fweights is not None or aweights is not None: w_sum = w.sum() if w_sum.item() == 0: raise ValueError("The sum of weights is zero, can't be normalized.") if w is not None: nx_w = nx * w avg = (nx_w).sum(axis=1) / w_sum else: avg = nx.sum(axis=1) / w_sum nx_w = nx if w is not None and aweights is not None and ddof == True: norm_factor = w_sum - (w * aweights).sum() / w_sum else: norm_factor = w_sum - ddof if norm_factor <= 0: norm_factor = paddle.to_tensor(0, dtype=nx.dtype) nx = nx - avg.unsqueeze(1) xxt = paddle.mm(nx, nx_w.t().conj()) cov = paddle.divide(xxt, norm_factor).squeeze() return cov def t(input, name=None): """ Transpose <=2-D tensor. 0-D and 1-D tensors are returned as it is and 2-D tensor is equal to the paddle.transpose function which perm dimensions set 0 and 1. Args: input (Tensor): The input Tensor. It is a N-D (N<=2) Tensor of data types float16, float32, float64, int32. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` Returns: Tensor: A transposed n-D Tensor, with data type being float16, float32, float64, int32, int64. For Example: .. 
code-block:: text # Example 1 (0-D tensor) x = tensor([0.79]) paddle.t(x) = tensor([0.79]) # Example 2 (1-D tensor) x = tensor([0.79, 0.84, 0.32]) paddle.t(x) = tensor([0.79, 0.84, 0.32]) # Example 3 (2-D tensor) x = tensor([0.79, 0.84, 0.32], [0.64, 0.14, 0.57]) paddle.t(x) = tensor([0.79, 0.64], [0.84, 0.14], [0.32, 0.57]) Examples: .. code-block:: python import paddle x = paddle.ones(shape=[2, 3], dtype='int32') x_transposed = paddle.t(x) print(x_transposed.shape) # [3, 2] """ if len(input.shape) > 2: raise ValueError( "Input(input) only support N-D (N<=2) tensor, but received " "length of Input(input) is %s. Perhaps you can use paddle." "tensor.transpose() instead." % len(input.shape)) if paddle.in_dynamic_mode(): if len(input.shape) == 1: return input # 2-D tensor perm = [1, 0] out, _ = _C_ops.transpose2(input, 'axis', perm) return out check_variable_and_dtype( input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'], 'transpose') helper = LayerHelper('t', **locals()) out = helper.create_variable_for_type_inference(input.dtype) input_shape = helper.create_variable_for_type_inference(input.dtype) if len(input.shape) == 1: out = input else: helper.append_op( type='transpose2', inputs={'X': [input]}, outputs={'Out': [out], 'XShape': [input_shape]}, attrs={'axis': [1, 0]}) return out def cross(x, y, axis=None, name=None): """ Computes the cross product between two tensors along an axis. Inputs must have the same shape, and the length of their axes should be equal to 3. If `axis` is not given, it defaults to the first axis found with the length 3. Args: x (Tensor): The first input tensor. y (Tensor): The second input tensor. axis (int, optional): The axis along which to compute the cross product. It defaults to the first axis found with the length 3. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor. A Tensor with same data type as `x`. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0], [3.0, 3.0, 3.0]]) y = paddle.to_tensor([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]) z1 = paddle.cross(x, y) # [[-1. -1. -1.] # [ 2. 2. 2.] # [-1. -1. -1.]] z2 = paddle.cross(x, y, axis=1) # [[0. 0. 0.] # [0. 0. 0.] # [0. 0. 0.]] """ if in_dygraph_mode(): return _C_ops.final_state_cross(x, y, axis) else: if _in_legacy_dygraph(): if axis is not None: return _C_ops.cross(x, y, 'dim', axis) else: return _C_ops.cross(x, y) else: helper = LayerHelper("cross", **locals()) out = helper.create_variable_for_type_inference(x.dtype) attrs = dict() attrs['dim'] = axis helper.append_op( type='cross', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs=attrs) return out def cholesky(x, upper=False, name=None): r""" Computes the Cholesky decomposition of one symmetric positive-definite matrix or batches of symmetric positive-definite matrice. If `upper` is `True`, the decomposition has the form :math:`A = U^{T}U` , and the returned matrix :math:`U` is upper-triangular. Otherwise, the decomposition has the form :math:`A = LL^{T}` , and the returned matrix :math:`L` is lower-triangular. Args: x (Tensor): The input tensor. Its shape should be `[*, M, M]`, where * is zero or more batch dimensions, and matrices on the inner-most 2 dimensions all should be symmetric positive-definite. Its data type should be float32 or float64. upper (bool): The flag indicating whether to return upper or lower triangular matrices. Default: False. 
Returns: Tensor: A Tensor with same shape and data type as `x`. It represents \ triangular matrices generated by Cholesky decomposition. Examples: .. code-block:: python import paddle import numpy as np a = np.random.rand(3, 3) a_t = np.transpose(a, [1, 0]) x_data = np.matmul(a, a_t) + 1e-03 x = paddle.to_tensor(x_data) out = paddle.linalg.cholesky(x, upper=False) print(out) # [[1.190523 0. 0. ] # [0.9906703 0.27676893 0. ] # [1.25450498 0.05600871 0.06400121]] """ if paddle.in_dynamic_mode(): return _C_ops.cholesky(x, "upper", upper) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cholesky') check_type(upper, 'upper', bool, 'cholesky') helper = LayerHelper('cholesky', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='cholesky', inputs={'X': [x]}, outputs={'Out': out}, attrs={'upper': upper}) return out def matrix_rank(x, tol=None, hermitian=False, name=None): r""" Computes the rank of a matrix. The rank of a matrix is the number of singular values that are greater than the specified `tol` threshold when hermitian=False, or the number of eigenvalues in absolute value that are greater than the specified `tol` threshold when hermitian=True. Args: x (Tensor): The input tensor. Its shape should be `[..., m, n]`, where `...` is zero or more batch dimensions. If `x` is a batch of matrices then the output has the same batch dimensions. The data type of `x` should be float32 or float64. tol (float,Tensor,optional): the tolerance value. Default: None. If `tol` is not specified, and `sigma` is the largest singular value (or eigenvalues in absolute value), and `eps` is the epsilon value for the dtype of `x`, then `tol` is computed with formula `tol=sigma * max(m,n) * eps`. Note that if `x` is a batch of matrices, `tol` is computed this way for every batch. hermitian (bool,optional): indicates whether `x` is Hermitian. Default: False. When hermitian=True, `x` is assumed to be Hermitian, enabling a more efficient method for finding eigenvalues, but `x` is not checked inside the function. Instead, We just use the lower triangular of the matrix to compute. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: Rank of tensor x. Examples: .. 
code-block:: python import paddle a = paddle.eye(10) b = paddle.linalg.matrix_rank(a) print(b) # b = [10] c = paddle.ones(shape=[3, 4, 5, 5]) d = paddle.linalg.matrix_rank(c, tol=0.01, hermitian=True) print(d) # d = [[1, 1, 1, 1], # [1, 1, 1, 1], # [1, 1, 1, 1]] """ if paddle.in_dynamic_mode(): if tol is None: tol_tensor = None tol_attr = 0.0 use_default_tol = True elif isinstance(tol, Variable): if tol.dtype != x.dtype: tol_tensor = cast(tol, x.dtype) else: tol_tensor = tol tol_attr = 0.0 use_default_tol = False else: tol_tensor = None tol_attr = float(tol) use_default_tol = False return _C_ops.matrix_rank(x, tol_tensor, "tol", tol_attr, 'hermitian', hermitian, 'use_default_tol', use_default_tol) inputs = {} attrs = {} check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'matrix_rank') inputs['X'] = x if tol is None: attrs['use_default_tol'] = True elif isinstance(tol, Variable): check_variable_and_dtype(tol, 'tol', ['float32'], 'matrix_rank') attrs['use_default_tol'] = False if tol.dtype != x.dtype: inputs['TolTensor'] = cast(tol, x.dtype) else: inputs['TolTensor'] = tol else: check_type(tol, 'tol', float, 'matrix_rank') attrs['use_default_tol'] = False attrs['tol'] = tol check_type(hermitian, 'hermitian', bool, 'matrix_rank') attrs['hermitian'] = hermitian helper = LayerHelper('matrix_rank', **locals()) out = helper.create_variable_for_type_inference(dtype='int32') helper.append_op( type='matrix_rank', inputs=inputs, outputs={'Out': out}, attrs=attrs) return out def bmm(x, y, name=None): """ Applies batched matrix multiplication to two tensors. Both of the two input tensors must be three-dementional and share the same batch size. if x is a (b, m, k) tensor, y is a (b, k, n) tensor, the output will be a (b, m, n) tensor. Args: x (Tensor): The input Tensor. y (Tensor): The input Tensor. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The product Tensor. Examples: .. code-block:: python import paddle # In imperative mode: # size x: (2, 2, 3) and y: (2, 3, 2) x = paddle.to_tensor([[[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]], [[3.0, 3.0, 3.0], [4.0, 4.0, 4.0]]]) y = paddle.to_tensor([[[1.0, 1.0],[2.0, 2.0],[3.0, 3.0]], [[4.0, 4.0],[5.0, 5.0],[6.0, 6.0]]]) out = paddle.bmm(x, y) #output size: (2, 2, 2) #output value: #[[[6.0, 6.0],[12.0, 12.0]],[[45.0, 45.0],[60.0, 60.0]]] out_np = out.numpy() """ x_shape = x.shape y_shape = y.shape if not len(x_shape) == len(y_shape) == 3: raise ValueError( "x and y should be 3-dimensional. But received x's dimention: {}, y's dimention: {}". format(x_shape, y_shape)) if x_shape[2] != y_shape[1]: raise ValueError( "x's width must be equal with y's height. But received x's shape: {}, y's shape: {}". format(x_shape, y_shape)) if x_shape[0] != y_shape[0]: raise ValueError( "x's batch (shape[0]) must be equal with y's batch (shape[0]). But received x's shape: {}, y's shape: {}". format(x_shape, y_shape)) if paddle.in_dynamic_mode(): return _C_ops.bmm(x, y) helper = LayerHelper('bmm', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type='bmm', inputs={'X': x, 'Y': y}, outputs={'Out': out}) return out def histogram(input, bins=100, min=0, max=0, name=None): """ Computes the histogram of a tensor. The elements are sorted into equal width bins between min and max. If min and max are both zero, the minimum and maximum values of the data are used. Args: input (Tensor): A Tensor(or LoDTensor) with shape :math:`[N_1, N_2,..., N_k]` . 
The data type of the input Tensor should be float32, float64, int32, int64. bins (int): number of histogram bins min (int): lower end of the range (inclusive) max (int): upper end of the range (inclusive) Returns: Tensor: data type is int64, shape is (nbins,). Examples: .. code-block:: python import paddle inputs = paddle.to_tensor([1, 2, 1]) result = paddle.histogram(inputs, bins=4, min=0, max=3) print(result) # [0, 2, 1, 0] """ if paddle.in_dynamic_mode(): return _C_ops.histogram(input, "bins", bins, "min", min, "max", max) helper = LayerHelper('histogram', **locals()) check_variable_and_dtype( input, 'X', ['int32', 'int64', 'float32', 'float64'], 'histogram') out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64) helper.append_op( type='histogram', inputs={'X': input}, outputs={'Out': out}, attrs={'bins': bins, 'min': min, 'max': max}) return out def bincount(x, weights=None, minlength=0, name=None): """ Computes frequency of each value in the input tensor. Args: x (Tensor): A Tensor with non-negative integer. Should be 1-D tensor. weights (Tensor, optional): Weight for each value in the input tensor. Should have the same shape as input. Default is None. minlength (int, optional): Minimum number of bins. Should be non-negative integer. Default is 0. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor of frequency. Examples: .. code-block:: python import paddle x = paddle.to_tensor([1, 2, 1, 4, 5]) result1 = paddle.bincount(x) print(result1) # [0, 2, 1, 0, 1, 1] w = paddle.to_tensor([2.1, 0.4, 0.1, 0.5, 0.5]) result2 = paddle.bincount(x, weights=w) print(result2) # [0., 2.19999981, 0.40000001, 0., 0.50000000, 0.50000000] """ if x.dtype not in [paddle.int32, paddle.int64]: raise TypeError("Elements in Input(x) should all be integers") if paddle.in_dynamic_mode(): return _C_ops.bincount(x, weights, "minlength", minlength) helper = LayerHelper('bincount', **locals()) check_variable_and_dtype(x, 'X', ['int32', 'int64'], 'bincount') if weights is not None: check_variable_and_dtype(weights, 'Weights', ['int32', 'int64', 'float32', 'float64'], 'bincount') out = helper.create_variable_for_type_inference(dtype=weights.dtype) else: out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='bincount', inputs={'X': x, 'Weights': weights}, outputs={'Out': out}, attrs={'minlength': minlength}) return out def mv(x, vec, name=None): """ Performs a matrix-vector product of the matrix x and the vector vec. Args: x (Tensor): A tensor with shape :math:`[M, N]` , The data type of the input Tensor x should be one of float32, float64. vec (Tensor): A tensor with shape :math:`[N]` , The data type of the input Tensor x should be one of float32, float64. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor which is producted by x and vec. Examples: .. 
code-block:: python # x: [M, N], vec: [N] # paddle.mv(x, vec) # out: [M] import numpy as np import paddle x_data = np.array([[2, 1, 3], [3, 0, 1]]).astype("float64") x = paddle.to_tensor(x_data) vec_data = np.array([3, 5, 1]) vec = paddle.to_tensor(vec_data).astype("float64") out = paddle.mv(x, vec) """ if in_dygraph_mode(): return _C_ops.final_state_mv(x, vec) else: if _in_legacy_dygraph(): out = _C_ops.mv(x, vec) return out else: def __check_input(x, vec): var_names = {'x': x, 'vec': vec} for name, val in var_names.items(): check_variable_and_dtype(val, name, ['float32', 'float64'], 'mv') x_shape = list(x.shape) vec_shape = list(vec.shape) if len(x_shape) != 2: raise ValueError( "x should be 2-dimensional. But received x's dimention: {}". format(x_shape)) if len(vec_shape) != 1: raise ValueError( "vec should be 1-dimensional. But received vec's dimention: {}". format(vec_shape)) __check_input(x, vec) helper = LayerHelper('mv', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='mv', inputs={'X': x, 'Vec': vec}, outputs={'Out': out}) return out def det(x, name=None): """ Calculates determinant value of a square matrix or batches of square matrices. Args: x (Tensor): input (Tensor): the input matrix of size `(n, n)` or the batch of matrices of size `(*, n, n)` where `*` is one or more batch dimensions. Returns: y (Tensor):the determinant value of a square matrix or batches of square matrices. Examples: .. code-block:: python import paddle x = paddle.randn([3,3,3]) A = paddle.linalg.det(x) print(A) # [ 0.02547996, 2.52317095, -6.15900707]) """ if paddle.in_dynamic_mode(): return _C_ops.determinant(x) check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'det') input_shape = list(x.shape) assert len(input_shape) >= 2, \ "The x must be at least 2-dimensional, " \ "but received Input x's dimensional: %s.\n" % \ len(input_shape) assert (input_shape[-1] == input_shape[-2]), \ "Expect squared input," \ "but received %s by %s matrix.\n" \ %(input_shape[-2], input_shape[-1]) \ helper = LayerHelper('determinant', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='determinant', inputs={'Input': [x]}, outputs={'Out': [out]}) return out def slogdet(x, name=None): """ Calculates the sign and natural logarithm of the absolute value of a square matrix's or batches square matrices' determinant. The determinant can be computed with ``sign * exp(logabsdet) Supports input of float, double Note that for matrices that have zero determinant, this returns ``(0, -inf)`` Args: x (Tensor): the batch of matrices of size :math:`(*, n, n)` where math:`*` is one or more batch dimensions. Returns: y (Tensor): A tensor containing the sign of the determinant and the natural logarithm of the absolute value of determinant, respectively. Examples: .. code-block:: python import paddle x = paddle.randn([3,3,3]) A = paddle.linalg.slogdet(x) print(A) # [[ 1. , 1. , -1. 
], # [-0.98610914, -0.43010661, -0.10872950]]) """ if paddle.in_dynamic_mode(): return _C_ops.slogdeterminant(x) check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'slogdet') input_shape = list(x.shape) assert len(input_shape) >= 2, \ "The x must be at least 2-dimensional, " \ "but received Input x's dimensional: %s.\n" % \ len(input_shape) assert (input_shape[-1] == input_shape[-2]), \ "Expect squared input," \ "but received %s by %s matrix.\n" \ %(input_shape[-2], input_shape[-1]) \ helper = LayerHelper('slogdeterminant', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='slogdeterminant', inputs={'Input': [x]}, outputs={'Out': [out]}) return out def svd(x, full_matrices=False, name=None): r""" Computes the singular value decomposition of one matrix or a batch of regular matrices. Let :math:`X` be the input matrix or a batch of input matrices, the output should satisfies: .. math:: X = U * diag(S) * VT Args: x (Tensor): The input tensor. Its shape should be `[..., N, M]`, where `...` is zero or more batch dimensions. N and M can be arbitraty positive number. Note that if x is sigular matrices, the grad is numerical instable. The data type of x should be float32 or float64. full_matrices (bool): A flag to control the behavor of svd. If full_matrices = True, svd op will compute full U and V matrics, which means shape of U is `[..., N, N]`, shape of V is `[..., M, M]`. K = min(M, N). If full_matrices = False, svd op will use a economic method to store U and V. which means shape of U is `[..., N, K]`, shape of V is `[..., M, K]`. K = min(M, N). name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tuple of 3 tensors: (U, S, VH). VH is the conjugate transpose of V. S is the singlar value vectors of matrics with shape `[..., K]` Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [1.0, 3.0], [4.0, 6.0]]).astype('float64') x = x.reshape([3, 2]) u, s, vh = paddle.linalg.svd(x) print (u) #U = [[ 0.27364809, -0.21695147 ], # [ 0.37892198, -0.87112408 ], # [ 0.8840446 , 0.44053933 ]] print (s) #S = [8.14753743, 0.78589688] print (vh) #VT= [[ 0.51411221, 0.85772294], # [ 0.85772294, -0.51411221]] # one can verify : U * S * VT == X # U * UH == I # V * VH == I """ if paddle.in_dynamic_mode(): return _C_ops.svd(x, 'full_matrices', full_matrices) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'svd') check_type(full_matrices, 'full_matrices', bool, 'svd') helper = LayerHelper('svd', **locals()) u = helper.create_variable_for_type_inference(dtype=x.dtype) vh = helper.create_variable_for_type_inference(dtype=x.dtype) s = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['full_matrices'] = full_matrices helper.append_op( type='svd', inputs={'X': [x]}, outputs={'U': u, 'VH': vh, 'S': s}, attrs=attrs, ) return u, s, vh def matrix_power(x, n, name=None): r""" Computes the n-th power of a square matrix or a batch of square matrices. Let :math:`X` be a sqaure matrix or a batch of square matrices, :math:`n` be an exponent, the equation should be: .. math:: Out = X ^ {n} Specifically, - If `n > 0`, it returns the matrix or a batch of matrices raised to the power of `n`. - If `n = 0`, it returns the identity matrix or a batch of identity matrices. - If `n < 0`, it returns the inverse of each matrix (if invertible) raised to the power of `abs(n)`. 
Args: x (Tensor): A square matrix or a batch of square matrices to be raised to power `n`. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. n (int): The exponent. It can be any positive, negative integer or zero. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The n-th power of the matrix (or the batch of matrices) `x`. Its data type should be the same as that of `x`. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1, 2, 3], [1, 4, 9], [1, 8, 27]], dtype='float64') print(paddle.linalg.matrix_power(x, 2)) # [[6. , 34. , 102.], # [14. , 90. , 282.], # [36. , 250., 804.]] print(paddle.linalg.matrix_power(x, 0)) # [[1., 0., 0.], # [0., 1., 0.], # [0., 0., 1.]] print(paddle.linalg.matrix_power(x, -2)) # [[ 12.91666667, -12.75000000, 2.83333333 ], # [-7.66666667 , 8. , -1.83333333 ], # [ 1.80555556 , -1.91666667 , 0.44444444 ]] """ if paddle.in_dynamic_mode(): return _C_ops.matrix_power(x, "n", n) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'matrix_power') check_type(n, 'n', int, 'matrix_power') helper = LayerHelper('matrix_power', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matrix_power', inputs={'X': x}, outputs={'Out': out}, attrs={'n': n}) return out def qr(x, mode="reduced", name=None): r""" Computes the QR decomposition of one matrix or batches of matrice (backward is unsupported now). Args: x (Tensor): The input tensor. Its shape should be `[..., M, N]`, where ... is zero or more batch dimensions. M and N can be arbitrary positive number. The data type of x should be float32 or float64. mode (str, optional): A flag to control the behavior of qr, the default is "reduced". Suppose x's shape is `[..., M, N]` and denoting `K = min(M, N)`: If mode = "reduced", qr op will return reduced Q and R matrices, which means Q's shape is `[..., M, K]` and R's shape is `[..., K, N]`. If mode = "complete", qr op will return complete Q and R matrices, which means Q's shape is `[..., M, M]` and R's shape is `[..., M, N]`. If mode = "r", qr op will only return reduced R matrix, which means R's shape is `[..., K, N]`. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: If mode = "reduced" or mode = "complete", qr will return a two tensor-tuple, which represents Q and R. If mode = "r", qr will return a tensor which represents R. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') q, r = paddle.linalg.qr(x) print (q) print (r) # Q = [[-0.16903085, 0.89708523], # [-0.50709255, 0.27602622], # [-0.84515425, -0.34503278]]) # R = [[-5.91607978, -7.43735744], # [ 0. 
, 0.82807867]]) # one can verify : X = Q * R ; """ if paddle.in_dynamic_mode(): q, r = _C_ops.qr(x, 'mode', mode) if mode == "r": return r else: return q, r check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'qr') check_type(mode, 'mode', str, 'qr') helper = LayerHelper('qr', **locals()) q = helper.create_variable_for_type_inference(dtype=x.dtype) r = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['mode'] = mode helper.append_op( type='qr', inputs={'X': [x]}, outputs={'Q': q, 'R': r}, attrs=attrs) if mode == "r": return r else: return q, r def lu(x, pivot=True, get_infos=False, name=None): r""" Computes the LU factorization of an N-D(N>=2) matrix x. Returns the LU factorization(inplace x) and Pivots. low triangular matrix L and upper triangular matrix U are combined to a single LU matrix. Pivoting is done if pivot is set to True. P mat can be get by pivots: # ones = eye(rows) #eye matrix of rank rows # for i in range(cols): # swap(ones[i], ones[pivots[i]]) # return ones Args: X (Tensor): the tensor to factor of N-dimensions(N>=2). pivot (bool, optional): controls whether pivoting is done. Default: True. get_infos (bool, optional): if set to True, returns an info IntTensor. Default: False. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: factorization (Tensor): LU matrix, the factorization of input X. pivots (IntTensor): the pivots of size(∗(N-2), min(m,n)). `pivots` stores all the intermediate transpositions of rows. The final permutation `perm` could be reconstructed by this, details refer to upper example. infos (IntTensor, optional): if `get_infos` is `True`, this is a tensor of size (∗(N-2)) where non-zero values indicate whether factorization for the matrix or each minibatch has succeeded or failed. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') lu,p,info = paddle.linalg.lu(x, get_infos=True) # >>> lu: # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0.20000000, 0.80000000], # [0.60000000, 0.50000000]]) # >>> p # Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # [3, 3]) # >>> info # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # 0) P,L,U = paddle.linalg.lu_unpack(lu,p) # >>> P # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[0., 1., 0.], # [0., 0., 1.], # [1., 0., 0.]]), # >>> L # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[1. , 0. ], # [0.20000000, 1. ], # [0.60000000, 0.50000000]]), # >>> U # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0. 
, 0.80000000]])) # one can verify : X = P @ L @ U ; """ if paddle.in_dynamic_mode(): LU, Piv, Info = _C_ops.lu(x, 'pivots', pivot) if get_infos: return LU, Piv, Info else: return LU, Piv check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu') helper = LayerHelper('lu', **locals()) lu = helper.create_variable_for_type_inference(dtype=x.dtype) p = helper.create_variable_for_type_inference(dtype='int') info = helper.create_variable_for_type_inference(dtype='int') attrs = dict() attrs['pivots'] = pivot helper.append_op( type='lu', inputs={'X': x}, outputs={'Out': lu, 'Pivots': p, 'Infos': info}, attrs=attrs) if get_infos: return lu, p, info else: return lu, p def lu_unpack(x, y, unpack_ludata=True, unpack_pivots=True, name=None): r""" Unpack L U and P to single matrix tensor . unpack L and U matrix from LU, unpack permutation matrix P from Pivtos . P mat can be get by pivots: # ones = eye(rows) #eye matrix of rank rows # for i in range(cols): # swap(ones[i], ones[pivots[i]]) Args: x (Tensor): The LU tensor get from paddle.linalg.lu, which is combined by L and U. y (Tensor): Pivots get from paddle.linalg.lu. unpack_ludata (bool,optional): whether to unpack L and U from x. Default: True. unpack_pivots (bool, optional): whether to unpack permutation matrix P from Pivtos. Default: True. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: P (Tensor): Permutation matrix P of lu factorization. L (Tensor): The lower triangular matrix tensor of lu factorization. U (Tensor): The upper triangular matrix tensor of lu factorization. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') lu,p,info = paddle.linalg.lu(x, get_infos=True) # >>> lu: # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0.20000000, 0.80000000], # [0.60000000, 0.50000000]]) # >>> p # Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # [3, 3]) # >>> info # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # 0) P,L,U = paddle.linalg.lu_unpack(lu,p) # >>> P # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[0., 1., 0.], # [0., 0., 1.], # [1., 0., 0.]]), # >>> L # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[1. , 0. ], # [0.20000000, 1. ], # [0.60000000, 0.50000000]]), # >>> U # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0. , 0.80000000]])) # one can verify : X = P @ L @ U ; """ if paddle.in_dynamic_mode(): P, L, U = _C_ops.lu_unpack(x, y, 'unpack_ludata', unpack_ludata, 'unpack_pivots', unpack_pivots) return P, L, U check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu_unpack') helper = LayerHelper('lu_unpack', **locals()) p = helper.create_variable_for_type_inference(dtype=x.dtype) l = helper.create_variable_for_type_inference(dtype=x.dtype) u = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['unpack_ludata'] = unpack_ludata attrs['unpack_pivots'] = unpack_pivots helper.append_op( type='lu_unpack', inputs={'X': x, 'Pivots': y}, outputs={'Pmat': p, 'L': l, 'U': u}, attrs=attrs) return p, l, u def eig(x, name=None): """ This API performs the eigenvalue decomposition of a square matrix or a batch of square matrices. .. 
note:: If the matrix is a Hermitian or a real symmetric matrix, please use :ref:`paddle.linalg.eigh` instead, which is much faster. If only eigenvalues is needed, please use :ref:`paddle.linalg.eigvals` instead. If the matrix is of any shape, please use :ref:`paddle.linalg.svd`. This API is only supported on CPU device. The output datatype is always complex for both real and complex input. Args: x (Tensor): A tensor with shape math:`[*, N, N]`, The data type of the x should be one of ``float32``, ``float64``, ``compplex64`` or ``complex128``. name (str, optional): The default value is `None`. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Eigenvalues(Tensors): A tensor with shape math:`[*, N]` refers to the eigen values. Eigenvectors(Tensors): A tensor with shape math:`[*, N, N]` refers to the eigen vectors. Examples: .. code-block:: python import paddle import numpy as np paddle.device.set_device("cpu") x_data = np.array([[1.6707249, 7.2249975, 6.5045543], [9.956216, 8.749598, 6.066444 ], [4.4251957, 1.7983172, 0.370647 ]]).astype("float32") x = paddle.to_tensor(x_data) w, v = paddle.linalg.eig(x) print(w) # Tensor(shape=[3, 3], dtype=complex128, place=CPUPlace, stop_gradient=False, # [[(-0.5061363550800655+0j) , (-0.7971760990842826+0j) , # (0.18518077798279986+0j)], # [(-0.8308237755993192+0j) , (0.3463813401919749+0j) , # (-0.6837005269141947+0j) ], # [(-0.23142567697893396+0j), (0.4944999840400175+0j) , # (0.7058765252952796+0j) ]]) print(v) # Tensor(shape=[3], dtype=complex128, place=CPUPlace, stop_gradient=False, # [ (16.50471283351188+0j) , (-5.5034820550763515+0j) , # (-0.21026087843552282+0j)]) """ if paddle.in_dynamic_mode(): w, v = _C_ops.eig(x) return w, v check_variable_and_dtype( x, 'X', ['float32', 'float64', 'complex64', 'complex128'], 'eig') helper = LayerHelper('eig', **locals()) w = helper.create_variable_for_type_inference(x.dtype) v = helper.create_variable_for_type_inference(x.dtype) inputs = {'X': x} outputs = {'Eigenvalues': w, 'Eigenvectors': v} helper.append_op(type='eig', inputs=inputs, outputs=outputs) return w, v def eigvals(x, name=None): """ Compute the eigenvalues of one or more general matrices. Warning: The gradient kernel of this operator does not yet developed. If you need back propagation through this operator, please replace it with paddle.linalg.eig. Args: x (Tensor): A square matrix or a batch of square matrices whose eigenvalues will be computed. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32, float64, complex64, or complex128. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: A tensor containing the unsorted eigenvalues which has the same batch dimensions with `x`. The eigenvalues are complex-valued even when `x` is real. Examples: .. 
code-block:: python import paddle paddle.set_device("cpu") paddle.seed(1234) x = paddle.rand(shape=[3, 3], dtype='float64') # [[0.02773777, 0.93004224, 0.06911496], # [0.24831591, 0.45733623, 0.07717843], # [0.48016702, 0.14235102, 0.42620817]]) print(paddle.linalg.eigvals(x)) # [(-0.27078833542132674+0j), (0.29962280156230725+0j), (0.8824477020120244+0j)] #complex128 """ check_variable_and_dtype(x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigvals') x_shape = list(x.shape) if len(x_shape) < 2: raise ValueError( "The dimension of Input(x) should be at least 2, but received x's dimention = {}, x's shape = {}". format(len(x_shape), x_shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The last two dimensions of Input(x) should be equal, but received x's shape = {}". format(x_shape)) if paddle.in_dynamic_mode(): return _C_ops.eigvals(x) helper = LayerHelper('eigvals', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type='eigvals', inputs={'X': x}, outputs={'Out': out}) return out def multi_dot(x, name=None): """ Multi_dot is an operator that calculates multiple matrix multiplications. Supports inputs of float16(only GPU support), float32 and float64 dtypes. This function does not support batched inputs. The input tensor in [x] must be 2-D except for the first and last can be 1-D. If the first tensor is a 1-D vector of shape(n, ) it is treated as row vector of shape(1, n), similarly if the last tensor is a 1D vector of shape(n, ), it is treated as a column vector of shape(n, 1). If the first and last tensor are 2-D matrix, then the output is also 2-D matrix, otherwise the output is a 1-D vector. Multi_dot will select the lowest cost multiplication order for calculation. The cost of multiplying two matrices with shapes (a, b) and (b, c) is a * b * c. Given matrices A, B, C with shapes (20, 5), (5, 100), (100, 10) respectively, we can calculate the cost of different multiplication orders as follows: - Cost((AB)C) = 20x5x100 + 20x100x10 = 30000 - Cost(A(BC)) = 5x100x10 + 20x5x10 = 6000 In this case, multiplying B and C first, then multiply A, which is 5 times faster than sequential calculation. Args: x ([Tensor]): The input tensors which is a list Tensor. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The output Tensor. Examples: .. 
code-block:: python import paddle import numpy as np # A * B A_data = np.random.random([3, 4]).astype(np.float32) B_data = np.random.random([4, 5]).astype(np.float32) A = paddle.to_tensor(A_data) B = paddle.to_tensor(B_data) out = paddle.linalg.multi_dot([A, B]) print(out.numpy().shape) # [3, 5] # A * B * C A_data = np.random.random([10, 5]).astype(np.float32) B_data = np.random.random([5, 8]).astype(np.float32) C_data = np.random.random([8, 7]).astype(np.float32) A = paddle.to_tensor(A_data) B = paddle.to_tensor(B_data) C = paddle.to_tensor(C_data) out = paddle.linalg.multi_dot([A, B, C]) print(out.numpy().shape) # [10, 7] """ if paddle.in_dynamic_mode(): return _C_ops.multi_dot(x) check_type(x, 'x', (list, tuple), 'multi_dot') for id, item in enumerate(x): check_variable_and_dtype(item, 'x[' + str(id) + ']', ['float16', 'float32', 'float64'], 'multi_dot') if item.dtype != x[0].dtype: raise TypeError( "All the Tensors in the input must have the same data type.") helper = LayerHelper('multi_dot', **locals()) dtype = helper.input_dtype(input_param_name='x') out = helper.create_variable_for_type_inference(dtype) helper.append_op(type='multi_dot', inputs={"X": x}, outputs={"Out": out}) return out def eigh(x, UPLO='L', name=None): """ Compute the eigenvalues and eigenvectors of a complex Hermitian (conjugate symmetric) or a real symmetric matrix. Args: x (Tensor): A tensor with shape :math:`[*, N, N]` , The data type of the input Tensor x should be one of float32, float64, complex64, complex128. UPLO(str, optional): (string, default 'L'), 'L' represents the lower triangular matrix, "'U' represents the upper triangular matrix.". name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: out_value(Tensor): A Tensor with shape [*, N] and data type of float32 and float64. The eigenvalues of eigh op. out_vector(Tensor): A Tensor with shape [*, N, N] and data type of float32,float64,complex64 and complex128. The eigenvectors of eigh op. Examples: .. code-block:: python import numpy as np import paddle x_data = np.array([[1, -2j], [2j, 5]]) x = paddle.to_tensor(x_data) out_value, out_vector = paddle.linalg.eigh(x, UPLO='L') print(out_value) #[0.17157288, 5.82842712] print(out_vector) #[(-0.9238795325112867+0j), (-0.3826834323650898+0j)], #[ 0.3826834323650898j , -0.9238795325112867j ]] """ if paddle.in_dynamic_mode(): return _C_ops.eigh(x, 'UPLO', UPLO) def __check_input(x, UPLO): x_shape = list(x.shape) if len(x.shape) < 2: raise ValueError( "Input(input) only support >=2 tensor, but received " "length of Input(input) is %s." % len(x.shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The input matrix must be batches of square matrices. But received x's dimention: {}". format(x_shape)) if UPLO != 'L' and UPLO != 'U': raise ValueError( "UPLO must be L or U. 
But received UPLO is: {}".format(UPLO)) __check_input(x, UPLO) helper = LayerHelper('eigh', **locals()) check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigh') out_value = helper.create_variable_for_type_inference(dtype=x.dtype) out_vector = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='eigh', inputs={'X': x}, outputs={'Eigenvalues': out_value, 'Eigenvectors': out_vector}, attrs={'UPLO': UPLO}) return out_value, out_vector def pinv(x, rcond=1e-15, hermitian=False, name=None): r""" Calculate pseudo inverse via SVD(singular value decomposition) of one matrix or batches of regular matrix. .. math:: if hermitian == False: x = u * s * vt (SVD) out = v * 1/s * ut else: x = u * s * ut (eigh) out = u * 1/s * u.conj().transpose(-2,-1) If x is hermitian or symmetric matrix, svd will be replaced with eigh. Args: x(Tensor): The input tensor. Its shape should be (*, m, n) where * is zero or more batch dimensions. m and n can be arbitraty positive number. The data type of x should be float32 or float64 or complex64 or complex128. When data type is complex64 or cpmplex128, hermitian should be set True. rcond(Tensor, optional): the tolerance value to determine when is a singular value zero. Defalut:1e-15. hermitian(bool, optional): indicates whether x is Hermitian if complex or symmetric if real. Default: False. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The tensor with same data type with x. it represents pseudo inverse of x. Its shape should be (*, n, m). Examples: .. code-block:: python import paddle x = paddle.arange(15).reshape((3, 5)).astype('float64') input = paddle.to_tensor(x) out = paddle.linalg.pinv(input) print(input) print(out) # input: # [[0. , 1. , 2. , 3. , 4. ], # [5. , 6. , 7. , 8. , 9. 
], # [10., 11., 12., 13., 14.]] # out: # [[-0.22666667, -0.06666667, 0.09333333], # [-0.12333333, -0.03333333, 0.05666667], # [-0.02000000, 0.00000000, 0.02000000], # [ 0.08333333, 0.03333333, -0.01666667], # [ 0.18666667, 0.06666667, -0.05333333]] # one can verify : x * out * x = x ; # or out * x * out = x ; """ if paddle.in_dynamic_mode(): if not hermitian: # combine svd and matmul op u, s, vt = _C_ops.svd(x, 'full_matrices', False) max_singular_val = _C_ops.reduce_max(s, 'dim', [-1], 'keep_dim', True, \ 'reduce_all', False) rcond = paddle.to_tensor(rcond, dtype=x.dtype) cutoff = rcond * max_singular_val y = float('inf') y = paddle.to_tensor(y, dtype=x.dtype) condition = s > cutoff cond_int = layers.cast(condition, s.dtype) cond_not_int = layers.cast(layers.logical_not(condition), s.dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st, _ = _C_ops.unsqueeze2(singular, 'axes', [-2]) dims = list(range(len(vt.shape))) perm = dims[:-2] + [dims[-1]] + [dims[-2]] v, _ = _C_ops.transpose2(vt, 'axis', perm) out_1 = v * st out_2 = _C_ops.matmul_v2(out_1, u, 'trans_x', False, 'trans_y', True) return out_2 else: # combine eigh and matmul op s, u = _C_ops.eigh(x, 'UPLO', 'L') s_abs = paddle.abs(s) max_singular_val = _C_ops.reduce_max(s_abs, 'dim', [-1], 'keep_dim', True, \ 'reduce_all', False) rcond = paddle.to_tensor(rcond, dtype=s.dtype) cutoff = rcond * max_singular_val y = float('inf') y = paddle.to_tensor(y, dtype=s.dtype) condition = s_abs > cutoff cond_int = layers.cast(condition, s.dtype) cond_not_int = layers.cast(layers.logical_not(condition), s.dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st, _ = _C_ops.unsqueeze2(singular, 'axes', [-2]) out_1 = u * st u_conj = _C_ops.conj(u) out_2 = _C_ops.matmul_v2(out_1, u_conj, 'trans_x', False, 'trans_y', True) return out_2 else: if not hermitian: helper = LayerHelper('pinv', **locals()) dtype = x.dtype check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'pinv') u = helper.create_variable_for_type_inference(dtype) s = helper.create_variable_for_type_inference(dtype) vt = helper.create_variable_for_type_inference(dtype) helper.append_op( type='svd', inputs={'X': [x]}, outputs={'U': u, 'VH': vt, 'S': s}, attrs={'full_matrices': False}, ) max_singular_val = helper.create_variable_for_type_inference(dtype) helper.append_op( type='reduce_max', inputs={'X': s}, outputs={'Out': max_singular_val}, attrs={'dim': [-1], 'keep_dim': True, 'reduce_all': False}) rcond = layers.fill_constant(shape=[1], value=rcond, dtype=dtype) cutoff = rcond * max_singular_val y = float('inf') y = layers.fill_constant(shape=[1], value=y, dtype=dtype) condition = s > cutoff cond_int = layers.cast(condition, dtype) cond_not_int = layers.cast(layers.logical_not(condition), dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st = helper.create_variable_for_type_inference(dtype=dtype) st_shape = helper.create_variable_for_type_inference(dtype=dtype) helper.append_op( type='unsqueeze2', inputs={'X': singular}, attrs={'axes': [-2]}, outputs={'Out': st, 'XShape': st_shape}) dims = list(range(len(vt.shape))) perm = dims[:-2] + [dims[-1]] + [dims[-2]] v = helper.create_variable_for_type_inference(dtype) v_shape = helper.create_variable_for_type_inference(dtype) helper.append_op( 
type='transpose2', inputs={'X': [vt]}, outputs={'Out': [v], 'XShape': [v_shape]}, attrs={'axis': perm}) out_1 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='elementwise_mul', inputs={'X': v, 'Y': st}, outputs={'Out': out_1}, attrs={'axis': -1, 'use_mkldnn': False}) out_1 = helper.append_activation(out_1) out_2 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='matmul_v2', inputs={'X': out_1, 'Y': u}, outputs={'Out': out_2}, attrs={'trans_x': False, 'trans_y': True}, ) return out_2 else: helper = LayerHelper('pinv', **locals()) dtype = x.dtype check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'pinv') if dtype == paddle.complex128: s_type = 'float64' elif dtype == paddle.complex64: s_type = 'float32' else: s_type = dtype u = helper.create_variable_for_type_inference(dtype) s = helper.create_variable_for_type_inference(s_type) helper.append_op( type='eigh', inputs={'X': x}, outputs={'Eigenvalues': s, 'Eigenvectors': u}, attrs={'UPLO': 'L'}) s_abs = helper.create_variable_for_type_inference(s_type) helper.append_op( type='abs', inputs={'X': s}, outputs={'Out': s_abs}) max_singular_val = helper.create_variable_for_type_inference(s_type) helper.append_op( type='reduce_max', inputs={'X': s_abs}, outputs={'Out': max_singular_val}, attrs={'dim': [-1], 'keep_dim': True, 'reduce_all': False}) rcond = layers.fill_constant(shape=[1], value=rcond, dtype=s_type) cutoff = rcond * max_singular_val y = float('inf') y = layers.fill_constant(shape=[1], value=y, dtype=s_type) condition = s_abs > cutoff cond_int = layers.cast(condition, s_type) cond_not_int = layers.cast(layers.logical_not(condition), s_type) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st = helper.create_variable_for_type_inference(dtype=s_type) st_shape = helper.create_variable_for_type_inference(dtype=s_type) helper.append_op( type='unsqueeze2', inputs={'X': singular}, attrs={'axes': [-2]}, outputs={'Out': st, 'XShape': st_shape}) out_1 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='elementwise_mul', inputs={'X': u, 'Y': st}, outputs={'Out': out_1}, attrs={'axis': -1, 'use_mkldnn': False}) out_1 = helper.append_activation(out_1) u_conj = helper.create_variable_for_type_inference(dtype) helper.append_op( type='conj', inputs={'X': u}, outputs={'Out': [u_conj]}) out_2 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='matmul_v2', inputs={'X': out_1, 'Y': u_conj}, outputs={'Out': out_2}, attrs={'trans_x': False, 'trans_y': True}, ) return out_2 def solve(x, y, name=None): r""" Computes the solution of a square system of linear equations with a unique solution for input 'X' and 'Y'. Let :math: `X` be a sqaure matrix or a batch of square matrices, :math:`Y` be a vector/matrix or a batch of vectors/matrices, the equation should be: .. math:: Out = X^-1 * Y Specifically, - This system of linear equations has one solution if and only if input 'X' is invertible. Args: x (Tensor): A square matrix or a batch of square matrices. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): A vector/matrix or a batch of vectors/matrices. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. name(str, optional): Name for the operation (optional, default is None). 
For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of a square system of linear equations with a unique solution for input 'x' and 'y'. Its data type should be the same as that of `x`. Examples: .. code-block:: python # a square system of linear equations: # 2*X0 + X1 = 9 # X0 + 2*X1 = 8 import paddle import numpy as np np_x = np.array([[3, 1],[1, 2]]) np_y = np.array([9, 8]) x = paddle.to_tensor(np_x, dtype="float64") y = paddle.to_tensor(np_y, dtype="float64") out = paddle.linalg.solve(x, y) print(out) # [2., 3.]) """ if paddle.in_dynamic_mode(): return _C_ops.solve(x, y) inputs = {"X": [x], "Y": [y]} helper = LayerHelper("solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type="solve", inputs={"X": x, "Y": y}, outputs={"Out": out}) return out def triangular_solve(x, y, upper=True, transpose=False, unitriangular=False, name=None): r""" Computes the solution of a system of equations with a triangular coefficient matrix `x` and multiple right-hand sides `y` . Input `x` and `y` is 2D matrices or batches of 2D matrices. If the inputs are batches, the outputs is also batches. Args: x (Tensor): The input triangular coefficient matrix. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. upper (bool, optional): Whether to solve the upper-triangular system of equations (default) or the lower-triangular system of equations. Default: True. transpose (bool, optional): whether `x` should be transposed before calculation. Default: False. unitriangular (bool, optional): whether `x` is unit triangular. If True, the diagonal elements of `x` are assumed to be 1 and not referenced from `x` . Default: False. name(str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of the system of equations. Its data type should be the same as that of `x`. Examples: .. code-block:: python # a square system of linear equations: # x1 + x2 + x3 = 0 # 2*x2 + x3 = -9 # -x3 = 5 import paddle import numpy as np x = paddle.to_tensor([[1, 1, 1], [0, 2, 1], [0, 0,-1]], dtype="float64") y = paddle.to_tensor([[0], [-9], [5]], dtype="float64") out = paddle.linalg.triangular_solve(x, y, upper=True) print(out) # [7, -2, -5] """ if paddle.in_dynamic_mode(): return _C_ops.triangular_solve(x, y, 'upper', upper, 'transpose', transpose, 'unitriangular', unitriangular) inputs = {"X": [x], "Y": [y]} helper = LayerHelper("triangular_solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'triangular_solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'triangular_solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='triangular_solve', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs={ 'upper': upper, 'transpose': transpose, 'unitriangular': unitriangular }) return out def cholesky_solve(x, y, upper=False, name=None): r""" Solves a linear system of equations A @ X = B, given A's Cholesky factor matrix u and matrix B. Input `x` and `y` is 2D matrices or batches of 2D matrices. 
If the inputs are batches, the outputs is also batches. Args: x (Tensor): The input matrix which is upper or lower triangular Cholesky factor of square matrix A. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. upper (bool, optional): whether to consider the Cholesky factor as a lower or upper triangular matrix. Default: False. name(str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of the system of equations. Its data type is the same as that of `x`. Examples: .. code-block:: python import paddle u = paddle.to_tensor([[1, 1, 1], [0, 2, 1], [0, 0,-1]], dtype="float64") b = paddle.to_tensor([[0], [-9], [5]], dtype="float64") out = paddle.linalg.cholesky_solve(b, u, upper=True) print(out) # [-2.5, -7, 9.5] """ if paddle.in_dynamic_mode(): return _C_ops.cholesky_solve(x, y, 'upper', upper) helper = LayerHelper("cholesky_solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'cholesky_solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'cholesky_solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='cholesky_solve', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs={'upper': upper}) return out def eigvalsh(x, UPLO='L', name=None): """ Computes the eigenvalues of a complex Hermitian (conjugate symmetric) or a real symmetric matrix. Args: x (Tensor): A tensor with shape :math:`[_, M, M]` , The data type of the input Tensor x should be one of float32, float64, complex64, complex128. UPLO(str, optional): Lower triangular part of a (‘L’, default) or the upper triangular part (‘U’). name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor eigenvalues in ascending order. Examples: .. code-block:: python import numpy as np import paddle x_data = np.array([[1, -2j], [2j, 5]]) x = paddle.to_tensor(x_data) out_value = paddle.eigvalsh(x, UPLO='L') print(out_value) #[0.17157288, 5.82842712] """ if paddle.in_dynamic_mode(): is_test = x.stop_gradient values, _ = _C_ops.eigvalsh(x, 'UPLO', UPLO, 'is_test', is_test) return values def __check_input(x, UPLO): x_shape = list(x.shape) if len(x.shape) < 2: raise ValueError( "Input(input) only support >=2 tensor, but received " "length of Input(input) is %s." % len(x.shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The input matrix must be batches of square matrices. But received x's dimention: {}". format(x_shape)) if UPLO != 'L' and UPLO != 'U': raise ValueError( "UPLO must be L or U. 
But received UPLO is: {}".format(UPLO)) __check_input(x, UPLO) helper = LayerHelper('eigvalsh', **locals()) check_variable_and_dtype(x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigvalsh') out_value = helper.create_variable_for_type_inference(dtype=x.dtype) out_vector = helper.create_variable_for_type_inference(dtype=x.dtype) is_test = x.stop_gradient helper.append_op( type='eigvalsh', inputs={'X': x}, outputs={'Eigenvalues': out_value, 'Eigenvectors': out_vector}, attrs={'UPLO': UPLO, 'is_test': is_test}) return out_value def lstsq(x, y, rcond=None, driver=None, name=None): """ Computes a solution to the least squares problem of a system of linear equations. Args: x (Tensor): A tensor with shape ``(*, M, N)`` , the data type of the input Tensor ``x`` should be one of float32, float64. y (Tensor): A tensor with shape ``(*, M, K)`` , the data type of the input Tensor ``y`` should be one of float32, float64. rcond(float, optional): The default value is None. A float pointing number used to determine the effective rank of ``x``. If ``rcond`` is None, it will be set to max(M, N) times the machine precision of x_dtype. driver(str, optional): The default value is None. The name of LAPACK method to be used. For CPU inputs the valid values are ‘gels’, ‘gelsy’, ‘gelsd, ‘gelss’. For CUDA input, the only valid driver is ‘gels’. If ``driver`` is None, ‘gelsy’ is used for CPU inputs and ‘gels’ for CUDA inputs. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tuple: A tuple of 4 Tensors which is (``solution``, ``residuals``, ``rank``, ``singular_values``). ``solution`` is a tensor with shape ``(*, N, K)``, meaning the least squares solution. ``residuals`` is a tensor with shape ``(*, K)``, meaning the squared residuals of the solutions, which is computed when M > N and every matrix in ``x`` is full-rank, otherwise return an empty tensor. ``rank`` is a tensor with shape ``(*)``, meaning the ranks of the matrices in ``x``, which is computed when ``driver`` in (‘gelsy’, ‘gelsd’, ‘gelss’), otherwise return an empty tensor. ``singular_values`` is a tensor with shape ``(*, min(M, N))``, meaning singular values of the matrices in ``x``, which is computed when ``driver`` in (‘gelsd’, ‘gelss’), otherwise return an empty tensor. Examples: .. code-block:: python import paddle paddle.set_device("cpu") x = paddle.to_tensor([[1, 3], [3, 2], [5, 6.]]) y = paddle.to_tensor([[3, 4, 6], [5, 3, 4], [1, 2, 1.]]) results = paddle.linalg.lstsq(x, y, driver="gelsd") print(results[0]) # [[ 0.78350395, -0.22165027, -0.62371236], # [-0.11340097, 0.78866047, 1.14948535]] print(results[1]) # [19.81443405, 10.43814468, 30.56185532]) print(results[2]) # 2 print(results[3]) # [9.03455734, 1.54167950] x = paddle.to_tensor([[10, 2, 3], [3, 10, 5], [5, 6, 12.]]) y = paddle.to_tensor([[4, 2, 9], [2, 0, 3], [2, 5, 3.]]) results = paddle.linalg.lstsq(x, y, driver="gels") print(results[0]) # [[ 0.39386186, 0.10230173, 0.93606132], # [ 0.10741687, -0.29028133, 0.11892585], # [-0.05115091, 0.51918161, -0.19948854]] print(results[1]) # [] """ device = paddle.get_device() if device == "cpu": if driver not in (None, "gels", "gelss", "gelsd", "gelsy"): raise ValueError( "Only support valid driver is 'gels', 'gelss', 'gelsd', 'gelsy' or None for CPU inputs. But got {}". 
format(driver)) driver = "gelsy" if driver is None else driver elif "gpu" in device: if driver not in (None, "gels"): raise ValueError( "Only support valid driver is 'gels' or None for CUDA inputs. But got {}". format(driver)) driver = "gels" if driver is None else driver else: raise RuntimeError("Only support lstsq api for CPU or CUDA device.") if x.dtype == y.dtype and x.dtype in (paddle.float32, paddle.float64): pass else: raise ValueError( "Only support x and y have the same dtype such as 'float32' and 'float64'." ) if rcond is None: if x.dtype == paddle.float32: rcond = 1e-7 * max(x.shape[-2], x.shape[-1]) elif x.dtype == paddle.float64: rcond = 1e-15 * max(x.shape[-2], x.shape[-1]) if paddle.in_dynamic_mode(): solution, rank, singular_values = _C_ops.lstsq(x, y, "rcond", rcond, "driver", driver) if x.shape[-2] > x.shape[-1]: matmul_out = _varbase_creator(dtype=x.dtype) _C_ops.matmul(x, solution, matmul_out, 'trans_x', False, 'trans_y', False) minus_out = _C_ops.elementwise_sub(matmul_out, y) pow_out = _C_ops.pow(minus_out, 'factor', 2) residuals = _C_ops.reduce_sum(pow_out, 'dim', [-2], 'keepdim', False, 'reduce_all', False) else: residuals = paddle.empty(shape=[0], dtype=x.dtype) if driver == "gels": rank = paddle.empty(shape=[0], dtype=paddle.int32) singular_values = paddle.empty(shape=[0], dtype=x.dtype) elif driver == "gelsy": singular_values = paddle.empty(shape=[0], dtype=x.dtype) return solution, residuals, rank, singular_values helper = LayerHelper('lstsq', **locals()) check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq') check_variable_and_dtype( y, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq') solution = helper.create_variable_for_type_inference(dtype=x.dtype) residuals = helper.create_variable_for_type_inference(dtype=x.dtype) rank = helper.create_variable_for_type_inference(dtype=paddle.int32) singular_values = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='lstsq', inputs={'X': x, 'Y': y}, outputs={ 'Solution': solution, 'Rank': rank, 'SingularValues': singular_values }, attrs={'rcond': rcond, 'driver': driver}) matmul_out = helper.create_variable_for_type_inference(dtype=x.dtype) minus_out = helper.create_variable_for_type_inference(dtype=x.dtype) pow_out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matmul_v2', inputs={'X': x, 'Y': solution}, outputs={'Out': matmul_out}, attrs={ 'trans_x': False, 'trans_y': False, }) helper.append_op( type='elementwise_sub', inputs={'X': matmul_out, 'Y': y}, outputs={'Out': minus_out}) helper.append_op( type='pow', inputs={'X': minus_out}, outputs={'Out': pow_out}, attrs={'factor': 2}) helper.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': residuals}, attrs={'dim': [-2], 'keep_dim': False, 'reduce_all': False}) if driver == "gels": rank = paddle.static.data(name='rank', shape=[0]) singular_values = paddle.static.data(name='singular_values', shape=[0]) elif driver == "gelsy": singular_values = paddle.static.data(name='singular_values', shape=[0]) return solution, residuals, rank, singular_values
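Note on the lstsq code above: when M > N and the matrix is full rank, it derives the ``residuals`` output as the column-wise sum of squares of ``x @ solution - y`` (matmul, subtract, square, reduce_sum over the second-to-last axis). A minimal NumPy sketch of that same relationship, using hypothetical data rather than anything taken from the dataset row:

    import numpy as np

    # Overdetermined system (M > N) so that residuals are actually returned.
    x = np.array([[1., 3.], [3., 2.], [5., 6.]])
    y = np.array([[3., 4., 6.], [5., 3., 4.], [1., 2., 1.]])

    solution, residuals, rank, singular_values = np.linalg.lstsq(x, y, rcond=None)

    # Recompute the residuals the same way the code above does:
    # sum((x @ solution - y) ** 2) over the row axis, one value per column of y.
    manual = ((x @ solution - y) ** 2).sum(axis=-2)
    print(np.allclose(residuals, manual))  # True for a full-rank, overdetermined x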
eigvalsh
Computes the eigenvalues of a complex Hermitian (conjugate symmetric) or a real symmetric matrix.

    Args:
        x (Tensor): A tensor with shape :math:`[*, M, M]`. The data type of the input Tensor x
            should be one of float32, float64, complex64, complex128.
        UPLO(str, optional): Lower triangular part of a (‘L’, default) or the upper triangular part (‘U’).
        name(str, optional): The default value is None. Normally there is no need for user to set this
            property. For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: The tensor eigenvalues in ascending order.

    Examples:
        .. code-block:: python

            import numpy as np
            import paddle

            x_data = np.array([[1, -2j], [2j, 5]])
            x = paddle.to_tensor(x_data)
            out_value = paddle.eigvalsh(x, UPLO='L')
            print(out_value)
            # [0.17157288, 5.82842712]
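For orientation, ``eigvalsh`` returns just the eigenvalues that the related ``eigh`` routine pairs with eigenvectors for the same Hermitian/symmetric input. A small cross-check of that relationship, written against NumPy's equivalents rather than the Paddle ops themselves (an illustrative assumption, not part of this dataset row):

    import numpy as np

    # Hermitian matrix from the docstring example.
    x = np.array([[1, -2j], [2j, 5]])

    w_only = np.linalg.eigvalsh(x)   # eigenvalues only, ascending order
    w, v = np.linalg.eigh(x)         # eigenvalues and eigenvectors

    print(np.allclose(w_only, w))    # True
    print(w_only)                    # [0.17157288 5.82842712]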
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from ..fluid.layer_helper import LayerHelper from ..framework import _varbase_creator, _dygraph_tracer from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype from ..static import Variable from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode from ..fluid.layers import transpose, cast # noqa: F401 from ..fluid import layers import paddle from paddle.common_ops_import import core from paddle.common_ops_import import VarDesc from paddle import _C_ops __all__ = [] def matmul(x, y, transpose_x=False, transpose_y=False, name=None): """ Applies matrix multiplication to two tensors. `matmul` follows the complete broadcast rules, and its behavior is consistent with `np.matmul`. Currently, the input tensors' number of dimensions can be any, `matmul` can be used to achieve the `dot`, `matmul` and `batchmatmul`. The actual behavior depends on the shapes of :math:`x`, :math:`y` and the flag values of :attr:`transpose_x`, :attr:`transpose_y`. Specifically: - If a transpose flag is specified, the last two dimensions of the tensor are transposed. If the tensor is ndim-1 of shape, the transpose is invalid. If the tensor is ndim-1 of shape :math:`[D]`, then for :math:`x` it is treated as :math:`[1, D]`, whereas for :math:`y` it is the opposite: It is treated as :math:`[D, 1]`. The multiplication behavior depends on the dimensions of `x` and `y`. Specifically: - If both tensors are 1-dimensional, the dot product result is obtained. - If both tensors are 2-dimensional, the matrix-matrix product is obtained. - If the `x` is 1-dimensional and the `y` is 2-dimensional, a `1` is prepended to its dimension in order to conduct the matrix multiply. After the matrix multiply, the prepended dimension is removed. - If the `x` is 2-dimensional and `y` is 1-dimensional, the matrix-vector product is obtained. - If both arguments are at least 1-dimensional and at least one argument is N-dimensional (where N > 2), then a batched matrix multiply is obtained. If the first argument is 1-dimensional, a 1 is prepended to its dimension in order to conduct the batched matrix multiply and removed after. If the second argument is 1-dimensional, a 1 is appended to its dimension for the purpose of the batched matrix multiple and removed after. The non-matrix (exclude the last two dimensions) dimensions are broadcasted according the broadcast rule. For example, if input is a (j, 1, n, m) tensor and the other is a (k, m, p) tensor, out will be a (j, k, n, p) tensor. Args: x (Tensor): The input tensor which is a Tensor. y (Tensor): The input tensor which is a Tensor. transpose_x (bool): Whether to transpose :math:`x` before multiplication. transpose_y (bool): Whether to transpose :math:`y` before multiplication. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The output Tensor. Examples: .. 
code-block:: python import paddle import numpy as np # vector * vector x_data = np.random.random([10]).astype(np.float32) y_data = np.random.random([10]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [1] # matrix * vector x_data = np.random.random([10, 5]).astype(np.float32) y_data = np.random.random([5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10] # batched matrix * broadcasted vector x_data = np.random.random([10, 5, 2]).astype(np.float32) y_data = np.random.random([2]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 5] # batched matrix * batched matrix x_data = np.random.random([10, 5, 2]).astype(np.float32) y_data = np.random.random([10, 2, 5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 5, 5] # batched matrix * broadcasted matrix x_data = np.random.random([10, 1, 5, 2]).astype(np.float32) y_data = np.random.random([1, 3, 2, 5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 3, 5, 5] """ if in_dygraph_mode(): return _C_ops.final_state_matmul(x, y, transpose_x, transpose_y) if _in_legacy_dygraph(): op_type = 'matmul_v2' op = getattr(_C_ops, op_type) return op(x, y, 'trans_x', transpose_x, 'trans_y', transpose_y) attrs = { 'trans_x': transpose_x, 'trans_y': transpose_y, } def __check_input(x, y): var_names = {'x': x, 'y': y} for name, val in var_names.items(): check_variable_and_dtype( val, name, ['float16', 'float32', 'float64', 'complex64', 'complex128'], 'matmul') __check_input(x, y) helper = LayerHelper('matmul_v2', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matmul_v2', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs=attrs) return out def norm(x, p='fro', axis=None, keepdim=False, name=None): """ Returns the matrix norm (Frobenius) or vector norm (the 1-norm, the Euclidean or 2-norm, and in general the p-norm for p > 0) of a given tensor. .. note:: This norm API is different from `numpy.linalg.norm`. This api supports high-order input tensors (rank >= 3), and certain axis need to be pointed out to calculate the norm. But `numpy.linalg.norm` only supports 1-D vector or 2-D matrix as input tensor. For p-order matrix norm, this api actually treats matrix as a flattened vector to calculate the vector norm, NOT REAL MATRIX NORM. Args: x (Tensor): The input tensor could be N-D tensor, and the input data type could be float32 or float64. p (float|string, optional): Order of the norm. Supported values are `fro`, `0`, `1`, `2`, `inf`, `-inf` and any positive real number yielding the corresponding p-norm. Not supported: ord < 0 and nuclear norm. Default value is `fro`. axis (int|list|tuple, optional): The axis on which to apply norm operation. If axis is int or list(int)/tuple(int) with only one element, the vector norm is computed over the axis. If `axis < 0`, the dimension to norm operation is rank(input) + axis. If axis is a list(int)/tuple(int) with two elements, the matrix norm is computed over the axis. Defalut value is `None`. keepdim (bool, optional): Whether to reserve the reduced dimension in the output Tensor. 
The result tensor will have fewer dimension than the :attr:`input` unless :attr:`keepdim` is true, default value is False. name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: results of norm operation on the specified axis of input tensor, it's data type is the same as input's Tensor. Examples: .. code-block:: python import paddle import numpy as np shape=[2, 3, 4] np_input = np.arange(24).astype('float32') - 12 np_input = np_input.reshape(shape) x = paddle.to_tensor(np_input) #[[[-12. -11. -10. -9.] [ -8. -7. -6. -5.] [ -4. -3. -2. -1.]] # [[ 0. 1. 2. 3.] [ 4. 5. 6. 7.] [ 8. 9. 10. 11.]]] # compute frobenius norm along last two dimensions. out_fro = paddle.linalg.norm(x, p='fro', axis=[0,1]) # out_fro.numpy() [17.435596 16.911535 16.7332 16.911535] # compute 2-order vector norm along last dimension. out_pnorm = paddle.linalg.norm(x, p=2, axis=-1) #out_pnorm.numpy(): [[21.118711 13.190906 5.477226] # [ 3.7416575 11.224972 19.131126]] # compute 2-order norm along [0,1] dimension. out_pnorm = paddle.linalg.norm(x, p=2, axis=[0,1]) #out_pnorm.numpy(): [17.435596 16.911535 16.7332 16.911535] # compute inf-order norm out_pnorm = paddle.linalg.norm(x, p=np.inf) #out_pnorm.numpy() = [12.] out_pnorm = paddle.linalg.norm(x, p=np.inf, axis=0) #out_pnorm.numpy(): [[12. 11. 10. 9.] [8. 7. 6. 7.] [8. 9. 10. 11.]] # compute -inf-order norm out_pnorm = paddle.linalg.norm(x, p=-np.inf) #out_pnorm.numpy(): [0.] out_pnorm = paddle.linalg.norm(x, p=-np.inf, axis=0) #out_pnorm.numpy(): [[0. 1. 2. 3.] [4. 5. 6. 5.] [4. 3. 2. 1.]] """ def frobenius_norm(input, dim=None, keepdim=False, name=None): """ The frobenius norm OP is to calculate the frobenius norm of certain two dimensions of Tensor `input`. Args: input (Variable): Tensor, data type float32, float64. dim (list, optional): None for last two dimensions. keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False. """ if dim is not None and not (isinstance(dim, list) and len(dim) == 2): raise ValueError( "The dim of frobenius norm op should be None or two elements list!" ) if paddle.in_dynamic_mode(): if dim is None: return _C_ops.frobenius_norm(input, 'keep_dim', keepdim, 'reduce_all', True) return _C_ops.frobenius_norm(input, 'dim', dim, 'keep_dim', keepdim, 'reduce_all', False) attrs = {'dim': dim, 'keep_dim': keepdim, 'reduce_all': False} if dim is None: attrs['reduce_all'] = True check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'frobenius_norm') helper = LayerHelper('frobenius_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op( type='frobenius_norm', inputs={'X': input}, outputs={'Out': out}, attrs=attrs) return out def vector_norm(input, porder=None, axis=None, keepdim=False, asvector=False, name=None): """ Calculate the p-order vector norm for certain dimension of Tensor `input`. Args: input (Variable): Tensor, data type float32, float64. porder (float, optional): None for porder=2.0. axis (int, optional): None for last dimension. keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False. 
""" if paddle.in_dynamic_mode(): if axis is None: axis = -1 return _C_ops.p_norm(input, 'porder', porder, 'axis', axis, 'keepdim', keepdim, 'asvector', asvector) if porder is not None: check_type(porder, 'porder', (float, int), 'p_norm') if axis is not None: check_type(axis, 'axis', (int), 'p_norm') check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'p_norm') attrs = { 'axis': axis if axis is not None else -1, 'porder': float(porder) if porder is not None else 2.0, 'keepdim': keepdim, 'asvector': asvector, 'epsilon': 1e-12, } helper = LayerHelper('p_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op( type='p_norm', inputs={'X': input}, outputs={'Out': out}, attrs=attrs) return out def inf_norm(input, porder=None, axis=axis, keepdim=False, asvector=False, name=None): helper = LayerHelper('frobenius_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op(type='abs', inputs={'X': input}, outputs={'Out': out}) reduce_out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) reduce_all = True if axis == None or axis == [] or asvector == True else False axis = axis if axis != None and axis != [] else [0] reduce_type = 'reduce_max' if porder == np.float( 'inf') else 'reduce_min' helper.append_op( type=reduce_type, inputs={'X': out}, outputs={'Out': reduce_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) return reduce_out def p_matrix_norm(input, porder=1., axis=axis, keepdim=False, name=None): """ NOTE: This function actually treats the matrix as flattened vector to calculate vector norm instead of matrix norm. """ block = LayerHelper('norm', **locals()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) abs_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='abs', inputs={'X': input}, outputs={'Out': abs_out}) pow_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='pow', inputs={'X': abs_out}, outputs={'Out': pow_out}, attrs={'factor': porder}) sum_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': sum_out}, attrs={ 'dim': axis, 'keep_dim': keepdim, 'reduce_all': True if axis is None else False }) porder block.append_op( type='pow', inputs={'X': sum_out}, outputs={'Out': out}, attrs={'factor': float(1. / porder)}) return out if axis is None and p is not None: if isinstance(p, str): if p == "fro": return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name) else: raise ValueError( "only valid string values are 'fro', found {}".format(p)) elif isinstance(p, (int, float)): return vector_norm( x, porder=p, axis=axis, keepdim=keepdim, asvector=True, name=name) else: raise ValueError("only valid p type is string or float, found {}". 
format(type(p))) if isinstance(axis, tuple): axis = list(axis) if isinstance(axis, list) and len(axis) == 1: axis = axis[0] #calculate vector norm, where axis is int or list with only one integer if isinstance(axis, int): if isinstance(p, str): if p == "fro": return vector_norm( x, porder=2, axis=axis, keepdim=keepdim, asvector=False, name=name) else: raise ValueError( "only valid string values are 'fro', found {}".format(p)) elif isinstance(p, (int, float)): return vector_norm( x, axis=axis, porder=p, keepdim=keepdim, asvector=False, name=name) else: raise ValueError( "unspport p for p-order vector norm. except float, found {}". format(p)) #calculate matrix norm, where axis is list with two integers elif isinstance(axis, list) and len(axis) == 2: if p == "fro": return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name) elif p == np.inf or p == -np.inf: return inf_norm(x, porder=p, axis=axis, keepdim=keepdim, name=name) elif p == 0: raise ValueError( "just suport axis type int or list (length of list <=1) if p = 0, found {}". format(axis)) else: return p_matrix_norm( x, porder=p, axis=axis, keepdim=keepdim, name=name) else: raise ValueError( "except axis type int or list (length of list <=2), found {}". format(axis)) def dist(x, y, p=2, name=None): r""" This OP returns the p-norm of (x - y). It is not a norm in a strict sense, only as a measure of distance. The shapes of x and y must be broadcastable. The definition is as follows, for details, please refer to the `numpy's broadcasting <https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`_: - Each input has at least one dimension. - Match the two input dimensions from back to front, the dimension sizes must either be equal, one of them is 1, or one of them does not exist. Where, z = x - y, the shapes of x and y are broadcastable, then the shape of z can be obtained as follows: 1. If the number of dimensions of x and y are not equal, prepend 1 to the dimensions of the tensor with fewer dimensions. For example, The shape of x is [8, 1, 6, 1], the shape of y is [7, 1, 5], prepend 1 to the dimension of y. x (4-D Tensor): 8 x 1 x 6 x 1 y (4-D Tensor): 1 x 7 x 1 x 5 2. Determine the size of each dimension of the output z: choose the maximum value from the two input dimensions. z (4-D Tensor): 8 x 7 x 6 x 5 If the number of dimensions of the two inputs are the same, the size of the output can be directly determined in step 2. When p takes different values, the norm formula is as follows: When p = 0, defining $0^0=0$, the zero-norm of z is simply the number of non-zero elements of z. .. math:: ||z||_{0}=\lim_{p \\rightarrow 0}\sum_{i=1}^{m}|z_i|^{p} When p = inf, the inf-norm of z is the maximum element of z. .. math:: ||z||_\infty=\max_i |z_i| When p = -inf, the negative-inf-norm of z is the minimum element of z. .. math:: ||z||_{-\infty}=\min_i |z_i| Otherwise, the p-norm of z follows the formula, .. math:: ||z||_{p}=(\sum_{i=1}^{m}|z_i|^p)^{\\frac{1}{p}} Args: x (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64. y (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64. p (float, optional): The norm to be computed, its data type is float32 or float64. Default: 2. Returns: Tensor: Tensor that is the p-norm of (x - y). Examples: .. code-block:: python import paddle import numpy as np x = paddle.to_tensor(np.array([[3, 3],[3, 3]]), "float32") y = paddle.to_tensor(np.array([[3, 3],[3, 1]]), "float32") out = paddle.dist(x, y, 0) print(out) # out = [1.] out = paddle.dist(x, y, 2) print(out) # out = [2.] 
out = paddle.dist(x, y, float("inf")) print(out) # out = [2.] out = paddle.dist(x, y, float("-inf")) print(out) # out = [0.] """ check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'dist') check_variable_and_dtype(y, 'dtype', ['float32', 'float64'], 'dist') check_type(p, 'p', (float, int), 'dist') helper = LayerHelper("dist", **locals()) out = helper.create_variable_for_type_inference(x.dtype) inputs = {"X": [x], "Y": [y]} outputs = {'Out': [out]} attrs = {"p": float(p)} helper.append_op( type='dist', inputs=inputs, outputs={'Out': out}, attrs=attrs) return out def cond(x, p=None, name=None): """ Computes the condition number of a matrix or batches of matrices with respect to a matrix norm ``p``. Args: x (Tensor): The input tensor could be tensor of shape ``(*, m, n)`` where ``*`` is zero or more batch dimensions for ``p`` in ``(2, -2)``, or of shape ``(*, n, n)`` where every matrix is invertible for any supported ``p``. And the input data type could be ``float32`` or ``float64``. p (float|string, optional): Order of the norm. Supported values are `fro`, `nuc`, `1`, `-1`, `2`, `-2`, `inf`, `-inf`. Default value is `None`, meaning that the order of the norm is `2`. name (str, optional): The default value is `None`. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: computing results of condition number, its data type is the same as input Tensor ``x``. Examples: .. code-block:: python import paddle import numpy as np x = paddle.to_tensor([[1., 0, -1], [0, 1, 0], [1, 0, 1]]) # compute conditional number when p is None out = paddle.linalg.cond(x) # out.numpy() [1.4142135] # compute conditional number when order of the norm is 'fro' out_fro = paddle.linalg.cond(x, p='fro') # out_fro.numpy() [3.1622777] # compute conditional number when order of the norm is 'nuc' out_nuc = paddle.linalg.cond(x, p='nuc') # out_nuc.numpy() [9.2426405] # compute conditional number when order of the norm is 1 out_1 = paddle.linalg.cond(x, p=1) # out_1.numpy() [2.] # compute conditional number when order of the norm is -1 out_minus_1 = paddle.linalg.cond(x, p=-1) # out_minus_1.numpy() [1.] # compute conditional number when order of the norm is 2 out_2 = paddle.linalg.cond(x, p=2) # out_2.numpy() [1.4142135] # compute conditional number when order of the norm is -1 out_minus_2 = paddle.linalg.cond(x, p=-2) # out_minus_2.numpy() [0.70710677] # compute conditional number when order of the norm is inf out_inf = paddle.linalg.cond(x, p=np.inf) # out_inf.numpy() [2.] # compute conditional number when order of the norm is -inf out_minus_inf = paddle.linalg.cond(x, p=-np.inf) # out_minus_inf.numpy() [1.] 
a = paddle.to_tensor(np.random.randn(2, 4, 4).astype('float32')) # a.numpy() # [[[ 0.14063153 -0.996288 0.7996131 -0.02571543] # [-0.16303636 1.5534962 -0.49919784 -0.04402903] # [-1.1341571 -0.6022629 0.5445269 0.29154757] # [-0.16816919 -0.30972657 1.7521842 -0.5402487 ]] # [[-0.58081484 0.12402827 0.7229862 -0.55046535] # [-0.15178485 -1.1604939 0.75810957 0.30971205] # [-0.9669573 1.0940945 -0.27363303 -0.35416734] # [-1.216529 2.0018666 -0.7773689 -0.17556527]]] a_cond_fro = paddle.linalg.cond(a, p='fro') # a_cond_fro.numpy() [31.572273 28.120834] b = paddle.to_tensor(np.random.randn(2, 3, 4).astype('float64')) # b.numpy() # [[[ 1.61707487 0.46829144 0.38130416 0.82546736] # [-1.72710298 0.08866375 -0.62518804 0.16128892] # [-0.02822879 -1.67764516 0.11141444 0.3220113 ]] # [[ 0.22524372 0.62474921 -0.85503233 -1.03960523] # [-0.76620689 0.56673047 0.85064753 -0.45158196] # [ 1.47595418 2.23646462 1.5701758 0.10497519]]] b_cond_2 = paddle.linalg.cond(b, p=2) # b_cond_2.numpy() [3.30064451 2.51976252] """ def mat_norm(input, porder=1., axis=None): """ NOTE: Calculate the matrix norm of a square matrix or batches of square matrices, when porder is in (1, -1, inf, -inf) """ reduce_all = True if axis is None or axis == [] else False axis = axis if axis != None and axis != [] else [0] keepdim = False if paddle.in_dynamic_mode(): abs_out = _C_ops.abs(input) sum_out = _C_ops.reduce_sum(abs_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == 1 or porder == np.inf: return _C_ops.reduce_max(sum_out, 'dim', [-1], 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == -1 or porder == -np.inf: return _C_ops.reduce_min(sum_out, 'dim', [-1], 'keepdim', keepdim, 'reduce_all', reduce_all) block = LayerHelper('norm', **locals()) abs_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='abs', inputs={'X': input}, outputs={'Out': abs_out}) block.append_op( type='reduce_sum', inputs={'X': abs_out}, outputs={'Out': sum_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) if porder == 1 or porder == np.inf: block.append_op( type='reduce_max', inputs={'X': sum_out}, outputs={'Out': out}, attrs={ 'dim': [-1], 'keep_dim': keepdim, 'reduce_all': reduce_all }) if porder == -1 or porder == -np.inf: block.append_op( type='reduce_min', inputs={'X': sum_out}, outputs={'Out': out}, attrs={ 'dim': [-1], 'keep_dim': keepdim, 'reduce_all': reduce_all }) return out def fro_norm(input, porder=2, axis=[-1]): """ NOTE: Calculate the frobenius norm of a square matrix or batches of square matrices. """ reduce_all = True if axis is None or axis == [] else False keepdim = False if paddle.in_dynamic_mode(): pow_out = _C_ops.pow(input, 'factor', porder) sum_out_1 = _C_ops.reduce_sum(pow_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) sum_out_2 = _C_ops.reduce_sum(sum_out_1, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) return _C_ops.pow(sum_out_2, 'factor', float(1. 
/ porder)) block = LayerHelper('norm', **locals()) pow_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out_1 = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out_2 = block.create_variable_for_type_inference( dtype=block.input_dtype()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='pow', inputs={'X': input}, outputs={'Out': pow_out}, attrs={'factor': porder}) block.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': sum_out_1}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='reduce_sum', inputs={'X': sum_out_1}, outputs={'Out': sum_out_2}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='pow', inputs={'X': sum_out_2}, outputs={'Out': out}, attrs={'factor': float(1. / porder)}) return out def svd_norm(input, porder, axis=[-1]): """ NOTE: Calculate the matrix norm, which is related to singular values, of a matrix or batches of matrices, including nuclear norm, 2-norm and (-2)-norm. """ reduce_all = True if axis is None or axis == [] else False keepdim = False u, s, vh = svd(input, full_matrices=False) if paddle.in_dynamic_mode(): if porder == "nuc": return _C_ops.reduce_sum(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) max_out = _C_ops.reduce_max(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) min_out = _C_ops.reduce_min(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == 2: return _C_ops.elementwise_div(max_out, min_out, 'aixs', axis, 'use_mkldnn', False) if porder == -2: return _C_ops.elementwise_div(min_out, max_out, 'aixs', axis, 'use_mkldnn', False) block = LayerHelper('norm', **locals()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) if porder == "nuc": block.append_op( type='reduce_sum', inputs={'X': s}, outputs={'Out': out}, attrs={ 'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all }) return out max_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) min_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='reduce_max', inputs={'X': s}, outputs={'Out': max_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='reduce_min', inputs={'X': s}, outputs={'Out': min_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) if porder == 2: block.append_op( type='elementwise_div', inputs={'X': max_out, 'Y': min_out}, outputs={'Out': out}, attrs={'aixs': axis, 'use_mkldnn': False}) return out if porder == -2: block.append_op( type='elementwise_div', inputs={'X': min_out, 'Y': max_out}, outputs={'Out': out}, attrs={'aixs': axis, 'use_mkldnn': False}) return out def empty_tensor(input, shape): if paddle.in_dynamic_mode(): return input.reshape(shape) raise ValueError("only support x is nonempty tensor in static mode") x_shape = list(x.shape) if not len(x_shape) >= 2: raise ValueError("input should be a matrix or batches of matrices, " + "but the dimention of received input is {}".format( len(x_shape))) if p == None: p = 2 x_size = 0 if (0 in x_shape) else 1 if p in ("fro", "nuc", 1, -1, np.inf, -np.inf): if x_shape[len(x_shape) - 1] == x_shape[len(x_shape) - 2]: if x_size == 0: return empty_tensor(x, x_shape[:-2]) x_inv = x.inverse() if p == "fro": return fro_norm(x) * fro_norm(x_inv) if p == "nuc": return svd_norm(x, p) * svd_norm(x_inv, p) if p in (1, -1): return mat_norm( x, 
porder=p, axis=[-2]) * mat_norm( x_inv, porder=p, axis=[-2]) if p in (np.inf, -np.inf): return mat_norm( x, porder=p, axis=[-1]) * mat_norm( x_inv, porder=p, axis=[-1]) else: raise ValueError("only support p is {} when input is a ".format(p) + "square matrix or batches of square matrices") elif p in (2, -2): if x_size == 0: return empty_tensor(x, x_shape[:-2]) return svd_norm(x, porder=p) else: raise ValueError( "unsupported {} for p, only supporting ('fro', 'nuc', ".format( p) + "1, -1, 2, -2, inf, -inf) or none") def dot(x, y, name=None): """ This operator calculates inner product for vectors. .. note:: Support 1-d and 2-d Tensor. When it is 2d, the first dimension of this matrix is the batch dimension, which means that the vectors of multiple batches are dotted. Parameters: x(Tensor): 1-D or 2-D ``Tensor``. Its dtype should be ``float32``, ``float64``, ``int32``, ``int64`` y(Tensor): 1-D or 2-D ``Tensor``. Its dtype soulde be ``float32``, ``float64``, ``int32``, ``int64`` name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. Details: :ref:`api_guide_Name` Returns: Tensor: the calculated result Tensor. Examples: .. code-block:: python import paddle import numpy as np x_data = np.random.uniform(0.1, 1, [10]).astype(np.float32) y_data = np.random.uniform(1, 3, [10]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.dot(x, y) print(z) """ op_type = 'dot' # skip var type check in dygraph mode to improve efficiency if paddle.in_dynamic_mode(): op = getattr(_C_ops, op_type) return op(x, y) assert x is not None, 'x cannot be None in {}'.format(op_type) assert y is not None, 'y cannot be None in {}'.format(op_type) check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], op_type) check_variable_and_dtype(y, 'y', ['float32', 'float64', 'int32', 'int64'], op_type) helper = LayerHelper(op_type, **locals()) if name is None: out = helper.create_variable_for_type_inference(dtype=x.dtype) else: out = helper.create_variable( name=name, dtype=x.dtype, persistable=False) helper.append_op( type="dot", inputs={'X': x, 'Y': y}, attrs={}, outputs={"Out": out}) return out def cov(x, rowvar=True, ddof=True, fweights=None, aweights=None, name=None): """ Estimate the covariance matrix of the input variables, given data and weights. A covariance matrix is a square matrix, indicate the covariance of each pair variables in the input matrix. For example, for an N-dimensional samples X=[x1,x2,…xN]T, then the covariance matrix element Cij is the covariance of xi and xj. The element Cii is the variance of xi itself. Parameters: x(Tensor): A N-D(N<=2) Tensor containing multiple variables and observations. By default, each row of x represents a variable. Also see rowvar below. rowvar(Bool, optional): If rowvar is True (default), then each row represents a variable, with observations in the columns. Default: True ddof(Bool, optional): If ddof=True will return the unbiased estimate, and ddof=False will return the simple average. Default: True fweights(Tensor, optional): 1-D Tensor of integer frequency weights; The number of times each observation vector should be repeated. Default: None aweights(Tensor, optional): 1-D Tensor of observation vector weights. How important of the observation vector, larger data means this element is more important. Default: None name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. 
Details: :ref:`api_guide_Name` Returns: Tensor: The covariance matrix Tensor of the variables. Examples: .. code-block:: python import paddle xt = paddle.rand((3,4)) paddle.linalg.cov(xt) ''' Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, [[0.07918842, 0.06127326, 0.01493049], [0.06127326, 0.06166256, 0.00302668], [0.01493049, 0.00302668, 0.01632146]]) ''' """ op_type = 'cov' if len(x.shape) > 2 or len(x.shape) < 1: raise ValueError( "Input(x) only support N-D (1<=N<=2) tensor in cov, but received " "length of Input(input) is %s." % len(x.shape)) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cov') nx = x if len(x.shape) == 1: nx = x.reshape((1, -1)) if not rowvar and nx.shape[0] != 1: nx = nx.t() w = None observation_num = nx.shape[1] if fweights is not None: w = fweights.astype(nx.dtype) if len(w.shape) > 1: raise ValueError( "Input(fweights) only support N-D (N<=1) tensor in cov, but received " "shape of Input(input) is %s." % len(fweights.shape)) if fweights.shape[0] != observation_num: raise ValueError( "The number of Input(fweights) should equal to x's dim[1]: {}, but received " "size of Input(fweights) is {}.".format(observation_num, fweights.shape[0])) if fweights.min() < 0: raise ValueError( "The value of Input(fweights) cannot be negtive, but received " "min of Input(fweights) is {}.".format(fweights.min())) if not paddle.all(fweights == paddle.round(fweights.astype('float64'))): raise ValueError("Input(fweights) must be integer ") if aweights is not None: aw = aweights.astype(nx.dtype) if len(aw.shape) > 1: raise ValueError( "Input(aweights) only support N-D (N<=1) tensor in cov, but received " "length of Input(input) is %s." % len(aweights.shape)) check_variable_and_dtype(aweights, 'dtype', ['float32', 'float64'], 'cov') if aweights.shape[0] != observation_num: raise ValueError( "The number of Input(aweights) should equal to x's dim[1]: {}, but received " "size of Input(aweights) is {}.".format(observation_num, aweights.shape[0])) if aweights.min() < 0: raise ValueError( "The value of Input(aweights) cannot be negtive, but received " "min of Input(aweights) is {}.".format(aweights.min())) if w is not None: w = w * aw else: w = aw w_sum = paddle.to_tensor(observation_num, dtype=nx.dtype) if fweights is not None or aweights is not None: w_sum = w.sum() if w_sum.item() == 0: raise ValueError("The sum of weights is zero, can't be normalized.") if w is not None: nx_w = nx * w avg = (nx_w).sum(axis=1) / w_sum else: avg = nx.sum(axis=1) / w_sum nx_w = nx if w is not None and aweights is not None and ddof == True: norm_factor = w_sum - (w * aweights).sum() / w_sum else: norm_factor = w_sum - ddof if norm_factor <= 0: norm_factor = paddle.to_tensor(0, dtype=nx.dtype) nx = nx - avg.unsqueeze(1) xxt = paddle.mm(nx, nx_w.t().conj()) cov = paddle.divide(xxt, norm_factor).squeeze() return cov def t(input, name=None): """ Transpose <=2-D tensor. 0-D and 1-D tensors are returned as it is and 2-D tensor is equal to the paddle.transpose function which perm dimensions set 0 and 1. Args: input (Tensor): The input Tensor. It is a N-D (N<=2) Tensor of data types float16, float32, float64, int32. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` Returns: Tensor: A transposed n-D Tensor, with data type being float16, float32, float64, int32, int64. For Example: .. 
code-block:: text # Example 1 (0-D tensor) x = tensor([0.79]) paddle.t(x) = tensor([0.79]) # Example 2 (1-D tensor) x = tensor([0.79, 0.84, 0.32]) paddle.t(x) = tensor([0.79, 0.84, 0.32]) # Example 3 (2-D tensor) x = tensor([0.79, 0.84, 0.32], [0.64, 0.14, 0.57]) paddle.t(x) = tensor([0.79, 0.64], [0.84, 0.14], [0.32, 0.57]) Examples: .. code-block:: python import paddle x = paddle.ones(shape=[2, 3], dtype='int32') x_transposed = paddle.t(x) print(x_transposed.shape) # [3, 2] """ if len(input.shape) > 2: raise ValueError( "Input(input) only support N-D (N<=2) tensor, but received " "length of Input(input) is %s. Perhaps you can use paddle." "tensor.transpose() instead." % len(input.shape)) if paddle.in_dynamic_mode(): if len(input.shape) == 1: return input # 2-D tensor perm = [1, 0] out, _ = _C_ops.transpose2(input, 'axis', perm) return out check_variable_and_dtype( input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'], 'transpose') helper = LayerHelper('t', **locals()) out = helper.create_variable_for_type_inference(input.dtype) input_shape = helper.create_variable_for_type_inference(input.dtype) if len(input.shape) == 1: out = input else: helper.append_op( type='transpose2', inputs={'X': [input]}, outputs={'Out': [out], 'XShape': [input_shape]}, attrs={'axis': [1, 0]}) return out def cross(x, y, axis=None, name=None): """ Computes the cross product between two tensors along an axis. Inputs must have the same shape, and the length of their axes should be equal to 3. If `axis` is not given, it defaults to the first axis found with the length 3. Args: x (Tensor): The first input tensor. y (Tensor): The second input tensor. axis (int, optional): The axis along which to compute the cross product. It defaults to the first axis found with the length 3. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor. A Tensor with same data type as `x`. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0], [3.0, 3.0, 3.0]]) y = paddle.to_tensor([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]) z1 = paddle.cross(x, y) # [[-1. -1. -1.] # [ 2. 2. 2.] # [-1. -1. -1.]] z2 = paddle.cross(x, y, axis=1) # [[0. 0. 0.] # [0. 0. 0.] # [0. 0. 0.]] """ if in_dygraph_mode(): return _C_ops.final_state_cross(x, y, axis) else: if _in_legacy_dygraph(): if axis is not None: return _C_ops.cross(x, y, 'dim', axis) else: return _C_ops.cross(x, y) else: helper = LayerHelper("cross", **locals()) out = helper.create_variable_for_type_inference(x.dtype) attrs = dict() attrs['dim'] = axis helper.append_op( type='cross', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs=attrs) return out def cholesky(x, upper=False, name=None): r""" Computes the Cholesky decomposition of one symmetric positive-definite matrix or batches of symmetric positive-definite matrice. If `upper` is `True`, the decomposition has the form :math:`A = U^{T}U` , and the returned matrix :math:`U` is upper-triangular. Otherwise, the decomposition has the form :math:`A = LL^{T}` , and the returned matrix :math:`L` is lower-triangular. Args: x (Tensor): The input tensor. Its shape should be `[*, M, M]`, where * is zero or more batch dimensions, and matrices on the inner-most 2 dimensions all should be symmetric positive-definite. Its data type should be float32 or float64. upper (bool): The flag indicating whether to return upper or lower triangular matrices. Default: False. 
Returns: Tensor: A Tensor with same shape and data type as `x`. It represents \ triangular matrices generated by Cholesky decomposition. Examples: .. code-block:: python import paddle import numpy as np a = np.random.rand(3, 3) a_t = np.transpose(a, [1, 0]) x_data = np.matmul(a, a_t) + 1e-03 x = paddle.to_tensor(x_data) out = paddle.linalg.cholesky(x, upper=False) print(out) # [[1.190523 0. 0. ] # [0.9906703 0.27676893 0. ] # [1.25450498 0.05600871 0.06400121]] """ if paddle.in_dynamic_mode(): return _C_ops.cholesky(x, "upper", upper) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cholesky') check_type(upper, 'upper', bool, 'cholesky') helper = LayerHelper('cholesky', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='cholesky', inputs={'X': [x]}, outputs={'Out': out}, attrs={'upper': upper}) return out def matrix_rank(x, tol=None, hermitian=False, name=None): r""" Computes the rank of a matrix. The rank of a matrix is the number of singular values that are greater than the specified `tol` threshold when hermitian=False, or the number of eigenvalues in absolute value that are greater than the specified `tol` threshold when hermitian=True. Args: x (Tensor): The input tensor. Its shape should be `[..., m, n]`, where `...` is zero or more batch dimensions. If `x` is a batch of matrices then the output has the same batch dimensions. The data type of `x` should be float32 or float64. tol (float,Tensor,optional): the tolerance value. Default: None. If `tol` is not specified, and `sigma` is the largest singular value (or eigenvalues in absolute value), and `eps` is the epsilon value for the dtype of `x`, then `tol` is computed with formula `tol=sigma * max(m,n) * eps`. Note that if `x` is a batch of matrices, `tol` is computed this way for every batch. hermitian (bool,optional): indicates whether `x` is Hermitian. Default: False. When hermitian=True, `x` is assumed to be Hermitian, enabling a more efficient method for finding eigenvalues, but `x` is not checked inside the function. Instead, We just use the lower triangular of the matrix to compute. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: Rank of tensor x. Examples: .. 
code-block:: python import paddle a = paddle.eye(10) b = paddle.linalg.matrix_rank(a) print(b) # b = [10] c = paddle.ones(shape=[3, 4, 5, 5]) d = paddle.linalg.matrix_rank(c, tol=0.01, hermitian=True) print(d) # d = [[1, 1, 1, 1], # [1, 1, 1, 1], # [1, 1, 1, 1]] """ if paddle.in_dynamic_mode(): if tol is None: tol_tensor = None tol_attr = 0.0 use_default_tol = True elif isinstance(tol, Variable): if tol.dtype != x.dtype: tol_tensor = cast(tol, x.dtype) else: tol_tensor = tol tol_attr = 0.0 use_default_tol = False else: tol_tensor = None tol_attr = float(tol) use_default_tol = False return _C_ops.matrix_rank(x, tol_tensor, "tol", tol_attr, 'hermitian', hermitian, 'use_default_tol', use_default_tol) inputs = {} attrs = {} check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'matrix_rank') inputs['X'] = x if tol is None: attrs['use_default_tol'] = True elif isinstance(tol, Variable): check_variable_and_dtype(tol, 'tol', ['float32'], 'matrix_rank') attrs['use_default_tol'] = False if tol.dtype != x.dtype: inputs['TolTensor'] = cast(tol, x.dtype) else: inputs['TolTensor'] = tol else: check_type(tol, 'tol', float, 'matrix_rank') attrs['use_default_tol'] = False attrs['tol'] = tol check_type(hermitian, 'hermitian', bool, 'matrix_rank') attrs['hermitian'] = hermitian helper = LayerHelper('matrix_rank', **locals()) out = helper.create_variable_for_type_inference(dtype='int32') helper.append_op( type='matrix_rank', inputs=inputs, outputs={'Out': out}, attrs=attrs) return out def bmm(x, y, name=None): """ Applies batched matrix multiplication to two tensors. Both of the two input tensors must be three-dementional and share the same batch size. if x is a (b, m, k) tensor, y is a (b, k, n) tensor, the output will be a (b, m, n) tensor. Args: x (Tensor): The input Tensor. y (Tensor): The input Tensor. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The product Tensor. Examples: .. code-block:: python import paddle # In imperative mode: # size x: (2, 2, 3) and y: (2, 3, 2) x = paddle.to_tensor([[[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]], [[3.0, 3.0, 3.0], [4.0, 4.0, 4.0]]]) y = paddle.to_tensor([[[1.0, 1.0],[2.0, 2.0],[3.0, 3.0]], [[4.0, 4.0],[5.0, 5.0],[6.0, 6.0]]]) out = paddle.bmm(x, y) #output size: (2, 2, 2) #output value: #[[[6.0, 6.0],[12.0, 12.0]],[[45.0, 45.0],[60.0, 60.0]]] out_np = out.numpy() """ x_shape = x.shape y_shape = y.shape if not len(x_shape) == len(y_shape) == 3: raise ValueError( "x and y should be 3-dimensional. But received x's dimention: {}, y's dimention: {}". format(x_shape, y_shape)) if x_shape[2] != y_shape[1]: raise ValueError( "x's width must be equal with y's height. But received x's shape: {}, y's shape: {}". format(x_shape, y_shape)) if x_shape[0] != y_shape[0]: raise ValueError( "x's batch (shape[0]) must be equal with y's batch (shape[0]). But received x's shape: {}, y's shape: {}". format(x_shape, y_shape)) if paddle.in_dynamic_mode(): return _C_ops.bmm(x, y) helper = LayerHelper('bmm', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type='bmm', inputs={'X': x, 'Y': y}, outputs={'Out': out}) return out def histogram(input, bins=100, min=0, max=0, name=None): """ Computes the histogram of a tensor. The elements are sorted into equal width bins between min and max. If min and max are both zero, the minimum and maximum values of the data are used. Args: input (Tensor): A Tensor(or LoDTensor) with shape :math:`[N_1, N_2,..., N_k]` . 
The data type of the input Tensor should be float32, float64, int32, int64. bins (int): number of histogram bins min (int): lower end of the range (inclusive) max (int): upper end of the range (inclusive) Returns: Tensor: data type is int64, shape is (nbins,). Examples: .. code-block:: python import paddle inputs = paddle.to_tensor([1, 2, 1]) result = paddle.histogram(inputs, bins=4, min=0, max=3) print(result) # [0, 2, 1, 0] """ if paddle.in_dynamic_mode(): return _C_ops.histogram(input, "bins", bins, "min", min, "max", max) helper = LayerHelper('histogram', **locals()) check_variable_and_dtype( input, 'X', ['int32', 'int64', 'float32', 'float64'], 'histogram') out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64) helper.append_op( type='histogram', inputs={'X': input}, outputs={'Out': out}, attrs={'bins': bins, 'min': min, 'max': max}) return out def bincount(x, weights=None, minlength=0, name=None): """ Computes frequency of each value in the input tensor. Args: x (Tensor): A Tensor with non-negative integer. Should be 1-D tensor. weights (Tensor, optional): Weight for each value in the input tensor. Should have the same shape as input. Default is None. minlength (int, optional): Minimum number of bins. Should be non-negative integer. Default is 0. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor of frequency. Examples: .. code-block:: python import paddle x = paddle.to_tensor([1, 2, 1, 4, 5]) result1 = paddle.bincount(x) print(result1) # [0, 2, 1, 0, 1, 1] w = paddle.to_tensor([2.1, 0.4, 0.1, 0.5, 0.5]) result2 = paddle.bincount(x, weights=w) print(result2) # [0., 2.19999981, 0.40000001, 0., 0.50000000, 0.50000000] """ if x.dtype not in [paddle.int32, paddle.int64]: raise TypeError("Elements in Input(x) should all be integers") if paddle.in_dynamic_mode(): return _C_ops.bincount(x, weights, "minlength", minlength) helper = LayerHelper('bincount', **locals()) check_variable_and_dtype(x, 'X', ['int32', 'int64'], 'bincount') if weights is not None: check_variable_and_dtype(weights, 'Weights', ['int32', 'int64', 'float32', 'float64'], 'bincount') out = helper.create_variable_for_type_inference(dtype=weights.dtype) else: out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='bincount', inputs={'X': x, 'Weights': weights}, outputs={'Out': out}, attrs={'minlength': minlength}) return out def mv(x, vec, name=None): """ Performs a matrix-vector product of the matrix x and the vector vec. Args: x (Tensor): A tensor with shape :math:`[M, N]` , The data type of the input Tensor x should be one of float32, float64. vec (Tensor): A tensor with shape :math:`[N]` , The data type of the input Tensor x should be one of float32, float64. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor which is producted by x and vec. Examples: .. 
code-block:: python # x: [M, N], vec: [N] # paddle.mv(x, vec) # out: [M] import numpy as np import paddle x_data = np.array([[2, 1, 3], [3, 0, 1]]).astype("float64") x = paddle.to_tensor(x_data) vec_data = np.array([3, 5, 1]) vec = paddle.to_tensor(vec_data).astype("float64") out = paddle.mv(x, vec) """ if in_dygraph_mode(): return _C_ops.final_state_mv(x, vec) else: if _in_legacy_dygraph(): out = _C_ops.mv(x, vec) return out else: def __check_input(x, vec): var_names = {'x': x, 'vec': vec} for name, val in var_names.items(): check_variable_and_dtype(val, name, ['float32', 'float64'], 'mv') x_shape = list(x.shape) vec_shape = list(vec.shape) if len(x_shape) != 2: raise ValueError( "x should be 2-dimensional. But received x's dimention: {}". format(x_shape)) if len(vec_shape) != 1: raise ValueError( "vec should be 1-dimensional. But received vec's dimention: {}". format(vec_shape)) __check_input(x, vec) helper = LayerHelper('mv', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='mv', inputs={'X': x, 'Vec': vec}, outputs={'Out': out}) return out def det(x, name=None): """ Calculates determinant value of a square matrix or batches of square matrices. Args: x (Tensor): input (Tensor): the input matrix of size `(n, n)` or the batch of matrices of size `(*, n, n)` where `*` is one or more batch dimensions. Returns: y (Tensor):the determinant value of a square matrix or batches of square matrices. Examples: .. code-block:: python import paddle x = paddle.randn([3,3,3]) A = paddle.linalg.det(x) print(A) # [ 0.02547996, 2.52317095, -6.15900707]) """ if paddle.in_dynamic_mode(): return _C_ops.determinant(x) check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'det') input_shape = list(x.shape) assert len(input_shape) >= 2, \ "The x must be at least 2-dimensional, " \ "but received Input x's dimensional: %s.\n" % \ len(input_shape) assert (input_shape[-1] == input_shape[-2]), \ "Expect squared input," \ "but received %s by %s matrix.\n" \ %(input_shape[-2], input_shape[-1]) \ helper = LayerHelper('determinant', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='determinant', inputs={'Input': [x]}, outputs={'Out': [out]}) return out def slogdet(x, name=None): """ Calculates the sign and natural logarithm of the absolute value of a square matrix's or batches square matrices' determinant. The determinant can be computed with ``sign * exp(logabsdet) Supports input of float, double Note that for matrices that have zero determinant, this returns ``(0, -inf)`` Args: x (Tensor): the batch of matrices of size :math:`(*, n, n)` where math:`*` is one or more batch dimensions. Returns: y (Tensor): A tensor containing the sign of the determinant and the natural logarithm of the absolute value of determinant, respectively. Examples: .. code-block:: python import paddle x = paddle.randn([3,3,3]) A = paddle.linalg.slogdet(x) print(A) # [[ 1. , 1. , -1. 
], # [-0.98610914, -0.43010661, -0.10872950]]) """ if paddle.in_dynamic_mode(): return _C_ops.slogdeterminant(x) check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'slogdet') input_shape = list(x.shape) assert len(input_shape) >= 2, \ "The x must be at least 2-dimensional, " \ "but received Input x's dimensional: %s.\n" % \ len(input_shape) assert (input_shape[-1] == input_shape[-2]), \ "Expect squared input," \ "but received %s by %s matrix.\n" \ %(input_shape[-2], input_shape[-1]) \ helper = LayerHelper('slogdeterminant', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='slogdeterminant', inputs={'Input': [x]}, outputs={'Out': [out]}) return out def svd(x, full_matrices=False, name=None): r""" Computes the singular value decomposition of one matrix or a batch of regular matrices. Let :math:`X` be the input matrix or a batch of input matrices, the output should satisfies: .. math:: X = U * diag(S) * VT Args: x (Tensor): The input tensor. Its shape should be `[..., N, M]`, where `...` is zero or more batch dimensions. N and M can be arbitraty positive number. Note that if x is sigular matrices, the grad is numerical instable. The data type of x should be float32 or float64. full_matrices (bool): A flag to control the behavor of svd. If full_matrices = True, svd op will compute full U and V matrics, which means shape of U is `[..., N, N]`, shape of V is `[..., M, M]`. K = min(M, N). If full_matrices = False, svd op will use a economic method to store U and V. which means shape of U is `[..., N, K]`, shape of V is `[..., M, K]`. K = min(M, N). name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tuple of 3 tensors: (U, S, VH). VH is the conjugate transpose of V. S is the singlar value vectors of matrics with shape `[..., K]` Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [1.0, 3.0], [4.0, 6.0]]).astype('float64') x = x.reshape([3, 2]) u, s, vh = paddle.linalg.svd(x) print (u) #U = [[ 0.27364809, -0.21695147 ], # [ 0.37892198, -0.87112408 ], # [ 0.8840446 , 0.44053933 ]] print (s) #S = [8.14753743, 0.78589688] print (vh) #VT= [[ 0.51411221, 0.85772294], # [ 0.85772294, -0.51411221]] # one can verify : U * S * VT == X # U * UH == I # V * VH == I """ if paddle.in_dynamic_mode(): return _C_ops.svd(x, 'full_matrices', full_matrices) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'svd') check_type(full_matrices, 'full_matrices', bool, 'svd') helper = LayerHelper('svd', **locals()) u = helper.create_variable_for_type_inference(dtype=x.dtype) vh = helper.create_variable_for_type_inference(dtype=x.dtype) s = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['full_matrices'] = full_matrices helper.append_op( type='svd', inputs={'X': [x]}, outputs={'U': u, 'VH': vh, 'S': s}, attrs=attrs, ) return u, s, vh def matrix_power(x, n, name=None): r""" Computes the n-th power of a square matrix or a batch of square matrices. Let :math:`X` be a sqaure matrix or a batch of square matrices, :math:`n` be an exponent, the equation should be: .. math:: Out = X ^ {n} Specifically, - If `n > 0`, it returns the matrix or a batch of matrices raised to the power of `n`. - If `n = 0`, it returns the identity matrix or a batch of identity matrices. - If `n < 0`, it returns the inverse of each matrix (if invertible) raised to the power of `abs(n)`. 
Args: x (Tensor): A square matrix or a batch of square matrices to be raised to power `n`. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. n (int): The exponent. It can be any positive, negative integer or zero. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The n-th power of the matrix (or the batch of matrices) `x`. Its data type should be the same as that of `x`. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1, 2, 3], [1, 4, 9], [1, 8, 27]], dtype='float64') print(paddle.linalg.matrix_power(x, 2)) # [[6. , 34. , 102.], # [14. , 90. , 282.], # [36. , 250., 804.]] print(paddle.linalg.matrix_power(x, 0)) # [[1., 0., 0.], # [0., 1., 0.], # [0., 0., 1.]] print(paddle.linalg.matrix_power(x, -2)) # [[ 12.91666667, -12.75000000, 2.83333333 ], # [-7.66666667 , 8. , -1.83333333 ], # [ 1.80555556 , -1.91666667 , 0.44444444 ]] """ if paddle.in_dynamic_mode(): return _C_ops.matrix_power(x, "n", n) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'matrix_power') check_type(n, 'n', int, 'matrix_power') helper = LayerHelper('matrix_power', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matrix_power', inputs={'X': x}, outputs={'Out': out}, attrs={'n': n}) return out def qr(x, mode="reduced", name=None): r""" Computes the QR decomposition of one matrix or batches of matrice (backward is unsupported now). Args: x (Tensor): The input tensor. Its shape should be `[..., M, N]`, where ... is zero or more batch dimensions. M and N can be arbitrary positive number. The data type of x should be float32 or float64. mode (str, optional): A flag to control the behavior of qr, the default is "reduced". Suppose x's shape is `[..., M, N]` and denoting `K = min(M, N)`: If mode = "reduced", qr op will return reduced Q and R matrices, which means Q's shape is `[..., M, K]` and R's shape is `[..., K, N]`. If mode = "complete", qr op will return complete Q and R matrices, which means Q's shape is `[..., M, M]` and R's shape is `[..., M, N]`. If mode = "r", qr op will only return reduced R matrix, which means R's shape is `[..., K, N]`. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: If mode = "reduced" or mode = "complete", qr will return a two tensor-tuple, which represents Q and R. If mode = "r", qr will return a tensor which represents R. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') q, r = paddle.linalg.qr(x) print (q) print (r) # Q = [[-0.16903085, 0.89708523], # [-0.50709255, 0.27602622], # [-0.84515425, -0.34503278]]) # R = [[-5.91607978, -7.43735744], # [ 0. 
, 0.82807867]]) # one can verify : X = Q * R ; """ if paddle.in_dynamic_mode(): q, r = _C_ops.qr(x, 'mode', mode) if mode == "r": return r else: return q, r check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'qr') check_type(mode, 'mode', str, 'qr') helper = LayerHelper('qr', **locals()) q = helper.create_variable_for_type_inference(dtype=x.dtype) r = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['mode'] = mode helper.append_op( type='qr', inputs={'X': [x]}, outputs={'Q': q, 'R': r}, attrs=attrs) if mode == "r": return r else: return q, r def lu(x, pivot=True, get_infos=False, name=None): r""" Computes the LU factorization of an N-D(N>=2) matrix x. Returns the LU factorization(inplace x) and Pivots. low triangular matrix L and upper triangular matrix U are combined to a single LU matrix. Pivoting is done if pivot is set to True. P mat can be get by pivots: # ones = eye(rows) #eye matrix of rank rows # for i in range(cols): # swap(ones[i], ones[pivots[i]]) # return ones Args: X (Tensor): the tensor to factor of N-dimensions(N>=2). pivot (bool, optional): controls whether pivoting is done. Default: True. get_infos (bool, optional): if set to True, returns an info IntTensor. Default: False. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: factorization (Tensor): LU matrix, the factorization of input X. pivots (IntTensor): the pivots of size(∗(N-2), min(m,n)). `pivots` stores all the intermediate transpositions of rows. The final permutation `perm` could be reconstructed by this, details refer to upper example. infos (IntTensor, optional): if `get_infos` is `True`, this is a tensor of size (∗(N-2)) where non-zero values indicate whether factorization for the matrix or each minibatch has succeeded or failed. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') lu,p,info = paddle.linalg.lu(x, get_infos=True) # >>> lu: # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0.20000000, 0.80000000], # [0.60000000, 0.50000000]]) # >>> p # Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # [3, 3]) # >>> info # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # 0) P,L,U = paddle.linalg.lu_unpack(lu,p) # >>> P # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[0., 1., 0.], # [0., 0., 1.], # [1., 0., 0.]]), # >>> L # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[1. , 0. ], # [0.20000000, 1. ], # [0.60000000, 0.50000000]]), # >>> U # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0. 
, 0.80000000]])) # one can verify : X = P @ L @ U ; """ if paddle.in_dynamic_mode(): LU, Piv, Info = _C_ops.lu(x, 'pivots', pivot) if get_infos: return LU, Piv, Info else: return LU, Piv check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu') helper = LayerHelper('lu', **locals()) lu = helper.create_variable_for_type_inference(dtype=x.dtype) p = helper.create_variable_for_type_inference(dtype='int') info = helper.create_variable_for_type_inference(dtype='int') attrs = dict() attrs['pivots'] = pivot helper.append_op( type='lu', inputs={'X': x}, outputs={'Out': lu, 'Pivots': p, 'Infos': info}, attrs=attrs) if get_infos: return lu, p, info else: return lu, p def lu_unpack(x, y, unpack_ludata=True, unpack_pivots=True, name=None): r""" Unpack L U and P to single matrix tensor . unpack L and U matrix from LU, unpack permutation matrix P from Pivtos . P mat can be get by pivots: # ones = eye(rows) #eye matrix of rank rows # for i in range(cols): # swap(ones[i], ones[pivots[i]]) Args: x (Tensor): The LU tensor get from paddle.linalg.lu, which is combined by L and U. y (Tensor): Pivots get from paddle.linalg.lu. unpack_ludata (bool,optional): whether to unpack L and U from x. Default: True. unpack_pivots (bool, optional): whether to unpack permutation matrix P from Pivtos. Default: True. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: P (Tensor): Permutation matrix P of lu factorization. L (Tensor): The lower triangular matrix tensor of lu factorization. U (Tensor): The upper triangular matrix tensor of lu factorization. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') lu,p,info = paddle.linalg.lu(x, get_infos=True) # >>> lu: # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0.20000000, 0.80000000], # [0.60000000, 0.50000000]]) # >>> p # Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # [3, 3]) # >>> info # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # 0) P,L,U = paddle.linalg.lu_unpack(lu,p) # >>> P # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[0., 1., 0.], # [0., 0., 1.], # [1., 0., 0.]]), # >>> L # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[1. , 0. ], # [0.20000000, 1. ], # [0.60000000, 0.50000000]]), # >>> U # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0. , 0.80000000]])) # one can verify : X = P @ L @ U ; """ if paddle.in_dynamic_mode(): P, L, U = _C_ops.lu_unpack(x, y, 'unpack_ludata', unpack_ludata, 'unpack_pivots', unpack_pivots) return P, L, U check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu_unpack') helper = LayerHelper('lu_unpack', **locals()) p = helper.create_variable_for_type_inference(dtype=x.dtype) l = helper.create_variable_for_type_inference(dtype=x.dtype) u = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['unpack_ludata'] = unpack_ludata attrs['unpack_pivots'] = unpack_pivots helper.append_op( type='lu_unpack', inputs={'X': x, 'Pivots': y}, outputs={'Pmat': p, 'L': l, 'U': u}, attrs=attrs) return p, l, u def eig(x, name=None): """ This API performs the eigenvalue decomposition of a square matrix or a batch of square matrices. .. 
note:: If the matrix is a Hermitian or a real symmetric matrix, please use :ref:`paddle.linalg.eigh` instead, which is much faster. If only eigenvalues is needed, please use :ref:`paddle.linalg.eigvals` instead. If the matrix is of any shape, please use :ref:`paddle.linalg.svd`. This API is only supported on CPU device. The output datatype is always complex for both real and complex input. Args: x (Tensor): A tensor with shape math:`[*, N, N]`, The data type of the x should be one of ``float32``, ``float64``, ``compplex64`` or ``complex128``. name (str, optional): The default value is `None`. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Eigenvalues(Tensors): A tensor with shape math:`[*, N]` refers to the eigen values. Eigenvectors(Tensors): A tensor with shape math:`[*, N, N]` refers to the eigen vectors. Examples: .. code-block:: python import paddle import numpy as np paddle.device.set_device("cpu") x_data = np.array([[1.6707249, 7.2249975, 6.5045543], [9.956216, 8.749598, 6.066444 ], [4.4251957, 1.7983172, 0.370647 ]]).astype("float32") x = paddle.to_tensor(x_data) w, v = paddle.linalg.eig(x) print(w) # Tensor(shape=[3, 3], dtype=complex128, place=CPUPlace, stop_gradient=False, # [[(-0.5061363550800655+0j) , (-0.7971760990842826+0j) , # (0.18518077798279986+0j)], # [(-0.8308237755993192+0j) , (0.3463813401919749+0j) , # (-0.6837005269141947+0j) ], # [(-0.23142567697893396+0j), (0.4944999840400175+0j) , # (0.7058765252952796+0j) ]]) print(v) # Tensor(shape=[3], dtype=complex128, place=CPUPlace, stop_gradient=False, # [ (16.50471283351188+0j) , (-5.5034820550763515+0j) , # (-0.21026087843552282+0j)]) """ if paddle.in_dynamic_mode(): w, v = _C_ops.eig(x) return w, v check_variable_and_dtype( x, 'X', ['float32', 'float64', 'complex64', 'complex128'], 'eig') helper = LayerHelper('eig', **locals()) w = helper.create_variable_for_type_inference(x.dtype) v = helper.create_variable_for_type_inference(x.dtype) inputs = {'X': x} outputs = {'Eigenvalues': w, 'Eigenvectors': v} helper.append_op(type='eig', inputs=inputs, outputs=outputs) return w, v def eigvals(x, name=None): """ Compute the eigenvalues of one or more general matrices. Warning: The gradient kernel of this operator does not yet developed. If you need back propagation through this operator, please replace it with paddle.linalg.eig. Args: x (Tensor): A square matrix or a batch of square matrices whose eigenvalues will be computed. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32, float64, complex64, or complex128. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: A tensor containing the unsorted eigenvalues which has the same batch dimensions with `x`. The eigenvalues are complex-valued even when `x` is real. Examples: .. 
code-block:: python import paddle paddle.set_device("cpu") paddle.seed(1234) x = paddle.rand(shape=[3, 3], dtype='float64') # [[0.02773777, 0.93004224, 0.06911496], # [0.24831591, 0.45733623, 0.07717843], # [0.48016702, 0.14235102, 0.42620817]]) print(paddle.linalg.eigvals(x)) # [(-0.27078833542132674+0j), (0.29962280156230725+0j), (0.8824477020120244+0j)] #complex128 """ check_variable_and_dtype(x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigvals') x_shape = list(x.shape) if len(x_shape) < 2: raise ValueError( "The dimension of Input(x) should be at least 2, but received x's dimention = {}, x's shape = {}". format(len(x_shape), x_shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The last two dimensions of Input(x) should be equal, but received x's shape = {}". format(x_shape)) if paddle.in_dynamic_mode(): return _C_ops.eigvals(x) helper = LayerHelper('eigvals', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type='eigvals', inputs={'X': x}, outputs={'Out': out}) return out def multi_dot(x, name=None): """ Multi_dot is an operator that calculates multiple matrix multiplications. Supports inputs of float16(only GPU support), float32 and float64 dtypes. This function does not support batched inputs. The input tensor in [x] must be 2-D except for the first and last can be 1-D. If the first tensor is a 1-D vector of shape(n, ) it is treated as row vector of shape(1, n), similarly if the last tensor is a 1D vector of shape(n, ), it is treated as a column vector of shape(n, 1). If the first and last tensor are 2-D matrix, then the output is also 2-D matrix, otherwise the output is a 1-D vector. Multi_dot will select the lowest cost multiplication order for calculation. The cost of multiplying two matrices with shapes (a, b) and (b, c) is a * b * c. Given matrices A, B, C with shapes (20, 5), (5, 100), (100, 10) respectively, we can calculate the cost of different multiplication orders as follows: - Cost((AB)C) = 20x5x100 + 20x100x10 = 30000 - Cost(A(BC)) = 5x100x10 + 20x5x10 = 6000 In this case, multiplying B and C first, then multiply A, which is 5 times faster than sequential calculation. Args: x ([Tensor]): The input tensors which is a list Tensor. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The output Tensor. Examples: .. 
code-block:: python import paddle import numpy as np # A * B A_data = np.random.random([3, 4]).astype(np.float32) B_data = np.random.random([4, 5]).astype(np.float32) A = paddle.to_tensor(A_data) B = paddle.to_tensor(B_data) out = paddle.linalg.multi_dot([A, B]) print(out.numpy().shape) # [3, 5] # A * B * C A_data = np.random.random([10, 5]).astype(np.float32) B_data = np.random.random([5, 8]).astype(np.float32) C_data = np.random.random([8, 7]).astype(np.float32) A = paddle.to_tensor(A_data) B = paddle.to_tensor(B_data) C = paddle.to_tensor(C_data) out = paddle.linalg.multi_dot([A, B, C]) print(out.numpy().shape) # [10, 7] """ if paddle.in_dynamic_mode(): return _C_ops.multi_dot(x) check_type(x, 'x', (list, tuple), 'multi_dot') for id, item in enumerate(x): check_variable_and_dtype(item, 'x[' + str(id) + ']', ['float16', 'float32', 'float64'], 'multi_dot') if item.dtype != x[0].dtype: raise TypeError( "All the Tensors in the input must have the same data type.") helper = LayerHelper('multi_dot', **locals()) dtype = helper.input_dtype(input_param_name='x') out = helper.create_variable_for_type_inference(dtype) helper.append_op(type='multi_dot', inputs={"X": x}, outputs={"Out": out}) return out def eigh(x, UPLO='L', name=None): """ Compute the eigenvalues and eigenvectors of a complex Hermitian (conjugate symmetric) or a real symmetric matrix. Args: x (Tensor): A tensor with shape :math:`[*, N, N]` , The data type of the input Tensor x should be one of float32, float64, complex64, complex128. UPLO(str, optional): (string, default 'L'), 'L' represents the lower triangular matrix, "'U' represents the upper triangular matrix.". name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: out_value(Tensor): A Tensor with shape [*, N] and data type of float32 and float64. The eigenvalues of eigh op. out_vector(Tensor): A Tensor with shape [*, N, N] and data type of float32,float64,complex64 and complex128. The eigenvectors of eigh op. Examples: .. code-block:: python import numpy as np import paddle x_data = np.array([[1, -2j], [2j, 5]]) x = paddle.to_tensor(x_data) out_value, out_vector = paddle.linalg.eigh(x, UPLO='L') print(out_value) #[0.17157288, 5.82842712] print(out_vector) #[(-0.9238795325112867+0j), (-0.3826834323650898+0j)], #[ 0.3826834323650898j , -0.9238795325112867j ]] """ if paddle.in_dynamic_mode(): return _C_ops.eigh(x, 'UPLO', UPLO) def __check_input(x, UPLO): x_shape = list(x.shape) if len(x.shape) < 2: raise ValueError( "Input(input) only support >=2 tensor, but received " "length of Input(input) is %s." % len(x.shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The input matrix must be batches of square matrices. But received x's dimention: {}". format(x_shape)) if UPLO != 'L' and UPLO != 'U': raise ValueError( "UPLO must be L or U. 
But received UPLO is: {}".format(UPLO)) __check_input(x, UPLO) helper = LayerHelper('eigh', **locals()) check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigh') out_value = helper.create_variable_for_type_inference(dtype=x.dtype) out_vector = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='eigh', inputs={'X': x}, outputs={'Eigenvalues': out_value, 'Eigenvectors': out_vector}, attrs={'UPLO': UPLO}) return out_value, out_vector def pinv(x, rcond=1e-15, hermitian=False, name=None): r""" Calculate pseudo inverse via SVD(singular value decomposition) of one matrix or batches of regular matrix. .. math:: if hermitian == False: x = u * s * vt (SVD) out = v * 1/s * ut else: x = u * s * ut (eigh) out = u * 1/s * u.conj().transpose(-2,-1) If x is hermitian or symmetric matrix, svd will be replaced with eigh. Args: x(Tensor): The input tensor. Its shape should be (*, m, n) where * is zero or more batch dimensions. m and n can be arbitraty positive number. The data type of x should be float32 or float64 or complex64 or complex128. When data type is complex64 or cpmplex128, hermitian should be set True. rcond(Tensor, optional): the tolerance value to determine when is a singular value zero. Defalut:1e-15. hermitian(bool, optional): indicates whether x is Hermitian if complex or symmetric if real. Default: False. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The tensor with same data type with x. it represents pseudo inverse of x. Its shape should be (*, n, m). Examples: .. code-block:: python import paddle x = paddle.arange(15).reshape((3, 5)).astype('float64') input = paddle.to_tensor(x) out = paddle.linalg.pinv(input) print(input) print(out) # input: # [[0. , 1. , 2. , 3. , 4. ], # [5. , 6. , 7. , 8. , 9. 
], # [10., 11., 12., 13., 14.]] # out: # [[-0.22666667, -0.06666667, 0.09333333], # [-0.12333333, -0.03333333, 0.05666667], # [-0.02000000, 0.00000000, 0.02000000], # [ 0.08333333, 0.03333333, -0.01666667], # [ 0.18666667, 0.06666667, -0.05333333]] # one can verify : x * out * x = x ; # or out * x * out = x ; """ if paddle.in_dynamic_mode(): if not hermitian: # combine svd and matmul op u, s, vt = _C_ops.svd(x, 'full_matrices', False) max_singular_val = _C_ops.reduce_max(s, 'dim', [-1], 'keep_dim', True, \ 'reduce_all', False) rcond = paddle.to_tensor(rcond, dtype=x.dtype) cutoff = rcond * max_singular_val y = float('inf') y = paddle.to_tensor(y, dtype=x.dtype) condition = s > cutoff cond_int = layers.cast(condition, s.dtype) cond_not_int = layers.cast(layers.logical_not(condition), s.dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st, _ = _C_ops.unsqueeze2(singular, 'axes', [-2]) dims = list(range(len(vt.shape))) perm = dims[:-2] + [dims[-1]] + [dims[-2]] v, _ = _C_ops.transpose2(vt, 'axis', perm) out_1 = v * st out_2 = _C_ops.matmul_v2(out_1, u, 'trans_x', False, 'trans_y', True) return out_2 else: # combine eigh and matmul op s, u = _C_ops.eigh(x, 'UPLO', 'L') s_abs = paddle.abs(s) max_singular_val = _C_ops.reduce_max(s_abs, 'dim', [-1], 'keep_dim', True, \ 'reduce_all', False) rcond = paddle.to_tensor(rcond, dtype=s.dtype) cutoff = rcond * max_singular_val y = float('inf') y = paddle.to_tensor(y, dtype=s.dtype) condition = s_abs > cutoff cond_int = layers.cast(condition, s.dtype) cond_not_int = layers.cast(layers.logical_not(condition), s.dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st, _ = _C_ops.unsqueeze2(singular, 'axes', [-2]) out_1 = u * st u_conj = _C_ops.conj(u) out_2 = _C_ops.matmul_v2(out_1, u_conj, 'trans_x', False, 'trans_y', True) return out_2 else: if not hermitian: helper = LayerHelper('pinv', **locals()) dtype = x.dtype check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'pinv') u = helper.create_variable_for_type_inference(dtype) s = helper.create_variable_for_type_inference(dtype) vt = helper.create_variable_for_type_inference(dtype) helper.append_op( type='svd', inputs={'X': [x]}, outputs={'U': u, 'VH': vt, 'S': s}, attrs={'full_matrices': False}, ) max_singular_val = helper.create_variable_for_type_inference(dtype) helper.append_op( type='reduce_max', inputs={'X': s}, outputs={'Out': max_singular_val}, attrs={'dim': [-1], 'keep_dim': True, 'reduce_all': False}) rcond = layers.fill_constant(shape=[1], value=rcond, dtype=dtype) cutoff = rcond * max_singular_val y = float('inf') y = layers.fill_constant(shape=[1], value=y, dtype=dtype) condition = s > cutoff cond_int = layers.cast(condition, dtype) cond_not_int = layers.cast(layers.logical_not(condition), dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st = helper.create_variable_for_type_inference(dtype=dtype) st_shape = helper.create_variable_for_type_inference(dtype=dtype) helper.append_op( type='unsqueeze2', inputs={'X': singular}, attrs={'axes': [-2]}, outputs={'Out': st, 'XShape': st_shape}) dims = list(range(len(vt.shape))) perm = dims[:-2] + [dims[-1]] + [dims[-2]] v = helper.create_variable_for_type_inference(dtype) v_shape = helper.create_variable_for_type_inference(dtype) helper.append_op( 
type='transpose2', inputs={'X': [vt]}, outputs={'Out': [v], 'XShape': [v_shape]}, attrs={'axis': perm}) out_1 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='elementwise_mul', inputs={'X': v, 'Y': st}, outputs={'Out': out_1}, attrs={'axis': -1, 'use_mkldnn': False}) out_1 = helper.append_activation(out_1) out_2 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='matmul_v2', inputs={'X': out_1, 'Y': u}, outputs={'Out': out_2}, attrs={'trans_x': False, 'trans_y': True}, ) return out_2 else: helper = LayerHelper('pinv', **locals()) dtype = x.dtype check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'pinv') if dtype == paddle.complex128: s_type = 'float64' elif dtype == paddle.complex64: s_type = 'float32' else: s_type = dtype u = helper.create_variable_for_type_inference(dtype) s = helper.create_variable_for_type_inference(s_type) helper.append_op( type='eigh', inputs={'X': x}, outputs={'Eigenvalues': s, 'Eigenvectors': u}, attrs={'UPLO': 'L'}) s_abs = helper.create_variable_for_type_inference(s_type) helper.append_op( type='abs', inputs={'X': s}, outputs={'Out': s_abs}) max_singular_val = helper.create_variable_for_type_inference(s_type) helper.append_op( type='reduce_max', inputs={'X': s_abs}, outputs={'Out': max_singular_val}, attrs={'dim': [-1], 'keep_dim': True, 'reduce_all': False}) rcond = layers.fill_constant(shape=[1], value=rcond, dtype=s_type) cutoff = rcond * max_singular_val y = float('inf') y = layers.fill_constant(shape=[1], value=y, dtype=s_type) condition = s_abs > cutoff cond_int = layers.cast(condition, s_type) cond_not_int = layers.cast(layers.logical_not(condition), s_type) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st = helper.create_variable_for_type_inference(dtype=s_type) st_shape = helper.create_variable_for_type_inference(dtype=s_type) helper.append_op( type='unsqueeze2', inputs={'X': singular}, attrs={'axes': [-2]}, outputs={'Out': st, 'XShape': st_shape}) out_1 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='elementwise_mul', inputs={'X': u, 'Y': st}, outputs={'Out': out_1}, attrs={'axis': -1, 'use_mkldnn': False}) out_1 = helper.append_activation(out_1) u_conj = helper.create_variable_for_type_inference(dtype) helper.append_op( type='conj', inputs={'X': u}, outputs={'Out': [u_conj]}) out_2 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='matmul_v2', inputs={'X': out_1, 'Y': u_conj}, outputs={'Out': out_2}, attrs={'trans_x': False, 'trans_y': True}, ) return out_2 def solve(x, y, name=None): r""" Computes the solution of a square system of linear equations with a unique solution for input 'X' and 'Y'. Let :math: `X` be a sqaure matrix or a batch of square matrices, :math:`Y` be a vector/matrix or a batch of vectors/matrices, the equation should be: .. math:: Out = X^-1 * Y Specifically, - This system of linear equations has one solution if and only if input 'X' is invertible. Args: x (Tensor): A square matrix or a batch of square matrices. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): A vector/matrix or a batch of vectors/matrices. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. name(str, optional): Name for the operation (optional, default is None). 
For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of a square system of linear equations with a unique solution for input 'x' and 'y'. Its data type should be the same as that of `x`. Examples: .. code-block:: python # a square system of linear equations: # 2*X0 + X1 = 9 # X0 + 2*X1 = 8 import paddle import numpy as np np_x = np.array([[3, 1],[1, 2]]) np_y = np.array([9, 8]) x = paddle.to_tensor(np_x, dtype="float64") y = paddle.to_tensor(np_y, dtype="float64") out = paddle.linalg.solve(x, y) print(out) # [2., 3.]) """ if paddle.in_dynamic_mode(): return _C_ops.solve(x, y) inputs = {"X": [x], "Y": [y]} helper = LayerHelper("solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type="solve", inputs={"X": x, "Y": y}, outputs={"Out": out}) return out def triangular_solve(x, y, upper=True, transpose=False, unitriangular=False, name=None): r""" Computes the solution of a system of equations with a triangular coefficient matrix `x` and multiple right-hand sides `y` . Input `x` and `y` is 2D matrices or batches of 2D matrices. If the inputs are batches, the outputs is also batches. Args: x (Tensor): The input triangular coefficient matrix. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. upper (bool, optional): Whether to solve the upper-triangular system of equations (default) or the lower-triangular system of equations. Default: True. transpose (bool, optional): whether `x` should be transposed before calculation. Default: False. unitriangular (bool, optional): whether `x` is unit triangular. If True, the diagonal elements of `x` are assumed to be 1 and not referenced from `x` . Default: False. name(str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of the system of equations. Its data type should be the same as that of `x`. Examples: .. code-block:: python # a square system of linear equations: # x1 + x2 + x3 = 0 # 2*x2 + x3 = -9 # -x3 = 5 import paddle import numpy as np x = paddle.to_tensor([[1, 1, 1], [0, 2, 1], [0, 0,-1]], dtype="float64") y = paddle.to_tensor([[0], [-9], [5]], dtype="float64") out = paddle.linalg.triangular_solve(x, y, upper=True) print(out) # [7, -2, -5] """ if paddle.in_dynamic_mode(): return _C_ops.triangular_solve(x, y, 'upper', upper, 'transpose', transpose, 'unitriangular', unitriangular) inputs = {"X": [x], "Y": [y]} helper = LayerHelper("triangular_solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'triangular_solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'triangular_solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='triangular_solve', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs={ 'upper': upper, 'transpose': transpose, 'unitriangular': unitriangular }) return out def cholesky_solve(x, y, upper=False, name=None): r""" Solves a linear system of equations A @ X = B, given A's Cholesky factor matrix u and matrix B. Input `x` and `y` is 2D matrices or batches of 2D matrices. 
If the inputs are batches, the outputs is also batches. Args: x (Tensor): The input matrix which is upper or lower triangular Cholesky factor of square matrix A. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. upper (bool, optional): whether to consider the Cholesky factor as a lower or upper triangular matrix. Default: False. name(str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of the system of equations. Its data type is the same as that of `x`. Examples: .. code-block:: python import paddle u = paddle.to_tensor([[1, 1, 1], [0, 2, 1], [0, 0,-1]], dtype="float64") b = paddle.to_tensor([[0], [-9], [5]], dtype="float64") out = paddle.linalg.cholesky_solve(b, u, upper=True) print(out) # [-2.5, -7, 9.5] """ if paddle.in_dynamic_mode(): return _C_ops.cholesky_solve(x, y, 'upper', upper) helper = LayerHelper("cholesky_solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'cholesky_solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'cholesky_solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='cholesky_solve', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs={'upper': upper}) return out # MASKED: eigvalsh function (lines 2766-2830) def lstsq(x, y, rcond=None, driver=None, name=None): """ Computes a solution to the least squares problem of a system of linear equations. Args: x (Tensor): A tensor with shape ``(*, M, N)`` , the data type of the input Tensor ``x`` should be one of float32, float64. y (Tensor): A tensor with shape ``(*, M, K)`` , the data type of the input Tensor ``y`` should be one of float32, float64. rcond(float, optional): The default value is None. A float pointing number used to determine the effective rank of ``x``. If ``rcond`` is None, it will be set to max(M, N) times the machine precision of x_dtype. driver(str, optional): The default value is None. The name of LAPACK method to be used. For CPU inputs the valid values are ‘gels’, ‘gelsy’, ‘gelsd, ‘gelss’. For CUDA input, the only valid driver is ‘gels’. If ``driver`` is None, ‘gelsy’ is used for CPU inputs and ‘gels’ for CUDA inputs. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tuple: A tuple of 4 Tensors which is (``solution``, ``residuals``, ``rank``, ``singular_values``). ``solution`` is a tensor with shape ``(*, N, K)``, meaning the least squares solution. ``residuals`` is a tensor with shape ``(*, K)``, meaning the squared residuals of the solutions, which is computed when M > N and every matrix in ``x`` is full-rank, otherwise return an empty tensor. ``rank`` is a tensor with shape ``(*)``, meaning the ranks of the matrices in ``x``, which is computed when ``driver`` in (‘gelsy’, ‘gelsd’, ‘gelss’), otherwise return an empty tensor. ``singular_values`` is a tensor with shape ``(*, min(M, N))``, meaning singular values of the matrices in ``x``, which is computed when ``driver`` in (‘gelsd’, ‘gelss’), otherwise return an empty tensor. Examples: .. 
code-block:: python import paddle paddle.set_device("cpu") x = paddle.to_tensor([[1, 3], [3, 2], [5, 6.]]) y = paddle.to_tensor([[3, 4, 6], [5, 3, 4], [1, 2, 1.]]) results = paddle.linalg.lstsq(x, y, driver="gelsd") print(results[0]) # [[ 0.78350395, -0.22165027, -0.62371236], # [-0.11340097, 0.78866047, 1.14948535]] print(results[1]) # [19.81443405, 10.43814468, 30.56185532]) print(results[2]) # 2 print(results[3]) # [9.03455734, 1.54167950] x = paddle.to_tensor([[10, 2, 3], [3, 10, 5], [5, 6, 12.]]) y = paddle.to_tensor([[4, 2, 9], [2, 0, 3], [2, 5, 3.]]) results = paddle.linalg.lstsq(x, y, driver="gels") print(results[0]) # [[ 0.39386186, 0.10230173, 0.93606132], # [ 0.10741687, -0.29028133, 0.11892585], # [-0.05115091, 0.51918161, -0.19948854]] print(results[1]) # [] """ device = paddle.get_device() if device == "cpu": if driver not in (None, "gels", "gelss", "gelsd", "gelsy"): raise ValueError( "Only support valid driver is 'gels', 'gelss', 'gelsd', 'gelsy' or None for CPU inputs. But got {}". format(driver)) driver = "gelsy" if driver is None else driver elif "gpu" in device: if driver not in (None, "gels"): raise ValueError( "Only support valid driver is 'gels' or None for CUDA inputs. But got {}". format(driver)) driver = "gels" if driver is None else driver else: raise RuntimeError("Only support lstsq api for CPU or CUDA device.") if x.dtype == y.dtype and x.dtype in (paddle.float32, paddle.float64): pass else: raise ValueError( "Only support x and y have the same dtype such as 'float32' and 'float64'." ) if rcond is None: if x.dtype == paddle.float32: rcond = 1e-7 * max(x.shape[-2], x.shape[-1]) elif x.dtype == paddle.float64: rcond = 1e-15 * max(x.shape[-2], x.shape[-1]) if paddle.in_dynamic_mode(): solution, rank, singular_values = _C_ops.lstsq(x, y, "rcond", rcond, "driver", driver) if x.shape[-2] > x.shape[-1]: matmul_out = _varbase_creator(dtype=x.dtype) _C_ops.matmul(x, solution, matmul_out, 'trans_x', False, 'trans_y', False) minus_out = _C_ops.elementwise_sub(matmul_out, y) pow_out = _C_ops.pow(minus_out, 'factor', 2) residuals = _C_ops.reduce_sum(pow_out, 'dim', [-2], 'keepdim', False, 'reduce_all', False) else: residuals = paddle.empty(shape=[0], dtype=x.dtype) if driver == "gels": rank = paddle.empty(shape=[0], dtype=paddle.int32) singular_values = paddle.empty(shape=[0], dtype=x.dtype) elif driver == "gelsy": singular_values = paddle.empty(shape=[0], dtype=x.dtype) return solution, residuals, rank, singular_values helper = LayerHelper('lstsq', **locals()) check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq') check_variable_and_dtype( y, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq') solution = helper.create_variable_for_type_inference(dtype=x.dtype) residuals = helper.create_variable_for_type_inference(dtype=x.dtype) rank = helper.create_variable_for_type_inference(dtype=paddle.int32) singular_values = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='lstsq', inputs={'X': x, 'Y': y}, outputs={ 'Solution': solution, 'Rank': rank, 'SingularValues': singular_values }, attrs={'rcond': rcond, 'driver': driver}) matmul_out = helper.create_variable_for_type_inference(dtype=x.dtype) minus_out = helper.create_variable_for_type_inference(dtype=x.dtype) pow_out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matmul_v2', inputs={'X': x, 'Y': solution}, outputs={'Out': matmul_out}, attrs={ 'trans_x': False, 'trans_y': False, }) helper.append_op( 
        type='elementwise_sub',
        inputs={'X': matmul_out,
                'Y': y},
        outputs={'Out': minus_out})
    helper.append_op(
        type='pow',
        inputs={'X': minus_out},
        outputs={'Out': pow_out},
        attrs={'factor': 2})
    helper.append_op(
        type='reduce_sum',
        inputs={'X': pow_out},
        outputs={'Out': residuals},
        attrs={'dim': [-2],
               'keep_dim': False,
               'reduce_all': False})

    if driver == "gels":
        rank = paddle.static.data(name='rank', shape=[0])
        singular_values = paddle.static.data(name='singular_values', shape=[0])
    elif driver == "gelsy":
        singular_values = paddle.static.data(name='singular_values', shape=[0])

    return solution, residuals, rank, singular_values
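# --- Hedged usage sketch (added for illustration; not part of the original
# paddle source in this record). It re-derives the `residuals` output of
# paddle.linalg.lstsq by hand, mirroring the elementwise_sub / pow / reduce_sum
# chain built above. Assumes a CPU device and a paddle build that ships
# paddle.linalg.lstsq; the leading-underscore names are illustrative only.
import paddle

paddle.set_device("cpu")
_x = paddle.to_tensor([[1., 3.], [3., 2.], [5., 6.]])
_y = paddle.to_tensor([[3., 4., 6.], [5., 3., 4.], [1., 2., 1.]])
_sol, _res, _rank, _sv = paddle.linalg.lstsq(_x, _y, driver="gelsd")
# squared residuals recomputed manually: sum((x @ sol - y) ** 2) over rows;
# this should match _res, which is returned here because M > N and x is full-rank
_manual = ((paddle.matmul(_x, _sol) - _y) ** 2).sum(axis=-2)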
def eigvalsh(x, UPLO='L', name=None):
    """
    Computes the eigenvalues of a complex Hermitian
    (conjugate symmetric) or a real symmetric matrix.

    Args:
        x (Tensor): A tensor with shape :math:`[_, M, M]` , The data type of the input Tensor x
            should be one of float32, float64, complex64, complex128.
        UPLO(str, optional): Lower triangular part of a (‘L’, default) or the upper triangular part (‘U’).
        name(str, optional): The default value is None.  Normally there is no need for user to set this
            property.  For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: The tensor eigenvalues in ascending order.

    Examples:
        .. code-block:: python

            import numpy as np
            import paddle

            x_data = np.array([[1, -2j], [2j, 5]])
            x = paddle.to_tensor(x_data)
            out_value = paddle.eigvalsh(x, UPLO='L')
            print(out_value)
            #[0.17157288, 5.82842712]
    """
    if paddle.in_dynamic_mode():
        is_test = x.stop_gradient
        values, _ = _C_ops.eigvalsh(x, 'UPLO', UPLO, 'is_test', is_test)
        return values

    def __check_input(x, UPLO):
        x_shape = list(x.shape)
        if len(x.shape) < 2:
            raise ValueError(
                "Input(input) only support >=2 tensor, but received "
                "length of Input(input) is %s." % len(x.shape))
        if x_shape[-1] != x_shape[-2]:
            raise ValueError(
                "The input matrix must be batches of square matrices. But received x's dimention: {}".
                format(x_shape))
        if UPLO != 'L' and UPLO != 'U':
            raise ValueError(
                "UPLO must be L or U. But received UPLO is: {}".format(UPLO))

    __check_input(x, UPLO)

    helper = LayerHelper('eigvalsh', **locals())
    check_variable_and_dtype(x, 'dtype',
                             ['float32', 'float64', 'complex64', 'complex128'],
                             'eigvalsh')

    out_value = helper.create_variable_for_type_inference(dtype=x.dtype)
    out_vector = helper.create_variable_for_type_inference(dtype=x.dtype)

    is_test = x.stop_gradient
    helper.append_op(
        type='eigvalsh',
        inputs={'X': x},
        outputs={'Eigenvalues': out_value,
                 'Eigenvectors': out_vector},
        attrs={'UPLO': UPLO,
               'is_test': is_test})
    return out_value
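# --- Hedged usage sketch (added for illustration; not part of the original
# record). It runs paddle.linalg.eigvalsh on the Hermitian example from the
# docstring above and cross-checks the result against numpy.linalg.eigvalsh.
# Assumes a paddle build with complex128 support and numpy installed; the
# leading-underscore names are illustrative only.
import numpy as np
import paddle

_x_np = np.array([[1, -2j], [2j, 5]])
_x = paddle.to_tensor(_x_np)
_vals = paddle.linalg.eigvalsh(_x, UPLO='L')   # ascending: ~[0.1716, 5.8284]
_ref = np.linalg.eigvalsh(_x_np)               # numpy reference, also ascending
assert np.allclose(_vals.numpy(), _ref)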
2766
2830
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from ..fluid.layer_helper import LayerHelper from ..framework import _varbase_creator, _dygraph_tracer from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype from ..static import Variable from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode from ..fluid.layers import transpose, cast # noqa: F401 from ..fluid import layers import paddle from paddle.common_ops_import import core from paddle.common_ops_import import VarDesc from paddle import _C_ops __all__ = [] def matmul(x, y, transpose_x=False, transpose_y=False, name=None): """ Applies matrix multiplication to two tensors. `matmul` follows the complete broadcast rules, and its behavior is consistent with `np.matmul`. Currently, the input tensors' number of dimensions can be any, `matmul` can be used to achieve the `dot`, `matmul` and `batchmatmul`. The actual behavior depends on the shapes of :math:`x`, :math:`y` and the flag values of :attr:`transpose_x`, :attr:`transpose_y`. Specifically: - If a transpose flag is specified, the last two dimensions of the tensor are transposed. If the tensor is ndim-1 of shape, the transpose is invalid. If the tensor is ndim-1 of shape :math:`[D]`, then for :math:`x` it is treated as :math:`[1, D]`, whereas for :math:`y` it is the opposite: It is treated as :math:`[D, 1]`. The multiplication behavior depends on the dimensions of `x` and `y`. Specifically: - If both tensors are 1-dimensional, the dot product result is obtained. - If both tensors are 2-dimensional, the matrix-matrix product is obtained. - If the `x` is 1-dimensional and the `y` is 2-dimensional, a `1` is prepended to its dimension in order to conduct the matrix multiply. After the matrix multiply, the prepended dimension is removed. - If the `x` is 2-dimensional and `y` is 1-dimensional, the matrix-vector product is obtained. - If both arguments are at least 1-dimensional and at least one argument is N-dimensional (where N > 2), then a batched matrix multiply is obtained. If the first argument is 1-dimensional, a 1 is prepended to its dimension in order to conduct the batched matrix multiply and removed after. If the second argument is 1-dimensional, a 1 is appended to its dimension for the purpose of the batched matrix multiple and removed after. The non-matrix (exclude the last two dimensions) dimensions are broadcasted according the broadcast rule. For example, if input is a (j, 1, n, m) tensor and the other is a (k, m, p) tensor, out will be a (j, k, n, p) tensor. Args: x (Tensor): The input tensor which is a Tensor. y (Tensor): The input tensor which is a Tensor. transpose_x (bool): Whether to transpose :math:`x` before multiplication. transpose_y (bool): Whether to transpose :math:`y` before multiplication. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The output Tensor. Examples: .. 
code-block:: python import paddle import numpy as np # vector * vector x_data = np.random.random([10]).astype(np.float32) y_data = np.random.random([10]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [1] # matrix * vector x_data = np.random.random([10, 5]).astype(np.float32) y_data = np.random.random([5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10] # batched matrix * broadcasted vector x_data = np.random.random([10, 5, 2]).astype(np.float32) y_data = np.random.random([2]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 5] # batched matrix * batched matrix x_data = np.random.random([10, 5, 2]).astype(np.float32) y_data = np.random.random([10, 2, 5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 5, 5] # batched matrix * broadcasted matrix x_data = np.random.random([10, 1, 5, 2]).astype(np.float32) y_data = np.random.random([1, 3, 2, 5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 3, 5, 5] """ if in_dygraph_mode(): return _C_ops.final_state_matmul(x, y, transpose_x, transpose_y) if _in_legacy_dygraph(): op_type = 'matmul_v2' op = getattr(_C_ops, op_type) return op(x, y, 'trans_x', transpose_x, 'trans_y', transpose_y) attrs = { 'trans_x': transpose_x, 'trans_y': transpose_y, } def __check_input(x, y): var_names = {'x': x, 'y': y} for name, val in var_names.items(): check_variable_and_dtype( val, name, ['float16', 'float32', 'float64', 'complex64', 'complex128'], 'matmul') __check_input(x, y) helper = LayerHelper('matmul_v2', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matmul_v2', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs=attrs) return out def norm(x, p='fro', axis=None, keepdim=False, name=None): """ Returns the matrix norm (Frobenius) or vector norm (the 1-norm, the Euclidean or 2-norm, and in general the p-norm for p > 0) of a given tensor. .. note:: This norm API is different from `numpy.linalg.norm`. This api supports high-order input tensors (rank >= 3), and certain axis need to be pointed out to calculate the norm. But `numpy.linalg.norm` only supports 1-D vector or 2-D matrix as input tensor. For p-order matrix norm, this api actually treats matrix as a flattened vector to calculate the vector norm, NOT REAL MATRIX NORM. Args: x (Tensor): The input tensor could be N-D tensor, and the input data type could be float32 or float64. p (float|string, optional): Order of the norm. Supported values are `fro`, `0`, `1`, `2`, `inf`, `-inf` and any positive real number yielding the corresponding p-norm. Not supported: ord < 0 and nuclear norm. Default value is `fro`. axis (int|list|tuple, optional): The axis on which to apply norm operation. If axis is int or list(int)/tuple(int) with only one element, the vector norm is computed over the axis. If `axis < 0`, the dimension to norm operation is rank(input) + axis. If axis is a list(int)/tuple(int) with two elements, the matrix norm is computed over the axis. Defalut value is `None`. keepdim (bool, optional): Whether to reserve the reduced dimension in the output Tensor. 
The result tensor will have fewer dimension than the :attr:`input` unless :attr:`keepdim` is true, default value is False. name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: results of norm operation on the specified axis of input tensor, it's data type is the same as input's Tensor. Examples: .. code-block:: python import paddle import numpy as np shape=[2, 3, 4] np_input = np.arange(24).astype('float32') - 12 np_input = np_input.reshape(shape) x = paddle.to_tensor(np_input) #[[[-12. -11. -10. -9.] [ -8. -7. -6. -5.] [ -4. -3. -2. -1.]] # [[ 0. 1. 2. 3.] [ 4. 5. 6. 7.] [ 8. 9. 10. 11.]]] # compute frobenius norm along last two dimensions. out_fro = paddle.linalg.norm(x, p='fro', axis=[0,1]) # out_fro.numpy() [17.435596 16.911535 16.7332 16.911535] # compute 2-order vector norm along last dimension. out_pnorm = paddle.linalg.norm(x, p=2, axis=-1) #out_pnorm.numpy(): [[21.118711 13.190906 5.477226] # [ 3.7416575 11.224972 19.131126]] # compute 2-order norm along [0,1] dimension. out_pnorm = paddle.linalg.norm(x, p=2, axis=[0,1]) #out_pnorm.numpy(): [17.435596 16.911535 16.7332 16.911535] # compute inf-order norm out_pnorm = paddle.linalg.norm(x, p=np.inf) #out_pnorm.numpy() = [12.] out_pnorm = paddle.linalg.norm(x, p=np.inf, axis=0) #out_pnorm.numpy(): [[12. 11. 10. 9.] [8. 7. 6. 7.] [8. 9. 10. 11.]] # compute -inf-order norm out_pnorm = paddle.linalg.norm(x, p=-np.inf) #out_pnorm.numpy(): [0.] out_pnorm = paddle.linalg.norm(x, p=-np.inf, axis=0) #out_pnorm.numpy(): [[0. 1. 2. 3.] [4. 5. 6. 5.] [4. 3. 2. 1.]] """ def frobenius_norm(input, dim=None, keepdim=False, name=None): """ The frobenius norm OP is to calculate the frobenius norm of certain two dimensions of Tensor `input`. Args: input (Variable): Tensor, data type float32, float64. dim (list, optional): None for last two dimensions. keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False. """ if dim is not None and not (isinstance(dim, list) and len(dim) == 2): raise ValueError( "The dim of frobenius norm op should be None or two elements list!" ) if paddle.in_dynamic_mode(): if dim is None: return _C_ops.frobenius_norm(input, 'keep_dim', keepdim, 'reduce_all', True) return _C_ops.frobenius_norm(input, 'dim', dim, 'keep_dim', keepdim, 'reduce_all', False) attrs = {'dim': dim, 'keep_dim': keepdim, 'reduce_all': False} if dim is None: attrs['reduce_all'] = True check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'frobenius_norm') helper = LayerHelper('frobenius_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op( type='frobenius_norm', inputs={'X': input}, outputs={'Out': out}, attrs=attrs) return out def vector_norm(input, porder=None, axis=None, keepdim=False, asvector=False, name=None): """ Calculate the p-order vector norm for certain dimension of Tensor `input`. Args: input (Variable): Tensor, data type float32, float64. porder (float, optional): None for porder=2.0. axis (int, optional): None for last dimension. keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False. 
""" if paddle.in_dynamic_mode(): if axis is None: axis = -1 return _C_ops.p_norm(input, 'porder', porder, 'axis', axis, 'keepdim', keepdim, 'asvector', asvector) if porder is not None: check_type(porder, 'porder', (float, int), 'p_norm') if axis is not None: check_type(axis, 'axis', (int), 'p_norm') check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'p_norm') attrs = { 'axis': axis if axis is not None else -1, 'porder': float(porder) if porder is not None else 2.0, 'keepdim': keepdim, 'asvector': asvector, 'epsilon': 1e-12, } helper = LayerHelper('p_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op( type='p_norm', inputs={'X': input}, outputs={'Out': out}, attrs=attrs) return out def inf_norm(input, porder=None, axis=axis, keepdim=False, asvector=False, name=None): helper = LayerHelper('frobenius_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op(type='abs', inputs={'X': input}, outputs={'Out': out}) reduce_out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) reduce_all = True if axis == None or axis == [] or asvector == True else False axis = axis if axis != None and axis != [] else [0] reduce_type = 'reduce_max' if porder == np.float( 'inf') else 'reduce_min' helper.append_op( type=reduce_type, inputs={'X': out}, outputs={'Out': reduce_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) return reduce_out def p_matrix_norm(input, porder=1., axis=axis, keepdim=False, name=None): """ NOTE: This function actually treats the matrix as flattened vector to calculate vector norm instead of matrix norm. """ block = LayerHelper('norm', **locals()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) abs_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='abs', inputs={'X': input}, outputs={'Out': abs_out}) pow_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='pow', inputs={'X': abs_out}, outputs={'Out': pow_out}, attrs={'factor': porder}) sum_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': sum_out}, attrs={ 'dim': axis, 'keep_dim': keepdim, 'reduce_all': True if axis is None else False }) porder block.append_op( type='pow', inputs={'X': sum_out}, outputs={'Out': out}, attrs={'factor': float(1. / porder)}) return out if axis is None and p is not None: if isinstance(p, str): if p == "fro": return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name) else: raise ValueError( "only valid string values are 'fro', found {}".format(p)) elif isinstance(p, (int, float)): return vector_norm( x, porder=p, axis=axis, keepdim=keepdim, asvector=True, name=name) else: raise ValueError("only valid p type is string or float, found {}". 
format(type(p))) if isinstance(axis, tuple): axis = list(axis) if isinstance(axis, list) and len(axis) == 1: axis = axis[0] #calculate vector norm, where axis is int or list with only one integer if isinstance(axis, int): if isinstance(p, str): if p == "fro": return vector_norm( x, porder=2, axis=axis, keepdim=keepdim, asvector=False, name=name) else: raise ValueError( "only valid string values are 'fro', found {}".format(p)) elif isinstance(p, (int, float)): return vector_norm( x, axis=axis, porder=p, keepdim=keepdim, asvector=False, name=name) else: raise ValueError( "unspport p for p-order vector norm. except float, found {}". format(p)) #calculate matrix norm, where axis is list with two integers elif isinstance(axis, list) and len(axis) == 2: if p == "fro": return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name) elif p == np.inf or p == -np.inf: return inf_norm(x, porder=p, axis=axis, keepdim=keepdim, name=name) elif p == 0: raise ValueError( "just suport axis type int or list (length of list <=1) if p = 0, found {}". format(axis)) else: return p_matrix_norm( x, porder=p, axis=axis, keepdim=keepdim, name=name) else: raise ValueError( "except axis type int or list (length of list <=2), found {}". format(axis)) def dist(x, y, p=2, name=None): r""" This OP returns the p-norm of (x - y). It is not a norm in a strict sense, only as a measure of distance. The shapes of x and y must be broadcastable. The definition is as follows, for details, please refer to the `numpy's broadcasting <https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`_: - Each input has at least one dimension. - Match the two input dimensions from back to front, the dimension sizes must either be equal, one of them is 1, or one of them does not exist. Where, z = x - y, the shapes of x and y are broadcastable, then the shape of z can be obtained as follows: 1. If the number of dimensions of x and y are not equal, prepend 1 to the dimensions of the tensor with fewer dimensions. For example, The shape of x is [8, 1, 6, 1], the shape of y is [7, 1, 5], prepend 1 to the dimension of y. x (4-D Tensor): 8 x 1 x 6 x 1 y (4-D Tensor): 1 x 7 x 1 x 5 2. Determine the size of each dimension of the output z: choose the maximum value from the two input dimensions. z (4-D Tensor): 8 x 7 x 6 x 5 If the number of dimensions of the two inputs are the same, the size of the output can be directly determined in step 2. When p takes different values, the norm formula is as follows: When p = 0, defining $0^0=0$, the zero-norm of z is simply the number of non-zero elements of z. .. math:: ||z||_{0}=\lim_{p \\rightarrow 0}\sum_{i=1}^{m}|z_i|^{p} When p = inf, the inf-norm of z is the maximum element of z. .. math:: ||z||_\infty=\max_i |z_i| When p = -inf, the negative-inf-norm of z is the minimum element of z. .. math:: ||z||_{-\infty}=\min_i |z_i| Otherwise, the p-norm of z follows the formula, .. math:: ||z||_{p}=(\sum_{i=1}^{m}|z_i|^p)^{\\frac{1}{p}} Args: x (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64. y (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64. p (float, optional): The norm to be computed, its data type is float32 or float64. Default: 2. Returns: Tensor: Tensor that is the p-norm of (x - y). Examples: .. code-block:: python import paddle import numpy as np x = paddle.to_tensor(np.array([[3, 3],[3, 3]]), "float32") y = paddle.to_tensor(np.array([[3, 3],[3, 1]]), "float32") out = paddle.dist(x, y, 0) print(out) # out = [1.] out = paddle.dist(x, y, 2) print(out) # out = [2.] 
out = paddle.dist(x, y, float("inf")) print(out) # out = [2.] out = paddle.dist(x, y, float("-inf")) print(out) # out = [0.] """ check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'dist') check_variable_and_dtype(y, 'dtype', ['float32', 'float64'], 'dist') check_type(p, 'p', (float, int), 'dist') helper = LayerHelper("dist", **locals()) out = helper.create_variable_for_type_inference(x.dtype) inputs = {"X": [x], "Y": [y]} outputs = {'Out': [out]} attrs = {"p": float(p)} helper.append_op( type='dist', inputs=inputs, outputs={'Out': out}, attrs=attrs) return out def cond(x, p=None, name=None): """ Computes the condition number of a matrix or batches of matrices with respect to a matrix norm ``p``. Args: x (Tensor): The input tensor could be tensor of shape ``(*, m, n)`` where ``*`` is zero or more batch dimensions for ``p`` in ``(2, -2)``, or of shape ``(*, n, n)`` where every matrix is invertible for any supported ``p``. And the input data type could be ``float32`` or ``float64``. p (float|string, optional): Order of the norm. Supported values are `fro`, `nuc`, `1`, `-1`, `2`, `-2`, `inf`, `-inf`. Default value is `None`, meaning that the order of the norm is `2`. name (str, optional): The default value is `None`. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: computing results of condition number, its data type is the same as input Tensor ``x``. Examples: .. code-block:: python import paddle import numpy as np x = paddle.to_tensor([[1., 0, -1], [0, 1, 0], [1, 0, 1]]) # compute conditional number when p is None out = paddle.linalg.cond(x) # out.numpy() [1.4142135] # compute conditional number when order of the norm is 'fro' out_fro = paddle.linalg.cond(x, p='fro') # out_fro.numpy() [3.1622777] # compute conditional number when order of the norm is 'nuc' out_nuc = paddle.linalg.cond(x, p='nuc') # out_nuc.numpy() [9.2426405] # compute conditional number when order of the norm is 1 out_1 = paddle.linalg.cond(x, p=1) # out_1.numpy() [2.] # compute conditional number when order of the norm is -1 out_minus_1 = paddle.linalg.cond(x, p=-1) # out_minus_1.numpy() [1.] # compute conditional number when order of the norm is 2 out_2 = paddle.linalg.cond(x, p=2) # out_2.numpy() [1.4142135] # compute conditional number when order of the norm is -1 out_minus_2 = paddle.linalg.cond(x, p=-2) # out_minus_2.numpy() [0.70710677] # compute conditional number when order of the norm is inf out_inf = paddle.linalg.cond(x, p=np.inf) # out_inf.numpy() [2.] # compute conditional number when order of the norm is -inf out_minus_inf = paddle.linalg.cond(x, p=-np.inf) # out_minus_inf.numpy() [1.] 
a = paddle.to_tensor(np.random.randn(2, 4, 4).astype('float32')) # a.numpy() # [[[ 0.14063153 -0.996288 0.7996131 -0.02571543] # [-0.16303636 1.5534962 -0.49919784 -0.04402903] # [-1.1341571 -0.6022629 0.5445269 0.29154757] # [-0.16816919 -0.30972657 1.7521842 -0.5402487 ]] # [[-0.58081484 0.12402827 0.7229862 -0.55046535] # [-0.15178485 -1.1604939 0.75810957 0.30971205] # [-0.9669573 1.0940945 -0.27363303 -0.35416734] # [-1.216529 2.0018666 -0.7773689 -0.17556527]]] a_cond_fro = paddle.linalg.cond(a, p='fro') # a_cond_fro.numpy() [31.572273 28.120834] b = paddle.to_tensor(np.random.randn(2, 3, 4).astype('float64')) # b.numpy() # [[[ 1.61707487 0.46829144 0.38130416 0.82546736] # [-1.72710298 0.08866375 -0.62518804 0.16128892] # [-0.02822879 -1.67764516 0.11141444 0.3220113 ]] # [[ 0.22524372 0.62474921 -0.85503233 -1.03960523] # [-0.76620689 0.56673047 0.85064753 -0.45158196] # [ 1.47595418 2.23646462 1.5701758 0.10497519]]] b_cond_2 = paddle.linalg.cond(b, p=2) # b_cond_2.numpy() [3.30064451 2.51976252] """ def mat_norm(input, porder=1., axis=None): """ NOTE: Calculate the matrix norm of a square matrix or batches of square matrices, when porder is in (1, -1, inf, -inf) """ reduce_all = True if axis is None or axis == [] else False axis = axis if axis != None and axis != [] else [0] keepdim = False if paddle.in_dynamic_mode(): abs_out = _C_ops.abs(input) sum_out = _C_ops.reduce_sum(abs_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == 1 or porder == np.inf: return _C_ops.reduce_max(sum_out, 'dim', [-1], 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == -1 or porder == -np.inf: return _C_ops.reduce_min(sum_out, 'dim', [-1], 'keepdim', keepdim, 'reduce_all', reduce_all) block = LayerHelper('norm', **locals()) abs_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='abs', inputs={'X': input}, outputs={'Out': abs_out}) block.append_op( type='reduce_sum', inputs={'X': abs_out}, outputs={'Out': sum_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) if porder == 1 or porder == np.inf: block.append_op( type='reduce_max', inputs={'X': sum_out}, outputs={'Out': out}, attrs={ 'dim': [-1], 'keep_dim': keepdim, 'reduce_all': reduce_all }) if porder == -1 or porder == -np.inf: block.append_op( type='reduce_min', inputs={'X': sum_out}, outputs={'Out': out}, attrs={ 'dim': [-1], 'keep_dim': keepdim, 'reduce_all': reduce_all }) return out def fro_norm(input, porder=2, axis=[-1]): """ NOTE: Calculate the frobenius norm of a square matrix or batches of square matrices. """ reduce_all = True if axis is None or axis == [] else False keepdim = False if paddle.in_dynamic_mode(): pow_out = _C_ops.pow(input, 'factor', porder) sum_out_1 = _C_ops.reduce_sum(pow_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) sum_out_2 = _C_ops.reduce_sum(sum_out_1, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) return _C_ops.pow(sum_out_2, 'factor', float(1. 
/ porder)) block = LayerHelper('norm', **locals()) pow_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out_1 = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out_2 = block.create_variable_for_type_inference( dtype=block.input_dtype()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='pow', inputs={'X': input}, outputs={'Out': pow_out}, attrs={'factor': porder}) block.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': sum_out_1}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='reduce_sum', inputs={'X': sum_out_1}, outputs={'Out': sum_out_2}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='pow', inputs={'X': sum_out_2}, outputs={'Out': out}, attrs={'factor': float(1. / porder)}) return out def svd_norm(input, porder, axis=[-1]): """ NOTE: Calculate the matrix norm, which is related to singular values, of a matrix or batches of matrices, including nuclear norm, 2-norm and (-2)-norm. """ reduce_all = True if axis is None or axis == [] else False keepdim = False u, s, vh = svd(input, full_matrices=False) if paddle.in_dynamic_mode(): if porder == "nuc": return _C_ops.reduce_sum(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) max_out = _C_ops.reduce_max(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) min_out = _C_ops.reduce_min(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == 2: return _C_ops.elementwise_div(max_out, min_out, 'aixs', axis, 'use_mkldnn', False) if porder == -2: return _C_ops.elementwise_div(min_out, max_out, 'aixs', axis, 'use_mkldnn', False) block = LayerHelper('norm', **locals()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) if porder == "nuc": block.append_op( type='reduce_sum', inputs={'X': s}, outputs={'Out': out}, attrs={ 'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all }) return out max_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) min_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='reduce_max', inputs={'X': s}, outputs={'Out': max_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='reduce_min', inputs={'X': s}, outputs={'Out': min_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) if porder == 2: block.append_op( type='elementwise_div', inputs={'X': max_out, 'Y': min_out}, outputs={'Out': out}, attrs={'aixs': axis, 'use_mkldnn': False}) return out if porder == -2: block.append_op( type='elementwise_div', inputs={'X': min_out, 'Y': max_out}, outputs={'Out': out}, attrs={'aixs': axis, 'use_mkldnn': False}) return out def empty_tensor(input, shape): if paddle.in_dynamic_mode(): return input.reshape(shape) raise ValueError("only support x is nonempty tensor in static mode") x_shape = list(x.shape) if not len(x_shape) >= 2: raise ValueError("input should be a matrix or batches of matrices, " + "but the dimention of received input is {}".format( len(x_shape))) if p == None: p = 2 x_size = 0 if (0 in x_shape) else 1 if p in ("fro", "nuc", 1, -1, np.inf, -np.inf): if x_shape[len(x_shape) - 1] == x_shape[len(x_shape) - 2]: if x_size == 0: return empty_tensor(x, x_shape[:-2]) x_inv = x.inverse() if p == "fro": return fro_norm(x) * fro_norm(x_inv) if p == "nuc": return svd_norm(x, p) * svd_norm(x_inv, p) if p in (1, -1): return mat_norm( x, 
porder=p, axis=[-2]) * mat_norm( x_inv, porder=p, axis=[-2]) if p in (np.inf, -np.inf): return mat_norm( x, porder=p, axis=[-1]) * mat_norm( x_inv, porder=p, axis=[-1]) else: raise ValueError("only support p is {} when input is a ".format(p) + "square matrix or batches of square matrices") elif p in (2, -2): if x_size == 0: return empty_tensor(x, x_shape[:-2]) return svd_norm(x, porder=p) else: raise ValueError( "unsupported {} for p, only supporting ('fro', 'nuc', ".format( p) + "1, -1, 2, -2, inf, -inf) or none") def dot(x, y, name=None): """ This operator calculates inner product for vectors. .. note:: Support 1-d and 2-d Tensor. When it is 2d, the first dimension of this matrix is the batch dimension, which means that the vectors of multiple batches are dotted. Parameters: x(Tensor): 1-D or 2-D ``Tensor``. Its dtype should be ``float32``, ``float64``, ``int32``, ``int64`` y(Tensor): 1-D or 2-D ``Tensor``. Its dtype soulde be ``float32``, ``float64``, ``int32``, ``int64`` name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. Details: :ref:`api_guide_Name` Returns: Tensor: the calculated result Tensor. Examples: .. code-block:: python import paddle import numpy as np x_data = np.random.uniform(0.1, 1, [10]).astype(np.float32) y_data = np.random.uniform(1, 3, [10]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.dot(x, y) print(z) """ op_type = 'dot' # skip var type check in dygraph mode to improve efficiency if paddle.in_dynamic_mode(): op = getattr(_C_ops, op_type) return op(x, y) assert x is not None, 'x cannot be None in {}'.format(op_type) assert y is not None, 'y cannot be None in {}'.format(op_type) check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], op_type) check_variable_and_dtype(y, 'y', ['float32', 'float64', 'int32', 'int64'], op_type) helper = LayerHelper(op_type, **locals()) if name is None: out = helper.create_variable_for_type_inference(dtype=x.dtype) else: out = helper.create_variable( name=name, dtype=x.dtype, persistable=False) helper.append_op( type="dot", inputs={'X': x, 'Y': y}, attrs={}, outputs={"Out": out}) return out def cov(x, rowvar=True, ddof=True, fweights=None, aweights=None, name=None): """ Estimate the covariance matrix of the input variables, given data and weights. A covariance matrix is a square matrix, indicate the covariance of each pair variables in the input matrix. For example, for an N-dimensional samples X=[x1,x2,…xN]T, then the covariance matrix element Cij is the covariance of xi and xj. The element Cii is the variance of xi itself. Parameters: x(Tensor): A N-D(N<=2) Tensor containing multiple variables and observations. By default, each row of x represents a variable. Also see rowvar below. rowvar(Bool, optional): If rowvar is True (default), then each row represents a variable, with observations in the columns. Default: True ddof(Bool, optional): If ddof=True will return the unbiased estimate, and ddof=False will return the simple average. Default: True fweights(Tensor, optional): 1-D Tensor of integer frequency weights; The number of times each observation vector should be repeated. Default: None aweights(Tensor, optional): 1-D Tensor of observation vector weights. How important of the observation vector, larger data means this element is more important. Default: None name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. 
Details: :ref:`api_guide_Name` Returns: Tensor: The covariance matrix Tensor of the variables. Examples: .. code-block:: python import paddle xt = paddle.rand((3,4)) paddle.linalg.cov(xt) ''' Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, [[0.07918842, 0.06127326, 0.01493049], [0.06127326, 0.06166256, 0.00302668], [0.01493049, 0.00302668, 0.01632146]]) ''' """ op_type = 'cov' if len(x.shape) > 2 or len(x.shape) < 1: raise ValueError( "Input(x) only support N-D (1<=N<=2) tensor in cov, but received " "length of Input(input) is %s." % len(x.shape)) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cov') nx = x if len(x.shape) == 1: nx = x.reshape((1, -1)) if not rowvar and nx.shape[0] != 1: nx = nx.t() w = None observation_num = nx.shape[1] if fweights is not None: w = fweights.astype(nx.dtype) if len(w.shape) > 1: raise ValueError( "Input(fweights) only support N-D (N<=1) tensor in cov, but received " "shape of Input(input) is %s." % len(fweights.shape)) if fweights.shape[0] != observation_num: raise ValueError( "The number of Input(fweights) should equal to x's dim[1]: {}, but received " "size of Input(fweights) is {}.".format(observation_num, fweights.shape[0])) if fweights.min() < 0: raise ValueError( "The value of Input(fweights) cannot be negtive, but received " "min of Input(fweights) is {}.".format(fweights.min())) if not paddle.all(fweights == paddle.round(fweights.astype('float64'))): raise ValueError("Input(fweights) must be integer ") if aweights is not None: aw = aweights.astype(nx.dtype) if len(aw.shape) > 1: raise ValueError( "Input(aweights) only support N-D (N<=1) tensor in cov, but received " "length of Input(input) is %s." % len(aweights.shape)) check_variable_and_dtype(aweights, 'dtype', ['float32', 'float64'], 'cov') if aweights.shape[0] != observation_num: raise ValueError( "The number of Input(aweights) should equal to x's dim[1]: {}, but received " "size of Input(aweights) is {}.".format(observation_num, aweights.shape[0])) if aweights.min() < 0: raise ValueError( "The value of Input(aweights) cannot be negtive, but received " "min of Input(aweights) is {}.".format(aweights.min())) if w is not None: w = w * aw else: w = aw w_sum = paddle.to_tensor(observation_num, dtype=nx.dtype) if fweights is not None or aweights is not None: w_sum = w.sum() if w_sum.item() == 0: raise ValueError("The sum of weights is zero, can't be normalized.") if w is not None: nx_w = nx * w avg = (nx_w).sum(axis=1) / w_sum else: avg = nx.sum(axis=1) / w_sum nx_w = nx if w is not None and aweights is not None and ddof == True: norm_factor = w_sum - (w * aweights).sum() / w_sum else: norm_factor = w_sum - ddof if norm_factor <= 0: norm_factor = paddle.to_tensor(0, dtype=nx.dtype) nx = nx - avg.unsqueeze(1) xxt = paddle.mm(nx, nx_w.t().conj()) cov = paddle.divide(xxt, norm_factor).squeeze() return cov def t(input, name=None): """ Transpose <=2-D tensor. 0-D and 1-D tensors are returned as it is and 2-D tensor is equal to the paddle.transpose function which perm dimensions set 0 and 1. Args: input (Tensor): The input Tensor. It is a N-D (N<=2) Tensor of data types float16, float32, float64, int32. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` Returns: Tensor: A transposed n-D Tensor, with data type being float16, float32, float64, int32, int64. For Example: .. 
code-block:: text # Example 1 (0-D tensor) x = tensor([0.79]) paddle.t(x) = tensor([0.79]) # Example 2 (1-D tensor) x = tensor([0.79, 0.84, 0.32]) paddle.t(x) = tensor([0.79, 0.84, 0.32]) # Example 3 (2-D tensor) x = tensor([0.79, 0.84, 0.32], [0.64, 0.14, 0.57]) paddle.t(x) = tensor([0.79, 0.64], [0.84, 0.14], [0.32, 0.57]) Examples: .. code-block:: python import paddle x = paddle.ones(shape=[2, 3], dtype='int32') x_transposed = paddle.t(x) print(x_transposed.shape) # [3, 2] """ if len(input.shape) > 2: raise ValueError( "Input(input) only support N-D (N<=2) tensor, but received " "length of Input(input) is %s. Perhaps you can use paddle." "tensor.transpose() instead." % len(input.shape)) if paddle.in_dynamic_mode(): if len(input.shape) == 1: return input # 2-D tensor perm = [1, 0] out, _ = _C_ops.transpose2(input, 'axis', perm) return out check_variable_and_dtype( input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'], 'transpose') helper = LayerHelper('t', **locals()) out = helper.create_variable_for_type_inference(input.dtype) input_shape = helper.create_variable_for_type_inference(input.dtype) if len(input.shape) == 1: out = input else: helper.append_op( type='transpose2', inputs={'X': [input]}, outputs={'Out': [out], 'XShape': [input_shape]}, attrs={'axis': [1, 0]}) return out def cross(x, y, axis=None, name=None): """ Computes the cross product between two tensors along an axis. Inputs must have the same shape, and the length of their axes should be equal to 3. If `axis` is not given, it defaults to the first axis found with the length 3. Args: x (Tensor): The first input tensor. y (Tensor): The second input tensor. axis (int, optional): The axis along which to compute the cross product. It defaults to the first axis found with the length 3. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor. A Tensor with same data type as `x`. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0], [3.0, 3.0, 3.0]]) y = paddle.to_tensor([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]) z1 = paddle.cross(x, y) # [[-1. -1. -1.] # [ 2. 2. 2.] # [-1. -1. -1.]] z2 = paddle.cross(x, y, axis=1) # [[0. 0. 0.] # [0. 0. 0.] # [0. 0. 0.]] """ if in_dygraph_mode(): return _C_ops.final_state_cross(x, y, axis) else: if _in_legacy_dygraph(): if axis is not None: return _C_ops.cross(x, y, 'dim', axis) else: return _C_ops.cross(x, y) else: helper = LayerHelper("cross", **locals()) out = helper.create_variable_for_type_inference(x.dtype) attrs = dict() attrs['dim'] = axis helper.append_op( type='cross', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs=attrs) return out def cholesky(x, upper=False, name=None): r""" Computes the Cholesky decomposition of one symmetric positive-definite matrix or batches of symmetric positive-definite matrice. If `upper` is `True`, the decomposition has the form :math:`A = U^{T}U` , and the returned matrix :math:`U` is upper-triangular. Otherwise, the decomposition has the form :math:`A = LL^{T}` , and the returned matrix :math:`L` is lower-triangular. Args: x (Tensor): The input tensor. Its shape should be `[*, M, M]`, where * is zero or more batch dimensions, and matrices on the inner-most 2 dimensions all should be symmetric positive-definite. Its data type should be float32 or float64. upper (bool): The flag indicating whether to return upper or lower triangular matrices. Default: False. 
Returns: Tensor: A Tensor with same shape and data type as `x`. It represents \ triangular matrices generated by Cholesky decomposition. Examples: .. code-block:: python import paddle import numpy as np a = np.random.rand(3, 3) a_t = np.transpose(a, [1, 0]) x_data = np.matmul(a, a_t) + 1e-03 x = paddle.to_tensor(x_data) out = paddle.linalg.cholesky(x, upper=False) print(out) # [[1.190523 0. 0. ] # [0.9906703 0.27676893 0. ] # [1.25450498 0.05600871 0.06400121]] """ if paddle.in_dynamic_mode(): return _C_ops.cholesky(x, "upper", upper) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cholesky') check_type(upper, 'upper', bool, 'cholesky') helper = LayerHelper('cholesky', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='cholesky', inputs={'X': [x]}, outputs={'Out': out}, attrs={'upper': upper}) return out def matrix_rank(x, tol=None, hermitian=False, name=None): r""" Computes the rank of a matrix. The rank of a matrix is the number of singular values that are greater than the specified `tol` threshold when hermitian=False, or the number of eigenvalues in absolute value that are greater than the specified `tol` threshold when hermitian=True. Args: x (Tensor): The input tensor. Its shape should be `[..., m, n]`, where `...` is zero or more batch dimensions. If `x` is a batch of matrices then the output has the same batch dimensions. The data type of `x` should be float32 or float64. tol (float,Tensor,optional): the tolerance value. Default: None. If `tol` is not specified, and `sigma` is the largest singular value (or eigenvalues in absolute value), and `eps` is the epsilon value for the dtype of `x`, then `tol` is computed with formula `tol=sigma * max(m,n) * eps`. Note that if `x` is a batch of matrices, `tol` is computed this way for every batch. hermitian (bool,optional): indicates whether `x` is Hermitian. Default: False. When hermitian=True, `x` is assumed to be Hermitian, enabling a more efficient method for finding eigenvalues, but `x` is not checked inside the function. Instead, We just use the lower triangular of the matrix to compute. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: Rank of tensor x. Examples: .. 
code-block:: python import paddle a = paddle.eye(10) b = paddle.linalg.matrix_rank(a) print(b) # b = [10] c = paddle.ones(shape=[3, 4, 5, 5]) d = paddle.linalg.matrix_rank(c, tol=0.01, hermitian=True) print(d) # d = [[1, 1, 1, 1], # [1, 1, 1, 1], # [1, 1, 1, 1]] """ if paddle.in_dynamic_mode(): if tol is None: tol_tensor = None tol_attr = 0.0 use_default_tol = True elif isinstance(tol, Variable): if tol.dtype != x.dtype: tol_tensor = cast(tol, x.dtype) else: tol_tensor = tol tol_attr = 0.0 use_default_tol = False else: tol_tensor = None tol_attr = float(tol) use_default_tol = False return _C_ops.matrix_rank(x, tol_tensor, "tol", tol_attr, 'hermitian', hermitian, 'use_default_tol', use_default_tol) inputs = {} attrs = {} check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'matrix_rank') inputs['X'] = x if tol is None: attrs['use_default_tol'] = True elif isinstance(tol, Variable): check_variable_and_dtype(tol, 'tol', ['float32'], 'matrix_rank') attrs['use_default_tol'] = False if tol.dtype != x.dtype: inputs['TolTensor'] = cast(tol, x.dtype) else: inputs['TolTensor'] = tol else: check_type(tol, 'tol', float, 'matrix_rank') attrs['use_default_tol'] = False attrs['tol'] = tol check_type(hermitian, 'hermitian', bool, 'matrix_rank') attrs['hermitian'] = hermitian helper = LayerHelper('matrix_rank', **locals()) out = helper.create_variable_for_type_inference(dtype='int32') helper.append_op( type='matrix_rank', inputs=inputs, outputs={'Out': out}, attrs=attrs) return out def bmm(x, y, name=None): """ Applies batched matrix multiplication to two tensors. Both of the two input tensors must be three-dementional and share the same batch size. if x is a (b, m, k) tensor, y is a (b, k, n) tensor, the output will be a (b, m, n) tensor. Args: x (Tensor): The input Tensor. y (Tensor): The input Tensor. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The product Tensor. Examples: .. code-block:: python import paddle # In imperative mode: # size x: (2, 2, 3) and y: (2, 3, 2) x = paddle.to_tensor([[[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]], [[3.0, 3.0, 3.0], [4.0, 4.0, 4.0]]]) y = paddle.to_tensor([[[1.0, 1.0],[2.0, 2.0],[3.0, 3.0]], [[4.0, 4.0],[5.0, 5.0],[6.0, 6.0]]]) out = paddle.bmm(x, y) #output size: (2, 2, 2) #output value: #[[[6.0, 6.0],[12.0, 12.0]],[[45.0, 45.0],[60.0, 60.0]]] out_np = out.numpy() """ x_shape = x.shape y_shape = y.shape if not len(x_shape) == len(y_shape) == 3: raise ValueError( "x and y should be 3-dimensional. But received x's dimention: {}, y's dimention: {}". format(x_shape, y_shape)) if x_shape[2] != y_shape[1]: raise ValueError( "x's width must be equal with y's height. But received x's shape: {}, y's shape: {}". format(x_shape, y_shape)) if x_shape[0] != y_shape[0]: raise ValueError( "x's batch (shape[0]) must be equal with y's batch (shape[0]). But received x's shape: {}, y's shape: {}". format(x_shape, y_shape)) if paddle.in_dynamic_mode(): return _C_ops.bmm(x, y) helper = LayerHelper('bmm', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type='bmm', inputs={'X': x, 'Y': y}, outputs={'Out': out}) return out def histogram(input, bins=100, min=0, max=0, name=None): """ Computes the histogram of a tensor. The elements are sorted into equal width bins between min and max. If min and max are both zero, the minimum and maximum values of the data are used. Args: input (Tensor): A Tensor(or LoDTensor) with shape :math:`[N_1, N_2,..., N_k]` . 
The data type of the input Tensor should be float32, float64, int32, int64. bins (int): number of histogram bins min (int): lower end of the range (inclusive) max (int): upper end of the range (inclusive) Returns: Tensor: data type is int64, shape is (nbins,). Examples: .. code-block:: python import paddle inputs = paddle.to_tensor([1, 2, 1]) result = paddle.histogram(inputs, bins=4, min=0, max=3) print(result) # [0, 2, 1, 0] """ if paddle.in_dynamic_mode(): return _C_ops.histogram(input, "bins", bins, "min", min, "max", max) helper = LayerHelper('histogram', **locals()) check_variable_and_dtype( input, 'X', ['int32', 'int64', 'float32', 'float64'], 'histogram') out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64) helper.append_op( type='histogram', inputs={'X': input}, outputs={'Out': out}, attrs={'bins': bins, 'min': min, 'max': max}) return out def bincount(x, weights=None, minlength=0, name=None): """ Computes frequency of each value in the input tensor. Args: x (Tensor): A Tensor with non-negative integer. Should be 1-D tensor. weights (Tensor, optional): Weight for each value in the input tensor. Should have the same shape as input. Default is None. minlength (int, optional): Minimum number of bins. Should be non-negative integer. Default is 0. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor of frequency. Examples: .. code-block:: python import paddle x = paddle.to_tensor([1, 2, 1, 4, 5]) result1 = paddle.bincount(x) print(result1) # [0, 2, 1, 0, 1, 1] w = paddle.to_tensor([2.1, 0.4, 0.1, 0.5, 0.5]) result2 = paddle.bincount(x, weights=w) print(result2) # [0., 2.19999981, 0.40000001, 0., 0.50000000, 0.50000000] """ if x.dtype not in [paddle.int32, paddle.int64]: raise TypeError("Elements in Input(x) should all be integers") if paddle.in_dynamic_mode(): return _C_ops.bincount(x, weights, "minlength", minlength) helper = LayerHelper('bincount', **locals()) check_variable_and_dtype(x, 'X', ['int32', 'int64'], 'bincount') if weights is not None: check_variable_and_dtype(weights, 'Weights', ['int32', 'int64', 'float32', 'float64'], 'bincount') out = helper.create_variable_for_type_inference(dtype=weights.dtype) else: out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='bincount', inputs={'X': x, 'Weights': weights}, outputs={'Out': out}, attrs={'minlength': minlength}) return out def mv(x, vec, name=None): """ Performs a matrix-vector product of the matrix x and the vector vec. Args: x (Tensor): A tensor with shape :math:`[M, N]` , The data type of the input Tensor x should be one of float32, float64. vec (Tensor): A tensor with shape :math:`[N]` , The data type of the input Tensor x should be one of float32, float64. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor which is producted by x and vec. Examples: .. 
code-block:: python # x: [M, N], vec: [N] # paddle.mv(x, vec) # out: [M] import numpy as np import paddle x_data = np.array([[2, 1, 3], [3, 0, 1]]).astype("float64") x = paddle.to_tensor(x_data) vec_data = np.array([3, 5, 1]) vec = paddle.to_tensor(vec_data).astype("float64") out = paddle.mv(x, vec) """ if in_dygraph_mode(): return _C_ops.final_state_mv(x, vec) else: if _in_legacy_dygraph(): out = _C_ops.mv(x, vec) return out else: def __check_input(x, vec): var_names = {'x': x, 'vec': vec} for name, val in var_names.items(): check_variable_and_dtype(val, name, ['float32', 'float64'], 'mv') x_shape = list(x.shape) vec_shape = list(vec.shape) if len(x_shape) != 2: raise ValueError( "x should be 2-dimensional. But received x's dimention: {}". format(x_shape)) if len(vec_shape) != 1: raise ValueError( "vec should be 1-dimensional. But received vec's dimention: {}". format(vec_shape)) __check_input(x, vec) helper = LayerHelper('mv', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='mv', inputs={'X': x, 'Vec': vec}, outputs={'Out': out}) return out def det(x, name=None): """ Calculates determinant value of a square matrix or batches of square matrices. Args: x (Tensor): input (Tensor): the input matrix of size `(n, n)` or the batch of matrices of size `(*, n, n)` where `*` is one or more batch dimensions. Returns: y (Tensor):the determinant value of a square matrix or batches of square matrices. Examples: .. code-block:: python import paddle x = paddle.randn([3,3,3]) A = paddle.linalg.det(x) print(A) # [ 0.02547996, 2.52317095, -6.15900707]) """ if paddle.in_dynamic_mode(): return _C_ops.determinant(x) check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'det') input_shape = list(x.shape) assert len(input_shape) >= 2, \ "The x must be at least 2-dimensional, " \ "but received Input x's dimensional: %s.\n" % \ len(input_shape) assert (input_shape[-1] == input_shape[-2]), \ "Expect squared input," \ "but received %s by %s matrix.\n" \ %(input_shape[-2], input_shape[-1]) \ helper = LayerHelper('determinant', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='determinant', inputs={'Input': [x]}, outputs={'Out': [out]}) return out def slogdet(x, name=None): """ Calculates the sign and natural logarithm of the absolute value of a square matrix's or batches square matrices' determinant. The determinant can be computed with ``sign * exp(logabsdet) Supports input of float, double Note that for matrices that have zero determinant, this returns ``(0, -inf)`` Args: x (Tensor): the batch of matrices of size :math:`(*, n, n)` where math:`*` is one or more batch dimensions. Returns: y (Tensor): A tensor containing the sign of the determinant and the natural logarithm of the absolute value of determinant, respectively. Examples: .. code-block:: python import paddle x = paddle.randn([3,3,3]) A = paddle.linalg.slogdet(x) print(A) # [[ 1. , 1. , -1. 
], # [-0.98610914, -0.43010661, -0.10872950]]) """ if paddle.in_dynamic_mode(): return _C_ops.slogdeterminant(x) check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'slogdet') input_shape = list(x.shape) assert len(input_shape) >= 2, \ "The x must be at least 2-dimensional, " \ "but received Input x's dimensional: %s.\n" % \ len(input_shape) assert (input_shape[-1] == input_shape[-2]), \ "Expect squared input," \ "but received %s by %s matrix.\n" \ %(input_shape[-2], input_shape[-1]) \ helper = LayerHelper('slogdeterminant', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='slogdeterminant', inputs={'Input': [x]}, outputs={'Out': [out]}) return out def svd(x, full_matrices=False, name=None): r""" Computes the singular value decomposition of one matrix or a batch of regular matrices. Let :math:`X` be the input matrix or a batch of input matrices, the output should satisfies: .. math:: X = U * diag(S) * VT Args: x (Tensor): The input tensor. Its shape should be `[..., N, M]`, where `...` is zero or more batch dimensions. N and M can be arbitraty positive number. Note that if x is sigular matrices, the grad is numerical instable. The data type of x should be float32 or float64. full_matrices (bool): A flag to control the behavor of svd. If full_matrices = True, svd op will compute full U and V matrics, which means shape of U is `[..., N, N]`, shape of V is `[..., M, M]`. K = min(M, N). If full_matrices = False, svd op will use a economic method to store U and V. which means shape of U is `[..., N, K]`, shape of V is `[..., M, K]`. K = min(M, N). name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tuple of 3 tensors: (U, S, VH). VH is the conjugate transpose of V. S is the singlar value vectors of matrics with shape `[..., K]` Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [1.0, 3.0], [4.0, 6.0]]).astype('float64') x = x.reshape([3, 2]) u, s, vh = paddle.linalg.svd(x) print (u) #U = [[ 0.27364809, -0.21695147 ], # [ 0.37892198, -0.87112408 ], # [ 0.8840446 , 0.44053933 ]] print (s) #S = [8.14753743, 0.78589688] print (vh) #VT= [[ 0.51411221, 0.85772294], # [ 0.85772294, -0.51411221]] # one can verify : U * S * VT == X # U * UH == I # V * VH == I """ if paddle.in_dynamic_mode(): return _C_ops.svd(x, 'full_matrices', full_matrices) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'svd') check_type(full_matrices, 'full_matrices', bool, 'svd') helper = LayerHelper('svd', **locals()) u = helper.create_variable_for_type_inference(dtype=x.dtype) vh = helper.create_variable_for_type_inference(dtype=x.dtype) s = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['full_matrices'] = full_matrices helper.append_op( type='svd', inputs={'X': [x]}, outputs={'U': u, 'VH': vh, 'S': s}, attrs=attrs, ) return u, s, vh def matrix_power(x, n, name=None): r""" Computes the n-th power of a square matrix or a batch of square matrices. Let :math:`X` be a sqaure matrix or a batch of square matrices, :math:`n` be an exponent, the equation should be: .. math:: Out = X ^ {n} Specifically, - If `n > 0`, it returns the matrix or a batch of matrices raised to the power of `n`. - If `n = 0`, it returns the identity matrix or a batch of identity matrices. - If `n < 0`, it returns the inverse of each matrix (if invertible) raised to the power of `abs(n)`. 
Args: x (Tensor): A square matrix or a batch of square matrices to be raised to power `n`. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. n (int): The exponent. It can be any positive, negative integer or zero. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The n-th power of the matrix (or the batch of matrices) `x`. Its data type should be the same as that of `x`. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1, 2, 3], [1, 4, 9], [1, 8, 27]], dtype='float64') print(paddle.linalg.matrix_power(x, 2)) # [[6. , 34. , 102.], # [14. , 90. , 282.], # [36. , 250., 804.]] print(paddle.linalg.matrix_power(x, 0)) # [[1., 0., 0.], # [0., 1., 0.], # [0., 0., 1.]] print(paddle.linalg.matrix_power(x, -2)) # [[ 12.91666667, -12.75000000, 2.83333333 ], # [-7.66666667 , 8. , -1.83333333 ], # [ 1.80555556 , -1.91666667 , 0.44444444 ]] """ if paddle.in_dynamic_mode(): return _C_ops.matrix_power(x, "n", n) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'matrix_power') check_type(n, 'n', int, 'matrix_power') helper = LayerHelper('matrix_power', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matrix_power', inputs={'X': x}, outputs={'Out': out}, attrs={'n': n}) return out def qr(x, mode="reduced", name=None): r""" Computes the QR decomposition of one matrix or batches of matrice (backward is unsupported now). Args: x (Tensor): The input tensor. Its shape should be `[..., M, N]`, where ... is zero or more batch dimensions. M and N can be arbitrary positive number. The data type of x should be float32 or float64. mode (str, optional): A flag to control the behavior of qr, the default is "reduced". Suppose x's shape is `[..., M, N]` and denoting `K = min(M, N)`: If mode = "reduced", qr op will return reduced Q and R matrices, which means Q's shape is `[..., M, K]` and R's shape is `[..., K, N]`. If mode = "complete", qr op will return complete Q and R matrices, which means Q's shape is `[..., M, M]` and R's shape is `[..., M, N]`. If mode = "r", qr op will only return reduced R matrix, which means R's shape is `[..., K, N]`. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: If mode = "reduced" or mode = "complete", qr will return a two tensor-tuple, which represents Q and R. If mode = "r", qr will return a tensor which represents R. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') q, r = paddle.linalg.qr(x) print (q) print (r) # Q = [[-0.16903085, 0.89708523], # [-0.50709255, 0.27602622], # [-0.84515425, -0.34503278]]) # R = [[-5.91607978, -7.43735744], # [ 0. 
, 0.82807867]]) # one can verify : X = Q * R ; """ if paddle.in_dynamic_mode(): q, r = _C_ops.qr(x, 'mode', mode) if mode == "r": return r else: return q, r check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'qr') check_type(mode, 'mode', str, 'qr') helper = LayerHelper('qr', **locals()) q = helper.create_variable_for_type_inference(dtype=x.dtype) r = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['mode'] = mode helper.append_op( type='qr', inputs={'X': [x]}, outputs={'Q': q, 'R': r}, attrs=attrs) if mode == "r": return r else: return q, r def lu(x, pivot=True, get_infos=False, name=None): r""" Computes the LU factorization of an N-D(N>=2) matrix x. Returns the LU factorization(inplace x) and Pivots. low triangular matrix L and upper triangular matrix U are combined to a single LU matrix. Pivoting is done if pivot is set to True. P mat can be get by pivots: # ones = eye(rows) #eye matrix of rank rows # for i in range(cols): # swap(ones[i], ones[pivots[i]]) # return ones Args: X (Tensor): the tensor to factor of N-dimensions(N>=2). pivot (bool, optional): controls whether pivoting is done. Default: True. get_infos (bool, optional): if set to True, returns an info IntTensor. Default: False. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: factorization (Tensor): LU matrix, the factorization of input X. pivots (IntTensor): the pivots of size(∗(N-2), min(m,n)). `pivots` stores all the intermediate transpositions of rows. The final permutation `perm` could be reconstructed by this, details refer to upper example. infos (IntTensor, optional): if `get_infos` is `True`, this is a tensor of size (∗(N-2)) where non-zero values indicate whether factorization for the matrix or each minibatch has succeeded or failed. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') lu,p,info = paddle.linalg.lu(x, get_infos=True) # >>> lu: # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0.20000000, 0.80000000], # [0.60000000, 0.50000000]]) # >>> p # Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # [3, 3]) # >>> info # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # 0) P,L,U = paddle.linalg.lu_unpack(lu,p) # >>> P # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[0., 1., 0.], # [0., 0., 1.], # [1., 0., 0.]]), # >>> L # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[1. , 0. ], # [0.20000000, 1. ], # [0.60000000, 0.50000000]]), # >>> U # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0. 
, 0.80000000]])) # one can verify : X = P @ L @ U ; """ if paddle.in_dynamic_mode(): LU, Piv, Info = _C_ops.lu(x, 'pivots', pivot) if get_infos: return LU, Piv, Info else: return LU, Piv check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu') helper = LayerHelper('lu', **locals()) lu = helper.create_variable_for_type_inference(dtype=x.dtype) p = helper.create_variable_for_type_inference(dtype='int') info = helper.create_variable_for_type_inference(dtype='int') attrs = dict() attrs['pivots'] = pivot helper.append_op( type='lu', inputs={'X': x}, outputs={'Out': lu, 'Pivots': p, 'Infos': info}, attrs=attrs) if get_infos: return lu, p, info else: return lu, p def lu_unpack(x, y, unpack_ludata=True, unpack_pivots=True, name=None): r""" Unpack L U and P to single matrix tensor . unpack L and U matrix from LU, unpack permutation matrix P from Pivtos . P mat can be get by pivots: # ones = eye(rows) #eye matrix of rank rows # for i in range(cols): # swap(ones[i], ones[pivots[i]]) Args: x (Tensor): The LU tensor get from paddle.linalg.lu, which is combined by L and U. y (Tensor): Pivots get from paddle.linalg.lu. unpack_ludata (bool,optional): whether to unpack L and U from x. Default: True. unpack_pivots (bool, optional): whether to unpack permutation matrix P from Pivtos. Default: True. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: P (Tensor): Permutation matrix P of lu factorization. L (Tensor): The lower triangular matrix tensor of lu factorization. U (Tensor): The upper triangular matrix tensor of lu factorization. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') lu,p,info = paddle.linalg.lu(x, get_infos=True) # >>> lu: # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0.20000000, 0.80000000], # [0.60000000, 0.50000000]]) # >>> p # Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # [3, 3]) # >>> info # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # 0) P,L,U = paddle.linalg.lu_unpack(lu,p) # >>> P # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[0., 1., 0.], # [0., 0., 1.], # [1., 0., 0.]]), # >>> L # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[1. , 0. ], # [0.20000000, 1. ], # [0.60000000, 0.50000000]]), # >>> U # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0. , 0.80000000]])) # one can verify : X = P @ L @ U ; """ if paddle.in_dynamic_mode(): P, L, U = _C_ops.lu_unpack(x, y, 'unpack_ludata', unpack_ludata, 'unpack_pivots', unpack_pivots) return P, L, U check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu_unpack') helper = LayerHelper('lu_unpack', **locals()) p = helper.create_variable_for_type_inference(dtype=x.dtype) l = helper.create_variable_for_type_inference(dtype=x.dtype) u = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['unpack_ludata'] = unpack_ludata attrs['unpack_pivots'] = unpack_pivots helper.append_op( type='lu_unpack', inputs={'X': x, 'Pivots': y}, outputs={'Pmat': p, 'L': l, 'U': u}, attrs=attrs) return p, l, u def eig(x, name=None): """ This API performs the eigenvalue decomposition of a square matrix or a batch of square matrices. .. 
note:: If the matrix is a Hermitian or a real symmetric matrix, please use :ref:`paddle.linalg.eigh` instead, which is much faster. If only eigenvalues is needed, please use :ref:`paddle.linalg.eigvals` instead. If the matrix is of any shape, please use :ref:`paddle.linalg.svd`. This API is only supported on CPU device. The output datatype is always complex for both real and complex input. Args: x (Tensor): A tensor with shape math:`[*, N, N]`, The data type of the x should be one of ``float32``, ``float64``, ``compplex64`` or ``complex128``. name (str, optional): The default value is `None`. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Eigenvalues(Tensors): A tensor with shape math:`[*, N]` refers to the eigen values. Eigenvectors(Tensors): A tensor with shape math:`[*, N, N]` refers to the eigen vectors. Examples: .. code-block:: python import paddle import numpy as np paddle.device.set_device("cpu") x_data = np.array([[1.6707249, 7.2249975, 6.5045543], [9.956216, 8.749598, 6.066444 ], [4.4251957, 1.7983172, 0.370647 ]]).astype("float32") x = paddle.to_tensor(x_data) w, v = paddle.linalg.eig(x) print(w) # Tensor(shape=[3, 3], dtype=complex128, place=CPUPlace, stop_gradient=False, # [[(-0.5061363550800655+0j) , (-0.7971760990842826+0j) , # (0.18518077798279986+0j)], # [(-0.8308237755993192+0j) , (0.3463813401919749+0j) , # (-0.6837005269141947+0j) ], # [(-0.23142567697893396+0j), (0.4944999840400175+0j) , # (0.7058765252952796+0j) ]]) print(v) # Tensor(shape=[3], dtype=complex128, place=CPUPlace, stop_gradient=False, # [ (16.50471283351188+0j) , (-5.5034820550763515+0j) , # (-0.21026087843552282+0j)]) """ if paddle.in_dynamic_mode(): w, v = _C_ops.eig(x) return w, v check_variable_and_dtype( x, 'X', ['float32', 'float64', 'complex64', 'complex128'], 'eig') helper = LayerHelper('eig', **locals()) w = helper.create_variable_for_type_inference(x.dtype) v = helper.create_variable_for_type_inference(x.dtype) inputs = {'X': x} outputs = {'Eigenvalues': w, 'Eigenvectors': v} helper.append_op(type='eig', inputs=inputs, outputs=outputs) return w, v def eigvals(x, name=None): """ Compute the eigenvalues of one or more general matrices. Warning: The gradient kernel of this operator does not yet developed. If you need back propagation through this operator, please replace it with paddle.linalg.eig. Args: x (Tensor): A square matrix or a batch of square matrices whose eigenvalues will be computed. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32, float64, complex64, or complex128. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: A tensor containing the unsorted eigenvalues which has the same batch dimensions with `x`. The eigenvalues are complex-valued even when `x` is real. Examples: .. 
code-block:: python import paddle paddle.set_device("cpu") paddle.seed(1234) x = paddle.rand(shape=[3, 3], dtype='float64') # [[0.02773777, 0.93004224, 0.06911496], # [0.24831591, 0.45733623, 0.07717843], # [0.48016702, 0.14235102, 0.42620817]]) print(paddle.linalg.eigvals(x)) # [(-0.27078833542132674+0j), (0.29962280156230725+0j), (0.8824477020120244+0j)] #complex128 """ check_variable_and_dtype(x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigvals') x_shape = list(x.shape) if len(x_shape) < 2: raise ValueError( "The dimension of Input(x) should be at least 2, but received x's dimention = {}, x's shape = {}". format(len(x_shape), x_shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The last two dimensions of Input(x) should be equal, but received x's shape = {}". format(x_shape)) if paddle.in_dynamic_mode(): return _C_ops.eigvals(x) helper = LayerHelper('eigvals', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type='eigvals', inputs={'X': x}, outputs={'Out': out}) return out def multi_dot(x, name=None): """ Multi_dot is an operator that calculates multiple matrix multiplications. Supports inputs of float16(only GPU support), float32 and float64 dtypes. This function does not support batched inputs. The input tensor in [x] must be 2-D except for the first and last can be 1-D. If the first tensor is a 1-D vector of shape(n, ) it is treated as row vector of shape(1, n), similarly if the last tensor is a 1D vector of shape(n, ), it is treated as a column vector of shape(n, 1). If the first and last tensor are 2-D matrix, then the output is also 2-D matrix, otherwise the output is a 1-D vector. Multi_dot will select the lowest cost multiplication order for calculation. The cost of multiplying two matrices with shapes (a, b) and (b, c) is a * b * c. Given matrices A, B, C with shapes (20, 5), (5, 100), (100, 10) respectively, we can calculate the cost of different multiplication orders as follows: - Cost((AB)C) = 20x5x100 + 20x100x10 = 30000 - Cost(A(BC)) = 5x100x10 + 20x5x10 = 6000 In this case, multiplying B and C first, then multiply A, which is 5 times faster than sequential calculation. Args: x ([Tensor]): The input tensors which is a list Tensor. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The output Tensor. Examples: .. 
code-block:: python import paddle import numpy as np # A * B A_data = np.random.random([3, 4]).astype(np.float32) B_data = np.random.random([4, 5]).astype(np.float32) A = paddle.to_tensor(A_data) B = paddle.to_tensor(B_data) out = paddle.linalg.multi_dot([A, B]) print(out.numpy().shape) # [3, 5] # A * B * C A_data = np.random.random([10, 5]).astype(np.float32) B_data = np.random.random([5, 8]).astype(np.float32) C_data = np.random.random([8, 7]).astype(np.float32) A = paddle.to_tensor(A_data) B = paddle.to_tensor(B_data) C = paddle.to_tensor(C_data) out = paddle.linalg.multi_dot([A, B, C]) print(out.numpy().shape) # [10, 7] """ if paddle.in_dynamic_mode(): return _C_ops.multi_dot(x) check_type(x, 'x', (list, tuple), 'multi_dot') for id, item in enumerate(x): check_variable_and_dtype(item, 'x[' + str(id) + ']', ['float16', 'float32', 'float64'], 'multi_dot') if item.dtype != x[0].dtype: raise TypeError( "All the Tensors in the input must have the same data type.") helper = LayerHelper('multi_dot', **locals()) dtype = helper.input_dtype(input_param_name='x') out = helper.create_variable_for_type_inference(dtype) helper.append_op(type='multi_dot', inputs={"X": x}, outputs={"Out": out}) return out def eigh(x, UPLO='L', name=None): """ Compute the eigenvalues and eigenvectors of a complex Hermitian (conjugate symmetric) or a real symmetric matrix. Args: x (Tensor): A tensor with shape :math:`[*, N, N]` , The data type of the input Tensor x should be one of float32, float64, complex64, complex128. UPLO(str, optional): (string, default 'L'), 'L' represents the lower triangular matrix, "'U' represents the upper triangular matrix.". name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: out_value(Tensor): A Tensor with shape [*, N] and data type of float32 and float64. The eigenvalues of eigh op. out_vector(Tensor): A Tensor with shape [*, N, N] and data type of float32,float64,complex64 and complex128. The eigenvectors of eigh op. Examples: .. code-block:: python import numpy as np import paddle x_data = np.array([[1, -2j], [2j, 5]]) x = paddle.to_tensor(x_data) out_value, out_vector = paddle.linalg.eigh(x, UPLO='L') print(out_value) #[0.17157288, 5.82842712] print(out_vector) #[(-0.9238795325112867+0j), (-0.3826834323650898+0j)], #[ 0.3826834323650898j , -0.9238795325112867j ]] """ if paddle.in_dynamic_mode(): return _C_ops.eigh(x, 'UPLO', UPLO) def __check_input(x, UPLO): x_shape = list(x.shape) if len(x.shape) < 2: raise ValueError( "Input(input) only support >=2 tensor, but received " "length of Input(input) is %s." % len(x.shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The input matrix must be batches of square matrices. But received x's dimention: {}". format(x_shape)) if UPLO != 'L' and UPLO != 'U': raise ValueError( "UPLO must be L or U. 
But received UPLO is: {}".format(UPLO)) __check_input(x, UPLO) helper = LayerHelper('eigh', **locals()) check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigh') out_value = helper.create_variable_for_type_inference(dtype=x.dtype) out_vector = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='eigh', inputs={'X': x}, outputs={'Eigenvalues': out_value, 'Eigenvectors': out_vector}, attrs={'UPLO': UPLO}) return out_value, out_vector def pinv(x, rcond=1e-15, hermitian=False, name=None): r""" Calculate pseudo inverse via SVD(singular value decomposition) of one matrix or batches of regular matrix. .. math:: if hermitian == False: x = u * s * vt (SVD) out = v * 1/s * ut else: x = u * s * ut (eigh) out = u * 1/s * u.conj().transpose(-2,-1) If x is hermitian or symmetric matrix, svd will be replaced with eigh. Args: x(Tensor): The input tensor. Its shape should be (*, m, n) where * is zero or more batch dimensions. m and n can be arbitraty positive number. The data type of x should be float32 or float64 or complex64 or complex128. When data type is complex64 or cpmplex128, hermitian should be set True. rcond(Tensor, optional): the tolerance value to determine when is a singular value zero. Defalut:1e-15. hermitian(bool, optional): indicates whether x is Hermitian if complex or symmetric if real. Default: False. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The tensor with same data type with x. it represents pseudo inverse of x. Its shape should be (*, n, m). Examples: .. code-block:: python import paddle x = paddle.arange(15).reshape((3, 5)).astype('float64') input = paddle.to_tensor(x) out = paddle.linalg.pinv(input) print(input) print(out) # input: # [[0. , 1. , 2. , 3. , 4. ], # [5. , 6. , 7. , 8. , 9. 
], # [10., 11., 12., 13., 14.]] # out: # [[-0.22666667, -0.06666667, 0.09333333], # [-0.12333333, -0.03333333, 0.05666667], # [-0.02000000, 0.00000000, 0.02000000], # [ 0.08333333, 0.03333333, -0.01666667], # [ 0.18666667, 0.06666667, -0.05333333]] # one can verify : x * out * x = x ; # or out * x * out = x ; """ if paddle.in_dynamic_mode(): if not hermitian: # combine svd and matmul op u, s, vt = _C_ops.svd(x, 'full_matrices', False) max_singular_val = _C_ops.reduce_max(s, 'dim', [-1], 'keep_dim', True, \ 'reduce_all', False) rcond = paddle.to_tensor(rcond, dtype=x.dtype) cutoff = rcond * max_singular_val y = float('inf') y = paddle.to_tensor(y, dtype=x.dtype) condition = s > cutoff cond_int = layers.cast(condition, s.dtype) cond_not_int = layers.cast(layers.logical_not(condition), s.dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st, _ = _C_ops.unsqueeze2(singular, 'axes', [-2]) dims = list(range(len(vt.shape))) perm = dims[:-2] + [dims[-1]] + [dims[-2]] v, _ = _C_ops.transpose2(vt, 'axis', perm) out_1 = v * st out_2 = _C_ops.matmul_v2(out_1, u, 'trans_x', False, 'trans_y', True) return out_2 else: # combine eigh and matmul op s, u = _C_ops.eigh(x, 'UPLO', 'L') s_abs = paddle.abs(s) max_singular_val = _C_ops.reduce_max(s_abs, 'dim', [-1], 'keep_dim', True, \ 'reduce_all', False) rcond = paddle.to_tensor(rcond, dtype=s.dtype) cutoff = rcond * max_singular_val y = float('inf') y = paddle.to_tensor(y, dtype=s.dtype) condition = s_abs > cutoff cond_int = layers.cast(condition, s.dtype) cond_not_int = layers.cast(layers.logical_not(condition), s.dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st, _ = _C_ops.unsqueeze2(singular, 'axes', [-2]) out_1 = u * st u_conj = _C_ops.conj(u) out_2 = _C_ops.matmul_v2(out_1, u_conj, 'trans_x', False, 'trans_y', True) return out_2 else: if not hermitian: helper = LayerHelper('pinv', **locals()) dtype = x.dtype check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'pinv') u = helper.create_variable_for_type_inference(dtype) s = helper.create_variable_for_type_inference(dtype) vt = helper.create_variable_for_type_inference(dtype) helper.append_op( type='svd', inputs={'X': [x]}, outputs={'U': u, 'VH': vt, 'S': s}, attrs={'full_matrices': False}, ) max_singular_val = helper.create_variable_for_type_inference(dtype) helper.append_op( type='reduce_max', inputs={'X': s}, outputs={'Out': max_singular_val}, attrs={'dim': [-1], 'keep_dim': True, 'reduce_all': False}) rcond = layers.fill_constant(shape=[1], value=rcond, dtype=dtype) cutoff = rcond * max_singular_val y = float('inf') y = layers.fill_constant(shape=[1], value=y, dtype=dtype) condition = s > cutoff cond_int = layers.cast(condition, dtype) cond_not_int = layers.cast(layers.logical_not(condition), dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st = helper.create_variable_for_type_inference(dtype=dtype) st_shape = helper.create_variable_for_type_inference(dtype=dtype) helper.append_op( type='unsqueeze2', inputs={'X': singular}, attrs={'axes': [-2]}, outputs={'Out': st, 'XShape': st_shape}) dims = list(range(len(vt.shape))) perm = dims[:-2] + [dims[-1]] + [dims[-2]] v = helper.create_variable_for_type_inference(dtype) v_shape = helper.create_variable_for_type_inference(dtype) helper.append_op( 
type='transpose2', inputs={'X': [vt]}, outputs={'Out': [v], 'XShape': [v_shape]}, attrs={'axis': perm}) out_1 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='elementwise_mul', inputs={'X': v, 'Y': st}, outputs={'Out': out_1}, attrs={'axis': -1, 'use_mkldnn': False}) out_1 = helper.append_activation(out_1) out_2 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='matmul_v2', inputs={'X': out_1, 'Y': u}, outputs={'Out': out_2}, attrs={'trans_x': False, 'trans_y': True}, ) return out_2 else: helper = LayerHelper('pinv', **locals()) dtype = x.dtype check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'pinv') if dtype == paddle.complex128: s_type = 'float64' elif dtype == paddle.complex64: s_type = 'float32' else: s_type = dtype u = helper.create_variable_for_type_inference(dtype) s = helper.create_variable_for_type_inference(s_type) helper.append_op( type='eigh', inputs={'X': x}, outputs={'Eigenvalues': s, 'Eigenvectors': u}, attrs={'UPLO': 'L'}) s_abs = helper.create_variable_for_type_inference(s_type) helper.append_op( type='abs', inputs={'X': s}, outputs={'Out': s_abs}) max_singular_val = helper.create_variable_for_type_inference(s_type) helper.append_op( type='reduce_max', inputs={'X': s_abs}, outputs={'Out': max_singular_val}, attrs={'dim': [-1], 'keep_dim': True, 'reduce_all': False}) rcond = layers.fill_constant(shape=[1], value=rcond, dtype=s_type) cutoff = rcond * max_singular_val y = float('inf') y = layers.fill_constant(shape=[1], value=y, dtype=s_type) condition = s_abs > cutoff cond_int = layers.cast(condition, s_type) cond_not_int = layers.cast(layers.logical_not(condition), s_type) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st = helper.create_variable_for_type_inference(dtype=s_type) st_shape = helper.create_variable_for_type_inference(dtype=s_type) helper.append_op( type='unsqueeze2', inputs={'X': singular}, attrs={'axes': [-2]}, outputs={'Out': st, 'XShape': st_shape}) out_1 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='elementwise_mul', inputs={'X': u, 'Y': st}, outputs={'Out': out_1}, attrs={'axis': -1, 'use_mkldnn': False}) out_1 = helper.append_activation(out_1) u_conj = helper.create_variable_for_type_inference(dtype) helper.append_op( type='conj', inputs={'X': u}, outputs={'Out': [u_conj]}) out_2 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='matmul_v2', inputs={'X': out_1, 'Y': u_conj}, outputs={'Out': out_2}, attrs={'trans_x': False, 'trans_y': True}, ) return out_2 def solve(x, y, name=None): r""" Computes the solution of a square system of linear equations with a unique solution for input 'X' and 'Y'. Let :math: `X` be a sqaure matrix or a batch of square matrices, :math:`Y` be a vector/matrix or a batch of vectors/matrices, the equation should be: .. math:: Out = X^-1 * Y Specifically, - This system of linear equations has one solution if and only if input 'X' is invertible. Args: x (Tensor): A square matrix or a batch of square matrices. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): A vector/matrix or a batch of vectors/matrices. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. name(str, optional): Name for the operation (optional, default is None). 
For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of a square system of linear equations with a unique solution for input 'x' and 'y'. Its data type should be the same as that of `x`. Examples: .. code-block:: python # a square system of linear equations: # 2*X0 + X1 = 9 # X0 + 2*X1 = 8 import paddle import numpy as np np_x = np.array([[3, 1],[1, 2]]) np_y = np.array([9, 8]) x = paddle.to_tensor(np_x, dtype="float64") y = paddle.to_tensor(np_y, dtype="float64") out = paddle.linalg.solve(x, y) print(out) # [2., 3.]) """ if paddle.in_dynamic_mode(): return _C_ops.solve(x, y) inputs = {"X": [x], "Y": [y]} helper = LayerHelper("solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type="solve", inputs={"X": x, "Y": y}, outputs={"Out": out}) return out def triangular_solve(x, y, upper=True, transpose=False, unitriangular=False, name=None): r""" Computes the solution of a system of equations with a triangular coefficient matrix `x` and multiple right-hand sides `y` . Input `x` and `y` is 2D matrices or batches of 2D matrices. If the inputs are batches, the outputs is also batches. Args: x (Tensor): The input triangular coefficient matrix. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. upper (bool, optional): Whether to solve the upper-triangular system of equations (default) or the lower-triangular system of equations. Default: True. transpose (bool, optional): whether `x` should be transposed before calculation. Default: False. unitriangular (bool, optional): whether `x` is unit triangular. If True, the diagonal elements of `x` are assumed to be 1 and not referenced from `x` . Default: False. name(str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of the system of equations. Its data type should be the same as that of `x`. Examples: .. code-block:: python # a square system of linear equations: # x1 + x2 + x3 = 0 # 2*x2 + x3 = -9 # -x3 = 5 import paddle import numpy as np x = paddle.to_tensor([[1, 1, 1], [0, 2, 1], [0, 0,-1]], dtype="float64") y = paddle.to_tensor([[0], [-9], [5]], dtype="float64") out = paddle.linalg.triangular_solve(x, y, upper=True) print(out) # [7, -2, -5] """ if paddle.in_dynamic_mode(): return _C_ops.triangular_solve(x, y, 'upper', upper, 'transpose', transpose, 'unitriangular', unitriangular) inputs = {"X": [x], "Y": [y]} helper = LayerHelper("triangular_solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'triangular_solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'triangular_solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='triangular_solve', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs={ 'upper': upper, 'transpose': transpose, 'unitriangular': unitriangular }) return out def cholesky_solve(x, y, upper=False, name=None): r""" Solves a linear system of equations A @ X = B, given A's Cholesky factor matrix u and matrix B. Input `x` and `y` is 2D matrices or batches of 2D matrices. 
If the inputs are batches, the outputs is also batches. Args: x (Tensor): The input matrix which is upper or lower triangular Cholesky factor of square matrix A. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. upper (bool, optional): whether to consider the Cholesky factor as a lower or upper triangular matrix. Default: False. name(str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of the system of equations. Its data type is the same as that of `x`. Examples: .. code-block:: python import paddle u = paddle.to_tensor([[1, 1, 1], [0, 2, 1], [0, 0,-1]], dtype="float64") b = paddle.to_tensor([[0], [-9], [5]], dtype="float64") out = paddle.linalg.cholesky_solve(b, u, upper=True) print(out) # [-2.5, -7, 9.5] """ if paddle.in_dynamic_mode(): return _C_ops.cholesky_solve(x, y, 'upper', upper) helper = LayerHelper("cholesky_solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'cholesky_solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'cholesky_solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='cholesky_solve', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs={'upper': upper}) return out def eigvalsh(x, UPLO='L', name=None): """ Computes the eigenvalues of a complex Hermitian (conjugate symmetric) or a real symmetric matrix. Args: x (Tensor): A tensor with shape :math:`[_, M, M]` , The data type of the input Tensor x should be one of float32, float64, complex64, complex128. UPLO(str, optional): Lower triangular part of a (‘L’, default) or the upper triangular part (‘U’). name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor eigenvalues in ascending order. Examples: .. code-block:: python import numpy as np import paddle x_data = np.array([[1, -2j], [2j, 5]]) x = paddle.to_tensor(x_data) out_value = paddle.eigvalsh(x, UPLO='L') print(out_value) #[0.17157288, 5.82842712] """ if paddle.in_dynamic_mode(): is_test = x.stop_gradient values, _ = _C_ops.eigvalsh(x, 'UPLO', UPLO, 'is_test', is_test) return values def __check_input(x, UPLO): x_shape = list(x.shape) if len(x.shape) < 2: raise ValueError( "Input(input) only support >=2 tensor, but received " "length of Input(input) is %s." % len(x.shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The input matrix must be batches of square matrices. But received x's dimention: {}". format(x_shape)) if UPLO != 'L' and UPLO != 'U': raise ValueError( "UPLO must be L or U. 
But received UPLO is: {}".format(UPLO)) __check_input(x, UPLO) helper = LayerHelper('eigvalsh', **locals()) check_variable_and_dtype(x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigvalsh') out_value = helper.create_variable_for_type_inference(dtype=x.dtype) out_vector = helper.create_variable_for_type_inference(dtype=x.dtype) is_test = x.stop_gradient helper.append_op( type='eigvalsh', inputs={'X': x}, outputs={'Eigenvalues': out_value, 'Eigenvectors': out_vector}, attrs={'UPLO': UPLO, 'is_test': is_test}) return out_value def lstsq(x, y, rcond=None, driver=None, name=None): """ Computes a solution to the least squares problem of a system of linear equations. Args: x (Tensor): A tensor with shape ``(*, M, N)`` , the data type of the input Tensor ``x`` should be one of float32, float64. y (Tensor): A tensor with shape ``(*, M, K)`` , the data type of the input Tensor ``y`` should be one of float32, float64. rcond(float, optional): The default value is None. A float pointing number used to determine the effective rank of ``x``. If ``rcond`` is None, it will be set to max(M, N) times the machine precision of x_dtype. driver(str, optional): The default value is None. The name of LAPACK method to be used. For CPU inputs the valid values are ‘gels’, ‘gelsy’, ‘gelsd, ‘gelss’. For CUDA input, the only valid driver is ‘gels’. If ``driver`` is None, ‘gelsy’ is used for CPU inputs and ‘gels’ for CUDA inputs. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tuple: A tuple of 4 Tensors which is (``solution``, ``residuals``, ``rank``, ``singular_values``). ``solution`` is a tensor with shape ``(*, N, K)``, meaning the least squares solution. ``residuals`` is a tensor with shape ``(*, K)``, meaning the squared residuals of the solutions, which is computed when M > N and every matrix in ``x`` is full-rank, otherwise return an empty tensor. ``rank`` is a tensor with shape ``(*)``, meaning the ranks of the matrices in ``x``, which is computed when ``driver`` in (‘gelsy’, ‘gelsd’, ‘gelss’), otherwise return an empty tensor. ``singular_values`` is a tensor with shape ``(*, min(M, N))``, meaning singular values of the matrices in ``x``, which is computed when ``driver`` in (‘gelsd’, ‘gelss’), otherwise return an empty tensor. Examples: .. code-block:: python import paddle paddle.set_device("cpu") x = paddle.to_tensor([[1, 3], [3, 2], [5, 6.]]) y = paddle.to_tensor([[3, 4, 6], [5, 3, 4], [1, 2, 1.]]) results = paddle.linalg.lstsq(x, y, driver="gelsd") print(results[0]) # [[ 0.78350395, -0.22165027, -0.62371236], # [-0.11340097, 0.78866047, 1.14948535]] print(results[1]) # [19.81443405, 10.43814468, 30.56185532]) print(results[2]) # 2 print(results[3]) # [9.03455734, 1.54167950] x = paddle.to_tensor([[10, 2, 3], [3, 10, 5], [5, 6, 12.]]) y = paddle.to_tensor([[4, 2, 9], [2, 0, 3], [2, 5, 3.]]) results = paddle.linalg.lstsq(x, y, driver="gels") print(results[0]) # [[ 0.39386186, 0.10230173, 0.93606132], # [ 0.10741687, -0.29028133, 0.11892585], # [-0.05115091, 0.51918161, -0.19948854]] print(results[1]) # [] """ device = paddle.get_device() if device == "cpu": if driver not in (None, "gels", "gelss", "gelsd", "gelsy"): raise ValueError( "Only support valid driver is 'gels', 'gelss', 'gelsd', 'gelsy' or None for CPU inputs. But got {}". 
format(driver)) driver = "gelsy" if driver is None else driver elif "gpu" in device: if driver not in (None, "gels"): raise ValueError( "Only support valid driver is 'gels' or None for CUDA inputs. But got {}". format(driver)) driver = "gels" if driver is None else driver else: raise RuntimeError("Only support lstsq api for CPU or CUDA device.") if x.dtype == y.dtype and x.dtype in (paddle.float32, paddle.float64): pass else: raise ValueError( "Only support x and y have the same dtype such as 'float32' and 'float64'." ) if rcond is None: if x.dtype == paddle.float32: rcond = 1e-7 * max(x.shape[-2], x.shape[-1]) elif x.dtype == paddle.float64: rcond = 1e-15 * max(x.shape[-2], x.shape[-1]) if paddle.in_dynamic_mode(): solution, rank, singular_values = _C_ops.lstsq(x, y, "rcond", rcond, "driver", driver) if x.shape[-2] > x.shape[-1]: matmul_out = _varbase_creator(dtype=x.dtype) _C_ops.matmul(x, solution, matmul_out, 'trans_x', False, 'trans_y', False) minus_out = _C_ops.elementwise_sub(matmul_out, y) pow_out = _C_ops.pow(minus_out, 'factor', 2) residuals = _C_ops.reduce_sum(pow_out, 'dim', [-2], 'keepdim', False, 'reduce_all', False) else: residuals = paddle.empty(shape=[0], dtype=x.dtype) if driver == "gels": rank = paddle.empty(shape=[0], dtype=paddle.int32) singular_values = paddle.empty(shape=[0], dtype=x.dtype) elif driver == "gelsy": singular_values = paddle.empty(shape=[0], dtype=x.dtype) return solution, residuals, rank, singular_values helper = LayerHelper('lstsq', **locals()) check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq') check_variable_and_dtype( y, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq') solution = helper.create_variable_for_type_inference(dtype=x.dtype) residuals = helper.create_variable_for_type_inference(dtype=x.dtype) rank = helper.create_variable_for_type_inference(dtype=paddle.int32) singular_values = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='lstsq', inputs={'X': x, 'Y': y}, outputs={ 'Solution': solution, 'Rank': rank, 'SingularValues': singular_values }, attrs={'rcond': rcond, 'driver': driver}) matmul_out = helper.create_variable_for_type_inference(dtype=x.dtype) minus_out = helper.create_variable_for_type_inference(dtype=x.dtype) pow_out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matmul_v2', inputs={'X': x, 'Y': solution}, outputs={'Out': matmul_out}, attrs={ 'trans_x': False, 'trans_y': False, }) helper.append_op( type='elementwise_sub', inputs={'X': matmul_out, 'Y': y}, outputs={'Out': minus_out}) helper.append_op( type='pow', inputs={'X': minus_out}, outputs={'Out': pow_out}, attrs={'factor': 2}) helper.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': residuals}, attrs={'dim': [-2], 'keep_dim': False, 'reduce_all': False}) if driver == "gels": rank = paddle.static.data(name='rank', shape=[0]) singular_values = paddle.static.data(name='singular_values', shape=[0]) elif driver == "gelsy": singular_values = paddle.static.data(name='singular_values', shape=[0]) return solution, residuals, rank, singular_values
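The dynamic-graph branch above only fills ``residuals`` when the system is overdetermined (M > N); the values are the per-column sums of squared errors of ``x @ solution - y``. A minimal sketch of that relationship, assuming a CPU device and that NumPy is available (the cross-check below is illustrative, not part of the library):

.. code-block:: python

    import numpy as np
    import paddle

    paddle.set_device("cpu")

    # Overdetermined system (3 equations, 2 unknowns), so residuals are returned.
    x = paddle.to_tensor([[1., 3.], [3., 2.], [5., 6.]], dtype='float64')
    y = paddle.to_tensor([[3.], [5.], [1.]], dtype='float64')

    solution, residuals, rank, singular_values = paddle.linalg.lstsq(x, y, driver="gelsd")

    # residuals equals the squared error of the fitted solution, summed over rows.
    manual = ((paddle.matmul(x, solution) - y) ** 2).sum(axis=-2)
    print(np.allclose(residuals.numpy(), manual.numpy()))  # True

    # The solution agrees with NumPy's least-squares solver.
    ref = np.linalg.lstsq(x.numpy(), y.numpy(), rcond=None)[0]
    print(np.allclose(solution.numpy(), ref))  # True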
lstsq
Computes a solution to the least squares problem of a system of linear equations.

    Args:
        x (Tensor): A tensor with shape ``(*, M, N)``, the data type of the input Tensor ``x``
            should be one of float32, float64.
        y (Tensor): A tensor with shape ``(*, M, K)``, the data type of the input Tensor ``y``
            should be one of float32, float64.
        rcond (float, optional): The default value is None. A floating point number used to
            determine the effective rank of ``x``. If ``rcond`` is None, it will be set to
            max(M, N) times the machine precision of x_dtype.
        driver (str, optional): The default value is None. The name of the LAPACK method to be used.
            For CPU inputs the valid values are ‘gels’, ‘gelsy’, ‘gelsd’, ‘gelss’. For CUDA input,
            the only valid driver is ‘gels’. If ``driver`` is None, ‘gelsy’ is used for CPU inputs
            and ‘gels’ for CUDA inputs.
        name (str, optional): The default value is None. Normally there is no need for user to set
            this property. For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tuple: A tuple of 4 Tensors which is (``solution``, ``residuals``, ``rank``, ``singular_values``).
        ``solution`` is a tensor with shape ``(*, N, K)``, meaning the least squares solution.
        ``residuals`` is a tensor with shape ``(*, K)``, meaning the squared residuals of the solutions,
        which is computed when M > N and every matrix in ``x`` is full-rank, otherwise returns an empty tensor.
        ``rank`` is a tensor with shape ``(*)``, meaning the ranks of the matrices in ``x``,
        which is computed when ``driver`` is in (‘gelsy’, ‘gelsd’, ‘gelss’), otherwise returns an empty tensor.
        ``singular_values`` is a tensor with shape ``(*, min(M, N))``, meaning the singular values of the
        matrices in ``x``, which is computed when ``driver`` is in (‘gelsd’, ‘gelss’), otherwise returns
        an empty tensor.

    Examples:
        .. code-block:: python

            import paddle

            paddle.set_device("cpu")
            x = paddle.to_tensor([[1, 3], [3, 2], [5, 6.]])
            y = paddle.to_tensor([[3, 4, 6], [5, 3, 4], [1, 2, 1.]])
            results = paddle.linalg.lstsq(x, y, driver="gelsd")
            print(results[0])
            # [[ 0.78350395, -0.22165027, -0.62371236],
            #  [-0.11340097,  0.78866047,  1.14948535]]
            print(results[1])
            # [19.81443405, 10.43814468, 30.56185532]
            print(results[2])
            # 2
            print(results[3])
            # [9.03455734, 1.54167950]

            x = paddle.to_tensor([[10, 2, 3], [3, 10, 5], [5, 6, 12.]])
            y = paddle.to_tensor([[4, 2, 9], [2, 0, 3], [2, 5, 3.]])
            results = paddle.linalg.lstsq(x, y, driver="gels")
            print(results[0])
            # [[ 0.39386186,  0.10230173,  0.93606132],
            #  [ 0.10741687, -0.29028133,  0.11892585],
            #  [-0.05115091,  0.51918161, -0.19948854]]
            print(results[1])
            # []
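The ``rcond`` default described above is max(M, N) times the machine precision of the input dtype; the implementation realizes this with fixed constants (1e-7 for float32, 1e-15 for float64). A small sketch of that rule (``default_rcond`` is an illustrative helper, not a Paddle API):

.. code-block:: python

    import paddle

    def default_rcond(x):
        # Mirrors the fallback used by paddle.linalg.lstsq when rcond is None:
        # max(M, N) times an approximate machine epsilon for the dtype.
        eps = 1e-7 if x.dtype == paddle.float32 else 1e-15
        return eps * max(x.shape[-2], x.shape[-1])

    x32 = paddle.rand([100, 20], dtype='float32')
    x64 = paddle.rand([100, 20], dtype='float64')
    print(default_rcond(x32))  # 1e-05
    print(default_rcond(x64))  # 1e-13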
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from ..fluid.layer_helper import LayerHelper from ..framework import _varbase_creator, _dygraph_tracer from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype from ..static import Variable from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode from ..fluid.layers import transpose, cast # noqa: F401 from ..fluid import layers import paddle from paddle.common_ops_import import core from paddle.common_ops_import import VarDesc from paddle import _C_ops __all__ = [] def matmul(x, y, transpose_x=False, transpose_y=False, name=None): """ Applies matrix multiplication to two tensors. `matmul` follows the complete broadcast rules, and its behavior is consistent with `np.matmul`. Currently, the input tensors' number of dimensions can be any, `matmul` can be used to achieve the `dot`, `matmul` and `batchmatmul`. The actual behavior depends on the shapes of :math:`x`, :math:`y` and the flag values of :attr:`transpose_x`, :attr:`transpose_y`. Specifically: - If a transpose flag is specified, the last two dimensions of the tensor are transposed. If the tensor is ndim-1 of shape, the transpose is invalid. If the tensor is ndim-1 of shape :math:`[D]`, then for :math:`x` it is treated as :math:`[1, D]`, whereas for :math:`y` it is the opposite: It is treated as :math:`[D, 1]`. The multiplication behavior depends on the dimensions of `x` and `y`. Specifically: - If both tensors are 1-dimensional, the dot product result is obtained. - If both tensors are 2-dimensional, the matrix-matrix product is obtained. - If the `x` is 1-dimensional and the `y` is 2-dimensional, a `1` is prepended to its dimension in order to conduct the matrix multiply. After the matrix multiply, the prepended dimension is removed. - If the `x` is 2-dimensional and `y` is 1-dimensional, the matrix-vector product is obtained. - If both arguments are at least 1-dimensional and at least one argument is N-dimensional (where N > 2), then a batched matrix multiply is obtained. If the first argument is 1-dimensional, a 1 is prepended to its dimension in order to conduct the batched matrix multiply and removed after. If the second argument is 1-dimensional, a 1 is appended to its dimension for the purpose of the batched matrix multiple and removed after. The non-matrix (exclude the last two dimensions) dimensions are broadcasted according the broadcast rule. For example, if input is a (j, 1, n, m) tensor and the other is a (k, m, p) tensor, out will be a (j, k, n, p) tensor. Args: x (Tensor): The input tensor which is a Tensor. y (Tensor): The input tensor which is a Tensor. transpose_x (bool): Whether to transpose :math:`x` before multiplication. transpose_y (bool): Whether to transpose :math:`y` before multiplication. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The output Tensor. Examples: .. 
code-block:: python import paddle import numpy as np # vector * vector x_data = np.random.random([10]).astype(np.float32) y_data = np.random.random([10]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [1] # matrix * vector x_data = np.random.random([10, 5]).astype(np.float32) y_data = np.random.random([5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10] # batched matrix * broadcasted vector x_data = np.random.random([10, 5, 2]).astype(np.float32) y_data = np.random.random([2]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 5] # batched matrix * batched matrix x_data = np.random.random([10, 5, 2]).astype(np.float32) y_data = np.random.random([10, 2, 5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 5, 5] # batched matrix * broadcasted matrix x_data = np.random.random([10, 1, 5, 2]).astype(np.float32) y_data = np.random.random([1, 3, 2, 5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 3, 5, 5] """ if in_dygraph_mode(): return _C_ops.final_state_matmul(x, y, transpose_x, transpose_y) if _in_legacy_dygraph(): op_type = 'matmul_v2' op = getattr(_C_ops, op_type) return op(x, y, 'trans_x', transpose_x, 'trans_y', transpose_y) attrs = { 'trans_x': transpose_x, 'trans_y': transpose_y, } def __check_input(x, y): var_names = {'x': x, 'y': y} for name, val in var_names.items(): check_variable_and_dtype( val, name, ['float16', 'float32', 'float64', 'complex64', 'complex128'], 'matmul') __check_input(x, y) helper = LayerHelper('matmul_v2', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matmul_v2', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs=attrs) return out def norm(x, p='fro', axis=None, keepdim=False, name=None): """ Returns the matrix norm (Frobenius) or vector norm (the 1-norm, the Euclidean or 2-norm, and in general the p-norm for p > 0) of a given tensor. .. note:: This norm API is different from `numpy.linalg.norm`. This api supports high-order input tensors (rank >= 3), and certain axis need to be pointed out to calculate the norm. But `numpy.linalg.norm` only supports 1-D vector or 2-D matrix as input tensor. For p-order matrix norm, this api actually treats matrix as a flattened vector to calculate the vector norm, NOT REAL MATRIX NORM. Args: x (Tensor): The input tensor could be N-D tensor, and the input data type could be float32 or float64. p (float|string, optional): Order of the norm. Supported values are `fro`, `0`, `1`, `2`, `inf`, `-inf` and any positive real number yielding the corresponding p-norm. Not supported: ord < 0 and nuclear norm. Default value is `fro`. axis (int|list|tuple, optional): The axis on which to apply norm operation. If axis is int or list(int)/tuple(int) with only one element, the vector norm is computed over the axis. If `axis < 0`, the dimension to norm operation is rank(input) + axis. If axis is a list(int)/tuple(int) with two elements, the matrix norm is computed over the axis. Defalut value is `None`. keepdim (bool, optional): Whether to reserve the reduced dimension in the output Tensor. 
The result tensor will have fewer dimension than the :attr:`input` unless :attr:`keepdim` is true, default value is False. name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: results of norm operation on the specified axis of input tensor, it's data type is the same as input's Tensor. Examples: .. code-block:: python import paddle import numpy as np shape=[2, 3, 4] np_input = np.arange(24).astype('float32') - 12 np_input = np_input.reshape(shape) x = paddle.to_tensor(np_input) #[[[-12. -11. -10. -9.] [ -8. -7. -6. -5.] [ -4. -3. -2. -1.]] # [[ 0. 1. 2. 3.] [ 4. 5. 6. 7.] [ 8. 9. 10. 11.]]] # compute frobenius norm along last two dimensions. out_fro = paddle.linalg.norm(x, p='fro', axis=[0,1]) # out_fro.numpy() [17.435596 16.911535 16.7332 16.911535] # compute 2-order vector norm along last dimension. out_pnorm = paddle.linalg.norm(x, p=2, axis=-1) #out_pnorm.numpy(): [[21.118711 13.190906 5.477226] # [ 3.7416575 11.224972 19.131126]] # compute 2-order norm along [0,1] dimension. out_pnorm = paddle.linalg.norm(x, p=2, axis=[0,1]) #out_pnorm.numpy(): [17.435596 16.911535 16.7332 16.911535] # compute inf-order norm out_pnorm = paddle.linalg.norm(x, p=np.inf) #out_pnorm.numpy() = [12.] out_pnorm = paddle.linalg.norm(x, p=np.inf, axis=0) #out_pnorm.numpy(): [[12. 11. 10. 9.] [8. 7. 6. 7.] [8. 9. 10. 11.]] # compute -inf-order norm out_pnorm = paddle.linalg.norm(x, p=-np.inf) #out_pnorm.numpy(): [0.] out_pnorm = paddle.linalg.norm(x, p=-np.inf, axis=0) #out_pnorm.numpy(): [[0. 1. 2. 3.] [4. 5. 6. 5.] [4. 3. 2. 1.]] """ def frobenius_norm(input, dim=None, keepdim=False, name=None): """ The frobenius norm OP is to calculate the frobenius norm of certain two dimensions of Tensor `input`. Args: input (Variable): Tensor, data type float32, float64. dim (list, optional): None for last two dimensions. keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False. """ if dim is not None and not (isinstance(dim, list) and len(dim) == 2): raise ValueError( "The dim of frobenius norm op should be None or two elements list!" ) if paddle.in_dynamic_mode(): if dim is None: return _C_ops.frobenius_norm(input, 'keep_dim', keepdim, 'reduce_all', True) return _C_ops.frobenius_norm(input, 'dim', dim, 'keep_dim', keepdim, 'reduce_all', False) attrs = {'dim': dim, 'keep_dim': keepdim, 'reduce_all': False} if dim is None: attrs['reduce_all'] = True check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'frobenius_norm') helper = LayerHelper('frobenius_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op( type='frobenius_norm', inputs={'X': input}, outputs={'Out': out}, attrs=attrs) return out def vector_norm(input, porder=None, axis=None, keepdim=False, asvector=False, name=None): """ Calculate the p-order vector norm for certain dimension of Tensor `input`. Args: input (Variable): Tensor, data type float32, float64. porder (float, optional): None for porder=2.0. axis (int, optional): None for last dimension. keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False. 
""" if paddle.in_dynamic_mode(): if axis is None: axis = -1 return _C_ops.p_norm(input, 'porder', porder, 'axis', axis, 'keepdim', keepdim, 'asvector', asvector) if porder is not None: check_type(porder, 'porder', (float, int), 'p_norm') if axis is not None: check_type(axis, 'axis', (int), 'p_norm') check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'p_norm') attrs = { 'axis': axis if axis is not None else -1, 'porder': float(porder) if porder is not None else 2.0, 'keepdim': keepdim, 'asvector': asvector, 'epsilon': 1e-12, } helper = LayerHelper('p_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op( type='p_norm', inputs={'X': input}, outputs={'Out': out}, attrs=attrs) return out def inf_norm(input, porder=None, axis=axis, keepdim=False, asvector=False, name=None): helper = LayerHelper('frobenius_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op(type='abs', inputs={'X': input}, outputs={'Out': out}) reduce_out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) reduce_all = True if axis == None or axis == [] or asvector == True else False axis = axis if axis != None and axis != [] else [0] reduce_type = 'reduce_max' if porder == np.float( 'inf') else 'reduce_min' helper.append_op( type=reduce_type, inputs={'X': out}, outputs={'Out': reduce_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) return reduce_out def p_matrix_norm(input, porder=1., axis=axis, keepdim=False, name=None): """ NOTE: This function actually treats the matrix as flattened vector to calculate vector norm instead of matrix norm. """ block = LayerHelper('norm', **locals()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) abs_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='abs', inputs={'X': input}, outputs={'Out': abs_out}) pow_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='pow', inputs={'X': abs_out}, outputs={'Out': pow_out}, attrs={'factor': porder}) sum_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': sum_out}, attrs={ 'dim': axis, 'keep_dim': keepdim, 'reduce_all': True if axis is None else False }) porder block.append_op( type='pow', inputs={'X': sum_out}, outputs={'Out': out}, attrs={'factor': float(1. / porder)}) return out if axis is None and p is not None: if isinstance(p, str): if p == "fro": return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name) else: raise ValueError( "only valid string values are 'fro', found {}".format(p)) elif isinstance(p, (int, float)): return vector_norm( x, porder=p, axis=axis, keepdim=keepdim, asvector=True, name=name) else: raise ValueError("only valid p type is string or float, found {}". 
format(type(p))) if isinstance(axis, tuple): axis = list(axis) if isinstance(axis, list) and len(axis) == 1: axis = axis[0] #calculate vector norm, where axis is int or list with only one integer if isinstance(axis, int): if isinstance(p, str): if p == "fro": return vector_norm( x, porder=2, axis=axis, keepdim=keepdim, asvector=False, name=name) else: raise ValueError( "only valid string values are 'fro', found {}".format(p)) elif isinstance(p, (int, float)): return vector_norm( x, axis=axis, porder=p, keepdim=keepdim, asvector=False, name=name) else: raise ValueError( "unspport p for p-order vector norm. except float, found {}". format(p)) #calculate matrix norm, where axis is list with two integers elif isinstance(axis, list) and len(axis) == 2: if p == "fro": return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name) elif p == np.inf or p == -np.inf: return inf_norm(x, porder=p, axis=axis, keepdim=keepdim, name=name) elif p == 0: raise ValueError( "just suport axis type int or list (length of list <=1) if p = 0, found {}". format(axis)) else: return p_matrix_norm( x, porder=p, axis=axis, keepdim=keepdim, name=name) else: raise ValueError( "except axis type int or list (length of list <=2), found {}". format(axis)) def dist(x, y, p=2, name=None): r""" This OP returns the p-norm of (x - y). It is not a norm in a strict sense, only as a measure of distance. The shapes of x and y must be broadcastable. The definition is as follows, for details, please refer to the `numpy's broadcasting <https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`_: - Each input has at least one dimension. - Match the two input dimensions from back to front, the dimension sizes must either be equal, one of them is 1, or one of them does not exist. Where, z = x - y, the shapes of x and y are broadcastable, then the shape of z can be obtained as follows: 1. If the number of dimensions of x and y are not equal, prepend 1 to the dimensions of the tensor with fewer dimensions. For example, The shape of x is [8, 1, 6, 1], the shape of y is [7, 1, 5], prepend 1 to the dimension of y. x (4-D Tensor): 8 x 1 x 6 x 1 y (4-D Tensor): 1 x 7 x 1 x 5 2. Determine the size of each dimension of the output z: choose the maximum value from the two input dimensions. z (4-D Tensor): 8 x 7 x 6 x 5 If the number of dimensions of the two inputs are the same, the size of the output can be directly determined in step 2. When p takes different values, the norm formula is as follows: When p = 0, defining $0^0=0$, the zero-norm of z is simply the number of non-zero elements of z. .. math:: ||z||_{0}=\lim_{p \\rightarrow 0}\sum_{i=1}^{m}|z_i|^{p} When p = inf, the inf-norm of z is the maximum element of z. .. math:: ||z||_\infty=\max_i |z_i| When p = -inf, the negative-inf-norm of z is the minimum element of z. .. math:: ||z||_{-\infty}=\min_i |z_i| Otherwise, the p-norm of z follows the formula, .. math:: ||z||_{p}=(\sum_{i=1}^{m}|z_i|^p)^{\\frac{1}{p}} Args: x (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64. y (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64. p (float, optional): The norm to be computed, its data type is float32 or float64. Default: 2. Returns: Tensor: Tensor that is the p-norm of (x - y). Examples: .. code-block:: python import paddle import numpy as np x = paddle.to_tensor(np.array([[3, 3],[3, 3]]), "float32") y = paddle.to_tensor(np.array([[3, 3],[3, 1]]), "float32") out = paddle.dist(x, y, 0) print(out) # out = [1.] out = paddle.dist(x, y, 2) print(out) # out = [2.] 
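# For these inputs z = x - y = [[0, 0], [0, 2]]: the 0-norm above counts the single
# non-zero entry (1.), the 2-norm is sqrt(0 + 0 + 0 + 2**2) = 2., and the inf-/-inf-norms
# below pick the largest (2.) and smallest (0.) absolute entries of z.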
out = paddle.dist(x, y, float("inf")) print(out) # out = [2.] out = paddle.dist(x, y, float("-inf")) print(out) # out = [0.] """ check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'dist') check_variable_and_dtype(y, 'dtype', ['float32', 'float64'], 'dist') check_type(p, 'p', (float, int), 'dist') helper = LayerHelper("dist", **locals()) out = helper.create_variable_for_type_inference(x.dtype) inputs = {"X": [x], "Y": [y]} outputs = {'Out': [out]} attrs = {"p": float(p)} helper.append_op( type='dist', inputs=inputs, outputs={'Out': out}, attrs=attrs) return out def cond(x, p=None, name=None): """ Computes the condition number of a matrix or batches of matrices with respect to a matrix norm ``p``. Args: x (Tensor): The input tensor could be tensor of shape ``(*, m, n)`` where ``*`` is zero or more batch dimensions for ``p`` in ``(2, -2)``, or of shape ``(*, n, n)`` where every matrix is invertible for any supported ``p``. And the input data type could be ``float32`` or ``float64``. p (float|string, optional): Order of the norm. Supported values are `fro`, `nuc`, `1`, `-1`, `2`, `-2`, `inf`, `-inf`. Default value is `None`, meaning that the order of the norm is `2`. name (str, optional): The default value is `None`. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: computing results of condition number, its data type is the same as input Tensor ``x``. Examples: .. code-block:: python import paddle import numpy as np x = paddle.to_tensor([[1., 0, -1], [0, 1, 0], [1, 0, 1]]) # compute conditional number when p is None out = paddle.linalg.cond(x) # out.numpy() [1.4142135] # compute conditional number when order of the norm is 'fro' out_fro = paddle.linalg.cond(x, p='fro') # out_fro.numpy() [3.1622777] # compute conditional number when order of the norm is 'nuc' out_nuc = paddle.linalg.cond(x, p='nuc') # out_nuc.numpy() [9.2426405] # compute conditional number when order of the norm is 1 out_1 = paddle.linalg.cond(x, p=1) # out_1.numpy() [2.] # compute conditional number when order of the norm is -1 out_minus_1 = paddle.linalg.cond(x, p=-1) # out_minus_1.numpy() [1.] # compute conditional number when order of the norm is 2 out_2 = paddle.linalg.cond(x, p=2) # out_2.numpy() [1.4142135] # compute conditional number when order of the norm is -1 out_minus_2 = paddle.linalg.cond(x, p=-2) # out_minus_2.numpy() [0.70710677] # compute conditional number when order of the norm is inf out_inf = paddle.linalg.cond(x, p=np.inf) # out_inf.numpy() [2.] # compute conditional number when order of the norm is -inf out_minus_inf = paddle.linalg.cond(x, p=-np.inf) # out_minus_inf.numpy() [1.] 
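# Editorial sketch, assuming the paddle.dist API defined above: dist(x, y, p) is
# the p-norm of (x - y) after broadcasting; the numbers follow the docstring example.
import paddle
x = paddle.to_tensor([[3.0, 3.0], [3.0, 3.0]])
y = paddle.to_tensor([[3.0, 3.0], [3.0, 1.0]])
print(paddle.dist(x, y, 0))             # 1.  (count of non-zero entries of x - y)
print(paddle.dist(x, y, float("inf")))  # 2.  (largest absolute entry of x - y)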
a = paddle.to_tensor(np.random.randn(2, 4, 4).astype('float32')) # a.numpy() # [[[ 0.14063153 -0.996288 0.7996131 -0.02571543] # [-0.16303636 1.5534962 -0.49919784 -0.04402903] # [-1.1341571 -0.6022629 0.5445269 0.29154757] # [-0.16816919 -0.30972657 1.7521842 -0.5402487 ]] # [[-0.58081484 0.12402827 0.7229862 -0.55046535] # [-0.15178485 -1.1604939 0.75810957 0.30971205] # [-0.9669573 1.0940945 -0.27363303 -0.35416734] # [-1.216529 2.0018666 -0.7773689 -0.17556527]]] a_cond_fro = paddle.linalg.cond(a, p='fro') # a_cond_fro.numpy() [31.572273 28.120834] b = paddle.to_tensor(np.random.randn(2, 3, 4).astype('float64')) # b.numpy() # [[[ 1.61707487 0.46829144 0.38130416 0.82546736] # [-1.72710298 0.08866375 -0.62518804 0.16128892] # [-0.02822879 -1.67764516 0.11141444 0.3220113 ]] # [[ 0.22524372 0.62474921 -0.85503233 -1.03960523] # [-0.76620689 0.56673047 0.85064753 -0.45158196] # [ 1.47595418 2.23646462 1.5701758 0.10497519]]] b_cond_2 = paddle.linalg.cond(b, p=2) # b_cond_2.numpy() [3.30064451 2.51976252] """ def mat_norm(input, porder=1., axis=None): """ NOTE: Calculate the matrix norm of a square matrix or batches of square matrices, when porder is in (1, -1, inf, -inf) """ reduce_all = True if axis is None or axis == [] else False axis = axis if axis != None and axis != [] else [0] keepdim = False if paddle.in_dynamic_mode(): abs_out = _C_ops.abs(input) sum_out = _C_ops.reduce_sum(abs_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == 1 or porder == np.inf: return _C_ops.reduce_max(sum_out, 'dim', [-1], 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == -1 or porder == -np.inf: return _C_ops.reduce_min(sum_out, 'dim', [-1], 'keepdim', keepdim, 'reduce_all', reduce_all) block = LayerHelper('norm', **locals()) abs_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='abs', inputs={'X': input}, outputs={'Out': abs_out}) block.append_op( type='reduce_sum', inputs={'X': abs_out}, outputs={'Out': sum_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) if porder == 1 or porder == np.inf: block.append_op( type='reduce_max', inputs={'X': sum_out}, outputs={'Out': out}, attrs={ 'dim': [-1], 'keep_dim': keepdim, 'reduce_all': reduce_all }) if porder == -1 or porder == -np.inf: block.append_op( type='reduce_min', inputs={'X': sum_out}, outputs={'Out': out}, attrs={ 'dim': [-1], 'keep_dim': keepdim, 'reduce_all': reduce_all }) return out def fro_norm(input, porder=2, axis=[-1]): """ NOTE: Calculate the frobenius norm of a square matrix or batches of square matrices. """ reduce_all = True if axis is None or axis == [] else False keepdim = False if paddle.in_dynamic_mode(): pow_out = _C_ops.pow(input, 'factor', porder) sum_out_1 = _C_ops.reduce_sum(pow_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) sum_out_2 = _C_ops.reduce_sum(sum_out_1, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) return _C_ops.pow(sum_out_2, 'factor', float(1. 
/ porder)) block = LayerHelper('norm', **locals()) pow_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out_1 = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out_2 = block.create_variable_for_type_inference( dtype=block.input_dtype()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='pow', inputs={'X': input}, outputs={'Out': pow_out}, attrs={'factor': porder}) block.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': sum_out_1}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='reduce_sum', inputs={'X': sum_out_1}, outputs={'Out': sum_out_2}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='pow', inputs={'X': sum_out_2}, outputs={'Out': out}, attrs={'factor': float(1. / porder)}) return out def svd_norm(input, porder, axis=[-1]): """ NOTE: Calculate the matrix norm, which is related to singular values, of a matrix or batches of matrices, including nuclear norm, 2-norm and (-2)-norm. """ reduce_all = True if axis is None or axis == [] else False keepdim = False u, s, vh = svd(input, full_matrices=False) if paddle.in_dynamic_mode(): if porder == "nuc": return _C_ops.reduce_sum(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) max_out = _C_ops.reduce_max(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) min_out = _C_ops.reduce_min(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == 2: return _C_ops.elementwise_div(max_out, min_out, 'aixs', axis, 'use_mkldnn', False) if porder == -2: return _C_ops.elementwise_div(min_out, max_out, 'aixs', axis, 'use_mkldnn', False) block = LayerHelper('norm', **locals()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) if porder == "nuc": block.append_op( type='reduce_sum', inputs={'X': s}, outputs={'Out': out}, attrs={ 'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all }) return out max_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) min_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='reduce_max', inputs={'X': s}, outputs={'Out': max_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='reduce_min', inputs={'X': s}, outputs={'Out': min_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) if porder == 2: block.append_op( type='elementwise_div', inputs={'X': max_out, 'Y': min_out}, outputs={'Out': out}, attrs={'aixs': axis, 'use_mkldnn': False}) return out if porder == -2: block.append_op( type='elementwise_div', inputs={'X': min_out, 'Y': max_out}, outputs={'Out': out}, attrs={'aixs': axis, 'use_mkldnn': False}) return out def empty_tensor(input, shape): if paddle.in_dynamic_mode(): return input.reshape(shape) raise ValueError("only support x is nonempty tensor in static mode") x_shape = list(x.shape) if not len(x_shape) >= 2: raise ValueError("input should be a matrix or batches of matrices, " + "but the dimention of received input is {}".format( len(x_shape))) if p == None: p = 2 x_size = 0 if (0 in x_shape) else 1 if p in ("fro", "nuc", 1, -1, np.inf, -np.inf): if x_shape[len(x_shape) - 1] == x_shape[len(x_shape) - 2]: if x_size == 0: return empty_tensor(x, x_shape[:-2]) x_inv = x.inverse() if p == "fro": return fro_norm(x) * fro_norm(x_inv) if p == "nuc": return svd_norm(x, p) * svd_norm(x_inv, p) if p in (1, -1): return mat_norm( x, 
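# Editorial sketch: svd_norm above computes the 2-norm condition number as the
# ratio of the largest to the smallest singular value.  The check below uses the
# public paddle.linalg APIs defined in this file; values are illustrative.
import paddle
x = paddle.to_tensor([[3.0, 0.0], [0.0, 1.0]])
_, s, _ = paddle.linalg.svd(x)
print(s.max() / s.min())            # 3.
print(paddle.linalg.cond(x, p=2))   # 3., the same value via the code above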
porder=p, axis=[-2]) * mat_norm( x_inv, porder=p, axis=[-2]) if p in (np.inf, -np.inf): return mat_norm( x, porder=p, axis=[-1]) * mat_norm( x_inv, porder=p, axis=[-1]) else: raise ValueError("only support p is {} when input is a ".format(p) + "square matrix or batches of square matrices") elif p in (2, -2): if x_size == 0: return empty_tensor(x, x_shape[:-2]) return svd_norm(x, porder=p) else: raise ValueError( "unsupported {} for p, only supporting ('fro', 'nuc', ".format( p) + "1, -1, 2, -2, inf, -inf) or none") def dot(x, y, name=None): """ This operator calculates inner product for vectors. .. note:: Support 1-d and 2-d Tensor. When it is 2d, the first dimension of this matrix is the batch dimension, which means that the vectors of multiple batches are dotted. Parameters: x(Tensor): 1-D or 2-D ``Tensor``. Its dtype should be ``float32``, ``float64``, ``int32``, ``int64`` y(Tensor): 1-D or 2-D ``Tensor``. Its dtype soulde be ``float32``, ``float64``, ``int32``, ``int64`` name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. Details: :ref:`api_guide_Name` Returns: Tensor: the calculated result Tensor. Examples: .. code-block:: python import paddle import numpy as np x_data = np.random.uniform(0.1, 1, [10]).astype(np.float32) y_data = np.random.uniform(1, 3, [10]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.dot(x, y) print(z) """ op_type = 'dot' # skip var type check in dygraph mode to improve efficiency if paddle.in_dynamic_mode(): op = getattr(_C_ops, op_type) return op(x, y) assert x is not None, 'x cannot be None in {}'.format(op_type) assert y is not None, 'y cannot be None in {}'.format(op_type) check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], op_type) check_variable_and_dtype(y, 'y', ['float32', 'float64', 'int32', 'int64'], op_type) helper = LayerHelper(op_type, **locals()) if name is None: out = helper.create_variable_for_type_inference(dtype=x.dtype) else: out = helper.create_variable( name=name, dtype=x.dtype, persistable=False) helper.append_op( type="dot", inputs={'X': x, 'Y': y}, attrs={}, outputs={"Out": out}) return out def cov(x, rowvar=True, ddof=True, fweights=None, aweights=None, name=None): """ Estimate the covariance matrix of the input variables, given data and weights. A covariance matrix is a square matrix, indicate the covariance of each pair variables in the input matrix. For example, for an N-dimensional samples X=[x1,x2,…xN]T, then the covariance matrix element Cij is the covariance of xi and xj. The element Cii is the variance of xi itself. Parameters: x(Tensor): A N-D(N<=2) Tensor containing multiple variables and observations. By default, each row of x represents a variable. Also see rowvar below. rowvar(Bool, optional): If rowvar is True (default), then each row represents a variable, with observations in the columns. Default: True ddof(Bool, optional): If ddof=True will return the unbiased estimate, and ddof=False will return the simple average. Default: True fweights(Tensor, optional): 1-D Tensor of integer frequency weights; The number of times each observation vector should be repeated. Default: None aweights(Tensor, optional): 1-D Tensor of observation vector weights. How important of the observation vector, larger data means this element is more important. Default: None name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. 
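# Editorial sketch for paddle.dot as defined above: the inner product of 1-D
# tensors (2-D inputs are treated as a batch of vectors).
import paddle
a = paddle.to_tensor([1.0, 2.0, 3.0])
b = paddle.to_tensor([4.0, 5.0, 6.0])
print(paddle.dot(a, b))   # 32. = 1*4 + 2*5 + 3*6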
Details: :ref:`api_guide_Name` Returns: Tensor: The covariance matrix Tensor of the variables. Examples: .. code-block:: python import paddle xt = paddle.rand((3,4)) paddle.linalg.cov(xt) ''' Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, [[0.07918842, 0.06127326, 0.01493049], [0.06127326, 0.06166256, 0.00302668], [0.01493049, 0.00302668, 0.01632146]]) ''' """ op_type = 'cov' if len(x.shape) > 2 or len(x.shape) < 1: raise ValueError( "Input(x) only support N-D (1<=N<=2) tensor in cov, but received " "length of Input(input) is %s." % len(x.shape)) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cov') nx = x if len(x.shape) == 1: nx = x.reshape((1, -1)) if not rowvar and nx.shape[0] != 1: nx = nx.t() w = None observation_num = nx.shape[1] if fweights is not None: w = fweights.astype(nx.dtype) if len(w.shape) > 1: raise ValueError( "Input(fweights) only support N-D (N<=1) tensor in cov, but received " "shape of Input(input) is %s." % len(fweights.shape)) if fweights.shape[0] != observation_num: raise ValueError( "The number of Input(fweights) should equal to x's dim[1]: {}, but received " "size of Input(fweights) is {}.".format(observation_num, fweights.shape[0])) if fweights.min() < 0: raise ValueError( "The value of Input(fweights) cannot be negtive, but received " "min of Input(fweights) is {}.".format(fweights.min())) if not paddle.all(fweights == paddle.round(fweights.astype('float64'))): raise ValueError("Input(fweights) must be integer ") if aweights is not None: aw = aweights.astype(nx.dtype) if len(aw.shape) > 1: raise ValueError( "Input(aweights) only support N-D (N<=1) tensor in cov, but received " "length of Input(input) is %s." % len(aweights.shape)) check_variable_and_dtype(aweights, 'dtype', ['float32', 'float64'], 'cov') if aweights.shape[0] != observation_num: raise ValueError( "The number of Input(aweights) should equal to x's dim[1]: {}, but received " "size of Input(aweights) is {}.".format(observation_num, aweights.shape[0])) if aweights.min() < 0: raise ValueError( "The value of Input(aweights) cannot be negtive, but received " "min of Input(aweights) is {}.".format(aweights.min())) if w is not None: w = w * aw else: w = aw w_sum = paddle.to_tensor(observation_num, dtype=nx.dtype) if fweights is not None or aweights is not None: w_sum = w.sum() if w_sum.item() == 0: raise ValueError("The sum of weights is zero, can't be normalized.") if w is not None: nx_w = nx * w avg = (nx_w).sum(axis=1) / w_sum else: avg = nx.sum(axis=1) / w_sum nx_w = nx if w is not None and aweights is not None and ddof == True: norm_factor = w_sum - (w * aweights).sum() / w_sum else: norm_factor = w_sum - ddof if norm_factor <= 0: norm_factor = paddle.to_tensor(0, dtype=nx.dtype) nx = nx - avg.unsqueeze(1) xxt = paddle.mm(nx, nx_w.t().conj()) cov = paddle.divide(xxt, norm_factor).squeeze() return cov def t(input, name=None): """ Transpose <=2-D tensor. 0-D and 1-D tensors are returned as it is and 2-D tensor is equal to the paddle.transpose function which perm dimensions set 0 and 1. Args: input (Tensor): The input Tensor. It is a N-D (N<=2) Tensor of data types float16, float32, float64, int32. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` Returns: Tensor: A transposed n-D Tensor, with data type being float16, float32, float64, int32, int64. For Example: .. 
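# Editorial sketch for paddle.linalg.cov: with the default ddof=True, a 1-D input
# reduces to the unbiased variance of the single variable.  Illustrative values.
import paddle
obs = paddle.to_tensor([1.0, 2.0, 3.0, 4.0])
print(paddle.linalg.cov(obs))   # ~1.6667, the unbiased sample variance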
code-block:: text # Example 1 (0-D tensor) x = tensor([0.79]) paddle.t(x) = tensor([0.79]) # Example 2 (1-D tensor) x = tensor([0.79, 0.84, 0.32]) paddle.t(x) = tensor([0.79, 0.84, 0.32]) # Example 3 (2-D tensor) x = tensor([0.79, 0.84, 0.32], [0.64, 0.14, 0.57]) paddle.t(x) = tensor([0.79, 0.64], [0.84, 0.14], [0.32, 0.57]) Examples: .. code-block:: python import paddle x = paddle.ones(shape=[2, 3], dtype='int32') x_transposed = paddle.t(x) print(x_transposed.shape) # [3, 2] """ if len(input.shape) > 2: raise ValueError( "Input(input) only support N-D (N<=2) tensor, but received " "length of Input(input) is %s. Perhaps you can use paddle." "tensor.transpose() instead." % len(input.shape)) if paddle.in_dynamic_mode(): if len(input.shape) == 1: return input # 2-D tensor perm = [1, 0] out, _ = _C_ops.transpose2(input, 'axis', perm) return out check_variable_and_dtype( input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'], 'transpose') helper = LayerHelper('t', **locals()) out = helper.create_variable_for_type_inference(input.dtype) input_shape = helper.create_variable_for_type_inference(input.dtype) if len(input.shape) == 1: out = input else: helper.append_op( type='transpose2', inputs={'X': [input]}, outputs={'Out': [out], 'XShape': [input_shape]}, attrs={'axis': [1, 0]}) return out def cross(x, y, axis=None, name=None): """ Computes the cross product between two tensors along an axis. Inputs must have the same shape, and the length of their axes should be equal to 3. If `axis` is not given, it defaults to the first axis found with the length 3. Args: x (Tensor): The first input tensor. y (Tensor): The second input tensor. axis (int, optional): The axis along which to compute the cross product. It defaults to the first axis found with the length 3. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor. A Tensor with same data type as `x`. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0], [3.0, 3.0, 3.0]]) y = paddle.to_tensor([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]) z1 = paddle.cross(x, y) # [[-1. -1. -1.] # [ 2. 2. 2.] # [-1. -1. -1.]] z2 = paddle.cross(x, y, axis=1) # [[0. 0. 0.] # [0. 0. 0.] # [0. 0. 0.]] """ if in_dygraph_mode(): return _C_ops.final_state_cross(x, y, axis) else: if _in_legacy_dygraph(): if axis is not None: return _C_ops.cross(x, y, 'dim', axis) else: return _C_ops.cross(x, y) else: helper = LayerHelper("cross", **locals()) out = helper.create_variable_for_type_inference(x.dtype) attrs = dict() attrs['dim'] = axis helper.append_op( type='cross', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs=attrs) return out def cholesky(x, upper=False, name=None): r""" Computes the Cholesky decomposition of one symmetric positive-definite matrix or batches of symmetric positive-definite matrice. If `upper` is `True`, the decomposition has the form :math:`A = U^{T}U` , and the returned matrix :math:`U` is upper-triangular. Otherwise, the decomposition has the form :math:`A = LL^{T}` , and the returned matrix :math:`L` is lower-triangular. Args: x (Tensor): The input tensor. Its shape should be `[*, M, M]`, where * is zero or more batch dimensions, and matrices on the inner-most 2 dimensions all should be symmetric positive-definite. Its data type should be float32 or float64. upper (bool): The flag indicating whether to return upper or lower triangular matrices. Default: False. 
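# Editorial sketch for paddle.cross: with 1-D inputs of length 3 the axis is
# inferred, and the unit vectors give the usual right-handed result.
import paddle
ex = paddle.to_tensor([1.0, 0.0, 0.0])
ey = paddle.to_tensor([0.0, 1.0, 0.0])
print(paddle.cross(ex, ey))   # [0., 0., 1.]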
Returns: Tensor: A Tensor with same shape and data type as `x`. It represents \ triangular matrices generated by Cholesky decomposition. Examples: .. code-block:: python import paddle import numpy as np a = np.random.rand(3, 3) a_t = np.transpose(a, [1, 0]) x_data = np.matmul(a, a_t) + 1e-03 x = paddle.to_tensor(x_data) out = paddle.linalg.cholesky(x, upper=False) print(out) # [[1.190523 0. 0. ] # [0.9906703 0.27676893 0. ] # [1.25450498 0.05600871 0.06400121]] """ if paddle.in_dynamic_mode(): return _C_ops.cholesky(x, "upper", upper) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cholesky') check_type(upper, 'upper', bool, 'cholesky') helper = LayerHelper('cholesky', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='cholesky', inputs={'X': [x]}, outputs={'Out': out}, attrs={'upper': upper}) return out def matrix_rank(x, tol=None, hermitian=False, name=None): r""" Computes the rank of a matrix. The rank of a matrix is the number of singular values that are greater than the specified `tol` threshold when hermitian=False, or the number of eigenvalues in absolute value that are greater than the specified `tol` threshold when hermitian=True. Args: x (Tensor): The input tensor. Its shape should be `[..., m, n]`, where `...` is zero or more batch dimensions. If `x` is a batch of matrices then the output has the same batch dimensions. The data type of `x` should be float32 or float64. tol (float,Tensor,optional): the tolerance value. Default: None. If `tol` is not specified, and `sigma` is the largest singular value (or eigenvalues in absolute value), and `eps` is the epsilon value for the dtype of `x`, then `tol` is computed with formula `tol=sigma * max(m,n) * eps`. Note that if `x` is a batch of matrices, `tol` is computed this way for every batch. hermitian (bool,optional): indicates whether `x` is Hermitian. Default: False. When hermitian=True, `x` is assumed to be Hermitian, enabling a more efficient method for finding eigenvalues, but `x` is not checked inside the function. Instead, We just use the lower triangular of the matrix to compute. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: Rank of tensor x. Examples: .. 
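# Editorial sketch for paddle.linalg.cholesky: for a symmetric positive-definite
# input, the lower-triangular factor L satisfies x ~= L @ L^T (up to rounding).
import paddle
x = paddle.to_tensor([[4.0, 2.0], [2.0, 3.0]])
L = paddle.linalg.cholesky(x, upper=False)
print(paddle.matmul(L, L, transpose_y=True))   # reconstructs [[4., 2.], [2., 3.]]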
code-block:: python import paddle a = paddle.eye(10) b = paddle.linalg.matrix_rank(a) print(b) # b = [10] c = paddle.ones(shape=[3, 4, 5, 5]) d = paddle.linalg.matrix_rank(c, tol=0.01, hermitian=True) print(d) # d = [[1, 1, 1, 1], # [1, 1, 1, 1], # [1, 1, 1, 1]] """ if paddle.in_dynamic_mode(): if tol is None: tol_tensor = None tol_attr = 0.0 use_default_tol = True elif isinstance(tol, Variable): if tol.dtype != x.dtype: tol_tensor = cast(tol, x.dtype) else: tol_tensor = tol tol_attr = 0.0 use_default_tol = False else: tol_tensor = None tol_attr = float(tol) use_default_tol = False return _C_ops.matrix_rank(x, tol_tensor, "tol", tol_attr, 'hermitian', hermitian, 'use_default_tol', use_default_tol) inputs = {} attrs = {} check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'matrix_rank') inputs['X'] = x if tol is None: attrs['use_default_tol'] = True elif isinstance(tol, Variable): check_variable_and_dtype(tol, 'tol', ['float32'], 'matrix_rank') attrs['use_default_tol'] = False if tol.dtype != x.dtype: inputs['TolTensor'] = cast(tol, x.dtype) else: inputs['TolTensor'] = tol else: check_type(tol, 'tol', float, 'matrix_rank') attrs['use_default_tol'] = False attrs['tol'] = tol check_type(hermitian, 'hermitian', bool, 'matrix_rank') attrs['hermitian'] = hermitian helper = LayerHelper('matrix_rank', **locals()) out = helper.create_variable_for_type_inference(dtype='int32') helper.append_op( type='matrix_rank', inputs=inputs, outputs={'Out': out}, attrs=attrs) return out def bmm(x, y, name=None): """ Applies batched matrix multiplication to two tensors. Both of the two input tensors must be three-dementional and share the same batch size. if x is a (b, m, k) tensor, y is a (b, k, n) tensor, the output will be a (b, m, n) tensor. Args: x (Tensor): The input Tensor. y (Tensor): The input Tensor. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The product Tensor. Examples: .. code-block:: python import paddle # In imperative mode: # size x: (2, 2, 3) and y: (2, 3, 2) x = paddle.to_tensor([[[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]], [[3.0, 3.0, 3.0], [4.0, 4.0, 4.0]]]) y = paddle.to_tensor([[[1.0, 1.0],[2.0, 2.0],[3.0, 3.0]], [[4.0, 4.0],[5.0, 5.0],[6.0, 6.0]]]) out = paddle.bmm(x, y) #output size: (2, 2, 2) #output value: #[[[6.0, 6.0],[12.0, 12.0]],[[45.0, 45.0],[60.0, 60.0]]] out_np = out.numpy() """ x_shape = x.shape y_shape = y.shape if not len(x_shape) == len(y_shape) == 3: raise ValueError( "x and y should be 3-dimensional. But received x's dimention: {}, y's dimention: {}". format(x_shape, y_shape)) if x_shape[2] != y_shape[1]: raise ValueError( "x's width must be equal with y's height. But received x's shape: {}, y's shape: {}". format(x_shape, y_shape)) if x_shape[0] != y_shape[0]: raise ValueError( "x's batch (shape[0]) must be equal with y's batch (shape[0]). But received x's shape: {}, y's shape: {}". format(x_shape, y_shape)) if paddle.in_dynamic_mode(): return _C_ops.bmm(x, y) helper = LayerHelper('bmm', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type='bmm', inputs={'X': x, 'Y': y}, outputs={'Out': out}) return out def histogram(input, bins=100, min=0, max=0, name=None): """ Computes the histogram of a tensor. The elements are sorted into equal width bins between min and max. If min and max are both zero, the minimum and maximum values of the data are used. Args: input (Tensor): A Tensor(or LoDTensor) with shape :math:`[N_1, N_2,..., N_k]` . 
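# Editorial sketch for paddle.linalg.matrix_rank with the default tolerance:
# the rank is the number of singular values above tol.
import paddle
a = paddle.to_tensor([[1.0, 2.0], [2.0, 4.0]])   # second row is twice the first
print(paddle.linalg.matrix_rank(a))              # 1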
The data type of the input Tensor should be float32, float64, int32, int64. bins (int): number of histogram bins min (int): lower end of the range (inclusive) max (int): upper end of the range (inclusive) Returns: Tensor: data type is int64, shape is (nbins,). Examples: .. code-block:: python import paddle inputs = paddle.to_tensor([1, 2, 1]) result = paddle.histogram(inputs, bins=4, min=0, max=3) print(result) # [0, 2, 1, 0] """ if paddle.in_dynamic_mode(): return _C_ops.histogram(input, "bins", bins, "min", min, "max", max) helper = LayerHelper('histogram', **locals()) check_variable_and_dtype( input, 'X', ['int32', 'int64', 'float32', 'float64'], 'histogram') out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64) helper.append_op( type='histogram', inputs={'X': input}, outputs={'Out': out}, attrs={'bins': bins, 'min': min, 'max': max}) return out def bincount(x, weights=None, minlength=0, name=None): """ Computes frequency of each value in the input tensor. Args: x (Tensor): A Tensor with non-negative integer. Should be 1-D tensor. weights (Tensor, optional): Weight for each value in the input tensor. Should have the same shape as input. Default is None. minlength (int, optional): Minimum number of bins. Should be non-negative integer. Default is 0. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor of frequency. Examples: .. code-block:: python import paddle x = paddle.to_tensor([1, 2, 1, 4, 5]) result1 = paddle.bincount(x) print(result1) # [0, 2, 1, 0, 1, 1] w = paddle.to_tensor([2.1, 0.4, 0.1, 0.5, 0.5]) result2 = paddle.bincount(x, weights=w) print(result2) # [0., 2.19999981, 0.40000001, 0., 0.50000000, 0.50000000] """ if x.dtype not in [paddle.int32, paddle.int64]: raise TypeError("Elements in Input(x) should all be integers") if paddle.in_dynamic_mode(): return _C_ops.bincount(x, weights, "minlength", minlength) helper = LayerHelper('bincount', **locals()) check_variable_and_dtype(x, 'X', ['int32', 'int64'], 'bincount') if weights is not None: check_variable_and_dtype(weights, 'Weights', ['int32', 'int64', 'float32', 'float64'], 'bincount') out = helper.create_variable_for_type_inference(dtype=weights.dtype) else: out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='bincount', inputs={'X': x, 'Weights': weights}, outputs={'Out': out}, attrs={'minlength': minlength}) return out def mv(x, vec, name=None): """ Performs a matrix-vector product of the matrix x and the vector vec. Args: x (Tensor): A tensor with shape :math:`[M, N]` , The data type of the input Tensor x should be one of float32, float64. vec (Tensor): A tensor with shape :math:`[N]` , The data type of the input Tensor x should be one of float32, float64. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor which is producted by x and vec. Examples: .. 
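# Editorial sketch for paddle.bincount: counts occurrences of each non-negative
# integer value, indexed from 0.
import paddle
x = paddle.to_tensor([0, 1, 1, 3])
print(paddle.bincount(x))   # [1, 2, 0, 1]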
code-block:: python # x: [M, N], vec: [N] # paddle.mv(x, vec) # out: [M] import numpy as np import paddle x_data = np.array([[2, 1, 3], [3, 0, 1]]).astype("float64") x = paddle.to_tensor(x_data) vec_data = np.array([3, 5, 1]) vec = paddle.to_tensor(vec_data).astype("float64") out = paddle.mv(x, vec) """ if in_dygraph_mode(): return _C_ops.final_state_mv(x, vec) else: if _in_legacy_dygraph(): out = _C_ops.mv(x, vec) return out else: def __check_input(x, vec): var_names = {'x': x, 'vec': vec} for name, val in var_names.items(): check_variable_and_dtype(val, name, ['float32', 'float64'], 'mv') x_shape = list(x.shape) vec_shape = list(vec.shape) if len(x_shape) != 2: raise ValueError( "x should be 2-dimensional. But received x's dimention: {}". format(x_shape)) if len(vec_shape) != 1: raise ValueError( "vec should be 1-dimensional. But received vec's dimention: {}". format(vec_shape)) __check_input(x, vec) helper = LayerHelper('mv', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='mv', inputs={'X': x, 'Vec': vec}, outputs={'Out': out}) return out def det(x, name=None): """ Calculates determinant value of a square matrix or batches of square matrices. Args: x (Tensor): input (Tensor): the input matrix of size `(n, n)` or the batch of matrices of size `(*, n, n)` where `*` is one or more batch dimensions. Returns: y (Tensor):the determinant value of a square matrix or batches of square matrices. Examples: .. code-block:: python import paddle x = paddle.randn([3,3,3]) A = paddle.linalg.det(x) print(A) # [ 0.02547996, 2.52317095, -6.15900707]) """ if paddle.in_dynamic_mode(): return _C_ops.determinant(x) check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'det') input_shape = list(x.shape) assert len(input_shape) >= 2, \ "The x must be at least 2-dimensional, " \ "but received Input x's dimensional: %s.\n" % \ len(input_shape) assert (input_shape[-1] == input_shape[-2]), \ "Expect squared input," \ "but received %s by %s matrix.\n" \ %(input_shape[-2], input_shape[-1]) \ helper = LayerHelper('determinant', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='determinant', inputs={'Input': [x]}, outputs={'Out': [out]}) return out def slogdet(x, name=None): """ Calculates the sign and natural logarithm of the absolute value of a square matrix's or batches square matrices' determinant. The determinant can be computed with ``sign * exp(logabsdet) Supports input of float, double Note that for matrices that have zero determinant, this returns ``(0, -inf)`` Args: x (Tensor): the batch of matrices of size :math:`(*, n, n)` where math:`*` is one or more batch dimensions. Returns: y (Tensor): A tensor containing the sign of the determinant and the natural logarithm of the absolute value of determinant, respectively. Examples: .. code-block:: python import paddle x = paddle.randn([3,3,3]) A = paddle.linalg.slogdet(x) print(A) # [[ 1. , 1. , -1. 
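# Editorial sketch for paddle.linalg.det on a single 2x2 matrix.
import paddle
x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]])
print(paddle.linalg.det(x))   # -2. = 1*4 - 2*3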
], # [-0.98610914, -0.43010661, -0.10872950]]) """ if paddle.in_dynamic_mode(): return _C_ops.slogdeterminant(x) check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'slogdet') input_shape = list(x.shape) assert len(input_shape) >= 2, \ "The x must be at least 2-dimensional, " \ "but received Input x's dimensional: %s.\n" % \ len(input_shape) assert (input_shape[-1] == input_shape[-2]), \ "Expect squared input," \ "but received %s by %s matrix.\n" \ %(input_shape[-2], input_shape[-1]) \ helper = LayerHelper('slogdeterminant', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='slogdeterminant', inputs={'Input': [x]}, outputs={'Out': [out]}) return out def svd(x, full_matrices=False, name=None): r""" Computes the singular value decomposition of one matrix or a batch of regular matrices. Let :math:`X` be the input matrix or a batch of input matrices, the output should satisfies: .. math:: X = U * diag(S) * VT Args: x (Tensor): The input tensor. Its shape should be `[..., N, M]`, where `...` is zero or more batch dimensions. N and M can be arbitraty positive number. Note that if x is sigular matrices, the grad is numerical instable. The data type of x should be float32 or float64. full_matrices (bool): A flag to control the behavor of svd. If full_matrices = True, svd op will compute full U and V matrics, which means shape of U is `[..., N, N]`, shape of V is `[..., M, M]`. K = min(M, N). If full_matrices = False, svd op will use a economic method to store U and V. which means shape of U is `[..., N, K]`, shape of V is `[..., M, K]`. K = min(M, N). name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tuple of 3 tensors: (U, S, VH). VH is the conjugate transpose of V. S is the singlar value vectors of matrics with shape `[..., K]` Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [1.0, 3.0], [4.0, 6.0]]).astype('float64') x = x.reshape([3, 2]) u, s, vh = paddle.linalg.svd(x) print (u) #U = [[ 0.27364809, -0.21695147 ], # [ 0.37892198, -0.87112408 ], # [ 0.8840446 , 0.44053933 ]] print (s) #S = [8.14753743, 0.78589688] print (vh) #VT= [[ 0.51411221, 0.85772294], # [ 0.85772294, -0.51411221]] # one can verify : U * S * VT == X # U * UH == I # V * VH == I """ if paddle.in_dynamic_mode(): return _C_ops.svd(x, 'full_matrices', full_matrices) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'svd') check_type(full_matrices, 'full_matrices', bool, 'svd') helper = LayerHelper('svd', **locals()) u = helper.create_variable_for_type_inference(dtype=x.dtype) vh = helper.create_variable_for_type_inference(dtype=x.dtype) s = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['full_matrices'] = full_matrices helper.append_op( type='svd', inputs={'X': [x]}, outputs={'U': u, 'VH': vh, 'S': s}, attrs=attrs, ) return u, s, vh def matrix_power(x, n, name=None): r""" Computes the n-th power of a square matrix or a batch of square matrices. Let :math:`X` be a sqaure matrix or a batch of square matrices, :math:`n` be an exponent, the equation should be: .. math:: Out = X ^ {n} Specifically, - If `n > 0`, it returns the matrix or a batch of matrices raised to the power of `n`. - If `n = 0`, it returns the identity matrix or a batch of identity matrices. - If `n < 0`, it returns the inverse of each matrix (if invertible) raised to the power of `abs(n)`. 
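# Editorial sketch for paddle.linalg.slogdet: the result stacks the sign and
# log|det|, so sign * exp(logabsdet) recovers the determinant.  Illustrative.
import paddle
x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]])
print(paddle.linalg.slogdet(x))   # [-1., ~0.6931]; det(x) = -1 * exp(0.6931) = -2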
Args: x (Tensor): A square matrix or a batch of square matrices to be raised to power `n`. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. n (int): The exponent. It can be any positive, negative integer or zero. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The n-th power of the matrix (or the batch of matrices) `x`. Its data type should be the same as that of `x`. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1, 2, 3], [1, 4, 9], [1, 8, 27]], dtype='float64') print(paddle.linalg.matrix_power(x, 2)) # [[6. , 34. , 102.], # [14. , 90. , 282.], # [36. , 250., 804.]] print(paddle.linalg.matrix_power(x, 0)) # [[1., 0., 0.], # [0., 1., 0.], # [0., 0., 1.]] print(paddle.linalg.matrix_power(x, -2)) # [[ 12.91666667, -12.75000000, 2.83333333 ], # [-7.66666667 , 8. , -1.83333333 ], # [ 1.80555556 , -1.91666667 , 0.44444444 ]] """ if paddle.in_dynamic_mode(): return _C_ops.matrix_power(x, "n", n) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'matrix_power') check_type(n, 'n', int, 'matrix_power') helper = LayerHelper('matrix_power', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matrix_power', inputs={'X': x}, outputs={'Out': out}, attrs={'n': n}) return out def qr(x, mode="reduced", name=None): r""" Computes the QR decomposition of one matrix or batches of matrice (backward is unsupported now). Args: x (Tensor): The input tensor. Its shape should be `[..., M, N]`, where ... is zero or more batch dimensions. M and N can be arbitrary positive number. The data type of x should be float32 or float64. mode (str, optional): A flag to control the behavior of qr, the default is "reduced". Suppose x's shape is `[..., M, N]` and denoting `K = min(M, N)`: If mode = "reduced", qr op will return reduced Q and R matrices, which means Q's shape is `[..., M, K]` and R's shape is `[..., K, N]`. If mode = "complete", qr op will return complete Q and R matrices, which means Q's shape is `[..., M, M]` and R's shape is `[..., M, N]`. If mode = "r", qr op will only return reduced R matrix, which means R's shape is `[..., K, N]`. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: If mode = "reduced" or mode = "complete", qr will return a two tensor-tuple, which represents Q and R. If mode = "r", qr will return a tensor which represents R. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') q, r = paddle.linalg.qr(x) print (q) print (r) # Q = [[-0.16903085, 0.89708523], # [-0.50709255, 0.27602622], # [-0.84515425, -0.34503278]]) # R = [[-5.91607978, -7.43735744], # [ 0. 
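# Editorial sketch for paddle.linalg.matrix_power: n=0 yields the identity,
# n=2 multiplies the matrix by itself.
import paddle
x = paddle.to_tensor([[2.0, 0.0], [0.0, 3.0]])
print(paddle.linalg.matrix_power(x, 2))   # [[4., 0.], [0., 9.]]
print(paddle.linalg.matrix_power(x, 0))   # [[1., 0.], [0., 1.]]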
, 0.82807867]]) # one can verify : X = Q * R ; """ if paddle.in_dynamic_mode(): q, r = _C_ops.qr(x, 'mode', mode) if mode == "r": return r else: return q, r check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'qr') check_type(mode, 'mode', str, 'qr') helper = LayerHelper('qr', **locals()) q = helper.create_variable_for_type_inference(dtype=x.dtype) r = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['mode'] = mode helper.append_op( type='qr', inputs={'X': [x]}, outputs={'Q': q, 'R': r}, attrs=attrs) if mode == "r": return r else: return q, r def lu(x, pivot=True, get_infos=False, name=None): r""" Computes the LU factorization of an N-D(N>=2) matrix x. Returns the LU factorization(inplace x) and Pivots. low triangular matrix L and upper triangular matrix U are combined to a single LU matrix. Pivoting is done if pivot is set to True. P mat can be get by pivots: # ones = eye(rows) #eye matrix of rank rows # for i in range(cols): # swap(ones[i], ones[pivots[i]]) # return ones Args: X (Tensor): the tensor to factor of N-dimensions(N>=2). pivot (bool, optional): controls whether pivoting is done. Default: True. get_infos (bool, optional): if set to True, returns an info IntTensor. Default: False. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: factorization (Tensor): LU matrix, the factorization of input X. pivots (IntTensor): the pivots of size(∗(N-2), min(m,n)). `pivots` stores all the intermediate transpositions of rows. The final permutation `perm` could be reconstructed by this, details refer to upper example. infos (IntTensor, optional): if `get_infos` is `True`, this is a tensor of size (∗(N-2)) where non-zero values indicate whether factorization for the matrix or each minibatch has succeeded or failed. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') lu,p,info = paddle.linalg.lu(x, get_infos=True) # >>> lu: # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0.20000000, 0.80000000], # [0.60000000, 0.50000000]]) # >>> p # Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # [3, 3]) # >>> info # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # 0) P,L,U = paddle.linalg.lu_unpack(lu,p) # >>> P # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[0., 1., 0.], # [0., 0., 1.], # [1., 0., 0.]]), # >>> L # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[1. , 0. ], # [0.20000000, 1. ], # [0.60000000, 0.50000000]]), # >>> U # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0. 
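# Editorial sketch for paddle.linalg.qr in the default "reduced" mode: Q has
# orthonormal columns and Q @ R reconstructs x up to floating-point rounding.
import paddle
x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
q, r = paddle.linalg.qr(x, mode="reduced")
print(paddle.matmul(q, r))   # ~x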
, 0.80000000]])) # one can verify : X = P @ L @ U ; """ if paddle.in_dynamic_mode(): LU, Piv, Info = _C_ops.lu(x, 'pivots', pivot) if get_infos: return LU, Piv, Info else: return LU, Piv check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu') helper = LayerHelper('lu', **locals()) lu = helper.create_variable_for_type_inference(dtype=x.dtype) p = helper.create_variable_for_type_inference(dtype='int') info = helper.create_variable_for_type_inference(dtype='int') attrs = dict() attrs['pivots'] = pivot helper.append_op( type='lu', inputs={'X': x}, outputs={'Out': lu, 'Pivots': p, 'Infos': info}, attrs=attrs) if get_infos: return lu, p, info else: return lu, p def lu_unpack(x, y, unpack_ludata=True, unpack_pivots=True, name=None): r""" Unpack L U and P to single matrix tensor . unpack L and U matrix from LU, unpack permutation matrix P from Pivtos . P mat can be get by pivots: # ones = eye(rows) #eye matrix of rank rows # for i in range(cols): # swap(ones[i], ones[pivots[i]]) Args: x (Tensor): The LU tensor get from paddle.linalg.lu, which is combined by L and U. y (Tensor): Pivots get from paddle.linalg.lu. unpack_ludata (bool,optional): whether to unpack L and U from x. Default: True. unpack_pivots (bool, optional): whether to unpack permutation matrix P from Pivtos. Default: True. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: P (Tensor): Permutation matrix P of lu factorization. L (Tensor): The lower triangular matrix tensor of lu factorization. U (Tensor): The upper triangular matrix tensor of lu factorization. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') lu,p,info = paddle.linalg.lu(x, get_infos=True) # >>> lu: # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0.20000000, 0.80000000], # [0.60000000, 0.50000000]]) # >>> p # Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # [3, 3]) # >>> info # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # 0) P,L,U = paddle.linalg.lu_unpack(lu,p) # >>> P # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[0., 1., 0.], # [0., 0., 1.], # [1., 0., 0.]]), # >>> L # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[1. , 0. ], # [0.20000000, 1. ], # [0.60000000, 0.50000000]]), # >>> U # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0. , 0.80000000]])) # one can verify : X = P @ L @ U ; """ if paddle.in_dynamic_mode(): P, L, U = _C_ops.lu_unpack(x, y, 'unpack_ludata', unpack_ludata, 'unpack_pivots', unpack_pivots) return P, L, U check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu_unpack') helper = LayerHelper('lu_unpack', **locals()) p = helper.create_variable_for_type_inference(dtype=x.dtype) l = helper.create_variable_for_type_inference(dtype=x.dtype) u = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['unpack_ludata'] = unpack_ludata attrs['unpack_pivots'] = unpack_pivots helper.append_op( type='lu_unpack', inputs={'X': x, 'Pivots': y}, outputs={'Pmat': p, 'L': l, 'U': u}, attrs=attrs) return p, l, u def eig(x, name=None): """ This API performs the eigenvalue decomposition of a square matrix or a batch of square matrices. .. 
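# Editorial sketch for paddle.linalg.lu / lu_unpack: unpacking the packed LU
# factor and pivots gives P, L, U with P @ L @ U ~= x.
import paddle
x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
lu, piv = paddle.linalg.lu(x)
p, l, u = paddle.linalg.lu_unpack(lu, piv)
print(paddle.matmul(paddle.matmul(p, l), u))   # ~x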
note:: If the matrix is a Hermitian or a real symmetric matrix, please use :ref:`paddle.linalg.eigh` instead, which is much faster. If only eigenvalues is needed, please use :ref:`paddle.linalg.eigvals` instead. If the matrix is of any shape, please use :ref:`paddle.linalg.svd`. This API is only supported on CPU device. The output datatype is always complex for both real and complex input. Args: x (Tensor): A tensor with shape math:`[*, N, N]`, The data type of the x should be one of ``float32``, ``float64``, ``compplex64`` or ``complex128``. name (str, optional): The default value is `None`. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Eigenvalues(Tensors): A tensor with shape math:`[*, N]` refers to the eigen values. Eigenvectors(Tensors): A tensor with shape math:`[*, N, N]` refers to the eigen vectors. Examples: .. code-block:: python import paddle import numpy as np paddle.device.set_device("cpu") x_data = np.array([[1.6707249, 7.2249975, 6.5045543], [9.956216, 8.749598, 6.066444 ], [4.4251957, 1.7983172, 0.370647 ]]).astype("float32") x = paddle.to_tensor(x_data) w, v = paddle.linalg.eig(x) print(w) # Tensor(shape=[3, 3], dtype=complex128, place=CPUPlace, stop_gradient=False, # [[(-0.5061363550800655+0j) , (-0.7971760990842826+0j) , # (0.18518077798279986+0j)], # [(-0.8308237755993192+0j) , (0.3463813401919749+0j) , # (-0.6837005269141947+0j) ], # [(-0.23142567697893396+0j), (0.4944999840400175+0j) , # (0.7058765252952796+0j) ]]) print(v) # Tensor(shape=[3], dtype=complex128, place=CPUPlace, stop_gradient=False, # [ (16.50471283351188+0j) , (-5.5034820550763515+0j) , # (-0.21026087843552282+0j)]) """ if paddle.in_dynamic_mode(): w, v = _C_ops.eig(x) return w, v check_variable_and_dtype( x, 'X', ['float32', 'float64', 'complex64', 'complex128'], 'eig') helper = LayerHelper('eig', **locals()) w = helper.create_variable_for_type_inference(x.dtype) v = helper.create_variable_for_type_inference(x.dtype) inputs = {'X': x} outputs = {'Eigenvalues': w, 'Eigenvectors': v} helper.append_op(type='eig', inputs=inputs, outputs=outputs) return w, v def eigvals(x, name=None): """ Compute the eigenvalues of one or more general matrices. Warning: The gradient kernel of this operator does not yet developed. If you need back propagation through this operator, please replace it with paddle.linalg.eig. Args: x (Tensor): A square matrix or a batch of square matrices whose eigenvalues will be computed. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32, float64, complex64, or complex128. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: A tensor containing the unsorted eigenvalues which has the same batch dimensions with `x`. The eigenvalues are complex-valued even when `x` is real. Examples: .. 
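# Editorial sketch for paddle.linalg.eig (CPU only, per the note above): w holds
# the eigenvalues and v the eigenvectors, both complex even for real input.
# Ordering of the eigenvalues is not guaranteed.
import paddle
paddle.device.set_device("cpu")
x = paddle.to_tensor([[2.0, 0.0], [0.0, 3.0]])
w, v = paddle.linalg.eig(x)
print(w)   # eigenvalues 2+0j and 3+0j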
code-block:: python import paddle paddle.set_device("cpu") paddle.seed(1234) x = paddle.rand(shape=[3, 3], dtype='float64') # [[0.02773777, 0.93004224, 0.06911496], # [0.24831591, 0.45733623, 0.07717843], # [0.48016702, 0.14235102, 0.42620817]]) print(paddle.linalg.eigvals(x)) # [(-0.27078833542132674+0j), (0.29962280156230725+0j), (0.8824477020120244+0j)] #complex128 """ check_variable_and_dtype(x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigvals') x_shape = list(x.shape) if len(x_shape) < 2: raise ValueError( "The dimension of Input(x) should be at least 2, but received x's dimention = {}, x's shape = {}". format(len(x_shape), x_shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The last two dimensions of Input(x) should be equal, but received x's shape = {}". format(x_shape)) if paddle.in_dynamic_mode(): return _C_ops.eigvals(x) helper = LayerHelper('eigvals', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type='eigvals', inputs={'X': x}, outputs={'Out': out}) return out def multi_dot(x, name=None): """ Multi_dot is an operator that calculates multiple matrix multiplications. Supports inputs of float16(only GPU support), float32 and float64 dtypes. This function does not support batched inputs. The input tensor in [x] must be 2-D except for the first and last can be 1-D. If the first tensor is a 1-D vector of shape(n, ) it is treated as row vector of shape(1, n), similarly if the last tensor is a 1D vector of shape(n, ), it is treated as a column vector of shape(n, 1). If the first and last tensor are 2-D matrix, then the output is also 2-D matrix, otherwise the output is a 1-D vector. Multi_dot will select the lowest cost multiplication order for calculation. The cost of multiplying two matrices with shapes (a, b) and (b, c) is a * b * c. Given matrices A, B, C with shapes (20, 5), (5, 100), (100, 10) respectively, we can calculate the cost of different multiplication orders as follows: - Cost((AB)C) = 20x5x100 + 20x100x10 = 30000 - Cost(A(BC)) = 5x100x10 + 20x5x10 = 6000 In this case, multiplying B and C first, then multiply A, which is 5 times faster than sequential calculation. Args: x ([Tensor]): The input tensors which is a list Tensor. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The output Tensor. Examples: .. 
code-block:: python import paddle import numpy as np # A * B A_data = np.random.random([3, 4]).astype(np.float32) B_data = np.random.random([4, 5]).astype(np.float32) A = paddle.to_tensor(A_data) B = paddle.to_tensor(B_data) out = paddle.linalg.multi_dot([A, B]) print(out.numpy().shape) # [3, 5] # A * B * C A_data = np.random.random([10, 5]).astype(np.float32) B_data = np.random.random([5, 8]).astype(np.float32) C_data = np.random.random([8, 7]).astype(np.float32) A = paddle.to_tensor(A_data) B = paddle.to_tensor(B_data) C = paddle.to_tensor(C_data) out = paddle.linalg.multi_dot([A, B, C]) print(out.numpy().shape) # [10, 7] """ if paddle.in_dynamic_mode(): return _C_ops.multi_dot(x) check_type(x, 'x', (list, tuple), 'multi_dot') for id, item in enumerate(x): check_variable_and_dtype(item, 'x[' + str(id) + ']', ['float16', 'float32', 'float64'], 'multi_dot') if item.dtype != x[0].dtype: raise TypeError( "All the Tensors in the input must have the same data type.") helper = LayerHelper('multi_dot', **locals()) dtype = helper.input_dtype(input_param_name='x') out = helper.create_variable_for_type_inference(dtype) helper.append_op(type='multi_dot', inputs={"X": x}, outputs={"Out": out}) return out def eigh(x, UPLO='L', name=None): """ Compute the eigenvalues and eigenvectors of a complex Hermitian (conjugate symmetric) or a real symmetric matrix. Args: x (Tensor): A tensor with shape :math:`[*, N, N]` , The data type of the input Tensor x should be one of float32, float64, complex64, complex128. UPLO(str, optional): (string, default 'L'), 'L' represents the lower triangular matrix, "'U' represents the upper triangular matrix.". name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: out_value(Tensor): A Tensor with shape [*, N] and data type of float32 and float64. The eigenvalues of eigh op. out_vector(Tensor): A Tensor with shape [*, N, N] and data type of float32,float64,complex64 and complex128. The eigenvectors of eigh op. Examples: .. code-block:: python import numpy as np import paddle x_data = np.array([[1, -2j], [2j, 5]]) x = paddle.to_tensor(x_data) out_value, out_vector = paddle.linalg.eigh(x, UPLO='L') print(out_value) #[0.17157288, 5.82842712] print(out_vector) #[(-0.9238795325112867+0j), (-0.3826834323650898+0j)], #[ 0.3826834323650898j , -0.9238795325112867j ]] """ if paddle.in_dynamic_mode(): return _C_ops.eigh(x, 'UPLO', UPLO) def __check_input(x, UPLO): x_shape = list(x.shape) if len(x.shape) < 2: raise ValueError( "Input(input) only support >=2 tensor, but received " "length of Input(input) is %s." % len(x.shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The input matrix must be batches of square matrices. But received x's dimention: {}". format(x_shape)) if UPLO != 'L' and UPLO != 'U': raise ValueError( "UPLO must be L or U. 
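# Editorial sketch for paddle.linalg.multi_dot: the chain is evaluated in the
# cheapest association order; only the result shape is checked here.
import paddle
A, B, C = paddle.rand([20, 5]), paddle.rand([5, 100]), paddle.rand([100, 10])
print(paddle.linalg.multi_dot([A, B, C]).shape)   # [20, 10]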
But received UPLO is: {}".format(UPLO)) __check_input(x, UPLO) helper = LayerHelper('eigh', **locals()) check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigh') out_value = helper.create_variable_for_type_inference(dtype=x.dtype) out_vector = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='eigh', inputs={'X': x}, outputs={'Eigenvalues': out_value, 'Eigenvectors': out_vector}, attrs={'UPLO': UPLO}) return out_value, out_vector def pinv(x, rcond=1e-15, hermitian=False, name=None): r""" Calculate pseudo inverse via SVD(singular value decomposition) of one matrix or batches of regular matrix. .. math:: if hermitian == False: x = u * s * vt (SVD) out = v * 1/s * ut else: x = u * s * ut (eigh) out = u * 1/s * u.conj().transpose(-2,-1) If x is hermitian or symmetric matrix, svd will be replaced with eigh. Args: x(Tensor): The input tensor. Its shape should be (*, m, n) where * is zero or more batch dimensions. m and n can be arbitraty positive number. The data type of x should be float32 or float64 or complex64 or complex128. When data type is complex64 or cpmplex128, hermitian should be set True. rcond(Tensor, optional): the tolerance value to determine when is a singular value zero. Defalut:1e-15. hermitian(bool, optional): indicates whether x is Hermitian if complex or symmetric if real. Default: False. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The tensor with same data type with x. it represents pseudo inverse of x. Its shape should be (*, n, m). Examples: .. code-block:: python import paddle x = paddle.arange(15).reshape((3, 5)).astype('float64') input = paddle.to_tensor(x) out = paddle.linalg.pinv(input) print(input) print(out) # input: # [[0. , 1. , 2. , 3. , 4. ], # [5. , 6. , 7. , 8. , 9. 
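# Editorial sketch for paddle.linalg.eigh on a real symmetric matrix: w are the
# eigenvalues and v the orthonormal eigenvectors.  Illustrative values.
import paddle
x = paddle.to_tensor([[2.0, 1.0], [1.0, 2.0]])
w, v = paddle.linalg.eigh(x, UPLO="L")
print(w)   # eigenvalues 1. and 3.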
], # [10., 11., 12., 13., 14.]] # out: # [[-0.22666667, -0.06666667, 0.09333333], # [-0.12333333, -0.03333333, 0.05666667], # [-0.02000000, 0.00000000, 0.02000000], # [ 0.08333333, 0.03333333, -0.01666667], # [ 0.18666667, 0.06666667, -0.05333333]] # one can verify : x * out * x = x ; # or out * x * out = x ; """ if paddle.in_dynamic_mode(): if not hermitian: # combine svd and matmul op u, s, vt = _C_ops.svd(x, 'full_matrices', False) max_singular_val = _C_ops.reduce_max(s, 'dim', [-1], 'keep_dim', True, \ 'reduce_all', False) rcond = paddle.to_tensor(rcond, dtype=x.dtype) cutoff = rcond * max_singular_val y = float('inf') y = paddle.to_tensor(y, dtype=x.dtype) condition = s > cutoff cond_int = layers.cast(condition, s.dtype) cond_not_int = layers.cast(layers.logical_not(condition), s.dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st, _ = _C_ops.unsqueeze2(singular, 'axes', [-2]) dims = list(range(len(vt.shape))) perm = dims[:-2] + [dims[-1]] + [dims[-2]] v, _ = _C_ops.transpose2(vt, 'axis', perm) out_1 = v * st out_2 = _C_ops.matmul_v2(out_1, u, 'trans_x', False, 'trans_y', True) return out_2 else: # combine eigh and matmul op s, u = _C_ops.eigh(x, 'UPLO', 'L') s_abs = paddle.abs(s) max_singular_val = _C_ops.reduce_max(s_abs, 'dim', [-1], 'keep_dim', True, \ 'reduce_all', False) rcond = paddle.to_tensor(rcond, dtype=s.dtype) cutoff = rcond * max_singular_val y = float('inf') y = paddle.to_tensor(y, dtype=s.dtype) condition = s_abs > cutoff cond_int = layers.cast(condition, s.dtype) cond_not_int = layers.cast(layers.logical_not(condition), s.dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st, _ = _C_ops.unsqueeze2(singular, 'axes', [-2]) out_1 = u * st u_conj = _C_ops.conj(u) out_2 = _C_ops.matmul_v2(out_1, u_conj, 'trans_x', False, 'trans_y', True) return out_2 else: if not hermitian: helper = LayerHelper('pinv', **locals()) dtype = x.dtype check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'pinv') u = helper.create_variable_for_type_inference(dtype) s = helper.create_variable_for_type_inference(dtype) vt = helper.create_variable_for_type_inference(dtype) helper.append_op( type='svd', inputs={'X': [x]}, outputs={'U': u, 'VH': vt, 'S': s}, attrs={'full_matrices': False}, ) max_singular_val = helper.create_variable_for_type_inference(dtype) helper.append_op( type='reduce_max', inputs={'X': s}, outputs={'Out': max_singular_val}, attrs={'dim': [-1], 'keep_dim': True, 'reduce_all': False}) rcond = layers.fill_constant(shape=[1], value=rcond, dtype=dtype) cutoff = rcond * max_singular_val y = float('inf') y = layers.fill_constant(shape=[1], value=y, dtype=dtype) condition = s > cutoff cond_int = layers.cast(condition, dtype) cond_not_int = layers.cast(layers.logical_not(condition), dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st = helper.create_variable_for_type_inference(dtype=dtype) st_shape = helper.create_variable_for_type_inference(dtype=dtype) helper.append_op( type='unsqueeze2', inputs={'X': singular}, attrs={'axes': [-2]}, outputs={'Out': st, 'XShape': st_shape}) dims = list(range(len(vt.shape))) perm = dims[:-2] + [dims[-1]] + [dims[-2]] v = helper.create_variable_for_type_inference(dtype) v_shape = helper.create_variable_for_type_inference(dtype) helper.append_op( 
type='transpose2', inputs={'X': [vt]}, outputs={'Out': [v], 'XShape': [v_shape]}, attrs={'axis': perm}) out_1 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='elementwise_mul', inputs={'X': v, 'Y': st}, outputs={'Out': out_1}, attrs={'axis': -1, 'use_mkldnn': False}) out_1 = helper.append_activation(out_1) out_2 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='matmul_v2', inputs={'X': out_1, 'Y': u}, outputs={'Out': out_2}, attrs={'trans_x': False, 'trans_y': True}, ) return out_2 else: helper = LayerHelper('pinv', **locals()) dtype = x.dtype check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'pinv') if dtype == paddle.complex128: s_type = 'float64' elif dtype == paddle.complex64: s_type = 'float32' else: s_type = dtype u = helper.create_variable_for_type_inference(dtype) s = helper.create_variable_for_type_inference(s_type) helper.append_op( type='eigh', inputs={'X': x}, outputs={'Eigenvalues': s, 'Eigenvectors': u}, attrs={'UPLO': 'L'}) s_abs = helper.create_variable_for_type_inference(s_type) helper.append_op( type='abs', inputs={'X': s}, outputs={'Out': s_abs}) max_singular_val = helper.create_variable_for_type_inference(s_type) helper.append_op( type='reduce_max', inputs={'X': s_abs}, outputs={'Out': max_singular_val}, attrs={'dim': [-1], 'keep_dim': True, 'reduce_all': False}) rcond = layers.fill_constant(shape=[1], value=rcond, dtype=s_type) cutoff = rcond * max_singular_val y = float('inf') y = layers.fill_constant(shape=[1], value=y, dtype=s_type) condition = s_abs > cutoff cond_int = layers.cast(condition, s_type) cond_not_int = layers.cast(layers.logical_not(condition), s_type) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st = helper.create_variable_for_type_inference(dtype=s_type) st_shape = helper.create_variable_for_type_inference(dtype=s_type) helper.append_op( type='unsqueeze2', inputs={'X': singular}, attrs={'axes': [-2]}, outputs={'Out': st, 'XShape': st_shape}) out_1 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='elementwise_mul', inputs={'X': u, 'Y': st}, outputs={'Out': out_1}, attrs={'axis': -1, 'use_mkldnn': False}) out_1 = helper.append_activation(out_1) u_conj = helper.create_variable_for_type_inference(dtype) helper.append_op( type='conj', inputs={'X': u}, outputs={'Out': [u_conj]}) out_2 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='matmul_v2', inputs={'X': out_1, 'Y': u_conj}, outputs={'Out': out_2}, attrs={'trans_x': False, 'trans_y': True}, ) return out_2 def solve(x, y, name=None): r""" Computes the solution of a square system of linear equations with a unique solution for input 'X' and 'Y'. Let :math: `X` be a sqaure matrix or a batch of square matrices, :math:`Y` be a vector/matrix or a batch of vectors/matrices, the equation should be: .. math:: Out = X^-1 * Y Specifically, - This system of linear equations has one solution if and only if input 'X' is invertible. Args: x (Tensor): A square matrix or a batch of square matrices. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): A vector/matrix or a batch of vectors/matrices. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. name(str, optional): Name for the operation (optional, default is None). 
For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of a square system of linear equations with a unique solution for input 'x' and 'y'. Its data type should be the same as that of `x`. Examples: .. code-block:: python # a square system of linear equations: # 2*X0 + X1 = 9 # X0 + 2*X1 = 8 import paddle import numpy as np np_x = np.array([[3, 1],[1, 2]]) np_y = np.array([9, 8]) x = paddle.to_tensor(np_x, dtype="float64") y = paddle.to_tensor(np_y, dtype="float64") out = paddle.linalg.solve(x, y) print(out) # [2., 3.]) """ if paddle.in_dynamic_mode(): return _C_ops.solve(x, y) inputs = {"X": [x], "Y": [y]} helper = LayerHelper("solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type="solve", inputs={"X": x, "Y": y}, outputs={"Out": out}) return out def triangular_solve(x, y, upper=True, transpose=False, unitriangular=False, name=None): r""" Computes the solution of a system of equations with a triangular coefficient matrix `x` and multiple right-hand sides `y` . Input `x` and `y` is 2D matrices or batches of 2D matrices. If the inputs are batches, the outputs is also batches. Args: x (Tensor): The input triangular coefficient matrix. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. upper (bool, optional): Whether to solve the upper-triangular system of equations (default) or the lower-triangular system of equations. Default: True. transpose (bool, optional): whether `x` should be transposed before calculation. Default: False. unitriangular (bool, optional): whether `x` is unit triangular. If True, the diagonal elements of `x` are assumed to be 1 and not referenced from `x` . Default: False. name(str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of the system of equations. Its data type should be the same as that of `x`. Examples: .. code-block:: python # a square system of linear equations: # x1 + x2 + x3 = 0 # 2*x2 + x3 = -9 # -x3 = 5 import paddle import numpy as np x = paddle.to_tensor([[1, 1, 1], [0, 2, 1], [0, 0,-1]], dtype="float64") y = paddle.to_tensor([[0], [-9], [5]], dtype="float64") out = paddle.linalg.triangular_solve(x, y, upper=True) print(out) # [7, -2, -5] """ if paddle.in_dynamic_mode(): return _C_ops.triangular_solve(x, y, 'upper', upper, 'transpose', transpose, 'unitriangular', unitriangular) inputs = {"X": [x], "Y": [y]} helper = LayerHelper("triangular_solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'triangular_solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'triangular_solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='triangular_solve', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs={ 'upper': upper, 'transpose': transpose, 'unitriangular': unitriangular }) return out def cholesky_solve(x, y, upper=False, name=None): r""" Solves a linear system of equations A @ X = B, given A's Cholesky factor matrix u and matrix B. Input `x` and `y` is 2D matrices or batches of 2D matrices. 
If the inputs are batches, the outputs is also batches. Args: x (Tensor): The input matrix which is upper or lower triangular Cholesky factor of square matrix A. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. upper (bool, optional): whether to consider the Cholesky factor as a lower or upper triangular matrix. Default: False. name(str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of the system of equations. Its data type is the same as that of `x`. Examples: .. code-block:: python import paddle u = paddle.to_tensor([[1, 1, 1], [0, 2, 1], [0, 0,-1]], dtype="float64") b = paddle.to_tensor([[0], [-9], [5]], dtype="float64") out = paddle.linalg.cholesky_solve(b, u, upper=True) print(out) # [-2.5, -7, 9.5] """ if paddle.in_dynamic_mode(): return _C_ops.cholesky_solve(x, y, 'upper', upper) helper = LayerHelper("cholesky_solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'cholesky_solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'cholesky_solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='cholesky_solve', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs={'upper': upper}) return out def eigvalsh(x, UPLO='L', name=None): """ Computes the eigenvalues of a complex Hermitian (conjugate symmetric) or a real symmetric matrix. Args: x (Tensor): A tensor with shape :math:`[_, M, M]` , The data type of the input Tensor x should be one of float32, float64, complex64, complex128. UPLO(str, optional): Lower triangular part of a (‘L’, default) or the upper triangular part (‘U’). name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor eigenvalues in ascending order. Examples: .. code-block:: python import numpy as np import paddle x_data = np.array([[1, -2j], [2j, 5]]) x = paddle.to_tensor(x_data) out_value = paddle.eigvalsh(x, UPLO='L') print(out_value) #[0.17157288, 5.82842712] """ if paddle.in_dynamic_mode(): is_test = x.stop_gradient values, _ = _C_ops.eigvalsh(x, 'UPLO', UPLO, 'is_test', is_test) return values def __check_input(x, UPLO): x_shape = list(x.shape) if len(x.shape) < 2: raise ValueError( "Input(input) only support >=2 tensor, but received " "length of Input(input) is %s." % len(x.shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The input matrix must be batches of square matrices. But received x's dimention: {}". format(x_shape)) if UPLO != 'L' and UPLO != 'U': raise ValueError( "UPLO must be L or U. But received UPLO is: {}".format(UPLO)) __check_input(x, UPLO) helper = LayerHelper('eigvalsh', **locals()) check_variable_and_dtype(x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigvalsh') out_value = helper.create_variable_for_type_inference(dtype=x.dtype) out_vector = helper.create_variable_for_type_inference(dtype=x.dtype) is_test = x.stop_gradient helper.append_op( type='eigvalsh', inputs={'X': x}, outputs={'Eigenvalues': out_value, 'Eigenvectors': out_vector}, attrs={'UPLO': UPLO, 'is_test': is_test}) return out_value # MASKED: lstsq function (lines 2833-3005)
def lstsq(x, y, rcond=None, driver=None, name=None): """ Computes a solution to the least squares problem of a system of linear equations. Args: x (Tensor): A tensor with shape ``(*, M, N)`` , the data type of the input Tensor ``x`` should be one of float32, float64. y (Tensor): A tensor with shape ``(*, M, K)`` , the data type of the input Tensor ``y`` should be one of float32, float64. rcond(float, optional): The default value is None. A float pointing number used to determine the effective rank of ``x``. If ``rcond`` is None, it will be set to max(M, N) times the machine precision of x_dtype. driver(str, optional): The default value is None. The name of LAPACK method to be used. For CPU inputs the valid values are ‘gels’, ‘gelsy’, ‘gelsd, ‘gelss’. For CUDA input, the only valid driver is ‘gels’. If ``driver`` is None, ‘gelsy’ is used for CPU inputs and ‘gels’ for CUDA inputs. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tuple: A tuple of 4 Tensors which is (``solution``, ``residuals``, ``rank``, ``singular_values``). ``solution`` is a tensor with shape ``(*, N, K)``, meaning the least squares solution. ``residuals`` is a tensor with shape ``(*, K)``, meaning the squared residuals of the solutions, which is computed when M > N and every matrix in ``x`` is full-rank, otherwise return an empty tensor. ``rank`` is a tensor with shape ``(*)``, meaning the ranks of the matrices in ``x``, which is computed when ``driver`` in (‘gelsy’, ‘gelsd’, ‘gelss’), otherwise return an empty tensor. ``singular_values`` is a tensor with shape ``(*, min(M, N))``, meaning singular values of the matrices in ``x``, which is computed when ``driver`` in (‘gelsd’, ‘gelss’), otherwise return an empty tensor. Examples: .. code-block:: python import paddle paddle.set_device("cpu") x = paddle.to_tensor([[1, 3], [3, 2], [5, 6.]]) y = paddle.to_tensor([[3, 4, 6], [5, 3, 4], [1, 2, 1.]]) results = paddle.linalg.lstsq(x, y, driver="gelsd") print(results[0]) # [[ 0.78350395, -0.22165027, -0.62371236], # [-0.11340097, 0.78866047, 1.14948535]] print(results[1]) # [19.81443405, 10.43814468, 30.56185532]) print(results[2]) # 2 print(results[3]) # [9.03455734, 1.54167950] x = paddle.to_tensor([[10, 2, 3], [3, 10, 5], [5, 6, 12.]]) y = paddle.to_tensor([[4, 2, 9], [2, 0, 3], [2, 5, 3.]]) results = paddle.linalg.lstsq(x, y, driver="gels") print(results[0]) # [[ 0.39386186, 0.10230173, 0.93606132], # [ 0.10741687, -0.29028133, 0.11892585], # [-0.05115091, 0.51918161, -0.19948854]] print(results[1]) # [] """ device = paddle.get_device() if device == "cpu": if driver not in (None, "gels", "gelss", "gelsd", "gelsy"): raise ValueError( "Only support valid driver is 'gels', 'gelss', 'gelsd', 'gelsy' or None for CPU inputs. But got {}". format(driver)) driver = "gelsy" if driver is None else driver elif "gpu" in device: if driver not in (None, "gels"): raise ValueError( "Only support valid driver is 'gels' or None for CUDA inputs. But got {}". format(driver)) driver = "gels" if driver is None else driver else: raise RuntimeError("Only support lstsq api for CPU or CUDA device.") if x.dtype == y.dtype and x.dtype in (paddle.float32, paddle.float64): pass else: raise ValueError( "Only support x and y have the same dtype such as 'float32' and 'float64'." 
) if rcond is None: if x.dtype == paddle.float32: rcond = 1e-7 * max(x.shape[-2], x.shape[-1]) elif x.dtype == paddle.float64: rcond = 1e-15 * max(x.shape[-2], x.shape[-1]) if paddle.in_dynamic_mode(): solution, rank, singular_values = _C_ops.lstsq(x, y, "rcond", rcond, "driver", driver) if x.shape[-2] > x.shape[-1]: matmul_out = _varbase_creator(dtype=x.dtype) _C_ops.matmul(x, solution, matmul_out, 'trans_x', False, 'trans_y', False) minus_out = _C_ops.elementwise_sub(matmul_out, y) pow_out = _C_ops.pow(minus_out, 'factor', 2) residuals = _C_ops.reduce_sum(pow_out, 'dim', [-2], 'keepdim', False, 'reduce_all', False) else: residuals = paddle.empty(shape=[0], dtype=x.dtype) if driver == "gels": rank = paddle.empty(shape=[0], dtype=paddle.int32) singular_values = paddle.empty(shape=[0], dtype=x.dtype) elif driver == "gelsy": singular_values = paddle.empty(shape=[0], dtype=x.dtype) return solution, residuals, rank, singular_values helper = LayerHelper('lstsq', **locals()) check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq') check_variable_and_dtype( y, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq') solution = helper.create_variable_for_type_inference(dtype=x.dtype) residuals = helper.create_variable_for_type_inference(dtype=x.dtype) rank = helper.create_variable_for_type_inference(dtype=paddle.int32) singular_values = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='lstsq', inputs={'X': x, 'Y': y}, outputs={ 'Solution': solution, 'Rank': rank, 'SingularValues': singular_values }, attrs={'rcond': rcond, 'driver': driver}) matmul_out = helper.create_variable_for_type_inference(dtype=x.dtype) minus_out = helper.create_variable_for_type_inference(dtype=x.dtype) pow_out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matmul_v2', inputs={'X': x, 'Y': solution}, outputs={'Out': matmul_out}, attrs={ 'trans_x': False, 'trans_y': False, }) helper.append_op( type='elementwise_sub', inputs={'X': matmul_out, 'Y': y}, outputs={'Out': minus_out}) helper.append_op( type='pow', inputs={'X': minus_out}, outputs={'Out': pow_out}, attrs={'factor': 2}) helper.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': residuals}, attrs={'dim': [-2], 'keep_dim': False, 'reduce_all': False}) if driver == "gels": rank = paddle.static.data(name='rank', shape=[0]) singular_values = paddle.static.data(name='singular_values', shape=[0]) elif driver == "gelsy": singular_values = paddle.static.data(name='singular_values', shape=[0]) return solution, residuals, rank, singular_values
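# Illustrative aside (not part of the original file): a minimal usage sketch of
# `lstsq` that checks the residual branch above -- when M > N and every matrix
# in `x` is full rank, `residuals` equals the explicit squared error of the
# solution. Assumes a CPU Paddle build; the tensors are made up for illustration.
import paddle

paddle.set_device("cpu")

x = paddle.to_tensor([[1., 1.], [1., 2.], [1., 3.], [1., 4.]])   # 4 x 2, full rank (M > N)
y = paddle.to_tensor([[6.], [5.], [7.], [10.]])

solution, residuals, rank, singular_values = paddle.linalg.lstsq(x, y, driver="gelsd")

# Should match `residuals` as computed inside lstsq via matmul/sub/pow/reduce_sum.
manual = ((paddle.matmul(x, solution) - y) ** 2).sum(axis=-2)
print(residuals)
print(manual)
# With driver="gels", `rank` and `singular_values` come back as empty tensors,
# matching the branch handling in the implementation above.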
2833
3005
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from ..fluid.layer_helper import LayerHelper from ..framework import _varbase_creator, _dygraph_tracer from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype from ..static import Variable from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode from ..fluid.layers import transpose, cast # noqa: F401 from ..fluid import layers import paddle from paddle.common_ops_import import core from paddle.common_ops_import import VarDesc from paddle import _C_ops __all__ = [] def matmul(x, y, transpose_x=False, transpose_y=False, name=None): """ Applies matrix multiplication to two tensors. `matmul` follows the complete broadcast rules, and its behavior is consistent with `np.matmul`. Currently, the input tensors' number of dimensions can be any, `matmul` can be used to achieve the `dot`, `matmul` and `batchmatmul`. The actual behavior depends on the shapes of :math:`x`, :math:`y` and the flag values of :attr:`transpose_x`, :attr:`transpose_y`. Specifically: - If a transpose flag is specified, the last two dimensions of the tensor are transposed. If the tensor is ndim-1 of shape, the transpose is invalid. If the tensor is ndim-1 of shape :math:`[D]`, then for :math:`x` it is treated as :math:`[1, D]`, whereas for :math:`y` it is the opposite: It is treated as :math:`[D, 1]`. The multiplication behavior depends on the dimensions of `x` and `y`. Specifically: - If both tensors are 1-dimensional, the dot product result is obtained. - If both tensors are 2-dimensional, the matrix-matrix product is obtained. - If the `x` is 1-dimensional and the `y` is 2-dimensional, a `1` is prepended to its dimension in order to conduct the matrix multiply. After the matrix multiply, the prepended dimension is removed. - If the `x` is 2-dimensional and `y` is 1-dimensional, the matrix-vector product is obtained. - If both arguments are at least 1-dimensional and at least one argument is N-dimensional (where N > 2), then a batched matrix multiply is obtained. If the first argument is 1-dimensional, a 1 is prepended to its dimension in order to conduct the batched matrix multiply and removed after. If the second argument is 1-dimensional, a 1 is appended to its dimension for the purpose of the batched matrix multiple and removed after. The non-matrix (exclude the last two dimensions) dimensions are broadcasted according the broadcast rule. For example, if input is a (j, 1, n, m) tensor and the other is a (k, m, p) tensor, out will be a (j, k, n, p) tensor. Args: x (Tensor): The input tensor which is a Tensor. y (Tensor): The input tensor which is a Tensor. transpose_x (bool): Whether to transpose :math:`x` before multiplication. transpose_y (bool): Whether to transpose :math:`y` before multiplication. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The output Tensor. Examples: .. 
code-block:: python import paddle import numpy as np # vector * vector x_data = np.random.random([10]).astype(np.float32) y_data = np.random.random([10]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [1] # matrix * vector x_data = np.random.random([10, 5]).astype(np.float32) y_data = np.random.random([5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10] # batched matrix * broadcasted vector x_data = np.random.random([10, 5, 2]).astype(np.float32) y_data = np.random.random([2]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 5] # batched matrix * batched matrix x_data = np.random.random([10, 5, 2]).astype(np.float32) y_data = np.random.random([10, 2, 5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 5, 5] # batched matrix * broadcasted matrix x_data = np.random.random([10, 1, 5, 2]).astype(np.float32) y_data = np.random.random([1, 3, 2, 5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 3, 5, 5] """ if in_dygraph_mode(): return _C_ops.final_state_matmul(x, y, transpose_x, transpose_y) if _in_legacy_dygraph(): op_type = 'matmul_v2' op = getattr(_C_ops, op_type) return op(x, y, 'trans_x', transpose_x, 'trans_y', transpose_y) attrs = { 'trans_x': transpose_x, 'trans_y': transpose_y, } def __check_input(x, y): var_names = {'x': x, 'y': y} for name, val in var_names.items(): check_variable_and_dtype( val, name, ['float16', 'float32', 'float64', 'complex64', 'complex128'], 'matmul') __check_input(x, y) helper = LayerHelper('matmul_v2', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matmul_v2', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs=attrs) return out def norm(x, p='fro', axis=None, keepdim=False, name=None): """ Returns the matrix norm (Frobenius) or vector norm (the 1-norm, the Euclidean or 2-norm, and in general the p-norm for p > 0) of a given tensor. .. note:: This norm API is different from `numpy.linalg.norm`. This api supports high-order input tensors (rank >= 3), and certain axis need to be pointed out to calculate the norm. But `numpy.linalg.norm` only supports 1-D vector or 2-D matrix as input tensor. For p-order matrix norm, this api actually treats matrix as a flattened vector to calculate the vector norm, NOT REAL MATRIX NORM. Args: x (Tensor): The input tensor could be N-D tensor, and the input data type could be float32 or float64. p (float|string, optional): Order of the norm. Supported values are `fro`, `0`, `1`, `2`, `inf`, `-inf` and any positive real number yielding the corresponding p-norm. Not supported: ord < 0 and nuclear norm. Default value is `fro`. axis (int|list|tuple, optional): The axis on which to apply norm operation. If axis is int or list(int)/tuple(int) with only one element, the vector norm is computed over the axis. If `axis < 0`, the dimension to norm operation is rank(input) + axis. If axis is a list(int)/tuple(int) with two elements, the matrix norm is computed over the axis. Defalut value is `None`. keepdim (bool, optional): Whether to reserve the reduced dimension in the output Tensor. 
The result tensor will have fewer dimension than the :attr:`input` unless :attr:`keepdim` is true, default value is False. name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: results of norm operation on the specified axis of input tensor, it's data type is the same as input's Tensor. Examples: .. code-block:: python import paddle import numpy as np shape=[2, 3, 4] np_input = np.arange(24).astype('float32') - 12 np_input = np_input.reshape(shape) x = paddle.to_tensor(np_input) #[[[-12. -11. -10. -9.] [ -8. -7. -6. -5.] [ -4. -3. -2. -1.]] # [[ 0. 1. 2. 3.] [ 4. 5. 6. 7.] [ 8. 9. 10. 11.]]] # compute frobenius norm along last two dimensions. out_fro = paddle.linalg.norm(x, p='fro', axis=[0,1]) # out_fro.numpy() [17.435596 16.911535 16.7332 16.911535] # compute 2-order vector norm along last dimension. out_pnorm = paddle.linalg.norm(x, p=2, axis=-1) #out_pnorm.numpy(): [[21.118711 13.190906 5.477226] # [ 3.7416575 11.224972 19.131126]] # compute 2-order norm along [0,1] dimension. out_pnorm = paddle.linalg.norm(x, p=2, axis=[0,1]) #out_pnorm.numpy(): [17.435596 16.911535 16.7332 16.911535] # compute inf-order norm out_pnorm = paddle.linalg.norm(x, p=np.inf) #out_pnorm.numpy() = [12.] out_pnorm = paddle.linalg.norm(x, p=np.inf, axis=0) #out_pnorm.numpy(): [[12. 11. 10. 9.] [8. 7. 6. 7.] [8. 9. 10. 11.]] # compute -inf-order norm out_pnorm = paddle.linalg.norm(x, p=-np.inf) #out_pnorm.numpy(): [0.] out_pnorm = paddle.linalg.norm(x, p=-np.inf, axis=0) #out_pnorm.numpy(): [[0. 1. 2. 3.] [4. 5. 6. 5.] [4. 3. 2. 1.]] """ def frobenius_norm(input, dim=None, keepdim=False, name=None): """ The frobenius norm OP is to calculate the frobenius norm of certain two dimensions of Tensor `input`. Args: input (Variable): Tensor, data type float32, float64. dim (list, optional): None for last two dimensions. keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False. """ if dim is not None and not (isinstance(dim, list) and len(dim) == 2): raise ValueError( "The dim of frobenius norm op should be None or two elements list!" ) if paddle.in_dynamic_mode(): if dim is None: return _C_ops.frobenius_norm(input, 'keep_dim', keepdim, 'reduce_all', True) return _C_ops.frobenius_norm(input, 'dim', dim, 'keep_dim', keepdim, 'reduce_all', False) attrs = {'dim': dim, 'keep_dim': keepdim, 'reduce_all': False} if dim is None: attrs['reduce_all'] = True check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'frobenius_norm') helper = LayerHelper('frobenius_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op( type='frobenius_norm', inputs={'X': input}, outputs={'Out': out}, attrs=attrs) return out def vector_norm(input, porder=None, axis=None, keepdim=False, asvector=False, name=None): """ Calculate the p-order vector norm for certain dimension of Tensor `input`. Args: input (Variable): Tensor, data type float32, float64. porder (float, optional): None for porder=2.0. axis (int, optional): None for last dimension. keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False. 
""" if paddle.in_dynamic_mode(): if axis is None: axis = -1 return _C_ops.p_norm(input, 'porder', porder, 'axis', axis, 'keepdim', keepdim, 'asvector', asvector) if porder is not None: check_type(porder, 'porder', (float, int), 'p_norm') if axis is not None: check_type(axis, 'axis', (int), 'p_norm') check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'p_norm') attrs = { 'axis': axis if axis is not None else -1, 'porder': float(porder) if porder is not None else 2.0, 'keepdim': keepdim, 'asvector': asvector, 'epsilon': 1e-12, } helper = LayerHelper('p_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op( type='p_norm', inputs={'X': input}, outputs={'Out': out}, attrs=attrs) return out def inf_norm(input, porder=None, axis=axis, keepdim=False, asvector=False, name=None): helper = LayerHelper('frobenius_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op(type='abs', inputs={'X': input}, outputs={'Out': out}) reduce_out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) reduce_all = True if axis == None or axis == [] or asvector == True else False axis = axis if axis != None and axis != [] else [0] reduce_type = 'reduce_max' if porder == np.float( 'inf') else 'reduce_min' helper.append_op( type=reduce_type, inputs={'X': out}, outputs={'Out': reduce_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) return reduce_out def p_matrix_norm(input, porder=1., axis=axis, keepdim=False, name=None): """ NOTE: This function actually treats the matrix as flattened vector to calculate vector norm instead of matrix norm. """ block = LayerHelper('norm', **locals()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) abs_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='abs', inputs={'X': input}, outputs={'Out': abs_out}) pow_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='pow', inputs={'X': abs_out}, outputs={'Out': pow_out}, attrs={'factor': porder}) sum_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': sum_out}, attrs={ 'dim': axis, 'keep_dim': keepdim, 'reduce_all': True if axis is None else False }) porder block.append_op( type='pow', inputs={'X': sum_out}, outputs={'Out': out}, attrs={'factor': float(1. / porder)}) return out if axis is None and p is not None: if isinstance(p, str): if p == "fro": return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name) else: raise ValueError( "only valid string values are 'fro', found {}".format(p)) elif isinstance(p, (int, float)): return vector_norm( x, porder=p, axis=axis, keepdim=keepdim, asvector=True, name=name) else: raise ValueError("only valid p type is string or float, found {}". 
format(type(p))) if isinstance(axis, tuple): axis = list(axis) if isinstance(axis, list) and len(axis) == 1: axis = axis[0] #calculate vector norm, where axis is int or list with only one integer if isinstance(axis, int): if isinstance(p, str): if p == "fro": return vector_norm( x, porder=2, axis=axis, keepdim=keepdim, asvector=False, name=name) else: raise ValueError( "only valid string values are 'fro', found {}".format(p)) elif isinstance(p, (int, float)): return vector_norm( x, axis=axis, porder=p, keepdim=keepdim, asvector=False, name=name) else: raise ValueError( "unspport p for p-order vector norm. except float, found {}". format(p)) #calculate matrix norm, where axis is list with two integers elif isinstance(axis, list) and len(axis) == 2: if p == "fro": return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name) elif p == np.inf or p == -np.inf: return inf_norm(x, porder=p, axis=axis, keepdim=keepdim, name=name) elif p == 0: raise ValueError( "just suport axis type int or list (length of list <=1) if p = 0, found {}". format(axis)) else: return p_matrix_norm( x, porder=p, axis=axis, keepdim=keepdim, name=name) else: raise ValueError( "except axis type int or list (length of list <=2), found {}". format(axis)) def dist(x, y, p=2, name=None): r""" This OP returns the p-norm of (x - y). It is not a norm in a strict sense, only as a measure of distance. The shapes of x and y must be broadcastable. The definition is as follows, for details, please refer to the `numpy's broadcasting <https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`_: - Each input has at least one dimension. - Match the two input dimensions from back to front, the dimension sizes must either be equal, one of them is 1, or one of them does not exist. Where, z = x - y, the shapes of x and y are broadcastable, then the shape of z can be obtained as follows: 1. If the number of dimensions of x and y are not equal, prepend 1 to the dimensions of the tensor with fewer dimensions. For example, The shape of x is [8, 1, 6, 1], the shape of y is [7, 1, 5], prepend 1 to the dimension of y. x (4-D Tensor): 8 x 1 x 6 x 1 y (4-D Tensor): 1 x 7 x 1 x 5 2. Determine the size of each dimension of the output z: choose the maximum value from the two input dimensions. z (4-D Tensor): 8 x 7 x 6 x 5 If the number of dimensions of the two inputs are the same, the size of the output can be directly determined in step 2. When p takes different values, the norm formula is as follows: When p = 0, defining $0^0=0$, the zero-norm of z is simply the number of non-zero elements of z. .. math:: ||z||_{0}=\lim_{p \\rightarrow 0}\sum_{i=1}^{m}|z_i|^{p} When p = inf, the inf-norm of z is the maximum element of z. .. math:: ||z||_\infty=\max_i |z_i| When p = -inf, the negative-inf-norm of z is the minimum element of z. .. math:: ||z||_{-\infty}=\min_i |z_i| Otherwise, the p-norm of z follows the formula, .. math:: ||z||_{p}=(\sum_{i=1}^{m}|z_i|^p)^{\\frac{1}{p}} Args: x (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64. y (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64. p (float, optional): The norm to be computed, its data type is float32 or float64. Default: 2. Returns: Tensor: Tensor that is the p-norm of (x - y). Examples: .. code-block:: python import paddle import numpy as np x = paddle.to_tensor(np.array([[3, 3],[3, 3]]), "float32") y = paddle.to_tensor(np.array([[3, 3],[3, 1]]), "float32") out = paddle.dist(x, y, 0) print(out) # out = [1.] out = paddle.dist(x, y, 2) print(out) # out = [2.] 
out = paddle.dist(x, y, float("inf")) print(out) # out = [2.] out = paddle.dist(x, y, float("-inf")) print(out) # out = [0.] """ check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'dist') check_variable_and_dtype(y, 'dtype', ['float32', 'float64'], 'dist') check_type(p, 'p', (float, int), 'dist') helper = LayerHelper("dist", **locals()) out = helper.create_variable_for_type_inference(x.dtype) inputs = {"X": [x], "Y": [y]} outputs = {'Out': [out]} attrs = {"p": float(p)} helper.append_op( type='dist', inputs=inputs, outputs={'Out': out}, attrs=attrs) return out def cond(x, p=None, name=None): """ Computes the condition number of a matrix or batches of matrices with respect to a matrix norm ``p``. Args: x (Tensor): The input tensor could be tensor of shape ``(*, m, n)`` where ``*`` is zero or more batch dimensions for ``p`` in ``(2, -2)``, or of shape ``(*, n, n)`` where every matrix is invertible for any supported ``p``. And the input data type could be ``float32`` or ``float64``. p (float|string, optional): Order of the norm. Supported values are `fro`, `nuc`, `1`, `-1`, `2`, `-2`, `inf`, `-inf`. Default value is `None`, meaning that the order of the norm is `2`. name (str, optional): The default value is `None`. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: computing results of condition number, its data type is the same as input Tensor ``x``. Examples: .. code-block:: python import paddle import numpy as np x = paddle.to_tensor([[1., 0, -1], [0, 1, 0], [1, 0, 1]]) # compute conditional number when p is None out = paddle.linalg.cond(x) # out.numpy() [1.4142135] # compute conditional number when order of the norm is 'fro' out_fro = paddle.linalg.cond(x, p='fro') # out_fro.numpy() [3.1622777] # compute conditional number when order of the norm is 'nuc' out_nuc = paddle.linalg.cond(x, p='nuc') # out_nuc.numpy() [9.2426405] # compute conditional number when order of the norm is 1 out_1 = paddle.linalg.cond(x, p=1) # out_1.numpy() [2.] # compute conditional number when order of the norm is -1 out_minus_1 = paddle.linalg.cond(x, p=-1) # out_minus_1.numpy() [1.] # compute conditional number when order of the norm is 2 out_2 = paddle.linalg.cond(x, p=2) # out_2.numpy() [1.4142135] # compute conditional number when order of the norm is -1 out_minus_2 = paddle.linalg.cond(x, p=-2) # out_minus_2.numpy() [0.70710677] # compute conditional number when order of the norm is inf out_inf = paddle.linalg.cond(x, p=np.inf) # out_inf.numpy() [2.] # compute conditional number when order of the norm is -inf out_minus_inf = paddle.linalg.cond(x, p=-np.inf) # out_minus_inf.numpy() [1.] 
a = paddle.to_tensor(np.random.randn(2, 4, 4).astype('float32')) # a.numpy() # [[[ 0.14063153 -0.996288 0.7996131 -0.02571543] # [-0.16303636 1.5534962 -0.49919784 -0.04402903] # [-1.1341571 -0.6022629 0.5445269 0.29154757] # [-0.16816919 -0.30972657 1.7521842 -0.5402487 ]] # [[-0.58081484 0.12402827 0.7229862 -0.55046535] # [-0.15178485 -1.1604939 0.75810957 0.30971205] # [-0.9669573 1.0940945 -0.27363303 -0.35416734] # [-1.216529 2.0018666 -0.7773689 -0.17556527]]] a_cond_fro = paddle.linalg.cond(a, p='fro') # a_cond_fro.numpy() [31.572273 28.120834] b = paddle.to_tensor(np.random.randn(2, 3, 4).astype('float64')) # b.numpy() # [[[ 1.61707487 0.46829144 0.38130416 0.82546736] # [-1.72710298 0.08866375 -0.62518804 0.16128892] # [-0.02822879 -1.67764516 0.11141444 0.3220113 ]] # [[ 0.22524372 0.62474921 -0.85503233 -1.03960523] # [-0.76620689 0.56673047 0.85064753 -0.45158196] # [ 1.47595418 2.23646462 1.5701758 0.10497519]]] b_cond_2 = paddle.linalg.cond(b, p=2) # b_cond_2.numpy() [3.30064451 2.51976252] """ def mat_norm(input, porder=1., axis=None): """ NOTE: Calculate the matrix norm of a square matrix or batches of square matrices, when porder is in (1, -1, inf, -inf) """ reduce_all = True if axis is None or axis == [] else False axis = axis if axis != None and axis != [] else [0] keepdim = False if paddle.in_dynamic_mode(): abs_out = _C_ops.abs(input) sum_out = _C_ops.reduce_sum(abs_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == 1 or porder == np.inf: return _C_ops.reduce_max(sum_out, 'dim', [-1], 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == -1 or porder == -np.inf: return _C_ops.reduce_min(sum_out, 'dim', [-1], 'keepdim', keepdim, 'reduce_all', reduce_all) block = LayerHelper('norm', **locals()) abs_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='abs', inputs={'X': input}, outputs={'Out': abs_out}) block.append_op( type='reduce_sum', inputs={'X': abs_out}, outputs={'Out': sum_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) if porder == 1 or porder == np.inf: block.append_op( type='reduce_max', inputs={'X': sum_out}, outputs={'Out': out}, attrs={ 'dim': [-1], 'keep_dim': keepdim, 'reduce_all': reduce_all }) if porder == -1 or porder == -np.inf: block.append_op( type='reduce_min', inputs={'X': sum_out}, outputs={'Out': out}, attrs={ 'dim': [-1], 'keep_dim': keepdim, 'reduce_all': reduce_all }) return out def fro_norm(input, porder=2, axis=[-1]): """ NOTE: Calculate the frobenius norm of a square matrix or batches of square matrices. """ reduce_all = True if axis is None or axis == [] else False keepdim = False if paddle.in_dynamic_mode(): pow_out = _C_ops.pow(input, 'factor', porder) sum_out_1 = _C_ops.reduce_sum(pow_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) sum_out_2 = _C_ops.reduce_sum(sum_out_1, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) return _C_ops.pow(sum_out_2, 'factor', float(1. 
/ porder)) block = LayerHelper('norm', **locals()) pow_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out_1 = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out_2 = block.create_variable_for_type_inference( dtype=block.input_dtype()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='pow', inputs={'X': input}, outputs={'Out': pow_out}, attrs={'factor': porder}) block.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': sum_out_1}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='reduce_sum', inputs={'X': sum_out_1}, outputs={'Out': sum_out_2}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='pow', inputs={'X': sum_out_2}, outputs={'Out': out}, attrs={'factor': float(1. / porder)}) return out def svd_norm(input, porder, axis=[-1]): """ NOTE: Calculate the matrix norm, which is related to singular values, of a matrix or batches of matrices, including nuclear norm, 2-norm and (-2)-norm. """ reduce_all = True if axis is None or axis == [] else False keepdim = False u, s, vh = svd(input, full_matrices=False) if paddle.in_dynamic_mode(): if porder == "nuc": return _C_ops.reduce_sum(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) max_out = _C_ops.reduce_max(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) min_out = _C_ops.reduce_min(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == 2: return _C_ops.elementwise_div(max_out, min_out, 'aixs', axis, 'use_mkldnn', False) if porder == -2: return _C_ops.elementwise_div(min_out, max_out, 'aixs', axis, 'use_mkldnn', False) block = LayerHelper('norm', **locals()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) if porder == "nuc": block.append_op( type='reduce_sum', inputs={'X': s}, outputs={'Out': out}, attrs={ 'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all }) return out max_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) min_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='reduce_max', inputs={'X': s}, outputs={'Out': max_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='reduce_min', inputs={'X': s}, outputs={'Out': min_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) if porder == 2: block.append_op( type='elementwise_div', inputs={'X': max_out, 'Y': min_out}, outputs={'Out': out}, attrs={'aixs': axis, 'use_mkldnn': False}) return out if porder == -2: block.append_op( type='elementwise_div', inputs={'X': min_out, 'Y': max_out}, outputs={'Out': out}, attrs={'aixs': axis, 'use_mkldnn': False}) return out def empty_tensor(input, shape): if paddle.in_dynamic_mode(): return input.reshape(shape) raise ValueError("only support x is nonempty tensor in static mode") x_shape = list(x.shape) if not len(x_shape) >= 2: raise ValueError("input should be a matrix or batches of matrices, " + "but the dimention of received input is {}".format( len(x_shape))) if p == None: p = 2 x_size = 0 if (0 in x_shape) else 1 if p in ("fro", "nuc", 1, -1, np.inf, -np.inf): if x_shape[len(x_shape) - 1] == x_shape[len(x_shape) - 2]: if x_size == 0: return empty_tensor(x, x_shape[:-2]) x_inv = x.inverse() if p == "fro": return fro_norm(x) * fro_norm(x_inv) if p == "nuc": return svd_norm(x, p) * svd_norm(x_inv, p) if p in (1, -1): return mat_norm( x, 
porder=p, axis=[-2]) * mat_norm( x_inv, porder=p, axis=[-2]) if p in (np.inf, -np.inf): return mat_norm( x, porder=p, axis=[-1]) * mat_norm( x_inv, porder=p, axis=[-1]) else: raise ValueError("only support p is {} when input is a ".format(p) + "square matrix or batches of square matrices") elif p in (2, -2): if x_size == 0: return empty_tensor(x, x_shape[:-2]) return svd_norm(x, porder=p) else: raise ValueError( "unsupported {} for p, only supporting ('fro', 'nuc', ".format( p) + "1, -1, 2, -2, inf, -inf) or none") def dot(x, y, name=None): """ This operator calculates inner product for vectors. .. note:: Support 1-d and 2-d Tensor. When it is 2d, the first dimension of this matrix is the batch dimension, which means that the vectors of multiple batches are dotted. Parameters: x(Tensor): 1-D or 2-D ``Tensor``. Its dtype should be ``float32``, ``float64``, ``int32``, ``int64`` y(Tensor): 1-D or 2-D ``Tensor``. Its dtype soulde be ``float32``, ``float64``, ``int32``, ``int64`` name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. Details: :ref:`api_guide_Name` Returns: Tensor: the calculated result Tensor. Examples: .. code-block:: python import paddle import numpy as np x_data = np.random.uniform(0.1, 1, [10]).astype(np.float32) y_data = np.random.uniform(1, 3, [10]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.dot(x, y) print(z) """ op_type = 'dot' # skip var type check in dygraph mode to improve efficiency if paddle.in_dynamic_mode(): op = getattr(_C_ops, op_type) return op(x, y) assert x is not None, 'x cannot be None in {}'.format(op_type) assert y is not None, 'y cannot be None in {}'.format(op_type) check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], op_type) check_variable_and_dtype(y, 'y', ['float32', 'float64', 'int32', 'int64'], op_type) helper = LayerHelper(op_type, **locals()) if name is None: out = helper.create_variable_for_type_inference(dtype=x.dtype) else: out = helper.create_variable( name=name, dtype=x.dtype, persistable=False) helper.append_op( type="dot", inputs={'X': x, 'Y': y}, attrs={}, outputs={"Out": out}) return out def cov(x, rowvar=True, ddof=True, fweights=None, aweights=None, name=None): """ Estimate the covariance matrix of the input variables, given data and weights. A covariance matrix is a square matrix, indicate the covariance of each pair variables in the input matrix. For example, for an N-dimensional samples X=[x1,x2,…xN]T, then the covariance matrix element Cij is the covariance of xi and xj. The element Cii is the variance of xi itself. Parameters: x(Tensor): A N-D(N<=2) Tensor containing multiple variables and observations. By default, each row of x represents a variable. Also see rowvar below. rowvar(Bool, optional): If rowvar is True (default), then each row represents a variable, with observations in the columns. Default: True ddof(Bool, optional): If ddof=True will return the unbiased estimate, and ddof=False will return the simple average. Default: True fweights(Tensor, optional): 1-D Tensor of integer frequency weights; The number of times each observation vector should be repeated. Default: None aweights(Tensor, optional): 1-D Tensor of observation vector weights. How important of the observation vector, larger data means this element is more important. Default: None name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. 
Details: :ref:`api_guide_Name` Returns: Tensor: The covariance matrix Tensor of the variables. Examples: .. code-block:: python import paddle xt = paddle.rand((3,4)) paddle.linalg.cov(xt) ''' Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, [[0.07918842, 0.06127326, 0.01493049], [0.06127326, 0.06166256, 0.00302668], [0.01493049, 0.00302668, 0.01632146]]) ''' """ op_type = 'cov' if len(x.shape) > 2 or len(x.shape) < 1: raise ValueError( "Input(x) only support N-D (1<=N<=2) tensor in cov, but received " "length of Input(input) is %s." % len(x.shape)) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cov') nx = x if len(x.shape) == 1: nx = x.reshape((1, -1)) if not rowvar and nx.shape[0] != 1: nx = nx.t() w = None observation_num = nx.shape[1] if fweights is not None: w = fweights.astype(nx.dtype) if len(w.shape) > 1: raise ValueError( "Input(fweights) only support N-D (N<=1) tensor in cov, but received " "shape of Input(input) is %s." % len(fweights.shape)) if fweights.shape[0] != observation_num: raise ValueError( "The number of Input(fweights) should equal to x's dim[1]: {}, but received " "size of Input(fweights) is {}.".format(observation_num, fweights.shape[0])) if fweights.min() < 0: raise ValueError( "The value of Input(fweights) cannot be negtive, but received " "min of Input(fweights) is {}.".format(fweights.min())) if not paddle.all(fweights == paddle.round(fweights.astype('float64'))): raise ValueError("Input(fweights) must be integer ") if aweights is not None: aw = aweights.astype(nx.dtype) if len(aw.shape) > 1: raise ValueError( "Input(aweights) only support N-D (N<=1) tensor in cov, but received " "length of Input(input) is %s." % len(aweights.shape)) check_variable_and_dtype(aweights, 'dtype', ['float32', 'float64'], 'cov') if aweights.shape[0] != observation_num: raise ValueError( "The number of Input(aweights) should equal to x's dim[1]: {}, but received " "size of Input(aweights) is {}.".format(observation_num, aweights.shape[0])) if aweights.min() < 0: raise ValueError( "The value of Input(aweights) cannot be negtive, but received " "min of Input(aweights) is {}.".format(aweights.min())) if w is not None: w = w * aw else: w = aw w_sum = paddle.to_tensor(observation_num, dtype=nx.dtype) if fweights is not None or aweights is not None: w_sum = w.sum() if w_sum.item() == 0: raise ValueError("The sum of weights is zero, can't be normalized.") if w is not None: nx_w = nx * w avg = (nx_w).sum(axis=1) / w_sum else: avg = nx.sum(axis=1) / w_sum nx_w = nx if w is not None and aweights is not None and ddof == True: norm_factor = w_sum - (w * aweights).sum() / w_sum else: norm_factor = w_sum - ddof if norm_factor <= 0: norm_factor = paddle.to_tensor(0, dtype=nx.dtype) nx = nx - avg.unsqueeze(1) xxt = paddle.mm(nx, nx_w.t().conj()) cov = paddle.divide(xxt, norm_factor).squeeze() return cov def t(input, name=None): """ Transpose <=2-D tensor. 0-D and 1-D tensors are returned as it is and 2-D tensor is equal to the paddle.transpose function which perm dimensions set 0 and 1. Args: input (Tensor): The input Tensor. It is a N-D (N<=2) Tensor of data types float16, float32, float64, int32. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` Returns: Tensor: A transposed n-D Tensor, with data type being float16, float32, float64, int32, int64. For Example: .. 
code-block:: text # Example 1 (0-D tensor) x = tensor([0.79]) paddle.t(x) = tensor([0.79]) # Example 2 (1-D tensor) x = tensor([0.79, 0.84, 0.32]) paddle.t(x) = tensor([0.79, 0.84, 0.32]) # Example 3 (2-D tensor) x = tensor([0.79, 0.84, 0.32], [0.64, 0.14, 0.57]) paddle.t(x) = tensor([0.79, 0.64], [0.84, 0.14], [0.32, 0.57]) Examples: .. code-block:: python import paddle x = paddle.ones(shape=[2, 3], dtype='int32') x_transposed = paddle.t(x) print(x_transposed.shape) # [3, 2] """ if len(input.shape) > 2: raise ValueError( "Input(input) only support N-D (N<=2) tensor, but received " "length of Input(input) is %s. Perhaps you can use paddle." "tensor.transpose() instead." % len(input.shape)) if paddle.in_dynamic_mode(): if len(input.shape) == 1: return input # 2-D tensor perm = [1, 0] out, _ = _C_ops.transpose2(input, 'axis', perm) return out check_variable_and_dtype( input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'], 'transpose') helper = LayerHelper('t', **locals()) out = helper.create_variable_for_type_inference(input.dtype) input_shape = helper.create_variable_for_type_inference(input.dtype) if len(input.shape) == 1: out = input else: helper.append_op( type='transpose2', inputs={'X': [input]}, outputs={'Out': [out], 'XShape': [input_shape]}, attrs={'axis': [1, 0]}) return out def cross(x, y, axis=None, name=None): """ Computes the cross product between two tensors along an axis. Inputs must have the same shape, and the length of their axes should be equal to 3. If `axis` is not given, it defaults to the first axis found with the length 3. Args: x (Tensor): The first input tensor. y (Tensor): The second input tensor. axis (int, optional): The axis along which to compute the cross product. It defaults to the first axis found with the length 3. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor. A Tensor with same data type as `x`. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0], [3.0, 3.0, 3.0]]) y = paddle.to_tensor([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]) z1 = paddle.cross(x, y) # [[-1. -1. -1.] # [ 2. 2. 2.] # [-1. -1. -1.]] z2 = paddle.cross(x, y, axis=1) # [[0. 0. 0.] # [0. 0. 0.] # [0. 0. 0.]] """ if in_dygraph_mode(): return _C_ops.final_state_cross(x, y, axis) else: if _in_legacy_dygraph(): if axis is not None: return _C_ops.cross(x, y, 'dim', axis) else: return _C_ops.cross(x, y) else: helper = LayerHelper("cross", **locals()) out = helper.create_variable_for_type_inference(x.dtype) attrs = dict() attrs['dim'] = axis helper.append_op( type='cross', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs=attrs) return out def cholesky(x, upper=False, name=None): r""" Computes the Cholesky decomposition of one symmetric positive-definite matrix or batches of symmetric positive-definite matrice. If `upper` is `True`, the decomposition has the form :math:`A = U^{T}U` , and the returned matrix :math:`U` is upper-triangular. Otherwise, the decomposition has the form :math:`A = LL^{T}` , and the returned matrix :math:`L` is lower-triangular. Args: x (Tensor): The input tensor. Its shape should be `[*, M, M]`, where * is zero or more batch dimensions, and matrices on the inner-most 2 dimensions all should be symmetric positive-definite. Its data type should be float32 or float64. upper (bool): The flag indicating whether to return upper or lower triangular matrices. Default: False. 
Returns: Tensor: A Tensor with same shape and data type as `x`. It represents \ triangular matrices generated by Cholesky decomposition. Examples: .. code-block:: python import paddle import numpy as np a = np.random.rand(3, 3) a_t = np.transpose(a, [1, 0]) x_data = np.matmul(a, a_t) + 1e-03 x = paddle.to_tensor(x_data) out = paddle.linalg.cholesky(x, upper=False) print(out) # [[1.190523 0. 0. ] # [0.9906703 0.27676893 0. ] # [1.25450498 0.05600871 0.06400121]] """ if paddle.in_dynamic_mode(): return _C_ops.cholesky(x, "upper", upper) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cholesky') check_type(upper, 'upper', bool, 'cholesky') helper = LayerHelper('cholesky', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='cholesky', inputs={'X': [x]}, outputs={'Out': out}, attrs={'upper': upper}) return out def matrix_rank(x, tol=None, hermitian=False, name=None): r""" Computes the rank of a matrix. The rank of a matrix is the number of singular values that are greater than the specified `tol` threshold when hermitian=False, or the number of eigenvalues in absolute value that are greater than the specified `tol` threshold when hermitian=True. Args: x (Tensor): The input tensor. Its shape should be `[..., m, n]`, where `...` is zero or more batch dimensions. If `x` is a batch of matrices then the output has the same batch dimensions. The data type of `x` should be float32 or float64. tol (float,Tensor,optional): the tolerance value. Default: None. If `tol` is not specified, and `sigma` is the largest singular value (or eigenvalues in absolute value), and `eps` is the epsilon value for the dtype of `x`, then `tol` is computed with formula `tol=sigma * max(m,n) * eps`. Note that if `x` is a batch of matrices, `tol` is computed this way for every batch. hermitian (bool,optional): indicates whether `x` is Hermitian. Default: False. When hermitian=True, `x` is assumed to be Hermitian, enabling a more efficient method for finding eigenvalues, but `x` is not checked inside the function. Instead, We just use the lower triangular of the matrix to compute. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: Rank of tensor x. Examples: .. 
code-block:: python import paddle a = paddle.eye(10) b = paddle.linalg.matrix_rank(a) print(b) # b = [10] c = paddle.ones(shape=[3, 4, 5, 5]) d = paddle.linalg.matrix_rank(c, tol=0.01, hermitian=True) print(d) # d = [[1, 1, 1, 1], # [1, 1, 1, 1], # [1, 1, 1, 1]] """ if paddle.in_dynamic_mode(): if tol is None: tol_tensor = None tol_attr = 0.0 use_default_tol = True elif isinstance(tol, Variable): if tol.dtype != x.dtype: tol_tensor = cast(tol, x.dtype) else: tol_tensor = tol tol_attr = 0.0 use_default_tol = False else: tol_tensor = None tol_attr = float(tol) use_default_tol = False return _C_ops.matrix_rank(x, tol_tensor, "tol", tol_attr, 'hermitian', hermitian, 'use_default_tol', use_default_tol) inputs = {} attrs = {} check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'matrix_rank') inputs['X'] = x if tol is None: attrs['use_default_tol'] = True elif isinstance(tol, Variable): check_variable_and_dtype(tol, 'tol', ['float32'], 'matrix_rank') attrs['use_default_tol'] = False if tol.dtype != x.dtype: inputs['TolTensor'] = cast(tol, x.dtype) else: inputs['TolTensor'] = tol else: check_type(tol, 'tol', float, 'matrix_rank') attrs['use_default_tol'] = False attrs['tol'] = tol check_type(hermitian, 'hermitian', bool, 'matrix_rank') attrs['hermitian'] = hermitian helper = LayerHelper('matrix_rank', **locals()) out = helper.create_variable_for_type_inference(dtype='int32') helper.append_op( type='matrix_rank', inputs=inputs, outputs={'Out': out}, attrs=attrs) return out def bmm(x, y, name=None): """ Applies batched matrix multiplication to two tensors. Both of the two input tensors must be three-dementional and share the same batch size. if x is a (b, m, k) tensor, y is a (b, k, n) tensor, the output will be a (b, m, n) tensor. Args: x (Tensor): The input Tensor. y (Tensor): The input Tensor. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The product Tensor. Examples: .. code-block:: python import paddle # In imperative mode: # size x: (2, 2, 3) and y: (2, 3, 2) x = paddle.to_tensor([[[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]], [[3.0, 3.0, 3.0], [4.0, 4.0, 4.0]]]) y = paddle.to_tensor([[[1.0, 1.0],[2.0, 2.0],[3.0, 3.0]], [[4.0, 4.0],[5.0, 5.0],[6.0, 6.0]]]) out = paddle.bmm(x, y) #output size: (2, 2, 2) #output value: #[[[6.0, 6.0],[12.0, 12.0]],[[45.0, 45.0],[60.0, 60.0]]] out_np = out.numpy() """ x_shape = x.shape y_shape = y.shape if not len(x_shape) == len(y_shape) == 3: raise ValueError( "x and y should be 3-dimensional. But received x's dimention: {}, y's dimention: {}". format(x_shape, y_shape)) if x_shape[2] != y_shape[1]: raise ValueError( "x's width must be equal with y's height. But received x's shape: {}, y's shape: {}". format(x_shape, y_shape)) if x_shape[0] != y_shape[0]: raise ValueError( "x's batch (shape[0]) must be equal with y's batch (shape[0]). But received x's shape: {}, y's shape: {}". format(x_shape, y_shape)) if paddle.in_dynamic_mode(): return _C_ops.bmm(x, y) helper = LayerHelper('bmm', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type='bmm', inputs={'X': x, 'Y': y}, outputs={'Out': out}) return out def histogram(input, bins=100, min=0, max=0, name=None): """ Computes the histogram of a tensor. The elements are sorted into equal width bins between min and max. If min and max are both zero, the minimum and maximum values of the data are used. Args: input (Tensor): A Tensor(or LoDTensor) with shape :math:`[N_1, N_2,..., N_k]` . 
The data type of the input Tensor should be float32, float64, int32, int64. bins (int): number of histogram bins min (int): lower end of the range (inclusive) max (int): upper end of the range (inclusive) Returns: Tensor: data type is int64, shape is (nbins,). Examples: .. code-block:: python import paddle inputs = paddle.to_tensor([1, 2, 1]) result = paddle.histogram(inputs, bins=4, min=0, max=3) print(result) # [0, 2, 1, 0] """ if paddle.in_dynamic_mode(): return _C_ops.histogram(input, "bins", bins, "min", min, "max", max) helper = LayerHelper('histogram', **locals()) check_variable_and_dtype( input, 'X', ['int32', 'int64', 'float32', 'float64'], 'histogram') out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64) helper.append_op( type='histogram', inputs={'X': input}, outputs={'Out': out}, attrs={'bins': bins, 'min': min, 'max': max}) return out def bincount(x, weights=None, minlength=0, name=None): """ Computes frequency of each value in the input tensor. Args: x (Tensor): A Tensor with non-negative integer. Should be 1-D tensor. weights (Tensor, optional): Weight for each value in the input tensor. Should have the same shape as input. Default is None. minlength (int, optional): Minimum number of bins. Should be non-negative integer. Default is 0. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor of frequency. Examples: .. code-block:: python import paddle x = paddle.to_tensor([1, 2, 1, 4, 5]) result1 = paddle.bincount(x) print(result1) # [0, 2, 1, 0, 1, 1] w = paddle.to_tensor([2.1, 0.4, 0.1, 0.5, 0.5]) result2 = paddle.bincount(x, weights=w) print(result2) # [0., 2.19999981, 0.40000001, 0., 0.50000000, 0.50000000] """ if x.dtype not in [paddle.int32, paddle.int64]: raise TypeError("Elements in Input(x) should all be integers") if paddle.in_dynamic_mode(): return _C_ops.bincount(x, weights, "minlength", minlength) helper = LayerHelper('bincount', **locals()) check_variable_and_dtype(x, 'X', ['int32', 'int64'], 'bincount') if weights is not None: check_variable_and_dtype(weights, 'Weights', ['int32', 'int64', 'float32', 'float64'], 'bincount') out = helper.create_variable_for_type_inference(dtype=weights.dtype) else: out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='bincount', inputs={'X': x, 'Weights': weights}, outputs={'Out': out}, attrs={'minlength': minlength}) return out def mv(x, vec, name=None): """ Performs a matrix-vector product of the matrix x and the vector vec. Args: x (Tensor): A tensor with shape :math:`[M, N]` , The data type of the input Tensor x should be one of float32, float64. vec (Tensor): A tensor with shape :math:`[N]` , The data type of the input Tensor x should be one of float32, float64. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor which is producted by x and vec. Examples: .. 
code-block:: python # x: [M, N], vec: [N] # paddle.mv(x, vec) # out: [M] import numpy as np import paddle x_data = np.array([[2, 1, 3], [3, 0, 1]]).astype("float64") x = paddle.to_tensor(x_data) vec_data = np.array([3, 5, 1]) vec = paddle.to_tensor(vec_data).astype("float64") out = paddle.mv(x, vec) """ if in_dygraph_mode(): return _C_ops.final_state_mv(x, vec) else: if _in_legacy_dygraph(): out = _C_ops.mv(x, vec) return out else: def __check_input(x, vec): var_names = {'x': x, 'vec': vec} for name, val in var_names.items(): check_variable_and_dtype(val, name, ['float32', 'float64'], 'mv') x_shape = list(x.shape) vec_shape = list(vec.shape) if len(x_shape) != 2: raise ValueError( "x should be 2-dimensional. But received x's dimention: {}". format(x_shape)) if len(vec_shape) != 1: raise ValueError( "vec should be 1-dimensional. But received vec's dimention: {}". format(vec_shape)) __check_input(x, vec) helper = LayerHelper('mv', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='mv', inputs={'X': x, 'Vec': vec}, outputs={'Out': out}) return out def det(x, name=None): """ Calculates determinant value of a square matrix or batches of square matrices. Args: x (Tensor): input (Tensor): the input matrix of size `(n, n)` or the batch of matrices of size `(*, n, n)` where `*` is one or more batch dimensions. Returns: y (Tensor):the determinant value of a square matrix or batches of square matrices. Examples: .. code-block:: python import paddle x = paddle.randn([3,3,3]) A = paddle.linalg.det(x) print(A) # [ 0.02547996, 2.52317095, -6.15900707]) """ if paddle.in_dynamic_mode(): return _C_ops.determinant(x) check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'det') input_shape = list(x.shape) assert len(input_shape) >= 2, \ "The x must be at least 2-dimensional, " \ "but received Input x's dimensional: %s.\n" % \ len(input_shape) assert (input_shape[-1] == input_shape[-2]), \ "Expect squared input," \ "but received %s by %s matrix.\n" \ %(input_shape[-2], input_shape[-1]) \ helper = LayerHelper('determinant', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='determinant', inputs={'Input': [x]}, outputs={'Out': [out]}) return out def slogdet(x, name=None): """ Calculates the sign and natural logarithm of the absolute value of a square matrix's or batches square matrices' determinant. The determinant can be computed with ``sign * exp(logabsdet) Supports input of float, double Note that for matrices that have zero determinant, this returns ``(0, -inf)`` Args: x (Tensor): the batch of matrices of size :math:`(*, n, n)` where math:`*` is one or more batch dimensions. Returns: y (Tensor): A tensor containing the sign of the determinant and the natural logarithm of the absolute value of determinant, respectively. Examples: .. code-block:: python import paddle x = paddle.randn([3,3,3]) A = paddle.linalg.slogdet(x) print(A) # [[ 1. , 1. , -1. 
], # [-0.98610914, -0.43010661, -0.10872950]]) """ if paddle.in_dynamic_mode(): return _C_ops.slogdeterminant(x) check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'slogdet') input_shape = list(x.shape) assert len(input_shape) >= 2, \ "The x must be at least 2-dimensional, " \ "but received Input x's dimensional: %s.\n" % \ len(input_shape) assert (input_shape[-1] == input_shape[-2]), \ "Expect squared input," \ "but received %s by %s matrix.\n" \ %(input_shape[-2], input_shape[-1]) \ helper = LayerHelper('slogdeterminant', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='slogdeterminant', inputs={'Input': [x]}, outputs={'Out': [out]}) return out def svd(x, full_matrices=False, name=None): r""" Computes the singular value decomposition of one matrix or a batch of regular matrices. Let :math:`X` be the input matrix or a batch of input matrices, the output should satisfies: .. math:: X = U * diag(S) * VT Args: x (Tensor): The input tensor. Its shape should be `[..., N, M]`, where `...` is zero or more batch dimensions. N and M can be arbitraty positive number. Note that if x is sigular matrices, the grad is numerical instable. The data type of x should be float32 or float64. full_matrices (bool): A flag to control the behavor of svd. If full_matrices = True, svd op will compute full U and V matrics, which means shape of U is `[..., N, N]`, shape of V is `[..., M, M]`. K = min(M, N). If full_matrices = False, svd op will use a economic method to store U and V. which means shape of U is `[..., N, K]`, shape of V is `[..., M, K]`. K = min(M, N). name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tuple of 3 tensors: (U, S, VH). VH is the conjugate transpose of V. S is the singlar value vectors of matrics with shape `[..., K]` Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [1.0, 3.0], [4.0, 6.0]]).astype('float64') x = x.reshape([3, 2]) u, s, vh = paddle.linalg.svd(x) print (u) #U = [[ 0.27364809, -0.21695147 ], # [ 0.37892198, -0.87112408 ], # [ 0.8840446 , 0.44053933 ]] print (s) #S = [8.14753743, 0.78589688] print (vh) #VT= [[ 0.51411221, 0.85772294], # [ 0.85772294, -0.51411221]] # one can verify : U * S * VT == X # U * UH == I # V * VH == I """ if paddle.in_dynamic_mode(): return _C_ops.svd(x, 'full_matrices', full_matrices) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'svd') check_type(full_matrices, 'full_matrices', bool, 'svd') helper = LayerHelper('svd', **locals()) u = helper.create_variable_for_type_inference(dtype=x.dtype) vh = helper.create_variable_for_type_inference(dtype=x.dtype) s = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['full_matrices'] = full_matrices helper.append_op( type='svd', inputs={'X': [x]}, outputs={'U': u, 'VH': vh, 'S': s}, attrs=attrs, ) return u, s, vh def matrix_power(x, n, name=None): r""" Computes the n-th power of a square matrix or a batch of square matrices. Let :math:`X` be a sqaure matrix or a batch of square matrices, :math:`n` be an exponent, the equation should be: .. math:: Out = X ^ {n} Specifically, - If `n > 0`, it returns the matrix or a batch of matrices raised to the power of `n`. - If `n = 0`, it returns the identity matrix or a batch of identity matrices. - If `n < 0`, it returns the inverse of each matrix (if invertible) raised to the power of `abs(n)`. 
Args: x (Tensor): A square matrix or a batch of square matrices to be raised to power `n`. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. n (int): The exponent. It can be any positive, negative integer or zero. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The n-th power of the matrix (or the batch of matrices) `x`. Its data type should be the same as that of `x`. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1, 2, 3], [1, 4, 9], [1, 8, 27]], dtype='float64') print(paddle.linalg.matrix_power(x, 2)) # [[6. , 34. , 102.], # [14. , 90. , 282.], # [36. , 250., 804.]] print(paddle.linalg.matrix_power(x, 0)) # [[1., 0., 0.], # [0., 1., 0.], # [0., 0., 1.]] print(paddle.linalg.matrix_power(x, -2)) # [[ 12.91666667, -12.75000000, 2.83333333 ], # [-7.66666667 , 8. , -1.83333333 ], # [ 1.80555556 , -1.91666667 , 0.44444444 ]] """ if paddle.in_dynamic_mode(): return _C_ops.matrix_power(x, "n", n) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'matrix_power') check_type(n, 'n', int, 'matrix_power') helper = LayerHelper('matrix_power', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matrix_power', inputs={'X': x}, outputs={'Out': out}, attrs={'n': n}) return out def qr(x, mode="reduced", name=None): r""" Computes the QR decomposition of one matrix or batches of matrice (backward is unsupported now). Args: x (Tensor): The input tensor. Its shape should be `[..., M, N]`, where ... is zero or more batch dimensions. M and N can be arbitrary positive number. The data type of x should be float32 or float64. mode (str, optional): A flag to control the behavior of qr, the default is "reduced". Suppose x's shape is `[..., M, N]` and denoting `K = min(M, N)`: If mode = "reduced", qr op will return reduced Q and R matrices, which means Q's shape is `[..., M, K]` and R's shape is `[..., K, N]`. If mode = "complete", qr op will return complete Q and R matrices, which means Q's shape is `[..., M, M]` and R's shape is `[..., M, N]`. If mode = "r", qr op will only return reduced R matrix, which means R's shape is `[..., K, N]`. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: If mode = "reduced" or mode = "complete", qr will return a two tensor-tuple, which represents Q and R. If mode = "r", qr will return a tensor which represents R. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') q, r = paddle.linalg.qr(x) print (q) print (r) # Q = [[-0.16903085, 0.89708523], # [-0.50709255, 0.27602622], # [-0.84515425, -0.34503278]]) # R = [[-5.91607978, -7.43735744], # [ 0. 
, 0.82807867]]) # one can verify : X = Q * R ; """ if paddle.in_dynamic_mode(): q, r = _C_ops.qr(x, 'mode', mode) if mode == "r": return r else: return q, r check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'qr') check_type(mode, 'mode', str, 'qr') helper = LayerHelper('qr', **locals()) q = helper.create_variable_for_type_inference(dtype=x.dtype) r = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['mode'] = mode helper.append_op( type='qr', inputs={'X': [x]}, outputs={'Q': q, 'R': r}, attrs=attrs) if mode == "r": return r else: return q, r def lu(x, pivot=True, get_infos=False, name=None): r""" Computes the LU factorization of an N-D(N>=2) matrix x. Returns the LU factorization(inplace x) and Pivots. low triangular matrix L and upper triangular matrix U are combined to a single LU matrix. Pivoting is done if pivot is set to True. P mat can be get by pivots: # ones = eye(rows) #eye matrix of rank rows # for i in range(cols): # swap(ones[i], ones[pivots[i]]) # return ones Args: X (Tensor): the tensor to factor of N-dimensions(N>=2). pivot (bool, optional): controls whether pivoting is done. Default: True. get_infos (bool, optional): if set to True, returns an info IntTensor. Default: False. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: factorization (Tensor): LU matrix, the factorization of input X. pivots (IntTensor): the pivots of size(∗(N-2), min(m,n)). `pivots` stores all the intermediate transpositions of rows. The final permutation `perm` could be reconstructed by this, details refer to upper example. infos (IntTensor, optional): if `get_infos` is `True`, this is a tensor of size (∗(N-2)) where non-zero values indicate whether factorization for the matrix or each minibatch has succeeded or failed. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') lu,p,info = paddle.linalg.lu(x, get_infos=True) # >>> lu: # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0.20000000, 0.80000000], # [0.60000000, 0.50000000]]) # >>> p # Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # [3, 3]) # >>> info # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # 0) P,L,U = paddle.linalg.lu_unpack(lu,p) # >>> P # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[0., 1., 0.], # [0., 0., 1.], # [1., 0., 0.]]), # >>> L # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[1. , 0. ], # [0.20000000, 1. ], # [0.60000000, 0.50000000]]), # >>> U # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0. 
, 0.80000000]])) # one can verify : X = P @ L @ U ; """ if paddle.in_dynamic_mode(): LU, Piv, Info = _C_ops.lu(x, 'pivots', pivot) if get_infos: return LU, Piv, Info else: return LU, Piv check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu') helper = LayerHelper('lu', **locals()) lu = helper.create_variable_for_type_inference(dtype=x.dtype) p = helper.create_variable_for_type_inference(dtype='int') info = helper.create_variable_for_type_inference(dtype='int') attrs = dict() attrs['pivots'] = pivot helper.append_op( type='lu', inputs={'X': x}, outputs={'Out': lu, 'Pivots': p, 'Infos': info}, attrs=attrs) if get_infos: return lu, p, info else: return lu, p def lu_unpack(x, y, unpack_ludata=True, unpack_pivots=True, name=None): r""" Unpack L U and P to single matrix tensor . unpack L and U matrix from LU, unpack permutation matrix P from Pivtos . P mat can be get by pivots: # ones = eye(rows) #eye matrix of rank rows # for i in range(cols): # swap(ones[i], ones[pivots[i]]) Args: x (Tensor): The LU tensor get from paddle.linalg.lu, which is combined by L and U. y (Tensor): Pivots get from paddle.linalg.lu. unpack_ludata (bool,optional): whether to unpack L and U from x. Default: True. unpack_pivots (bool, optional): whether to unpack permutation matrix P from Pivtos. Default: True. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: P (Tensor): Permutation matrix P of lu factorization. L (Tensor): The lower triangular matrix tensor of lu factorization. U (Tensor): The upper triangular matrix tensor of lu factorization. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') lu,p,info = paddle.linalg.lu(x, get_infos=True) # >>> lu: # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0.20000000, 0.80000000], # [0.60000000, 0.50000000]]) # >>> p # Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # [3, 3]) # >>> info # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # 0) P,L,U = paddle.linalg.lu_unpack(lu,p) # >>> P # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[0., 1., 0.], # [0., 0., 1.], # [1., 0., 0.]]), # >>> L # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[1. , 0. ], # [0.20000000, 1. ], # [0.60000000, 0.50000000]]), # >>> U # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0. , 0.80000000]])) # one can verify : X = P @ L @ U ; """ if paddle.in_dynamic_mode(): P, L, U = _C_ops.lu_unpack(x, y, 'unpack_ludata', unpack_ludata, 'unpack_pivots', unpack_pivots) return P, L, U check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu_unpack') helper = LayerHelper('lu_unpack', **locals()) p = helper.create_variable_for_type_inference(dtype=x.dtype) l = helper.create_variable_for_type_inference(dtype=x.dtype) u = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['unpack_ludata'] = unpack_ludata attrs['unpack_pivots'] = unpack_pivots helper.append_op( type='lu_unpack', inputs={'X': x, 'Pivots': y}, outputs={'Pmat': p, 'L': l, 'U': u}, attrs=attrs) return p, l, u def eig(x, name=None): """ This API performs the eigenvalue decomposition of a square matrix or a batch of square matrices. .. 
note:: If the matrix is a Hermitian or a real symmetric matrix, please use :ref:`paddle.linalg.eigh` instead, which is much faster. If only eigenvalues is needed, please use :ref:`paddle.linalg.eigvals` instead. If the matrix is of any shape, please use :ref:`paddle.linalg.svd`. This API is only supported on CPU device. The output datatype is always complex for both real and complex input. Args: x (Tensor): A tensor with shape math:`[*, N, N]`, The data type of the x should be one of ``float32``, ``float64``, ``compplex64`` or ``complex128``. name (str, optional): The default value is `None`. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Eigenvalues(Tensors): A tensor with shape math:`[*, N]` refers to the eigen values. Eigenvectors(Tensors): A tensor with shape math:`[*, N, N]` refers to the eigen vectors. Examples: .. code-block:: python import paddle import numpy as np paddle.device.set_device("cpu") x_data = np.array([[1.6707249, 7.2249975, 6.5045543], [9.956216, 8.749598, 6.066444 ], [4.4251957, 1.7983172, 0.370647 ]]).astype("float32") x = paddle.to_tensor(x_data) w, v = paddle.linalg.eig(x) print(w) # Tensor(shape=[3, 3], dtype=complex128, place=CPUPlace, stop_gradient=False, # [[(-0.5061363550800655+0j) , (-0.7971760990842826+0j) , # (0.18518077798279986+0j)], # [(-0.8308237755993192+0j) , (0.3463813401919749+0j) , # (-0.6837005269141947+0j) ], # [(-0.23142567697893396+0j), (0.4944999840400175+0j) , # (0.7058765252952796+0j) ]]) print(v) # Tensor(shape=[3], dtype=complex128, place=CPUPlace, stop_gradient=False, # [ (16.50471283351188+0j) , (-5.5034820550763515+0j) , # (-0.21026087843552282+0j)]) """ if paddle.in_dynamic_mode(): w, v = _C_ops.eig(x) return w, v check_variable_and_dtype( x, 'X', ['float32', 'float64', 'complex64', 'complex128'], 'eig') helper = LayerHelper('eig', **locals()) w = helper.create_variable_for_type_inference(x.dtype) v = helper.create_variable_for_type_inference(x.dtype) inputs = {'X': x} outputs = {'Eigenvalues': w, 'Eigenvectors': v} helper.append_op(type='eig', inputs=inputs, outputs=outputs) return w, v def eigvals(x, name=None): """ Compute the eigenvalues of one or more general matrices. Warning: The gradient kernel of this operator does not yet developed. If you need back propagation through this operator, please replace it with paddle.linalg.eig. Args: x (Tensor): A square matrix or a batch of square matrices whose eigenvalues will be computed. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32, float64, complex64, or complex128. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: A tensor containing the unsorted eigenvalues which has the same batch dimensions with `x`. The eigenvalues are complex-valued even when `x` is real. Examples: .. 
code-block:: python import paddle paddle.set_device("cpu") paddle.seed(1234) x = paddle.rand(shape=[3, 3], dtype='float64') # [[0.02773777, 0.93004224, 0.06911496], # [0.24831591, 0.45733623, 0.07717843], # [0.48016702, 0.14235102, 0.42620817]]) print(paddle.linalg.eigvals(x)) # [(-0.27078833542132674+0j), (0.29962280156230725+0j), (0.8824477020120244+0j)] #complex128 """ check_variable_and_dtype(x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigvals') x_shape = list(x.shape) if len(x_shape) < 2: raise ValueError( "The dimension of Input(x) should be at least 2, but received x's dimention = {}, x's shape = {}". format(len(x_shape), x_shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The last two dimensions of Input(x) should be equal, but received x's shape = {}". format(x_shape)) if paddle.in_dynamic_mode(): return _C_ops.eigvals(x) helper = LayerHelper('eigvals', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type='eigvals', inputs={'X': x}, outputs={'Out': out}) return out def multi_dot(x, name=None): """ Multi_dot is an operator that calculates multiple matrix multiplications. Supports inputs of float16(only GPU support), float32 and float64 dtypes. This function does not support batched inputs. The input tensor in [x] must be 2-D except for the first and last can be 1-D. If the first tensor is a 1-D vector of shape(n, ) it is treated as row vector of shape(1, n), similarly if the last tensor is a 1D vector of shape(n, ), it is treated as a column vector of shape(n, 1). If the first and last tensor are 2-D matrix, then the output is also 2-D matrix, otherwise the output is a 1-D vector. Multi_dot will select the lowest cost multiplication order for calculation. The cost of multiplying two matrices with shapes (a, b) and (b, c) is a * b * c. Given matrices A, B, C with shapes (20, 5), (5, 100), (100, 10) respectively, we can calculate the cost of different multiplication orders as follows: - Cost((AB)C) = 20x5x100 + 20x100x10 = 30000 - Cost(A(BC)) = 5x100x10 + 20x5x10 = 6000 In this case, multiplying B and C first, then multiply A, which is 5 times faster than sequential calculation. Args: x ([Tensor]): The input tensors which is a list Tensor. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The output Tensor. Examples: .. 
code-block:: python import paddle import numpy as np # A * B A_data = np.random.random([3, 4]).astype(np.float32) B_data = np.random.random([4, 5]).astype(np.float32) A = paddle.to_tensor(A_data) B = paddle.to_tensor(B_data) out = paddle.linalg.multi_dot([A, B]) print(out.numpy().shape) # [3, 5] # A * B * C A_data = np.random.random([10, 5]).astype(np.float32) B_data = np.random.random([5, 8]).astype(np.float32) C_data = np.random.random([8, 7]).astype(np.float32) A = paddle.to_tensor(A_data) B = paddle.to_tensor(B_data) C = paddle.to_tensor(C_data) out = paddle.linalg.multi_dot([A, B, C]) print(out.numpy().shape) # [10, 7] """ if paddle.in_dynamic_mode(): return _C_ops.multi_dot(x) check_type(x, 'x', (list, tuple), 'multi_dot') for id, item in enumerate(x): check_variable_and_dtype(item, 'x[' + str(id) + ']', ['float16', 'float32', 'float64'], 'multi_dot') if item.dtype != x[0].dtype: raise TypeError( "All the Tensors in the input must have the same data type.") helper = LayerHelper('multi_dot', **locals()) dtype = helper.input_dtype(input_param_name='x') out = helper.create_variable_for_type_inference(dtype) helper.append_op(type='multi_dot', inputs={"X": x}, outputs={"Out": out}) return out def eigh(x, UPLO='L', name=None): """ Compute the eigenvalues and eigenvectors of a complex Hermitian (conjugate symmetric) or a real symmetric matrix. Args: x (Tensor): A tensor with shape :math:`[*, N, N]` , The data type of the input Tensor x should be one of float32, float64, complex64, complex128. UPLO(str, optional): (string, default 'L'), 'L' represents the lower triangular matrix, "'U' represents the upper triangular matrix.". name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: out_value(Tensor): A Tensor with shape [*, N] and data type of float32 and float64. The eigenvalues of eigh op. out_vector(Tensor): A Tensor with shape [*, N, N] and data type of float32,float64,complex64 and complex128. The eigenvectors of eigh op. Examples: .. code-block:: python import numpy as np import paddle x_data = np.array([[1, -2j], [2j, 5]]) x = paddle.to_tensor(x_data) out_value, out_vector = paddle.linalg.eigh(x, UPLO='L') print(out_value) #[0.17157288, 5.82842712] print(out_vector) #[(-0.9238795325112867+0j), (-0.3826834323650898+0j)], #[ 0.3826834323650898j , -0.9238795325112867j ]] """ if paddle.in_dynamic_mode(): return _C_ops.eigh(x, 'UPLO', UPLO) def __check_input(x, UPLO): x_shape = list(x.shape) if len(x.shape) < 2: raise ValueError( "Input(input) only support >=2 tensor, but received " "length of Input(input) is %s." % len(x.shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The input matrix must be batches of square matrices. But received x's dimention: {}". format(x_shape)) if UPLO != 'L' and UPLO != 'U': raise ValueError( "UPLO must be L or U. 
But received UPLO is: {}".format(UPLO)) __check_input(x, UPLO) helper = LayerHelper('eigh', **locals()) check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigh') out_value = helper.create_variable_for_type_inference(dtype=x.dtype) out_vector = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='eigh', inputs={'X': x}, outputs={'Eigenvalues': out_value, 'Eigenvectors': out_vector}, attrs={'UPLO': UPLO}) return out_value, out_vector def pinv(x, rcond=1e-15, hermitian=False, name=None): r""" Calculate pseudo inverse via SVD(singular value decomposition) of one matrix or batches of regular matrix. .. math:: if hermitian == False: x = u * s * vt (SVD) out = v * 1/s * ut else: x = u * s * ut (eigh) out = u * 1/s * u.conj().transpose(-2,-1) If x is hermitian or symmetric matrix, svd will be replaced with eigh. Args: x(Tensor): The input tensor. Its shape should be (*, m, n) where * is zero or more batch dimensions. m and n can be arbitraty positive number. The data type of x should be float32 or float64 or complex64 or complex128. When data type is complex64 or cpmplex128, hermitian should be set True. rcond(Tensor, optional): the tolerance value to determine when is a singular value zero. Defalut:1e-15. hermitian(bool, optional): indicates whether x is Hermitian if complex or symmetric if real. Default: False. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The tensor with same data type with x. it represents pseudo inverse of x. Its shape should be (*, n, m). Examples: .. code-block:: python import paddle x = paddle.arange(15).reshape((3, 5)).astype('float64') input = paddle.to_tensor(x) out = paddle.linalg.pinv(input) print(input) print(out) # input: # [[0. , 1. , 2. , 3. , 4. ], # [5. , 6. , 7. , 8. , 9. 
], # [10., 11., 12., 13., 14.]] # out: # [[-0.22666667, -0.06666667, 0.09333333], # [-0.12333333, -0.03333333, 0.05666667], # [-0.02000000, 0.00000000, 0.02000000], # [ 0.08333333, 0.03333333, -0.01666667], # [ 0.18666667, 0.06666667, -0.05333333]] # one can verify : x * out * x = x ; # or out * x * out = x ; """ if paddle.in_dynamic_mode(): if not hermitian: # combine svd and matmul op u, s, vt = _C_ops.svd(x, 'full_matrices', False) max_singular_val = _C_ops.reduce_max(s, 'dim', [-1], 'keep_dim', True, \ 'reduce_all', False) rcond = paddle.to_tensor(rcond, dtype=x.dtype) cutoff = rcond * max_singular_val y = float('inf') y = paddle.to_tensor(y, dtype=x.dtype) condition = s > cutoff cond_int = layers.cast(condition, s.dtype) cond_not_int = layers.cast(layers.logical_not(condition), s.dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st, _ = _C_ops.unsqueeze2(singular, 'axes', [-2]) dims = list(range(len(vt.shape))) perm = dims[:-2] + [dims[-1]] + [dims[-2]] v, _ = _C_ops.transpose2(vt, 'axis', perm) out_1 = v * st out_2 = _C_ops.matmul_v2(out_1, u, 'trans_x', False, 'trans_y', True) return out_2 else: # combine eigh and matmul op s, u = _C_ops.eigh(x, 'UPLO', 'L') s_abs = paddle.abs(s) max_singular_val = _C_ops.reduce_max(s_abs, 'dim', [-1], 'keep_dim', True, \ 'reduce_all', False) rcond = paddle.to_tensor(rcond, dtype=s.dtype) cutoff = rcond * max_singular_val y = float('inf') y = paddle.to_tensor(y, dtype=s.dtype) condition = s_abs > cutoff cond_int = layers.cast(condition, s.dtype) cond_not_int = layers.cast(layers.logical_not(condition), s.dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st, _ = _C_ops.unsqueeze2(singular, 'axes', [-2]) out_1 = u * st u_conj = _C_ops.conj(u) out_2 = _C_ops.matmul_v2(out_1, u_conj, 'trans_x', False, 'trans_y', True) return out_2 else: if not hermitian: helper = LayerHelper('pinv', **locals()) dtype = x.dtype check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'pinv') u = helper.create_variable_for_type_inference(dtype) s = helper.create_variable_for_type_inference(dtype) vt = helper.create_variable_for_type_inference(dtype) helper.append_op( type='svd', inputs={'X': [x]}, outputs={'U': u, 'VH': vt, 'S': s}, attrs={'full_matrices': False}, ) max_singular_val = helper.create_variable_for_type_inference(dtype) helper.append_op( type='reduce_max', inputs={'X': s}, outputs={'Out': max_singular_val}, attrs={'dim': [-1], 'keep_dim': True, 'reduce_all': False}) rcond = layers.fill_constant(shape=[1], value=rcond, dtype=dtype) cutoff = rcond * max_singular_val y = float('inf') y = layers.fill_constant(shape=[1], value=y, dtype=dtype) condition = s > cutoff cond_int = layers.cast(condition, dtype) cond_not_int = layers.cast(layers.logical_not(condition), dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st = helper.create_variable_for_type_inference(dtype=dtype) st_shape = helper.create_variable_for_type_inference(dtype=dtype) helper.append_op( type='unsqueeze2', inputs={'X': singular}, attrs={'axes': [-2]}, outputs={'Out': st, 'XShape': st_shape}) dims = list(range(len(vt.shape))) perm = dims[:-2] + [dims[-1]] + [dims[-2]] v = helper.create_variable_for_type_inference(dtype) v_shape = helper.create_variable_for_type_inference(dtype) helper.append_op( 
type='transpose2', inputs={'X': [vt]}, outputs={'Out': [v], 'XShape': [v_shape]}, attrs={'axis': perm}) out_1 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='elementwise_mul', inputs={'X': v, 'Y': st}, outputs={'Out': out_1}, attrs={'axis': -1, 'use_mkldnn': False}) out_1 = helper.append_activation(out_1) out_2 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='matmul_v2', inputs={'X': out_1, 'Y': u}, outputs={'Out': out_2}, attrs={'trans_x': False, 'trans_y': True}, ) return out_2 else: helper = LayerHelper('pinv', **locals()) dtype = x.dtype check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'pinv') if dtype == paddle.complex128: s_type = 'float64' elif dtype == paddle.complex64: s_type = 'float32' else: s_type = dtype u = helper.create_variable_for_type_inference(dtype) s = helper.create_variable_for_type_inference(s_type) helper.append_op( type='eigh', inputs={'X': x}, outputs={'Eigenvalues': s, 'Eigenvectors': u}, attrs={'UPLO': 'L'}) s_abs = helper.create_variable_for_type_inference(s_type) helper.append_op( type='abs', inputs={'X': s}, outputs={'Out': s_abs}) max_singular_val = helper.create_variable_for_type_inference(s_type) helper.append_op( type='reduce_max', inputs={'X': s_abs}, outputs={'Out': max_singular_val}, attrs={'dim': [-1], 'keep_dim': True, 'reduce_all': False}) rcond = layers.fill_constant(shape=[1], value=rcond, dtype=s_type) cutoff = rcond * max_singular_val y = float('inf') y = layers.fill_constant(shape=[1], value=y, dtype=s_type) condition = s_abs > cutoff cond_int = layers.cast(condition, s_type) cond_not_int = layers.cast(layers.logical_not(condition), s_type) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st = helper.create_variable_for_type_inference(dtype=s_type) st_shape = helper.create_variable_for_type_inference(dtype=s_type) helper.append_op( type='unsqueeze2', inputs={'X': singular}, attrs={'axes': [-2]}, outputs={'Out': st, 'XShape': st_shape}) out_1 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='elementwise_mul', inputs={'X': u, 'Y': st}, outputs={'Out': out_1}, attrs={'axis': -1, 'use_mkldnn': False}) out_1 = helper.append_activation(out_1) u_conj = helper.create_variable_for_type_inference(dtype) helper.append_op( type='conj', inputs={'X': u}, outputs={'Out': [u_conj]}) out_2 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='matmul_v2', inputs={'X': out_1, 'Y': u_conj}, outputs={'Out': out_2}, attrs={'trans_x': False, 'trans_y': True}, ) return out_2 def solve(x, y, name=None): r""" Computes the solution of a square system of linear equations with a unique solution for input 'X' and 'Y'. Let :math: `X` be a sqaure matrix or a batch of square matrices, :math:`Y` be a vector/matrix or a batch of vectors/matrices, the equation should be: .. math:: Out = X^-1 * Y Specifically, - This system of linear equations has one solution if and only if input 'X' is invertible. Args: x (Tensor): A square matrix or a batch of square matrices. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): A vector/matrix or a batch of vectors/matrices. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. name(str, optional): Name for the operation (optional, default is None). 
For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of a square system of linear equations with a unique solution for input 'x' and 'y'. Its data type should be the same as that of `x`. Examples: .. code-block:: python # a square system of linear equations: # 2*X0 + X1 = 9 # X0 + 2*X1 = 8 import paddle import numpy as np np_x = np.array([[3, 1],[1, 2]]) np_y = np.array([9, 8]) x = paddle.to_tensor(np_x, dtype="float64") y = paddle.to_tensor(np_y, dtype="float64") out = paddle.linalg.solve(x, y) print(out) # [2., 3.]) """ if paddle.in_dynamic_mode(): return _C_ops.solve(x, y) inputs = {"X": [x], "Y": [y]} helper = LayerHelper("solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type="solve", inputs={"X": x, "Y": y}, outputs={"Out": out}) return out def triangular_solve(x, y, upper=True, transpose=False, unitriangular=False, name=None): r""" Computes the solution of a system of equations with a triangular coefficient matrix `x` and multiple right-hand sides `y` . Input `x` and `y` is 2D matrices or batches of 2D matrices. If the inputs are batches, the outputs is also batches. Args: x (Tensor): The input triangular coefficient matrix. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. upper (bool, optional): Whether to solve the upper-triangular system of equations (default) or the lower-triangular system of equations. Default: True. transpose (bool, optional): whether `x` should be transposed before calculation. Default: False. unitriangular (bool, optional): whether `x` is unit triangular. If True, the diagonal elements of `x` are assumed to be 1 and not referenced from `x` . Default: False. name(str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of the system of equations. Its data type should be the same as that of `x`. Examples: .. code-block:: python # a square system of linear equations: # x1 + x2 + x3 = 0 # 2*x2 + x3 = -9 # -x3 = 5 import paddle import numpy as np x = paddle.to_tensor([[1, 1, 1], [0, 2, 1], [0, 0,-1]], dtype="float64") y = paddle.to_tensor([[0], [-9], [5]], dtype="float64") out = paddle.linalg.triangular_solve(x, y, upper=True) print(out) # [7, -2, -5] """ if paddle.in_dynamic_mode(): return _C_ops.triangular_solve(x, y, 'upper', upper, 'transpose', transpose, 'unitriangular', unitriangular) inputs = {"X": [x], "Y": [y]} helper = LayerHelper("triangular_solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'triangular_solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'triangular_solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='triangular_solve', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs={ 'upper': upper, 'transpose': transpose, 'unitriangular': unitriangular }) return out def cholesky_solve(x, y, upper=False, name=None): r""" Solves a linear system of equations A @ X = B, given A's Cholesky factor matrix u and matrix B. Input `x` and `y` is 2D matrices or batches of 2D matrices. 
If the inputs are batches, the outputs is also batches. Args: x (Tensor): The input matrix which is upper or lower triangular Cholesky factor of square matrix A. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. upper (bool, optional): whether to consider the Cholesky factor as a lower or upper triangular matrix. Default: False. name(str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of the system of equations. Its data type is the same as that of `x`. Examples: .. code-block:: python import paddle u = paddle.to_tensor([[1, 1, 1], [0, 2, 1], [0, 0,-1]], dtype="float64") b = paddle.to_tensor([[0], [-9], [5]], dtype="float64") out = paddle.linalg.cholesky_solve(b, u, upper=True) print(out) # [-2.5, -7, 9.5] """ if paddle.in_dynamic_mode(): return _C_ops.cholesky_solve(x, y, 'upper', upper) helper = LayerHelper("cholesky_solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'cholesky_solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'cholesky_solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='cholesky_solve', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs={'upper': upper}) return out def eigvalsh(x, UPLO='L', name=None): """ Computes the eigenvalues of a complex Hermitian (conjugate symmetric) or a real symmetric matrix. Args: x (Tensor): A tensor with shape :math:`[_, M, M]` , The data type of the input Tensor x should be one of float32, float64, complex64, complex128. UPLO(str, optional): Lower triangular part of a (‘L’, default) or the upper triangular part (‘U’). name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor eigenvalues in ascending order. Examples: .. code-block:: python import numpy as np import paddle x_data = np.array([[1, -2j], [2j, 5]]) x = paddle.to_tensor(x_data) out_value = paddle.eigvalsh(x, UPLO='L') print(out_value) #[0.17157288, 5.82842712] """ if paddle.in_dynamic_mode(): is_test = x.stop_gradient values, _ = _C_ops.eigvalsh(x, 'UPLO', UPLO, 'is_test', is_test) return values def __check_input(x, UPLO): x_shape = list(x.shape) if len(x.shape) < 2: raise ValueError( "Input(input) only support >=2 tensor, but received " "length of Input(input) is %s." % len(x.shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The input matrix must be batches of square matrices. But received x's dimention: {}". format(x_shape)) if UPLO != 'L' and UPLO != 'U': raise ValueError( "UPLO must be L or U. 
But received UPLO is: {}".format(UPLO)) __check_input(x, UPLO) helper = LayerHelper('eigvalsh', **locals()) check_variable_and_dtype(x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigvalsh') out_value = helper.create_variable_for_type_inference(dtype=x.dtype) out_vector = helper.create_variable_for_type_inference(dtype=x.dtype) is_test = x.stop_gradient helper.append_op( type='eigvalsh', inputs={'X': x}, outputs={'Eigenvalues': out_value, 'Eigenvectors': out_vector}, attrs={'UPLO': UPLO, 'is_test': is_test}) return out_value def lstsq(x, y, rcond=None, driver=None, name=None): """ Computes a solution to the least squares problem of a system of linear equations. Args: x (Tensor): A tensor with shape ``(*, M, N)`` , the data type of the input Tensor ``x`` should be one of float32, float64. y (Tensor): A tensor with shape ``(*, M, K)`` , the data type of the input Tensor ``y`` should be one of float32, float64. rcond(float, optional): The default value is None. A float pointing number used to determine the effective rank of ``x``. If ``rcond`` is None, it will be set to max(M, N) times the machine precision of x_dtype. driver(str, optional): The default value is None. The name of LAPACK method to be used. For CPU inputs the valid values are ‘gels’, ‘gelsy’, ‘gelsd, ‘gelss’. For CUDA input, the only valid driver is ‘gels’. If ``driver`` is None, ‘gelsy’ is used for CPU inputs and ‘gels’ for CUDA inputs. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tuple: A tuple of 4 Tensors which is (``solution``, ``residuals``, ``rank``, ``singular_values``). ``solution`` is a tensor with shape ``(*, N, K)``, meaning the least squares solution. ``residuals`` is a tensor with shape ``(*, K)``, meaning the squared residuals of the solutions, which is computed when M > N and every matrix in ``x`` is full-rank, otherwise return an empty tensor. ``rank`` is a tensor with shape ``(*)``, meaning the ranks of the matrices in ``x``, which is computed when ``driver`` in (‘gelsy’, ‘gelsd’, ‘gelss’), otherwise return an empty tensor. ``singular_values`` is a tensor with shape ``(*, min(M, N))``, meaning singular values of the matrices in ``x``, which is computed when ``driver`` in (‘gelsd’, ‘gelss’), otherwise return an empty tensor. Examples: .. code-block:: python import paddle paddle.set_device("cpu") x = paddle.to_tensor([[1, 3], [3, 2], [5, 6.]]) y = paddle.to_tensor([[3, 4, 6], [5, 3, 4], [1, 2, 1.]]) results = paddle.linalg.lstsq(x, y, driver="gelsd") print(results[0]) # [[ 0.78350395, -0.22165027, -0.62371236], # [-0.11340097, 0.78866047, 1.14948535]] print(results[1]) # [19.81443405, 10.43814468, 30.56185532]) print(results[2]) # 2 print(results[3]) # [9.03455734, 1.54167950] x = paddle.to_tensor([[10, 2, 3], [3, 10, 5], [5, 6, 12.]]) y = paddle.to_tensor([[4, 2, 9], [2, 0, 3], [2, 5, 3.]]) results = paddle.linalg.lstsq(x, y, driver="gels") print(results[0]) # [[ 0.39386186, 0.10230173, 0.93606132], # [ 0.10741687, -0.29028133, 0.11892585], # [-0.05115091, 0.51918161, -0.19948854]] print(results[1]) # [] """ device = paddle.get_device() if device == "cpu": if driver not in (None, "gels", "gelss", "gelsd", "gelsy"): raise ValueError( "Only support valid driver is 'gels', 'gelss', 'gelsd', 'gelsy' or None for CPU inputs. But got {}". 
format(driver)) driver = "gelsy" if driver is None else driver elif "gpu" in device: if driver not in (None, "gels"): raise ValueError( "Only support valid driver is 'gels' or None for CUDA inputs. But got {}". format(driver)) driver = "gels" if driver is None else driver else: raise RuntimeError("Only support lstsq api for CPU or CUDA device.") if x.dtype == y.dtype and x.dtype in (paddle.float32, paddle.float64): pass else: raise ValueError( "Only support x and y have the same dtype such as 'float32' and 'float64'." ) if rcond is None: if x.dtype == paddle.float32: rcond = 1e-7 * max(x.shape[-2], x.shape[-1]) elif x.dtype == paddle.float64: rcond = 1e-15 * max(x.shape[-2], x.shape[-1]) if paddle.in_dynamic_mode(): solution, rank, singular_values = _C_ops.lstsq(x, y, "rcond", rcond, "driver", driver) if x.shape[-2] > x.shape[-1]: matmul_out = _varbase_creator(dtype=x.dtype) _C_ops.matmul(x, solution, matmul_out, 'trans_x', False, 'trans_y', False) minus_out = _C_ops.elementwise_sub(matmul_out, y) pow_out = _C_ops.pow(minus_out, 'factor', 2) residuals = _C_ops.reduce_sum(pow_out, 'dim', [-2], 'keepdim', False, 'reduce_all', False) else: residuals = paddle.empty(shape=[0], dtype=x.dtype) if driver == "gels": rank = paddle.empty(shape=[0], dtype=paddle.int32) singular_values = paddle.empty(shape=[0], dtype=x.dtype) elif driver == "gelsy": singular_values = paddle.empty(shape=[0], dtype=x.dtype) return solution, residuals, rank, singular_values helper = LayerHelper('lstsq', **locals()) check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq') check_variable_and_dtype( y, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq') solution = helper.create_variable_for_type_inference(dtype=x.dtype) residuals = helper.create_variable_for_type_inference(dtype=x.dtype) rank = helper.create_variable_for_type_inference(dtype=paddle.int32) singular_values = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='lstsq', inputs={'X': x, 'Y': y}, outputs={ 'Solution': solution, 'Rank': rank, 'SingularValues': singular_values }, attrs={'rcond': rcond, 'driver': driver}) matmul_out = helper.create_variable_for_type_inference(dtype=x.dtype) minus_out = helper.create_variable_for_type_inference(dtype=x.dtype) pow_out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matmul_v2', inputs={'X': x, 'Y': solution}, outputs={'Out': matmul_out}, attrs={ 'trans_x': False, 'trans_y': False, }) helper.append_op( type='elementwise_sub', inputs={'X': matmul_out, 'Y': y}, outputs={'Out': minus_out}) helper.append_op( type='pow', inputs={'X': minus_out}, outputs={'Out': pow_out}, attrs={'factor': 2}) helper.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': residuals}, attrs={'dim': [-2], 'keep_dim': False, 'reduce_all': False}) if driver == "gels": rank = paddle.static.data(name='rank', shape=[0]) singular_values = paddle.static.data(name='singular_values', shape=[0]) elif driver == "gelsy": singular_values = paddle.static.data(name='singular_values', shape=[0]) return solution, residuals, rank, singular_values
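The static-graph branch of `lstsq` above recovers `residuals` by chaining `matmul_v2`, `elementwise_sub`, `pow` (factor 2) and `reduce_sum` over the second-to-last axis, and the dynamic branch does the same only when M > N and the matrices are full rank. A minimal sketch of how that relation can be checked from user code follows; the concrete matrices are illustrative assumptions, not values taken from the source.

import paddle

paddle.set_device("cpu")
# A tall, full-rank system (M > N), so lstsq also returns squared residuals.
x = paddle.to_tensor([[1., 1.], [1., 2.], [1., 3.], [1., 4.]])
y = paddle.to_tensor([[6.], [5.], [7.], [10.]])

solution, residuals, rank, singular_values = paddle.linalg.lstsq(x, y, driver="gelsd")

# Recompute the squared residuals manually, mirroring the
# matmul -> subtract -> square -> reduce_sum(axis=-2) chain used above.
manual = paddle.sum((paddle.matmul(x, solution) - y) ** 2, axis=-2)
print(residuals)  # should match `manual` up to floating-point error
print(manual)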
frobenius_norm
The Frobenius norm OP calculates the Frobenius norm over two given dimensions of Tensor `input`. Args: input (Variable): Tensor, data type float32, float64. dim (list, optional): None for last two dimensions. keepdim (bool, optional): Whether to keep the dimensions as the `input`, Default False.
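The masked code below hides the body of this helper (see the `# MASKED: frobenius_norm function` marker inside `norm`), while its siblings `vector_norm`, `inf_norm` and `p_matrix_norm` remain visible and share the same LayerHelper/`append_op` pattern. A plausible sketch of the hidden helper in that style is given here; the dynamic-mode `_C_ops.frobenius_norm` call, the attribute names and the error message are assumptions inferred from the neighbouring helpers, not text from the source.

# Sketch only: a reconstruction following the pattern of the visible helpers.
# The absolute imports mirror the module's own relative imports.
import paddle
from paddle import _C_ops
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.data_feeder import check_variable_and_dtype


def frobenius_norm(input, dim=None, keepdim=False, name=None):
    # `dim` must be None (reduce over all elements) or a two-element list.
    if dim is not None and not (isinstance(dim, list) and len(dim) == 2):
        raise ValueError(
            "The dim of frobenius norm op should be None or two elements list!")
    if paddle.in_dynamic_mode():
        # Assumed eager fast path; attribute names are not confirmed by the source.
        if dim is None:
            return _C_ops.frobenius_norm(input, 'keep_dim', keepdim,
                                         'reduce_all', True)
        return _C_ops.frobenius_norm(input, 'dim', dim, 'keep_dim', keepdim,
                                     'reduce_all', False)
    # Static-graph path: validate dtype, then append a frobenius_norm op.
    attrs = {
        'dim': dim if dim is not None else [-2, -1],
        'keep_dim': keepdim,
        'reduce_all': dim is None,
    }
    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                             'frobenius_norm')
    helper = LayerHelper('frobenius_norm', **locals())
    out = helper.create_variable_for_type_inference(
        dtype=helper.input_dtype())
    helper.append_op(
        type='frobenius_norm',
        inputs={'X': input},
        outputs={'Out': out},
        attrs=attrs)
    return out

With a helper of this shape, `norm(x, p='fro', axis=[0, 1])` dispatches through the `frobenius_norm(x, dim=axis, keepdim=keepdim, name=name)` call sites visible further down in the masked code.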
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from ..fluid.layer_helper import LayerHelper from ..framework import _varbase_creator, _dygraph_tracer from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype from ..static import Variable from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode from ..fluid.layers import transpose, cast # noqa: F401 from ..fluid import layers import paddle from paddle.common_ops_import import core from paddle.common_ops_import import VarDesc from paddle import _C_ops __all__ = [] def matmul(x, y, transpose_x=False, transpose_y=False, name=None): """ Applies matrix multiplication to two tensors. `matmul` follows the complete broadcast rules, and its behavior is consistent with `np.matmul`. Currently, the input tensors' number of dimensions can be any, `matmul` can be used to achieve the `dot`, `matmul` and `batchmatmul`. The actual behavior depends on the shapes of :math:`x`, :math:`y` and the flag values of :attr:`transpose_x`, :attr:`transpose_y`. Specifically: - If a transpose flag is specified, the last two dimensions of the tensor are transposed. If the tensor is ndim-1 of shape, the transpose is invalid. If the tensor is ndim-1 of shape :math:`[D]`, then for :math:`x` it is treated as :math:`[1, D]`, whereas for :math:`y` it is the opposite: It is treated as :math:`[D, 1]`. The multiplication behavior depends on the dimensions of `x` and `y`. Specifically: - If both tensors are 1-dimensional, the dot product result is obtained. - If both tensors are 2-dimensional, the matrix-matrix product is obtained. - If the `x` is 1-dimensional and the `y` is 2-dimensional, a `1` is prepended to its dimension in order to conduct the matrix multiply. After the matrix multiply, the prepended dimension is removed. - If the `x` is 2-dimensional and `y` is 1-dimensional, the matrix-vector product is obtained. - If both arguments are at least 1-dimensional and at least one argument is N-dimensional (where N > 2), then a batched matrix multiply is obtained. If the first argument is 1-dimensional, a 1 is prepended to its dimension in order to conduct the batched matrix multiply and removed after. If the second argument is 1-dimensional, a 1 is appended to its dimension for the purpose of the batched matrix multiple and removed after. The non-matrix (exclude the last two dimensions) dimensions are broadcasted according the broadcast rule. For example, if input is a (j, 1, n, m) tensor and the other is a (k, m, p) tensor, out will be a (j, k, n, p) tensor. Args: x (Tensor): The input tensor which is a Tensor. y (Tensor): The input tensor which is a Tensor. transpose_x (bool): Whether to transpose :math:`x` before multiplication. transpose_y (bool): Whether to transpose :math:`y` before multiplication. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The output Tensor. Examples: .. 
code-block:: python import paddle import numpy as np # vector * vector x_data = np.random.random([10]).astype(np.float32) y_data = np.random.random([10]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [1] # matrix * vector x_data = np.random.random([10, 5]).astype(np.float32) y_data = np.random.random([5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10] # batched matrix * broadcasted vector x_data = np.random.random([10, 5, 2]).astype(np.float32) y_data = np.random.random([2]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 5] # batched matrix * batched matrix x_data = np.random.random([10, 5, 2]).astype(np.float32) y_data = np.random.random([10, 2, 5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 5, 5] # batched matrix * broadcasted matrix x_data = np.random.random([10, 1, 5, 2]).astype(np.float32) y_data = np.random.random([1, 3, 2, 5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 3, 5, 5] """ if in_dygraph_mode(): return _C_ops.final_state_matmul(x, y, transpose_x, transpose_y) if _in_legacy_dygraph(): op_type = 'matmul_v2' op = getattr(_C_ops, op_type) return op(x, y, 'trans_x', transpose_x, 'trans_y', transpose_y) attrs = { 'trans_x': transpose_x, 'trans_y': transpose_y, } def __check_input(x, y): var_names = {'x': x, 'y': y} for name, val in var_names.items(): check_variable_and_dtype( val, name, ['float16', 'float32', 'float64', 'complex64', 'complex128'], 'matmul') __check_input(x, y) helper = LayerHelper('matmul_v2', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matmul_v2', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs=attrs) return out def norm(x, p='fro', axis=None, keepdim=False, name=None): """ Returns the matrix norm (Frobenius) or vector norm (the 1-norm, the Euclidean or 2-norm, and in general the p-norm for p > 0) of a given tensor. .. note:: This norm API is different from `numpy.linalg.norm`. This api supports high-order input tensors (rank >= 3), and certain axis need to be pointed out to calculate the norm. But `numpy.linalg.norm` only supports 1-D vector or 2-D matrix as input tensor. For p-order matrix norm, this api actually treats matrix as a flattened vector to calculate the vector norm, NOT REAL MATRIX NORM. Args: x (Tensor): The input tensor could be N-D tensor, and the input data type could be float32 or float64. p (float|string, optional): Order of the norm. Supported values are `fro`, `0`, `1`, `2`, `inf`, `-inf` and any positive real number yielding the corresponding p-norm. Not supported: ord < 0 and nuclear norm. Default value is `fro`. axis (int|list|tuple, optional): The axis on which to apply norm operation. If axis is int or list(int)/tuple(int) with only one element, the vector norm is computed over the axis. If `axis < 0`, the dimension to norm operation is rank(input) + axis. If axis is a list(int)/tuple(int) with two elements, the matrix norm is computed over the axis. Defalut value is `None`. keepdim (bool, optional): Whether to reserve the reduced dimension in the output Tensor. 
The result tensor will have fewer dimension than the :attr:`input` unless :attr:`keepdim` is true, default value is False. name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: results of norm operation on the specified axis of input tensor, it's data type is the same as input's Tensor. Examples: .. code-block:: python import paddle import numpy as np shape=[2, 3, 4] np_input = np.arange(24).astype('float32') - 12 np_input = np_input.reshape(shape) x = paddle.to_tensor(np_input) #[[[-12. -11. -10. -9.] [ -8. -7. -6. -5.] [ -4. -3. -2. -1.]] # [[ 0. 1. 2. 3.] [ 4. 5. 6. 7.] [ 8. 9. 10. 11.]]] # compute frobenius norm along last two dimensions. out_fro = paddle.linalg.norm(x, p='fro', axis=[0,1]) # out_fro.numpy() [17.435596 16.911535 16.7332 16.911535] # compute 2-order vector norm along last dimension. out_pnorm = paddle.linalg.norm(x, p=2, axis=-1) #out_pnorm.numpy(): [[21.118711 13.190906 5.477226] # [ 3.7416575 11.224972 19.131126]] # compute 2-order norm along [0,1] dimension. out_pnorm = paddle.linalg.norm(x, p=2, axis=[0,1]) #out_pnorm.numpy(): [17.435596 16.911535 16.7332 16.911535] # compute inf-order norm out_pnorm = paddle.linalg.norm(x, p=np.inf) #out_pnorm.numpy() = [12.] out_pnorm = paddle.linalg.norm(x, p=np.inf, axis=0) #out_pnorm.numpy(): [[12. 11. 10. 9.] [8. 7. 6. 7.] [8. 9. 10. 11.]] # compute -inf-order norm out_pnorm = paddle.linalg.norm(x, p=-np.inf) #out_pnorm.numpy(): [0.] out_pnorm = paddle.linalg.norm(x, p=-np.inf, axis=0) #out_pnorm.numpy(): [[0. 1. 2. 3.] [4. 5. 6. 5.] [4. 3. 2. 1.]] """ # MASKED: frobenius_norm function (lines 242-275) def vector_norm(input, porder=None, axis=None, keepdim=False, asvector=False, name=None): """ Calculate the p-order vector norm for certain dimension of Tensor `input`. Args: input (Variable): Tensor, data type float32, float64. porder (float, optional): None for porder=2.0. axis (int, optional): None for last dimension. keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False. 
""" if paddle.in_dynamic_mode(): if axis is None: axis = -1 return _C_ops.p_norm(input, 'porder', porder, 'axis', axis, 'keepdim', keepdim, 'asvector', asvector) if porder is not None: check_type(porder, 'porder', (float, int), 'p_norm') if axis is not None: check_type(axis, 'axis', (int), 'p_norm') check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'p_norm') attrs = { 'axis': axis if axis is not None else -1, 'porder': float(porder) if porder is not None else 2.0, 'keepdim': keepdim, 'asvector': asvector, 'epsilon': 1e-12, } helper = LayerHelper('p_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op( type='p_norm', inputs={'X': input}, outputs={'Out': out}, attrs=attrs) return out def inf_norm(input, porder=None, axis=axis, keepdim=False, asvector=False, name=None): helper = LayerHelper('frobenius_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op(type='abs', inputs={'X': input}, outputs={'Out': out}) reduce_out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) reduce_all = True if axis == None or axis == [] or asvector == True else False axis = axis if axis != None and axis != [] else [0] reduce_type = 'reduce_max' if porder == np.float( 'inf') else 'reduce_min' helper.append_op( type=reduce_type, inputs={'X': out}, outputs={'Out': reduce_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) return reduce_out def p_matrix_norm(input, porder=1., axis=axis, keepdim=False, name=None): """ NOTE: This function actually treats the matrix as flattened vector to calculate vector norm instead of matrix norm. """ block = LayerHelper('norm', **locals()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) abs_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='abs', inputs={'X': input}, outputs={'Out': abs_out}) pow_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='pow', inputs={'X': abs_out}, outputs={'Out': pow_out}, attrs={'factor': porder}) sum_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': sum_out}, attrs={ 'dim': axis, 'keep_dim': keepdim, 'reduce_all': True if axis is None else False }) porder block.append_op( type='pow', inputs={'X': sum_out}, outputs={'Out': out}, attrs={'factor': float(1. / porder)}) return out if axis is None and p is not None: if isinstance(p, str): if p == "fro": return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name) else: raise ValueError( "only valid string values are 'fro', found {}".format(p)) elif isinstance(p, (int, float)): return vector_norm( x, porder=p, axis=axis, keepdim=keepdim, asvector=True, name=name) else: raise ValueError("only valid p type is string or float, found {}". 
format(type(p))) if isinstance(axis, tuple): axis = list(axis) if isinstance(axis, list) and len(axis) == 1: axis = axis[0] #calculate vector norm, where axis is int or list with only one integer if isinstance(axis, int): if isinstance(p, str): if p == "fro": return vector_norm( x, porder=2, axis=axis, keepdim=keepdim, asvector=False, name=name) else: raise ValueError( "only valid string values are 'fro', found {}".format(p)) elif isinstance(p, (int, float)): return vector_norm( x, axis=axis, porder=p, keepdim=keepdim, asvector=False, name=name) else: raise ValueError( "unspport p for p-order vector norm. except float, found {}". format(p)) #calculate matrix norm, where axis is list with two integers elif isinstance(axis, list) and len(axis) == 2: if p == "fro": return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name) elif p == np.inf or p == -np.inf: return inf_norm(x, porder=p, axis=axis, keepdim=keepdim, name=name) elif p == 0: raise ValueError( "just suport axis type int or list (length of list <=1) if p = 0, found {}". format(axis)) else: return p_matrix_norm( x, porder=p, axis=axis, keepdim=keepdim, name=name) else: raise ValueError( "except axis type int or list (length of list <=2), found {}". format(axis)) def dist(x, y, p=2, name=None): r""" This OP returns the p-norm of (x - y). It is not a norm in a strict sense, only as a measure of distance. The shapes of x and y must be broadcastable. The definition is as follows, for details, please refer to the `numpy's broadcasting <https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`_: - Each input has at least one dimension. - Match the two input dimensions from back to front, the dimension sizes must either be equal, one of them is 1, or one of them does not exist. Where, z = x - y, the shapes of x and y are broadcastable, then the shape of z can be obtained as follows: 1. If the number of dimensions of x and y are not equal, prepend 1 to the dimensions of the tensor with fewer dimensions. For example, The shape of x is [8, 1, 6, 1], the shape of y is [7, 1, 5], prepend 1 to the dimension of y. x (4-D Tensor): 8 x 1 x 6 x 1 y (4-D Tensor): 1 x 7 x 1 x 5 2. Determine the size of each dimension of the output z: choose the maximum value from the two input dimensions. z (4-D Tensor): 8 x 7 x 6 x 5 If the number of dimensions of the two inputs are the same, the size of the output can be directly determined in step 2. When p takes different values, the norm formula is as follows: When p = 0, defining $0^0=0$, the zero-norm of z is simply the number of non-zero elements of z. .. math:: ||z||_{0}=\lim_{p \\rightarrow 0}\sum_{i=1}^{m}|z_i|^{p} When p = inf, the inf-norm of z is the maximum element of z. .. math:: ||z||_\infty=\max_i |z_i| When p = -inf, the negative-inf-norm of z is the minimum element of z. .. math:: ||z||_{-\infty}=\min_i |z_i| Otherwise, the p-norm of z follows the formula, .. math:: ||z||_{p}=(\sum_{i=1}^{m}|z_i|^p)^{\\frac{1}{p}} Args: x (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64. y (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64. p (float, optional): The norm to be computed, its data type is float32 or float64. Default: 2. Returns: Tensor: Tensor that is the p-norm of (x - y). Examples: .. code-block:: python import paddle import numpy as np x = paddle.to_tensor(np.array([[3, 3],[3, 3]]), "float32") y = paddle.to_tensor(np.array([[3, 3],[3, 1]]), "float32") out = paddle.dist(x, y, 0) print(out) # out = [1.] out = paddle.dist(x, y, 2) print(out) # out = [2.] 
out = paddle.dist(x, y, float("inf")) print(out) # out = [2.] out = paddle.dist(x, y, float("-inf")) print(out) # out = [0.] """ check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'dist') check_variable_and_dtype(y, 'dtype', ['float32', 'float64'], 'dist') check_type(p, 'p', (float, int), 'dist') helper = LayerHelper("dist", **locals()) out = helper.create_variable_for_type_inference(x.dtype) inputs = {"X": [x], "Y": [y]} outputs = {'Out': [out]} attrs = {"p": float(p)} helper.append_op( type='dist', inputs=inputs, outputs={'Out': out}, attrs=attrs) return out def cond(x, p=None, name=None): """ Computes the condition number of a matrix or batches of matrices with respect to a matrix norm ``p``. Args: x (Tensor): The input tensor could be tensor of shape ``(*, m, n)`` where ``*`` is zero or more batch dimensions for ``p`` in ``(2, -2)``, or of shape ``(*, n, n)`` where every matrix is invertible for any supported ``p``. And the input data type could be ``float32`` or ``float64``. p (float|string, optional): Order of the norm. Supported values are `fro`, `nuc`, `1`, `-1`, `2`, `-2`, `inf`, `-inf`. Default value is `None`, meaning that the order of the norm is `2`. name (str, optional): The default value is `None`. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: computing results of condition number, its data type is the same as input Tensor ``x``. Examples: .. code-block:: python import paddle import numpy as np x = paddle.to_tensor([[1., 0, -1], [0, 1, 0], [1, 0, 1]]) # compute conditional number when p is None out = paddle.linalg.cond(x) # out.numpy() [1.4142135] # compute conditional number when order of the norm is 'fro' out_fro = paddle.linalg.cond(x, p='fro') # out_fro.numpy() [3.1622777] # compute conditional number when order of the norm is 'nuc' out_nuc = paddle.linalg.cond(x, p='nuc') # out_nuc.numpy() [9.2426405] # compute conditional number when order of the norm is 1 out_1 = paddle.linalg.cond(x, p=1) # out_1.numpy() [2.] # compute conditional number when order of the norm is -1 out_minus_1 = paddle.linalg.cond(x, p=-1) # out_minus_1.numpy() [1.] # compute conditional number when order of the norm is 2 out_2 = paddle.linalg.cond(x, p=2) # out_2.numpy() [1.4142135] # compute conditional number when order of the norm is -1 out_minus_2 = paddle.linalg.cond(x, p=-2) # out_minus_2.numpy() [0.70710677] # compute conditional number when order of the norm is inf out_inf = paddle.linalg.cond(x, p=np.inf) # out_inf.numpy() [2.] # compute conditional number when order of the norm is -inf out_minus_inf = paddle.linalg.cond(x, p=-np.inf) # out_minus_inf.numpy() [1.] 
a = paddle.to_tensor(np.random.randn(2, 4, 4).astype('float32')) # a.numpy() # [[[ 0.14063153 -0.996288 0.7996131 -0.02571543] # [-0.16303636 1.5534962 -0.49919784 -0.04402903] # [-1.1341571 -0.6022629 0.5445269 0.29154757] # [-0.16816919 -0.30972657 1.7521842 -0.5402487 ]] # [[-0.58081484 0.12402827 0.7229862 -0.55046535] # [-0.15178485 -1.1604939 0.75810957 0.30971205] # [-0.9669573 1.0940945 -0.27363303 -0.35416734] # [-1.216529 2.0018666 -0.7773689 -0.17556527]]] a_cond_fro = paddle.linalg.cond(a, p='fro') # a_cond_fro.numpy() [31.572273 28.120834] b = paddle.to_tensor(np.random.randn(2, 3, 4).astype('float64')) # b.numpy() # [[[ 1.61707487 0.46829144 0.38130416 0.82546736] # [-1.72710298 0.08866375 -0.62518804 0.16128892] # [-0.02822879 -1.67764516 0.11141444 0.3220113 ]] # [[ 0.22524372 0.62474921 -0.85503233 -1.03960523] # [-0.76620689 0.56673047 0.85064753 -0.45158196] # [ 1.47595418 2.23646462 1.5701758 0.10497519]]] b_cond_2 = paddle.linalg.cond(b, p=2) # b_cond_2.numpy() [3.30064451 2.51976252] """ def mat_norm(input, porder=1., axis=None): """ NOTE: Calculate the matrix norm of a square matrix or batches of square matrices, when porder is in (1, -1, inf, -inf) """ reduce_all = True if axis is None or axis == [] else False axis = axis if axis != None and axis != [] else [0] keepdim = False if paddle.in_dynamic_mode(): abs_out = _C_ops.abs(input) sum_out = _C_ops.reduce_sum(abs_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == 1 or porder == np.inf: return _C_ops.reduce_max(sum_out, 'dim', [-1], 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == -1 or porder == -np.inf: return _C_ops.reduce_min(sum_out, 'dim', [-1], 'keepdim', keepdim, 'reduce_all', reduce_all) block = LayerHelper('norm', **locals()) abs_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='abs', inputs={'X': input}, outputs={'Out': abs_out}) block.append_op( type='reduce_sum', inputs={'X': abs_out}, outputs={'Out': sum_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) if porder == 1 or porder == np.inf: block.append_op( type='reduce_max', inputs={'X': sum_out}, outputs={'Out': out}, attrs={ 'dim': [-1], 'keep_dim': keepdim, 'reduce_all': reduce_all }) if porder == -1 or porder == -np.inf: block.append_op( type='reduce_min', inputs={'X': sum_out}, outputs={'Out': out}, attrs={ 'dim': [-1], 'keep_dim': keepdim, 'reduce_all': reduce_all }) return out def fro_norm(input, porder=2, axis=[-1]): """ NOTE: Calculate the frobenius norm of a square matrix or batches of square matrices. """ reduce_all = True if axis is None or axis == [] else False keepdim = False if paddle.in_dynamic_mode(): pow_out = _C_ops.pow(input, 'factor', porder) sum_out_1 = _C_ops.reduce_sum(pow_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) sum_out_2 = _C_ops.reduce_sum(sum_out_1, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) return _C_ops.pow(sum_out_2, 'factor', float(1. 
/ porder)) block = LayerHelper('norm', **locals()) pow_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out_1 = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out_2 = block.create_variable_for_type_inference( dtype=block.input_dtype()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='pow', inputs={'X': input}, outputs={'Out': pow_out}, attrs={'factor': porder}) block.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': sum_out_1}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='reduce_sum', inputs={'X': sum_out_1}, outputs={'Out': sum_out_2}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='pow', inputs={'X': sum_out_2}, outputs={'Out': out}, attrs={'factor': float(1. / porder)}) return out def svd_norm(input, porder, axis=[-1]): """ NOTE: Calculate the matrix norm, which is related to singular values, of a matrix or batches of matrices, including nuclear norm, 2-norm and (-2)-norm. """ reduce_all = True if axis is None or axis == [] else False keepdim = False u, s, vh = svd(input, full_matrices=False) if paddle.in_dynamic_mode(): if porder == "nuc": return _C_ops.reduce_sum(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) max_out = _C_ops.reduce_max(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) min_out = _C_ops.reduce_min(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == 2: return _C_ops.elementwise_div(max_out, min_out, 'aixs', axis, 'use_mkldnn', False) if porder == -2: return _C_ops.elementwise_div(min_out, max_out, 'aixs', axis, 'use_mkldnn', False) block = LayerHelper('norm', **locals()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) if porder == "nuc": block.append_op( type='reduce_sum', inputs={'X': s}, outputs={'Out': out}, attrs={ 'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all }) return out max_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) min_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='reduce_max', inputs={'X': s}, outputs={'Out': max_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='reduce_min', inputs={'X': s}, outputs={'Out': min_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) if porder == 2: block.append_op( type='elementwise_div', inputs={'X': max_out, 'Y': min_out}, outputs={'Out': out}, attrs={'aixs': axis, 'use_mkldnn': False}) return out if porder == -2: block.append_op( type='elementwise_div', inputs={'X': min_out, 'Y': max_out}, outputs={'Out': out}, attrs={'aixs': axis, 'use_mkldnn': False}) return out def empty_tensor(input, shape): if paddle.in_dynamic_mode(): return input.reshape(shape) raise ValueError("only support x is nonempty tensor in static mode") x_shape = list(x.shape) if not len(x_shape) >= 2: raise ValueError("input should be a matrix or batches of matrices, " + "but the dimention of received input is {}".format( len(x_shape))) if p == None: p = 2 x_size = 0 if (0 in x_shape) else 1 if p in ("fro", "nuc", 1, -1, np.inf, -np.inf): if x_shape[len(x_shape) - 1] == x_shape[len(x_shape) - 2]: if x_size == 0: return empty_tensor(x, x_shape[:-2]) x_inv = x.inverse() if p == "fro": return fro_norm(x) * fro_norm(x_inv) if p == "nuc": return svd_norm(x, p) * svd_norm(x_inv, p) if p in (1, -1): return mat_norm( x, 
porder=p, axis=[-2]) * mat_norm( x_inv, porder=p, axis=[-2]) if p in (np.inf, -np.inf): return mat_norm( x, porder=p, axis=[-1]) * mat_norm( x_inv, porder=p, axis=[-1]) else: raise ValueError("only support p is {} when input is a ".format(p) + "square matrix or batches of square matrices") elif p in (2, -2): if x_size == 0: return empty_tensor(x, x_shape[:-2]) return svd_norm(x, porder=p) else: raise ValueError( "unsupported {} for p, only supporting ('fro', 'nuc', ".format( p) + "1, -1, 2, -2, inf, -inf) or none") def dot(x, y, name=None): """ This operator calculates inner product for vectors. .. note:: Support 1-d and 2-d Tensor. When it is 2d, the first dimension of this matrix is the batch dimension, which means that the vectors of multiple batches are dotted. Parameters: x(Tensor): 1-D or 2-D ``Tensor``. Its dtype should be ``float32``, ``float64``, ``int32``, ``int64`` y(Tensor): 1-D or 2-D ``Tensor``. Its dtype soulde be ``float32``, ``float64``, ``int32``, ``int64`` name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. Details: :ref:`api_guide_Name` Returns: Tensor: the calculated result Tensor. Examples: .. code-block:: python import paddle import numpy as np x_data = np.random.uniform(0.1, 1, [10]).astype(np.float32) y_data = np.random.uniform(1, 3, [10]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.dot(x, y) print(z) """ op_type = 'dot' # skip var type check in dygraph mode to improve efficiency if paddle.in_dynamic_mode(): op = getattr(_C_ops, op_type) return op(x, y) assert x is not None, 'x cannot be None in {}'.format(op_type) assert y is not None, 'y cannot be None in {}'.format(op_type) check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], op_type) check_variable_and_dtype(y, 'y', ['float32', 'float64', 'int32', 'int64'], op_type) helper = LayerHelper(op_type, **locals()) if name is None: out = helper.create_variable_for_type_inference(dtype=x.dtype) else: out = helper.create_variable( name=name, dtype=x.dtype, persistable=False) helper.append_op( type="dot", inputs={'X': x, 'Y': y}, attrs={}, outputs={"Out": out}) return out def cov(x, rowvar=True, ddof=True, fweights=None, aweights=None, name=None): """ Estimate the covariance matrix of the input variables, given data and weights. A covariance matrix is a square matrix, indicate the covariance of each pair variables in the input matrix. For example, for an N-dimensional samples X=[x1,x2,…xN]T, then the covariance matrix element Cij is the covariance of xi and xj. The element Cii is the variance of xi itself. Parameters: x(Tensor): A N-D(N<=2) Tensor containing multiple variables and observations. By default, each row of x represents a variable. Also see rowvar below. rowvar(Bool, optional): If rowvar is True (default), then each row represents a variable, with observations in the columns. Default: True ddof(Bool, optional): If ddof=True will return the unbiased estimate, and ddof=False will return the simple average. Default: True fweights(Tensor, optional): 1-D Tensor of integer frequency weights; The number of times each observation vector should be repeated. Default: None aweights(Tensor, optional): 1-D Tensor of observation vector weights. How important of the observation vector, larger data means this element is more important. Default: None name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. 
Details: :ref:`api_guide_Name` Returns: Tensor: The covariance matrix Tensor of the variables. Examples: .. code-block:: python import paddle xt = paddle.rand((3,4)) paddle.linalg.cov(xt) ''' Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, [[0.07918842, 0.06127326, 0.01493049], [0.06127326, 0.06166256, 0.00302668], [0.01493049, 0.00302668, 0.01632146]]) ''' """ op_type = 'cov' if len(x.shape) > 2 or len(x.shape) < 1: raise ValueError( "Input(x) only support N-D (1<=N<=2) tensor in cov, but received " "length of Input(input) is %s." % len(x.shape)) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cov') nx = x if len(x.shape) == 1: nx = x.reshape((1, -1)) if not rowvar and nx.shape[0] != 1: nx = nx.t() w = None observation_num = nx.shape[1] if fweights is not None: w = fweights.astype(nx.dtype) if len(w.shape) > 1: raise ValueError( "Input(fweights) only support N-D (N<=1) tensor in cov, but received " "shape of Input(input) is %s." % len(fweights.shape)) if fweights.shape[0] != observation_num: raise ValueError( "The number of Input(fweights) should equal to x's dim[1]: {}, but received " "size of Input(fweights) is {}.".format(observation_num, fweights.shape[0])) if fweights.min() < 0: raise ValueError( "The value of Input(fweights) cannot be negtive, but received " "min of Input(fweights) is {}.".format(fweights.min())) if not paddle.all(fweights == paddle.round(fweights.astype('float64'))): raise ValueError("Input(fweights) must be integer ") if aweights is not None: aw = aweights.astype(nx.dtype) if len(aw.shape) > 1: raise ValueError( "Input(aweights) only support N-D (N<=1) tensor in cov, but received " "length of Input(input) is %s." % len(aweights.shape)) check_variable_and_dtype(aweights, 'dtype', ['float32', 'float64'], 'cov') if aweights.shape[0] != observation_num: raise ValueError( "The number of Input(aweights) should equal to x's dim[1]: {}, but received " "size of Input(aweights) is {}.".format(observation_num, aweights.shape[0])) if aweights.min() < 0: raise ValueError( "The value of Input(aweights) cannot be negtive, but received " "min of Input(aweights) is {}.".format(aweights.min())) if w is not None: w = w * aw else: w = aw w_sum = paddle.to_tensor(observation_num, dtype=nx.dtype) if fweights is not None or aweights is not None: w_sum = w.sum() if w_sum.item() == 0: raise ValueError("The sum of weights is zero, can't be normalized.") if w is not None: nx_w = nx * w avg = (nx_w).sum(axis=1) / w_sum else: avg = nx.sum(axis=1) / w_sum nx_w = nx if w is not None and aweights is not None and ddof == True: norm_factor = w_sum - (w * aweights).sum() / w_sum else: norm_factor = w_sum - ddof if norm_factor <= 0: norm_factor = paddle.to_tensor(0, dtype=nx.dtype) nx = nx - avg.unsqueeze(1) xxt = paddle.mm(nx, nx_w.t().conj()) cov = paddle.divide(xxt, norm_factor).squeeze() return cov def t(input, name=None): """ Transpose <=2-D tensor. 0-D and 1-D tensors are returned as it is and 2-D tensor is equal to the paddle.transpose function which perm dimensions set 0 and 1. Args: input (Tensor): The input Tensor. It is a N-D (N<=2) Tensor of data types float16, float32, float64, int32. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` Returns: Tensor: A transposed n-D Tensor, with data type being float16, float32, float64, int32, int64. For Example: .. 
code-block:: text # Example 1 (0-D tensor) x = tensor([0.79]) paddle.t(x) = tensor([0.79]) # Example 2 (1-D tensor) x = tensor([0.79, 0.84, 0.32]) paddle.t(x) = tensor([0.79, 0.84, 0.32]) # Example 3 (2-D tensor) x = tensor([0.79, 0.84, 0.32], [0.64, 0.14, 0.57]) paddle.t(x) = tensor([0.79, 0.64], [0.84, 0.14], [0.32, 0.57]) Examples: .. code-block:: python import paddle x = paddle.ones(shape=[2, 3], dtype='int32') x_transposed = paddle.t(x) print(x_transposed.shape) # [3, 2] """ if len(input.shape) > 2: raise ValueError( "Input(input) only support N-D (N<=2) tensor, but received " "length of Input(input) is %s. Perhaps you can use paddle." "tensor.transpose() instead." % len(input.shape)) if paddle.in_dynamic_mode(): if len(input.shape) == 1: return input # 2-D tensor perm = [1, 0] out, _ = _C_ops.transpose2(input, 'axis', perm) return out check_variable_and_dtype( input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'], 'transpose') helper = LayerHelper('t', **locals()) out = helper.create_variable_for_type_inference(input.dtype) input_shape = helper.create_variable_for_type_inference(input.dtype) if len(input.shape) == 1: out = input else: helper.append_op( type='transpose2', inputs={'X': [input]}, outputs={'Out': [out], 'XShape': [input_shape]}, attrs={'axis': [1, 0]}) return out def cross(x, y, axis=None, name=None): """ Computes the cross product between two tensors along an axis. Inputs must have the same shape, and the length of their axes should be equal to 3. If `axis` is not given, it defaults to the first axis found with the length 3. Args: x (Tensor): The first input tensor. y (Tensor): The second input tensor. axis (int, optional): The axis along which to compute the cross product. It defaults to the first axis found with the length 3. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor. A Tensor with same data type as `x`. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0], [3.0, 3.0, 3.0]]) y = paddle.to_tensor([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]) z1 = paddle.cross(x, y) # [[-1. -1. -1.] # [ 2. 2. 2.] # [-1. -1. -1.]] z2 = paddle.cross(x, y, axis=1) # [[0. 0. 0.] # [0. 0. 0.] # [0. 0. 0.]] """ if in_dygraph_mode(): return _C_ops.final_state_cross(x, y, axis) else: if _in_legacy_dygraph(): if axis is not None: return _C_ops.cross(x, y, 'dim', axis) else: return _C_ops.cross(x, y) else: helper = LayerHelper("cross", **locals()) out = helper.create_variable_for_type_inference(x.dtype) attrs = dict() attrs['dim'] = axis helper.append_op( type='cross', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs=attrs) return out def cholesky(x, upper=False, name=None): r""" Computes the Cholesky decomposition of one symmetric positive-definite matrix or batches of symmetric positive-definite matrice. If `upper` is `True`, the decomposition has the form :math:`A = U^{T}U` , and the returned matrix :math:`U` is upper-triangular. Otherwise, the decomposition has the form :math:`A = LL^{T}` , and the returned matrix :math:`L` is lower-triangular. Args: x (Tensor): The input tensor. Its shape should be `[*, M, M]`, where * is zero or more batch dimensions, and matrices on the inner-most 2 dimensions all should be symmetric positive-definite. Its data type should be float32 or float64. upper (bool): The flag indicating whether to return upper or lower triangular matrices. Default: False. 
Returns: Tensor: A Tensor with same shape and data type as `x`. It represents \ triangular matrices generated by Cholesky decomposition. Examples: .. code-block:: python import paddle import numpy as np a = np.random.rand(3, 3) a_t = np.transpose(a, [1, 0]) x_data = np.matmul(a, a_t) + 1e-03 x = paddle.to_tensor(x_data) out = paddle.linalg.cholesky(x, upper=False) print(out) # [[1.190523 0. 0. ] # [0.9906703 0.27676893 0. ] # [1.25450498 0.05600871 0.06400121]] """ if paddle.in_dynamic_mode(): return _C_ops.cholesky(x, "upper", upper) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cholesky') check_type(upper, 'upper', bool, 'cholesky') helper = LayerHelper('cholesky', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='cholesky', inputs={'X': [x]}, outputs={'Out': out}, attrs={'upper': upper}) return out def matrix_rank(x, tol=None, hermitian=False, name=None): r""" Computes the rank of a matrix. The rank of a matrix is the number of singular values that are greater than the specified `tol` threshold when hermitian=False, or the number of eigenvalues in absolute value that are greater than the specified `tol` threshold when hermitian=True. Args: x (Tensor): The input tensor. Its shape should be `[..., m, n]`, where `...` is zero or more batch dimensions. If `x` is a batch of matrices then the output has the same batch dimensions. The data type of `x` should be float32 or float64. tol (float,Tensor,optional): the tolerance value. Default: None. If `tol` is not specified, and `sigma` is the largest singular value (or eigenvalues in absolute value), and `eps` is the epsilon value for the dtype of `x`, then `tol` is computed with formula `tol=sigma * max(m,n) * eps`. Note that if `x` is a batch of matrices, `tol` is computed this way for every batch. hermitian (bool,optional): indicates whether `x` is Hermitian. Default: False. When hermitian=True, `x` is assumed to be Hermitian, enabling a more efficient method for finding eigenvalues, but `x` is not checked inside the function. Instead, We just use the lower triangular of the matrix to compute. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: Rank of tensor x. Examples: .. 
code-block:: python import paddle a = paddle.eye(10) b = paddle.linalg.matrix_rank(a) print(b) # b = [10] c = paddle.ones(shape=[3, 4, 5, 5]) d = paddle.linalg.matrix_rank(c, tol=0.01, hermitian=True) print(d) # d = [[1, 1, 1, 1], # [1, 1, 1, 1], # [1, 1, 1, 1]] """ if paddle.in_dynamic_mode(): if tol is None: tol_tensor = None tol_attr = 0.0 use_default_tol = True elif isinstance(tol, Variable): if tol.dtype != x.dtype: tol_tensor = cast(tol, x.dtype) else: tol_tensor = tol tol_attr = 0.0 use_default_tol = False else: tol_tensor = None tol_attr = float(tol) use_default_tol = False return _C_ops.matrix_rank(x, tol_tensor, "tol", tol_attr, 'hermitian', hermitian, 'use_default_tol', use_default_tol) inputs = {} attrs = {} check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'matrix_rank') inputs['X'] = x if tol is None: attrs['use_default_tol'] = True elif isinstance(tol, Variable): check_variable_and_dtype(tol, 'tol', ['float32'], 'matrix_rank') attrs['use_default_tol'] = False if tol.dtype != x.dtype: inputs['TolTensor'] = cast(tol, x.dtype) else: inputs['TolTensor'] = tol else: check_type(tol, 'tol', float, 'matrix_rank') attrs['use_default_tol'] = False attrs['tol'] = tol check_type(hermitian, 'hermitian', bool, 'matrix_rank') attrs['hermitian'] = hermitian helper = LayerHelper('matrix_rank', **locals()) out = helper.create_variable_for_type_inference(dtype='int32') helper.append_op( type='matrix_rank', inputs=inputs, outputs={'Out': out}, attrs=attrs) return out def bmm(x, y, name=None): """ Applies batched matrix multiplication to two tensors. Both of the two input tensors must be three-dementional and share the same batch size. if x is a (b, m, k) tensor, y is a (b, k, n) tensor, the output will be a (b, m, n) tensor. Args: x (Tensor): The input Tensor. y (Tensor): The input Tensor. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The product Tensor. Examples: .. code-block:: python import paddle # In imperative mode: # size x: (2, 2, 3) and y: (2, 3, 2) x = paddle.to_tensor([[[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]], [[3.0, 3.0, 3.0], [4.0, 4.0, 4.0]]]) y = paddle.to_tensor([[[1.0, 1.0],[2.0, 2.0],[3.0, 3.0]], [[4.0, 4.0],[5.0, 5.0],[6.0, 6.0]]]) out = paddle.bmm(x, y) #output size: (2, 2, 2) #output value: #[[[6.0, 6.0],[12.0, 12.0]],[[45.0, 45.0],[60.0, 60.0]]] out_np = out.numpy() """ x_shape = x.shape y_shape = y.shape if not len(x_shape) == len(y_shape) == 3: raise ValueError( "x and y should be 3-dimensional. But received x's dimention: {}, y's dimention: {}". format(x_shape, y_shape)) if x_shape[2] != y_shape[1]: raise ValueError( "x's width must be equal with y's height. But received x's shape: {}, y's shape: {}". format(x_shape, y_shape)) if x_shape[0] != y_shape[0]: raise ValueError( "x's batch (shape[0]) must be equal with y's batch (shape[0]). But received x's shape: {}, y's shape: {}". format(x_shape, y_shape)) if paddle.in_dynamic_mode(): return _C_ops.bmm(x, y) helper = LayerHelper('bmm', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type='bmm', inputs={'X': x, 'Y': y}, outputs={'Out': out}) return out def histogram(input, bins=100, min=0, max=0, name=None): """ Computes the histogram of a tensor. The elements are sorted into equal width bins between min and max. If min and max are both zero, the minimum and maximum values of the data are used. Args: input (Tensor): A Tensor(or LoDTensor) with shape :math:`[N_1, N_2,..., N_k]` . 
The data type of the input Tensor should be float32, float64, int32, int64. bins (int): number of histogram bins min (int): lower end of the range (inclusive) max (int): upper end of the range (inclusive) Returns: Tensor: data type is int64, shape is (nbins,). Examples: .. code-block:: python import paddle inputs = paddle.to_tensor([1, 2, 1]) result = paddle.histogram(inputs, bins=4, min=0, max=3) print(result) # [0, 2, 1, 0] """ if paddle.in_dynamic_mode(): return _C_ops.histogram(input, "bins", bins, "min", min, "max", max) helper = LayerHelper('histogram', **locals()) check_variable_and_dtype( input, 'X', ['int32', 'int64', 'float32', 'float64'], 'histogram') out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64) helper.append_op( type='histogram', inputs={'X': input}, outputs={'Out': out}, attrs={'bins': bins, 'min': min, 'max': max}) return out def bincount(x, weights=None, minlength=0, name=None): """ Computes frequency of each value in the input tensor. Args: x (Tensor): A Tensor with non-negative integer. Should be 1-D tensor. weights (Tensor, optional): Weight for each value in the input tensor. Should have the same shape as input. Default is None. minlength (int, optional): Minimum number of bins. Should be non-negative integer. Default is 0. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor of frequency. Examples: .. code-block:: python import paddle x = paddle.to_tensor([1, 2, 1, 4, 5]) result1 = paddle.bincount(x) print(result1) # [0, 2, 1, 0, 1, 1] w = paddle.to_tensor([2.1, 0.4, 0.1, 0.5, 0.5]) result2 = paddle.bincount(x, weights=w) print(result2) # [0., 2.19999981, 0.40000001, 0., 0.50000000, 0.50000000] """ if x.dtype not in [paddle.int32, paddle.int64]: raise TypeError("Elements in Input(x) should all be integers") if paddle.in_dynamic_mode(): return _C_ops.bincount(x, weights, "minlength", minlength) helper = LayerHelper('bincount', **locals()) check_variable_and_dtype(x, 'X', ['int32', 'int64'], 'bincount') if weights is not None: check_variable_and_dtype(weights, 'Weights', ['int32', 'int64', 'float32', 'float64'], 'bincount') out = helper.create_variable_for_type_inference(dtype=weights.dtype) else: out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='bincount', inputs={'X': x, 'Weights': weights}, outputs={'Out': out}, attrs={'minlength': minlength}) return out def mv(x, vec, name=None): """ Performs a matrix-vector product of the matrix x and the vector vec. Args: x (Tensor): A tensor with shape :math:`[M, N]` , The data type of the input Tensor x should be one of float32, float64. vec (Tensor): A tensor with shape :math:`[N]` , The data type of the input Tensor x should be one of float32, float64. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor which is producted by x and vec. Examples: .. 
code-block:: python # x: [M, N], vec: [N] # paddle.mv(x, vec) # out: [M] import numpy as np import paddle x_data = np.array([[2, 1, 3], [3, 0, 1]]).astype("float64") x = paddle.to_tensor(x_data) vec_data = np.array([3, 5, 1]) vec = paddle.to_tensor(vec_data).astype("float64") out = paddle.mv(x, vec) """ if in_dygraph_mode(): return _C_ops.final_state_mv(x, vec) else: if _in_legacy_dygraph(): out = _C_ops.mv(x, vec) return out else: def __check_input(x, vec): var_names = {'x': x, 'vec': vec} for name, val in var_names.items(): check_variable_and_dtype(val, name, ['float32', 'float64'], 'mv') x_shape = list(x.shape) vec_shape = list(vec.shape) if len(x_shape) != 2: raise ValueError( "x should be 2-dimensional. But received x's dimention: {}". format(x_shape)) if len(vec_shape) != 1: raise ValueError( "vec should be 1-dimensional. But received vec's dimention: {}". format(vec_shape)) __check_input(x, vec) helper = LayerHelper('mv', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='mv', inputs={'X': x, 'Vec': vec}, outputs={'Out': out}) return out def det(x, name=None): """ Calculates determinant value of a square matrix or batches of square matrices. Args: x (Tensor): input (Tensor): the input matrix of size `(n, n)` or the batch of matrices of size `(*, n, n)` where `*` is one or more batch dimensions. Returns: y (Tensor):the determinant value of a square matrix or batches of square matrices. Examples: .. code-block:: python import paddle x = paddle.randn([3,3,3]) A = paddle.linalg.det(x) print(A) # [ 0.02547996, 2.52317095, -6.15900707]) """ if paddle.in_dynamic_mode(): return _C_ops.determinant(x) check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'det') input_shape = list(x.shape) assert len(input_shape) >= 2, \ "The x must be at least 2-dimensional, " \ "but received Input x's dimensional: %s.\n" % \ len(input_shape) assert (input_shape[-1] == input_shape[-2]), \ "Expect squared input," \ "but received %s by %s matrix.\n" \ %(input_shape[-2], input_shape[-1]) \ helper = LayerHelper('determinant', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='determinant', inputs={'Input': [x]}, outputs={'Out': [out]}) return out def slogdet(x, name=None): """ Calculates the sign and natural logarithm of the absolute value of a square matrix's or batches square matrices' determinant. The determinant can be computed with ``sign * exp(logabsdet) Supports input of float, double Note that for matrices that have zero determinant, this returns ``(0, -inf)`` Args: x (Tensor): the batch of matrices of size :math:`(*, n, n)` where math:`*` is one or more batch dimensions. Returns: y (Tensor): A tensor containing the sign of the determinant and the natural logarithm of the absolute value of determinant, respectively. Examples: .. code-block:: python import paddle x = paddle.randn([3,3,3]) A = paddle.linalg.slogdet(x) print(A) # [[ 1. , 1. , -1. 
], # [-0.98610914, -0.43010661, -0.10872950]]) """ if paddle.in_dynamic_mode(): return _C_ops.slogdeterminant(x) check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'slogdet') input_shape = list(x.shape) assert len(input_shape) >= 2, \ "The x must be at least 2-dimensional, " \ "but received Input x's dimensional: %s.\n" % \ len(input_shape) assert (input_shape[-1] == input_shape[-2]), \ "Expect squared input," \ "but received %s by %s matrix.\n" \ %(input_shape[-2], input_shape[-1]) \ helper = LayerHelper('slogdeterminant', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='slogdeterminant', inputs={'Input': [x]}, outputs={'Out': [out]}) return out def svd(x, full_matrices=False, name=None): r""" Computes the singular value decomposition of one matrix or a batch of regular matrices. Let :math:`X` be the input matrix or a batch of input matrices, the output should satisfies: .. math:: X = U * diag(S) * VT Args: x (Tensor): The input tensor. Its shape should be `[..., N, M]`, where `...` is zero or more batch dimensions. N and M can be arbitraty positive number. Note that if x is sigular matrices, the grad is numerical instable. The data type of x should be float32 or float64. full_matrices (bool): A flag to control the behavor of svd. If full_matrices = True, svd op will compute full U and V matrics, which means shape of U is `[..., N, N]`, shape of V is `[..., M, M]`. K = min(M, N). If full_matrices = False, svd op will use a economic method to store U and V. which means shape of U is `[..., N, K]`, shape of V is `[..., M, K]`. K = min(M, N). name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tuple of 3 tensors: (U, S, VH). VH is the conjugate transpose of V. S is the singlar value vectors of matrics with shape `[..., K]` Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [1.0, 3.0], [4.0, 6.0]]).astype('float64') x = x.reshape([3, 2]) u, s, vh = paddle.linalg.svd(x) print (u) #U = [[ 0.27364809, -0.21695147 ], # [ 0.37892198, -0.87112408 ], # [ 0.8840446 , 0.44053933 ]] print (s) #S = [8.14753743, 0.78589688] print (vh) #VT= [[ 0.51411221, 0.85772294], # [ 0.85772294, -0.51411221]] # one can verify : U * S * VT == X # U * UH == I # V * VH == I """ if paddle.in_dynamic_mode(): return _C_ops.svd(x, 'full_matrices', full_matrices) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'svd') check_type(full_matrices, 'full_matrices', bool, 'svd') helper = LayerHelper('svd', **locals()) u = helper.create_variable_for_type_inference(dtype=x.dtype) vh = helper.create_variable_for_type_inference(dtype=x.dtype) s = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['full_matrices'] = full_matrices helper.append_op( type='svd', inputs={'X': [x]}, outputs={'U': u, 'VH': vh, 'S': s}, attrs=attrs, ) return u, s, vh def matrix_power(x, n, name=None): r""" Computes the n-th power of a square matrix or a batch of square matrices. Let :math:`X` be a sqaure matrix or a batch of square matrices, :math:`n` be an exponent, the equation should be: .. math:: Out = X ^ {n} Specifically, - If `n > 0`, it returns the matrix or a batch of matrices raised to the power of `n`. - If `n = 0`, it returns the identity matrix or a batch of identity matrices. - If `n < 0`, it returns the inverse of each matrix (if invertible) raised to the power of `abs(n)`. 
Args: x (Tensor): A square matrix or a batch of square matrices to be raised to power `n`. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. n (int): The exponent. It can be any positive, negative integer or zero. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The n-th power of the matrix (or the batch of matrices) `x`. Its data type should be the same as that of `x`. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1, 2, 3], [1, 4, 9], [1, 8, 27]], dtype='float64') print(paddle.linalg.matrix_power(x, 2)) # [[6. , 34. , 102.], # [14. , 90. , 282.], # [36. , 250., 804.]] print(paddle.linalg.matrix_power(x, 0)) # [[1., 0., 0.], # [0., 1., 0.], # [0., 0., 1.]] print(paddle.linalg.matrix_power(x, -2)) # [[ 12.91666667, -12.75000000, 2.83333333 ], # [-7.66666667 , 8. , -1.83333333 ], # [ 1.80555556 , -1.91666667 , 0.44444444 ]] """ if paddle.in_dynamic_mode(): return _C_ops.matrix_power(x, "n", n) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'matrix_power') check_type(n, 'n', int, 'matrix_power') helper = LayerHelper('matrix_power', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matrix_power', inputs={'X': x}, outputs={'Out': out}, attrs={'n': n}) return out def qr(x, mode="reduced", name=None): r""" Computes the QR decomposition of one matrix or batches of matrice (backward is unsupported now). Args: x (Tensor): The input tensor. Its shape should be `[..., M, N]`, where ... is zero or more batch dimensions. M and N can be arbitrary positive number. The data type of x should be float32 or float64. mode (str, optional): A flag to control the behavior of qr, the default is "reduced". Suppose x's shape is `[..., M, N]` and denoting `K = min(M, N)`: If mode = "reduced", qr op will return reduced Q and R matrices, which means Q's shape is `[..., M, K]` and R's shape is `[..., K, N]`. If mode = "complete", qr op will return complete Q and R matrices, which means Q's shape is `[..., M, M]` and R's shape is `[..., M, N]`. If mode = "r", qr op will only return reduced R matrix, which means R's shape is `[..., K, N]`. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: If mode = "reduced" or mode = "complete", qr will return a two tensor-tuple, which represents Q and R. If mode = "r", qr will return a tensor which represents R. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') q, r = paddle.linalg.qr(x) print (q) print (r) # Q = [[-0.16903085, 0.89708523], # [-0.50709255, 0.27602622], # [-0.84515425, -0.34503278]]) # R = [[-5.91607978, -7.43735744], # [ 0. 
, 0.82807867]]) # one can verify : X = Q * R ; """ if paddle.in_dynamic_mode(): q, r = _C_ops.qr(x, 'mode', mode) if mode == "r": return r else: return q, r check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'qr') check_type(mode, 'mode', str, 'qr') helper = LayerHelper('qr', **locals()) q = helper.create_variable_for_type_inference(dtype=x.dtype) r = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['mode'] = mode helper.append_op( type='qr', inputs={'X': [x]}, outputs={'Q': q, 'R': r}, attrs=attrs) if mode == "r": return r else: return q, r def lu(x, pivot=True, get_infos=False, name=None): r""" Computes the LU factorization of an N-D(N>=2) matrix x. Returns the LU factorization(inplace x) and Pivots. low triangular matrix L and upper triangular matrix U are combined to a single LU matrix. Pivoting is done if pivot is set to True. P mat can be get by pivots: # ones = eye(rows) #eye matrix of rank rows # for i in range(cols): # swap(ones[i], ones[pivots[i]]) # return ones Args: X (Tensor): the tensor to factor of N-dimensions(N>=2). pivot (bool, optional): controls whether pivoting is done. Default: True. get_infos (bool, optional): if set to True, returns an info IntTensor. Default: False. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: factorization (Tensor): LU matrix, the factorization of input X. pivots (IntTensor): the pivots of size(∗(N-2), min(m,n)). `pivots` stores all the intermediate transpositions of rows. The final permutation `perm` could be reconstructed by this, details refer to upper example. infos (IntTensor, optional): if `get_infos` is `True`, this is a tensor of size (∗(N-2)) where non-zero values indicate whether factorization for the matrix or each minibatch has succeeded or failed. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') lu,p,info = paddle.linalg.lu(x, get_infos=True) # >>> lu: # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0.20000000, 0.80000000], # [0.60000000, 0.50000000]]) # >>> p # Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # [3, 3]) # >>> info # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # 0) P,L,U = paddle.linalg.lu_unpack(lu,p) # >>> P # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[0., 1., 0.], # [0., 0., 1.], # [1., 0., 0.]]), # >>> L # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[1. , 0. ], # [0.20000000, 1. ], # [0.60000000, 0.50000000]]), # >>> U # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0. 
, 0.80000000]])) # one can verify : X = P @ L @ U ; """ if paddle.in_dynamic_mode(): LU, Piv, Info = _C_ops.lu(x, 'pivots', pivot) if get_infos: return LU, Piv, Info else: return LU, Piv check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu') helper = LayerHelper('lu', **locals()) lu = helper.create_variable_for_type_inference(dtype=x.dtype) p = helper.create_variable_for_type_inference(dtype='int') info = helper.create_variable_for_type_inference(dtype='int') attrs = dict() attrs['pivots'] = pivot helper.append_op( type='lu', inputs={'X': x}, outputs={'Out': lu, 'Pivots': p, 'Infos': info}, attrs=attrs) if get_infos: return lu, p, info else: return lu, p def lu_unpack(x, y, unpack_ludata=True, unpack_pivots=True, name=None): r""" Unpack L U and P to single matrix tensor . unpack L and U matrix from LU, unpack permutation matrix P from Pivtos . P mat can be get by pivots: # ones = eye(rows) #eye matrix of rank rows # for i in range(cols): # swap(ones[i], ones[pivots[i]]) Args: x (Tensor): The LU tensor get from paddle.linalg.lu, which is combined by L and U. y (Tensor): Pivots get from paddle.linalg.lu. unpack_ludata (bool,optional): whether to unpack L and U from x. Default: True. unpack_pivots (bool, optional): whether to unpack permutation matrix P from Pivtos. Default: True. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: P (Tensor): Permutation matrix P of lu factorization. L (Tensor): The lower triangular matrix tensor of lu factorization. U (Tensor): The upper triangular matrix tensor of lu factorization. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') lu,p,info = paddle.linalg.lu(x, get_infos=True) # >>> lu: # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0.20000000, 0.80000000], # [0.60000000, 0.50000000]]) # >>> p # Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # [3, 3]) # >>> info # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # 0) P,L,U = paddle.linalg.lu_unpack(lu,p) # >>> P # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[0., 1., 0.], # [0., 0., 1.], # [1., 0., 0.]]), # >>> L # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[1. , 0. ], # [0.20000000, 1. ], # [0.60000000, 0.50000000]]), # >>> U # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0. , 0.80000000]])) # one can verify : X = P @ L @ U ; """ if paddle.in_dynamic_mode(): P, L, U = _C_ops.lu_unpack(x, y, 'unpack_ludata', unpack_ludata, 'unpack_pivots', unpack_pivots) return P, L, U check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu_unpack') helper = LayerHelper('lu_unpack', **locals()) p = helper.create_variable_for_type_inference(dtype=x.dtype) l = helper.create_variable_for_type_inference(dtype=x.dtype) u = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['unpack_ludata'] = unpack_ludata attrs['unpack_pivots'] = unpack_pivots helper.append_op( type='lu_unpack', inputs={'X': x, 'Pivots': y}, outputs={'Pmat': p, 'L': l, 'U': u}, attrs=attrs) return p, l, u def eig(x, name=None): """ This API performs the eigenvalue decomposition of a square matrix or a batch of square matrices. .. 
note:: If the matrix is a Hermitian or a real symmetric matrix, please use :ref:`paddle.linalg.eigh` instead, which is much faster. If only eigenvalues is needed, please use :ref:`paddle.linalg.eigvals` instead. If the matrix is of any shape, please use :ref:`paddle.linalg.svd`. This API is only supported on CPU device. The output datatype is always complex for both real and complex input. Args: x (Tensor): A tensor with shape math:`[*, N, N]`, The data type of the x should be one of ``float32``, ``float64``, ``compplex64`` or ``complex128``. name (str, optional): The default value is `None`. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Eigenvalues(Tensors): A tensor with shape math:`[*, N]` refers to the eigen values. Eigenvectors(Tensors): A tensor with shape math:`[*, N, N]` refers to the eigen vectors. Examples: .. code-block:: python import paddle import numpy as np paddle.device.set_device("cpu") x_data = np.array([[1.6707249, 7.2249975, 6.5045543], [9.956216, 8.749598, 6.066444 ], [4.4251957, 1.7983172, 0.370647 ]]).astype("float32") x = paddle.to_tensor(x_data) w, v = paddle.linalg.eig(x) print(w) # Tensor(shape=[3, 3], dtype=complex128, place=CPUPlace, stop_gradient=False, # [[(-0.5061363550800655+0j) , (-0.7971760990842826+0j) , # (0.18518077798279986+0j)], # [(-0.8308237755993192+0j) , (0.3463813401919749+0j) , # (-0.6837005269141947+0j) ], # [(-0.23142567697893396+0j), (0.4944999840400175+0j) , # (0.7058765252952796+0j) ]]) print(v) # Tensor(shape=[3], dtype=complex128, place=CPUPlace, stop_gradient=False, # [ (16.50471283351188+0j) , (-5.5034820550763515+0j) , # (-0.21026087843552282+0j)]) """ if paddle.in_dynamic_mode(): w, v = _C_ops.eig(x) return w, v check_variable_and_dtype( x, 'X', ['float32', 'float64', 'complex64', 'complex128'], 'eig') helper = LayerHelper('eig', **locals()) w = helper.create_variable_for_type_inference(x.dtype) v = helper.create_variable_for_type_inference(x.dtype) inputs = {'X': x} outputs = {'Eigenvalues': w, 'Eigenvectors': v} helper.append_op(type='eig', inputs=inputs, outputs=outputs) return w, v def eigvals(x, name=None): """ Compute the eigenvalues of one or more general matrices. Warning: The gradient kernel of this operator does not yet developed. If you need back propagation through this operator, please replace it with paddle.linalg.eig. Args: x (Tensor): A square matrix or a batch of square matrices whose eigenvalues will be computed. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32, float64, complex64, or complex128. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: A tensor containing the unsorted eigenvalues which has the same batch dimensions with `x`. The eigenvalues are complex-valued even when `x` is real. Examples: .. 
code-block:: python import paddle paddle.set_device("cpu") paddle.seed(1234) x = paddle.rand(shape=[3, 3], dtype='float64') # [[0.02773777, 0.93004224, 0.06911496], # [0.24831591, 0.45733623, 0.07717843], # [0.48016702, 0.14235102, 0.42620817]]) print(paddle.linalg.eigvals(x)) # [(-0.27078833542132674+0j), (0.29962280156230725+0j), (0.8824477020120244+0j)] #complex128 """ check_variable_and_dtype(x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigvals') x_shape = list(x.shape) if len(x_shape) < 2: raise ValueError( "The dimension of Input(x) should be at least 2, but received x's dimention = {}, x's shape = {}". format(len(x_shape), x_shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The last two dimensions of Input(x) should be equal, but received x's shape = {}". format(x_shape)) if paddle.in_dynamic_mode(): return _C_ops.eigvals(x) helper = LayerHelper('eigvals', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type='eigvals', inputs={'X': x}, outputs={'Out': out}) return out def multi_dot(x, name=None): """ Multi_dot is an operator that calculates multiple matrix multiplications. Supports inputs of float16(only GPU support), float32 and float64 dtypes. This function does not support batched inputs. The input tensor in [x] must be 2-D except for the first and last can be 1-D. If the first tensor is a 1-D vector of shape(n, ) it is treated as row vector of shape(1, n), similarly if the last tensor is a 1D vector of shape(n, ), it is treated as a column vector of shape(n, 1). If the first and last tensor are 2-D matrix, then the output is also 2-D matrix, otherwise the output is a 1-D vector. Multi_dot will select the lowest cost multiplication order for calculation. The cost of multiplying two matrices with shapes (a, b) and (b, c) is a * b * c. Given matrices A, B, C with shapes (20, 5), (5, 100), (100, 10) respectively, we can calculate the cost of different multiplication orders as follows: - Cost((AB)C) = 20x5x100 + 20x100x10 = 30000 - Cost(A(BC)) = 5x100x10 + 20x5x10 = 6000 In this case, multiplying B and C first, then multiply A, which is 5 times faster than sequential calculation. Args: x ([Tensor]): The input tensors which is a list Tensor. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The output Tensor. Examples: .. 
code-block:: python import paddle import numpy as np # A * B A_data = np.random.random([3, 4]).astype(np.float32) B_data = np.random.random([4, 5]).astype(np.float32) A = paddle.to_tensor(A_data) B = paddle.to_tensor(B_data) out = paddle.linalg.multi_dot([A, B]) print(out.numpy().shape) # [3, 5] # A * B * C A_data = np.random.random([10, 5]).astype(np.float32) B_data = np.random.random([5, 8]).astype(np.float32) C_data = np.random.random([8, 7]).astype(np.float32) A = paddle.to_tensor(A_data) B = paddle.to_tensor(B_data) C = paddle.to_tensor(C_data) out = paddle.linalg.multi_dot([A, B, C]) print(out.numpy().shape) # [10, 7] """ if paddle.in_dynamic_mode(): return _C_ops.multi_dot(x) check_type(x, 'x', (list, tuple), 'multi_dot') for id, item in enumerate(x): check_variable_and_dtype(item, 'x[' + str(id) + ']', ['float16', 'float32', 'float64'], 'multi_dot') if item.dtype != x[0].dtype: raise TypeError( "All the Tensors in the input must have the same data type.") helper = LayerHelper('multi_dot', **locals()) dtype = helper.input_dtype(input_param_name='x') out = helper.create_variable_for_type_inference(dtype) helper.append_op(type='multi_dot', inputs={"X": x}, outputs={"Out": out}) return out def eigh(x, UPLO='L', name=None): """ Compute the eigenvalues and eigenvectors of a complex Hermitian (conjugate symmetric) or a real symmetric matrix. Args: x (Tensor): A tensor with shape :math:`[*, N, N]` , The data type of the input Tensor x should be one of float32, float64, complex64, complex128. UPLO(str, optional): (string, default 'L'), 'L' represents the lower triangular matrix, "'U' represents the upper triangular matrix.". name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: out_value(Tensor): A Tensor with shape [*, N] and data type of float32 and float64. The eigenvalues of eigh op. out_vector(Tensor): A Tensor with shape [*, N, N] and data type of float32,float64,complex64 and complex128. The eigenvectors of eigh op. Examples: .. code-block:: python import numpy as np import paddle x_data = np.array([[1, -2j], [2j, 5]]) x = paddle.to_tensor(x_data) out_value, out_vector = paddle.linalg.eigh(x, UPLO='L') print(out_value) #[0.17157288, 5.82842712] print(out_vector) #[(-0.9238795325112867+0j), (-0.3826834323650898+0j)], #[ 0.3826834323650898j , -0.9238795325112867j ]] """ if paddle.in_dynamic_mode(): return _C_ops.eigh(x, 'UPLO', UPLO) def __check_input(x, UPLO): x_shape = list(x.shape) if len(x.shape) < 2: raise ValueError( "Input(input) only support >=2 tensor, but received " "length of Input(input) is %s." % len(x.shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The input matrix must be batches of square matrices. But received x's dimention: {}". format(x_shape)) if UPLO != 'L' and UPLO != 'U': raise ValueError( "UPLO must be L or U. 
But received UPLO is: {}".format(UPLO)) __check_input(x, UPLO) helper = LayerHelper('eigh', **locals()) check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigh') out_value = helper.create_variable_for_type_inference(dtype=x.dtype) out_vector = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='eigh', inputs={'X': x}, outputs={'Eigenvalues': out_value, 'Eigenvectors': out_vector}, attrs={'UPLO': UPLO}) return out_value, out_vector def pinv(x, rcond=1e-15, hermitian=False, name=None): r""" Calculate pseudo inverse via SVD(singular value decomposition) of one matrix or batches of regular matrix. .. math:: if hermitian == False: x = u * s * vt (SVD) out = v * 1/s * ut else: x = u * s * ut (eigh) out = u * 1/s * u.conj().transpose(-2,-1) If x is hermitian or symmetric matrix, svd will be replaced with eigh. Args: x(Tensor): The input tensor. Its shape should be (*, m, n) where * is zero or more batch dimensions. m and n can be arbitraty positive number. The data type of x should be float32 or float64 or complex64 or complex128. When data type is complex64 or cpmplex128, hermitian should be set True. rcond(Tensor, optional): the tolerance value to determine when is a singular value zero. Defalut:1e-15. hermitian(bool, optional): indicates whether x is Hermitian if complex or symmetric if real. Default: False. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The tensor with same data type with x. it represents pseudo inverse of x. Its shape should be (*, n, m). Examples: .. code-block:: python import paddle x = paddle.arange(15).reshape((3, 5)).astype('float64') input = paddle.to_tensor(x) out = paddle.linalg.pinv(input) print(input) print(out) # input: # [[0. , 1. , 2. , 3. , 4. ], # [5. , 6. , 7. , 8. , 9. 
], # [10., 11., 12., 13., 14.]] # out: # [[-0.22666667, -0.06666667, 0.09333333], # [-0.12333333, -0.03333333, 0.05666667], # [-0.02000000, 0.00000000, 0.02000000], # [ 0.08333333, 0.03333333, -0.01666667], # [ 0.18666667, 0.06666667, -0.05333333]] # one can verify : x * out * x = x ; # or out * x * out = x ; """ if paddle.in_dynamic_mode(): if not hermitian: # combine svd and matmul op u, s, vt = _C_ops.svd(x, 'full_matrices', False) max_singular_val = _C_ops.reduce_max(s, 'dim', [-1], 'keep_dim', True, \ 'reduce_all', False) rcond = paddle.to_tensor(rcond, dtype=x.dtype) cutoff = rcond * max_singular_val y = float('inf') y = paddle.to_tensor(y, dtype=x.dtype) condition = s > cutoff cond_int = layers.cast(condition, s.dtype) cond_not_int = layers.cast(layers.logical_not(condition), s.dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st, _ = _C_ops.unsqueeze2(singular, 'axes', [-2]) dims = list(range(len(vt.shape))) perm = dims[:-2] + [dims[-1]] + [dims[-2]] v, _ = _C_ops.transpose2(vt, 'axis', perm) out_1 = v * st out_2 = _C_ops.matmul_v2(out_1, u, 'trans_x', False, 'trans_y', True) return out_2 else: # combine eigh and matmul op s, u = _C_ops.eigh(x, 'UPLO', 'L') s_abs = paddle.abs(s) max_singular_val = _C_ops.reduce_max(s_abs, 'dim', [-1], 'keep_dim', True, \ 'reduce_all', False) rcond = paddle.to_tensor(rcond, dtype=s.dtype) cutoff = rcond * max_singular_val y = float('inf') y = paddle.to_tensor(y, dtype=s.dtype) condition = s_abs > cutoff cond_int = layers.cast(condition, s.dtype) cond_not_int = layers.cast(layers.logical_not(condition), s.dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st, _ = _C_ops.unsqueeze2(singular, 'axes', [-2]) out_1 = u * st u_conj = _C_ops.conj(u) out_2 = _C_ops.matmul_v2(out_1, u_conj, 'trans_x', False, 'trans_y', True) return out_2 else: if not hermitian: helper = LayerHelper('pinv', **locals()) dtype = x.dtype check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'pinv') u = helper.create_variable_for_type_inference(dtype) s = helper.create_variable_for_type_inference(dtype) vt = helper.create_variable_for_type_inference(dtype) helper.append_op( type='svd', inputs={'X': [x]}, outputs={'U': u, 'VH': vt, 'S': s}, attrs={'full_matrices': False}, ) max_singular_val = helper.create_variable_for_type_inference(dtype) helper.append_op( type='reduce_max', inputs={'X': s}, outputs={'Out': max_singular_val}, attrs={'dim': [-1], 'keep_dim': True, 'reduce_all': False}) rcond = layers.fill_constant(shape=[1], value=rcond, dtype=dtype) cutoff = rcond * max_singular_val y = float('inf') y = layers.fill_constant(shape=[1], value=y, dtype=dtype) condition = s > cutoff cond_int = layers.cast(condition, dtype) cond_not_int = layers.cast(layers.logical_not(condition), dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st = helper.create_variable_for_type_inference(dtype=dtype) st_shape = helper.create_variable_for_type_inference(dtype=dtype) helper.append_op( type='unsqueeze2', inputs={'X': singular}, attrs={'axes': [-2]}, outputs={'Out': st, 'XShape': st_shape}) dims = list(range(len(vt.shape))) perm = dims[:-2] + [dims[-1]] + [dims[-2]] v = helper.create_variable_for_type_inference(dtype) v_shape = helper.create_variable_for_type_inference(dtype) helper.append_op( 
type='transpose2', inputs={'X': [vt]}, outputs={'Out': [v], 'XShape': [v_shape]}, attrs={'axis': perm}) out_1 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='elementwise_mul', inputs={'X': v, 'Y': st}, outputs={'Out': out_1}, attrs={'axis': -1, 'use_mkldnn': False}) out_1 = helper.append_activation(out_1) out_2 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='matmul_v2', inputs={'X': out_1, 'Y': u}, outputs={'Out': out_2}, attrs={'trans_x': False, 'trans_y': True}, ) return out_2 else: helper = LayerHelper('pinv', **locals()) dtype = x.dtype check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'pinv') if dtype == paddle.complex128: s_type = 'float64' elif dtype == paddle.complex64: s_type = 'float32' else: s_type = dtype u = helper.create_variable_for_type_inference(dtype) s = helper.create_variable_for_type_inference(s_type) helper.append_op( type='eigh', inputs={'X': x}, outputs={'Eigenvalues': s, 'Eigenvectors': u}, attrs={'UPLO': 'L'}) s_abs = helper.create_variable_for_type_inference(s_type) helper.append_op( type='abs', inputs={'X': s}, outputs={'Out': s_abs}) max_singular_val = helper.create_variable_for_type_inference(s_type) helper.append_op( type='reduce_max', inputs={'X': s_abs}, outputs={'Out': max_singular_val}, attrs={'dim': [-1], 'keep_dim': True, 'reduce_all': False}) rcond = layers.fill_constant(shape=[1], value=rcond, dtype=s_type) cutoff = rcond * max_singular_val y = float('inf') y = layers.fill_constant(shape=[1], value=y, dtype=s_type) condition = s_abs > cutoff cond_int = layers.cast(condition, s_type) cond_not_int = layers.cast(layers.logical_not(condition), s_type) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st = helper.create_variable_for_type_inference(dtype=s_type) st_shape = helper.create_variable_for_type_inference(dtype=s_type) helper.append_op( type='unsqueeze2', inputs={'X': singular}, attrs={'axes': [-2]}, outputs={'Out': st, 'XShape': st_shape}) out_1 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='elementwise_mul', inputs={'X': u, 'Y': st}, outputs={'Out': out_1}, attrs={'axis': -1, 'use_mkldnn': False}) out_1 = helper.append_activation(out_1) u_conj = helper.create_variable_for_type_inference(dtype) helper.append_op( type='conj', inputs={'X': u}, outputs={'Out': [u_conj]}) out_2 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='matmul_v2', inputs={'X': out_1, 'Y': u_conj}, outputs={'Out': out_2}, attrs={'trans_x': False, 'trans_y': True}, ) return out_2 def solve(x, y, name=None): r""" Computes the solution of a square system of linear equations with a unique solution for input 'X' and 'Y'. Let :math: `X` be a sqaure matrix or a batch of square matrices, :math:`Y` be a vector/matrix or a batch of vectors/matrices, the equation should be: .. math:: Out = X^-1 * Y Specifically, - This system of linear equations has one solution if and only if input 'X' is invertible. Args: x (Tensor): A square matrix or a batch of square matrices. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): A vector/matrix or a batch of vectors/matrices. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. name(str, optional): Name for the operation (optional, default is None). 
For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of a square system of linear equations with a unique solution for input 'x' and 'y'. Its data type should be the same as that of `x`. Examples: .. code-block:: python # a square system of linear equations: # 2*X0 + X1 = 9 # X0 + 2*X1 = 8 import paddle import numpy as np np_x = np.array([[3, 1],[1, 2]]) np_y = np.array([9, 8]) x = paddle.to_tensor(np_x, dtype="float64") y = paddle.to_tensor(np_y, dtype="float64") out = paddle.linalg.solve(x, y) print(out) # [2., 3.]) """ if paddle.in_dynamic_mode(): return _C_ops.solve(x, y) inputs = {"X": [x], "Y": [y]} helper = LayerHelper("solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type="solve", inputs={"X": x, "Y": y}, outputs={"Out": out}) return out def triangular_solve(x, y, upper=True, transpose=False, unitriangular=False, name=None): r""" Computes the solution of a system of equations with a triangular coefficient matrix `x` and multiple right-hand sides `y` . Input `x` and `y` is 2D matrices or batches of 2D matrices. If the inputs are batches, the outputs is also batches. Args: x (Tensor): The input triangular coefficient matrix. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. upper (bool, optional): Whether to solve the upper-triangular system of equations (default) or the lower-triangular system of equations. Default: True. transpose (bool, optional): whether `x` should be transposed before calculation. Default: False. unitriangular (bool, optional): whether `x` is unit triangular. If True, the diagonal elements of `x` are assumed to be 1 and not referenced from `x` . Default: False. name(str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of the system of equations. Its data type should be the same as that of `x`. Examples: .. code-block:: python # a square system of linear equations: # x1 + x2 + x3 = 0 # 2*x2 + x3 = -9 # -x3 = 5 import paddle import numpy as np x = paddle.to_tensor([[1, 1, 1], [0, 2, 1], [0, 0,-1]], dtype="float64") y = paddle.to_tensor([[0], [-9], [5]], dtype="float64") out = paddle.linalg.triangular_solve(x, y, upper=True) print(out) # [7, -2, -5] """ if paddle.in_dynamic_mode(): return _C_ops.triangular_solve(x, y, 'upper', upper, 'transpose', transpose, 'unitriangular', unitriangular) inputs = {"X": [x], "Y": [y]} helper = LayerHelper("triangular_solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'triangular_solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'triangular_solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='triangular_solve', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs={ 'upper': upper, 'transpose': transpose, 'unitriangular': unitriangular }) return out def cholesky_solve(x, y, upper=False, name=None): r""" Solves a linear system of equations A @ X = B, given A's Cholesky factor matrix u and matrix B. Input `x` and `y` is 2D matrices or batches of 2D matrices. 
If the inputs are batches, the outputs is also batches. Args: x (Tensor): The input matrix which is upper or lower triangular Cholesky factor of square matrix A. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. upper (bool, optional): whether to consider the Cholesky factor as a lower or upper triangular matrix. Default: False. name(str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of the system of equations. Its data type is the same as that of `x`. Examples: .. code-block:: python import paddle u = paddle.to_tensor([[1, 1, 1], [0, 2, 1], [0, 0,-1]], dtype="float64") b = paddle.to_tensor([[0], [-9], [5]], dtype="float64") out = paddle.linalg.cholesky_solve(b, u, upper=True) print(out) # [-2.5, -7, 9.5] """ if paddle.in_dynamic_mode(): return _C_ops.cholesky_solve(x, y, 'upper', upper) helper = LayerHelper("cholesky_solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'cholesky_solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'cholesky_solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='cholesky_solve', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs={'upper': upper}) return out def eigvalsh(x, UPLO='L', name=None): """ Computes the eigenvalues of a complex Hermitian (conjugate symmetric) or a real symmetric matrix. Args: x (Tensor): A tensor with shape :math:`[_, M, M]` , The data type of the input Tensor x should be one of float32, float64, complex64, complex128. UPLO(str, optional): Lower triangular part of a (‘L’, default) or the upper triangular part (‘U’). name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor eigenvalues in ascending order. Examples: .. code-block:: python import numpy as np import paddle x_data = np.array([[1, -2j], [2j, 5]]) x = paddle.to_tensor(x_data) out_value = paddle.eigvalsh(x, UPLO='L') print(out_value) #[0.17157288, 5.82842712] """ if paddle.in_dynamic_mode(): is_test = x.stop_gradient values, _ = _C_ops.eigvalsh(x, 'UPLO', UPLO, 'is_test', is_test) return values def __check_input(x, UPLO): x_shape = list(x.shape) if len(x.shape) < 2: raise ValueError( "Input(input) only support >=2 tensor, but received " "length of Input(input) is %s." % len(x.shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The input matrix must be batches of square matrices. But received x's dimention: {}". format(x_shape)) if UPLO != 'L' and UPLO != 'U': raise ValueError( "UPLO must be L or U. 
But received UPLO is: {}".format(UPLO)) __check_input(x, UPLO) helper = LayerHelper('eigvalsh', **locals()) check_variable_and_dtype(x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigvalsh') out_value = helper.create_variable_for_type_inference(dtype=x.dtype) out_vector = helper.create_variable_for_type_inference(dtype=x.dtype) is_test = x.stop_gradient helper.append_op( type='eigvalsh', inputs={'X': x}, outputs={'Eigenvalues': out_value, 'Eigenvectors': out_vector}, attrs={'UPLO': UPLO, 'is_test': is_test}) return out_value def lstsq(x, y, rcond=None, driver=None, name=None): """ Computes a solution to the least squares problem of a system of linear equations. Args: x (Tensor): A tensor with shape ``(*, M, N)`` , the data type of the input Tensor ``x`` should be one of float32, float64. y (Tensor): A tensor with shape ``(*, M, K)`` , the data type of the input Tensor ``y`` should be one of float32, float64. rcond(float, optional): The default value is None. A float pointing number used to determine the effective rank of ``x``. If ``rcond`` is None, it will be set to max(M, N) times the machine precision of x_dtype. driver(str, optional): The default value is None. The name of LAPACK method to be used. For CPU inputs the valid values are ‘gels’, ‘gelsy’, ‘gelsd, ‘gelss’. For CUDA input, the only valid driver is ‘gels’. If ``driver`` is None, ‘gelsy’ is used for CPU inputs and ‘gels’ for CUDA inputs. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tuple: A tuple of 4 Tensors which is (``solution``, ``residuals``, ``rank``, ``singular_values``). ``solution`` is a tensor with shape ``(*, N, K)``, meaning the least squares solution. ``residuals`` is a tensor with shape ``(*, K)``, meaning the squared residuals of the solutions, which is computed when M > N and every matrix in ``x`` is full-rank, otherwise return an empty tensor. ``rank`` is a tensor with shape ``(*)``, meaning the ranks of the matrices in ``x``, which is computed when ``driver`` in (‘gelsy’, ‘gelsd’, ‘gelss’), otherwise return an empty tensor. ``singular_values`` is a tensor with shape ``(*, min(M, N))``, meaning singular values of the matrices in ``x``, which is computed when ``driver`` in (‘gelsd’, ‘gelss’), otherwise return an empty tensor. Examples: .. code-block:: python import paddle paddle.set_device("cpu") x = paddle.to_tensor([[1, 3], [3, 2], [5, 6.]]) y = paddle.to_tensor([[3, 4, 6], [5, 3, 4], [1, 2, 1.]]) results = paddle.linalg.lstsq(x, y, driver="gelsd") print(results[0]) # [[ 0.78350395, -0.22165027, -0.62371236], # [-0.11340097, 0.78866047, 1.14948535]] print(results[1]) # [19.81443405, 10.43814468, 30.56185532]) print(results[2]) # 2 print(results[3]) # [9.03455734, 1.54167950] x = paddle.to_tensor([[10, 2, 3], [3, 10, 5], [5, 6, 12.]]) y = paddle.to_tensor([[4, 2, 9], [2, 0, 3], [2, 5, 3.]]) results = paddle.linalg.lstsq(x, y, driver="gels") print(results[0]) # [[ 0.39386186, 0.10230173, 0.93606132], # [ 0.10741687, -0.29028133, 0.11892585], # [-0.05115091, 0.51918161, -0.19948854]] print(results[1]) # [] """ device = paddle.get_device() if device == "cpu": if driver not in (None, "gels", "gelss", "gelsd", "gelsy"): raise ValueError( "Only support valid driver is 'gels', 'gelss', 'gelsd', 'gelsy' or None for CPU inputs. But got {}". 
format(driver)) driver = "gelsy" if driver is None else driver elif "gpu" in device: if driver not in (None, "gels"): raise ValueError( "Only support valid driver is 'gels' or None for CUDA inputs. But got {}". format(driver)) driver = "gels" if driver is None else driver else: raise RuntimeError("Only support lstsq api for CPU or CUDA device.") if x.dtype == y.dtype and x.dtype in (paddle.float32, paddle.float64): pass else: raise ValueError( "Only support x and y have the same dtype such as 'float32' and 'float64'." ) if rcond is None: if x.dtype == paddle.float32: rcond = 1e-7 * max(x.shape[-2], x.shape[-1]) elif x.dtype == paddle.float64: rcond = 1e-15 * max(x.shape[-2], x.shape[-1]) if paddle.in_dynamic_mode(): solution, rank, singular_values = _C_ops.lstsq(x, y, "rcond", rcond, "driver", driver) if x.shape[-2] > x.shape[-1]: matmul_out = _varbase_creator(dtype=x.dtype) _C_ops.matmul(x, solution, matmul_out, 'trans_x', False, 'trans_y', False) minus_out = _C_ops.elementwise_sub(matmul_out, y) pow_out = _C_ops.pow(minus_out, 'factor', 2) residuals = _C_ops.reduce_sum(pow_out, 'dim', [-2], 'keepdim', False, 'reduce_all', False) else: residuals = paddle.empty(shape=[0], dtype=x.dtype) if driver == "gels": rank = paddle.empty(shape=[0], dtype=paddle.int32) singular_values = paddle.empty(shape=[0], dtype=x.dtype) elif driver == "gelsy": singular_values = paddle.empty(shape=[0], dtype=x.dtype) return solution, residuals, rank, singular_values helper = LayerHelper('lstsq', **locals()) check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq') check_variable_and_dtype( y, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq') solution = helper.create_variable_for_type_inference(dtype=x.dtype) residuals = helper.create_variable_for_type_inference(dtype=x.dtype) rank = helper.create_variable_for_type_inference(dtype=paddle.int32) singular_values = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='lstsq', inputs={'X': x, 'Y': y}, outputs={ 'Solution': solution, 'Rank': rank, 'SingularValues': singular_values }, attrs={'rcond': rcond, 'driver': driver}) matmul_out = helper.create_variable_for_type_inference(dtype=x.dtype) minus_out = helper.create_variable_for_type_inference(dtype=x.dtype) pow_out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matmul_v2', inputs={'X': x, 'Y': solution}, outputs={'Out': matmul_out}, attrs={ 'trans_x': False, 'trans_y': False, }) helper.append_op( type='elementwise_sub', inputs={'X': matmul_out, 'Y': y}, outputs={'Out': minus_out}) helper.append_op( type='pow', inputs={'X': minus_out}, outputs={'Out': pow_out}, attrs={'factor': 2}) helper.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': residuals}, attrs={'dim': [-2], 'keep_dim': False, 'reduce_all': False}) if driver == "gels": rank = paddle.static.data(name='rank', shape=[0]) singular_values = paddle.static.data(name='singular_values', shape=[0]) elif driver == "gelsy": singular_values = paddle.static.data(name='singular_values', shape=[0]) return solution, residuals, rank, singular_values
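As a quick sanity check on the residual computation in the dynamic-mode branch above, here is a minimal usage sketch (not part of the original source): it assumes a CPU device and the "gelsd" driver, and compares the returned squared residuals against a manual computation with `paddle.matmul` and `paddle.sum`. Exact numeric values depend on the solver and rounding.

import paddle

paddle.set_device("cpu")

# Overdetermined system (4 equations, 2 unknowns), so residuals are populated.
x = paddle.to_tensor([[1., 1.], [1., 2.], [1., 3.], [1., 4.]])
y = paddle.to_tensor([[6.], [5.], [7.], [10.]])

solution, residuals, rank, singular_values = paddle.linalg.lstsq(
    x, y, driver="gelsd")

# `residuals` should equal the column-wise sum of squared errors of the fit.
manual = paddle.sum((paddle.matmul(x, solution) - y) ** 2, axis=-2)
print(solution)   # least-squares coefficients, shape (2, 1)
print(residuals)  # shape (1,), agrees with `manual` up to rounding
print(rank)       # 2 for this full-rank system
print(manual)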
def frobenius_norm(input, dim=None, keepdim=False, name=None):
    """
    The frobenius norm OP is to calculate the frobenius norm of certain two dimensions of Tensor `input`.

    Args:
        input (Variable): Tensor, data type float32, float64.
        dim (list, optional): None for last two dimensions.
        keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False.
    """
    if dim is not None and not (isinstance(dim, list) and len(dim) == 2):
        raise ValueError(
            "The dim of frobenius norm op should be None or two elements list!"
        )

    if paddle.in_dynamic_mode():
        if dim is None:
            return _C_ops.frobenius_norm(input, 'keep_dim', keepdim,
                                         'reduce_all', True)
        return _C_ops.frobenius_norm(input, 'dim', dim, 'keep_dim', keepdim,
                                     'reduce_all', False)

    attrs = {'dim': dim, 'keep_dim': keepdim, 'reduce_all': False}
    if dim is None:
        attrs['reduce_all'] = True
    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                             'frobenius_norm')
    helper = LayerHelper('frobenius_norm', **locals())
    out = helper.create_variable_for_type_inference(
        dtype=helper.input_dtype())
    helper.append_op(
        type='frobenius_norm',
        inputs={'X': input},
        outputs={'Out': out},
        attrs=attrs)
    return out
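For reference, a small hedged example of how this helper is reached through the public API (not from the original file): `paddle.linalg.norm` with `p='fro'` and `axis=None` dispatches to `frobenius_norm`, and its result can be cross-checked against the square root of the sum of squared entries.

import paddle

x = paddle.to_tensor([[1., 2., 3.], [4., 5., 6.]])

# p='fro' with axis=None reduces over all elements via frobenius_norm.
fro = paddle.linalg.norm(x, p='fro')

# Manual cross-check: sqrt of the sum of squares, roughly 9.5394 for this input.
manual = paddle.sqrt(paddle.sum(x * x))
print(fro)
print(manual)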
242
275
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from ..fluid.layer_helper import LayerHelper from ..framework import _varbase_creator, _dygraph_tracer from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype from ..static import Variable from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode from ..fluid.layers import transpose, cast # noqa: F401 from ..fluid import layers import paddle from paddle.common_ops_import import core from paddle.common_ops_import import VarDesc from paddle import _C_ops __all__ = [] def matmul(x, y, transpose_x=False, transpose_y=False, name=None): """ Applies matrix multiplication to two tensors. `matmul` follows the complete broadcast rules, and its behavior is consistent with `np.matmul`. Currently, the input tensors' number of dimensions can be any, `matmul` can be used to achieve the `dot`, `matmul` and `batchmatmul`. The actual behavior depends on the shapes of :math:`x`, :math:`y` and the flag values of :attr:`transpose_x`, :attr:`transpose_y`. Specifically: - If a transpose flag is specified, the last two dimensions of the tensor are transposed. If the tensor is ndim-1 of shape, the transpose is invalid. If the tensor is ndim-1 of shape :math:`[D]`, then for :math:`x` it is treated as :math:`[1, D]`, whereas for :math:`y` it is the opposite: It is treated as :math:`[D, 1]`. The multiplication behavior depends on the dimensions of `x` and `y`. Specifically: - If both tensors are 1-dimensional, the dot product result is obtained. - If both tensors are 2-dimensional, the matrix-matrix product is obtained. - If the `x` is 1-dimensional and the `y` is 2-dimensional, a `1` is prepended to its dimension in order to conduct the matrix multiply. After the matrix multiply, the prepended dimension is removed. - If the `x` is 2-dimensional and `y` is 1-dimensional, the matrix-vector product is obtained. - If both arguments are at least 1-dimensional and at least one argument is N-dimensional (where N > 2), then a batched matrix multiply is obtained. If the first argument is 1-dimensional, a 1 is prepended to its dimension in order to conduct the batched matrix multiply and removed after. If the second argument is 1-dimensional, a 1 is appended to its dimension for the purpose of the batched matrix multiple and removed after. The non-matrix (exclude the last two dimensions) dimensions are broadcasted according the broadcast rule. For example, if input is a (j, 1, n, m) tensor and the other is a (k, m, p) tensor, out will be a (j, k, n, p) tensor. Args: x (Tensor): The input tensor which is a Tensor. y (Tensor): The input tensor which is a Tensor. transpose_x (bool): Whether to transpose :math:`x` before multiplication. transpose_y (bool): Whether to transpose :math:`y` before multiplication. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The output Tensor. Examples: .. 
code-block:: python import paddle import numpy as np # vector * vector x_data = np.random.random([10]).astype(np.float32) y_data = np.random.random([10]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [1] # matrix * vector x_data = np.random.random([10, 5]).astype(np.float32) y_data = np.random.random([5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10] # batched matrix * broadcasted vector x_data = np.random.random([10, 5, 2]).astype(np.float32) y_data = np.random.random([2]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 5] # batched matrix * batched matrix x_data = np.random.random([10, 5, 2]).astype(np.float32) y_data = np.random.random([10, 2, 5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 5, 5] # batched matrix * broadcasted matrix x_data = np.random.random([10, 1, 5, 2]).astype(np.float32) y_data = np.random.random([1, 3, 2, 5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 3, 5, 5] """ if in_dygraph_mode(): return _C_ops.final_state_matmul(x, y, transpose_x, transpose_y) if _in_legacy_dygraph(): op_type = 'matmul_v2' op = getattr(_C_ops, op_type) return op(x, y, 'trans_x', transpose_x, 'trans_y', transpose_y) attrs = { 'trans_x': transpose_x, 'trans_y': transpose_y, } def __check_input(x, y): var_names = {'x': x, 'y': y} for name, val in var_names.items(): check_variable_and_dtype( val, name, ['float16', 'float32', 'float64', 'complex64', 'complex128'], 'matmul') __check_input(x, y) helper = LayerHelper('matmul_v2', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matmul_v2', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs=attrs) return out def norm(x, p='fro', axis=None, keepdim=False, name=None): """ Returns the matrix norm (Frobenius) or vector norm (the 1-norm, the Euclidean or 2-norm, and in general the p-norm for p > 0) of a given tensor. .. note:: This norm API is different from `numpy.linalg.norm`. This api supports high-order input tensors (rank >= 3), and certain axis need to be pointed out to calculate the norm. But `numpy.linalg.norm` only supports 1-D vector or 2-D matrix as input tensor. For p-order matrix norm, this api actually treats matrix as a flattened vector to calculate the vector norm, NOT REAL MATRIX NORM. Args: x (Tensor): The input tensor could be N-D tensor, and the input data type could be float32 or float64. p (float|string, optional): Order of the norm. Supported values are `fro`, `0`, `1`, `2`, `inf`, `-inf` and any positive real number yielding the corresponding p-norm. Not supported: ord < 0 and nuclear norm. Default value is `fro`. axis (int|list|tuple, optional): The axis on which to apply norm operation. If axis is int or list(int)/tuple(int) with only one element, the vector norm is computed over the axis. If `axis < 0`, the dimension to norm operation is rank(input) + axis. If axis is a list(int)/tuple(int) with two elements, the matrix norm is computed over the axis. Defalut value is `None`. keepdim (bool, optional): Whether to reserve the reduced dimension in the output Tensor. 
The result tensor will have fewer dimension than the :attr:`input` unless :attr:`keepdim` is true, default value is False. name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: results of norm operation on the specified axis of input tensor, it's data type is the same as input's Tensor. Examples: .. code-block:: python import paddle import numpy as np shape=[2, 3, 4] np_input = np.arange(24).astype('float32') - 12 np_input = np_input.reshape(shape) x = paddle.to_tensor(np_input) #[[[-12. -11. -10. -9.] [ -8. -7. -6. -5.] [ -4. -3. -2. -1.]] # [[ 0. 1. 2. 3.] [ 4. 5. 6. 7.] [ 8. 9. 10. 11.]]] # compute frobenius norm along last two dimensions. out_fro = paddle.linalg.norm(x, p='fro', axis=[0,1]) # out_fro.numpy() [17.435596 16.911535 16.7332 16.911535] # compute 2-order vector norm along last dimension. out_pnorm = paddle.linalg.norm(x, p=2, axis=-1) #out_pnorm.numpy(): [[21.118711 13.190906 5.477226] # [ 3.7416575 11.224972 19.131126]] # compute 2-order norm along [0,1] dimension. out_pnorm = paddle.linalg.norm(x, p=2, axis=[0,1]) #out_pnorm.numpy(): [17.435596 16.911535 16.7332 16.911535] # compute inf-order norm out_pnorm = paddle.linalg.norm(x, p=np.inf) #out_pnorm.numpy() = [12.] out_pnorm = paddle.linalg.norm(x, p=np.inf, axis=0) #out_pnorm.numpy(): [[12. 11. 10. 9.] [8. 7. 6. 7.] [8. 9. 10. 11.]] # compute -inf-order norm out_pnorm = paddle.linalg.norm(x, p=-np.inf) #out_pnorm.numpy(): [0.] out_pnorm = paddle.linalg.norm(x, p=-np.inf, axis=0) #out_pnorm.numpy(): [[0. 1. 2. 3.] [4. 5. 6. 5.] [4. 3. 2. 1.]] """ def frobenius_norm(input, dim=None, keepdim=False, name=None): """ The frobenius norm OP is to calculate the frobenius norm of certain two dimensions of Tensor `input`. Args: input (Variable): Tensor, data type float32, float64. dim (list, optional): None for last two dimensions. keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False. """ if dim is not None and not (isinstance(dim, list) and len(dim) == 2): raise ValueError( "The dim of frobenius norm op should be None or two elements list!" ) if paddle.in_dynamic_mode(): if dim is None: return _C_ops.frobenius_norm(input, 'keep_dim', keepdim, 'reduce_all', True) return _C_ops.frobenius_norm(input, 'dim', dim, 'keep_dim', keepdim, 'reduce_all', False) attrs = {'dim': dim, 'keep_dim': keepdim, 'reduce_all': False} if dim is None: attrs['reduce_all'] = True check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'frobenius_norm') helper = LayerHelper('frobenius_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op( type='frobenius_norm', inputs={'X': input}, outputs={'Out': out}, attrs=attrs) return out def vector_norm(input, porder=None, axis=None, keepdim=False, asvector=False, name=None): """ Calculate the p-order vector norm for certain dimension of Tensor `input`. Args: input (Variable): Tensor, data type float32, float64. porder (float, optional): None for porder=2.0. axis (int, optional): None for last dimension. keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False. 
""" if paddle.in_dynamic_mode(): if axis is None: axis = -1 return _C_ops.p_norm(input, 'porder', porder, 'axis', axis, 'keepdim', keepdim, 'asvector', asvector) if porder is not None: check_type(porder, 'porder', (float, int), 'p_norm') if axis is not None: check_type(axis, 'axis', (int), 'p_norm') check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'p_norm') attrs = { 'axis': axis if axis is not None else -1, 'porder': float(porder) if porder is not None else 2.0, 'keepdim': keepdim, 'asvector': asvector, 'epsilon': 1e-12, } helper = LayerHelper('p_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op( type='p_norm', inputs={'X': input}, outputs={'Out': out}, attrs=attrs) return out def inf_norm(input, porder=None, axis=axis, keepdim=False, asvector=False, name=None): helper = LayerHelper('frobenius_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op(type='abs', inputs={'X': input}, outputs={'Out': out}) reduce_out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) reduce_all = True if axis == None or axis == [] or asvector == True else False axis = axis if axis != None and axis != [] else [0] reduce_type = 'reduce_max' if porder == np.float( 'inf') else 'reduce_min' helper.append_op( type=reduce_type, inputs={'X': out}, outputs={'Out': reduce_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) return reduce_out def p_matrix_norm(input, porder=1., axis=axis, keepdim=False, name=None): """ NOTE: This function actually treats the matrix as flattened vector to calculate vector norm instead of matrix norm. """ block = LayerHelper('norm', **locals()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) abs_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='abs', inputs={'X': input}, outputs={'Out': abs_out}) pow_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='pow', inputs={'X': abs_out}, outputs={'Out': pow_out}, attrs={'factor': porder}) sum_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': sum_out}, attrs={ 'dim': axis, 'keep_dim': keepdim, 'reduce_all': True if axis is None else False }) porder block.append_op( type='pow', inputs={'X': sum_out}, outputs={'Out': out}, attrs={'factor': float(1. / porder)}) return out if axis is None and p is not None: if isinstance(p, str): if p == "fro": return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name) else: raise ValueError( "only valid string values are 'fro', found {}".format(p)) elif isinstance(p, (int, float)): return vector_norm( x, porder=p, axis=axis, keepdim=keepdim, asvector=True, name=name) else: raise ValueError("only valid p type is string or float, found {}". 
format(type(p))) if isinstance(axis, tuple): axis = list(axis) if isinstance(axis, list) and len(axis) == 1: axis = axis[0] #calculate vector norm, where axis is int or list with only one integer if isinstance(axis, int): if isinstance(p, str): if p == "fro": return vector_norm( x, porder=2, axis=axis, keepdim=keepdim, asvector=False, name=name) else: raise ValueError( "only valid string values are 'fro', found {}".format(p)) elif isinstance(p, (int, float)): return vector_norm( x, axis=axis, porder=p, keepdim=keepdim, asvector=False, name=name) else: raise ValueError( "unspport p for p-order vector norm. except float, found {}". format(p)) #calculate matrix norm, where axis is list with two integers elif isinstance(axis, list) and len(axis) == 2: if p == "fro": return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name) elif p == np.inf or p == -np.inf: return inf_norm(x, porder=p, axis=axis, keepdim=keepdim, name=name) elif p == 0: raise ValueError( "just suport axis type int or list (length of list <=1) if p = 0, found {}". format(axis)) else: return p_matrix_norm( x, porder=p, axis=axis, keepdim=keepdim, name=name) else: raise ValueError( "except axis type int or list (length of list <=2), found {}". format(axis)) def dist(x, y, p=2, name=None): r""" This OP returns the p-norm of (x - y). It is not a norm in a strict sense, only as a measure of distance. The shapes of x and y must be broadcastable. The definition is as follows, for details, please refer to the `numpy's broadcasting <https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`_: - Each input has at least one dimension. - Match the two input dimensions from back to front, the dimension sizes must either be equal, one of them is 1, or one of them does not exist. Where, z = x - y, the shapes of x and y are broadcastable, then the shape of z can be obtained as follows: 1. If the number of dimensions of x and y are not equal, prepend 1 to the dimensions of the tensor with fewer dimensions. For example, The shape of x is [8, 1, 6, 1], the shape of y is [7, 1, 5], prepend 1 to the dimension of y. x (4-D Tensor): 8 x 1 x 6 x 1 y (4-D Tensor): 1 x 7 x 1 x 5 2. Determine the size of each dimension of the output z: choose the maximum value from the two input dimensions. z (4-D Tensor): 8 x 7 x 6 x 5 If the number of dimensions of the two inputs are the same, the size of the output can be directly determined in step 2. When p takes different values, the norm formula is as follows: When p = 0, defining $0^0=0$, the zero-norm of z is simply the number of non-zero elements of z. .. math:: ||z||_{0}=\lim_{p \\rightarrow 0}\sum_{i=1}^{m}|z_i|^{p} When p = inf, the inf-norm of z is the maximum element of z. .. math:: ||z||_\infty=\max_i |z_i| When p = -inf, the negative-inf-norm of z is the minimum element of z. .. math:: ||z||_{-\infty}=\min_i |z_i| Otherwise, the p-norm of z follows the formula, .. math:: ||z||_{p}=(\sum_{i=1}^{m}|z_i|^p)^{\\frac{1}{p}} Args: x (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64. y (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64. p (float, optional): The norm to be computed, its data type is float32 or float64. Default: 2. Returns: Tensor: Tensor that is the p-norm of (x - y). Examples: .. code-block:: python import paddle import numpy as np x = paddle.to_tensor(np.array([[3, 3],[3, 3]]), "float32") y = paddle.to_tensor(np.array([[3, 3],[3, 1]]), "float32") out = paddle.dist(x, y, 0) print(out) # out = [1.] out = paddle.dist(x, y, 2) print(out) # out = [2.] 
out = paddle.dist(x, y, float("inf")) print(out) # out = [2.] out = paddle.dist(x, y, float("-inf")) print(out) # out = [0.] """ check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'dist') check_variable_and_dtype(y, 'dtype', ['float32', 'float64'], 'dist') check_type(p, 'p', (float, int), 'dist') helper = LayerHelper("dist", **locals()) out = helper.create_variable_for_type_inference(x.dtype) inputs = {"X": [x], "Y": [y]} outputs = {'Out': [out]} attrs = {"p": float(p)} helper.append_op( type='dist', inputs=inputs, outputs={'Out': out}, attrs=attrs) return out def cond(x, p=None, name=None): """ Computes the condition number of a matrix or batches of matrices with respect to a matrix norm ``p``. Args: x (Tensor): The input tensor could be tensor of shape ``(*, m, n)`` where ``*`` is zero or more batch dimensions for ``p`` in ``(2, -2)``, or of shape ``(*, n, n)`` where every matrix is invertible for any supported ``p``. And the input data type could be ``float32`` or ``float64``. p (float|string, optional): Order of the norm. Supported values are `fro`, `nuc`, `1`, `-1`, `2`, `-2`, `inf`, `-inf`. Default value is `None`, meaning that the order of the norm is `2`. name (str, optional): The default value is `None`. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: computing results of condition number, its data type is the same as input Tensor ``x``. Examples: .. code-block:: python import paddle import numpy as np x = paddle.to_tensor([[1., 0, -1], [0, 1, 0], [1, 0, 1]]) # compute conditional number when p is None out = paddle.linalg.cond(x) # out.numpy() [1.4142135] # compute conditional number when order of the norm is 'fro' out_fro = paddle.linalg.cond(x, p='fro') # out_fro.numpy() [3.1622777] # compute conditional number when order of the norm is 'nuc' out_nuc = paddle.linalg.cond(x, p='nuc') # out_nuc.numpy() [9.2426405] # compute conditional number when order of the norm is 1 out_1 = paddle.linalg.cond(x, p=1) # out_1.numpy() [2.] # compute conditional number when order of the norm is -1 out_minus_1 = paddle.linalg.cond(x, p=-1) # out_minus_1.numpy() [1.] # compute conditional number when order of the norm is 2 out_2 = paddle.linalg.cond(x, p=2) # out_2.numpy() [1.4142135] # compute conditional number when order of the norm is -1 out_minus_2 = paddle.linalg.cond(x, p=-2) # out_minus_2.numpy() [0.70710677] # compute conditional number when order of the norm is inf out_inf = paddle.linalg.cond(x, p=np.inf) # out_inf.numpy() [2.] # compute conditional number when order of the norm is -inf out_minus_inf = paddle.linalg.cond(x, p=-np.inf) # out_minus_inf.numpy() [1.] 
a = paddle.to_tensor(np.random.randn(2, 4, 4).astype('float32')) # a.numpy() # [[[ 0.14063153 -0.996288 0.7996131 -0.02571543] # [-0.16303636 1.5534962 -0.49919784 -0.04402903] # [-1.1341571 -0.6022629 0.5445269 0.29154757] # [-0.16816919 -0.30972657 1.7521842 -0.5402487 ]] # [[-0.58081484 0.12402827 0.7229862 -0.55046535] # [-0.15178485 -1.1604939 0.75810957 0.30971205] # [-0.9669573 1.0940945 -0.27363303 -0.35416734] # [-1.216529 2.0018666 -0.7773689 -0.17556527]]] a_cond_fro = paddle.linalg.cond(a, p='fro') # a_cond_fro.numpy() [31.572273 28.120834] b = paddle.to_tensor(np.random.randn(2, 3, 4).astype('float64')) # b.numpy() # [[[ 1.61707487 0.46829144 0.38130416 0.82546736] # [-1.72710298 0.08866375 -0.62518804 0.16128892] # [-0.02822879 -1.67764516 0.11141444 0.3220113 ]] # [[ 0.22524372 0.62474921 -0.85503233 -1.03960523] # [-0.76620689 0.56673047 0.85064753 -0.45158196] # [ 1.47595418 2.23646462 1.5701758 0.10497519]]] b_cond_2 = paddle.linalg.cond(b, p=2) # b_cond_2.numpy() [3.30064451 2.51976252] """ def mat_norm(input, porder=1., axis=None): """ NOTE: Calculate the matrix norm of a square matrix or batches of square matrices, when porder is in (1, -1, inf, -inf) """ reduce_all = True if axis is None or axis == [] else False axis = axis if axis != None and axis != [] else [0] keepdim = False if paddle.in_dynamic_mode(): abs_out = _C_ops.abs(input) sum_out = _C_ops.reduce_sum(abs_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == 1 or porder == np.inf: return _C_ops.reduce_max(sum_out, 'dim', [-1], 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == -1 or porder == -np.inf: return _C_ops.reduce_min(sum_out, 'dim', [-1], 'keepdim', keepdim, 'reduce_all', reduce_all) block = LayerHelper('norm', **locals()) abs_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='abs', inputs={'X': input}, outputs={'Out': abs_out}) block.append_op( type='reduce_sum', inputs={'X': abs_out}, outputs={'Out': sum_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) if porder == 1 or porder == np.inf: block.append_op( type='reduce_max', inputs={'X': sum_out}, outputs={'Out': out}, attrs={ 'dim': [-1], 'keep_dim': keepdim, 'reduce_all': reduce_all }) if porder == -1 or porder == -np.inf: block.append_op( type='reduce_min', inputs={'X': sum_out}, outputs={'Out': out}, attrs={ 'dim': [-1], 'keep_dim': keepdim, 'reduce_all': reduce_all }) return out def fro_norm(input, porder=2, axis=[-1]): """ NOTE: Calculate the frobenius norm of a square matrix or batches of square matrices. """ reduce_all = True if axis is None or axis == [] else False keepdim = False if paddle.in_dynamic_mode(): pow_out = _C_ops.pow(input, 'factor', porder) sum_out_1 = _C_ops.reduce_sum(pow_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) sum_out_2 = _C_ops.reduce_sum(sum_out_1, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) return _C_ops.pow(sum_out_2, 'factor', float(1. 
/ porder)) block = LayerHelper('norm', **locals()) pow_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out_1 = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out_2 = block.create_variable_for_type_inference( dtype=block.input_dtype()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='pow', inputs={'X': input}, outputs={'Out': pow_out}, attrs={'factor': porder}) block.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': sum_out_1}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='reduce_sum', inputs={'X': sum_out_1}, outputs={'Out': sum_out_2}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='pow', inputs={'X': sum_out_2}, outputs={'Out': out}, attrs={'factor': float(1. / porder)}) return out def svd_norm(input, porder, axis=[-1]): """ NOTE: Calculate the matrix norm, which is related to singular values, of a matrix or batches of matrices, including nuclear norm, 2-norm and (-2)-norm. """ reduce_all = True if axis is None or axis == [] else False keepdim = False u, s, vh = svd(input, full_matrices=False) if paddle.in_dynamic_mode(): if porder == "nuc": return _C_ops.reduce_sum(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) max_out = _C_ops.reduce_max(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) min_out = _C_ops.reduce_min(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == 2: return _C_ops.elementwise_div(max_out, min_out, 'aixs', axis, 'use_mkldnn', False) if porder == -2: return _C_ops.elementwise_div(min_out, max_out, 'aixs', axis, 'use_mkldnn', False) block = LayerHelper('norm', **locals()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) if porder == "nuc": block.append_op( type='reduce_sum', inputs={'X': s}, outputs={'Out': out}, attrs={ 'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all }) return out max_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) min_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='reduce_max', inputs={'X': s}, outputs={'Out': max_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='reduce_min', inputs={'X': s}, outputs={'Out': min_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) if porder == 2: block.append_op( type='elementwise_div', inputs={'X': max_out, 'Y': min_out}, outputs={'Out': out}, attrs={'aixs': axis, 'use_mkldnn': False}) return out if porder == -2: block.append_op( type='elementwise_div', inputs={'X': min_out, 'Y': max_out}, outputs={'Out': out}, attrs={'aixs': axis, 'use_mkldnn': False}) return out def empty_tensor(input, shape): if paddle.in_dynamic_mode(): return input.reshape(shape) raise ValueError("only support x is nonempty tensor in static mode") x_shape = list(x.shape) if not len(x_shape) >= 2: raise ValueError("input should be a matrix or batches of matrices, " + "but the dimention of received input is {}".format( len(x_shape))) if p == None: p = 2 x_size = 0 if (0 in x_shape) else 1 if p in ("fro", "nuc", 1, -1, np.inf, -np.inf): if x_shape[len(x_shape) - 1] == x_shape[len(x_shape) - 2]: if x_size == 0: return empty_tensor(x, x_shape[:-2]) x_inv = x.inverse() if p == "fro": return fro_norm(x) * fro_norm(x_inv) if p == "nuc": return svd_norm(x, p) * svd_norm(x_inv, p) if p in (1, -1): return mat_norm( x, 
porder=p, axis=[-2]) * mat_norm( x_inv, porder=p, axis=[-2]) if p in (np.inf, -np.inf): return mat_norm( x, porder=p, axis=[-1]) * mat_norm( x_inv, porder=p, axis=[-1]) else: raise ValueError("only support p is {} when input is a ".format(p) + "square matrix or batches of square matrices") elif p in (2, -2): if x_size == 0: return empty_tensor(x, x_shape[:-2]) return svd_norm(x, porder=p) else: raise ValueError( "unsupported {} for p, only supporting ('fro', 'nuc', ".format( p) + "1, -1, 2, -2, inf, -inf) or none") def dot(x, y, name=None): """ This operator calculates inner product for vectors. .. note:: Support 1-d and 2-d Tensor. When it is 2d, the first dimension of this matrix is the batch dimension, which means that the vectors of multiple batches are dotted. Parameters: x(Tensor): 1-D or 2-D ``Tensor``. Its dtype should be ``float32``, ``float64``, ``int32``, ``int64`` y(Tensor): 1-D or 2-D ``Tensor``. Its dtype soulde be ``float32``, ``float64``, ``int32``, ``int64`` name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. Details: :ref:`api_guide_Name` Returns: Tensor: the calculated result Tensor. Examples: .. code-block:: python import paddle import numpy as np x_data = np.random.uniform(0.1, 1, [10]).astype(np.float32) y_data = np.random.uniform(1, 3, [10]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.dot(x, y) print(z) """ op_type = 'dot' # skip var type check in dygraph mode to improve efficiency if paddle.in_dynamic_mode(): op = getattr(_C_ops, op_type) return op(x, y) assert x is not None, 'x cannot be None in {}'.format(op_type) assert y is not None, 'y cannot be None in {}'.format(op_type) check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], op_type) check_variable_and_dtype(y, 'y', ['float32', 'float64', 'int32', 'int64'], op_type) helper = LayerHelper(op_type, **locals()) if name is None: out = helper.create_variable_for_type_inference(dtype=x.dtype) else: out = helper.create_variable( name=name, dtype=x.dtype, persistable=False) helper.append_op( type="dot", inputs={'X': x, 'Y': y}, attrs={}, outputs={"Out": out}) return out def cov(x, rowvar=True, ddof=True, fweights=None, aweights=None, name=None): """ Estimate the covariance matrix of the input variables, given data and weights. A covariance matrix is a square matrix, indicate the covariance of each pair variables in the input matrix. For example, for an N-dimensional samples X=[x1,x2,…xN]T, then the covariance matrix element Cij is the covariance of xi and xj. The element Cii is the variance of xi itself. Parameters: x(Tensor): A N-D(N<=2) Tensor containing multiple variables and observations. By default, each row of x represents a variable. Also see rowvar below. rowvar(Bool, optional): If rowvar is True (default), then each row represents a variable, with observations in the columns. Default: True ddof(Bool, optional): If ddof=True will return the unbiased estimate, and ddof=False will return the simple average. Default: True fweights(Tensor, optional): 1-D Tensor of integer frequency weights; The number of times each observation vector should be repeated. Default: None aweights(Tensor, optional): 1-D Tensor of observation vector weights. How important of the observation vector, larger data means this element is more important. Default: None name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. 
Details: :ref:`api_guide_Name` Returns: Tensor: The covariance matrix Tensor of the variables. Examples: .. code-block:: python import paddle xt = paddle.rand((3,4)) paddle.linalg.cov(xt) ''' Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, [[0.07918842, 0.06127326, 0.01493049], [0.06127326, 0.06166256, 0.00302668], [0.01493049, 0.00302668, 0.01632146]]) ''' """ op_type = 'cov' if len(x.shape) > 2 or len(x.shape) < 1: raise ValueError( "Input(x) only support N-D (1<=N<=2) tensor in cov, but received " "length of Input(input) is %s." % len(x.shape)) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cov') nx = x if len(x.shape) == 1: nx = x.reshape((1, -1)) if not rowvar and nx.shape[0] != 1: nx = nx.t() w = None observation_num = nx.shape[1] if fweights is not None: w = fweights.astype(nx.dtype) if len(w.shape) > 1: raise ValueError( "Input(fweights) only support N-D (N<=1) tensor in cov, but received " "shape of Input(input) is %s." % len(fweights.shape)) if fweights.shape[0] != observation_num: raise ValueError( "The number of Input(fweights) should equal to x's dim[1]: {}, but received " "size of Input(fweights) is {}.".format(observation_num, fweights.shape[0])) if fweights.min() < 0: raise ValueError( "The value of Input(fweights) cannot be negtive, but received " "min of Input(fweights) is {}.".format(fweights.min())) if not paddle.all(fweights == paddle.round(fweights.astype('float64'))): raise ValueError("Input(fweights) must be integer ") if aweights is not None: aw = aweights.astype(nx.dtype) if len(aw.shape) > 1: raise ValueError( "Input(aweights) only support N-D (N<=1) tensor in cov, but received " "length of Input(input) is %s." % len(aweights.shape)) check_variable_and_dtype(aweights, 'dtype', ['float32', 'float64'], 'cov') if aweights.shape[0] != observation_num: raise ValueError( "The number of Input(aweights) should equal to x's dim[1]: {}, but received " "size of Input(aweights) is {}.".format(observation_num, aweights.shape[0])) if aweights.min() < 0: raise ValueError( "The value of Input(aweights) cannot be negtive, but received " "min of Input(aweights) is {}.".format(aweights.min())) if w is not None: w = w * aw else: w = aw w_sum = paddle.to_tensor(observation_num, dtype=nx.dtype) if fweights is not None or aweights is not None: w_sum = w.sum() if w_sum.item() == 0: raise ValueError("The sum of weights is zero, can't be normalized.") if w is not None: nx_w = nx * w avg = (nx_w).sum(axis=1) / w_sum else: avg = nx.sum(axis=1) / w_sum nx_w = nx if w is not None and aweights is not None and ddof == True: norm_factor = w_sum - (w * aweights).sum() / w_sum else: norm_factor = w_sum - ddof if norm_factor <= 0: norm_factor = paddle.to_tensor(0, dtype=nx.dtype) nx = nx - avg.unsqueeze(1) xxt = paddle.mm(nx, nx_w.t().conj()) cov = paddle.divide(xxt, norm_factor).squeeze() return cov def t(input, name=None): """ Transpose <=2-D tensor. 0-D and 1-D tensors are returned as it is and 2-D tensor is equal to the paddle.transpose function which perm dimensions set 0 and 1. Args: input (Tensor): The input Tensor. It is a N-D (N<=2) Tensor of data types float16, float32, float64, int32. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` Returns: Tensor: A transposed n-D Tensor, with data type being float16, float32, float64, int32, int64. For Example: .. 
code-block:: text # Example 1 (0-D tensor) x = tensor([0.79]) paddle.t(x) = tensor([0.79]) # Example 2 (1-D tensor) x = tensor([0.79, 0.84, 0.32]) paddle.t(x) = tensor([0.79, 0.84, 0.32]) # Example 3 (2-D tensor) x = tensor([0.79, 0.84, 0.32], [0.64, 0.14, 0.57]) paddle.t(x) = tensor([0.79, 0.64], [0.84, 0.14], [0.32, 0.57]) Examples: .. code-block:: python import paddle x = paddle.ones(shape=[2, 3], dtype='int32') x_transposed = paddle.t(x) print(x_transposed.shape) # [3, 2] """ if len(input.shape) > 2: raise ValueError( "Input(input) only support N-D (N<=2) tensor, but received " "length of Input(input) is %s. Perhaps you can use paddle." "tensor.transpose() instead." % len(input.shape)) if paddle.in_dynamic_mode(): if len(input.shape) == 1: return input # 2-D tensor perm = [1, 0] out, _ = _C_ops.transpose2(input, 'axis', perm) return out check_variable_and_dtype( input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'], 'transpose') helper = LayerHelper('t', **locals()) out = helper.create_variable_for_type_inference(input.dtype) input_shape = helper.create_variable_for_type_inference(input.dtype) if len(input.shape) == 1: out = input else: helper.append_op( type='transpose2', inputs={'X': [input]}, outputs={'Out': [out], 'XShape': [input_shape]}, attrs={'axis': [1, 0]}) return out def cross(x, y, axis=None, name=None): """ Computes the cross product between two tensors along an axis. Inputs must have the same shape, and the length of their axes should be equal to 3. If `axis` is not given, it defaults to the first axis found with the length 3. Args: x (Tensor): The first input tensor. y (Tensor): The second input tensor. axis (int, optional): The axis along which to compute the cross product. It defaults to the first axis found with the length 3. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor. A Tensor with same data type as `x`. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0], [3.0, 3.0, 3.0]]) y = paddle.to_tensor([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]) z1 = paddle.cross(x, y) # [[-1. -1. -1.] # [ 2. 2. 2.] # [-1. -1. -1.]] z2 = paddle.cross(x, y, axis=1) # [[0. 0. 0.] # [0. 0. 0.] # [0. 0. 0.]] """ if in_dygraph_mode(): return _C_ops.final_state_cross(x, y, axis) else: if _in_legacy_dygraph(): if axis is not None: return _C_ops.cross(x, y, 'dim', axis) else: return _C_ops.cross(x, y) else: helper = LayerHelper("cross", **locals()) out = helper.create_variable_for_type_inference(x.dtype) attrs = dict() attrs['dim'] = axis helper.append_op( type='cross', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs=attrs) return out def cholesky(x, upper=False, name=None): r""" Computes the Cholesky decomposition of one symmetric positive-definite matrix or batches of symmetric positive-definite matrice. If `upper` is `True`, the decomposition has the form :math:`A = U^{T}U` , and the returned matrix :math:`U` is upper-triangular. Otherwise, the decomposition has the form :math:`A = LL^{T}` , and the returned matrix :math:`L` is lower-triangular. Args: x (Tensor): The input tensor. Its shape should be `[*, M, M]`, where * is zero or more batch dimensions, and matrices on the inner-most 2 dimensions all should be symmetric positive-definite. Its data type should be float32 or float64. upper (bool): The flag indicating whether to return upper or lower triangular matrices. Default: False. 
Returns: Tensor: A Tensor with same shape and data type as `x`. It represents \ triangular matrices generated by Cholesky decomposition. Examples: .. code-block:: python import paddle import numpy as np a = np.random.rand(3, 3) a_t = np.transpose(a, [1, 0]) x_data = np.matmul(a, a_t) + 1e-03 x = paddle.to_tensor(x_data) out = paddle.linalg.cholesky(x, upper=False) print(out) # [[1.190523 0. 0. ] # [0.9906703 0.27676893 0. ] # [1.25450498 0.05600871 0.06400121]] """ if paddle.in_dynamic_mode(): return _C_ops.cholesky(x, "upper", upper) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cholesky') check_type(upper, 'upper', bool, 'cholesky') helper = LayerHelper('cholesky', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='cholesky', inputs={'X': [x]}, outputs={'Out': out}, attrs={'upper': upper}) return out def matrix_rank(x, tol=None, hermitian=False, name=None): r""" Computes the rank of a matrix. The rank of a matrix is the number of singular values that are greater than the specified `tol` threshold when hermitian=False, or the number of eigenvalues in absolute value that are greater than the specified `tol` threshold when hermitian=True. Args: x (Tensor): The input tensor. Its shape should be `[..., m, n]`, where `...` is zero or more batch dimensions. If `x` is a batch of matrices then the output has the same batch dimensions. The data type of `x` should be float32 or float64. tol (float,Tensor,optional): the tolerance value. Default: None. If `tol` is not specified, and `sigma` is the largest singular value (or eigenvalues in absolute value), and `eps` is the epsilon value for the dtype of `x`, then `tol` is computed with formula `tol=sigma * max(m,n) * eps`. Note that if `x` is a batch of matrices, `tol` is computed this way for every batch. hermitian (bool,optional): indicates whether `x` is Hermitian. Default: False. When hermitian=True, `x` is assumed to be Hermitian, enabling a more efficient method for finding eigenvalues, but `x` is not checked inside the function. Instead, We just use the lower triangular of the matrix to compute. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: Rank of tensor x. Examples: .. 
code-block:: python import paddle a = paddle.eye(10) b = paddle.linalg.matrix_rank(a) print(b) # b = [10] c = paddle.ones(shape=[3, 4, 5, 5]) d = paddle.linalg.matrix_rank(c, tol=0.01, hermitian=True) print(d) # d = [[1, 1, 1, 1], # [1, 1, 1, 1], # [1, 1, 1, 1]] """ if paddle.in_dynamic_mode(): if tol is None: tol_tensor = None tol_attr = 0.0 use_default_tol = True elif isinstance(tol, Variable): if tol.dtype != x.dtype: tol_tensor = cast(tol, x.dtype) else: tol_tensor = tol tol_attr = 0.0 use_default_tol = False else: tol_tensor = None tol_attr = float(tol) use_default_tol = False return _C_ops.matrix_rank(x, tol_tensor, "tol", tol_attr, 'hermitian', hermitian, 'use_default_tol', use_default_tol) inputs = {} attrs = {} check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'matrix_rank') inputs['X'] = x if tol is None: attrs['use_default_tol'] = True elif isinstance(tol, Variable): check_variable_and_dtype(tol, 'tol', ['float32'], 'matrix_rank') attrs['use_default_tol'] = False if tol.dtype != x.dtype: inputs['TolTensor'] = cast(tol, x.dtype) else: inputs['TolTensor'] = tol else: check_type(tol, 'tol', float, 'matrix_rank') attrs['use_default_tol'] = False attrs['tol'] = tol check_type(hermitian, 'hermitian', bool, 'matrix_rank') attrs['hermitian'] = hermitian helper = LayerHelper('matrix_rank', **locals()) out = helper.create_variable_for_type_inference(dtype='int32') helper.append_op( type='matrix_rank', inputs=inputs, outputs={'Out': out}, attrs=attrs) return out def bmm(x, y, name=None): """ Applies batched matrix multiplication to two tensors. Both of the two input tensors must be three-dementional and share the same batch size. if x is a (b, m, k) tensor, y is a (b, k, n) tensor, the output will be a (b, m, n) tensor. Args: x (Tensor): The input Tensor. y (Tensor): The input Tensor. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The product Tensor. Examples: .. code-block:: python import paddle # In imperative mode: # size x: (2, 2, 3) and y: (2, 3, 2) x = paddle.to_tensor([[[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]], [[3.0, 3.0, 3.0], [4.0, 4.0, 4.0]]]) y = paddle.to_tensor([[[1.0, 1.0],[2.0, 2.0],[3.0, 3.0]], [[4.0, 4.0],[5.0, 5.0],[6.0, 6.0]]]) out = paddle.bmm(x, y) #output size: (2, 2, 2) #output value: #[[[6.0, 6.0],[12.0, 12.0]],[[45.0, 45.0],[60.0, 60.0]]] out_np = out.numpy() """ x_shape = x.shape y_shape = y.shape if not len(x_shape) == len(y_shape) == 3: raise ValueError( "x and y should be 3-dimensional. But received x's dimention: {}, y's dimention: {}". format(x_shape, y_shape)) if x_shape[2] != y_shape[1]: raise ValueError( "x's width must be equal with y's height. But received x's shape: {}, y's shape: {}". format(x_shape, y_shape)) if x_shape[0] != y_shape[0]: raise ValueError( "x's batch (shape[0]) must be equal with y's batch (shape[0]). But received x's shape: {}, y's shape: {}". format(x_shape, y_shape)) if paddle.in_dynamic_mode(): return _C_ops.bmm(x, y) helper = LayerHelper('bmm', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type='bmm', inputs={'X': x, 'Y': y}, outputs={'Out': out}) return out def histogram(input, bins=100, min=0, max=0, name=None): """ Computes the histogram of a tensor. The elements are sorted into equal width bins between min and max. If min and max are both zero, the minimum and maximum values of the data are used. Args: input (Tensor): A Tensor(or LoDTensor) with shape :math:`[N_1, N_2,..., N_k]` . 
The data type of the input Tensor should be float32, float64, int32, int64. bins (int): number of histogram bins min (int): lower end of the range (inclusive) max (int): upper end of the range (inclusive) Returns: Tensor: data type is int64, shape is (nbins,). Examples: .. code-block:: python import paddle inputs = paddle.to_tensor([1, 2, 1]) result = paddle.histogram(inputs, bins=4, min=0, max=3) print(result) # [0, 2, 1, 0] """ if paddle.in_dynamic_mode(): return _C_ops.histogram(input, "bins", bins, "min", min, "max", max) helper = LayerHelper('histogram', **locals()) check_variable_and_dtype( input, 'X', ['int32', 'int64', 'float32', 'float64'], 'histogram') out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64) helper.append_op( type='histogram', inputs={'X': input}, outputs={'Out': out}, attrs={'bins': bins, 'min': min, 'max': max}) return out def bincount(x, weights=None, minlength=0, name=None): """ Computes frequency of each value in the input tensor. Args: x (Tensor): A Tensor with non-negative integer. Should be 1-D tensor. weights (Tensor, optional): Weight for each value in the input tensor. Should have the same shape as input. Default is None. minlength (int, optional): Minimum number of bins. Should be non-negative integer. Default is 0. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor of frequency. Examples: .. code-block:: python import paddle x = paddle.to_tensor([1, 2, 1, 4, 5]) result1 = paddle.bincount(x) print(result1) # [0, 2, 1, 0, 1, 1] w = paddle.to_tensor([2.1, 0.4, 0.1, 0.5, 0.5]) result2 = paddle.bincount(x, weights=w) print(result2) # [0., 2.19999981, 0.40000001, 0., 0.50000000, 0.50000000] """ if x.dtype not in [paddle.int32, paddle.int64]: raise TypeError("Elements in Input(x) should all be integers") if paddle.in_dynamic_mode(): return _C_ops.bincount(x, weights, "minlength", minlength) helper = LayerHelper('bincount', **locals()) check_variable_and_dtype(x, 'X', ['int32', 'int64'], 'bincount') if weights is not None: check_variable_and_dtype(weights, 'Weights', ['int32', 'int64', 'float32', 'float64'], 'bincount') out = helper.create_variable_for_type_inference(dtype=weights.dtype) else: out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='bincount', inputs={'X': x, 'Weights': weights}, outputs={'Out': out}, attrs={'minlength': minlength}) return out def mv(x, vec, name=None): """ Performs a matrix-vector product of the matrix x and the vector vec. Args: x (Tensor): A tensor with shape :math:`[M, N]` , The data type of the input Tensor x should be one of float32, float64. vec (Tensor): A tensor with shape :math:`[N]` , The data type of the input Tensor x should be one of float32, float64. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor which is producted by x and vec. Examples: .. 
code-block:: python # x: [M, N], vec: [N] # paddle.mv(x, vec) # out: [M] import numpy as np import paddle x_data = np.array([[2, 1, 3], [3, 0, 1]]).astype("float64") x = paddle.to_tensor(x_data) vec_data = np.array([3, 5, 1]) vec = paddle.to_tensor(vec_data).astype("float64") out = paddle.mv(x, vec) """ if in_dygraph_mode(): return _C_ops.final_state_mv(x, vec) else: if _in_legacy_dygraph(): out = _C_ops.mv(x, vec) return out else: def __check_input(x, vec): var_names = {'x': x, 'vec': vec} for name, val in var_names.items(): check_variable_and_dtype(val, name, ['float32', 'float64'], 'mv') x_shape = list(x.shape) vec_shape = list(vec.shape) if len(x_shape) != 2: raise ValueError( "x should be 2-dimensional. But received x's dimention: {}". format(x_shape)) if len(vec_shape) != 1: raise ValueError( "vec should be 1-dimensional. But received vec's dimention: {}". format(vec_shape)) __check_input(x, vec) helper = LayerHelper('mv', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='mv', inputs={'X': x, 'Vec': vec}, outputs={'Out': out}) return out def det(x, name=None): """ Calculates determinant value of a square matrix or batches of square matrices. Args: x (Tensor): input (Tensor): the input matrix of size `(n, n)` or the batch of matrices of size `(*, n, n)` where `*` is one or more batch dimensions. Returns: y (Tensor):the determinant value of a square matrix or batches of square matrices. Examples: .. code-block:: python import paddle x = paddle.randn([3,3,3]) A = paddle.linalg.det(x) print(A) # [ 0.02547996, 2.52317095, -6.15900707]) """ if paddle.in_dynamic_mode(): return _C_ops.determinant(x) check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'det') input_shape = list(x.shape) assert len(input_shape) >= 2, \ "The x must be at least 2-dimensional, " \ "but received Input x's dimensional: %s.\n" % \ len(input_shape) assert (input_shape[-1] == input_shape[-2]), \ "Expect squared input," \ "but received %s by %s matrix.\n" \ %(input_shape[-2], input_shape[-1]) \ helper = LayerHelper('determinant', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='determinant', inputs={'Input': [x]}, outputs={'Out': [out]}) return out def slogdet(x, name=None): """ Calculates the sign and natural logarithm of the absolute value of a square matrix's or batches square matrices' determinant. The determinant can be computed with ``sign * exp(logabsdet) Supports input of float, double Note that for matrices that have zero determinant, this returns ``(0, -inf)`` Args: x (Tensor): the batch of matrices of size :math:`(*, n, n)` where math:`*` is one or more batch dimensions. Returns: y (Tensor): A tensor containing the sign of the determinant and the natural logarithm of the absolute value of determinant, respectively. Examples: .. code-block:: python import paddle x = paddle.randn([3,3,3]) A = paddle.linalg.slogdet(x) print(A) # [[ 1. , 1. , -1. 
], # [-0.98610914, -0.43010661, -0.10872950]]) """ if paddle.in_dynamic_mode(): return _C_ops.slogdeterminant(x) check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'slogdet') input_shape = list(x.shape) assert len(input_shape) >= 2, \ "The x must be at least 2-dimensional, " \ "but received Input x's dimensional: %s.\n" % \ len(input_shape) assert (input_shape[-1] == input_shape[-2]), \ "Expect squared input," \ "but received %s by %s matrix.\n" \ %(input_shape[-2], input_shape[-1]) \ helper = LayerHelper('slogdeterminant', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='slogdeterminant', inputs={'Input': [x]}, outputs={'Out': [out]}) return out def svd(x, full_matrices=False, name=None): r""" Computes the singular value decomposition of one matrix or a batch of regular matrices. Let :math:`X` be the input matrix or a batch of input matrices, the output should satisfies: .. math:: X = U * diag(S) * VT Args: x (Tensor): The input tensor. Its shape should be `[..., N, M]`, where `...` is zero or more batch dimensions. N and M can be arbitraty positive number. Note that if x is sigular matrices, the grad is numerical instable. The data type of x should be float32 or float64. full_matrices (bool): A flag to control the behavor of svd. If full_matrices = True, svd op will compute full U and V matrics, which means shape of U is `[..., N, N]`, shape of V is `[..., M, M]`. K = min(M, N). If full_matrices = False, svd op will use a economic method to store U and V. which means shape of U is `[..., N, K]`, shape of V is `[..., M, K]`. K = min(M, N). name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tuple of 3 tensors: (U, S, VH). VH is the conjugate transpose of V. S is the singlar value vectors of matrics with shape `[..., K]` Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [1.0, 3.0], [4.0, 6.0]]).astype('float64') x = x.reshape([3, 2]) u, s, vh = paddle.linalg.svd(x) print (u) #U = [[ 0.27364809, -0.21695147 ], # [ 0.37892198, -0.87112408 ], # [ 0.8840446 , 0.44053933 ]] print (s) #S = [8.14753743, 0.78589688] print (vh) #VT= [[ 0.51411221, 0.85772294], # [ 0.85772294, -0.51411221]] # one can verify : U * S * VT == X # U * UH == I # V * VH == I """ if paddle.in_dynamic_mode(): return _C_ops.svd(x, 'full_matrices', full_matrices) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'svd') check_type(full_matrices, 'full_matrices', bool, 'svd') helper = LayerHelper('svd', **locals()) u = helper.create_variable_for_type_inference(dtype=x.dtype) vh = helper.create_variable_for_type_inference(dtype=x.dtype) s = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['full_matrices'] = full_matrices helper.append_op( type='svd', inputs={'X': [x]}, outputs={'U': u, 'VH': vh, 'S': s}, attrs=attrs, ) return u, s, vh def matrix_power(x, n, name=None): r""" Computes the n-th power of a square matrix or a batch of square matrices. Let :math:`X` be a sqaure matrix or a batch of square matrices, :math:`n` be an exponent, the equation should be: .. math:: Out = X ^ {n} Specifically, - If `n > 0`, it returns the matrix or a batch of matrices raised to the power of `n`. - If `n = 0`, it returns the identity matrix or a batch of identity matrices. - If `n < 0`, it returns the inverse of each matrix (if invertible) raised to the power of `abs(n)`. 
Args: x (Tensor): A square matrix or a batch of square matrices to be raised to power `n`. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. n (int): The exponent. It can be any positive, negative integer or zero. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The n-th power of the matrix (or the batch of matrices) `x`. Its data type should be the same as that of `x`. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1, 2, 3], [1, 4, 9], [1, 8, 27]], dtype='float64') print(paddle.linalg.matrix_power(x, 2)) # [[6. , 34. , 102.], # [14. , 90. , 282.], # [36. , 250., 804.]] print(paddle.linalg.matrix_power(x, 0)) # [[1., 0., 0.], # [0., 1., 0.], # [0., 0., 1.]] print(paddle.linalg.matrix_power(x, -2)) # [[ 12.91666667, -12.75000000, 2.83333333 ], # [-7.66666667 , 8. , -1.83333333 ], # [ 1.80555556 , -1.91666667 , 0.44444444 ]] """ if paddle.in_dynamic_mode(): return _C_ops.matrix_power(x, "n", n) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'matrix_power') check_type(n, 'n', int, 'matrix_power') helper = LayerHelper('matrix_power', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matrix_power', inputs={'X': x}, outputs={'Out': out}, attrs={'n': n}) return out def qr(x, mode="reduced", name=None): r""" Computes the QR decomposition of one matrix or batches of matrice (backward is unsupported now). Args: x (Tensor): The input tensor. Its shape should be `[..., M, N]`, where ... is zero or more batch dimensions. M and N can be arbitrary positive number. The data type of x should be float32 or float64. mode (str, optional): A flag to control the behavior of qr, the default is "reduced". Suppose x's shape is `[..., M, N]` and denoting `K = min(M, N)`: If mode = "reduced", qr op will return reduced Q and R matrices, which means Q's shape is `[..., M, K]` and R's shape is `[..., K, N]`. If mode = "complete", qr op will return complete Q and R matrices, which means Q's shape is `[..., M, M]` and R's shape is `[..., M, N]`. If mode = "r", qr op will only return reduced R matrix, which means R's shape is `[..., K, N]`. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: If mode = "reduced" or mode = "complete", qr will return a two tensor-tuple, which represents Q and R. If mode = "r", qr will return a tensor which represents R. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') q, r = paddle.linalg.qr(x) print (q) print (r) # Q = [[-0.16903085, 0.89708523], # [-0.50709255, 0.27602622], # [-0.84515425, -0.34503278]]) # R = [[-5.91607978, -7.43735744], # [ 0. 
, 0.82807867]]) # one can verify : X = Q * R ; """ if paddle.in_dynamic_mode(): q, r = _C_ops.qr(x, 'mode', mode) if mode == "r": return r else: return q, r check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'qr') check_type(mode, 'mode', str, 'qr') helper = LayerHelper('qr', **locals()) q = helper.create_variable_for_type_inference(dtype=x.dtype) r = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['mode'] = mode helper.append_op( type='qr', inputs={'X': [x]}, outputs={'Q': q, 'R': r}, attrs=attrs) if mode == "r": return r else: return q, r def lu(x, pivot=True, get_infos=False, name=None): r""" Computes the LU factorization of an N-D(N>=2) matrix x. Returns the LU factorization(inplace x) and Pivots. low triangular matrix L and upper triangular matrix U are combined to a single LU matrix. Pivoting is done if pivot is set to True. P mat can be get by pivots: # ones = eye(rows) #eye matrix of rank rows # for i in range(cols): # swap(ones[i], ones[pivots[i]]) # return ones Args: X (Tensor): the tensor to factor of N-dimensions(N>=2). pivot (bool, optional): controls whether pivoting is done. Default: True. get_infos (bool, optional): if set to True, returns an info IntTensor. Default: False. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: factorization (Tensor): LU matrix, the factorization of input X. pivots (IntTensor): the pivots of size(∗(N-2), min(m,n)). `pivots` stores all the intermediate transpositions of rows. The final permutation `perm` could be reconstructed by this, details refer to upper example. infos (IntTensor, optional): if `get_infos` is `True`, this is a tensor of size (∗(N-2)) where non-zero values indicate whether factorization for the matrix or each minibatch has succeeded or failed. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') lu,p,info = paddle.linalg.lu(x, get_infos=True) # >>> lu: # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0.20000000, 0.80000000], # [0.60000000, 0.50000000]]) # >>> p # Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # [3, 3]) # >>> info # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # 0) P,L,U = paddle.linalg.lu_unpack(lu,p) # >>> P # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[0., 1., 0.], # [0., 0., 1.], # [1., 0., 0.]]), # >>> L # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[1. , 0. ], # [0.20000000, 1. ], # [0.60000000, 0.50000000]]), # >>> U # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0. 
, 0.80000000]])) # one can verify : X = P @ L @ U ; """ if paddle.in_dynamic_mode(): LU, Piv, Info = _C_ops.lu(x, 'pivots', pivot) if get_infos: return LU, Piv, Info else: return LU, Piv check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu') helper = LayerHelper('lu', **locals()) lu = helper.create_variable_for_type_inference(dtype=x.dtype) p = helper.create_variable_for_type_inference(dtype='int') info = helper.create_variable_for_type_inference(dtype='int') attrs = dict() attrs['pivots'] = pivot helper.append_op( type='lu', inputs={'X': x}, outputs={'Out': lu, 'Pivots': p, 'Infos': info}, attrs=attrs) if get_infos: return lu, p, info else: return lu, p def lu_unpack(x, y, unpack_ludata=True, unpack_pivots=True, name=None): r""" Unpack L U and P to single matrix tensor . unpack L and U matrix from LU, unpack permutation matrix P from Pivtos . P mat can be get by pivots: # ones = eye(rows) #eye matrix of rank rows # for i in range(cols): # swap(ones[i], ones[pivots[i]]) Args: x (Tensor): The LU tensor get from paddle.linalg.lu, which is combined by L and U. y (Tensor): Pivots get from paddle.linalg.lu. unpack_ludata (bool,optional): whether to unpack L and U from x. Default: True. unpack_pivots (bool, optional): whether to unpack permutation matrix P from Pivtos. Default: True. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: P (Tensor): Permutation matrix P of lu factorization. L (Tensor): The lower triangular matrix tensor of lu factorization. U (Tensor): The upper triangular matrix tensor of lu factorization. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') lu,p,info = paddle.linalg.lu(x, get_infos=True) # >>> lu: # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0.20000000, 0.80000000], # [0.60000000, 0.50000000]]) # >>> p # Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # [3, 3]) # >>> info # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # 0) P,L,U = paddle.linalg.lu_unpack(lu,p) # >>> P # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[0., 1., 0.], # [0., 0., 1.], # [1., 0., 0.]]), # >>> L # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[1. , 0. ], # [0.20000000, 1. ], # [0.60000000, 0.50000000]]), # >>> U # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0. , 0.80000000]])) # one can verify : X = P @ L @ U ; """ if paddle.in_dynamic_mode(): P, L, U = _C_ops.lu_unpack(x, y, 'unpack_ludata', unpack_ludata, 'unpack_pivots', unpack_pivots) return P, L, U check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu_unpack') helper = LayerHelper('lu_unpack', **locals()) p = helper.create_variable_for_type_inference(dtype=x.dtype) l = helper.create_variable_for_type_inference(dtype=x.dtype) u = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['unpack_ludata'] = unpack_ludata attrs['unpack_pivots'] = unpack_pivots helper.append_op( type='lu_unpack', inputs={'X': x, 'Pivots': y}, outputs={'Pmat': p, 'L': l, 'U': u}, attrs=attrs) return p, l, u def eig(x, name=None): """ This API performs the eigenvalue decomposition of a square matrix or a batch of square matrices. .. 
note:: If the matrix is a Hermitian or a real symmetric matrix, please use :ref:`paddle.linalg.eigh` instead, which is much faster. If only eigenvalues is needed, please use :ref:`paddle.linalg.eigvals` instead. If the matrix is of any shape, please use :ref:`paddle.linalg.svd`. This API is only supported on CPU device. The output datatype is always complex for both real and complex input. Args: x (Tensor): A tensor with shape math:`[*, N, N]`, The data type of the x should be one of ``float32``, ``float64``, ``compplex64`` or ``complex128``. name (str, optional): The default value is `None`. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Eigenvalues(Tensors): A tensor with shape math:`[*, N]` refers to the eigen values. Eigenvectors(Tensors): A tensor with shape math:`[*, N, N]` refers to the eigen vectors. Examples: .. code-block:: python import paddle import numpy as np paddle.device.set_device("cpu") x_data = np.array([[1.6707249, 7.2249975, 6.5045543], [9.956216, 8.749598, 6.066444 ], [4.4251957, 1.7983172, 0.370647 ]]).astype("float32") x = paddle.to_tensor(x_data) w, v = paddle.linalg.eig(x) print(w) # Tensor(shape=[3, 3], dtype=complex128, place=CPUPlace, stop_gradient=False, # [[(-0.5061363550800655+0j) , (-0.7971760990842826+0j) , # (0.18518077798279986+0j)], # [(-0.8308237755993192+0j) , (0.3463813401919749+0j) , # (-0.6837005269141947+0j) ], # [(-0.23142567697893396+0j), (0.4944999840400175+0j) , # (0.7058765252952796+0j) ]]) print(v) # Tensor(shape=[3], dtype=complex128, place=CPUPlace, stop_gradient=False, # [ (16.50471283351188+0j) , (-5.5034820550763515+0j) , # (-0.21026087843552282+0j)]) """ if paddle.in_dynamic_mode(): w, v = _C_ops.eig(x) return w, v check_variable_and_dtype( x, 'X', ['float32', 'float64', 'complex64', 'complex128'], 'eig') helper = LayerHelper('eig', **locals()) w = helper.create_variable_for_type_inference(x.dtype) v = helper.create_variable_for_type_inference(x.dtype) inputs = {'X': x} outputs = {'Eigenvalues': w, 'Eigenvectors': v} helper.append_op(type='eig', inputs=inputs, outputs=outputs) return w, v def eigvals(x, name=None): """ Compute the eigenvalues of one or more general matrices. Warning: The gradient kernel of this operator does not yet developed. If you need back propagation through this operator, please replace it with paddle.linalg.eig. Args: x (Tensor): A square matrix or a batch of square matrices whose eigenvalues will be computed. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32, float64, complex64, or complex128. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: A tensor containing the unsorted eigenvalues which has the same batch dimensions with `x`. The eigenvalues are complex-valued even when `x` is real. Examples: .. 
code-block:: python import paddle paddle.set_device("cpu") paddle.seed(1234) x = paddle.rand(shape=[3, 3], dtype='float64') # [[0.02773777, 0.93004224, 0.06911496], # [0.24831591, 0.45733623, 0.07717843], # [0.48016702, 0.14235102, 0.42620817]]) print(paddle.linalg.eigvals(x)) # [(-0.27078833542132674+0j), (0.29962280156230725+0j), (0.8824477020120244+0j)] #complex128 """ check_variable_and_dtype(x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigvals') x_shape = list(x.shape) if len(x_shape) < 2: raise ValueError( "The dimension of Input(x) should be at least 2, but received x's dimention = {}, x's shape = {}". format(len(x_shape), x_shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The last two dimensions of Input(x) should be equal, but received x's shape = {}". format(x_shape)) if paddle.in_dynamic_mode(): return _C_ops.eigvals(x) helper = LayerHelper('eigvals', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type='eigvals', inputs={'X': x}, outputs={'Out': out}) return out def multi_dot(x, name=None): """ Multi_dot is an operator that calculates multiple matrix multiplications. Supports inputs of float16(only GPU support), float32 and float64 dtypes. This function does not support batched inputs. The input tensor in [x] must be 2-D except for the first and last can be 1-D. If the first tensor is a 1-D vector of shape(n, ) it is treated as row vector of shape(1, n), similarly if the last tensor is a 1D vector of shape(n, ), it is treated as a column vector of shape(n, 1). If the first and last tensor are 2-D matrix, then the output is also 2-D matrix, otherwise the output is a 1-D vector. Multi_dot will select the lowest cost multiplication order for calculation. The cost of multiplying two matrices with shapes (a, b) and (b, c) is a * b * c. Given matrices A, B, C with shapes (20, 5), (5, 100), (100, 10) respectively, we can calculate the cost of different multiplication orders as follows: - Cost((AB)C) = 20x5x100 + 20x100x10 = 30000 - Cost(A(BC)) = 5x100x10 + 20x5x10 = 6000 In this case, multiplying B and C first, then multiply A, which is 5 times faster than sequential calculation. Args: x ([Tensor]): The input tensors which is a list Tensor. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The output Tensor. Examples: .. 
code-block:: python import paddle import numpy as np # A * B A_data = np.random.random([3, 4]).astype(np.float32) B_data = np.random.random([4, 5]).astype(np.float32) A = paddle.to_tensor(A_data) B = paddle.to_tensor(B_data) out = paddle.linalg.multi_dot([A, B]) print(out.numpy().shape) # [3, 5] # A * B * C A_data = np.random.random([10, 5]).astype(np.float32) B_data = np.random.random([5, 8]).astype(np.float32) C_data = np.random.random([8, 7]).astype(np.float32) A = paddle.to_tensor(A_data) B = paddle.to_tensor(B_data) C = paddle.to_tensor(C_data) out = paddle.linalg.multi_dot([A, B, C]) print(out.numpy().shape) # [10, 7] """ if paddle.in_dynamic_mode(): return _C_ops.multi_dot(x) check_type(x, 'x', (list, tuple), 'multi_dot') for id, item in enumerate(x): check_variable_and_dtype(item, 'x[' + str(id) + ']', ['float16', 'float32', 'float64'], 'multi_dot') if item.dtype != x[0].dtype: raise TypeError( "All the Tensors in the input must have the same data type.") helper = LayerHelper('multi_dot', **locals()) dtype = helper.input_dtype(input_param_name='x') out = helper.create_variable_for_type_inference(dtype) helper.append_op(type='multi_dot', inputs={"X": x}, outputs={"Out": out}) return out def eigh(x, UPLO='L', name=None): """ Compute the eigenvalues and eigenvectors of a complex Hermitian (conjugate symmetric) or a real symmetric matrix. Args: x (Tensor): A tensor with shape :math:`[*, N, N]` , The data type of the input Tensor x should be one of float32, float64, complex64, complex128. UPLO(str, optional): (string, default 'L'), 'L' represents the lower triangular matrix, "'U' represents the upper triangular matrix.". name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: out_value(Tensor): A Tensor with shape [*, N] and data type of float32 and float64. The eigenvalues of eigh op. out_vector(Tensor): A Tensor with shape [*, N, N] and data type of float32,float64,complex64 and complex128. The eigenvectors of eigh op. Examples: .. code-block:: python import numpy as np import paddle x_data = np.array([[1, -2j], [2j, 5]]) x = paddle.to_tensor(x_data) out_value, out_vector = paddle.linalg.eigh(x, UPLO='L') print(out_value) #[0.17157288, 5.82842712] print(out_vector) #[(-0.9238795325112867+0j), (-0.3826834323650898+0j)], #[ 0.3826834323650898j , -0.9238795325112867j ]] """ if paddle.in_dynamic_mode(): return _C_ops.eigh(x, 'UPLO', UPLO) def __check_input(x, UPLO): x_shape = list(x.shape) if len(x.shape) < 2: raise ValueError( "Input(input) only support >=2 tensor, but received " "length of Input(input) is %s." % len(x.shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The input matrix must be batches of square matrices. But received x's dimention: {}". format(x_shape)) if UPLO != 'L' and UPLO != 'U': raise ValueError( "UPLO must be L or U. 
But received UPLO is: {}".format(UPLO)) __check_input(x, UPLO) helper = LayerHelper('eigh', **locals()) check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigh') out_value = helper.create_variable_for_type_inference(dtype=x.dtype) out_vector = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='eigh', inputs={'X': x}, outputs={'Eigenvalues': out_value, 'Eigenvectors': out_vector}, attrs={'UPLO': UPLO}) return out_value, out_vector def pinv(x, rcond=1e-15, hermitian=False, name=None): r""" Calculate pseudo inverse via SVD(singular value decomposition) of one matrix or batches of regular matrix. .. math:: if hermitian == False: x = u * s * vt (SVD) out = v * 1/s * ut else: x = u * s * ut (eigh) out = u * 1/s * u.conj().transpose(-2,-1) If x is hermitian or symmetric matrix, svd will be replaced with eigh. Args: x(Tensor): The input tensor. Its shape should be (*, m, n) where * is zero or more batch dimensions. m and n can be arbitraty positive number. The data type of x should be float32 or float64 or complex64 or complex128. When data type is complex64 or cpmplex128, hermitian should be set True. rcond(Tensor, optional): the tolerance value to determine when is a singular value zero. Defalut:1e-15. hermitian(bool, optional): indicates whether x is Hermitian if complex or symmetric if real. Default: False. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The tensor with same data type with x. it represents pseudo inverse of x. Its shape should be (*, n, m). Examples: .. code-block:: python import paddle x = paddle.arange(15).reshape((3, 5)).astype('float64') input = paddle.to_tensor(x) out = paddle.linalg.pinv(input) print(input) print(out) # input: # [[0. , 1. , 2. , 3. , 4. ], # [5. , 6. , 7. , 8. , 9. 
], # [10., 11., 12., 13., 14.]] # out: # [[-0.22666667, -0.06666667, 0.09333333], # [-0.12333333, -0.03333333, 0.05666667], # [-0.02000000, 0.00000000, 0.02000000], # [ 0.08333333, 0.03333333, -0.01666667], # [ 0.18666667, 0.06666667, -0.05333333]] # one can verify : x * out * x = x ; # or out * x * out = x ; """ if paddle.in_dynamic_mode(): if not hermitian: # combine svd and matmul op u, s, vt = _C_ops.svd(x, 'full_matrices', False) max_singular_val = _C_ops.reduce_max(s, 'dim', [-1], 'keep_dim', True, \ 'reduce_all', False) rcond = paddle.to_tensor(rcond, dtype=x.dtype) cutoff = rcond * max_singular_val y = float('inf') y = paddle.to_tensor(y, dtype=x.dtype) condition = s > cutoff cond_int = layers.cast(condition, s.dtype) cond_not_int = layers.cast(layers.logical_not(condition), s.dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st, _ = _C_ops.unsqueeze2(singular, 'axes', [-2]) dims = list(range(len(vt.shape))) perm = dims[:-2] + [dims[-1]] + [dims[-2]] v, _ = _C_ops.transpose2(vt, 'axis', perm) out_1 = v * st out_2 = _C_ops.matmul_v2(out_1, u, 'trans_x', False, 'trans_y', True) return out_2 else: # combine eigh and matmul op s, u = _C_ops.eigh(x, 'UPLO', 'L') s_abs = paddle.abs(s) max_singular_val = _C_ops.reduce_max(s_abs, 'dim', [-1], 'keep_dim', True, \ 'reduce_all', False) rcond = paddle.to_tensor(rcond, dtype=s.dtype) cutoff = rcond * max_singular_val y = float('inf') y = paddle.to_tensor(y, dtype=s.dtype) condition = s_abs > cutoff cond_int = layers.cast(condition, s.dtype) cond_not_int = layers.cast(layers.logical_not(condition), s.dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st, _ = _C_ops.unsqueeze2(singular, 'axes', [-2]) out_1 = u * st u_conj = _C_ops.conj(u) out_2 = _C_ops.matmul_v2(out_1, u_conj, 'trans_x', False, 'trans_y', True) return out_2 else: if not hermitian: helper = LayerHelper('pinv', **locals()) dtype = x.dtype check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'pinv') u = helper.create_variable_for_type_inference(dtype) s = helper.create_variable_for_type_inference(dtype) vt = helper.create_variable_for_type_inference(dtype) helper.append_op( type='svd', inputs={'X': [x]}, outputs={'U': u, 'VH': vt, 'S': s}, attrs={'full_matrices': False}, ) max_singular_val = helper.create_variable_for_type_inference(dtype) helper.append_op( type='reduce_max', inputs={'X': s}, outputs={'Out': max_singular_val}, attrs={'dim': [-1], 'keep_dim': True, 'reduce_all': False}) rcond = layers.fill_constant(shape=[1], value=rcond, dtype=dtype) cutoff = rcond * max_singular_val y = float('inf') y = layers.fill_constant(shape=[1], value=y, dtype=dtype) condition = s > cutoff cond_int = layers.cast(condition, dtype) cond_not_int = layers.cast(layers.logical_not(condition), dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st = helper.create_variable_for_type_inference(dtype=dtype) st_shape = helper.create_variable_for_type_inference(dtype=dtype) helper.append_op( type='unsqueeze2', inputs={'X': singular}, attrs={'axes': [-2]}, outputs={'Out': st, 'XShape': st_shape}) dims = list(range(len(vt.shape))) perm = dims[:-2] + [dims[-1]] + [dims[-2]] v = helper.create_variable_for_type_inference(dtype) v_shape = helper.create_variable_for_type_inference(dtype) helper.append_op( 
type='transpose2', inputs={'X': [vt]}, outputs={'Out': [v], 'XShape': [v_shape]}, attrs={'axis': perm}) out_1 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='elementwise_mul', inputs={'X': v, 'Y': st}, outputs={'Out': out_1}, attrs={'axis': -1, 'use_mkldnn': False}) out_1 = helper.append_activation(out_1) out_2 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='matmul_v2', inputs={'X': out_1, 'Y': u}, outputs={'Out': out_2}, attrs={'trans_x': False, 'trans_y': True}, ) return out_2 else: helper = LayerHelper('pinv', **locals()) dtype = x.dtype check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'pinv') if dtype == paddle.complex128: s_type = 'float64' elif dtype == paddle.complex64: s_type = 'float32' else: s_type = dtype u = helper.create_variable_for_type_inference(dtype) s = helper.create_variable_for_type_inference(s_type) helper.append_op( type='eigh', inputs={'X': x}, outputs={'Eigenvalues': s, 'Eigenvectors': u}, attrs={'UPLO': 'L'}) s_abs = helper.create_variable_for_type_inference(s_type) helper.append_op( type='abs', inputs={'X': s}, outputs={'Out': s_abs}) max_singular_val = helper.create_variable_for_type_inference(s_type) helper.append_op( type='reduce_max', inputs={'X': s_abs}, outputs={'Out': max_singular_val}, attrs={'dim': [-1], 'keep_dim': True, 'reduce_all': False}) rcond = layers.fill_constant(shape=[1], value=rcond, dtype=s_type) cutoff = rcond * max_singular_val y = float('inf') y = layers.fill_constant(shape=[1], value=y, dtype=s_type) condition = s_abs > cutoff cond_int = layers.cast(condition, s_type) cond_not_int = layers.cast(layers.logical_not(condition), s_type) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st = helper.create_variable_for_type_inference(dtype=s_type) st_shape = helper.create_variable_for_type_inference(dtype=s_type) helper.append_op( type='unsqueeze2', inputs={'X': singular}, attrs={'axes': [-2]}, outputs={'Out': st, 'XShape': st_shape}) out_1 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='elementwise_mul', inputs={'X': u, 'Y': st}, outputs={'Out': out_1}, attrs={'axis': -1, 'use_mkldnn': False}) out_1 = helper.append_activation(out_1) u_conj = helper.create_variable_for_type_inference(dtype) helper.append_op( type='conj', inputs={'X': u}, outputs={'Out': [u_conj]}) out_2 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='matmul_v2', inputs={'X': out_1, 'Y': u_conj}, outputs={'Out': out_2}, attrs={'trans_x': False, 'trans_y': True}, ) return out_2 def solve(x, y, name=None): r""" Computes the solution of a square system of linear equations with a unique solution for input 'X' and 'Y'. Let :math: `X` be a sqaure matrix or a batch of square matrices, :math:`Y` be a vector/matrix or a batch of vectors/matrices, the equation should be: .. math:: Out = X^-1 * Y Specifically, - This system of linear equations has one solution if and only if input 'X' is invertible. Args: x (Tensor): A square matrix or a batch of square matrices. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): A vector/matrix or a batch of vectors/matrices. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. name(str, optional): Name for the operation (optional, default is None). 
For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of a square system of linear equations with a unique solution for input 'x' and 'y'. Its data type should be the same as that of `x`. Examples: .. code-block:: python # a square system of linear equations: # 2*X0 + X1 = 9 # X0 + 2*X1 = 8 import paddle import numpy as np np_x = np.array([[3, 1],[1, 2]]) np_y = np.array([9, 8]) x = paddle.to_tensor(np_x, dtype="float64") y = paddle.to_tensor(np_y, dtype="float64") out = paddle.linalg.solve(x, y) print(out) # [2., 3.]) """ if paddle.in_dynamic_mode(): return _C_ops.solve(x, y) inputs = {"X": [x], "Y": [y]} helper = LayerHelper("solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type="solve", inputs={"X": x, "Y": y}, outputs={"Out": out}) return out def triangular_solve(x, y, upper=True, transpose=False, unitriangular=False, name=None): r""" Computes the solution of a system of equations with a triangular coefficient matrix `x` and multiple right-hand sides `y` . Input `x` and `y` is 2D matrices or batches of 2D matrices. If the inputs are batches, the outputs is also batches. Args: x (Tensor): The input triangular coefficient matrix. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. upper (bool, optional): Whether to solve the upper-triangular system of equations (default) or the lower-triangular system of equations. Default: True. transpose (bool, optional): whether `x` should be transposed before calculation. Default: False. unitriangular (bool, optional): whether `x` is unit triangular. If True, the diagonal elements of `x` are assumed to be 1 and not referenced from `x` . Default: False. name(str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of the system of equations. Its data type should be the same as that of `x`. Examples: .. code-block:: python # a square system of linear equations: # x1 + x2 + x3 = 0 # 2*x2 + x3 = -9 # -x3 = 5 import paddle import numpy as np x = paddle.to_tensor([[1, 1, 1], [0, 2, 1], [0, 0,-1]], dtype="float64") y = paddle.to_tensor([[0], [-9], [5]], dtype="float64") out = paddle.linalg.triangular_solve(x, y, upper=True) print(out) # [7, -2, -5] """ if paddle.in_dynamic_mode(): return _C_ops.triangular_solve(x, y, 'upper', upper, 'transpose', transpose, 'unitriangular', unitriangular) inputs = {"X": [x], "Y": [y]} helper = LayerHelper("triangular_solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'triangular_solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'triangular_solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='triangular_solve', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs={ 'upper': upper, 'transpose': transpose, 'unitriangular': unitriangular }) return out def cholesky_solve(x, y, upper=False, name=None): r""" Solves a linear system of equations A @ X = B, given A's Cholesky factor matrix u and matrix B. Input `x` and `y` is 2D matrices or batches of 2D matrices. 
If the inputs are batches, the outputs is also batches. Args: x (Tensor): The input matrix which is upper or lower triangular Cholesky factor of square matrix A. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. upper (bool, optional): whether to consider the Cholesky factor as a lower or upper triangular matrix. Default: False. name(str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of the system of equations. Its data type is the same as that of `x`. Examples: .. code-block:: python import paddle u = paddle.to_tensor([[1, 1, 1], [0, 2, 1], [0, 0,-1]], dtype="float64") b = paddle.to_tensor([[0], [-9], [5]], dtype="float64") out = paddle.linalg.cholesky_solve(b, u, upper=True) print(out) # [-2.5, -7, 9.5] """ if paddle.in_dynamic_mode(): return _C_ops.cholesky_solve(x, y, 'upper', upper) helper = LayerHelper("cholesky_solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'cholesky_solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'cholesky_solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='cholesky_solve', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs={'upper': upper}) return out def eigvalsh(x, UPLO='L', name=None): """ Computes the eigenvalues of a complex Hermitian (conjugate symmetric) or a real symmetric matrix. Args: x (Tensor): A tensor with shape :math:`[_, M, M]` , The data type of the input Tensor x should be one of float32, float64, complex64, complex128. UPLO(str, optional): Lower triangular part of a (‘L’, default) or the upper triangular part (‘U’). name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor eigenvalues in ascending order. Examples: .. code-block:: python import numpy as np import paddle x_data = np.array([[1, -2j], [2j, 5]]) x = paddle.to_tensor(x_data) out_value = paddle.eigvalsh(x, UPLO='L') print(out_value) #[0.17157288, 5.82842712] """ if paddle.in_dynamic_mode(): is_test = x.stop_gradient values, _ = _C_ops.eigvalsh(x, 'UPLO', UPLO, 'is_test', is_test) return values def __check_input(x, UPLO): x_shape = list(x.shape) if len(x.shape) < 2: raise ValueError( "Input(input) only support >=2 tensor, but received " "length of Input(input) is %s." % len(x.shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The input matrix must be batches of square matrices. But received x's dimention: {}". format(x_shape)) if UPLO != 'L' and UPLO != 'U': raise ValueError( "UPLO must be L or U. 
But received UPLO is: {}".format(UPLO)) __check_input(x, UPLO) helper = LayerHelper('eigvalsh', **locals()) check_variable_and_dtype(x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigvalsh') out_value = helper.create_variable_for_type_inference(dtype=x.dtype) out_vector = helper.create_variable_for_type_inference(dtype=x.dtype) is_test = x.stop_gradient helper.append_op( type='eigvalsh', inputs={'X': x}, outputs={'Eigenvalues': out_value, 'Eigenvectors': out_vector}, attrs={'UPLO': UPLO, 'is_test': is_test}) return out_value def lstsq(x, y, rcond=None, driver=None, name=None): """ Computes a solution to the least squares problem of a system of linear equations. Args: x (Tensor): A tensor with shape ``(*, M, N)`` , the data type of the input Tensor ``x`` should be one of float32, float64. y (Tensor): A tensor with shape ``(*, M, K)`` , the data type of the input Tensor ``y`` should be one of float32, float64. rcond(float, optional): The default value is None. A float pointing number used to determine the effective rank of ``x``. If ``rcond`` is None, it will be set to max(M, N) times the machine precision of x_dtype. driver(str, optional): The default value is None. The name of LAPACK method to be used. For CPU inputs the valid values are ‘gels’, ‘gelsy’, ‘gelsd, ‘gelss’. For CUDA input, the only valid driver is ‘gels’. If ``driver`` is None, ‘gelsy’ is used for CPU inputs and ‘gels’ for CUDA inputs. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tuple: A tuple of 4 Tensors which is (``solution``, ``residuals``, ``rank``, ``singular_values``). ``solution`` is a tensor with shape ``(*, N, K)``, meaning the least squares solution. ``residuals`` is a tensor with shape ``(*, K)``, meaning the squared residuals of the solutions, which is computed when M > N and every matrix in ``x`` is full-rank, otherwise return an empty tensor. ``rank`` is a tensor with shape ``(*)``, meaning the ranks of the matrices in ``x``, which is computed when ``driver`` in (‘gelsy’, ‘gelsd’, ‘gelss’), otherwise return an empty tensor. ``singular_values`` is a tensor with shape ``(*, min(M, N))``, meaning singular values of the matrices in ``x``, which is computed when ``driver`` in (‘gelsd’, ‘gelss’), otherwise return an empty tensor. Examples: .. code-block:: python import paddle paddle.set_device("cpu") x = paddle.to_tensor([[1, 3], [3, 2], [5, 6.]]) y = paddle.to_tensor([[3, 4, 6], [5, 3, 4], [1, 2, 1.]]) results = paddle.linalg.lstsq(x, y, driver="gelsd") print(results[0]) # [[ 0.78350395, -0.22165027, -0.62371236], # [-0.11340097, 0.78866047, 1.14948535]] print(results[1]) # [19.81443405, 10.43814468, 30.56185532]) print(results[2]) # 2 print(results[3]) # [9.03455734, 1.54167950] x = paddle.to_tensor([[10, 2, 3], [3, 10, 5], [5, 6, 12.]]) y = paddle.to_tensor([[4, 2, 9], [2, 0, 3], [2, 5, 3.]]) results = paddle.linalg.lstsq(x, y, driver="gels") print(results[0]) # [[ 0.39386186, 0.10230173, 0.93606132], # [ 0.10741687, -0.29028133, 0.11892585], # [-0.05115091, 0.51918161, -0.19948854]] print(results[1]) # [] """ device = paddle.get_device() if device == "cpu": if driver not in (None, "gels", "gelss", "gelsd", "gelsy"): raise ValueError( "Only support valid driver is 'gels', 'gelss', 'gelsd', 'gelsy' or None for CPU inputs. But got {}". 
                format(driver))
        driver = "gelsy" if driver is None else driver
    elif "gpu" in device:
        if driver not in (None, "gels"):
            raise ValueError(
                "Only support valid driver is 'gels' or None for CUDA inputs. But got {}".
                format(driver))
        driver = "gels" if driver is None else driver
    else:
        raise RuntimeError("Only support lstsq api for CPU or CUDA device.")

    if x.dtype == y.dtype and x.dtype in (paddle.float32, paddle.float64):
        pass
    else:
        raise ValueError(
            "Only support x and y have the same dtype such as 'float32' and 'float64'."
        )

    if rcond is None:
        if x.dtype == paddle.float32:
            rcond = 1e-7 * max(x.shape[-2], x.shape[-1])
        elif x.dtype == paddle.float64:
            rcond = 1e-15 * max(x.shape[-2], x.shape[-1])

    if paddle.in_dynamic_mode():
        solution, rank, singular_values = _C_ops.lstsq(x, y, "rcond", rcond,
                                                       "driver", driver)
        if x.shape[-2] > x.shape[-1]:
            matmul_out = _varbase_creator(dtype=x.dtype)
            _C_ops.matmul(x, solution, matmul_out, 'trans_x', False, 'trans_y',
                          False)
            minus_out = _C_ops.elementwise_sub(matmul_out, y)
            pow_out = _C_ops.pow(minus_out, 'factor', 2)
            residuals = _C_ops.reduce_sum(pow_out, 'dim', [-2], 'keepdim',
                                          False, 'reduce_all', False)
        else:
            residuals = paddle.empty(shape=[0], dtype=x.dtype)

        if driver == "gels":
            rank = paddle.empty(shape=[0], dtype=paddle.int32)
            singular_values = paddle.empty(shape=[0], dtype=x.dtype)
        elif driver == "gelsy":
            singular_values = paddle.empty(shape=[0], dtype=x.dtype)

        return solution, residuals, rank, singular_values

    helper = LayerHelper('lstsq', **locals())
    check_variable_and_dtype(
        x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq')
    check_variable_and_dtype(
        y, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq')

    solution = helper.create_variable_for_type_inference(dtype=x.dtype)
    residuals = helper.create_variable_for_type_inference(dtype=x.dtype)
    rank = helper.create_variable_for_type_inference(dtype=paddle.int32)
    singular_values = helper.create_variable_for_type_inference(dtype=x.dtype)

    helper.append_op(
        type='lstsq',
        inputs={'X': x,
                'Y': y},
        outputs={
            'Solution': solution,
            'Rank': rank,
            'SingularValues': singular_values
        },
        attrs={'rcond': rcond,
               'driver': driver})

    matmul_out = helper.create_variable_for_type_inference(dtype=x.dtype)
    minus_out = helper.create_variable_for_type_inference(dtype=x.dtype)
    pow_out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='matmul_v2',
        inputs={'X': x,
                'Y': solution},
        outputs={'Out': matmul_out},
        attrs={
            'trans_x': False,
            'trans_y': False,
        })
    helper.append_op(
        type='elementwise_sub',
        inputs={'X': matmul_out,
                'Y': y},
        outputs={'Out': minus_out})
    helper.append_op(
        type='pow',
        inputs={'X': minus_out},
        outputs={'Out': pow_out},
        attrs={'factor': 2})
    helper.append_op(
        type='reduce_sum',
        inputs={'X': pow_out},
        outputs={'Out': residuals},
        attrs={'dim': [-2],
               'keep_dim': False,
               'reduce_all': False})

    if driver == "gels":
        rank = paddle.static.data(name='rank', shape=[0])
        singular_values = paddle.static.data(name='singular_values', shape=[0])
    elif driver == "gelsy":
        singular_values = paddle.static.data(name='singular_values', shape=[0])

    return solution, residuals, rank, singular_values
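# Illustrative sketch (not part of the original Paddle source): as the code
# above shows, the residuals returned by lstsq are the column-wise squared
# fitting errors, and they are only computed when M > N.  Reusing the sample
# tensors from the docstring example, the relationship can be checked directly:

import paddle

paddle.set_device("cpu")
x = paddle.to_tensor([[1, 3], [3, 2], [5, 6.]])
y = paddle.to_tensor([[3, 4, 6], [5, 3, 4], [1, 2, 1.]])
solution, residuals, rank, singular_values = paddle.linalg.lstsq(
    x, y, driver="gelsd")

# Recompute the residuals by hand; the two tensors should agree up to
# floating point error.
manual = ((paddle.matmul(x, solution) - y) ** 2).sum(axis=-2)
print(residuals)
print(manual)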
vector_norm
Calculate the p-order vector norm along a given dimension of Tensor `input`.
Args:
    input (Variable): Tensor, data type float32, float64.
    porder (float, optional): The order of the norm; None means porder=2.0.
    axis (int, optional): The dimension to reduce; None means the last dimension.
    keepdim (bool, optional): Whether to keep the reduced dimension in the output, as in `input`. Default False.
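The following sketch is illustrative only (it is not part of the original record) and uses the public `paddle.linalg.norm` API documented later in this file: for a finite porder p > 0, the reduction described above is simply (|x| ** p).sum(axis) ** (1 / p).

import paddle

x = paddle.to_tensor([[3., 4.], [6., 8.]])
# 2-norm along the last axis: sqrt(3**2 + 4**2) = 5, sqrt(6**2 + 8**2) = 10
print(paddle.linalg.norm(x, p=2, axis=-1))    # [5., 10.]
print((x.abs() ** 2).sum(axis=-1) ** 0.5)     # the same values computed by hand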
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from ..fluid.layer_helper import LayerHelper from ..framework import _varbase_creator, _dygraph_tracer from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype from ..static import Variable from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode from ..fluid.layers import transpose, cast # noqa: F401 from ..fluid import layers import paddle from paddle.common_ops_import import core from paddle.common_ops_import import VarDesc from paddle import _C_ops __all__ = [] def matmul(x, y, transpose_x=False, transpose_y=False, name=None): """ Applies matrix multiplication to two tensors. `matmul` follows the complete broadcast rules, and its behavior is consistent with `np.matmul`. Currently, the input tensors' number of dimensions can be any, `matmul` can be used to achieve the `dot`, `matmul` and `batchmatmul`. The actual behavior depends on the shapes of :math:`x`, :math:`y` and the flag values of :attr:`transpose_x`, :attr:`transpose_y`. Specifically: - If a transpose flag is specified, the last two dimensions of the tensor are transposed. If the tensor is ndim-1 of shape, the transpose is invalid. If the tensor is ndim-1 of shape :math:`[D]`, then for :math:`x` it is treated as :math:`[1, D]`, whereas for :math:`y` it is the opposite: It is treated as :math:`[D, 1]`. The multiplication behavior depends on the dimensions of `x` and `y`. Specifically: - If both tensors are 1-dimensional, the dot product result is obtained. - If both tensors are 2-dimensional, the matrix-matrix product is obtained. - If the `x` is 1-dimensional and the `y` is 2-dimensional, a `1` is prepended to its dimension in order to conduct the matrix multiply. After the matrix multiply, the prepended dimension is removed. - If the `x` is 2-dimensional and `y` is 1-dimensional, the matrix-vector product is obtained. - If both arguments are at least 1-dimensional and at least one argument is N-dimensional (where N > 2), then a batched matrix multiply is obtained. If the first argument is 1-dimensional, a 1 is prepended to its dimension in order to conduct the batched matrix multiply and removed after. If the second argument is 1-dimensional, a 1 is appended to its dimension for the purpose of the batched matrix multiple and removed after. The non-matrix (exclude the last two dimensions) dimensions are broadcasted according the broadcast rule. For example, if input is a (j, 1, n, m) tensor and the other is a (k, m, p) tensor, out will be a (j, k, n, p) tensor. Args: x (Tensor): The input tensor which is a Tensor. y (Tensor): The input tensor which is a Tensor. transpose_x (bool): Whether to transpose :math:`x` before multiplication. transpose_y (bool): Whether to transpose :math:`y` before multiplication. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The output Tensor. Examples: .. 
code-block:: python import paddle import numpy as np # vector * vector x_data = np.random.random([10]).astype(np.float32) y_data = np.random.random([10]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [1] # matrix * vector x_data = np.random.random([10, 5]).astype(np.float32) y_data = np.random.random([5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10] # batched matrix * broadcasted vector x_data = np.random.random([10, 5, 2]).astype(np.float32) y_data = np.random.random([2]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 5] # batched matrix * batched matrix x_data = np.random.random([10, 5, 2]).astype(np.float32) y_data = np.random.random([10, 2, 5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 5, 5] # batched matrix * broadcasted matrix x_data = np.random.random([10, 1, 5, 2]).astype(np.float32) y_data = np.random.random([1, 3, 2, 5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 3, 5, 5] """ if in_dygraph_mode(): return _C_ops.final_state_matmul(x, y, transpose_x, transpose_y) if _in_legacy_dygraph(): op_type = 'matmul_v2' op = getattr(_C_ops, op_type) return op(x, y, 'trans_x', transpose_x, 'trans_y', transpose_y) attrs = { 'trans_x': transpose_x, 'trans_y': transpose_y, } def __check_input(x, y): var_names = {'x': x, 'y': y} for name, val in var_names.items(): check_variable_and_dtype( val, name, ['float16', 'float32', 'float64', 'complex64', 'complex128'], 'matmul') __check_input(x, y) helper = LayerHelper('matmul_v2', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matmul_v2', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs=attrs) return out def norm(x, p='fro', axis=None, keepdim=False, name=None): """ Returns the matrix norm (Frobenius) or vector norm (the 1-norm, the Euclidean or 2-norm, and in general the p-norm for p > 0) of a given tensor. .. note:: This norm API is different from `numpy.linalg.norm`. This api supports high-order input tensors (rank >= 3), and certain axis need to be pointed out to calculate the norm. But `numpy.linalg.norm` only supports 1-D vector or 2-D matrix as input tensor. For p-order matrix norm, this api actually treats matrix as a flattened vector to calculate the vector norm, NOT REAL MATRIX NORM. Args: x (Tensor): The input tensor could be N-D tensor, and the input data type could be float32 or float64. p (float|string, optional): Order of the norm. Supported values are `fro`, `0`, `1`, `2`, `inf`, `-inf` and any positive real number yielding the corresponding p-norm. Not supported: ord < 0 and nuclear norm. Default value is `fro`. axis (int|list|tuple, optional): The axis on which to apply norm operation. If axis is int or list(int)/tuple(int) with only one element, the vector norm is computed over the axis. If `axis < 0`, the dimension to norm operation is rank(input) + axis. If axis is a list(int)/tuple(int) with two elements, the matrix norm is computed over the axis. Defalut value is `None`. keepdim (bool, optional): Whether to reserve the reduced dimension in the output Tensor. 
The result tensor will have fewer dimension than the :attr:`input` unless :attr:`keepdim` is true, default value is False. name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: results of norm operation on the specified axis of input tensor, it's data type is the same as input's Tensor. Examples: .. code-block:: python import paddle import numpy as np shape=[2, 3, 4] np_input = np.arange(24).astype('float32') - 12 np_input = np_input.reshape(shape) x = paddle.to_tensor(np_input) #[[[-12. -11. -10. -9.] [ -8. -7. -6. -5.] [ -4. -3. -2. -1.]] # [[ 0. 1. 2. 3.] [ 4. 5. 6. 7.] [ 8. 9. 10. 11.]]] # compute frobenius norm along last two dimensions. out_fro = paddle.linalg.norm(x, p='fro', axis=[0,1]) # out_fro.numpy() [17.435596 16.911535 16.7332 16.911535] # compute 2-order vector norm along last dimension. out_pnorm = paddle.linalg.norm(x, p=2, axis=-1) #out_pnorm.numpy(): [[21.118711 13.190906 5.477226] # [ 3.7416575 11.224972 19.131126]] # compute 2-order norm along [0,1] dimension. out_pnorm = paddle.linalg.norm(x, p=2, axis=[0,1]) #out_pnorm.numpy(): [17.435596 16.911535 16.7332 16.911535] # compute inf-order norm out_pnorm = paddle.linalg.norm(x, p=np.inf) #out_pnorm.numpy() = [12.] out_pnorm = paddle.linalg.norm(x, p=np.inf, axis=0) #out_pnorm.numpy(): [[12. 11. 10. 9.] [8. 7. 6. 7.] [8. 9. 10. 11.]] # compute -inf-order norm out_pnorm = paddle.linalg.norm(x, p=-np.inf) #out_pnorm.numpy(): [0.] out_pnorm = paddle.linalg.norm(x, p=-np.inf, axis=0) #out_pnorm.numpy(): [[0. 1. 2. 3.] [4. 5. 6. 5.] [4. 3. 2. 1.]] """ def frobenius_norm(input, dim=None, keepdim=False, name=None): """ The frobenius norm OP is to calculate the frobenius norm of certain two dimensions of Tensor `input`. Args: input (Variable): Tensor, data type float32, float64. dim (list, optional): None for last two dimensions. keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False. """ if dim is not None and not (isinstance(dim, list) and len(dim) == 2): raise ValueError( "The dim of frobenius norm op should be None or two elements list!" 
) if paddle.in_dynamic_mode(): if dim is None: return _C_ops.frobenius_norm(input, 'keep_dim', keepdim, 'reduce_all', True) return _C_ops.frobenius_norm(input, 'dim', dim, 'keep_dim', keepdim, 'reduce_all', False) attrs = {'dim': dim, 'keep_dim': keepdim, 'reduce_all': False} if dim is None: attrs['reduce_all'] = True check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'frobenius_norm') helper = LayerHelper('frobenius_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op( type='frobenius_norm', inputs={'X': input}, outputs={'Out': out}, attrs=attrs) return out # MASKED: vector_norm function (lines 277-318) def inf_norm(input, porder=None, axis=axis, keepdim=False, asvector=False, name=None): helper = LayerHelper('frobenius_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op(type='abs', inputs={'X': input}, outputs={'Out': out}) reduce_out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) reduce_all = True if axis == None or axis == [] or asvector == True else False axis = axis if axis != None and axis != [] else [0] reduce_type = 'reduce_max' if porder == np.float( 'inf') else 'reduce_min' helper.append_op( type=reduce_type, inputs={'X': out}, outputs={'Out': reduce_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) return reduce_out def p_matrix_norm(input, porder=1., axis=axis, keepdim=False, name=None): """ NOTE: This function actually treats the matrix as flattened vector to calculate vector norm instead of matrix norm. """ block = LayerHelper('norm', **locals()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) abs_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='abs', inputs={'X': input}, outputs={'Out': abs_out}) pow_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='pow', inputs={'X': abs_out}, outputs={'Out': pow_out}, attrs={'factor': porder}) sum_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': sum_out}, attrs={ 'dim': axis, 'keep_dim': keepdim, 'reduce_all': True if axis is None else False }) porder block.append_op( type='pow', inputs={'X': sum_out}, outputs={'Out': out}, attrs={'factor': float(1. / porder)}) return out if axis is None and p is not None: if isinstance(p, str): if p == "fro": return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name) else: raise ValueError( "only valid string values are 'fro', found {}".format(p)) elif isinstance(p, (int, float)): return vector_norm( x, porder=p, axis=axis, keepdim=keepdim, asvector=True, name=name) else: raise ValueError("only valid p type is string or float, found {}". format(type(p))) if isinstance(axis, tuple): axis = list(axis) if isinstance(axis, list) and len(axis) == 1: axis = axis[0] #calculate vector norm, where axis is int or list with only one integer if isinstance(axis, int): if isinstance(p, str): if p == "fro": return vector_norm( x, porder=2, axis=axis, keepdim=keepdim, asvector=False, name=name) else: raise ValueError( "only valid string values are 'fro', found {}".format(p)) elif isinstance(p, (int, float)): return vector_norm( x, axis=axis, porder=p, keepdim=keepdim, asvector=False, name=name) else: raise ValueError( "unspport p for p-order vector norm. except float, found {}". 
format(p)) #calculate matrix norm, where axis is list with two integers elif isinstance(axis, list) and len(axis) == 2: if p == "fro": return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name) elif p == np.inf or p == -np.inf: return inf_norm(x, porder=p, axis=axis, keepdim=keepdim, name=name) elif p == 0: raise ValueError( "just suport axis type int or list (length of list <=1) if p = 0, found {}". format(axis)) else: return p_matrix_norm( x, porder=p, axis=axis, keepdim=keepdim, name=name) else: raise ValueError( "except axis type int or list (length of list <=2), found {}". format(axis)) def dist(x, y, p=2, name=None): r""" This OP returns the p-norm of (x - y). It is not a norm in a strict sense, only as a measure of distance. The shapes of x and y must be broadcastable. The definition is as follows, for details, please refer to the `numpy's broadcasting <https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`_: - Each input has at least one dimension. - Match the two input dimensions from back to front, the dimension sizes must either be equal, one of them is 1, or one of them does not exist. Where, z = x - y, the shapes of x and y are broadcastable, then the shape of z can be obtained as follows: 1. If the number of dimensions of x and y are not equal, prepend 1 to the dimensions of the tensor with fewer dimensions. For example, The shape of x is [8, 1, 6, 1], the shape of y is [7, 1, 5], prepend 1 to the dimension of y. x (4-D Tensor): 8 x 1 x 6 x 1 y (4-D Tensor): 1 x 7 x 1 x 5 2. Determine the size of each dimension of the output z: choose the maximum value from the two input dimensions. z (4-D Tensor): 8 x 7 x 6 x 5 If the number of dimensions of the two inputs are the same, the size of the output can be directly determined in step 2. When p takes different values, the norm formula is as follows: When p = 0, defining $0^0=0$, the zero-norm of z is simply the number of non-zero elements of z. .. math:: ||z||_{0}=\lim_{p \\rightarrow 0}\sum_{i=1}^{m}|z_i|^{p} When p = inf, the inf-norm of z is the maximum element of z. .. math:: ||z||_\infty=\max_i |z_i| When p = -inf, the negative-inf-norm of z is the minimum element of z. .. math:: ||z||_{-\infty}=\min_i |z_i| Otherwise, the p-norm of z follows the formula, .. math:: ||z||_{p}=(\sum_{i=1}^{m}|z_i|^p)^{\\frac{1}{p}} Args: x (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64. y (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64. p (float, optional): The norm to be computed, its data type is float32 or float64. Default: 2. Returns: Tensor: Tensor that is the p-norm of (x - y). Examples: .. code-block:: python import paddle import numpy as np x = paddle.to_tensor(np.array([[3, 3],[3, 3]]), "float32") y = paddle.to_tensor(np.array([[3, 3],[3, 1]]), "float32") out = paddle.dist(x, y, 0) print(out) # out = [1.] out = paddle.dist(x, y, 2) print(out) # out = [2.] out = paddle.dist(x, y, float("inf")) print(out) # out = [2.] out = paddle.dist(x, y, float("-inf")) print(out) # out = [0.] 
""" check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'dist') check_variable_and_dtype(y, 'dtype', ['float32', 'float64'], 'dist') check_type(p, 'p', (float, int), 'dist') helper = LayerHelper("dist", **locals()) out = helper.create_variable_for_type_inference(x.dtype) inputs = {"X": [x], "Y": [y]} outputs = {'Out': [out]} attrs = {"p": float(p)} helper.append_op( type='dist', inputs=inputs, outputs={'Out': out}, attrs=attrs) return out def cond(x, p=None, name=None): """ Computes the condition number of a matrix or batches of matrices with respect to a matrix norm ``p``. Args: x (Tensor): The input tensor could be tensor of shape ``(*, m, n)`` where ``*`` is zero or more batch dimensions for ``p`` in ``(2, -2)``, or of shape ``(*, n, n)`` where every matrix is invertible for any supported ``p``. And the input data type could be ``float32`` or ``float64``. p (float|string, optional): Order of the norm. Supported values are `fro`, `nuc`, `1`, `-1`, `2`, `-2`, `inf`, `-inf`. Default value is `None`, meaning that the order of the norm is `2`. name (str, optional): The default value is `None`. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: computing results of condition number, its data type is the same as input Tensor ``x``. Examples: .. code-block:: python import paddle import numpy as np x = paddle.to_tensor([[1., 0, -1], [0, 1, 0], [1, 0, 1]]) # compute conditional number when p is None out = paddle.linalg.cond(x) # out.numpy() [1.4142135] # compute conditional number when order of the norm is 'fro' out_fro = paddle.linalg.cond(x, p='fro') # out_fro.numpy() [3.1622777] # compute conditional number when order of the norm is 'nuc' out_nuc = paddle.linalg.cond(x, p='nuc') # out_nuc.numpy() [9.2426405] # compute conditional number when order of the norm is 1 out_1 = paddle.linalg.cond(x, p=1) # out_1.numpy() [2.] # compute conditional number when order of the norm is -1 out_minus_1 = paddle.linalg.cond(x, p=-1) # out_minus_1.numpy() [1.] # compute conditional number when order of the norm is 2 out_2 = paddle.linalg.cond(x, p=2) # out_2.numpy() [1.4142135] # compute conditional number when order of the norm is -1 out_minus_2 = paddle.linalg.cond(x, p=-2) # out_minus_2.numpy() [0.70710677] # compute conditional number when order of the norm is inf out_inf = paddle.linalg.cond(x, p=np.inf) # out_inf.numpy() [2.] # compute conditional number when order of the norm is -inf out_minus_inf = paddle.linalg.cond(x, p=-np.inf) # out_minus_inf.numpy() [1.] 
a = paddle.to_tensor(np.random.randn(2, 4, 4).astype('float32')) # a.numpy() # [[[ 0.14063153 -0.996288 0.7996131 -0.02571543] # [-0.16303636 1.5534962 -0.49919784 -0.04402903] # [-1.1341571 -0.6022629 0.5445269 0.29154757] # [-0.16816919 -0.30972657 1.7521842 -0.5402487 ]] # [[-0.58081484 0.12402827 0.7229862 -0.55046535] # [-0.15178485 -1.1604939 0.75810957 0.30971205] # [-0.9669573 1.0940945 -0.27363303 -0.35416734] # [-1.216529 2.0018666 -0.7773689 -0.17556527]]] a_cond_fro = paddle.linalg.cond(a, p='fro') # a_cond_fro.numpy() [31.572273 28.120834] b = paddle.to_tensor(np.random.randn(2, 3, 4).astype('float64')) # b.numpy() # [[[ 1.61707487 0.46829144 0.38130416 0.82546736] # [-1.72710298 0.08866375 -0.62518804 0.16128892] # [-0.02822879 -1.67764516 0.11141444 0.3220113 ]] # [[ 0.22524372 0.62474921 -0.85503233 -1.03960523] # [-0.76620689 0.56673047 0.85064753 -0.45158196] # [ 1.47595418 2.23646462 1.5701758 0.10497519]]] b_cond_2 = paddle.linalg.cond(b, p=2) # b_cond_2.numpy() [3.30064451 2.51976252] """ def mat_norm(input, porder=1., axis=None): """ NOTE: Calculate the matrix norm of a square matrix or batches of square matrices, when porder is in (1, -1, inf, -inf) """ reduce_all = True if axis is None or axis == [] else False axis = axis if axis != None and axis != [] else [0] keepdim = False if paddle.in_dynamic_mode(): abs_out = _C_ops.abs(input) sum_out = _C_ops.reduce_sum(abs_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == 1 or porder == np.inf: return _C_ops.reduce_max(sum_out, 'dim', [-1], 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == -1 or porder == -np.inf: return _C_ops.reduce_min(sum_out, 'dim', [-1], 'keepdim', keepdim, 'reduce_all', reduce_all) block = LayerHelper('norm', **locals()) abs_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='abs', inputs={'X': input}, outputs={'Out': abs_out}) block.append_op( type='reduce_sum', inputs={'X': abs_out}, outputs={'Out': sum_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) if porder == 1 or porder == np.inf: block.append_op( type='reduce_max', inputs={'X': sum_out}, outputs={'Out': out}, attrs={ 'dim': [-1], 'keep_dim': keepdim, 'reduce_all': reduce_all }) if porder == -1 or porder == -np.inf: block.append_op( type='reduce_min', inputs={'X': sum_out}, outputs={'Out': out}, attrs={ 'dim': [-1], 'keep_dim': keepdim, 'reduce_all': reduce_all }) return out def fro_norm(input, porder=2, axis=[-1]): """ NOTE: Calculate the frobenius norm of a square matrix or batches of square matrices. """ reduce_all = True if axis is None or axis == [] else False keepdim = False if paddle.in_dynamic_mode(): pow_out = _C_ops.pow(input, 'factor', porder) sum_out_1 = _C_ops.reduce_sum(pow_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) sum_out_2 = _C_ops.reduce_sum(sum_out_1, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) return _C_ops.pow(sum_out_2, 'factor', float(1. 
/ porder)) block = LayerHelper('norm', **locals()) pow_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out_1 = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out_2 = block.create_variable_for_type_inference( dtype=block.input_dtype()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='pow', inputs={'X': input}, outputs={'Out': pow_out}, attrs={'factor': porder}) block.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': sum_out_1}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='reduce_sum', inputs={'X': sum_out_1}, outputs={'Out': sum_out_2}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='pow', inputs={'X': sum_out_2}, outputs={'Out': out}, attrs={'factor': float(1. / porder)}) return out def svd_norm(input, porder, axis=[-1]): """ NOTE: Calculate the matrix norm, which is related to singular values, of a matrix or batches of matrices, including nuclear norm, 2-norm and (-2)-norm. """ reduce_all = True if axis is None or axis == [] else False keepdim = False u, s, vh = svd(input, full_matrices=False) if paddle.in_dynamic_mode(): if porder == "nuc": return _C_ops.reduce_sum(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) max_out = _C_ops.reduce_max(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) min_out = _C_ops.reduce_min(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == 2: return _C_ops.elementwise_div(max_out, min_out, 'aixs', axis, 'use_mkldnn', False) if porder == -2: return _C_ops.elementwise_div(min_out, max_out, 'aixs', axis, 'use_mkldnn', False) block = LayerHelper('norm', **locals()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) if porder == "nuc": block.append_op( type='reduce_sum', inputs={'X': s}, outputs={'Out': out}, attrs={ 'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all }) return out max_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) min_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='reduce_max', inputs={'X': s}, outputs={'Out': max_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='reduce_min', inputs={'X': s}, outputs={'Out': min_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) if porder == 2: block.append_op( type='elementwise_div', inputs={'X': max_out, 'Y': min_out}, outputs={'Out': out}, attrs={'aixs': axis, 'use_mkldnn': False}) return out if porder == -2: block.append_op( type='elementwise_div', inputs={'X': min_out, 'Y': max_out}, outputs={'Out': out}, attrs={'aixs': axis, 'use_mkldnn': False}) return out def empty_tensor(input, shape): if paddle.in_dynamic_mode(): return input.reshape(shape) raise ValueError("only support x is nonempty tensor in static mode") x_shape = list(x.shape) if not len(x_shape) >= 2: raise ValueError("input should be a matrix or batches of matrices, " + "but the dimention of received input is {}".format( len(x_shape))) if p == None: p = 2 x_size = 0 if (0 in x_shape) else 1 if p in ("fro", "nuc", 1, -1, np.inf, -np.inf): if x_shape[len(x_shape) - 1] == x_shape[len(x_shape) - 2]: if x_size == 0: return empty_tensor(x, x_shape[:-2]) x_inv = x.inverse() if p == "fro": return fro_norm(x) * fro_norm(x_inv) if p == "nuc": return svd_norm(x, p) * svd_norm(x_inv, p) if p in (1, -1): return mat_norm( x, 
porder=p, axis=[-2]) * mat_norm( x_inv, porder=p, axis=[-2]) if p in (np.inf, -np.inf): return mat_norm( x, porder=p, axis=[-1]) * mat_norm( x_inv, porder=p, axis=[-1]) else: raise ValueError("only support p is {} when input is a ".format(p) + "square matrix or batches of square matrices") elif p in (2, -2): if x_size == 0: return empty_tensor(x, x_shape[:-2]) return svd_norm(x, porder=p) else: raise ValueError( "unsupported {} for p, only supporting ('fro', 'nuc', ".format( p) + "1, -1, 2, -2, inf, -inf) or none") def dot(x, y, name=None): """ This operator calculates inner product for vectors. .. note:: Support 1-d and 2-d Tensor. When it is 2d, the first dimension of this matrix is the batch dimension, which means that the vectors of multiple batches are dotted. Parameters: x(Tensor): 1-D or 2-D ``Tensor``. Its dtype should be ``float32``, ``float64``, ``int32``, ``int64`` y(Tensor): 1-D or 2-D ``Tensor``. Its dtype soulde be ``float32``, ``float64``, ``int32``, ``int64`` name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. Details: :ref:`api_guide_Name` Returns: Tensor: the calculated result Tensor. Examples: .. code-block:: python import paddle import numpy as np x_data = np.random.uniform(0.1, 1, [10]).astype(np.float32) y_data = np.random.uniform(1, 3, [10]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.dot(x, y) print(z) """ op_type = 'dot' # skip var type check in dygraph mode to improve efficiency if paddle.in_dynamic_mode(): op = getattr(_C_ops, op_type) return op(x, y) assert x is not None, 'x cannot be None in {}'.format(op_type) assert y is not None, 'y cannot be None in {}'.format(op_type) check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], op_type) check_variable_and_dtype(y, 'y', ['float32', 'float64', 'int32', 'int64'], op_type) helper = LayerHelper(op_type, **locals()) if name is None: out = helper.create_variable_for_type_inference(dtype=x.dtype) else: out = helper.create_variable( name=name, dtype=x.dtype, persistable=False) helper.append_op( type="dot", inputs={'X': x, 'Y': y}, attrs={}, outputs={"Out": out}) return out def cov(x, rowvar=True, ddof=True, fweights=None, aweights=None, name=None): """ Estimate the covariance matrix of the input variables, given data and weights. A covariance matrix is a square matrix, indicate the covariance of each pair variables in the input matrix. For example, for an N-dimensional samples X=[x1,x2,…xN]T, then the covariance matrix element Cij is the covariance of xi and xj. The element Cii is the variance of xi itself. Parameters: x(Tensor): A N-D(N<=2) Tensor containing multiple variables and observations. By default, each row of x represents a variable. Also see rowvar below. rowvar(Bool, optional): If rowvar is True (default), then each row represents a variable, with observations in the columns. Default: True ddof(Bool, optional): If ddof=True will return the unbiased estimate, and ddof=False will return the simple average. Default: True fweights(Tensor, optional): 1-D Tensor of integer frequency weights; The number of times each observation vector should be repeated. Default: None aweights(Tensor, optional): 1-D Tensor of observation vector weights. How important of the observation vector, larger data means this element is more important. Default: None name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. 
Details: :ref:`api_guide_Name` Returns: Tensor: The covariance matrix Tensor of the variables. Examples: .. code-block:: python import paddle xt = paddle.rand((3,4)) paddle.linalg.cov(xt) ''' Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, [[0.07918842, 0.06127326, 0.01493049], [0.06127326, 0.06166256, 0.00302668], [0.01493049, 0.00302668, 0.01632146]]) ''' """ op_type = 'cov' if len(x.shape) > 2 or len(x.shape) < 1: raise ValueError( "Input(x) only support N-D (1<=N<=2) tensor in cov, but received " "length of Input(input) is %s." % len(x.shape)) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cov') nx = x if len(x.shape) == 1: nx = x.reshape((1, -1)) if not rowvar and nx.shape[0] != 1: nx = nx.t() w = None observation_num = nx.shape[1] if fweights is not None: w = fweights.astype(nx.dtype) if len(w.shape) > 1: raise ValueError( "Input(fweights) only support N-D (N<=1) tensor in cov, but received " "shape of Input(input) is %s." % len(fweights.shape)) if fweights.shape[0] != observation_num: raise ValueError( "The number of Input(fweights) should equal to x's dim[1]: {}, but received " "size of Input(fweights) is {}.".format(observation_num, fweights.shape[0])) if fweights.min() < 0: raise ValueError( "The value of Input(fweights) cannot be negtive, but received " "min of Input(fweights) is {}.".format(fweights.min())) if not paddle.all(fweights == paddle.round(fweights.astype('float64'))): raise ValueError("Input(fweights) must be integer ") if aweights is not None: aw = aweights.astype(nx.dtype) if len(aw.shape) > 1: raise ValueError( "Input(aweights) only support N-D (N<=1) tensor in cov, but received " "length of Input(input) is %s." % len(aweights.shape)) check_variable_and_dtype(aweights, 'dtype', ['float32', 'float64'], 'cov') if aweights.shape[0] != observation_num: raise ValueError( "The number of Input(aweights) should equal to x's dim[1]: {}, but received " "size of Input(aweights) is {}.".format(observation_num, aweights.shape[0])) if aweights.min() < 0: raise ValueError( "The value of Input(aweights) cannot be negtive, but received " "min of Input(aweights) is {}.".format(aweights.min())) if w is not None: w = w * aw else: w = aw w_sum = paddle.to_tensor(observation_num, dtype=nx.dtype) if fweights is not None or aweights is not None: w_sum = w.sum() if w_sum.item() == 0: raise ValueError("The sum of weights is zero, can't be normalized.") if w is not None: nx_w = nx * w avg = (nx_w).sum(axis=1) / w_sum else: avg = nx.sum(axis=1) / w_sum nx_w = nx if w is not None and aweights is not None and ddof == True: norm_factor = w_sum - (w * aweights).sum() / w_sum else: norm_factor = w_sum - ddof if norm_factor <= 0: norm_factor = paddle.to_tensor(0, dtype=nx.dtype) nx = nx - avg.unsqueeze(1) xxt = paddle.mm(nx, nx_w.t().conj()) cov = paddle.divide(xxt, norm_factor).squeeze() return cov def t(input, name=None): """ Transpose <=2-D tensor. 0-D and 1-D tensors are returned as it is and 2-D tensor is equal to the paddle.transpose function which perm dimensions set 0 and 1. Args: input (Tensor): The input Tensor. It is a N-D (N<=2) Tensor of data types float16, float32, float64, int32. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` Returns: Tensor: A transposed n-D Tensor, with data type being float16, float32, float64, int32, int64. For Example: .. 
code-block:: text # Example 1 (0-D tensor) x = tensor([0.79]) paddle.t(x) = tensor([0.79]) # Example 2 (1-D tensor) x = tensor([0.79, 0.84, 0.32]) paddle.t(x) = tensor([0.79, 0.84, 0.32]) # Example 3 (2-D tensor) x = tensor([0.79, 0.84, 0.32], [0.64, 0.14, 0.57]) paddle.t(x) = tensor([0.79, 0.64], [0.84, 0.14], [0.32, 0.57]) Examples: .. code-block:: python import paddle x = paddle.ones(shape=[2, 3], dtype='int32') x_transposed = paddle.t(x) print(x_transposed.shape) # [3, 2] """ if len(input.shape) > 2: raise ValueError( "Input(input) only support N-D (N<=2) tensor, but received " "length of Input(input) is %s. Perhaps you can use paddle." "tensor.transpose() instead." % len(input.shape)) if paddle.in_dynamic_mode(): if len(input.shape) == 1: return input # 2-D tensor perm = [1, 0] out, _ = _C_ops.transpose2(input, 'axis', perm) return out check_variable_and_dtype( input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'], 'transpose') helper = LayerHelper('t', **locals()) out = helper.create_variable_for_type_inference(input.dtype) input_shape = helper.create_variable_for_type_inference(input.dtype) if len(input.shape) == 1: out = input else: helper.append_op( type='transpose2', inputs={'X': [input]}, outputs={'Out': [out], 'XShape': [input_shape]}, attrs={'axis': [1, 0]}) return out def cross(x, y, axis=None, name=None): """ Computes the cross product between two tensors along an axis. Inputs must have the same shape, and the length of their axes should be equal to 3. If `axis` is not given, it defaults to the first axis found with the length 3. Args: x (Tensor): The first input tensor. y (Tensor): The second input tensor. axis (int, optional): The axis along which to compute the cross product. It defaults to the first axis found with the length 3. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor. A Tensor with same data type as `x`. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0], [3.0, 3.0, 3.0]]) y = paddle.to_tensor([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]) z1 = paddle.cross(x, y) # [[-1. -1. -1.] # [ 2. 2. 2.] # [-1. -1. -1.]] z2 = paddle.cross(x, y, axis=1) # [[0. 0. 0.] # [0. 0. 0.] # [0. 0. 0.]] """ if in_dygraph_mode(): return _C_ops.final_state_cross(x, y, axis) else: if _in_legacy_dygraph(): if axis is not None: return _C_ops.cross(x, y, 'dim', axis) else: return _C_ops.cross(x, y) else: helper = LayerHelper("cross", **locals()) out = helper.create_variable_for_type_inference(x.dtype) attrs = dict() attrs['dim'] = axis helper.append_op( type='cross', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs=attrs) return out def cholesky(x, upper=False, name=None): r""" Computes the Cholesky decomposition of one symmetric positive-definite matrix or batches of symmetric positive-definite matrice. If `upper` is `True`, the decomposition has the form :math:`A = U^{T}U` , and the returned matrix :math:`U` is upper-triangular. Otherwise, the decomposition has the form :math:`A = LL^{T}` , and the returned matrix :math:`L` is lower-triangular. Args: x (Tensor): The input tensor. Its shape should be `[*, M, M]`, where * is zero or more batch dimensions, and matrices on the inner-most 2 dimensions all should be symmetric positive-definite. Its data type should be float32 or float64. upper (bool): The flag indicating whether to return upper or lower triangular matrices. Default: False. 
Returns: Tensor: A Tensor with same shape and data type as `x`. It represents \ triangular matrices generated by Cholesky decomposition. Examples: .. code-block:: python import paddle import numpy as np a = np.random.rand(3, 3) a_t = np.transpose(a, [1, 0]) x_data = np.matmul(a, a_t) + 1e-03 x = paddle.to_tensor(x_data) out = paddle.linalg.cholesky(x, upper=False) print(out) # [[1.190523 0. 0. ] # [0.9906703 0.27676893 0. ] # [1.25450498 0.05600871 0.06400121]] """ if paddle.in_dynamic_mode(): return _C_ops.cholesky(x, "upper", upper) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cholesky') check_type(upper, 'upper', bool, 'cholesky') helper = LayerHelper('cholesky', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='cholesky', inputs={'X': [x]}, outputs={'Out': out}, attrs={'upper': upper}) return out def matrix_rank(x, tol=None, hermitian=False, name=None): r""" Computes the rank of a matrix. The rank of a matrix is the number of singular values that are greater than the specified `tol` threshold when hermitian=False, or the number of eigenvalues in absolute value that are greater than the specified `tol` threshold when hermitian=True. Args: x (Tensor): The input tensor. Its shape should be `[..., m, n]`, where `...` is zero or more batch dimensions. If `x` is a batch of matrices then the output has the same batch dimensions. The data type of `x` should be float32 or float64. tol (float,Tensor,optional): the tolerance value. Default: None. If `tol` is not specified, and `sigma` is the largest singular value (or eigenvalues in absolute value), and `eps` is the epsilon value for the dtype of `x`, then `tol` is computed with formula `tol=sigma * max(m,n) * eps`. Note that if `x` is a batch of matrices, `tol` is computed this way for every batch. hermitian (bool,optional): indicates whether `x` is Hermitian. Default: False. When hermitian=True, `x` is assumed to be Hermitian, enabling a more efficient method for finding eigenvalues, but `x` is not checked inside the function. Instead, We just use the lower triangular of the matrix to compute. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: Rank of tensor x. Examples: .. 
code-block:: python import paddle a = paddle.eye(10) b = paddle.linalg.matrix_rank(a) print(b) # b = [10] c = paddle.ones(shape=[3, 4, 5, 5]) d = paddle.linalg.matrix_rank(c, tol=0.01, hermitian=True) print(d) # d = [[1, 1, 1, 1], # [1, 1, 1, 1], # [1, 1, 1, 1]] """ if paddle.in_dynamic_mode(): if tol is None: tol_tensor = None tol_attr = 0.0 use_default_tol = True elif isinstance(tol, Variable): if tol.dtype != x.dtype: tol_tensor = cast(tol, x.dtype) else: tol_tensor = tol tol_attr = 0.0 use_default_tol = False else: tol_tensor = None tol_attr = float(tol) use_default_tol = False return _C_ops.matrix_rank(x, tol_tensor, "tol", tol_attr, 'hermitian', hermitian, 'use_default_tol', use_default_tol) inputs = {} attrs = {} check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'matrix_rank') inputs['X'] = x if tol is None: attrs['use_default_tol'] = True elif isinstance(tol, Variable): check_variable_and_dtype(tol, 'tol', ['float32'], 'matrix_rank') attrs['use_default_tol'] = False if tol.dtype != x.dtype: inputs['TolTensor'] = cast(tol, x.dtype) else: inputs['TolTensor'] = tol else: check_type(tol, 'tol', float, 'matrix_rank') attrs['use_default_tol'] = False attrs['tol'] = tol check_type(hermitian, 'hermitian', bool, 'matrix_rank') attrs['hermitian'] = hermitian helper = LayerHelper('matrix_rank', **locals()) out = helper.create_variable_for_type_inference(dtype='int32') helper.append_op( type='matrix_rank', inputs=inputs, outputs={'Out': out}, attrs=attrs) return out def bmm(x, y, name=None): """ Applies batched matrix multiplication to two tensors. Both of the two input tensors must be three-dementional and share the same batch size. if x is a (b, m, k) tensor, y is a (b, k, n) tensor, the output will be a (b, m, n) tensor. Args: x (Tensor): The input Tensor. y (Tensor): The input Tensor. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The product Tensor. Examples: .. code-block:: python import paddle # In imperative mode: # size x: (2, 2, 3) and y: (2, 3, 2) x = paddle.to_tensor([[[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]], [[3.0, 3.0, 3.0], [4.0, 4.0, 4.0]]]) y = paddle.to_tensor([[[1.0, 1.0],[2.0, 2.0],[3.0, 3.0]], [[4.0, 4.0],[5.0, 5.0],[6.0, 6.0]]]) out = paddle.bmm(x, y) #output size: (2, 2, 2) #output value: #[[[6.0, 6.0],[12.0, 12.0]],[[45.0, 45.0],[60.0, 60.0]]] out_np = out.numpy() """ x_shape = x.shape y_shape = y.shape if not len(x_shape) == len(y_shape) == 3: raise ValueError( "x and y should be 3-dimensional. But received x's dimention: {}, y's dimention: {}". format(x_shape, y_shape)) if x_shape[2] != y_shape[1]: raise ValueError( "x's width must be equal with y's height. But received x's shape: {}, y's shape: {}". format(x_shape, y_shape)) if x_shape[0] != y_shape[0]: raise ValueError( "x's batch (shape[0]) must be equal with y's batch (shape[0]). But received x's shape: {}, y's shape: {}". format(x_shape, y_shape)) if paddle.in_dynamic_mode(): return _C_ops.bmm(x, y) helper = LayerHelper('bmm', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type='bmm', inputs={'X': x, 'Y': y}, outputs={'Out': out}) return out def histogram(input, bins=100, min=0, max=0, name=None): """ Computes the histogram of a tensor. The elements are sorted into equal width bins between min and max. If min and max are both zero, the minimum and maximum values of the data are used. Args: input (Tensor): A Tensor(or LoDTensor) with shape :math:`[N_1, N_2,..., N_k]` . 
The data type of the input Tensor should be float32, float64, int32, int64. bins (int): number of histogram bins min (int): lower end of the range (inclusive) max (int): upper end of the range (inclusive) Returns: Tensor: data type is int64, shape is (nbins,). Examples: .. code-block:: python import paddle inputs = paddle.to_tensor([1, 2, 1]) result = paddle.histogram(inputs, bins=4, min=0, max=3) print(result) # [0, 2, 1, 0] """ if paddle.in_dynamic_mode(): return _C_ops.histogram(input, "bins", bins, "min", min, "max", max) helper = LayerHelper('histogram', **locals()) check_variable_and_dtype( input, 'X', ['int32', 'int64', 'float32', 'float64'], 'histogram') out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64) helper.append_op( type='histogram', inputs={'X': input}, outputs={'Out': out}, attrs={'bins': bins, 'min': min, 'max': max}) return out def bincount(x, weights=None, minlength=0, name=None): """ Computes frequency of each value in the input tensor. Args: x (Tensor): A Tensor with non-negative integer. Should be 1-D tensor. weights (Tensor, optional): Weight for each value in the input tensor. Should have the same shape as input. Default is None. minlength (int, optional): Minimum number of bins. Should be non-negative integer. Default is 0. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor of frequency. Examples: .. code-block:: python import paddle x = paddle.to_tensor([1, 2, 1, 4, 5]) result1 = paddle.bincount(x) print(result1) # [0, 2, 1, 0, 1, 1] w = paddle.to_tensor([2.1, 0.4, 0.1, 0.5, 0.5]) result2 = paddle.bincount(x, weights=w) print(result2) # [0., 2.19999981, 0.40000001, 0., 0.50000000, 0.50000000] """ if x.dtype not in [paddle.int32, paddle.int64]: raise TypeError("Elements in Input(x) should all be integers") if paddle.in_dynamic_mode(): return _C_ops.bincount(x, weights, "minlength", minlength) helper = LayerHelper('bincount', **locals()) check_variable_and_dtype(x, 'X', ['int32', 'int64'], 'bincount') if weights is not None: check_variable_and_dtype(weights, 'Weights', ['int32', 'int64', 'float32', 'float64'], 'bincount') out = helper.create_variable_for_type_inference(dtype=weights.dtype) else: out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='bincount', inputs={'X': x, 'Weights': weights}, outputs={'Out': out}, attrs={'minlength': minlength}) return out def mv(x, vec, name=None): """ Performs a matrix-vector product of the matrix x and the vector vec. Args: x (Tensor): A tensor with shape :math:`[M, N]` , The data type of the input Tensor x should be one of float32, float64. vec (Tensor): A tensor with shape :math:`[N]` , The data type of the input Tensor x should be one of float32, float64. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor which is producted by x and vec. Examples: .. 
code-block:: python # x: [M, N], vec: [N] # paddle.mv(x, vec) # out: [M] import numpy as np import paddle x_data = np.array([[2, 1, 3], [3, 0, 1]]).astype("float64") x = paddle.to_tensor(x_data) vec_data = np.array([3, 5, 1]) vec = paddle.to_tensor(vec_data).astype("float64") out = paddle.mv(x, vec) """ if in_dygraph_mode(): return _C_ops.final_state_mv(x, vec) else: if _in_legacy_dygraph(): out = _C_ops.mv(x, vec) return out else: def __check_input(x, vec): var_names = {'x': x, 'vec': vec} for name, val in var_names.items(): check_variable_and_dtype(val, name, ['float32', 'float64'], 'mv') x_shape = list(x.shape) vec_shape = list(vec.shape) if len(x_shape) != 2: raise ValueError( "x should be 2-dimensional. But received x's dimention: {}". format(x_shape)) if len(vec_shape) != 1: raise ValueError( "vec should be 1-dimensional. But received vec's dimention: {}". format(vec_shape)) __check_input(x, vec) helper = LayerHelper('mv', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='mv', inputs={'X': x, 'Vec': vec}, outputs={'Out': out}) return out def det(x, name=None): """ Calculates determinant value of a square matrix or batches of square matrices. Args: x (Tensor): input (Tensor): the input matrix of size `(n, n)` or the batch of matrices of size `(*, n, n)` where `*` is one or more batch dimensions. Returns: y (Tensor):the determinant value of a square matrix or batches of square matrices. Examples: .. code-block:: python import paddle x = paddle.randn([3,3,3]) A = paddle.linalg.det(x) print(A) # [ 0.02547996, 2.52317095, -6.15900707]) """ if paddle.in_dynamic_mode(): return _C_ops.determinant(x) check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'det') input_shape = list(x.shape) assert len(input_shape) >= 2, \ "The x must be at least 2-dimensional, " \ "but received Input x's dimensional: %s.\n" % \ len(input_shape) assert (input_shape[-1] == input_shape[-2]), \ "Expect squared input," \ "but received %s by %s matrix.\n" \ %(input_shape[-2], input_shape[-1]) \ helper = LayerHelper('determinant', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='determinant', inputs={'Input': [x]}, outputs={'Out': [out]}) return out def slogdet(x, name=None): """ Calculates the sign and natural logarithm of the absolute value of a square matrix's or batches square matrices' determinant. The determinant can be computed with ``sign * exp(logabsdet) Supports input of float, double Note that for matrices that have zero determinant, this returns ``(0, -inf)`` Args: x (Tensor): the batch of matrices of size :math:`(*, n, n)` where math:`*` is one or more batch dimensions. Returns: y (Tensor): A tensor containing the sign of the determinant and the natural logarithm of the absolute value of determinant, respectively. Examples: .. code-block:: python import paddle x = paddle.randn([3,3,3]) A = paddle.linalg.slogdet(x) print(A) # [[ 1. , 1. , -1. 
], # [-0.98610914, -0.43010661, -0.10872950]]) """ if paddle.in_dynamic_mode(): return _C_ops.slogdeterminant(x) check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'slogdet') input_shape = list(x.shape) assert len(input_shape) >= 2, \ "The x must be at least 2-dimensional, " \ "but received Input x's dimensional: %s.\n" % \ len(input_shape) assert (input_shape[-1] == input_shape[-2]), \ "Expect squared input," \ "but received %s by %s matrix.\n" \ %(input_shape[-2], input_shape[-1]) \ helper = LayerHelper('slogdeterminant', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='slogdeterminant', inputs={'Input': [x]}, outputs={'Out': [out]}) return out def svd(x, full_matrices=False, name=None): r""" Computes the singular value decomposition of one matrix or a batch of regular matrices. Let :math:`X` be the input matrix or a batch of input matrices, the output should satisfies: .. math:: X = U * diag(S) * VT Args: x (Tensor): The input tensor. Its shape should be `[..., N, M]`, where `...` is zero or more batch dimensions. N and M can be arbitraty positive number. Note that if x is sigular matrices, the grad is numerical instable. The data type of x should be float32 or float64. full_matrices (bool): A flag to control the behavor of svd. If full_matrices = True, svd op will compute full U and V matrics, which means shape of U is `[..., N, N]`, shape of V is `[..., M, M]`. K = min(M, N). If full_matrices = False, svd op will use a economic method to store U and V. which means shape of U is `[..., N, K]`, shape of V is `[..., M, K]`. K = min(M, N). name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tuple of 3 tensors: (U, S, VH). VH is the conjugate transpose of V. S is the singlar value vectors of matrics with shape `[..., K]` Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [1.0, 3.0], [4.0, 6.0]]).astype('float64') x = x.reshape([3, 2]) u, s, vh = paddle.linalg.svd(x) print (u) #U = [[ 0.27364809, -0.21695147 ], # [ 0.37892198, -0.87112408 ], # [ 0.8840446 , 0.44053933 ]] print (s) #S = [8.14753743, 0.78589688] print (vh) #VT= [[ 0.51411221, 0.85772294], # [ 0.85772294, -0.51411221]] # one can verify : U * S * VT == X # U * UH == I # V * VH == I """ if paddle.in_dynamic_mode(): return _C_ops.svd(x, 'full_matrices', full_matrices) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'svd') check_type(full_matrices, 'full_matrices', bool, 'svd') helper = LayerHelper('svd', **locals()) u = helper.create_variable_for_type_inference(dtype=x.dtype) vh = helper.create_variable_for_type_inference(dtype=x.dtype) s = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['full_matrices'] = full_matrices helper.append_op( type='svd', inputs={'X': [x]}, outputs={'U': u, 'VH': vh, 'S': s}, attrs=attrs, ) return u, s, vh def matrix_power(x, n, name=None): r""" Computes the n-th power of a square matrix or a batch of square matrices. Let :math:`X` be a sqaure matrix or a batch of square matrices, :math:`n` be an exponent, the equation should be: .. math:: Out = X ^ {n} Specifically, - If `n > 0`, it returns the matrix or a batch of matrices raised to the power of `n`. - If `n = 0`, it returns the identity matrix or a batch of identity matrices. - If `n < 0`, it returns the inverse of each matrix (if invertible) raised to the power of `abs(n)`. 
Args: x (Tensor): A square matrix or a batch of square matrices to be raised to power `n`. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. n (int): The exponent. It can be any positive, negative integer or zero. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The n-th power of the matrix (or the batch of matrices) `x`. Its data type should be the same as that of `x`. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1, 2, 3], [1, 4, 9], [1, 8, 27]], dtype='float64') print(paddle.linalg.matrix_power(x, 2)) # [[6. , 34. , 102.], # [14. , 90. , 282.], # [36. , 250., 804.]] print(paddle.linalg.matrix_power(x, 0)) # [[1., 0., 0.], # [0., 1., 0.], # [0., 0., 1.]] print(paddle.linalg.matrix_power(x, -2)) # [[ 12.91666667, -12.75000000, 2.83333333 ], # [-7.66666667 , 8. , -1.83333333 ], # [ 1.80555556 , -1.91666667 , 0.44444444 ]] """ if paddle.in_dynamic_mode(): return _C_ops.matrix_power(x, "n", n) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'matrix_power') check_type(n, 'n', int, 'matrix_power') helper = LayerHelper('matrix_power', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matrix_power', inputs={'X': x}, outputs={'Out': out}, attrs={'n': n}) return out def qr(x, mode="reduced", name=None): r""" Computes the QR decomposition of one matrix or batches of matrice (backward is unsupported now). Args: x (Tensor): The input tensor. Its shape should be `[..., M, N]`, where ... is zero or more batch dimensions. M and N can be arbitrary positive number. The data type of x should be float32 or float64. mode (str, optional): A flag to control the behavior of qr, the default is "reduced". Suppose x's shape is `[..., M, N]` and denoting `K = min(M, N)`: If mode = "reduced", qr op will return reduced Q and R matrices, which means Q's shape is `[..., M, K]` and R's shape is `[..., K, N]`. If mode = "complete", qr op will return complete Q and R matrices, which means Q's shape is `[..., M, M]` and R's shape is `[..., M, N]`. If mode = "r", qr op will only return reduced R matrix, which means R's shape is `[..., K, N]`. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: If mode = "reduced" or mode = "complete", qr will return a two tensor-tuple, which represents Q and R. If mode = "r", qr will return a tensor which represents R. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') q, r = paddle.linalg.qr(x) print (q) print (r) # Q = [[-0.16903085, 0.89708523], # [-0.50709255, 0.27602622], # [-0.84515425, -0.34503278]]) # R = [[-5.91607978, -7.43735744], # [ 0. 
, 0.82807867]]) # one can verify : X = Q * R ; """ if paddle.in_dynamic_mode(): q, r = _C_ops.qr(x, 'mode', mode) if mode == "r": return r else: return q, r check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'qr') check_type(mode, 'mode', str, 'qr') helper = LayerHelper('qr', **locals()) q = helper.create_variable_for_type_inference(dtype=x.dtype) r = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['mode'] = mode helper.append_op( type='qr', inputs={'X': [x]}, outputs={'Q': q, 'R': r}, attrs=attrs) if mode == "r": return r else: return q, r def lu(x, pivot=True, get_infos=False, name=None): r""" Computes the LU factorization of an N-D(N>=2) matrix x. Returns the LU factorization(inplace x) and Pivots. low triangular matrix L and upper triangular matrix U are combined to a single LU matrix. Pivoting is done if pivot is set to True. P mat can be get by pivots: # ones = eye(rows) #eye matrix of rank rows # for i in range(cols): # swap(ones[i], ones[pivots[i]]) # return ones Args: X (Tensor): the tensor to factor of N-dimensions(N>=2). pivot (bool, optional): controls whether pivoting is done. Default: True. get_infos (bool, optional): if set to True, returns an info IntTensor. Default: False. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: factorization (Tensor): LU matrix, the factorization of input X. pivots (IntTensor): the pivots of size(∗(N-2), min(m,n)). `pivots` stores all the intermediate transpositions of rows. The final permutation `perm` could be reconstructed by this, details refer to upper example. infos (IntTensor, optional): if `get_infos` is `True`, this is a tensor of size (∗(N-2)) where non-zero values indicate whether factorization for the matrix or each minibatch has succeeded or failed. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') lu,p,info = paddle.linalg.lu(x, get_infos=True) # >>> lu: # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0.20000000, 0.80000000], # [0.60000000, 0.50000000]]) # >>> p # Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # [3, 3]) # >>> info # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # 0) P,L,U = paddle.linalg.lu_unpack(lu,p) # >>> P # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[0., 1., 0.], # [0., 0., 1.], # [1., 0., 0.]]), # >>> L # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[1. , 0. ], # [0.20000000, 1. ], # [0.60000000, 0.50000000]]), # >>> U # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0. 
, 0.80000000]])) # one can verify : X = P @ L @ U ; """ if paddle.in_dynamic_mode(): LU, Piv, Info = _C_ops.lu(x, 'pivots', pivot) if get_infos: return LU, Piv, Info else: return LU, Piv check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu') helper = LayerHelper('lu', **locals()) lu = helper.create_variable_for_type_inference(dtype=x.dtype) p = helper.create_variable_for_type_inference(dtype='int') info = helper.create_variable_for_type_inference(dtype='int') attrs = dict() attrs['pivots'] = pivot helper.append_op( type='lu', inputs={'X': x}, outputs={'Out': lu, 'Pivots': p, 'Infos': info}, attrs=attrs) if get_infos: return lu, p, info else: return lu, p def lu_unpack(x, y, unpack_ludata=True, unpack_pivots=True, name=None): r""" Unpack L U and P to single matrix tensor . unpack L and U matrix from LU, unpack permutation matrix P from Pivtos . P mat can be get by pivots: # ones = eye(rows) #eye matrix of rank rows # for i in range(cols): # swap(ones[i], ones[pivots[i]]) Args: x (Tensor): The LU tensor get from paddle.linalg.lu, which is combined by L and U. y (Tensor): Pivots get from paddle.linalg.lu. unpack_ludata (bool,optional): whether to unpack L and U from x. Default: True. unpack_pivots (bool, optional): whether to unpack permutation matrix P from Pivtos. Default: True. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: P (Tensor): Permutation matrix P of lu factorization. L (Tensor): The lower triangular matrix tensor of lu factorization. U (Tensor): The upper triangular matrix tensor of lu factorization. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') lu,p,info = paddle.linalg.lu(x, get_infos=True) # >>> lu: # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0.20000000, 0.80000000], # [0.60000000, 0.50000000]]) # >>> p # Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # [3, 3]) # >>> info # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # 0) P,L,U = paddle.linalg.lu_unpack(lu,p) # >>> P # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[0., 1., 0.], # [0., 0., 1.], # [1., 0., 0.]]), # >>> L # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[1. , 0. ], # [0.20000000, 1. ], # [0.60000000, 0.50000000]]), # >>> U # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0. , 0.80000000]])) # one can verify : X = P @ L @ U ; """ if paddle.in_dynamic_mode(): P, L, U = _C_ops.lu_unpack(x, y, 'unpack_ludata', unpack_ludata, 'unpack_pivots', unpack_pivots) return P, L, U check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu_unpack') helper = LayerHelper('lu_unpack', **locals()) p = helper.create_variable_for_type_inference(dtype=x.dtype) l = helper.create_variable_for_type_inference(dtype=x.dtype) u = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['unpack_ludata'] = unpack_ludata attrs['unpack_pivots'] = unpack_pivots helper.append_op( type='lu_unpack', inputs={'X': x, 'Pivots': y}, outputs={'Pmat': p, 'L': l, 'U': u}, attrs=attrs) return p, l, u def eig(x, name=None): """ This API performs the eigenvalue decomposition of a square matrix or a batch of square matrices. .. 
note:: If the matrix is a Hermitian or a real symmetric matrix, please use :ref:`paddle.linalg.eigh` instead, which is much faster. If only eigenvalues is needed, please use :ref:`paddle.linalg.eigvals` instead. If the matrix is of any shape, please use :ref:`paddle.linalg.svd`. This API is only supported on CPU device. The output datatype is always complex for both real and complex input. Args: x (Tensor): A tensor with shape math:`[*, N, N]`, The data type of the x should be one of ``float32``, ``float64``, ``compplex64`` or ``complex128``. name (str, optional): The default value is `None`. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Eigenvalues(Tensors): A tensor with shape math:`[*, N]` refers to the eigen values. Eigenvectors(Tensors): A tensor with shape math:`[*, N, N]` refers to the eigen vectors. Examples: .. code-block:: python import paddle import numpy as np paddle.device.set_device("cpu") x_data = np.array([[1.6707249, 7.2249975, 6.5045543], [9.956216, 8.749598, 6.066444 ], [4.4251957, 1.7983172, 0.370647 ]]).astype("float32") x = paddle.to_tensor(x_data) w, v = paddle.linalg.eig(x) print(w) # Tensor(shape=[3, 3], dtype=complex128, place=CPUPlace, stop_gradient=False, # [[(-0.5061363550800655+0j) , (-0.7971760990842826+0j) , # (0.18518077798279986+0j)], # [(-0.8308237755993192+0j) , (0.3463813401919749+0j) , # (-0.6837005269141947+0j) ], # [(-0.23142567697893396+0j), (0.4944999840400175+0j) , # (0.7058765252952796+0j) ]]) print(v) # Tensor(shape=[3], dtype=complex128, place=CPUPlace, stop_gradient=False, # [ (16.50471283351188+0j) , (-5.5034820550763515+0j) , # (-0.21026087843552282+0j)]) """ if paddle.in_dynamic_mode(): w, v = _C_ops.eig(x) return w, v check_variable_and_dtype( x, 'X', ['float32', 'float64', 'complex64', 'complex128'], 'eig') helper = LayerHelper('eig', **locals()) w = helper.create_variable_for_type_inference(x.dtype) v = helper.create_variable_for_type_inference(x.dtype) inputs = {'X': x} outputs = {'Eigenvalues': w, 'Eigenvectors': v} helper.append_op(type='eig', inputs=inputs, outputs=outputs) return w, v def eigvals(x, name=None): """ Compute the eigenvalues of one or more general matrices. Warning: The gradient kernel of this operator does not yet developed. If you need back propagation through this operator, please replace it with paddle.linalg.eig. Args: x (Tensor): A square matrix or a batch of square matrices whose eigenvalues will be computed. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32, float64, complex64, or complex128. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: A tensor containing the unsorted eigenvalues which has the same batch dimensions with `x`. The eigenvalues are complex-valued even when `x` is real. Examples: .. 
code-block:: python import paddle paddle.set_device("cpu") paddle.seed(1234) x = paddle.rand(shape=[3, 3], dtype='float64') # [[0.02773777, 0.93004224, 0.06911496], # [0.24831591, 0.45733623, 0.07717843], # [0.48016702, 0.14235102, 0.42620817]]) print(paddle.linalg.eigvals(x)) # [(-0.27078833542132674+0j), (0.29962280156230725+0j), (0.8824477020120244+0j)] #complex128 """ check_variable_and_dtype(x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigvals') x_shape = list(x.shape) if len(x_shape) < 2: raise ValueError( "The dimension of Input(x) should be at least 2, but received x's dimention = {}, x's shape = {}". format(len(x_shape), x_shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The last two dimensions of Input(x) should be equal, but received x's shape = {}". format(x_shape)) if paddle.in_dynamic_mode(): return _C_ops.eigvals(x) helper = LayerHelper('eigvals', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type='eigvals', inputs={'X': x}, outputs={'Out': out}) return out def multi_dot(x, name=None): """ Multi_dot is an operator that calculates multiple matrix multiplications. Supports inputs of float16(only GPU support), float32 and float64 dtypes. This function does not support batched inputs. The input tensor in [x] must be 2-D except for the first and last can be 1-D. If the first tensor is a 1-D vector of shape(n, ) it is treated as row vector of shape(1, n), similarly if the last tensor is a 1D vector of shape(n, ), it is treated as a column vector of shape(n, 1). If the first and last tensor are 2-D matrix, then the output is also 2-D matrix, otherwise the output is a 1-D vector. Multi_dot will select the lowest cost multiplication order for calculation. The cost of multiplying two matrices with shapes (a, b) and (b, c) is a * b * c. Given matrices A, B, C with shapes (20, 5), (5, 100), (100, 10) respectively, we can calculate the cost of different multiplication orders as follows: - Cost((AB)C) = 20x5x100 + 20x100x10 = 30000 - Cost(A(BC)) = 5x100x10 + 20x5x10 = 6000 In this case, multiplying B and C first, then multiply A, which is 5 times faster than sequential calculation. Args: x ([Tensor]): The input tensors which is a list Tensor. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The output Tensor. Examples: .. 
code-block:: python import paddle import numpy as np # A * B A_data = np.random.random([3, 4]).astype(np.float32) B_data = np.random.random([4, 5]).astype(np.float32) A = paddle.to_tensor(A_data) B = paddle.to_tensor(B_data) out = paddle.linalg.multi_dot([A, B]) print(out.numpy().shape) # [3, 5] # A * B * C A_data = np.random.random([10, 5]).astype(np.float32) B_data = np.random.random([5, 8]).astype(np.float32) C_data = np.random.random([8, 7]).astype(np.float32) A = paddle.to_tensor(A_data) B = paddle.to_tensor(B_data) C = paddle.to_tensor(C_data) out = paddle.linalg.multi_dot([A, B, C]) print(out.numpy().shape) # [10, 7] """ if paddle.in_dynamic_mode(): return _C_ops.multi_dot(x) check_type(x, 'x', (list, tuple), 'multi_dot') for id, item in enumerate(x): check_variable_and_dtype(item, 'x[' + str(id) + ']', ['float16', 'float32', 'float64'], 'multi_dot') if item.dtype != x[0].dtype: raise TypeError( "All the Tensors in the input must have the same data type.") helper = LayerHelper('multi_dot', **locals()) dtype = helper.input_dtype(input_param_name='x') out = helper.create_variable_for_type_inference(dtype) helper.append_op(type='multi_dot', inputs={"X": x}, outputs={"Out": out}) return out def eigh(x, UPLO='L', name=None): """ Compute the eigenvalues and eigenvectors of a complex Hermitian (conjugate symmetric) or a real symmetric matrix. Args: x (Tensor): A tensor with shape :math:`[*, N, N]` , The data type of the input Tensor x should be one of float32, float64, complex64, complex128. UPLO(str, optional): (string, default 'L'), 'L' represents the lower triangular matrix, "'U' represents the upper triangular matrix.". name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: out_value(Tensor): A Tensor with shape [*, N] and data type of float32 and float64. The eigenvalues of eigh op. out_vector(Tensor): A Tensor with shape [*, N, N] and data type of float32,float64,complex64 and complex128. The eigenvectors of eigh op. Examples: .. code-block:: python import numpy as np import paddle x_data = np.array([[1, -2j], [2j, 5]]) x = paddle.to_tensor(x_data) out_value, out_vector = paddle.linalg.eigh(x, UPLO='L') print(out_value) #[0.17157288, 5.82842712] print(out_vector) #[(-0.9238795325112867+0j), (-0.3826834323650898+0j)], #[ 0.3826834323650898j , -0.9238795325112867j ]] """ if paddle.in_dynamic_mode(): return _C_ops.eigh(x, 'UPLO', UPLO) def __check_input(x, UPLO): x_shape = list(x.shape) if len(x.shape) < 2: raise ValueError( "Input(input) only support >=2 tensor, but received " "length of Input(input) is %s." % len(x.shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The input matrix must be batches of square matrices. But received x's dimention: {}". format(x_shape)) if UPLO != 'L' and UPLO != 'U': raise ValueError( "UPLO must be L or U. 
But received UPLO is: {}".format(UPLO)) __check_input(x, UPLO) helper = LayerHelper('eigh', **locals()) check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigh') out_value = helper.create_variable_for_type_inference(dtype=x.dtype) out_vector = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='eigh', inputs={'X': x}, outputs={'Eigenvalues': out_value, 'Eigenvectors': out_vector}, attrs={'UPLO': UPLO}) return out_value, out_vector def pinv(x, rcond=1e-15, hermitian=False, name=None): r""" Calculate pseudo inverse via SVD(singular value decomposition) of one matrix or batches of regular matrix. .. math:: if hermitian == False: x = u * s * vt (SVD) out = v * 1/s * ut else: x = u * s * ut (eigh) out = u * 1/s * u.conj().transpose(-2,-1) If x is hermitian or symmetric matrix, svd will be replaced with eigh. Args: x(Tensor): The input tensor. Its shape should be (*, m, n) where * is zero or more batch dimensions. m and n can be arbitraty positive number. The data type of x should be float32 or float64 or complex64 or complex128. When data type is complex64 or cpmplex128, hermitian should be set True. rcond(Tensor, optional): the tolerance value to determine when is a singular value zero. Defalut:1e-15. hermitian(bool, optional): indicates whether x is Hermitian if complex or symmetric if real. Default: False. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The tensor with same data type with x. it represents pseudo inverse of x. Its shape should be (*, n, m). Examples: .. code-block:: python import paddle x = paddle.arange(15).reshape((3, 5)).astype('float64') input = paddle.to_tensor(x) out = paddle.linalg.pinv(input) print(input) print(out) # input: # [[0. , 1. , 2. , 3. , 4. ], # [5. , 6. , 7. , 8. , 9. 
], # [10., 11., 12., 13., 14.]] # out: # [[-0.22666667, -0.06666667, 0.09333333], # [-0.12333333, -0.03333333, 0.05666667], # [-0.02000000, 0.00000000, 0.02000000], # [ 0.08333333, 0.03333333, -0.01666667], # [ 0.18666667, 0.06666667, -0.05333333]] # one can verify : x * out * x = x ; # or out * x * out = x ; """ if paddle.in_dynamic_mode(): if not hermitian: # combine svd and matmul op u, s, vt = _C_ops.svd(x, 'full_matrices', False) max_singular_val = _C_ops.reduce_max(s, 'dim', [-1], 'keep_dim', True, \ 'reduce_all', False) rcond = paddle.to_tensor(rcond, dtype=x.dtype) cutoff = rcond * max_singular_val y = float('inf') y = paddle.to_tensor(y, dtype=x.dtype) condition = s > cutoff cond_int = layers.cast(condition, s.dtype) cond_not_int = layers.cast(layers.logical_not(condition), s.dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st, _ = _C_ops.unsqueeze2(singular, 'axes', [-2]) dims = list(range(len(vt.shape))) perm = dims[:-2] + [dims[-1]] + [dims[-2]] v, _ = _C_ops.transpose2(vt, 'axis', perm) out_1 = v * st out_2 = _C_ops.matmul_v2(out_1, u, 'trans_x', False, 'trans_y', True) return out_2 else: # combine eigh and matmul op s, u = _C_ops.eigh(x, 'UPLO', 'L') s_abs = paddle.abs(s) max_singular_val = _C_ops.reduce_max(s_abs, 'dim', [-1], 'keep_dim', True, \ 'reduce_all', False) rcond = paddle.to_tensor(rcond, dtype=s.dtype) cutoff = rcond * max_singular_val y = float('inf') y = paddle.to_tensor(y, dtype=s.dtype) condition = s_abs > cutoff cond_int = layers.cast(condition, s.dtype) cond_not_int = layers.cast(layers.logical_not(condition), s.dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st, _ = _C_ops.unsqueeze2(singular, 'axes', [-2]) out_1 = u * st u_conj = _C_ops.conj(u) out_2 = _C_ops.matmul_v2(out_1, u_conj, 'trans_x', False, 'trans_y', True) return out_2 else: if not hermitian: helper = LayerHelper('pinv', **locals()) dtype = x.dtype check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'pinv') u = helper.create_variable_for_type_inference(dtype) s = helper.create_variable_for_type_inference(dtype) vt = helper.create_variable_for_type_inference(dtype) helper.append_op( type='svd', inputs={'X': [x]}, outputs={'U': u, 'VH': vt, 'S': s}, attrs={'full_matrices': False}, ) max_singular_val = helper.create_variable_for_type_inference(dtype) helper.append_op( type='reduce_max', inputs={'X': s}, outputs={'Out': max_singular_val}, attrs={'dim': [-1], 'keep_dim': True, 'reduce_all': False}) rcond = layers.fill_constant(shape=[1], value=rcond, dtype=dtype) cutoff = rcond * max_singular_val y = float('inf') y = layers.fill_constant(shape=[1], value=y, dtype=dtype) condition = s > cutoff cond_int = layers.cast(condition, dtype) cond_not_int = layers.cast(layers.logical_not(condition), dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st = helper.create_variable_for_type_inference(dtype=dtype) st_shape = helper.create_variable_for_type_inference(dtype=dtype) helper.append_op( type='unsqueeze2', inputs={'X': singular}, attrs={'axes': [-2]}, outputs={'Out': st, 'XShape': st_shape}) dims = list(range(len(vt.shape))) perm = dims[:-2] + [dims[-1]] + [dims[-2]] v = helper.create_variable_for_type_inference(dtype) v_shape = helper.create_variable_for_type_inference(dtype) helper.append_op( 
type='transpose2', inputs={'X': [vt]}, outputs={'Out': [v], 'XShape': [v_shape]}, attrs={'axis': perm}) out_1 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='elementwise_mul', inputs={'X': v, 'Y': st}, outputs={'Out': out_1}, attrs={'axis': -1, 'use_mkldnn': False}) out_1 = helper.append_activation(out_1) out_2 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='matmul_v2', inputs={'X': out_1, 'Y': u}, outputs={'Out': out_2}, attrs={'trans_x': False, 'trans_y': True}, ) return out_2 else: helper = LayerHelper('pinv', **locals()) dtype = x.dtype check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'pinv') if dtype == paddle.complex128: s_type = 'float64' elif dtype == paddle.complex64: s_type = 'float32' else: s_type = dtype u = helper.create_variable_for_type_inference(dtype) s = helper.create_variable_for_type_inference(s_type) helper.append_op( type='eigh', inputs={'X': x}, outputs={'Eigenvalues': s, 'Eigenvectors': u}, attrs={'UPLO': 'L'}) s_abs = helper.create_variable_for_type_inference(s_type) helper.append_op( type='abs', inputs={'X': s}, outputs={'Out': s_abs}) max_singular_val = helper.create_variable_for_type_inference(s_type) helper.append_op( type='reduce_max', inputs={'X': s_abs}, outputs={'Out': max_singular_val}, attrs={'dim': [-1], 'keep_dim': True, 'reduce_all': False}) rcond = layers.fill_constant(shape=[1], value=rcond, dtype=s_type) cutoff = rcond * max_singular_val y = float('inf') y = layers.fill_constant(shape=[1], value=y, dtype=s_type) condition = s_abs > cutoff cond_int = layers.cast(condition, s_type) cond_not_int = layers.cast(layers.logical_not(condition), s_type) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st = helper.create_variable_for_type_inference(dtype=s_type) st_shape = helper.create_variable_for_type_inference(dtype=s_type) helper.append_op( type='unsqueeze2', inputs={'X': singular}, attrs={'axes': [-2]}, outputs={'Out': st, 'XShape': st_shape}) out_1 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='elementwise_mul', inputs={'X': u, 'Y': st}, outputs={'Out': out_1}, attrs={'axis': -1, 'use_mkldnn': False}) out_1 = helper.append_activation(out_1) u_conj = helper.create_variable_for_type_inference(dtype) helper.append_op( type='conj', inputs={'X': u}, outputs={'Out': [u_conj]}) out_2 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='matmul_v2', inputs={'X': out_1, 'Y': u_conj}, outputs={'Out': out_2}, attrs={'trans_x': False, 'trans_y': True}, ) return out_2 def solve(x, y, name=None): r""" Computes the solution of a square system of linear equations with a unique solution for input 'X' and 'Y'. Let :math: `X` be a sqaure matrix or a batch of square matrices, :math:`Y` be a vector/matrix or a batch of vectors/matrices, the equation should be: .. math:: Out = X^-1 * Y Specifically, - This system of linear equations has one solution if and only if input 'X' is invertible. Args: x (Tensor): A square matrix or a batch of square matrices. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): A vector/matrix or a batch of vectors/matrices. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. name(str, optional): Name for the operation (optional, default is None). 
For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of a square system of linear equations with a unique solution for input 'x' and 'y'. Its data type should be the same as that of `x`. Examples: .. code-block:: python # a square system of linear equations: # 2*X0 + X1 = 9 # X0 + 2*X1 = 8 import paddle import numpy as np np_x = np.array([[3, 1],[1, 2]]) np_y = np.array([9, 8]) x = paddle.to_tensor(np_x, dtype="float64") y = paddle.to_tensor(np_y, dtype="float64") out = paddle.linalg.solve(x, y) print(out) # [2., 3.]) """ if paddle.in_dynamic_mode(): return _C_ops.solve(x, y) inputs = {"X": [x], "Y": [y]} helper = LayerHelper("solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type="solve", inputs={"X": x, "Y": y}, outputs={"Out": out}) return out def triangular_solve(x, y, upper=True, transpose=False, unitriangular=False, name=None): r""" Computes the solution of a system of equations with a triangular coefficient matrix `x` and multiple right-hand sides `y` . Input `x` and `y` is 2D matrices or batches of 2D matrices. If the inputs are batches, the outputs is also batches. Args: x (Tensor): The input triangular coefficient matrix. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. upper (bool, optional): Whether to solve the upper-triangular system of equations (default) or the lower-triangular system of equations. Default: True. transpose (bool, optional): whether `x` should be transposed before calculation. Default: False. unitriangular (bool, optional): whether `x` is unit triangular. If True, the diagonal elements of `x` are assumed to be 1 and not referenced from `x` . Default: False. name(str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of the system of equations. Its data type should be the same as that of `x`. Examples: .. code-block:: python # a square system of linear equations: # x1 + x2 + x3 = 0 # 2*x2 + x3 = -9 # -x3 = 5 import paddle import numpy as np x = paddle.to_tensor([[1, 1, 1], [0, 2, 1], [0, 0,-1]], dtype="float64") y = paddle.to_tensor([[0], [-9], [5]], dtype="float64") out = paddle.linalg.triangular_solve(x, y, upper=True) print(out) # [7, -2, -5] """ if paddle.in_dynamic_mode(): return _C_ops.triangular_solve(x, y, 'upper', upper, 'transpose', transpose, 'unitriangular', unitriangular) inputs = {"X": [x], "Y": [y]} helper = LayerHelper("triangular_solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'triangular_solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'triangular_solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='triangular_solve', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs={ 'upper': upper, 'transpose': transpose, 'unitriangular': unitriangular }) return out def cholesky_solve(x, y, upper=False, name=None): r""" Solves a linear system of equations A @ X = B, given A's Cholesky factor matrix u and matrix B. Input `x` and `y` is 2D matrices or batches of 2D matrices. 
If the inputs are batches, the outputs is also batches. Args: x (Tensor): The input matrix which is upper or lower triangular Cholesky factor of square matrix A. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. upper (bool, optional): whether to consider the Cholesky factor as a lower or upper triangular matrix. Default: False. name(str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of the system of equations. Its data type is the same as that of `x`. Examples: .. code-block:: python import paddle u = paddle.to_tensor([[1, 1, 1], [0, 2, 1], [0, 0,-1]], dtype="float64") b = paddle.to_tensor([[0], [-9], [5]], dtype="float64") out = paddle.linalg.cholesky_solve(b, u, upper=True) print(out) # [-2.5, -7, 9.5] """ if paddle.in_dynamic_mode(): return _C_ops.cholesky_solve(x, y, 'upper', upper) helper = LayerHelper("cholesky_solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'cholesky_solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'cholesky_solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='cholesky_solve', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs={'upper': upper}) return out def eigvalsh(x, UPLO='L', name=None): """ Computes the eigenvalues of a complex Hermitian (conjugate symmetric) or a real symmetric matrix. Args: x (Tensor): A tensor with shape :math:`[_, M, M]` , The data type of the input Tensor x should be one of float32, float64, complex64, complex128. UPLO(str, optional): Lower triangular part of a (‘L’, default) or the upper triangular part (‘U’). name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor eigenvalues in ascending order. Examples: .. code-block:: python import numpy as np import paddle x_data = np.array([[1, -2j], [2j, 5]]) x = paddle.to_tensor(x_data) out_value = paddle.eigvalsh(x, UPLO='L') print(out_value) #[0.17157288, 5.82842712] """ if paddle.in_dynamic_mode(): is_test = x.stop_gradient values, _ = _C_ops.eigvalsh(x, 'UPLO', UPLO, 'is_test', is_test) return values def __check_input(x, UPLO): x_shape = list(x.shape) if len(x.shape) < 2: raise ValueError( "Input(input) only support >=2 tensor, but received " "length of Input(input) is %s." % len(x.shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The input matrix must be batches of square matrices. But received x's dimention: {}". format(x_shape)) if UPLO != 'L' and UPLO != 'U': raise ValueError( "UPLO must be L or U. 
But received UPLO is: {}".format(UPLO)) __check_input(x, UPLO) helper = LayerHelper('eigvalsh', **locals()) check_variable_and_dtype(x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigvalsh') out_value = helper.create_variable_for_type_inference(dtype=x.dtype) out_vector = helper.create_variable_for_type_inference(dtype=x.dtype) is_test = x.stop_gradient helper.append_op( type='eigvalsh', inputs={'X': x}, outputs={'Eigenvalues': out_value, 'Eigenvectors': out_vector}, attrs={'UPLO': UPLO, 'is_test': is_test}) return out_value def lstsq(x, y, rcond=None, driver=None, name=None): """ Computes a solution to the least squares problem of a system of linear equations. Args: x (Tensor): A tensor with shape ``(*, M, N)`` , the data type of the input Tensor ``x`` should be one of float32, float64. y (Tensor): A tensor with shape ``(*, M, K)`` , the data type of the input Tensor ``y`` should be one of float32, float64. rcond(float, optional): The default value is None. A float pointing number used to determine the effective rank of ``x``. If ``rcond`` is None, it will be set to max(M, N) times the machine precision of x_dtype. driver(str, optional): The default value is None. The name of LAPACK method to be used. For CPU inputs the valid values are ‘gels’, ‘gelsy’, ‘gelsd, ‘gelss’. For CUDA input, the only valid driver is ‘gels’. If ``driver`` is None, ‘gelsy’ is used for CPU inputs and ‘gels’ for CUDA inputs. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tuple: A tuple of 4 Tensors which is (``solution``, ``residuals``, ``rank``, ``singular_values``). ``solution`` is a tensor with shape ``(*, N, K)``, meaning the least squares solution. ``residuals`` is a tensor with shape ``(*, K)``, meaning the squared residuals of the solutions, which is computed when M > N and every matrix in ``x`` is full-rank, otherwise return an empty tensor. ``rank`` is a tensor with shape ``(*)``, meaning the ranks of the matrices in ``x``, which is computed when ``driver`` in (‘gelsy’, ‘gelsd’, ‘gelss’), otherwise return an empty tensor. ``singular_values`` is a tensor with shape ``(*, min(M, N))``, meaning singular values of the matrices in ``x``, which is computed when ``driver`` in (‘gelsd’, ‘gelss’), otherwise return an empty tensor. Examples: .. code-block:: python import paddle paddle.set_device("cpu") x = paddle.to_tensor([[1, 3], [3, 2], [5, 6.]]) y = paddle.to_tensor([[3, 4, 6], [5, 3, 4], [1, 2, 1.]]) results = paddle.linalg.lstsq(x, y, driver="gelsd") print(results[0]) # [[ 0.78350395, -0.22165027, -0.62371236], # [-0.11340097, 0.78866047, 1.14948535]] print(results[1]) # [19.81443405, 10.43814468, 30.56185532]) print(results[2]) # 2 print(results[3]) # [9.03455734, 1.54167950] x = paddle.to_tensor([[10, 2, 3], [3, 10, 5], [5, 6, 12.]]) y = paddle.to_tensor([[4, 2, 9], [2, 0, 3], [2, 5, 3.]]) results = paddle.linalg.lstsq(x, y, driver="gels") print(results[0]) # [[ 0.39386186, 0.10230173, 0.93606132], # [ 0.10741687, -0.29028133, 0.11892585], # [-0.05115091, 0.51918161, -0.19948854]] print(results[1]) # [] """ device = paddle.get_device() if device == "cpu": if driver not in (None, "gels", "gelss", "gelsd", "gelsy"): raise ValueError( "Only support valid driver is 'gels', 'gelss', 'gelsd', 'gelsy' or None for CPU inputs. But got {}". 
format(driver)) driver = "gelsy" if driver is None else driver elif "gpu" in device: if driver not in (None, "gels"): raise ValueError( "Only support valid driver is 'gels' or None for CUDA inputs. But got {}". format(driver)) driver = "gels" if driver is None else driver else: raise RuntimeError("Only support lstsq api for CPU or CUDA device.") if x.dtype == y.dtype and x.dtype in (paddle.float32, paddle.float64): pass else: raise ValueError( "Only support x and y have the same dtype such as 'float32' and 'float64'." ) if rcond is None: if x.dtype == paddle.float32: rcond = 1e-7 * max(x.shape[-2], x.shape[-1]) elif x.dtype == paddle.float64: rcond = 1e-15 * max(x.shape[-2], x.shape[-1]) if paddle.in_dynamic_mode(): solution, rank, singular_values = _C_ops.lstsq(x, y, "rcond", rcond, "driver", driver) if x.shape[-2] > x.shape[-1]: matmul_out = _varbase_creator(dtype=x.dtype) _C_ops.matmul(x, solution, matmul_out, 'trans_x', False, 'trans_y', False) minus_out = _C_ops.elementwise_sub(matmul_out, y) pow_out = _C_ops.pow(minus_out, 'factor', 2) residuals = _C_ops.reduce_sum(pow_out, 'dim', [-2], 'keepdim', False, 'reduce_all', False) else: residuals = paddle.empty(shape=[0], dtype=x.dtype) if driver == "gels": rank = paddle.empty(shape=[0], dtype=paddle.int32) singular_values = paddle.empty(shape=[0], dtype=x.dtype) elif driver == "gelsy": singular_values = paddle.empty(shape=[0], dtype=x.dtype) return solution, residuals, rank, singular_values helper = LayerHelper('lstsq', **locals()) check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq') check_variable_and_dtype( y, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq') solution = helper.create_variable_for_type_inference(dtype=x.dtype) residuals = helper.create_variable_for_type_inference(dtype=x.dtype) rank = helper.create_variable_for_type_inference(dtype=paddle.int32) singular_values = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='lstsq', inputs={'X': x, 'Y': y}, outputs={ 'Solution': solution, 'Rank': rank, 'SingularValues': singular_values }, attrs={'rcond': rcond, 'driver': driver}) matmul_out = helper.create_variable_for_type_inference(dtype=x.dtype) minus_out = helper.create_variable_for_type_inference(dtype=x.dtype) pow_out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matmul_v2', inputs={'X': x, 'Y': solution}, outputs={'Out': matmul_out}, attrs={ 'trans_x': False, 'trans_y': False, }) helper.append_op( type='elementwise_sub', inputs={'X': matmul_out, 'Y': y}, outputs={'Out': minus_out}) helper.append_op( type='pow', inputs={'X': minus_out}, outputs={'Out': pow_out}, attrs={'factor': 2}) helper.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': residuals}, attrs={'dim': [-2], 'keep_dim': False, 'reduce_all': False}) if driver == "gels": rank = paddle.static.data(name='rank', shape=[0]) singular_values = paddle.static.data(name='singular_values', shape=[0]) elif driver == "gelsy": singular_values = paddle.static.data(name='singular_values', shape=[0]) return solution, residuals, rank, singular_values
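# --- Added illustration (not part of the original linalg.py source): a minimal,
# --- hedged sketch of how the ``residuals`` returned by paddle.linalg.lstsq relate
# --- to the solution when M > N. It recomputes them with the same
# --- matmul -> subtract -> square -> reduce_sum chain used in the code above.
# --- The helper name and the tensor values are made-up assumptions for
# --- illustration only.
import paddle

def _lstsq_residuals_sketch():
    paddle.set_device("cpu")
    x = paddle.to_tensor([[1., 1.], [1., 2.], [1., 3.], [1., 4.]])
    y = paddle.to_tensor([[6.], [5.], [7.], [10.]])
    solution, residuals, rank, singular_values = paddle.linalg.lstsq(
        x, y, driver="gelsd")
    # Manual recomputation: sum of squared residual rows, as in the static path.
    manual = ((paddle.matmul(x, solution) - y) ** 2).sum(axis=-2)
    return residuals, manual  # the two should agree up to floating-point error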
def vector_norm(input, porder=None, axis=None, keepdim=False, asvector=False, name=None): """ Calculate the p-order vector norm for certain dimension of Tensor `input`. Args: input (Variable): Tensor, data type float32, float64. porder (float, optional): None for porder=2.0. axis (int, optional): None for last dimension. keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False. """ if paddle.in_dynamic_mode(): if axis is None: axis = -1 return _C_ops.p_norm(input, 'porder', porder, 'axis', axis, 'keepdim', keepdim, 'asvector', asvector) if porder is not None: check_type(porder, 'porder', (float, int), 'p_norm') if axis is not None: check_type(axis, 'axis', (int), 'p_norm') check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'p_norm') attrs = { 'axis': axis if axis is not None else -1, 'porder': float(porder) if porder is not None else 2.0, 'keepdim': keepdim, 'asvector': asvector, 'epsilon': 1e-12, } helper = LayerHelper('p_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op( type='p_norm', inputs={'X': input}, outputs={'Out': out}, attrs=attrs) return out
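# --- Added illustration (not part of the original file): a hedged cross-check of
# --- the p_norm path used by ``vector_norm`` above against a hand-written p-norm.
# --- The helper name and tensor values are illustrative assumptions only.
import paddle

def _vector_norm_sketch():
    xv = paddle.to_tensor([[3., 4.], [5., 12.]])
    auto = paddle.linalg.norm(xv, p=2, axis=-1)        # expected [5., 13.]
    manual = (xv.abs() ** 2.0).sum(axis=-1) ** 0.5     # same values, computed by hand
    return auto, manual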
277
318
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from ..fluid.layer_helper import LayerHelper from ..framework import _varbase_creator, _dygraph_tracer from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype from ..static import Variable from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode from ..fluid.layers import transpose, cast # noqa: F401 from ..fluid import layers import paddle from paddle.common_ops_import import core from paddle.common_ops_import import VarDesc from paddle import _C_ops __all__ = [] def matmul(x, y, transpose_x=False, transpose_y=False, name=None): """ Applies matrix multiplication to two tensors. `matmul` follows the complete broadcast rules, and its behavior is consistent with `np.matmul`. Currently, the input tensors' number of dimensions can be any, `matmul` can be used to achieve the `dot`, `matmul` and `batchmatmul`. The actual behavior depends on the shapes of :math:`x`, :math:`y` and the flag values of :attr:`transpose_x`, :attr:`transpose_y`. Specifically: - If a transpose flag is specified, the last two dimensions of the tensor are transposed. If the tensor is ndim-1 of shape, the transpose is invalid. If the tensor is ndim-1 of shape :math:`[D]`, then for :math:`x` it is treated as :math:`[1, D]`, whereas for :math:`y` it is the opposite: It is treated as :math:`[D, 1]`. The multiplication behavior depends on the dimensions of `x` and `y`. Specifically: - If both tensors are 1-dimensional, the dot product result is obtained. - If both tensors are 2-dimensional, the matrix-matrix product is obtained. - If the `x` is 1-dimensional and the `y` is 2-dimensional, a `1` is prepended to its dimension in order to conduct the matrix multiply. After the matrix multiply, the prepended dimension is removed. - If the `x` is 2-dimensional and `y` is 1-dimensional, the matrix-vector product is obtained. - If both arguments are at least 1-dimensional and at least one argument is N-dimensional (where N > 2), then a batched matrix multiply is obtained. If the first argument is 1-dimensional, a 1 is prepended to its dimension in order to conduct the batched matrix multiply and removed after. If the second argument is 1-dimensional, a 1 is appended to its dimension for the purpose of the batched matrix multiple and removed after. The non-matrix (exclude the last two dimensions) dimensions are broadcasted according the broadcast rule. For example, if input is a (j, 1, n, m) tensor and the other is a (k, m, p) tensor, out will be a (j, k, n, p) tensor. Args: x (Tensor): The input tensor which is a Tensor. y (Tensor): The input tensor which is a Tensor. transpose_x (bool): Whether to transpose :math:`x` before multiplication. transpose_y (bool): Whether to transpose :math:`y` before multiplication. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The output Tensor. Examples: .. 
code-block:: python import paddle import numpy as np # vector * vector x_data = np.random.random([10]).astype(np.float32) y_data = np.random.random([10]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [1] # matrix * vector x_data = np.random.random([10, 5]).astype(np.float32) y_data = np.random.random([5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10] # batched matrix * broadcasted vector x_data = np.random.random([10, 5, 2]).astype(np.float32) y_data = np.random.random([2]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 5] # batched matrix * batched matrix x_data = np.random.random([10, 5, 2]).astype(np.float32) y_data = np.random.random([10, 2, 5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 5, 5] # batched matrix * broadcasted matrix x_data = np.random.random([10, 1, 5, 2]).astype(np.float32) y_data = np.random.random([1, 3, 2, 5]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.matmul(x, y) print(z.numpy().shape) # [10, 3, 5, 5] """ if in_dygraph_mode(): return _C_ops.final_state_matmul(x, y, transpose_x, transpose_y) if _in_legacy_dygraph(): op_type = 'matmul_v2' op = getattr(_C_ops, op_type) return op(x, y, 'trans_x', transpose_x, 'trans_y', transpose_y) attrs = { 'trans_x': transpose_x, 'trans_y': transpose_y, } def __check_input(x, y): var_names = {'x': x, 'y': y} for name, val in var_names.items(): check_variable_and_dtype( val, name, ['float16', 'float32', 'float64', 'complex64', 'complex128'], 'matmul') __check_input(x, y) helper = LayerHelper('matmul_v2', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matmul_v2', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs=attrs) return out def norm(x, p='fro', axis=None, keepdim=False, name=None): """ Returns the matrix norm (Frobenius) or vector norm (the 1-norm, the Euclidean or 2-norm, and in general the p-norm for p > 0) of a given tensor. .. note:: This norm API is different from `numpy.linalg.norm`. This api supports high-order input tensors (rank >= 3), and certain axis need to be pointed out to calculate the norm. But `numpy.linalg.norm` only supports 1-D vector or 2-D matrix as input tensor. For p-order matrix norm, this api actually treats matrix as a flattened vector to calculate the vector norm, NOT REAL MATRIX NORM. Args: x (Tensor): The input tensor could be N-D tensor, and the input data type could be float32 or float64. p (float|string, optional): Order of the norm. Supported values are `fro`, `0`, `1`, `2`, `inf`, `-inf` and any positive real number yielding the corresponding p-norm. Not supported: ord < 0 and nuclear norm. Default value is `fro`. axis (int|list|tuple, optional): The axis on which to apply norm operation. If axis is int or list(int)/tuple(int) with only one element, the vector norm is computed over the axis. If `axis < 0`, the dimension to norm operation is rank(input) + axis. If axis is a list(int)/tuple(int) with two elements, the matrix norm is computed over the axis. Defalut value is `None`. keepdim (bool, optional): Whether to reserve the reduced dimension in the output Tensor. 
The result tensor will have fewer dimension than the :attr:`input` unless :attr:`keepdim` is true, default value is False. name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: results of norm operation on the specified axis of input tensor, it's data type is the same as input's Tensor. Examples: .. code-block:: python import paddle import numpy as np shape=[2, 3, 4] np_input = np.arange(24).astype('float32') - 12 np_input = np_input.reshape(shape) x = paddle.to_tensor(np_input) #[[[-12. -11. -10. -9.] [ -8. -7. -6. -5.] [ -4. -3. -2. -1.]] # [[ 0. 1. 2. 3.] [ 4. 5. 6. 7.] [ 8. 9. 10. 11.]]] # compute frobenius norm along last two dimensions. out_fro = paddle.linalg.norm(x, p='fro', axis=[0,1]) # out_fro.numpy() [17.435596 16.911535 16.7332 16.911535] # compute 2-order vector norm along last dimension. out_pnorm = paddle.linalg.norm(x, p=2, axis=-1) #out_pnorm.numpy(): [[21.118711 13.190906 5.477226] # [ 3.7416575 11.224972 19.131126]] # compute 2-order norm along [0,1] dimension. out_pnorm = paddle.linalg.norm(x, p=2, axis=[0,1]) #out_pnorm.numpy(): [17.435596 16.911535 16.7332 16.911535] # compute inf-order norm out_pnorm = paddle.linalg.norm(x, p=np.inf) #out_pnorm.numpy() = [12.] out_pnorm = paddle.linalg.norm(x, p=np.inf, axis=0) #out_pnorm.numpy(): [[12. 11. 10. 9.] [8. 7. 6. 7.] [8. 9. 10. 11.]] # compute -inf-order norm out_pnorm = paddle.linalg.norm(x, p=-np.inf) #out_pnorm.numpy(): [0.] out_pnorm = paddle.linalg.norm(x, p=-np.inf, axis=0) #out_pnorm.numpy(): [[0. 1. 2. 3.] [4. 5. 6. 5.] [4. 3. 2. 1.]] """ def frobenius_norm(input, dim=None, keepdim=False, name=None): """ The frobenius norm OP is to calculate the frobenius norm of certain two dimensions of Tensor `input`. Args: input (Variable): Tensor, data type float32, float64. dim (list, optional): None for last two dimensions. keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False. """ if dim is not None and not (isinstance(dim, list) and len(dim) == 2): raise ValueError( "The dim of frobenius norm op should be None or two elements list!" ) if paddle.in_dynamic_mode(): if dim is None: return _C_ops.frobenius_norm(input, 'keep_dim', keepdim, 'reduce_all', True) return _C_ops.frobenius_norm(input, 'dim', dim, 'keep_dim', keepdim, 'reduce_all', False) attrs = {'dim': dim, 'keep_dim': keepdim, 'reduce_all': False} if dim is None: attrs['reduce_all'] = True check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'frobenius_norm') helper = LayerHelper('frobenius_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op( type='frobenius_norm', inputs={'X': input}, outputs={'Out': out}, attrs=attrs) return out def vector_norm(input, porder=None, axis=None, keepdim=False, asvector=False, name=None): """ Calculate the p-order vector norm for certain dimension of Tensor `input`. Args: input (Variable): Tensor, data type float32, float64. porder (float, optional): None for porder=2.0. axis (int, optional): None for last dimension. keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False. 
""" if paddle.in_dynamic_mode(): if axis is None: axis = -1 return _C_ops.p_norm(input, 'porder', porder, 'axis', axis, 'keepdim', keepdim, 'asvector', asvector) if porder is not None: check_type(porder, 'porder', (float, int), 'p_norm') if axis is not None: check_type(axis, 'axis', (int), 'p_norm') check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'p_norm') attrs = { 'axis': axis if axis is not None else -1, 'porder': float(porder) if porder is not None else 2.0, 'keepdim': keepdim, 'asvector': asvector, 'epsilon': 1e-12, } helper = LayerHelper('p_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op( type='p_norm', inputs={'X': input}, outputs={'Out': out}, attrs=attrs) return out def inf_norm(input, porder=None, axis=axis, keepdim=False, asvector=False, name=None): helper = LayerHelper('frobenius_norm', **locals()) out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) helper.append_op(type='abs', inputs={'X': input}, outputs={'Out': out}) reduce_out = helper.create_variable_for_type_inference( dtype=helper.input_dtype()) reduce_all = True if axis == None or axis == [] or asvector == True else False axis = axis if axis != None and axis != [] else [0] reduce_type = 'reduce_max' if porder == np.float( 'inf') else 'reduce_min' helper.append_op( type=reduce_type, inputs={'X': out}, outputs={'Out': reduce_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) return reduce_out def p_matrix_norm(input, porder=1., axis=axis, keepdim=False, name=None): """ NOTE: This function actually treats the matrix as flattened vector to calculate vector norm instead of matrix norm. """ block = LayerHelper('norm', **locals()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) abs_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='abs', inputs={'X': input}, outputs={'Out': abs_out}) pow_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='pow', inputs={'X': abs_out}, outputs={'Out': pow_out}, attrs={'factor': porder}) sum_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': sum_out}, attrs={ 'dim': axis, 'keep_dim': keepdim, 'reduce_all': True if axis is None else False }) porder block.append_op( type='pow', inputs={'X': sum_out}, outputs={'Out': out}, attrs={'factor': float(1. / porder)}) return out if axis is None and p is not None: if isinstance(p, str): if p == "fro": return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name) else: raise ValueError( "only valid string values are 'fro', found {}".format(p)) elif isinstance(p, (int, float)): return vector_norm( x, porder=p, axis=axis, keepdim=keepdim, asvector=True, name=name) else: raise ValueError("only valid p type is string or float, found {}". 
format(type(p))) if isinstance(axis, tuple): axis = list(axis) if isinstance(axis, list) and len(axis) == 1: axis = axis[0] #calculate vector norm, where axis is int or list with only one integer if isinstance(axis, int): if isinstance(p, str): if p == "fro": return vector_norm( x, porder=2, axis=axis, keepdim=keepdim, asvector=False, name=name) else: raise ValueError( "only valid string values are 'fro', found {}".format(p)) elif isinstance(p, (int, float)): return vector_norm( x, axis=axis, porder=p, keepdim=keepdim, asvector=False, name=name) else: raise ValueError( "unspport p for p-order vector norm. except float, found {}". format(p)) #calculate matrix norm, where axis is list with two integers elif isinstance(axis, list) and len(axis) == 2: if p == "fro": return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name) elif p == np.inf or p == -np.inf: return inf_norm(x, porder=p, axis=axis, keepdim=keepdim, name=name) elif p == 0: raise ValueError( "just suport axis type int or list (length of list <=1) if p = 0, found {}". format(axis)) else: return p_matrix_norm( x, porder=p, axis=axis, keepdim=keepdim, name=name) else: raise ValueError( "except axis type int or list (length of list <=2), found {}". format(axis)) def dist(x, y, p=2, name=None): r""" This OP returns the p-norm of (x - y). It is not a norm in a strict sense, only as a measure of distance. The shapes of x and y must be broadcastable. The definition is as follows, for details, please refer to the `numpy's broadcasting <https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`_: - Each input has at least one dimension. - Match the two input dimensions from back to front, the dimension sizes must either be equal, one of them is 1, or one of them does not exist. Where, z = x - y, the shapes of x and y are broadcastable, then the shape of z can be obtained as follows: 1. If the number of dimensions of x and y are not equal, prepend 1 to the dimensions of the tensor with fewer dimensions. For example, The shape of x is [8, 1, 6, 1], the shape of y is [7, 1, 5], prepend 1 to the dimension of y. x (4-D Tensor): 8 x 1 x 6 x 1 y (4-D Tensor): 1 x 7 x 1 x 5 2. Determine the size of each dimension of the output z: choose the maximum value from the two input dimensions. z (4-D Tensor): 8 x 7 x 6 x 5 If the number of dimensions of the two inputs are the same, the size of the output can be directly determined in step 2. When p takes different values, the norm formula is as follows: When p = 0, defining $0^0=0$, the zero-norm of z is simply the number of non-zero elements of z. .. math:: ||z||_{0}=\lim_{p \\rightarrow 0}\sum_{i=1}^{m}|z_i|^{p} When p = inf, the inf-norm of z is the maximum element of z. .. math:: ||z||_\infty=\max_i |z_i| When p = -inf, the negative-inf-norm of z is the minimum element of z. .. math:: ||z||_{-\infty}=\min_i |z_i| Otherwise, the p-norm of z follows the formula, .. math:: ||z||_{p}=(\sum_{i=1}^{m}|z_i|^p)^{\\frac{1}{p}} Args: x (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64. y (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64. p (float, optional): The norm to be computed, its data type is float32 or float64. Default: 2. Returns: Tensor: Tensor that is the p-norm of (x - y). Examples: .. code-block:: python import paddle import numpy as np x = paddle.to_tensor(np.array([[3, 3],[3, 3]]), "float32") y = paddle.to_tensor(np.array([[3, 3],[3, 1]]), "float32") out = paddle.dist(x, y, 0) print(out) # out = [1.] out = paddle.dist(x, y, 2) print(out) # out = [2.] 
out = paddle.dist(x, y, float("inf")) print(out) # out = [2.] out = paddle.dist(x, y, float("-inf")) print(out) # out = [0.] """ check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'dist') check_variable_and_dtype(y, 'dtype', ['float32', 'float64'], 'dist') check_type(p, 'p', (float, int), 'dist') helper = LayerHelper("dist", **locals()) out = helper.create_variable_for_type_inference(x.dtype) inputs = {"X": [x], "Y": [y]} outputs = {'Out': [out]} attrs = {"p": float(p)} helper.append_op( type='dist', inputs=inputs, outputs={'Out': out}, attrs=attrs) return out def cond(x, p=None, name=None): """ Computes the condition number of a matrix or batches of matrices with respect to a matrix norm ``p``. Args: x (Tensor): The input tensor could be tensor of shape ``(*, m, n)`` where ``*`` is zero or more batch dimensions for ``p`` in ``(2, -2)``, or of shape ``(*, n, n)`` where every matrix is invertible for any supported ``p``. And the input data type could be ``float32`` or ``float64``. p (float|string, optional): Order of the norm. Supported values are `fro`, `nuc`, `1`, `-1`, `2`, `-2`, `inf`, `-inf`. Default value is `None`, meaning that the order of the norm is `2`. name (str, optional): The default value is `None`. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: computing results of condition number, its data type is the same as input Tensor ``x``. Examples: .. code-block:: python import paddle import numpy as np x = paddle.to_tensor([[1., 0, -1], [0, 1, 0], [1, 0, 1]]) # compute conditional number when p is None out = paddle.linalg.cond(x) # out.numpy() [1.4142135] # compute conditional number when order of the norm is 'fro' out_fro = paddle.linalg.cond(x, p='fro') # out_fro.numpy() [3.1622777] # compute conditional number when order of the norm is 'nuc' out_nuc = paddle.linalg.cond(x, p='nuc') # out_nuc.numpy() [9.2426405] # compute conditional number when order of the norm is 1 out_1 = paddle.linalg.cond(x, p=1) # out_1.numpy() [2.] # compute conditional number when order of the norm is -1 out_minus_1 = paddle.linalg.cond(x, p=-1) # out_minus_1.numpy() [1.] # compute conditional number when order of the norm is 2 out_2 = paddle.linalg.cond(x, p=2) # out_2.numpy() [1.4142135] # compute conditional number when order of the norm is -1 out_minus_2 = paddle.linalg.cond(x, p=-2) # out_minus_2.numpy() [0.70710677] # compute conditional number when order of the norm is inf out_inf = paddle.linalg.cond(x, p=np.inf) # out_inf.numpy() [2.] # compute conditional number when order of the norm is -inf out_minus_inf = paddle.linalg.cond(x, p=-np.inf) # out_minus_inf.numpy() [1.] 
a = paddle.to_tensor(np.random.randn(2, 4, 4).astype('float32')) # a.numpy() # [[[ 0.14063153 -0.996288 0.7996131 -0.02571543] # [-0.16303636 1.5534962 -0.49919784 -0.04402903] # [-1.1341571 -0.6022629 0.5445269 0.29154757] # [-0.16816919 -0.30972657 1.7521842 -0.5402487 ]] # [[-0.58081484 0.12402827 0.7229862 -0.55046535] # [-0.15178485 -1.1604939 0.75810957 0.30971205] # [-0.9669573 1.0940945 -0.27363303 -0.35416734] # [-1.216529 2.0018666 -0.7773689 -0.17556527]]] a_cond_fro = paddle.linalg.cond(a, p='fro') # a_cond_fro.numpy() [31.572273 28.120834] b = paddle.to_tensor(np.random.randn(2, 3, 4).astype('float64')) # b.numpy() # [[[ 1.61707487 0.46829144 0.38130416 0.82546736] # [-1.72710298 0.08866375 -0.62518804 0.16128892] # [-0.02822879 -1.67764516 0.11141444 0.3220113 ]] # [[ 0.22524372 0.62474921 -0.85503233 -1.03960523] # [-0.76620689 0.56673047 0.85064753 -0.45158196] # [ 1.47595418 2.23646462 1.5701758 0.10497519]]] b_cond_2 = paddle.linalg.cond(b, p=2) # b_cond_2.numpy() [3.30064451 2.51976252] """ def mat_norm(input, porder=1., axis=None): """ NOTE: Calculate the matrix norm of a square matrix or batches of square matrices, when porder is in (1, -1, inf, -inf) """ reduce_all = True if axis is None or axis == [] else False axis = axis if axis != None and axis != [] else [0] keepdim = False if paddle.in_dynamic_mode(): abs_out = _C_ops.abs(input) sum_out = _C_ops.reduce_sum(abs_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == 1 or porder == np.inf: return _C_ops.reduce_max(sum_out, 'dim', [-1], 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == -1 or porder == -np.inf: return _C_ops.reduce_min(sum_out, 'dim', [-1], 'keepdim', keepdim, 'reduce_all', reduce_all) block = LayerHelper('norm', **locals()) abs_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='abs', inputs={'X': input}, outputs={'Out': abs_out}) block.append_op( type='reduce_sum', inputs={'X': abs_out}, outputs={'Out': sum_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) if porder == 1 or porder == np.inf: block.append_op( type='reduce_max', inputs={'X': sum_out}, outputs={'Out': out}, attrs={ 'dim': [-1], 'keep_dim': keepdim, 'reduce_all': reduce_all }) if porder == -1 or porder == -np.inf: block.append_op( type='reduce_min', inputs={'X': sum_out}, outputs={'Out': out}, attrs={ 'dim': [-1], 'keep_dim': keepdim, 'reduce_all': reduce_all }) return out def fro_norm(input, porder=2, axis=[-1]): """ NOTE: Calculate the frobenius norm of a square matrix or batches of square matrices. """ reduce_all = True if axis is None or axis == [] else False keepdim = False if paddle.in_dynamic_mode(): pow_out = _C_ops.pow(input, 'factor', porder) sum_out_1 = _C_ops.reduce_sum(pow_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) sum_out_2 = _C_ops.reduce_sum(sum_out_1, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) return _C_ops.pow(sum_out_2, 'factor', float(1. 
/ porder)) block = LayerHelper('norm', **locals()) pow_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out_1 = block.create_variable_for_type_inference( dtype=block.input_dtype()) sum_out_2 = block.create_variable_for_type_inference( dtype=block.input_dtype()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='pow', inputs={'X': input}, outputs={'Out': pow_out}, attrs={'factor': porder}) block.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': sum_out_1}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='reduce_sum', inputs={'X': sum_out_1}, outputs={'Out': sum_out_2}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='pow', inputs={'X': sum_out_2}, outputs={'Out': out}, attrs={'factor': float(1. / porder)}) return out def svd_norm(input, porder, axis=[-1]): """ NOTE: Calculate the matrix norm, which is related to singular values, of a matrix or batches of matrices, including nuclear norm, 2-norm and (-2)-norm. """ reduce_all = True if axis is None or axis == [] else False keepdim = False u, s, vh = svd(input, full_matrices=False) if paddle.in_dynamic_mode(): if porder == "nuc": return _C_ops.reduce_sum(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) max_out = _C_ops.reduce_max(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) min_out = _C_ops.reduce_min(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) if porder == 2: return _C_ops.elementwise_div(max_out, min_out, 'aixs', axis, 'use_mkldnn', False) if porder == -2: return _C_ops.elementwise_div(min_out, max_out, 'aixs', axis, 'use_mkldnn', False) block = LayerHelper('norm', **locals()) out = block.create_variable_for_type_inference( dtype=block.input_dtype()) if porder == "nuc": block.append_op( type='reduce_sum', inputs={'X': s}, outputs={'Out': out}, attrs={ 'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all }) return out max_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) min_out = block.create_variable_for_type_inference( dtype=block.input_dtype()) block.append_op( type='reduce_max', inputs={'X': s}, outputs={'Out': max_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) block.append_op( type='reduce_min', inputs={'X': s}, outputs={'Out': min_out}, attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}) if porder == 2: block.append_op( type='elementwise_div', inputs={'X': max_out, 'Y': min_out}, outputs={'Out': out}, attrs={'aixs': axis, 'use_mkldnn': False}) return out if porder == -2: block.append_op( type='elementwise_div', inputs={'X': min_out, 'Y': max_out}, outputs={'Out': out}, attrs={'aixs': axis, 'use_mkldnn': False}) return out def empty_tensor(input, shape): if paddle.in_dynamic_mode(): return input.reshape(shape) raise ValueError("only support x is nonempty tensor in static mode") x_shape = list(x.shape) if not len(x_shape) >= 2: raise ValueError("input should be a matrix or batches of matrices, " + "but the dimention of received input is {}".format( len(x_shape))) if p == None: p = 2 x_size = 0 if (0 in x_shape) else 1 if p in ("fro", "nuc", 1, -1, np.inf, -np.inf): if x_shape[len(x_shape) - 1] == x_shape[len(x_shape) - 2]: if x_size == 0: return empty_tensor(x, x_shape[:-2]) x_inv = x.inverse() if p == "fro": return fro_norm(x) * fro_norm(x_inv) if p == "nuc": return svd_norm(x, p) * svd_norm(x_inv, p) if p in (1, -1): return mat_norm( x, 
porder=p, axis=[-2]) * mat_norm( x_inv, porder=p, axis=[-2]) if p in (np.inf, -np.inf): return mat_norm( x, porder=p, axis=[-1]) * mat_norm( x_inv, porder=p, axis=[-1]) else: raise ValueError("only support p is {} when input is a ".format(p) + "square matrix or batches of square matrices") elif p in (2, -2): if x_size == 0: return empty_tensor(x, x_shape[:-2]) return svd_norm(x, porder=p) else: raise ValueError( "unsupported {} for p, only supporting ('fro', 'nuc', ".format( p) + "1, -1, 2, -2, inf, -inf) or none") def dot(x, y, name=None): """ This operator calculates inner product for vectors. .. note:: Support 1-d and 2-d Tensor. When it is 2d, the first dimension of this matrix is the batch dimension, which means that the vectors of multiple batches are dotted. Parameters: x(Tensor): 1-D or 2-D ``Tensor``. Its dtype should be ``float32``, ``float64``, ``int32``, ``int64`` y(Tensor): 1-D or 2-D ``Tensor``. Its dtype soulde be ``float32``, ``float64``, ``int32``, ``int64`` name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. Details: :ref:`api_guide_Name` Returns: Tensor: the calculated result Tensor. Examples: .. code-block:: python import paddle import numpy as np x_data = np.random.uniform(0.1, 1, [10]).astype(np.float32) y_data = np.random.uniform(1, 3, [10]).astype(np.float32) x = paddle.to_tensor(x_data) y = paddle.to_tensor(y_data) z = paddle.dot(x, y) print(z) """ op_type = 'dot' # skip var type check in dygraph mode to improve efficiency if paddle.in_dynamic_mode(): op = getattr(_C_ops, op_type) return op(x, y) assert x is not None, 'x cannot be None in {}'.format(op_type) assert y is not None, 'y cannot be None in {}'.format(op_type) check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], op_type) check_variable_and_dtype(y, 'y', ['float32', 'float64', 'int32', 'int64'], op_type) helper = LayerHelper(op_type, **locals()) if name is None: out = helper.create_variable_for_type_inference(dtype=x.dtype) else: out = helper.create_variable( name=name, dtype=x.dtype, persistable=False) helper.append_op( type="dot", inputs={'X': x, 'Y': y}, attrs={}, outputs={"Out": out}) return out def cov(x, rowvar=True, ddof=True, fweights=None, aweights=None, name=None): """ Estimate the covariance matrix of the input variables, given data and weights. A covariance matrix is a square matrix, indicate the covariance of each pair variables in the input matrix. For example, for an N-dimensional samples X=[x1,x2,…xN]T, then the covariance matrix element Cij is the covariance of xi and xj. The element Cii is the variance of xi itself. Parameters: x(Tensor): A N-D(N<=2) Tensor containing multiple variables and observations. By default, each row of x represents a variable. Also see rowvar below. rowvar(Bool, optional): If rowvar is True (default), then each row represents a variable, with observations in the columns. Default: True ddof(Bool, optional): If ddof=True will return the unbiased estimate, and ddof=False will return the simple average. Default: True fweights(Tensor, optional): 1-D Tensor of integer frequency weights; The number of times each observation vector should be repeated. Default: None aweights(Tensor, optional): 1-D Tensor of observation vector weights. How important of the observation vector, larger data means this element is more important. Default: None name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. 
Details: :ref:`api_guide_Name` Returns: Tensor: The covariance matrix Tensor of the variables. Examples: .. code-block:: python import paddle xt = paddle.rand((3,4)) paddle.linalg.cov(xt) ''' Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, [[0.07918842, 0.06127326, 0.01493049], [0.06127326, 0.06166256, 0.00302668], [0.01493049, 0.00302668, 0.01632146]]) ''' """ op_type = 'cov' if len(x.shape) > 2 or len(x.shape) < 1: raise ValueError( "Input(x) only support N-D (1<=N<=2) tensor in cov, but received " "length of Input(input) is %s." % len(x.shape)) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cov') nx = x if len(x.shape) == 1: nx = x.reshape((1, -1)) if not rowvar and nx.shape[0] != 1: nx = nx.t() w = None observation_num = nx.shape[1] if fweights is not None: w = fweights.astype(nx.dtype) if len(w.shape) > 1: raise ValueError( "Input(fweights) only support N-D (N<=1) tensor in cov, but received " "shape of Input(input) is %s." % len(fweights.shape)) if fweights.shape[0] != observation_num: raise ValueError( "The number of Input(fweights) should equal to x's dim[1]: {}, but received " "size of Input(fweights) is {}.".format(observation_num, fweights.shape[0])) if fweights.min() < 0: raise ValueError( "The value of Input(fweights) cannot be negtive, but received " "min of Input(fweights) is {}.".format(fweights.min())) if not paddle.all(fweights == paddle.round(fweights.astype('float64'))): raise ValueError("Input(fweights) must be integer ") if aweights is not None: aw = aweights.astype(nx.dtype) if len(aw.shape) > 1: raise ValueError( "Input(aweights) only support N-D (N<=1) tensor in cov, but received " "length of Input(input) is %s." % len(aweights.shape)) check_variable_and_dtype(aweights, 'dtype', ['float32', 'float64'], 'cov') if aweights.shape[0] != observation_num: raise ValueError( "The number of Input(aweights) should equal to x's dim[1]: {}, but received " "size of Input(aweights) is {}.".format(observation_num, aweights.shape[0])) if aweights.min() < 0: raise ValueError( "The value of Input(aweights) cannot be negtive, but received " "min of Input(aweights) is {}.".format(aweights.min())) if w is not None: w = w * aw else: w = aw w_sum = paddle.to_tensor(observation_num, dtype=nx.dtype) if fweights is not None or aweights is not None: w_sum = w.sum() if w_sum.item() == 0: raise ValueError("The sum of weights is zero, can't be normalized.") if w is not None: nx_w = nx * w avg = (nx_w).sum(axis=1) / w_sum else: avg = nx.sum(axis=1) / w_sum nx_w = nx if w is not None and aweights is not None and ddof == True: norm_factor = w_sum - (w * aweights).sum() / w_sum else: norm_factor = w_sum - ddof if norm_factor <= 0: norm_factor = paddle.to_tensor(0, dtype=nx.dtype) nx = nx - avg.unsqueeze(1) xxt = paddle.mm(nx, nx_w.t().conj()) cov = paddle.divide(xxt, norm_factor).squeeze() return cov def t(input, name=None): """ Transpose <=2-D tensor. 0-D and 1-D tensors are returned as it is and 2-D tensor is equal to the paddle.transpose function which perm dimensions set 0 and 1. Args: input (Tensor): The input Tensor. It is a N-D (N<=2) Tensor of data types float16, float32, float64, int32. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` Returns: Tensor: A transposed n-D Tensor, with data type being float16, float32, float64, int32, int64. For Example: .. 
code-block:: text # Example 1 (0-D tensor) x = tensor([0.79]) paddle.t(x) = tensor([0.79]) # Example 2 (1-D tensor) x = tensor([0.79, 0.84, 0.32]) paddle.t(x) = tensor([0.79, 0.84, 0.32]) # Example 3 (2-D tensor) x = tensor([0.79, 0.84, 0.32], [0.64, 0.14, 0.57]) paddle.t(x) = tensor([0.79, 0.64], [0.84, 0.14], [0.32, 0.57]) Examples: .. code-block:: python import paddle x = paddle.ones(shape=[2, 3], dtype='int32') x_transposed = paddle.t(x) print(x_transposed.shape) # [3, 2] """ if len(input.shape) > 2: raise ValueError( "Input(input) only support N-D (N<=2) tensor, but received " "length of Input(input) is %s. Perhaps you can use paddle." "tensor.transpose() instead." % len(input.shape)) if paddle.in_dynamic_mode(): if len(input.shape) == 1: return input # 2-D tensor perm = [1, 0] out, _ = _C_ops.transpose2(input, 'axis', perm) return out check_variable_and_dtype( input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'], 'transpose') helper = LayerHelper('t', **locals()) out = helper.create_variable_for_type_inference(input.dtype) input_shape = helper.create_variable_for_type_inference(input.dtype) if len(input.shape) == 1: out = input else: helper.append_op( type='transpose2', inputs={'X': [input]}, outputs={'Out': [out], 'XShape': [input_shape]}, attrs={'axis': [1, 0]}) return out def cross(x, y, axis=None, name=None): """ Computes the cross product between two tensors along an axis. Inputs must have the same shape, and the length of their axes should be equal to 3. If `axis` is not given, it defaults to the first axis found with the length 3. Args: x (Tensor): The first input tensor. y (Tensor): The second input tensor. axis (int, optional): The axis along which to compute the cross product. It defaults to the first axis found with the length 3. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor. A Tensor with same data type as `x`. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0], [3.0, 3.0, 3.0]]) y = paddle.to_tensor([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]) z1 = paddle.cross(x, y) # [[-1. -1. -1.] # [ 2. 2. 2.] # [-1. -1. -1.]] z2 = paddle.cross(x, y, axis=1) # [[0. 0. 0.] # [0. 0. 0.] # [0. 0. 0.]] """ if in_dygraph_mode(): return _C_ops.final_state_cross(x, y, axis) else: if _in_legacy_dygraph(): if axis is not None: return _C_ops.cross(x, y, 'dim', axis) else: return _C_ops.cross(x, y) else: helper = LayerHelper("cross", **locals()) out = helper.create_variable_for_type_inference(x.dtype) attrs = dict() attrs['dim'] = axis helper.append_op( type='cross', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs=attrs) return out def cholesky(x, upper=False, name=None): r""" Computes the Cholesky decomposition of one symmetric positive-definite matrix or batches of symmetric positive-definite matrice. If `upper` is `True`, the decomposition has the form :math:`A = U^{T}U` , and the returned matrix :math:`U` is upper-triangular. Otherwise, the decomposition has the form :math:`A = LL^{T}` , and the returned matrix :math:`L` is lower-triangular. Args: x (Tensor): The input tensor. Its shape should be `[*, M, M]`, where * is zero or more batch dimensions, and matrices on the inner-most 2 dimensions all should be symmetric positive-definite. Its data type should be float32 or float64. upper (bool): The flag indicating whether to return upper or lower triangular matrices. Default: False. 
Returns: Tensor: A Tensor with same shape and data type as `x`. It represents \ triangular matrices generated by Cholesky decomposition. Examples: .. code-block:: python import paddle import numpy as np a = np.random.rand(3, 3) a_t = np.transpose(a, [1, 0]) x_data = np.matmul(a, a_t) + 1e-03 x = paddle.to_tensor(x_data) out = paddle.linalg.cholesky(x, upper=False) print(out) # [[1.190523 0. 0. ] # [0.9906703 0.27676893 0. ] # [1.25450498 0.05600871 0.06400121]] """ if paddle.in_dynamic_mode(): return _C_ops.cholesky(x, "upper", upper) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cholesky') check_type(upper, 'upper', bool, 'cholesky') helper = LayerHelper('cholesky', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='cholesky', inputs={'X': [x]}, outputs={'Out': out}, attrs={'upper': upper}) return out def matrix_rank(x, tol=None, hermitian=False, name=None): r""" Computes the rank of a matrix. The rank of a matrix is the number of singular values that are greater than the specified `tol` threshold when hermitian=False, or the number of eigenvalues in absolute value that are greater than the specified `tol` threshold when hermitian=True. Args: x (Tensor): The input tensor. Its shape should be `[..., m, n]`, where `...` is zero or more batch dimensions. If `x` is a batch of matrices then the output has the same batch dimensions. The data type of `x` should be float32 or float64. tol (float,Tensor,optional): the tolerance value. Default: None. If `tol` is not specified, and `sigma` is the largest singular value (or eigenvalues in absolute value), and `eps` is the epsilon value for the dtype of `x`, then `tol` is computed with formula `tol=sigma * max(m,n) * eps`. Note that if `x` is a batch of matrices, `tol` is computed this way for every batch. hermitian (bool,optional): indicates whether `x` is Hermitian. Default: False. When hermitian=True, `x` is assumed to be Hermitian, enabling a more efficient method for finding eigenvalues, but `x` is not checked inside the function. Instead, We just use the lower triangular of the matrix to compute. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: Rank of tensor x. Examples: .. 
code-block:: python import paddle a = paddle.eye(10) b = paddle.linalg.matrix_rank(a) print(b) # b = [10] c = paddle.ones(shape=[3, 4, 5, 5]) d = paddle.linalg.matrix_rank(c, tol=0.01, hermitian=True) print(d) # d = [[1, 1, 1, 1], # [1, 1, 1, 1], # [1, 1, 1, 1]] """ if paddle.in_dynamic_mode(): if tol is None: tol_tensor = None tol_attr = 0.0 use_default_tol = True elif isinstance(tol, Variable): if tol.dtype != x.dtype: tol_tensor = cast(tol, x.dtype) else: tol_tensor = tol tol_attr = 0.0 use_default_tol = False else: tol_tensor = None tol_attr = float(tol) use_default_tol = False return _C_ops.matrix_rank(x, tol_tensor, "tol", tol_attr, 'hermitian', hermitian, 'use_default_tol', use_default_tol) inputs = {} attrs = {} check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'matrix_rank') inputs['X'] = x if tol is None: attrs['use_default_tol'] = True elif isinstance(tol, Variable): check_variable_and_dtype(tol, 'tol', ['float32'], 'matrix_rank') attrs['use_default_tol'] = False if tol.dtype != x.dtype: inputs['TolTensor'] = cast(tol, x.dtype) else: inputs['TolTensor'] = tol else: check_type(tol, 'tol', float, 'matrix_rank') attrs['use_default_tol'] = False attrs['tol'] = tol check_type(hermitian, 'hermitian', bool, 'matrix_rank') attrs['hermitian'] = hermitian helper = LayerHelper('matrix_rank', **locals()) out = helper.create_variable_for_type_inference(dtype='int32') helper.append_op( type='matrix_rank', inputs=inputs, outputs={'Out': out}, attrs=attrs) return out def bmm(x, y, name=None): """ Applies batched matrix multiplication to two tensors. Both of the two input tensors must be three-dementional and share the same batch size. if x is a (b, m, k) tensor, y is a (b, k, n) tensor, the output will be a (b, m, n) tensor. Args: x (Tensor): The input Tensor. y (Tensor): The input Tensor. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The product Tensor. Examples: .. code-block:: python import paddle # In imperative mode: # size x: (2, 2, 3) and y: (2, 3, 2) x = paddle.to_tensor([[[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]], [[3.0, 3.0, 3.0], [4.0, 4.0, 4.0]]]) y = paddle.to_tensor([[[1.0, 1.0],[2.0, 2.0],[3.0, 3.0]], [[4.0, 4.0],[5.0, 5.0],[6.0, 6.0]]]) out = paddle.bmm(x, y) #output size: (2, 2, 2) #output value: #[[[6.0, 6.0],[12.0, 12.0]],[[45.0, 45.0],[60.0, 60.0]]] out_np = out.numpy() """ x_shape = x.shape y_shape = y.shape if not len(x_shape) == len(y_shape) == 3: raise ValueError( "x and y should be 3-dimensional. But received x's dimention: {}, y's dimention: {}". format(x_shape, y_shape)) if x_shape[2] != y_shape[1]: raise ValueError( "x's width must be equal with y's height. But received x's shape: {}, y's shape: {}". format(x_shape, y_shape)) if x_shape[0] != y_shape[0]: raise ValueError( "x's batch (shape[0]) must be equal with y's batch (shape[0]). But received x's shape: {}, y's shape: {}". format(x_shape, y_shape)) if paddle.in_dynamic_mode(): return _C_ops.bmm(x, y) helper = LayerHelper('bmm', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type='bmm', inputs={'X': x, 'Y': y}, outputs={'Out': out}) return out def histogram(input, bins=100, min=0, max=0, name=None): """ Computes the histogram of a tensor. The elements are sorted into equal width bins between min and max. If min and max are both zero, the minimum and maximum values of the data are used. Args: input (Tensor): A Tensor(or LoDTensor) with shape :math:`[N_1, N_2,..., N_k]` . 
The data type of the input Tensor should be float32, float64, int32, int64. bins (int): number of histogram bins min (int): lower end of the range (inclusive) max (int): upper end of the range (inclusive) Returns: Tensor: data type is int64, shape is (nbins,). Examples: .. code-block:: python import paddle inputs = paddle.to_tensor([1, 2, 1]) result = paddle.histogram(inputs, bins=4, min=0, max=3) print(result) # [0, 2, 1, 0] """ if paddle.in_dynamic_mode(): return _C_ops.histogram(input, "bins", bins, "min", min, "max", max) helper = LayerHelper('histogram', **locals()) check_variable_and_dtype( input, 'X', ['int32', 'int64', 'float32', 'float64'], 'histogram') out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64) helper.append_op( type='histogram', inputs={'X': input}, outputs={'Out': out}, attrs={'bins': bins, 'min': min, 'max': max}) return out def bincount(x, weights=None, minlength=0, name=None): """ Computes frequency of each value in the input tensor. Args: x (Tensor): A Tensor with non-negative integer. Should be 1-D tensor. weights (Tensor, optional): Weight for each value in the input tensor. Should have the same shape as input. Default is None. minlength (int, optional): Minimum number of bins. Should be non-negative integer. Default is 0. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor of frequency. Examples: .. code-block:: python import paddle x = paddle.to_tensor([1, 2, 1, 4, 5]) result1 = paddle.bincount(x) print(result1) # [0, 2, 1, 0, 1, 1] w = paddle.to_tensor([2.1, 0.4, 0.1, 0.5, 0.5]) result2 = paddle.bincount(x, weights=w) print(result2) # [0., 2.19999981, 0.40000001, 0., 0.50000000, 0.50000000] """ if x.dtype not in [paddle.int32, paddle.int64]: raise TypeError("Elements in Input(x) should all be integers") if paddle.in_dynamic_mode(): return _C_ops.bincount(x, weights, "minlength", minlength) helper = LayerHelper('bincount', **locals()) check_variable_and_dtype(x, 'X', ['int32', 'int64'], 'bincount') if weights is not None: check_variable_and_dtype(weights, 'Weights', ['int32', 'int64', 'float32', 'float64'], 'bincount') out = helper.create_variable_for_type_inference(dtype=weights.dtype) else: out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='bincount', inputs={'X': x, 'Weights': weights}, outputs={'Out': out}, attrs={'minlength': minlength}) return out def mv(x, vec, name=None): """ Performs a matrix-vector product of the matrix x and the vector vec. Args: x (Tensor): A tensor with shape :math:`[M, N]` , The data type of the input Tensor x should be one of float32, float64. vec (Tensor): A tensor with shape :math:`[N]` , The data type of the input Tensor x should be one of float32, float64. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor which is producted by x and vec. Examples: .. 
code-block:: python # x: [M, N], vec: [N] # paddle.mv(x, vec) # out: [M] import numpy as np import paddle x_data = np.array([[2, 1, 3], [3, 0, 1]]).astype("float64") x = paddle.to_tensor(x_data) vec_data = np.array([3, 5, 1]) vec = paddle.to_tensor(vec_data).astype("float64") out = paddle.mv(x, vec) """ if in_dygraph_mode(): return _C_ops.final_state_mv(x, vec) else: if _in_legacy_dygraph(): out = _C_ops.mv(x, vec) return out else: def __check_input(x, vec): var_names = {'x': x, 'vec': vec} for name, val in var_names.items(): check_variable_and_dtype(val, name, ['float32', 'float64'], 'mv') x_shape = list(x.shape) vec_shape = list(vec.shape) if len(x_shape) != 2: raise ValueError( "x should be 2-dimensional. But received x's dimention: {}". format(x_shape)) if len(vec_shape) != 1: raise ValueError( "vec should be 1-dimensional. But received vec's dimention: {}". format(vec_shape)) __check_input(x, vec) helper = LayerHelper('mv', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='mv', inputs={'X': x, 'Vec': vec}, outputs={'Out': out}) return out def det(x, name=None): """ Calculates determinant value of a square matrix or batches of square matrices. Args: x (Tensor): input (Tensor): the input matrix of size `(n, n)` or the batch of matrices of size `(*, n, n)` where `*` is one or more batch dimensions. Returns: y (Tensor):the determinant value of a square matrix or batches of square matrices. Examples: .. code-block:: python import paddle x = paddle.randn([3,3,3]) A = paddle.linalg.det(x) print(A) # [ 0.02547996, 2.52317095, -6.15900707]) """ if paddle.in_dynamic_mode(): return _C_ops.determinant(x) check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'det') input_shape = list(x.shape) assert len(input_shape) >= 2, \ "The x must be at least 2-dimensional, " \ "but received Input x's dimensional: %s.\n" % \ len(input_shape) assert (input_shape[-1] == input_shape[-2]), \ "Expect squared input," \ "but received %s by %s matrix.\n" \ %(input_shape[-2], input_shape[-1]) \ helper = LayerHelper('determinant', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='determinant', inputs={'Input': [x]}, outputs={'Out': [out]}) return out def slogdet(x, name=None): """ Calculates the sign and natural logarithm of the absolute value of a square matrix's or batches square matrices' determinant. The determinant can be computed with ``sign * exp(logabsdet) Supports input of float, double Note that for matrices that have zero determinant, this returns ``(0, -inf)`` Args: x (Tensor): the batch of matrices of size :math:`(*, n, n)` where math:`*` is one or more batch dimensions. Returns: y (Tensor): A tensor containing the sign of the determinant and the natural logarithm of the absolute value of determinant, respectively. Examples: .. code-block:: python import paddle x = paddle.randn([3,3,3]) A = paddle.linalg.slogdet(x) print(A) # [[ 1. , 1. , -1. 
], # [-0.98610914, -0.43010661, -0.10872950]]) """ if paddle.in_dynamic_mode(): return _C_ops.slogdeterminant(x) check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'slogdet') input_shape = list(x.shape) assert len(input_shape) >= 2, \ "The x must be at least 2-dimensional, " \ "but received Input x's dimensional: %s.\n" % \ len(input_shape) assert (input_shape[-1] == input_shape[-2]), \ "Expect squared input," \ "but received %s by %s matrix.\n" \ %(input_shape[-2], input_shape[-1]) \ helper = LayerHelper('slogdeterminant', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='slogdeterminant', inputs={'Input': [x]}, outputs={'Out': [out]}) return out def svd(x, full_matrices=False, name=None): r""" Computes the singular value decomposition of one matrix or a batch of regular matrices. Let :math:`X` be the input matrix or a batch of input matrices, the output should satisfies: .. math:: X = U * diag(S) * VT Args: x (Tensor): The input tensor. Its shape should be `[..., N, M]`, where `...` is zero or more batch dimensions. N and M can be arbitraty positive number. Note that if x is sigular matrices, the grad is numerical instable. The data type of x should be float32 or float64. full_matrices (bool): A flag to control the behavor of svd. If full_matrices = True, svd op will compute full U and V matrics, which means shape of U is `[..., N, N]`, shape of V is `[..., M, M]`. K = min(M, N). If full_matrices = False, svd op will use a economic method to store U and V. which means shape of U is `[..., N, K]`, shape of V is `[..., M, K]`. K = min(M, N). name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tuple of 3 tensors: (U, S, VH). VH is the conjugate transpose of V. S is the singlar value vectors of matrics with shape `[..., K]` Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [1.0, 3.0], [4.0, 6.0]]).astype('float64') x = x.reshape([3, 2]) u, s, vh = paddle.linalg.svd(x) print (u) #U = [[ 0.27364809, -0.21695147 ], # [ 0.37892198, -0.87112408 ], # [ 0.8840446 , 0.44053933 ]] print (s) #S = [8.14753743, 0.78589688] print (vh) #VT= [[ 0.51411221, 0.85772294], # [ 0.85772294, -0.51411221]] # one can verify : U * S * VT == X # U * UH == I # V * VH == I """ if paddle.in_dynamic_mode(): return _C_ops.svd(x, 'full_matrices', full_matrices) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'svd') check_type(full_matrices, 'full_matrices', bool, 'svd') helper = LayerHelper('svd', **locals()) u = helper.create_variable_for_type_inference(dtype=x.dtype) vh = helper.create_variable_for_type_inference(dtype=x.dtype) s = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['full_matrices'] = full_matrices helper.append_op( type='svd', inputs={'X': [x]}, outputs={'U': u, 'VH': vh, 'S': s}, attrs=attrs, ) return u, s, vh def matrix_power(x, n, name=None): r""" Computes the n-th power of a square matrix or a batch of square matrices. Let :math:`X` be a sqaure matrix or a batch of square matrices, :math:`n` be an exponent, the equation should be: .. math:: Out = X ^ {n} Specifically, - If `n > 0`, it returns the matrix or a batch of matrices raised to the power of `n`. - If `n = 0`, it returns the identity matrix or a batch of identity matrices. - If `n < 0`, it returns the inverse of each matrix (if invertible) raised to the power of `abs(n)`. 
Args: x (Tensor): A square matrix or a batch of square matrices to be raised to power `n`. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. n (int): The exponent. It can be any positive, negative integer or zero. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The n-th power of the matrix (or the batch of matrices) `x`. Its data type should be the same as that of `x`. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1, 2, 3], [1, 4, 9], [1, 8, 27]], dtype='float64') print(paddle.linalg.matrix_power(x, 2)) # [[6. , 34. , 102.], # [14. , 90. , 282.], # [36. , 250., 804.]] print(paddle.linalg.matrix_power(x, 0)) # [[1., 0., 0.], # [0., 1., 0.], # [0., 0., 1.]] print(paddle.linalg.matrix_power(x, -2)) # [[ 12.91666667, -12.75000000, 2.83333333 ], # [-7.66666667 , 8. , -1.83333333 ], # [ 1.80555556 , -1.91666667 , 0.44444444 ]] """ if paddle.in_dynamic_mode(): return _C_ops.matrix_power(x, "n", n) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'matrix_power') check_type(n, 'n', int, 'matrix_power') helper = LayerHelper('matrix_power', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matrix_power', inputs={'X': x}, outputs={'Out': out}, attrs={'n': n}) return out def qr(x, mode="reduced", name=None): r""" Computes the QR decomposition of one matrix or batches of matrice (backward is unsupported now). Args: x (Tensor): The input tensor. Its shape should be `[..., M, N]`, where ... is zero or more batch dimensions. M and N can be arbitrary positive number. The data type of x should be float32 or float64. mode (str, optional): A flag to control the behavior of qr, the default is "reduced". Suppose x's shape is `[..., M, N]` and denoting `K = min(M, N)`: If mode = "reduced", qr op will return reduced Q and R matrices, which means Q's shape is `[..., M, K]` and R's shape is `[..., K, N]`. If mode = "complete", qr op will return complete Q and R matrices, which means Q's shape is `[..., M, M]` and R's shape is `[..., M, N]`. If mode = "r", qr op will only return reduced R matrix, which means R's shape is `[..., K, N]`. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: If mode = "reduced" or mode = "complete", qr will return a two tensor-tuple, which represents Q and R. If mode = "r", qr will return a tensor which represents R. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') q, r = paddle.linalg.qr(x) print (q) print (r) # Q = [[-0.16903085, 0.89708523], # [-0.50709255, 0.27602622], # [-0.84515425, -0.34503278]]) # R = [[-5.91607978, -7.43735744], # [ 0. 
, 0.82807867]]) # one can verify : X = Q * R ; """ if paddle.in_dynamic_mode(): q, r = _C_ops.qr(x, 'mode', mode) if mode == "r": return r else: return q, r check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'qr') check_type(mode, 'mode', str, 'qr') helper = LayerHelper('qr', **locals()) q = helper.create_variable_for_type_inference(dtype=x.dtype) r = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['mode'] = mode helper.append_op( type='qr', inputs={'X': [x]}, outputs={'Q': q, 'R': r}, attrs=attrs) if mode == "r": return r else: return q, r def lu(x, pivot=True, get_infos=False, name=None): r""" Computes the LU factorization of an N-D(N>=2) matrix x. Returns the LU factorization(inplace x) and Pivots. low triangular matrix L and upper triangular matrix U are combined to a single LU matrix. Pivoting is done if pivot is set to True. P mat can be get by pivots: # ones = eye(rows) #eye matrix of rank rows # for i in range(cols): # swap(ones[i], ones[pivots[i]]) # return ones Args: X (Tensor): the tensor to factor of N-dimensions(N>=2). pivot (bool, optional): controls whether pivoting is done. Default: True. get_infos (bool, optional): if set to True, returns an info IntTensor. Default: False. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: factorization (Tensor): LU matrix, the factorization of input X. pivots (IntTensor): the pivots of size(∗(N-2), min(m,n)). `pivots` stores all the intermediate transpositions of rows. The final permutation `perm` could be reconstructed by this, details refer to upper example. infos (IntTensor, optional): if `get_infos` is `True`, this is a tensor of size (∗(N-2)) where non-zero values indicate whether factorization for the matrix or each minibatch has succeeded or failed. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') lu,p,info = paddle.linalg.lu(x, get_infos=True) # >>> lu: # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0.20000000, 0.80000000], # [0.60000000, 0.50000000]]) # >>> p # Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # [3, 3]) # >>> info # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # 0) P,L,U = paddle.linalg.lu_unpack(lu,p) # >>> P # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[0., 1., 0.], # [0., 0., 1.], # [1., 0., 0.]]), # >>> L # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[1. , 0. ], # [0.20000000, 1. ], # [0.60000000, 0.50000000]]), # >>> U # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0. 
, 0.80000000]])) # one can verify : X = P @ L @ U ; """ if paddle.in_dynamic_mode(): LU, Piv, Info = _C_ops.lu(x, 'pivots', pivot) if get_infos: return LU, Piv, Info else: return LU, Piv check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu') helper = LayerHelper('lu', **locals()) lu = helper.create_variable_for_type_inference(dtype=x.dtype) p = helper.create_variable_for_type_inference(dtype='int') info = helper.create_variable_for_type_inference(dtype='int') attrs = dict() attrs['pivots'] = pivot helper.append_op( type='lu', inputs={'X': x}, outputs={'Out': lu, 'Pivots': p, 'Infos': info}, attrs=attrs) if get_infos: return lu, p, info else: return lu, p def lu_unpack(x, y, unpack_ludata=True, unpack_pivots=True, name=None): r""" Unpack L U and P to single matrix tensor . unpack L and U matrix from LU, unpack permutation matrix P from Pivtos . P mat can be get by pivots: # ones = eye(rows) #eye matrix of rank rows # for i in range(cols): # swap(ones[i], ones[pivots[i]]) Args: x (Tensor): The LU tensor get from paddle.linalg.lu, which is combined by L and U. y (Tensor): Pivots get from paddle.linalg.lu. unpack_ludata (bool,optional): whether to unpack L and U from x. Default: True. unpack_pivots (bool, optional): whether to unpack permutation matrix P from Pivtos. Default: True. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: P (Tensor): Permutation matrix P of lu factorization. L (Tensor): The lower triangular matrix tensor of lu factorization. U (Tensor): The upper triangular matrix tensor of lu factorization. Examples: .. code-block:: python import paddle x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64') lu,p,info = paddle.linalg.lu(x, get_infos=True) # >>> lu: # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0.20000000, 0.80000000], # [0.60000000, 0.50000000]]) # >>> p # Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # [3, 3]) # >>> info # Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True, # 0) P,L,U = paddle.linalg.lu_unpack(lu,p) # >>> P # (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[0., 1., 0.], # [0., 0., 1.], # [1., 0., 0.]]), # >>> L # Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[1. , 0. ], # [0.20000000, 1. ], # [0.60000000, 0.50000000]]), # >>> U # Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True, # [[5. , 6. ], # [0. , 0.80000000]])) # one can verify : X = P @ L @ U ; """ if paddle.in_dynamic_mode(): P, L, U = _C_ops.lu_unpack(x, y, 'unpack_ludata', unpack_ludata, 'unpack_pivots', unpack_pivots) return P, L, U check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu_unpack') helper = LayerHelper('lu_unpack', **locals()) p = helper.create_variable_for_type_inference(dtype=x.dtype) l = helper.create_variable_for_type_inference(dtype=x.dtype) u = helper.create_variable_for_type_inference(dtype=x.dtype) attrs = dict() attrs['unpack_ludata'] = unpack_ludata attrs['unpack_pivots'] = unpack_pivots helper.append_op( type='lu_unpack', inputs={'X': x, 'Pivots': y}, outputs={'Pmat': p, 'L': l, 'U': u}, attrs=attrs) return p, l, u def eig(x, name=None): """ This API performs the eigenvalue decomposition of a square matrix or a batch of square matrices. .. 
note:: If the matrix is a Hermitian or a real symmetric matrix, please use :ref:`paddle.linalg.eigh` instead, which is much faster. If only eigenvalues is needed, please use :ref:`paddle.linalg.eigvals` instead. If the matrix is of any shape, please use :ref:`paddle.linalg.svd`. This API is only supported on CPU device. The output datatype is always complex for both real and complex input. Args: x (Tensor): A tensor with shape math:`[*, N, N]`, The data type of the x should be one of ``float32``, ``float64``, ``compplex64`` or ``complex128``. name (str, optional): The default value is `None`. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Eigenvalues(Tensors): A tensor with shape math:`[*, N]` refers to the eigen values. Eigenvectors(Tensors): A tensor with shape math:`[*, N, N]` refers to the eigen vectors. Examples: .. code-block:: python import paddle import numpy as np paddle.device.set_device("cpu") x_data = np.array([[1.6707249, 7.2249975, 6.5045543], [9.956216, 8.749598, 6.066444 ], [4.4251957, 1.7983172, 0.370647 ]]).astype("float32") x = paddle.to_tensor(x_data) w, v = paddle.linalg.eig(x) print(w) # Tensor(shape=[3, 3], dtype=complex128, place=CPUPlace, stop_gradient=False, # [[(-0.5061363550800655+0j) , (-0.7971760990842826+0j) , # (0.18518077798279986+0j)], # [(-0.8308237755993192+0j) , (0.3463813401919749+0j) , # (-0.6837005269141947+0j) ], # [(-0.23142567697893396+0j), (0.4944999840400175+0j) , # (0.7058765252952796+0j) ]]) print(v) # Tensor(shape=[3], dtype=complex128, place=CPUPlace, stop_gradient=False, # [ (16.50471283351188+0j) , (-5.5034820550763515+0j) , # (-0.21026087843552282+0j)]) """ if paddle.in_dynamic_mode(): w, v = _C_ops.eig(x) return w, v check_variable_and_dtype( x, 'X', ['float32', 'float64', 'complex64', 'complex128'], 'eig') helper = LayerHelper('eig', **locals()) w = helper.create_variable_for_type_inference(x.dtype) v = helper.create_variable_for_type_inference(x.dtype) inputs = {'X': x} outputs = {'Eigenvalues': w, 'Eigenvectors': v} helper.append_op(type='eig', inputs=inputs, outputs=outputs) return w, v def eigvals(x, name=None): """ Compute the eigenvalues of one or more general matrices. Warning: The gradient kernel of this operator does not yet developed. If you need back propagation through this operator, please replace it with paddle.linalg.eig. Args: x (Tensor): A square matrix or a batch of square matrices whose eigenvalues will be computed. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32, float64, complex64, or complex128. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: A tensor containing the unsorted eigenvalues which has the same batch dimensions with `x`. The eigenvalues are complex-valued even when `x` is real. Examples: .. 
code-block:: python import paddle paddle.set_device("cpu") paddle.seed(1234) x = paddle.rand(shape=[3, 3], dtype='float64') # [[0.02773777, 0.93004224, 0.06911496], # [0.24831591, 0.45733623, 0.07717843], # [0.48016702, 0.14235102, 0.42620817]]) print(paddle.linalg.eigvals(x)) # [(-0.27078833542132674+0j), (0.29962280156230725+0j), (0.8824477020120244+0j)] #complex128 """ check_variable_and_dtype(x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigvals') x_shape = list(x.shape) if len(x_shape) < 2: raise ValueError( "The dimension of Input(x) should be at least 2, but received x's dimention = {}, x's shape = {}". format(len(x_shape), x_shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The last two dimensions of Input(x) should be equal, but received x's shape = {}". format(x_shape)) if paddle.in_dynamic_mode(): return _C_ops.eigvals(x) helper = LayerHelper('eigvals', **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op(type='eigvals', inputs={'X': x}, outputs={'Out': out}) return out def multi_dot(x, name=None): """ Multi_dot is an operator that calculates multiple matrix multiplications. Supports inputs of float16(only GPU support), float32 and float64 dtypes. This function does not support batched inputs. The input tensor in [x] must be 2-D except for the first and last can be 1-D. If the first tensor is a 1-D vector of shape(n, ) it is treated as row vector of shape(1, n), similarly if the last tensor is a 1D vector of shape(n, ), it is treated as a column vector of shape(n, 1). If the first and last tensor are 2-D matrix, then the output is also 2-D matrix, otherwise the output is a 1-D vector. Multi_dot will select the lowest cost multiplication order for calculation. The cost of multiplying two matrices with shapes (a, b) and (b, c) is a * b * c. Given matrices A, B, C with shapes (20, 5), (5, 100), (100, 10) respectively, we can calculate the cost of different multiplication orders as follows: - Cost((AB)C) = 20x5x100 + 20x100x10 = 30000 - Cost(A(BC)) = 5x100x10 + 20x5x10 = 6000 In this case, multiplying B and C first, then multiply A, which is 5 times faster than sequential calculation. Args: x ([Tensor]): The input tensors which is a list Tensor. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The output Tensor. Examples: .. 
code-block:: python import paddle import numpy as np # A * B A_data = np.random.random([3, 4]).astype(np.float32) B_data = np.random.random([4, 5]).astype(np.float32) A = paddle.to_tensor(A_data) B = paddle.to_tensor(B_data) out = paddle.linalg.multi_dot([A, B]) print(out.numpy().shape) # [3, 5] # A * B * C A_data = np.random.random([10, 5]).astype(np.float32) B_data = np.random.random([5, 8]).astype(np.float32) C_data = np.random.random([8, 7]).astype(np.float32) A = paddle.to_tensor(A_data) B = paddle.to_tensor(B_data) C = paddle.to_tensor(C_data) out = paddle.linalg.multi_dot([A, B, C]) print(out.numpy().shape) # [10, 7] """ if paddle.in_dynamic_mode(): return _C_ops.multi_dot(x) check_type(x, 'x', (list, tuple), 'multi_dot') for id, item in enumerate(x): check_variable_and_dtype(item, 'x[' + str(id) + ']', ['float16', 'float32', 'float64'], 'multi_dot') if item.dtype != x[0].dtype: raise TypeError( "All the Tensors in the input must have the same data type.") helper = LayerHelper('multi_dot', **locals()) dtype = helper.input_dtype(input_param_name='x') out = helper.create_variable_for_type_inference(dtype) helper.append_op(type='multi_dot', inputs={"X": x}, outputs={"Out": out}) return out def eigh(x, UPLO='L', name=None): """ Compute the eigenvalues and eigenvectors of a complex Hermitian (conjugate symmetric) or a real symmetric matrix. Args: x (Tensor): A tensor with shape :math:`[*, N, N]` , The data type of the input Tensor x should be one of float32, float64, complex64, complex128. UPLO(str, optional): (string, default 'L'), 'L' represents the lower triangular matrix, "'U' represents the upper triangular matrix.". name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: out_value(Tensor): A Tensor with shape [*, N] and data type of float32 and float64. The eigenvalues of eigh op. out_vector(Tensor): A Tensor with shape [*, N, N] and data type of float32,float64,complex64 and complex128. The eigenvectors of eigh op. Examples: .. code-block:: python import numpy as np import paddle x_data = np.array([[1, -2j], [2j, 5]]) x = paddle.to_tensor(x_data) out_value, out_vector = paddle.linalg.eigh(x, UPLO='L') print(out_value) #[0.17157288, 5.82842712] print(out_vector) #[(-0.9238795325112867+0j), (-0.3826834323650898+0j)], #[ 0.3826834323650898j , -0.9238795325112867j ]] """ if paddle.in_dynamic_mode(): return _C_ops.eigh(x, 'UPLO', UPLO) def __check_input(x, UPLO): x_shape = list(x.shape) if len(x.shape) < 2: raise ValueError( "Input(input) only support >=2 tensor, but received " "length of Input(input) is %s." % len(x.shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The input matrix must be batches of square matrices. But received x's dimention: {}". format(x_shape)) if UPLO != 'L' and UPLO != 'U': raise ValueError( "UPLO must be L or U. 
But received UPLO is: {}".format(UPLO)) __check_input(x, UPLO) helper = LayerHelper('eigh', **locals()) check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigh') out_value = helper.create_variable_for_type_inference(dtype=x.dtype) out_vector = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='eigh', inputs={'X': x}, outputs={'Eigenvalues': out_value, 'Eigenvectors': out_vector}, attrs={'UPLO': UPLO}) return out_value, out_vector def pinv(x, rcond=1e-15, hermitian=False, name=None): r""" Calculate pseudo inverse via SVD(singular value decomposition) of one matrix or batches of regular matrix. .. math:: if hermitian == False: x = u * s * vt (SVD) out = v * 1/s * ut else: x = u * s * ut (eigh) out = u * 1/s * u.conj().transpose(-2,-1) If x is hermitian or symmetric matrix, svd will be replaced with eigh. Args: x(Tensor): The input tensor. Its shape should be (*, m, n) where * is zero or more batch dimensions. m and n can be arbitraty positive number. The data type of x should be float32 or float64 or complex64 or complex128. When data type is complex64 or cpmplex128, hermitian should be set True. rcond(Tensor, optional): the tolerance value to determine when is a singular value zero. Defalut:1e-15. hermitian(bool, optional): indicates whether x is Hermitian if complex or symmetric if real. Default: False. name(str|None): A name for this layer(optional). If set None, the layer will be named automatically. Returns: Tensor: The tensor with same data type with x. it represents pseudo inverse of x. Its shape should be (*, n, m). Examples: .. code-block:: python import paddle x = paddle.arange(15).reshape((3, 5)).astype('float64') input = paddle.to_tensor(x) out = paddle.linalg.pinv(input) print(input) print(out) # input: # [[0. , 1. , 2. , 3. , 4. ], # [5. , 6. , 7. , 8. , 9. 
], # [10., 11., 12., 13., 14.]] # out: # [[-0.22666667, -0.06666667, 0.09333333], # [-0.12333333, -0.03333333, 0.05666667], # [-0.02000000, 0.00000000, 0.02000000], # [ 0.08333333, 0.03333333, -0.01666667], # [ 0.18666667, 0.06666667, -0.05333333]] # one can verify : x * out * x = x ; # or out * x * out = x ; """ if paddle.in_dynamic_mode(): if not hermitian: # combine svd and matmul op u, s, vt = _C_ops.svd(x, 'full_matrices', False) max_singular_val = _C_ops.reduce_max(s, 'dim', [-1], 'keep_dim', True, \ 'reduce_all', False) rcond = paddle.to_tensor(rcond, dtype=x.dtype) cutoff = rcond * max_singular_val y = float('inf') y = paddle.to_tensor(y, dtype=x.dtype) condition = s > cutoff cond_int = layers.cast(condition, s.dtype) cond_not_int = layers.cast(layers.logical_not(condition), s.dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st, _ = _C_ops.unsqueeze2(singular, 'axes', [-2]) dims = list(range(len(vt.shape))) perm = dims[:-2] + [dims[-1]] + [dims[-2]] v, _ = _C_ops.transpose2(vt, 'axis', perm) out_1 = v * st out_2 = _C_ops.matmul_v2(out_1, u, 'trans_x', False, 'trans_y', True) return out_2 else: # combine eigh and matmul op s, u = _C_ops.eigh(x, 'UPLO', 'L') s_abs = paddle.abs(s) max_singular_val = _C_ops.reduce_max(s_abs, 'dim', [-1], 'keep_dim', True, \ 'reduce_all', False) rcond = paddle.to_tensor(rcond, dtype=s.dtype) cutoff = rcond * max_singular_val y = float('inf') y = paddle.to_tensor(y, dtype=s.dtype) condition = s_abs > cutoff cond_int = layers.cast(condition, s.dtype) cond_not_int = layers.cast(layers.logical_not(condition), s.dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st, _ = _C_ops.unsqueeze2(singular, 'axes', [-2]) out_1 = u * st u_conj = _C_ops.conj(u) out_2 = _C_ops.matmul_v2(out_1, u_conj, 'trans_x', False, 'trans_y', True) return out_2 else: if not hermitian: helper = LayerHelper('pinv', **locals()) dtype = x.dtype check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'pinv') u = helper.create_variable_for_type_inference(dtype) s = helper.create_variable_for_type_inference(dtype) vt = helper.create_variable_for_type_inference(dtype) helper.append_op( type='svd', inputs={'X': [x]}, outputs={'U': u, 'VH': vt, 'S': s}, attrs={'full_matrices': False}, ) max_singular_val = helper.create_variable_for_type_inference(dtype) helper.append_op( type='reduce_max', inputs={'X': s}, outputs={'Out': max_singular_val}, attrs={'dim': [-1], 'keep_dim': True, 'reduce_all': False}) rcond = layers.fill_constant(shape=[1], value=rcond, dtype=dtype) cutoff = rcond * max_singular_val y = float('inf') y = layers.fill_constant(shape=[1], value=y, dtype=dtype) condition = s > cutoff cond_int = layers.cast(condition, dtype) cond_not_int = layers.cast(layers.logical_not(condition), dtype) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st = helper.create_variable_for_type_inference(dtype=dtype) st_shape = helper.create_variable_for_type_inference(dtype=dtype) helper.append_op( type='unsqueeze2', inputs={'X': singular}, attrs={'axes': [-2]}, outputs={'Out': st, 'XShape': st_shape}) dims = list(range(len(vt.shape))) perm = dims[:-2] + [dims[-1]] + [dims[-2]] v = helper.create_variable_for_type_inference(dtype) v_shape = helper.create_variable_for_type_inference(dtype) helper.append_op( 
type='transpose2', inputs={'X': [vt]}, outputs={'Out': [v], 'XShape': [v_shape]}, attrs={'axis': perm}) out_1 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='elementwise_mul', inputs={'X': v, 'Y': st}, outputs={'Out': out_1}, attrs={'axis': -1, 'use_mkldnn': False}) out_1 = helper.append_activation(out_1) out_2 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='matmul_v2', inputs={'X': out_1, 'Y': u}, outputs={'Out': out_2}, attrs={'trans_x': False, 'trans_y': True}, ) return out_2 else: helper = LayerHelper('pinv', **locals()) dtype = x.dtype check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'pinv') if dtype == paddle.complex128: s_type = 'float64' elif dtype == paddle.complex64: s_type = 'float32' else: s_type = dtype u = helper.create_variable_for_type_inference(dtype) s = helper.create_variable_for_type_inference(s_type) helper.append_op( type='eigh', inputs={'X': x}, outputs={'Eigenvalues': s, 'Eigenvectors': u}, attrs={'UPLO': 'L'}) s_abs = helper.create_variable_for_type_inference(s_type) helper.append_op( type='abs', inputs={'X': s}, outputs={'Out': s_abs}) max_singular_val = helper.create_variable_for_type_inference(s_type) helper.append_op( type='reduce_max', inputs={'X': s_abs}, outputs={'Out': max_singular_val}, attrs={'dim': [-1], 'keep_dim': True, 'reduce_all': False}) rcond = layers.fill_constant(shape=[1], value=rcond, dtype=s_type) cutoff = rcond * max_singular_val y = float('inf') y = layers.fill_constant(shape=[1], value=y, dtype=s_type) condition = s_abs > cutoff cond_int = layers.cast(condition, s_type) cond_not_int = layers.cast(layers.logical_not(condition), s_type) out1 = layers.elementwise_mul(1 / s, cond_int) out2 = layers.elementwise_mul(1 / y, cond_not_int) singular = layers.elementwise_add(out1, out2) st = helper.create_variable_for_type_inference(dtype=s_type) st_shape = helper.create_variable_for_type_inference(dtype=s_type) helper.append_op( type='unsqueeze2', inputs={'X': singular}, attrs={'axes': [-2]}, outputs={'Out': st, 'XShape': st_shape}) out_1 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='elementwise_mul', inputs={'X': u, 'Y': st}, outputs={'Out': out_1}, attrs={'axis': -1, 'use_mkldnn': False}) out_1 = helper.append_activation(out_1) u_conj = helper.create_variable_for_type_inference(dtype) helper.append_op( type='conj', inputs={'X': u}, outputs={'Out': [u_conj]}) out_2 = helper.create_variable_for_type_inference(dtype) helper.append_op( type='matmul_v2', inputs={'X': out_1, 'Y': u_conj}, outputs={'Out': out_2}, attrs={'trans_x': False, 'trans_y': True}, ) return out_2 def solve(x, y, name=None): r""" Computes the solution of a square system of linear equations with a unique solution for input 'X' and 'Y'. Let :math: `X` be a sqaure matrix or a batch of square matrices, :math:`Y` be a vector/matrix or a batch of vectors/matrices, the equation should be: .. math:: Out = X^-1 * Y Specifically, - This system of linear equations has one solution if and only if input 'X' is invertible. Args: x (Tensor): A square matrix or a batch of square matrices. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): A vector/matrix or a batch of vectors/matrices. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. name(str, optional): Name for the operation (optional, default is None). 
For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of a square system of linear equations with a unique solution for input 'x' and 'y'. Its data type should be the same as that of `x`. Examples: .. code-block:: python # a square system of linear equations: # 2*X0 + X1 = 9 # X0 + 2*X1 = 8 import paddle import numpy as np np_x = np.array([[3, 1],[1, 2]]) np_y = np.array([9, 8]) x = paddle.to_tensor(np_x, dtype="float64") y = paddle.to_tensor(np_y, dtype="float64") out = paddle.linalg.solve(x, y) print(out) # [2., 3.]) """ if paddle.in_dynamic_mode(): return _C_ops.solve(x, y) inputs = {"X": [x], "Y": [y]} helper = LayerHelper("solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type="solve", inputs={"X": x, "Y": y}, outputs={"Out": out}) return out def triangular_solve(x, y, upper=True, transpose=False, unitriangular=False, name=None): r""" Computes the solution of a system of equations with a triangular coefficient matrix `x` and multiple right-hand sides `y` . Input `x` and `y` is 2D matrices or batches of 2D matrices. If the inputs are batches, the outputs is also batches. Args: x (Tensor): The input triangular coefficient matrix. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. upper (bool, optional): Whether to solve the upper-triangular system of equations (default) or the lower-triangular system of equations. Default: True. transpose (bool, optional): whether `x` should be transposed before calculation. Default: False. unitriangular (bool, optional): whether `x` is unit triangular. If True, the diagonal elements of `x` are assumed to be 1 and not referenced from `x` . Default: False. name(str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of the system of equations. Its data type should be the same as that of `x`. Examples: .. code-block:: python # a square system of linear equations: # x1 + x2 + x3 = 0 # 2*x2 + x3 = -9 # -x3 = 5 import paddle import numpy as np x = paddle.to_tensor([[1, 1, 1], [0, 2, 1], [0, 0,-1]], dtype="float64") y = paddle.to_tensor([[0], [-9], [5]], dtype="float64") out = paddle.linalg.triangular_solve(x, y, upper=True) print(out) # [7, -2, -5] """ if paddle.in_dynamic_mode(): return _C_ops.triangular_solve(x, y, 'upper', upper, 'transpose', transpose, 'unitriangular', unitriangular) inputs = {"X": [x], "Y": [y]} helper = LayerHelper("triangular_solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'triangular_solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'triangular_solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='triangular_solve', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs={ 'upper': upper, 'transpose': transpose, 'unitriangular': unitriangular }) return out def cholesky_solve(x, y, upper=False, name=None): r""" Solves a linear system of equations A @ X = B, given A's Cholesky factor matrix u and matrix B. Input `x` and `y` is 2D matrices or batches of 2D matrices. 
If the inputs are batches, the outputs is also batches. Args: x (Tensor): The input matrix which is upper or lower triangular Cholesky factor of square matrix A. Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. y (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is zero or more batch dimensions. Its data type should be float32 or float64. upper (bool, optional): whether to consider the Cholesky factor as a lower or upper triangular matrix. Default: False. name(str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The solution of the system of equations. Its data type is the same as that of `x`. Examples: .. code-block:: python import paddle u = paddle.to_tensor([[1, 1, 1], [0, 2, 1], [0, 0,-1]], dtype="float64") b = paddle.to_tensor([[0], [-9], [5]], dtype="float64") out = paddle.linalg.cholesky_solve(b, u, upper=True) print(out) # [-2.5, -7, 9.5] """ if paddle.in_dynamic_mode(): return _C_ops.cholesky_solve(x, y, 'upper', upper) helper = LayerHelper("cholesky_solve", **locals()) check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'cholesky_solve') check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'cholesky_solve') out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='cholesky_solve', inputs={'X': x, 'Y': y}, outputs={'Out': out}, attrs={'upper': upper}) return out def eigvalsh(x, UPLO='L', name=None): """ Computes the eigenvalues of a complex Hermitian (conjugate symmetric) or a real symmetric matrix. Args: x (Tensor): A tensor with shape :math:`[_, M, M]` , The data type of the input Tensor x should be one of float32, float64, complex64, complex128. UPLO(str, optional): Lower triangular part of a (‘L’, default) or the upper triangular part (‘U’). name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tensor: The tensor eigenvalues in ascending order. Examples: .. code-block:: python import numpy as np import paddle x_data = np.array([[1, -2j], [2j, 5]]) x = paddle.to_tensor(x_data) out_value = paddle.eigvalsh(x, UPLO='L') print(out_value) #[0.17157288, 5.82842712] """ if paddle.in_dynamic_mode(): is_test = x.stop_gradient values, _ = _C_ops.eigvalsh(x, 'UPLO', UPLO, 'is_test', is_test) return values def __check_input(x, UPLO): x_shape = list(x.shape) if len(x.shape) < 2: raise ValueError( "Input(input) only support >=2 tensor, but received " "length of Input(input) is %s." % len(x.shape)) if x_shape[-1] != x_shape[-2]: raise ValueError( "The input matrix must be batches of square matrices. But received x's dimention: {}". format(x_shape)) if UPLO != 'L' and UPLO != 'U': raise ValueError( "UPLO must be L or U. 
But received UPLO is: {}".format(UPLO)) __check_input(x, UPLO) helper = LayerHelper('eigvalsh', **locals()) check_variable_and_dtype(x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigvalsh') out_value = helper.create_variable_for_type_inference(dtype=x.dtype) out_vector = helper.create_variable_for_type_inference(dtype=x.dtype) is_test = x.stop_gradient helper.append_op( type='eigvalsh', inputs={'X': x}, outputs={'Eigenvalues': out_value, 'Eigenvectors': out_vector}, attrs={'UPLO': UPLO, 'is_test': is_test}) return out_value def lstsq(x, y, rcond=None, driver=None, name=None): """ Computes a solution to the least squares problem of a system of linear equations. Args: x (Tensor): A tensor with shape ``(*, M, N)`` , the data type of the input Tensor ``x`` should be one of float32, float64. y (Tensor): A tensor with shape ``(*, M, K)`` , the data type of the input Tensor ``y`` should be one of float32, float64. rcond(float, optional): The default value is None. A float pointing number used to determine the effective rank of ``x``. If ``rcond`` is None, it will be set to max(M, N) times the machine precision of x_dtype. driver(str, optional): The default value is None. The name of LAPACK method to be used. For CPU inputs the valid values are ‘gels’, ‘gelsy’, ‘gelsd, ‘gelss’. For CUDA input, the only valid driver is ‘gels’. If ``driver`` is None, ‘gelsy’ is used for CPU inputs and ‘gels’ for CUDA inputs. name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Tuple: A tuple of 4 Tensors which is (``solution``, ``residuals``, ``rank``, ``singular_values``). ``solution`` is a tensor with shape ``(*, N, K)``, meaning the least squares solution. ``residuals`` is a tensor with shape ``(*, K)``, meaning the squared residuals of the solutions, which is computed when M > N and every matrix in ``x`` is full-rank, otherwise return an empty tensor. ``rank`` is a tensor with shape ``(*)``, meaning the ranks of the matrices in ``x``, which is computed when ``driver`` in (‘gelsy’, ‘gelsd’, ‘gelss’), otherwise return an empty tensor. ``singular_values`` is a tensor with shape ``(*, min(M, N))``, meaning singular values of the matrices in ``x``, which is computed when ``driver`` in (‘gelsd’, ‘gelss’), otherwise return an empty tensor. Examples: .. code-block:: python import paddle paddle.set_device("cpu") x = paddle.to_tensor([[1, 3], [3, 2], [5, 6.]]) y = paddle.to_tensor([[3, 4, 6], [5, 3, 4], [1, 2, 1.]]) results = paddle.linalg.lstsq(x, y, driver="gelsd") print(results[0]) # [[ 0.78350395, -0.22165027, -0.62371236], # [-0.11340097, 0.78866047, 1.14948535]] print(results[1]) # [19.81443405, 10.43814468, 30.56185532]) print(results[2]) # 2 print(results[3]) # [9.03455734, 1.54167950] x = paddle.to_tensor([[10, 2, 3], [3, 10, 5], [5, 6, 12.]]) y = paddle.to_tensor([[4, 2, 9], [2, 0, 3], [2, 5, 3.]]) results = paddle.linalg.lstsq(x, y, driver="gels") print(results[0]) # [[ 0.39386186, 0.10230173, 0.93606132], # [ 0.10741687, -0.29028133, 0.11892585], # [-0.05115091, 0.51918161, -0.19948854]] print(results[1]) # [] """ device = paddle.get_device() if device == "cpu": if driver not in (None, "gels", "gelss", "gelsd", "gelsy"): raise ValueError( "Only support valid driver is 'gels', 'gelss', 'gelsd', 'gelsy' or None for CPU inputs. But got {}". 
format(driver)) driver = "gelsy" if driver is None else driver elif "gpu" in device: if driver not in (None, "gels"): raise ValueError( "Only support valid driver is 'gels' or None for CUDA inputs. But got {}". format(driver)) driver = "gels" if driver is None else driver else: raise RuntimeError("Only support lstsq api for CPU or CUDA device.") if x.dtype == y.dtype and x.dtype in (paddle.float32, paddle.float64): pass else: raise ValueError( "Only support x and y have the same dtype such as 'float32' and 'float64'." ) if rcond is None: if x.dtype == paddle.float32: rcond = 1e-7 * max(x.shape[-2], x.shape[-1]) elif x.dtype == paddle.float64: rcond = 1e-15 * max(x.shape[-2], x.shape[-1]) if paddle.in_dynamic_mode(): solution, rank, singular_values = _C_ops.lstsq(x, y, "rcond", rcond, "driver", driver) if x.shape[-2] > x.shape[-1]: matmul_out = _varbase_creator(dtype=x.dtype) _C_ops.matmul(x, solution, matmul_out, 'trans_x', False, 'trans_y', False) minus_out = _C_ops.elementwise_sub(matmul_out, y) pow_out = _C_ops.pow(minus_out, 'factor', 2) residuals = _C_ops.reduce_sum(pow_out, 'dim', [-2], 'keepdim', False, 'reduce_all', False) else: residuals = paddle.empty(shape=[0], dtype=x.dtype) if driver == "gels": rank = paddle.empty(shape=[0], dtype=paddle.int32) singular_values = paddle.empty(shape=[0], dtype=x.dtype) elif driver == "gelsy": singular_values = paddle.empty(shape=[0], dtype=x.dtype) return solution, residuals, rank, singular_values helper = LayerHelper('lstsq', **locals()) check_variable_and_dtype( x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq') check_variable_and_dtype( y, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq') solution = helper.create_variable_for_type_inference(dtype=x.dtype) residuals = helper.create_variable_for_type_inference(dtype=x.dtype) rank = helper.create_variable_for_type_inference(dtype=paddle.int32) singular_values = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='lstsq', inputs={'X': x, 'Y': y}, outputs={ 'Solution': solution, 'Rank': rank, 'SingularValues': singular_values }, attrs={'rcond': rcond, 'driver': driver}) matmul_out = helper.create_variable_for_type_inference(dtype=x.dtype) minus_out = helper.create_variable_for_type_inference(dtype=x.dtype) pow_out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='matmul_v2', inputs={'X': x, 'Y': solution}, outputs={'Out': matmul_out}, attrs={ 'trans_x': False, 'trans_y': False, }) helper.append_op( type='elementwise_sub', inputs={'X': matmul_out, 'Y': y}, outputs={'Out': minus_out}) helper.append_op( type='pow', inputs={'X': minus_out}, outputs={'Out': pow_out}, attrs={'factor': 2}) helper.append_op( type='reduce_sum', inputs={'X': pow_out}, outputs={'Out': residuals}, attrs={'dim': [-2], 'keep_dim': False, 'reduce_all': False}) if driver == "gels": rank = paddle.static.data(name='rank', shape=[0]) singular_values = paddle.static.data(name='singular_values', shape=[0]) elif driver == "gelsy": singular_values = paddle.static.data(name='singular_values', shape=[0]) return solution, residuals, rank, singular_values
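As a quick sanity check of the cholesky_solve example above (a minimal sketch, assuming Paddle is installed; it reuses the values from the docstring and only verifies that the returned solution satisfies (u^T u) x = b):

import paddle

u = paddle.to_tensor([[1, 1, 1], [0, 2, 1], [0, 0, -1]], dtype="float64")
b = paddle.to_tensor([[0], [-9], [5]], dtype="float64")
x = paddle.linalg.cholesky_solve(b, u, upper=True)  # solution ~ [[-2.5], [-7.0], [9.5]]

# With upper=True, u is treated as the upper Cholesky factor, so A = u^T @ u
# and A @ x should reproduce the right-hand side b.
print(paddle.matmul(paddle.matmul(u, u, transpose_x=True), x))  # ~ [[0.], [-9.], [5.]]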
on_off_command
Send an on or off command to an appliance

        Sends the specified command to the homevision through netio interface to control the specified appliance.

        Args:
            details: {"appliance": string, "state": string}
import socket class UserException(Exception): pass def user_exception(s): raise UserException(s) class Macro: """Represents a macro to be run""" def __init__(self, code): """code: int - index of macro to run""" self.code = code class Command: """Represents a macro to be run""" def __init__(self, command): """command: string - command to send""" self.command = command class HomeVisionController: def __init__( self, ip_address, port, auth, on_off_appliance_codes={}, actions={}, process_actions={}, var_queries={}, flag_queries={}, flag_return_values = {True: ["True", "On", "Yes", "Occupied", "Set", "1"], False: ["False", "Off", "No", "Vacant", "Clear", "0"]}, on_off_commands = None ): """ Args: ip_address: string port: int auth: string - key for authenticating with netio on_off_appliance_codes: dict[string] => int - codes to be fed to 'on_off_commands' for each appliance actions: dict[string] => Macro/Command/(_, _, ...) - named actions to be completed process_actions: dict[string] => {"START": X, "STOP": X} where X is Macro/Command/(_, _, ...) - named processes to be started and stopped var_queries: dict[string] => int - mapping of names to variable indexes flag_queries: dict[string] => int - mapping of names to flag indexes flag_return_values: {True: [string], False: [string]} - synonyms for true and false that are returned by netio 'read flag command'. (ignore if you haven't set them up) on_off_commands: {"ON": (int) => Macro/Command/(_, _, ...), "OFF": (int) => Macro/Command} - how to handle on and off commands """ self.ip_address = ip_address self.port = port self.auth = auth self.on_off_appliance_codes = on_off_appliance_codes self.actions = actions self.process_actions = process_actions self.var_queries = var_queries self.flag_queries = flag_queries self.flag_return_values = flag_return_values self.on_off_commands = on_off_commands # MASKED: on_off_command function (lines 61-84) def action_command(self, details): """Send an action command Sends the specified command to the homevision through netio interface. Args: details: {"command": string} """ if "command" not in details: raise Exception("Command not specified") if details["command"] not in self.actions.keys(): raise Exception("Command not supported. Must be one of: " + ",".join(self.actions.keys())) self._handle_action(self.actions[details["command"]]) def start_stop_command(self, details): """Starts or stops a process Sends the specified command to the homevision through netio interface to control the specified process. Args: details: {"action": string, "process": string} """ if "action" not in details: raise Exception("action not specified") elif "process" not in details: raise Exception("process not specified") if details["process"] not in self.process_actions.keys(): raise Exception("process not supported. Must be one of: " + ",".join(self.process_actions.keys())) if details['action'] == "START": self._handle_action(self.process_actions[details["process"]]["START"]) elif details["action"] == "STOP": self._handle_action(self.process_actions[details["process"]]["STOP"]) else: raise Exception("action not supported. Must be either \"START\" or \"STOP\".") def _handle_action(self, action): def handle_single(a): if type(a) == Macro: self._run_macro(a.code) elif type(a) == Command: self._send_command(a.command) elif type(a) == Exception: raise a else: raise Exception("Internal Error: invalid action type. 
Should be Macro, Command or Exception") if type(action) == tuple: for a in action: handle_single(a) else: handle_single(action) def var_query(self, details): """Returns the answer to a query on variable Returns the answer to a query on the specified variable using netio Args: details: {"query": string} """ if "query" not in details: raise Exception("query not specified") if details["query"] not in self.var_queries.keys(): raise Exception("query not supported. Must be one of: " + ",".join(self.var_queries.keys())) code = self.var_queries[details["query"]] if type(code) == int: val = self._get_var(code) elif type(code) == tuple: val = [self._get_var(c) for c in code] else: raise Exception("Internal Exception: variable code is not valid") return val def flag_query(self, details): """Returns the answer to a query on flag Returns the answer to a query on the specified variable using netio Args: details: {"query": string} """ if "query" not in details: raise Exception("query not specified") if details["query"] not in self.flag_queries.keys(): raise Exception("query not supported. Must be one of: " + ",".join(self.flag_queries.keys())) val = self._get_flag(self.flag_queries[details["query"]]) return "yes" if val else "no" def _switch_on(self, code): if self.on_off_commands == None: raise Exception("No On/Off command set") self._handle_action(self.on_off_commands["ON"](code)) def _switch_off(self, code): if self.on_off_commands == None: raise Exception("No On/Off command set") self._handle_action(self.on_off_commands["OFF"](code)) def _run_macro(self, code): self._send_command(b'action macro run ' + bytes(str(code), encoding="ascii") + b'; __wait 100') def _send_command(self, command): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((self.ip_address, self.port)) s.send(bytes("auth " + self.auth + "\n", encoding="ascii")) s.send(command) s.close() def _get_var(self, id): return int(self._run_read_command(b"get var state " + bytes(str(id), encoding="ascii"))) def _get_flag(self, id): ret = self._run_read_command(b"get flag state " + bytes(str(id), encoding="ascii")) if ret in self.flag_return_values[False]: return False elif ret in self.flag_return_values[True]: return True else: raise Exception("Flag value not supported: " + ret) def _run_read_command(self, command): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((self.ip_address, self.port)) s.send(bytes("auth " + self.auth + "\n", encoding="ascii")) s.recv(10) s.send(command) s.send(b'\n') response = s.recv(10).decode(encoding="ascii").rstrip() s.close() return response
def on_off_command(self, details):
        """Send an on or off command to an appliance

        Sends the specified command to the homevision through netio interface to control the specified appliance.

        Args:
            details: {"appliance": string, "state": string}
        """
        if "appliance" not in details:
            raise Exception("appliance not specified")
        elif "state" not in details:
            raise Exception("state not specified")

        if details["appliance"] not in self.on_off_appliance_codes.keys():
            raise Exception("appliance not supported. Must be one of: " + ",".join(self.on_off_appliance_codes.keys()))

        appliance_code = self.on_off_appliance_codes[details["appliance"]]

        if details['state'] == "ON":
            self._switch_on(appliance_code)
        elif details["state"] == "OFF":
            self._switch_off(appliance_code)
        else:
            raise Exception("state not supported. Must be either \"ON\" or \"OFF\".")
61
84
import socket class UserException(Exception): pass def user_exception(s): raise UserException(s) class Macro: """Represents a macro to be run""" def __init__(self, code): """code: int - index of macro to run""" self.code = code class Command: """Represents a macro to be run""" def __init__(self, command): """command: string - command to send""" self.command = command class HomeVisionController: def __init__( self, ip_address, port, auth, on_off_appliance_codes={}, actions={}, process_actions={}, var_queries={}, flag_queries={}, flag_return_values = {True: ["True", "On", "Yes", "Occupied", "Set", "1"], False: ["False", "Off", "No", "Vacant", "Clear", "0"]}, on_off_commands = None ): """ Args: ip_address: string port: int auth: string - key for authenticating with netio on_off_appliance_codes: dict[string] => int - codes to be fed to 'on_off_commands' for each appliance actions: dict[string] => Macro/Command/(_, _, ...) - named actions to be completed process_actions: dict[string] => {"START": X, "STOP": X} where X is Macro/Command/(_, _, ...) - named processes to be started and stopped var_queries: dict[string] => int - mapping of names to variable indexes flag_queries: dict[string] => int - mapping of names to flag indexes flag_return_values: {True: [string], False: [string]} - synonyms for true and false that are returned by netio 'read flag command'. (ignore if you haven't set them up) on_off_commands: {"ON": (int) => Macro/Command/(_, _, ...), "OFF": (int) => Macro/Command} - how to handle on and off commands """ self.ip_address = ip_address self.port = port self.auth = auth self.on_off_appliance_codes = on_off_appliance_codes self.actions = actions self.process_actions = process_actions self.var_queries = var_queries self.flag_queries = flag_queries self.flag_return_values = flag_return_values self.on_off_commands = on_off_commands def on_off_command(self, details): """Send an on or off command to an appliance Sends the specified command to the homevision through netio interface to control the specified appliance. Args: details: {"appliance": string, "state": string} """ if "appliance" not in details: raise Exception("appliance not specified") elif "state" not in details: raise Exception("state not specified") if details["appliance"] not in self.on_off_appliance_codes.keys(): raise Exception("appliance not supported. Must be one of: " + ",".join(self.on_off_appliance_codes.keys())) appliance_code = self.on_off_appliance_codes[details["appliance"]] if details['state'] == "ON": self._switch_on(appliance_code) elif details["state"] == "OFF": self._switch_off(appliance_code) else: raise Exception("state not supported. Must be either \"ON\" or \"OFF\".") def action_command(self, details): """Send an action command Sends the specified command to the homevision through netio interface. Args: details: {"command": string} """ if "command" not in details: raise Exception("Command not specified") if details["command"] not in self.actions.keys(): raise Exception("Command not supported. Must be one of: " + ",".join(self.actions.keys())) self._handle_action(self.actions[details["command"]]) def start_stop_command(self, details): """Starts or stops a process Sends the specified command to the homevision through netio interface to control the specified process. 
Args: details: {"action": string, "process": string} """ if "action" not in details: raise Exception("action not specified") elif "process" not in details: raise Exception("process not specified") if details["process"] not in self.process_actions.keys(): raise Exception("process not supported. Must be one of: " + ",".join(self.process_actions.keys())) if details['action'] == "START": self._handle_action(self.process_actions[details["process"]]["START"]) elif details["action"] == "STOP": self._handle_action(self.process_actions[details["process"]]["STOP"]) else: raise Exception("action not supported. Must be either \"START\" or \"STOP\".") def _handle_action(self, action): def handle_single(a): if type(a) == Macro: self._run_macro(a.code) elif type(a) == Command: self._send_command(a.command) elif type(a) == Exception: raise a else: raise Exception("Internal Error: invalid action type. Should be Macro, Command or Exception") if type(action) == tuple: for a in action: handle_single(a) else: handle_single(action) def var_query(self, details): """Returns the answer to a query on variable Returns the answer to a query on the specified variable using netio Args: details: {"query": string} """ if "query" not in details: raise Exception("query not specified") if details["query"] not in self.var_queries.keys(): raise Exception("query not supported. Must be one of: " + ",".join(self.var_queries.keys())) code = self.var_queries[details["query"]] if type(code) == int: val = self._get_var(code) elif type(code) == tuple: val = [self._get_var(c) for c in code] else: raise Exception("Internal Exception: variable code is not valid") return val def flag_query(self, details): """Returns the answer to a query on flag Returns the answer to a query on the specified variable using netio Args: details: {"query": string} """ if "query" not in details: raise Exception("query not specified") if details["query"] not in self.flag_queries.keys(): raise Exception("query not supported. Must be one of: " + ",".join(self.flag_queries.keys())) val = self._get_flag(self.flag_queries[details["query"]]) return "yes" if val else "no" def _switch_on(self, code): if self.on_off_commands == None: raise Exception("No On/Off command set") self._handle_action(self.on_off_commands["ON"](code)) def _switch_off(self, code): if self.on_off_commands == None: raise Exception("No On/Off command set") self._handle_action(self.on_off_commands["OFF"](code)) def _run_macro(self, code): self._send_command(b'action macro run ' + bytes(str(code), encoding="ascii") + b'; __wait 100') def _send_command(self, command): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((self.ip_address, self.port)) s.send(bytes("auth " + self.auth + "\n", encoding="ascii")) s.send(command) s.close() def _get_var(self, id): return int(self._run_read_command(b"get var state " + bytes(str(id), encoding="ascii"))) def _get_flag(self, id): ret = self._run_read_command(b"get flag state " + bytes(str(id), encoding="ascii")) if ret in self.flag_return_values[False]: return False elif ret in self.flag_return_values[True]: return True else: raise Exception("Flag value not supported: " + ret) def _run_read_command(self, command): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((self.ip_address, self.port)) s.send(bytes("auth " + self.auth + "\n", encoding="ascii")) s.recv(10) s.send(command) s.send(b'\n') response = s.recv(10).decode(encoding="ascii").rstrip() s.close() return response
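A minimal usage sketch for on_off_command (the module name, host, port, auth key and macro indexes below are hypothetical; on_off_commands maps "ON"/"OFF" to callables that turn an appliance code into a Macro or Command):

from homevision_netio_controller import HomeVisionController, Macro  # hypothetical module name

controller = HomeVisionController(
    ip_address="192.168.1.50",           # hypothetical HomeVision netio host
    port=10001,                          # hypothetical netio port
    auth="secret-key",                   # hypothetical auth key
    on_off_appliance_codes={"lamp": 3},  # appliance name -> code
    on_off_commands={
        "ON": lambda code: Macro(code),        # e.g. macro <code> switches the appliance on
        "OFF": lambda code: Macro(code + 50),  # e.g. macro <code> + 50 switches it off
    },
)

controller.on_off_command({"appliance": "lamp", "state": "ON"})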
action_command
Send an action command

        Sends the specified command to the homevision through netio interface.

        Args:
            details: {"command": string}
import socket class UserException(Exception): pass def user_exception(s): raise UserException(s) class Macro: """Represents a macro to be run""" def __init__(self, code): """code: int - index of macro to run""" self.code = code class Command: """Represents a macro to be run""" def __init__(self, command): """command: string - command to send""" self.command = command class HomeVisionController: def __init__( self, ip_address, port, auth, on_off_appliance_codes={}, actions={}, process_actions={}, var_queries={}, flag_queries={}, flag_return_values = {True: ["True", "On", "Yes", "Occupied", "Set", "1"], False: ["False", "Off", "No", "Vacant", "Clear", "0"]}, on_off_commands = None ): """ Args: ip_address: string port: int auth: string - key for authenticating with netio on_off_appliance_codes: dict[string] => int - codes to be fed to 'on_off_commands' for each appliance actions: dict[string] => Macro/Command/(_, _, ...) - named actions to be completed process_actions: dict[string] => {"START": X, "STOP": X} where X is Macro/Command/(_, _, ...) - named processes to be started and stopped var_queries: dict[string] => int - mapping of names to variable indexes flag_queries: dict[string] => int - mapping of names to flag indexes flag_return_values: {True: [string], False: [string]} - synonyms for true and false that are returned by netio 'read flag command'. (ignore if you haven't set them up) on_off_commands: {"ON": (int) => Macro/Command/(_, _, ...), "OFF": (int) => Macro/Command} - how to handle on and off commands """ self.ip_address = ip_address self.port = port self.auth = auth self.on_off_appliance_codes = on_off_appliance_codes self.actions = actions self.process_actions = process_actions self.var_queries = var_queries self.flag_queries = flag_queries self.flag_return_values = flag_return_values self.on_off_commands = on_off_commands def on_off_command(self, details): """Send an on or off command to an appliance Sends the specified command to the homevision through netio interface to control the specified appliance. Args: details: {"appliance": string, "state": string} """ if "appliance" not in details: raise Exception("appliance not specified") elif "state" not in details: raise Exception("state not specified") if details["appliance"] not in self.on_off_appliance_codes.keys(): raise Exception("appliance not supported. Must be one of: " + ",".join(self.on_off_appliance_codes.keys())) appliance_code = self.on_off_appliance_codes[details["appliance"]] if details['state'] == "ON": self._switch_on(appliance_code) elif details["state"] == "OFF": self._switch_off(appliance_code) else: raise Exception("state not supported. Must be either \"ON\" or \"OFF\".") # MASKED: action_command function (lines 86-100) def start_stop_command(self, details): """Starts or stops a process Sends the specified command to the homevision through netio interface to control the specified process. Args: details: {"action": string, "process": string} """ if "action" not in details: raise Exception("action not specified") elif "process" not in details: raise Exception("process not specified") if details["process"] not in self.process_actions.keys(): raise Exception("process not supported. Must be one of: " + ",".join(self.process_actions.keys())) if details['action'] == "START": self._handle_action(self.process_actions[details["process"]]["START"]) elif details["action"] == "STOP": self._handle_action(self.process_actions[details["process"]]["STOP"]) else: raise Exception("action not supported. 
Must be either \"START\" or \"STOP\".") def _handle_action(self, action): def handle_single(a): if type(a) == Macro: self._run_macro(a.code) elif type(a) == Command: self._send_command(a.command) elif type(a) == Exception: raise a else: raise Exception("Internal Error: invalid action type. Should be Macro, Command or Exception") if type(action) == tuple: for a in action: handle_single(a) else: handle_single(action) def var_query(self, details): """Returns the answer to a query on variable Returns the answer to a query on the specified variable using netio Args: details: {"query": string} """ if "query" not in details: raise Exception("query not specified") if details["query"] not in self.var_queries.keys(): raise Exception("query not supported. Must be one of: " + ",".join(self.var_queries.keys())) code = self.var_queries[details["query"]] if type(code) == int: val = self._get_var(code) elif type(code) == tuple: val = [self._get_var(c) for c in code] else: raise Exception("Internal Exception: variable code is not valid") return val def flag_query(self, details): """Returns the answer to a query on flag Returns the answer to a query on the specified variable using netio Args: details: {"query": string} """ if "query" not in details: raise Exception("query not specified") if details["query"] not in self.flag_queries.keys(): raise Exception("query not supported. Must be one of: " + ",".join(self.flag_queries.keys())) val = self._get_flag(self.flag_queries[details["query"]]) return "yes" if val else "no" def _switch_on(self, code): if self.on_off_commands == None: raise Exception("No On/Off command set") self._handle_action(self.on_off_commands["ON"](code)) def _switch_off(self, code): if self.on_off_commands == None: raise Exception("No On/Off command set") self._handle_action(self.on_off_commands["OFF"](code)) def _run_macro(self, code): self._send_command(b'action macro run ' + bytes(str(code), encoding="ascii") + b'; __wait 100') def _send_command(self, command): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((self.ip_address, self.port)) s.send(bytes("auth " + self.auth + "\n", encoding="ascii")) s.send(command) s.close() def _get_var(self, id): return int(self._run_read_command(b"get var state " + bytes(str(id), encoding="ascii"))) def _get_flag(self, id): ret = self._run_read_command(b"get flag state " + bytes(str(id), encoding="ascii")) if ret in self.flag_return_values[False]: return False elif ret in self.flag_return_values[True]: return True else: raise Exception("Flag value not supported: " + ret) def _run_read_command(self, command): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((self.ip_address, self.port)) s.send(bytes("auth " + self.auth + "\n", encoding="ascii")) s.recv(10) s.send(command) s.send(b'\n') response = s.recv(10).decode(encoding="ascii").rstrip() s.close() return response
def action_command(self, details):
        """Send an action command

        Sends the specified command to the homevision through netio interface.

        Args:
            details: {"command": string}
        """
        if "command" not in details:
            raise Exception("Command not specified")

        if details["command"] not in self.actions.keys():
            raise Exception("Command not supported. Must be one of: " + ",".join(self.actions.keys()))

        self._handle_action(self.actions[details["command"]])
86
100
import socket class UserException(Exception): pass def user_exception(s): raise UserException(s) class Macro: """Represents a macro to be run""" def __init__(self, code): """code: int - index of macro to run""" self.code = code class Command: """Represents a macro to be run""" def __init__(self, command): """command: string - command to send""" self.command = command class HomeVisionController: def __init__( self, ip_address, port, auth, on_off_appliance_codes={}, actions={}, process_actions={}, var_queries={}, flag_queries={}, flag_return_values = {True: ["True", "On", "Yes", "Occupied", "Set", "1"], False: ["False", "Off", "No", "Vacant", "Clear", "0"]}, on_off_commands = None ): """ Args: ip_address: string port: int auth: string - key for authenticating with netio on_off_appliance_codes: dict[string] => int - codes to be fed to 'on_off_commands' for each appliance actions: dict[string] => Macro/Command/(_, _, ...) - named actions to be completed process_actions: dict[string] => {"START": X, "STOP": X} where X is Macro/Command/(_, _, ...) - named processes to be started and stopped var_queries: dict[string] => int - mapping of names to variable indexes flag_queries: dict[string] => int - mapping of names to flag indexes flag_return_values: {True: [string], False: [string]} - synonyms for true and false that are returned by netio 'read flag command'. (ignore if you haven't set them up) on_off_commands: {"ON": (int) => Macro/Command/(_, _, ...), "OFF": (int) => Macro/Command} - how to handle on and off commands """ self.ip_address = ip_address self.port = port self.auth = auth self.on_off_appliance_codes = on_off_appliance_codes self.actions = actions self.process_actions = process_actions self.var_queries = var_queries self.flag_queries = flag_queries self.flag_return_values = flag_return_values self.on_off_commands = on_off_commands def on_off_command(self, details): """Send an on or off command to an appliance Sends the specified command to the homevision through netio interface to control the specified appliance. Args: details: {"appliance": string, "state": string} """ if "appliance" not in details: raise Exception("appliance not specified") elif "state" not in details: raise Exception("state not specified") if details["appliance"] not in self.on_off_appliance_codes.keys(): raise Exception("appliance not supported. Must be one of: " + ",".join(self.on_off_appliance_codes.keys())) appliance_code = self.on_off_appliance_codes[details["appliance"]] if details['state'] == "ON": self._switch_on(appliance_code) elif details["state"] == "OFF": self._switch_off(appliance_code) else: raise Exception("state not supported. Must be either \"ON\" or \"OFF\".") def action_command(self, details): """Send an action command Sends the specified command to the homevision through netio interface. Args: details: {"command": string} """ if "command" not in details: raise Exception("Command not specified") if details["command"] not in self.actions.keys(): raise Exception("Command not supported. Must be one of: " + ",".join(self.actions.keys())) self._handle_action(self.actions[details["command"]]) def start_stop_command(self, details): """Starts or stops a process Sends the specified command to the homevision through netio interface to control the specified process. 
Args: details: {"action": string, "process": string} """ if "action" not in details: raise Exception("action not specified") elif "process" not in details: raise Exception("process not specified") if details["process"] not in self.process_actions.keys(): raise Exception("process not supported. Must be one of: " + ",".join(self.process_actions.keys())) if details['action'] == "START": self._handle_action(self.process_actions[details["process"]]["START"]) elif details["action"] == "STOP": self._handle_action(self.process_actions[details["process"]]["STOP"]) else: raise Exception("action not supported. Must be either \"START\" or \"STOP\".") def _handle_action(self, action): def handle_single(a): if type(a) == Macro: self._run_macro(a.code) elif type(a) == Command: self._send_command(a.command) elif type(a) == Exception: raise a else: raise Exception("Internal Error: invalid action type. Should be Macro, Command or Exception") if type(action) == tuple: for a in action: handle_single(a) else: handle_single(action) def var_query(self, details): """Returns the answer to a query on variable Returns the answer to a query on the specified variable using netio Args: details: {"query": string} """ if "query" not in details: raise Exception("query not specified") if details["query"] not in self.var_queries.keys(): raise Exception("query not supported. Must be one of: " + ",".join(self.var_queries.keys())) code = self.var_queries[details["query"]] if type(code) == int: val = self._get_var(code) elif type(code) == tuple: val = [self._get_var(c) for c in code] else: raise Exception("Internal Exception: variable code is not valid") return val def flag_query(self, details): """Returns the answer to a query on flag Returns the answer to a query on the specified variable using netio Args: details: {"query": string} """ if "query" not in details: raise Exception("query not specified") if details["query"] not in self.flag_queries.keys(): raise Exception("query not supported. Must be one of: " + ",".join(self.flag_queries.keys())) val = self._get_flag(self.flag_queries[details["query"]]) return "yes" if val else "no" def _switch_on(self, code): if self.on_off_commands == None: raise Exception("No On/Off command set") self._handle_action(self.on_off_commands["ON"](code)) def _switch_off(self, code): if self.on_off_commands == None: raise Exception("No On/Off command set") self._handle_action(self.on_off_commands["OFF"](code)) def _run_macro(self, code): self._send_command(b'action macro run ' + bytes(str(code), encoding="ascii") + b'; __wait 100') def _send_command(self, command): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((self.ip_address, self.port)) s.send(bytes("auth " + self.auth + "\n", encoding="ascii")) s.send(command) s.close() def _get_var(self, id): return int(self._run_read_command(b"get var state " + bytes(str(id), encoding="ascii"))) def _get_flag(self, id): ret = self._run_read_command(b"get flag state " + bytes(str(id), encoding="ascii")) if ret in self.flag_return_values[False]: return False elif ret in self.flag_return_values[True]: return True else: raise Exception("Flag value not supported: " + ret) def _run_read_command(self, command): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((self.ip_address, self.port)) s.send(bytes("auth " + self.auth + "\n", encoding="ascii")) s.recv(10) s.send(command) s.send(b'\n') response = s.recv(10).decode(encoding="ascii").rstrip() s.close() return response
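A usage sketch for action_command (hypothetical module name and connection details; an action can be a single Macro/Command or a tuple of them, and Command payloads are sent over the socket as-is, so bytes are used here):

from homevision_netio_controller import HomeVisionController, Macro, Command  # hypothetical module name

controller = HomeVisionController(
    ip_address="192.168.1.50", port=10001, auth="secret-key",  # hypothetical
    actions={
        "all off": Macro(12),  # a single macro
        "movie mode": (Macro(7), Command(b"action macro run 8; __wait 100")),  # a sequence of steps
    },
)

controller.action_command({"command": "movie mode"})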
start_stop_command
Starts or stops a process

        Sends the specified command to the homevision through netio interface to control the specified process.

        Args:
            details: {"action": string, "process": string}
import socket class UserException(Exception): pass def user_exception(s): raise UserException(s) class Macro: """Represents a macro to be run""" def __init__(self, code): """code: int - index of macro to run""" self.code = code class Command: """Represents a macro to be run""" def __init__(self, command): """command: string - command to send""" self.command = command class HomeVisionController: def __init__( self, ip_address, port, auth, on_off_appliance_codes={}, actions={}, process_actions={}, var_queries={}, flag_queries={}, flag_return_values = {True: ["True", "On", "Yes", "Occupied", "Set", "1"], False: ["False", "Off", "No", "Vacant", "Clear", "0"]}, on_off_commands = None ): """ Args: ip_address: string port: int auth: string - key for authenticating with netio on_off_appliance_codes: dict[string] => int - codes to be fed to 'on_off_commands' for each appliance actions: dict[string] => Macro/Command/(_, _, ...) - named actions to be completed process_actions: dict[string] => {"START": X, "STOP": X} where X is Macro/Command/(_, _, ...) - named processes to be started and stopped var_queries: dict[string] => int - mapping of names to variable indexes flag_queries: dict[string] => int - mapping of names to flag indexes flag_return_values: {True: [string], False: [string]} - synonyms for true and false that are returned by netio 'read flag command'. (ignore if you haven't set them up) on_off_commands: {"ON": (int) => Macro/Command/(_, _, ...), "OFF": (int) => Macro/Command} - how to handle on and off commands """ self.ip_address = ip_address self.port = port self.auth = auth self.on_off_appliance_codes = on_off_appliance_codes self.actions = actions self.process_actions = process_actions self.var_queries = var_queries self.flag_queries = flag_queries self.flag_return_values = flag_return_values self.on_off_commands = on_off_commands def on_off_command(self, details): """Send an on or off command to an appliance Sends the specified command to the homevision through netio interface to control the specified appliance. Args: details: {"appliance": string, "state": string} """ if "appliance" not in details: raise Exception("appliance not specified") elif "state" not in details: raise Exception("state not specified") if details["appliance"] not in self.on_off_appliance_codes.keys(): raise Exception("appliance not supported. Must be one of: " + ",".join(self.on_off_appliance_codes.keys())) appliance_code = self.on_off_appliance_codes[details["appliance"]] if details['state'] == "ON": self._switch_on(appliance_code) elif details["state"] == "OFF": self._switch_off(appliance_code) else: raise Exception("state not supported. Must be either \"ON\" or \"OFF\".") def action_command(self, details): """Send an action command Sends the specified command to the homevision through netio interface. Args: details: {"command": string} """ if "command" not in details: raise Exception("Command not specified") if details["command"] not in self.actions.keys(): raise Exception("Command not supported. Must be one of: " + ",".join(self.actions.keys())) self._handle_action(self.actions[details["command"]]) # MASKED: start_stop_command function (lines 102-123) def _handle_action(self, action): def handle_single(a): if type(a) == Macro: self._run_macro(a.code) elif type(a) == Command: self._send_command(a.command) elif type(a) == Exception: raise a else: raise Exception("Internal Error: invalid action type. 
Should be Macro, Command or Exception") if type(action) == tuple: for a in action: handle_single(a) else: handle_single(action) def var_query(self, details): """Returns the answer to a query on variable Returns the answer to a query on the specified variable using netio Args: details: {"query": string} """ if "query" not in details: raise Exception("query not specified") if details["query"] not in self.var_queries.keys(): raise Exception("query not supported. Must be one of: " + ",".join(self.var_queries.keys())) code = self.var_queries[details["query"]] if type(code) == int: val = self._get_var(code) elif type(code) == tuple: val = [self._get_var(c) for c in code] else: raise Exception("Internal Exception: variable code is not valid") return val def flag_query(self, details): """Returns the answer to a query on flag Returns the answer to a query on the specified variable using netio Args: details: {"query": string} """ if "query" not in details: raise Exception("query not specified") if details["query"] not in self.flag_queries.keys(): raise Exception("query not supported. Must be one of: " + ",".join(self.flag_queries.keys())) val = self._get_flag(self.flag_queries[details["query"]]) return "yes" if val else "no" def _switch_on(self, code): if self.on_off_commands == None: raise Exception("No On/Off command set") self._handle_action(self.on_off_commands["ON"](code)) def _switch_off(self, code): if self.on_off_commands == None: raise Exception("No On/Off command set") self._handle_action(self.on_off_commands["OFF"](code)) def _run_macro(self, code): self._send_command(b'action macro run ' + bytes(str(code), encoding="ascii") + b'; __wait 100') def _send_command(self, command): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((self.ip_address, self.port)) s.send(bytes("auth " + self.auth + "\n", encoding="ascii")) s.send(command) s.close() def _get_var(self, id): return int(self._run_read_command(b"get var state " + bytes(str(id), encoding="ascii"))) def _get_flag(self, id): ret = self._run_read_command(b"get flag state " + bytes(str(id), encoding="ascii")) if ret in self.flag_return_values[False]: return False elif ret in self.flag_return_values[True]: return True else: raise Exception("Flag value not supported: " + ret) def _run_read_command(self, command): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((self.ip_address, self.port)) s.send(bytes("auth " + self.auth + "\n", encoding="ascii")) s.recv(10) s.send(command) s.send(b'\n') response = s.recv(10).decode(encoding="ascii").rstrip() s.close() return response
def start_stop_command(self, details):
        """Starts or stops a process

        Sends the specified command to the homevision through netio interface to control the specified process.

        Args:
            details: {"action": string, "process": string}
        """
        if "action" not in details:
            raise Exception("action not specified")
        elif "process" not in details:
            raise Exception("process not specified")

        if details["process"] not in self.process_actions.keys():
            raise Exception("process not supported. Must be one of: " + ",".join(self.process_actions.keys()))

        if details['action'] == "START":
            self._handle_action(self.process_actions[details["process"]]["START"])
        elif details["action"] == "STOP":
            self._handle_action(self.process_actions[details["process"]]["STOP"])
        else:
            raise Exception("action not supported. Must be either \"START\" or \"STOP\".")
102
123
import socket class UserException(Exception): pass def user_exception(s): raise UserException(s) class Macro: """Represents a macro to be run""" def __init__(self, code): """code: int - index of macro to run""" self.code = code class Command: """Represents a macro to be run""" def __init__(self, command): """command: string - command to send""" self.command = command class HomeVisionController: def __init__( self, ip_address, port, auth, on_off_appliance_codes={}, actions={}, process_actions={}, var_queries={}, flag_queries={}, flag_return_values = {True: ["True", "On", "Yes", "Occupied", "Set", "1"], False: ["False", "Off", "No", "Vacant", "Clear", "0"]}, on_off_commands = None ): """ Args: ip_address: string port: int auth: string - key for authenticating with netio on_off_appliance_codes: dict[string] => int - codes to be fed to 'on_off_commands' for each appliance actions: dict[string] => Macro/Command/(_, _, ...) - named actions to be completed process_actions: dict[string] => {"START": X, "STOP": X} where X is Macro/Command/(_, _, ...) - named processes to be started and stopped var_queries: dict[string] => int - mapping of names to variable indexes flag_queries: dict[string] => int - mapping of names to flag indexes flag_return_values: {True: [string], False: [string]} - synonyms for true and false that are returned by netio 'read flag command'. (ignore if you haven't set them up) on_off_commands: {"ON": (int) => Macro/Command/(_, _, ...), "OFF": (int) => Macro/Command} - how to handle on and off commands """ self.ip_address = ip_address self.port = port self.auth = auth self.on_off_appliance_codes = on_off_appliance_codes self.actions = actions self.process_actions = process_actions self.var_queries = var_queries self.flag_queries = flag_queries self.flag_return_values = flag_return_values self.on_off_commands = on_off_commands def on_off_command(self, details): """Send an on or off command to an appliance Sends the specified command to the homevision through netio interface to control the specified appliance. Args: details: {"appliance": string, "state": string} """ if "appliance" not in details: raise Exception("appliance not specified") elif "state" not in details: raise Exception("state not specified") if details["appliance"] not in self.on_off_appliance_codes.keys(): raise Exception("appliance not supported. Must be one of: " + ",".join(self.on_off_appliance_codes.keys())) appliance_code = self.on_off_appliance_codes[details["appliance"]] if details['state'] == "ON": self._switch_on(appliance_code) elif details["state"] == "OFF": self._switch_off(appliance_code) else: raise Exception("state not supported. Must be either \"ON\" or \"OFF\".") def action_command(self, details): """Send an action command Sends the specified command to the homevision through netio interface. Args: details: {"command": string} """ if "command" not in details: raise Exception("Command not specified") if details["command"] not in self.actions.keys(): raise Exception("Command not supported. Must be one of: " + ",".join(self.actions.keys())) self._handle_action(self.actions[details["command"]]) def start_stop_command(self, details): """Starts or stops a process Sends the specified command to the homevision through netio interface to control the specified process. 
Args: details: {"action": string, "process": string} """ if "action" not in details: raise Exception("action not specified") elif "process" not in details: raise Exception("process not specified") if details["process"] not in self.process_actions.keys(): raise Exception("process not supported. Must be one of: " + ",".join(self.process_actions.keys())) if details['action'] == "START": self._handle_action(self.process_actions[details["process"]]["START"]) elif details["action"] == "STOP": self._handle_action(self.process_actions[details["process"]]["STOP"]) else: raise Exception("action not supported. Must be either \"START\" or \"STOP\".") def _handle_action(self, action): def handle_single(a): if type(a) == Macro: self._run_macro(a.code) elif type(a) == Command: self._send_command(a.command) elif type(a) == Exception: raise a else: raise Exception("Internal Error: invalid action type. Should be Macro, Command or Exception") if type(action) == tuple: for a in action: handle_single(a) else: handle_single(action) def var_query(self, details): """Returns the answer to a query on variable Returns the answer to a query on the specified variable using netio Args: details: {"query": string} """ if "query" not in details: raise Exception("query not specified") if details["query"] not in self.var_queries.keys(): raise Exception("query not supported. Must be one of: " + ",".join(self.var_queries.keys())) code = self.var_queries[details["query"]] if type(code) == int: val = self._get_var(code) elif type(code) == tuple: val = [self._get_var(c) for c in code] else: raise Exception("Internal Exception: variable code is not valid") return val def flag_query(self, details): """Returns the answer to a query on flag Returns the answer to a query on the specified variable using netio Args: details: {"query": string} """ if "query" not in details: raise Exception("query not specified") if details["query"] not in self.flag_queries.keys(): raise Exception("query not supported. Must be one of: " + ",".join(self.flag_queries.keys())) val = self._get_flag(self.flag_queries[details["query"]]) return "yes" if val else "no" def _switch_on(self, code): if self.on_off_commands == None: raise Exception("No On/Off command set") self._handle_action(self.on_off_commands["ON"](code)) def _switch_off(self, code): if self.on_off_commands == None: raise Exception("No On/Off command set") self._handle_action(self.on_off_commands["OFF"](code)) def _run_macro(self, code): self._send_command(b'action macro run ' + bytes(str(code), encoding="ascii") + b'; __wait 100') def _send_command(self, command): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((self.ip_address, self.port)) s.send(bytes("auth " + self.auth + "\n", encoding="ascii")) s.send(command) s.close() def _get_var(self, id): return int(self._run_read_command(b"get var state " + bytes(str(id), encoding="ascii"))) def _get_flag(self, id): ret = self._run_read_command(b"get flag state " + bytes(str(id), encoding="ascii")) if ret in self.flag_return_values[False]: return False elif ret in self.flag_return_values[True]: return True else: raise Exception("Flag value not supported: " + ret) def _run_read_command(self, command): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((self.ip_address, self.port)) s.send(bytes("auth " + self.auth + "\n", encoding="ascii")) s.recv(10) s.send(command) s.send(b'\n') response = s.recv(10).decode(encoding="ascii").rstrip() s.close() return response
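A usage sketch for start_stop_command (hypothetical module name, connection details and macro indexes; each named process maps to a dict with "START" and "STOP" actions):

from homevision_netio_controller import HomeVisionController, Macro  # hypothetical module name

controller = HomeVisionController(
    ip_address="192.168.1.50", port=10001, auth="secret-key",  # hypothetical
    process_actions={
        "sprinklers": {"START": Macro(20), "STOP": Macro(21)},  # hypothetical macro indexes
    },
)

controller.start_stop_command({"process": "sprinklers", "action": "START"})
controller.start_stop_command({"process": "sprinklers", "action": "STOP"})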
var_query
Returns the answer to a query on variable

        Returns the answer to a query on the specified variable using netio

        Args:
            details: {"query": string}
import socket class UserException(Exception): pass def user_exception(s): raise UserException(s) class Macro: """Represents a macro to be run""" def __init__(self, code): """code: int - index of macro to run""" self.code = code class Command: """Represents a macro to be run""" def __init__(self, command): """command: string - command to send""" self.command = command class HomeVisionController: def __init__( self, ip_address, port, auth, on_off_appliance_codes={}, actions={}, process_actions={}, var_queries={}, flag_queries={}, flag_return_values = {True: ["True", "On", "Yes", "Occupied", "Set", "1"], False: ["False", "Off", "No", "Vacant", "Clear", "0"]}, on_off_commands = None ): """ Args: ip_address: string port: int auth: string - key for authenticating with netio on_off_appliance_codes: dict[string] => int - codes to be fed to 'on_off_commands' for each appliance actions: dict[string] => Macro/Command/(_, _, ...) - named actions to be completed process_actions: dict[string] => {"START": X, "STOP": X} where X is Macro/Command/(_, _, ...) - named processes to be started and stopped var_queries: dict[string] => int - mapping of names to variable indexes flag_queries: dict[string] => int - mapping of names to flag indexes flag_return_values: {True: [string], False: [string]} - synonyms for true and false that are returned by netio 'read flag command'. (ignore if you haven't set them up) on_off_commands: {"ON": (int) => Macro/Command/(_, _, ...), "OFF": (int) => Macro/Command} - how to handle on and off commands """ self.ip_address = ip_address self.port = port self.auth = auth self.on_off_appliance_codes = on_off_appliance_codes self.actions = actions self.process_actions = process_actions self.var_queries = var_queries self.flag_queries = flag_queries self.flag_return_values = flag_return_values self.on_off_commands = on_off_commands def on_off_command(self, details): """Send an on or off command to an appliance Sends the specified command to the homevision through netio interface to control the specified appliance. Args: details: {"appliance": string, "state": string} """ if "appliance" not in details: raise Exception("appliance not specified") elif "state" not in details: raise Exception("state not specified") if details["appliance"] not in self.on_off_appliance_codes.keys(): raise Exception("appliance not supported. Must be one of: " + ",".join(self.on_off_appliance_codes.keys())) appliance_code = self.on_off_appliance_codes[details["appliance"]] if details['state'] == "ON": self._switch_on(appliance_code) elif details["state"] == "OFF": self._switch_off(appliance_code) else: raise Exception("state not supported. Must be either \"ON\" or \"OFF\".") def action_command(self, details): """Send an action command Sends the specified command to the homevision through netio interface. Args: details: {"command": string} """ if "command" not in details: raise Exception("Command not specified") if details["command"] not in self.actions.keys(): raise Exception("Command not supported. Must be one of: " + ",".join(self.actions.keys())) self._handle_action(self.actions[details["command"]]) def start_stop_command(self, details): """Starts or stops a process Sends the specified command to the homevision through netio interface to control the specified process. 
Args: details: {"action": string, "process": string} """ if "action" not in details: raise Exception("action not specified") elif "process" not in details: raise Exception("process not specified") if details["process"] not in self.process_actions.keys(): raise Exception("process not supported. Must be one of: " + ",".join(self.process_actions.keys())) if details['action'] == "START": self._handle_action(self.process_actions[details["process"]]["START"]) elif details["action"] == "STOP": self._handle_action(self.process_actions[details["process"]]["STOP"]) else: raise Exception("action not supported. Must be either \"START\" or \"STOP\".") def _handle_action(self, action): def handle_single(a): if type(a) == Macro: self._run_macro(a.code) elif type(a) == Command: self._send_command(a.command) elif type(a) == Exception: raise a else: raise Exception("Internal Error: invalid action type. Should be Macro, Command or Exception") if type(action) == tuple: for a in action: handle_single(a) else: handle_single(action) # MASKED: var_query function (lines 142-164) def flag_query(self, details): """Returns the answer to a query on flag Returns the answer to a query on the specified variable using netio Args: details: {"query": string} """ if "query" not in details: raise Exception("query not specified") if details["query"] not in self.flag_queries.keys(): raise Exception("query not supported. Must be one of: " + ",".join(self.flag_queries.keys())) val = self._get_flag(self.flag_queries[details["query"]]) return "yes" if val else "no" def _switch_on(self, code): if self.on_off_commands == None: raise Exception("No On/Off command set") self._handle_action(self.on_off_commands["ON"](code)) def _switch_off(self, code): if self.on_off_commands == None: raise Exception("No On/Off command set") self._handle_action(self.on_off_commands["OFF"](code)) def _run_macro(self, code): self._send_command(b'action macro run ' + bytes(str(code), encoding="ascii") + b'; __wait 100') def _send_command(self, command): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((self.ip_address, self.port)) s.send(bytes("auth " + self.auth + "\n", encoding="ascii")) s.send(command) s.close() def _get_var(self, id): return int(self._run_read_command(b"get var state " + bytes(str(id), encoding="ascii"))) def _get_flag(self, id): ret = self._run_read_command(b"get flag state " + bytes(str(id), encoding="ascii")) if ret in self.flag_return_values[False]: return False elif ret in self.flag_return_values[True]: return True else: raise Exception("Flag value not supported: " + ret) def _run_read_command(self, command): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((self.ip_address, self.port)) s.send(bytes("auth " + self.auth + "\n", encoding="ascii")) s.recv(10) s.send(command) s.send(b'\n') response = s.recv(10).decode(encoding="ascii").rstrip() s.close() return response
def var_query(self, details):
        """Returns the answer to a query on variable

        Returns the answer to a query on the specified variable using netio

        Args:
            details: {"query": string}
        """
        if "query" not in details:
            raise Exception("query not specified")

        if details["query"] not in self.var_queries.keys():
            raise Exception("query not supported. Must be one of: " + ",".join(self.var_queries.keys()))

        code = self.var_queries[details["query"]]
        if type(code) == int:
            val = self._get_var(code)
        elif type(code) == tuple:
            val = [self._get_var(c) for c in code]
        else:
            raise Exception("Internal Exception: variable code is not valid")

        return val
142
164
import socket class UserException(Exception): pass def user_exception(s): raise UserException(s) class Macro: """Represents a macro to be run""" def __init__(self, code): """code: int - index of macro to run""" self.code = code class Command: """Represents a macro to be run""" def __init__(self, command): """command: string - command to send""" self.command = command class HomeVisionController: def __init__( self, ip_address, port, auth, on_off_appliance_codes={}, actions={}, process_actions={}, var_queries={}, flag_queries={}, flag_return_values = {True: ["True", "On", "Yes", "Occupied", "Set", "1"], False: ["False", "Off", "No", "Vacant", "Clear", "0"]}, on_off_commands = None ): """ Args: ip_address: string port: int auth: string - key for authenticating with netio on_off_appliance_codes: dict[string] => int - codes to be fed to 'on_off_commands' for each appliance actions: dict[string] => Macro/Command/(_, _, ...) - named actions to be completed process_actions: dict[string] => {"START": X, "STOP": X} where X is Macro/Command/(_, _, ...) - named processes to be started and stopped var_queries: dict[string] => int - mapping of names to variable indexes flag_queries: dict[string] => int - mapping of names to flag indexes flag_return_values: {True: [string], False: [string]} - synonyms for true and false that are returned by netio 'read flag command'. (ignore if you haven't set them up) on_off_commands: {"ON": (int) => Macro/Command/(_, _, ...), "OFF": (int) => Macro/Command} - how to handle on and off commands """ self.ip_address = ip_address self.port = port self.auth = auth self.on_off_appliance_codes = on_off_appliance_codes self.actions = actions self.process_actions = process_actions self.var_queries = var_queries self.flag_queries = flag_queries self.flag_return_values = flag_return_values self.on_off_commands = on_off_commands def on_off_command(self, details): """Send an on or off command to an appliance Sends the specified command to the homevision through netio interface to control the specified appliance. Args: details: {"appliance": string, "state": string} """ if "appliance" not in details: raise Exception("appliance not specified") elif "state" not in details: raise Exception("state not specified") if details["appliance"] not in self.on_off_appliance_codes.keys(): raise Exception("appliance not supported. Must be one of: " + ",".join(self.on_off_appliance_codes.keys())) appliance_code = self.on_off_appliance_codes[details["appliance"]] if details['state'] == "ON": self._switch_on(appliance_code) elif details["state"] == "OFF": self._switch_off(appliance_code) else: raise Exception("state not supported. Must be either \"ON\" or \"OFF\".") def action_command(self, details): """Send an action command Sends the specified command to the homevision through netio interface. Args: details: {"command": string} """ if "command" not in details: raise Exception("Command not specified") if details["command"] not in self.actions.keys(): raise Exception("Command not supported. Must be one of: " + ",".join(self.actions.keys())) self._handle_action(self.actions[details["command"]]) def start_stop_command(self, details): """Starts or stops a process Sends the specified command to the homevision through netio interface to control the specified process. 
Args: details: {"action": string, "process": string} """ if "action" not in details: raise Exception("action not specified") elif "process" not in details: raise Exception("process not specified") if details["process"] not in self.process_actions.keys(): raise Exception("process not supported. Must be one of: " + ",".join(self.process_actions.keys())) if details['action'] == "START": self._handle_action(self.process_actions[details["process"]]["START"]) elif details["action"] == "STOP": self._handle_action(self.process_actions[details["process"]]["STOP"]) else: raise Exception("action not supported. Must be either \"START\" or \"STOP\".") def _handle_action(self, action): def handle_single(a): if type(a) == Macro: self._run_macro(a.code) elif type(a) == Command: self._send_command(a.command) elif type(a) == Exception: raise a else: raise Exception("Internal Error: invalid action type. Should be Macro, Command or Exception") if type(action) == tuple: for a in action: handle_single(a) else: handle_single(action) def var_query(self, details): """Returns the answer to a query on variable Returns the answer to a query on the specified variable using netio Args: details: {"query": string} """ if "query" not in details: raise Exception("query not specified") if details["query"] not in self.var_queries.keys(): raise Exception("query not supported. Must be one of: " + ",".join(self.var_queries.keys())) code = self.var_queries[details["query"]] if type(code) == int: val = self._get_var(code) elif type(code) == tuple: val = [self._get_var(c) for c in code] else: raise Exception("Internal Exception: variable code is not valid") return val def flag_query(self, details): """Returns the answer to a query on flag Returns the answer to a query on the specified variable using netio Args: details: {"query": string} """ if "query" not in details: raise Exception("query not specified") if details["query"] not in self.flag_queries.keys(): raise Exception("query not supported. Must be one of: " + ",".join(self.flag_queries.keys())) val = self._get_flag(self.flag_queries[details["query"]]) return "yes" if val else "no" def _switch_on(self, code): if self.on_off_commands == None: raise Exception("No On/Off command set") self._handle_action(self.on_off_commands["ON"](code)) def _switch_off(self, code): if self.on_off_commands == None: raise Exception("No On/Off command set") self._handle_action(self.on_off_commands["OFF"](code)) def _run_macro(self, code): self._send_command(b'action macro run ' + bytes(str(code), encoding="ascii") + b'; __wait 100') def _send_command(self, command): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((self.ip_address, self.port)) s.send(bytes("auth " + self.auth + "\n", encoding="ascii")) s.send(command) s.close() def _get_var(self, id): return int(self._run_read_command(b"get var state " + bytes(str(id), encoding="ascii"))) def _get_flag(self, id): ret = self._run_read_command(b"get flag state " + bytes(str(id), encoding="ascii")) if ret in self.flag_return_values[False]: return False elif ret in self.flag_return_values[True]: return True else: raise Exception("Flag value not supported: " + ret) def _run_read_command(self, command): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((self.ip_address, self.port)) s.send(bytes("auth " + self.auth + "\n", encoding="ascii")) s.recv(10) s.send(command) s.send(b'\n') response = s.recv(10).decode(encoding="ascii").rstrip() s.close() return response
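A usage sketch for var_query (hypothetical module name, connection details and variable indexes; an int maps a query to a single variable, while a tuple of ints returns a list of values):

from homevision_netio_controller import HomeVisionController  # hypothetical module name

controller = HomeVisionController(
    ip_address="192.168.1.50", port=10001, auth="secret-key",  # hypothetical
    var_queries={
        "temperature": 4,      # single variable index -> a single int is returned
        "thermostat": (4, 5),  # tuple of indexes -> a list of ints is returned
    },
)

print(controller.var_query({"query": "thermostat"}))  # e.g. [21, 23]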
flag_query
Returns the answer to a query on flag Returns the answer to a query on the specified flag using netio Args: details: {"query": string}
import socket class UserException(Exception): pass def user_exception(s): raise UserException(s) class Macro: """Represents a macro to be run""" def __init__(self, code): """code: int - index of macro to run""" self.code = code class Command: """Represents a macro to be run""" def __init__(self, command): """command: string - command to send""" self.command = command class HomeVisionController: def __init__( self, ip_address, port, auth, on_off_appliance_codes={}, actions={}, process_actions={}, var_queries={}, flag_queries={}, flag_return_values = {True: ["True", "On", "Yes", "Occupied", "Set", "1"], False: ["False", "Off", "No", "Vacant", "Clear", "0"]}, on_off_commands = None ): """ Args: ip_address: string port: int auth: string - key for authenticating with netio on_off_appliance_codes: dict[string] => int - codes to be fed to 'on_off_commands' for each appliance actions: dict[string] => Macro/Command/(_, _, ...) - named actions to be completed process_actions: dict[string] => {"START": X, "STOP": X} where X is Macro/Command/(_, _, ...) - named processes to be started and stopped var_queries: dict[string] => int - mapping of names to variable indexes flag_queries: dict[string] => int - mapping of names to flag indexes flag_return_values: {True: [string], False: [string]} - synonyms for true and false that are returned by netio 'read flag command'. (ignore if you haven't set them up) on_off_commands: {"ON": (int) => Macro/Command/(_, _, ...), "OFF": (int) => Macro/Command} - how to handle on and off commands """ self.ip_address = ip_address self.port = port self.auth = auth self.on_off_appliance_codes = on_off_appliance_codes self.actions = actions self.process_actions = process_actions self.var_queries = var_queries self.flag_queries = flag_queries self.flag_return_values = flag_return_values self.on_off_commands = on_off_commands def on_off_command(self, details): """Send an on or off command to an appliance Sends the specified command to the homevision through netio interface to control the specified appliance. Args: details: {"appliance": string, "state": string} """ if "appliance" not in details: raise Exception("appliance not specified") elif "state" not in details: raise Exception("state not specified") if details["appliance"] not in self.on_off_appliance_codes.keys(): raise Exception("appliance not supported. Must be one of: " + ",".join(self.on_off_appliance_codes.keys())) appliance_code = self.on_off_appliance_codes[details["appliance"]] if details['state'] == "ON": self._switch_on(appliance_code) elif details["state"] == "OFF": self._switch_off(appliance_code) else: raise Exception("state not supported. Must be either \"ON\" or \"OFF\".") def action_command(self, details): """Send an action command Sends the specified command to the homevision through netio interface. Args: details: {"command": string} """ if "command" not in details: raise Exception("Command not specified") if details["command"] not in self.actions.keys(): raise Exception("Command not supported. Must be one of: " + ",".join(self.actions.keys())) self._handle_action(self.actions[details["command"]]) def start_stop_command(self, details): """Starts or stops a process Sends the specified command to the homevision through netio interface to control the specified process. 
Args: details: {"action": string, "process": string} """ if "action" not in details: raise Exception("action not specified") elif "process" not in details: raise Exception("process not specified") if details["process"] not in self.process_actions.keys(): raise Exception("process not supported. Must be one of: " + ",".join(self.process_actions.keys())) if details['action'] == "START": self._handle_action(self.process_actions[details["process"]]["START"]) elif details["action"] == "STOP": self._handle_action(self.process_actions[details["process"]]["STOP"]) else: raise Exception("action not supported. Must be either \"START\" or \"STOP\".") def _handle_action(self, action): def handle_single(a): if type(a) == Macro: self._run_macro(a.code) elif type(a) == Command: self._send_command(a.command) elif type(a) == Exception: raise a else: raise Exception("Internal Error: invalid action type. Should be Macro, Command or Exception") if type(action) == tuple: for a in action: handle_single(a) else: handle_single(action) def var_query(self, details): """Returns the answer to a query on variable Returns the answer to a query on the specified variable using netio Args: details: {"query": string} """ if "query" not in details: raise Exception("query not specified") if details["query"] not in self.var_queries.keys(): raise Exception("query not supported. Must be one of: " + ",".join(self.var_queries.keys())) code = self.var_queries[details["query"]] if type(code) == int: val = self._get_var(code) elif type(code) == tuple: val = [self._get_var(c) for c in code] else: raise Exception("Internal Exception: variable code is not valid") return val # MASKED: flag_query function (lines 166-182) def _switch_on(self, code): if self.on_off_commands == None: raise Exception("No On/Off command set") self._handle_action(self.on_off_commands["ON"](code)) def _switch_off(self, code): if self.on_off_commands == None: raise Exception("No On/Off command set") self._handle_action(self.on_off_commands["OFF"](code)) def _run_macro(self, code): self._send_command(b'action macro run ' + bytes(str(code), encoding="ascii") + b'; __wait 100') def _send_command(self, command): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((self.ip_address, self.port)) s.send(bytes("auth " + self.auth + "\n", encoding="ascii")) s.send(command) s.close() def _get_var(self, id): return int(self._run_read_command(b"get var state " + bytes(str(id), encoding="ascii"))) def _get_flag(self, id): ret = self._run_read_command(b"get flag state " + bytes(str(id), encoding="ascii")) if ret in self.flag_return_values[False]: return False elif ret in self.flag_return_values[True]: return True else: raise Exception("Flag value not supported: " + ret) def _run_read_command(self, command): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((self.ip_address, self.port)) s.send(bytes("auth " + self.auth + "\n", encoding="ascii")) s.recv(10) s.send(command) s.send(b'\n') response = s.recv(10).decode(encoding="ascii").rstrip() s.close() return response
def flag_query(self, details): """Returns the answer to a query on flag Returns the answer to a query on the specified flag using netio Args: details: {"query": string} """ if "query" not in details: raise Exception("query not specified") if details["query"] not in self.flag_queries.keys(): raise Exception("query not supported. Must be one of: " + ",".join(self.flag_queries.keys())) val = self._get_flag(self.flag_queries[details["query"]]) return "yes" if val else "no"
166
182
import socket class UserException(Exception): pass def user_exception(s): raise UserException(s) class Macro: """Represents a macro to be run""" def __init__(self, code): """code: int - index of macro to run""" self.code = code class Command: """Represents a macro to be run""" def __init__(self, command): """command: string - command to send""" self.command = command class HomeVisionController: def __init__( self, ip_address, port, auth, on_off_appliance_codes={}, actions={}, process_actions={}, var_queries={}, flag_queries={}, flag_return_values = {True: ["True", "On", "Yes", "Occupied", "Set", "1"], False: ["False", "Off", "No", "Vacant", "Clear", "0"]}, on_off_commands = None ): """ Args: ip_address: string port: int auth: string - key for authenticating with netio on_off_appliance_codes: dict[string] => int - codes to be fed to 'on_off_commands' for each appliance actions: dict[string] => Macro/Command/(_, _, ...) - named actions to be completed process_actions: dict[string] => {"START": X, "STOP": X} where X is Macro/Command/(_, _, ...) - named processes to be started and stopped var_queries: dict[string] => int - mapping of names to variable indexes flag_queries: dict[string] => int - mapping of names to flag indexes flag_return_values: {True: [string], False: [string]} - synonyms for true and false that are returned by netio 'read flag command'. (ignore if you haven't set them up) on_off_commands: {"ON": (int) => Macro/Command/(_, _, ...), "OFF": (int) => Macro/Command} - how to handle on and off commands """ self.ip_address = ip_address self.port = port self.auth = auth self.on_off_appliance_codes = on_off_appliance_codes self.actions = actions self.process_actions = process_actions self.var_queries = var_queries self.flag_queries = flag_queries self.flag_return_values = flag_return_values self.on_off_commands = on_off_commands def on_off_command(self, details): """Send an on or off command to an appliance Sends the specified command to the homevision through netio interface to control the specified appliance. Args: details: {"appliance": string, "state": string} """ if "appliance" not in details: raise Exception("appliance not specified") elif "state" not in details: raise Exception("state not specified") if details["appliance"] not in self.on_off_appliance_codes.keys(): raise Exception("appliance not supported. Must be one of: " + ",".join(self.on_off_appliance_codes.keys())) appliance_code = self.on_off_appliance_codes[details["appliance"]] if details['state'] == "ON": self._switch_on(appliance_code) elif details["state"] == "OFF": self._switch_off(appliance_code) else: raise Exception("state not supported. Must be either \"ON\" or \"OFF\".") def action_command(self, details): """Send an action command Sends the specified command to the homevision through netio interface. Args: details: {"command": string} """ if "command" not in details: raise Exception("Command not specified") if details["command"] not in self.actions.keys(): raise Exception("Command not supported. Must be one of: " + ",".join(self.actions.keys())) self._handle_action(self.actions[details["command"]]) def start_stop_command(self, details): """Starts or stops a process Sends the specified command to the homevision through netio interface to control the specified process. 
Args: details: {"action": string, "process": string} """ if "action" not in details: raise Exception("action not specified") elif "process" not in details: raise Exception("process not specified") if details["process"] not in self.process_actions.keys(): raise Exception("process not supported. Must be one of: " + ",".join(self.process_actions.keys())) if details['action'] == "START": self._handle_action(self.process_actions[details["process"]]["START"]) elif details["action"] == "STOP": self._handle_action(self.process_actions[details["process"]]["STOP"]) else: raise Exception("action not supported. Must be either \"START\" or \"STOP\".") def _handle_action(self, action): def handle_single(a): if type(a) == Macro: self._run_macro(a.code) elif type(a) == Command: self._send_command(a.command) elif type(a) == Exception: raise a else: raise Exception("Internal Error: invalid action type. Should be Macro, Command or Exception") if type(action) == tuple: for a in action: handle_single(a) else: handle_single(action) def var_query(self, details): """Returns the answer to a query on variable Returns the answer to a query on the specified variable using netio Args: details: {"query": string} """ if "query" not in details: raise Exception("query not specified") if details["query"] not in self.var_queries.keys(): raise Exception("query not supported. Must be one of: " + ",".join(self.var_queries.keys())) code = self.var_queries[details["query"]] if type(code) == int: val = self._get_var(code) elif type(code) == tuple: val = [self._get_var(c) for c in code] else: raise Exception("Internal Exception: variable code is not valid") return val def flag_query(self, details): """Returns the answer to a query on flag Returns the answer to a query on the specified variable using netio Args: details: {"query": string} """ if "query" not in details: raise Exception("query not specified") if details["query"] not in self.flag_queries.keys(): raise Exception("query not supported. Must be one of: " + ",".join(self.flag_queries.keys())) val = self._get_flag(self.flag_queries[details["query"]]) return "yes" if val else "no" def _switch_on(self, code): if self.on_off_commands == None: raise Exception("No On/Off command set") self._handle_action(self.on_off_commands["ON"](code)) def _switch_off(self, code): if self.on_off_commands == None: raise Exception("No On/Off command set") self._handle_action(self.on_off_commands["OFF"](code)) def _run_macro(self, code): self._send_command(b'action macro run ' + bytes(str(code), encoding="ascii") + b'; __wait 100') def _send_command(self, command): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((self.ip_address, self.port)) s.send(bytes("auth " + self.auth + "\n", encoding="ascii")) s.send(command) s.close() def _get_var(self, id): return int(self._run_read_command(b"get var state " + bytes(str(id), encoding="ascii"))) def _get_flag(self, id): ret = self._run_read_command(b"get flag state " + bytes(str(id), encoding="ascii")) if ret in self.flag_return_values[False]: return False elif ret in self.flag_return_values[True]: return True else: raise Exception("Flag value not supported: " + ret) def _run_read_command(self, command): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((self.ip_address, self.port)) s.send(bytes("auth " + self.auth + "\n", encoding="ascii")) s.recv(10) s.send(command) s.send(b'\n') response = s.recv(10).decode(encoding="ascii").rstrip() s.close() return response
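A quick sketch of how flag_query maps a netio flag reading onto "yes"/"no"; the flag index and the patched read command are assumptions used to keep the example offline.

controller = HomeVisionController(
    ip_address="192.168.1.50", port=10065, auth="secret-key",  # placeholders
    flag_queries={"porch_occupied": 7},                         # assumed flag index
)

# Replace the socket round-trip with a canned netio reply.
controller._run_read_command = lambda command: "Occupied"

print(controller.flag_query({"query": "porch_occupied"}))  # "Occupied" is a True synonym -> "yes"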
_find_files
Find files in directory with optional extensions. Args: directory (string) keywords (list): e.g. ["SelfpacedRota", "ButtonPress"] (optional) extensions (list): e.g. [".json", ".tsv"] (optional) verbose (bool): verbosity level (optional, default=True)
"""Define abstract base classes to construct FileFinder classes.""" import os import shutil from abc import ABC, abstractmethod from dataclasses import dataclass, field from pathlib import Path from typing import Optional, Sequence, Union import mne_bids @dataclass class FileFinder(ABC): """Basic representation of class for finding and filtering files.""" hemispheres: Union[dict, None] = field(default_factory=dict) directory: Union[Path, str] = field(init=False) files: list = field(init=False, default_factory=list) def __str__(self): if not self.files: return "No corresponding files found." headers = ["Index", "Filename"] col_width = max(len(os.path.basename(file)) for file in self.files) format_row = f"{{:>{len(headers[0]) + 2}}}{{:>{col_width + 2}}}" terminal_size = "\u2500" * shutil.get_terminal_size().columns return "\n".join( ( "Corresponding files found:", "".join( f"{{:>{len(header) + 2}}}".format(header) for header in headers ), terminal_size, *( format_row.format(idx, os.path.basename(file)) for idx, file in enumerate(self.files) ), ) ) def __len__(self) -> int: if not self.files: return 0 return len(self.files) @abstractmethod def find_files( self, directory: Union[str, Path], extensions: Optional[Union[Sequence, str]] = None, keywords: Optional[Union[list, str]] = None, hemisphere: Optional[str] = None, stimulation: Optional[str] = None, medication: Optional[str] = None, exclude: Optional[Union[str, list]] = None, verbose: bool = False, ) -> None: """Find files in directory with optional keywords and extensions.""" @abstractmethod def filter_files( self, keywords: Optional[Union[str, list]] = None, hemisphere: Optional[str] = None, stimulation: Optional[str] = None, medication: Optional[str] = None, exclude: Optional[Union[str, list]] = None, verbose: bool = False, ) -> None: """Filter list of filepaths for given parameters.""" @staticmethod def _keyword_search( files: list[str], keywords: Optional[Union[str, list]] ) -> list: if not keywords: return files if not isinstance(keywords, list): keywords = [keywords] filtered_files = [ file for file in files if any(key in file for key in keywords) ] return filtered_files # MASKED: _find_files function (lines 88-108) def _filter_files( self, keywords: Optional[Union[str, list[str]]] = None, hemisphere: Optional[str] = None, stimulation: Optional[str] = None, medication: Optional[str] = None, exclude: Optional[Union[str, list[str]]] = None, ) -> None: """Filter filepaths for given parameters.""" filtered_files = self.files if exclude: if not isinstance(exclude, list): exclude = [exclude] filtered_files = [ file for file in filtered_files if not any(item in file for item in exclude) ] if keywords: if not isinstance(keywords, list): keywords = [keywords] filtered_files = self._keyword_search(filtered_files, keywords) if stimulation: if stimulation.lower() in "stimon": stim = "StimOn" elif stimulation.lower() in "stimoff": stim = "StimOff" else: raise ValueError("Keyword for stimulation not valid.") filtered_files = self._keyword_search(filtered_files, [stim]) if medication: if medication.lower() in "medon": med = "MedOn" elif medication.lower() in "medoff": med = "MedOff" else: raise ValueError("Keyword for medication not valid.") filtered_files = self._keyword_search(filtered_files, [med]) if hemisphere: matching_files = [] for file in filtered_files: subject = mne_bids.get_entities_from_fname(file)["subject"] if ( subject not in self.hemispheres or self.hemispheres[subject] is None ): raise HemisphereNotSpecifiedError( subject, 
self.hemispheres ) hem = self.hemispheres[subject] + "_" if hemisphere.lower() in "ipsilateral" and hem in file: matching_files.append(file) if hemisphere.lower() in "contralateral" and hem not in file: matching_files.append(file) filtered_files = matching_files self.files = filtered_files class DirectoryNotFoundError(Exception): """Exception raised when invalid Reader is passed. Attributes: directory -- input directory which caused the error """ def __init__( self, directory: Union[Path, str], message="Input directory was not found.", ): self.directory = directory self.message = message super().__init__(self.message) def __str__(self): return f"{self.message} Got: {self.directory}." class HemisphereNotSpecifiedError(Exception): """Exception raised when electrode hemisphere is not specified in settings. Attributes: subject -- input subject which caused the error hemisphere -- specified hemispheres message -- explanation of the error """ def __init__( self, subject, hemispheres, message=( "Input ECOG hemisphere is not specified in" " `filefinder_settings.py` for given subject." ), ) -> None: self.subject = subject self.hemispheres = hemispheres self.message = message super().__init__(self.message) def __str__(self): return ( f"{self.message} Unspecified subject: {self.subject}." f" Specified hemispheres: {self.hemispheres}." )
def _find_files( self, directory: Union[Path, str], extensions: Optional[Union[list, str]] = None, ) -> None: """Find files in directory with optional extensions. Args: directory (string) keywords (list): e.g. ["SelfpacedRota", "ButtonPress"] (optional) extensions (list): e.g. [".json", ".tsv"] (optional) verbose (bool): verbosity level (optional, default=True) """ files = [] for root, _, fnames in os.walk(directory): fnames = [os.path.join(root, file) for file in fnames] fnames = self._keyword_search(fnames, extensions) if fnames: files.extend(fnames) self.files = files
88
108
"""Define abstract base classes to construct FileFinder classes.""" import os import shutil from abc import ABC, abstractmethod from dataclasses import dataclass, field from pathlib import Path from typing import Optional, Sequence, Union import mne_bids @dataclass class FileFinder(ABC): """Basic representation of class for finding and filtering files.""" hemispheres: Union[dict, None] = field(default_factory=dict) directory: Union[Path, str] = field(init=False) files: list = field(init=False, default_factory=list) def __str__(self): if not self.files: return "No corresponding files found." headers = ["Index", "Filename"] col_width = max(len(os.path.basename(file)) for file in self.files) format_row = f"{{:>{len(headers[0]) + 2}}}{{:>{col_width + 2}}}" terminal_size = "\u2500" * shutil.get_terminal_size().columns return "\n".join( ( "Corresponding files found:", "".join( f"{{:>{len(header) + 2}}}".format(header) for header in headers ), terminal_size, *( format_row.format(idx, os.path.basename(file)) for idx, file in enumerate(self.files) ), ) ) def __len__(self) -> int: if not self.files: return 0 return len(self.files) @abstractmethod def find_files( self, directory: Union[str, Path], extensions: Optional[Union[Sequence, str]] = None, keywords: Optional[Union[list, str]] = None, hemisphere: Optional[str] = None, stimulation: Optional[str] = None, medication: Optional[str] = None, exclude: Optional[Union[str, list]] = None, verbose: bool = False, ) -> None: """Find files in directory with optional keywords and extensions.""" @abstractmethod def filter_files( self, keywords: Optional[Union[str, list]] = None, hemisphere: Optional[str] = None, stimulation: Optional[str] = None, medication: Optional[str] = None, exclude: Optional[Union[str, list]] = None, verbose: bool = False, ) -> None: """Filter list of filepaths for given parameters.""" @staticmethod def _keyword_search( files: list[str], keywords: Optional[Union[str, list]] ) -> list: if not keywords: return files if not isinstance(keywords, list): keywords = [keywords] filtered_files = [ file for file in files if any(key in file for key in keywords) ] return filtered_files def _find_files( self, directory: Union[Path, str], extensions: Optional[Union[list, str]] = None, ) -> None: """Find files in directory with optional extensions. Args: directory (string) keywords (list): e.g. ["SelfpacedRota", "ButtonPress] (optional) extensions (list): e.g. 
[".json" or "tsv"] (optional) verbose (bool): verbosity level (optional, default=True) """ files = [] for root, _, fnames in os.walk(directory): fnames = [os.path.join(root, file) for file in fnames] fnames = self._keyword_search(fnames, extensions) if fnames: files.extend(fnames) self.files = files def _filter_files( self, keywords: Optional[Union[str, list[str]]] = None, hemisphere: Optional[str] = None, stimulation: Optional[str] = None, medication: Optional[str] = None, exclude: Optional[Union[str, list[str]]] = None, ) -> None: """Filter filepaths for given parameters.""" filtered_files = self.files if exclude: if not isinstance(exclude, list): exclude = [exclude] filtered_files = [ file for file in filtered_files if not any(item in file for item in exclude) ] if keywords: if not isinstance(keywords, list): keywords = [keywords] filtered_files = self._keyword_search(filtered_files, keywords) if stimulation: if stimulation.lower() in "stimon": stim = "StimOn" elif stimulation.lower() in "stimoff": stim = "StimOff" else: raise ValueError("Keyword for stimulation not valid.") filtered_files = self._keyword_search(filtered_files, [stim]) if medication: if medication.lower() in "medon": med = "MedOn" elif medication.lower() in "medoff": med = "MedOff" else: raise ValueError("Keyword for medication not valid.") filtered_files = self._keyword_search(filtered_files, [med]) if hemisphere: matching_files = [] for file in filtered_files: subject = mne_bids.get_entities_from_fname(file)["subject"] if ( subject not in self.hemispheres or self.hemispheres[subject] is None ): raise HemisphereNotSpecifiedError( subject, self.hemispheres ) hem = self.hemispheres[subject] + "_" if hemisphere.lower() in "ipsilateral" and hem in file: matching_files.append(file) if hemisphere.lower() in "contralateral" and hem not in file: matching_files.append(file) filtered_files = matching_files self.files = filtered_files class DirectoryNotFoundError(Exception): """Exception raised when invalid Reader is passed. Attributes: directory -- input directory which caused the error """ def __init__( self, directory: Union[Path, str], message="Input directory was not found.", ): self.directory = directory self.message = message super().__init__(self.message) def __str__(self): return f"{self.message} Got: {self.directory}." class HemisphereNotSpecifiedError(Exception): """Exception raised when electrode hemisphere is not specified in settings. Attributes: subject -- input subject which caused the error hemisphere -- specified hemispheres message -- explanation of the error """ def __init__( self, subject, hemispheres, message=( "Input ECOG hemisphere is not specified in" " `filefinder_settings.py` for given subject." ), ) -> None: self.subject = subject self.hemispheres = hemispheres self.message = message super().__init__(self.message) def __str__(self): return ( f"{self.message} Unspecified subject: {self.subject}." f" Specified hemispheres: {self.hemispheres}." )
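A sketch of how a concrete subclass might wire the abstract find_files/filter_files interface to the private helpers above; the class name and the example path are illustrative, not part of the original module.

class SimpleFileFinder(FileFinder):
    """Illustrative concrete finder: walk a directory, then filter the hits."""

    def find_files(self, directory, extensions=None, keywords=None, hemisphere=None,
                   stimulation=None, medication=None, exclude=None, verbose=False) -> None:
        self.directory = directory
        self._find_files(directory, extensions)
        self._filter_files(keywords, hemisphere, stimulation, medication, exclude)

    def filter_files(self, keywords=None, hemisphere=None, stimulation=None,
                     medication=None, exclude=None, verbose=False) -> None:
        self._filter_files(keywords, hemisphere, stimulation, medication, exclude)


finder = SimpleFileFinder(hemispheres={})                   # no hemisphere lookup needed here
finder.find_files("/data/bids_root", extensions=[".json"])  # path is a placeholder
print(finder)                                               # __str__ prints an indexed file table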
irfft2
Compute the 2-dimensional inverse FFT of a real array. Parameters ---------- a : array_like The input tensor s : sequence of ints, optional Shape of the inverse FFT. axes : sequence of ints, optional The axes over which to compute the inverse fft. Default is the last two axes. norm : {None, "ortho"}, optional Normalization mode (see `mt.fft`). Default is None. Returns ------- out : Tensor The result of the inverse real 2-D FFT. See Also -------- irfftn : Compute the inverse of the N-dimensional FFT of real input. Notes ----- This is really `irfftn` with different defaults. For more details see `irfftn`.
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright 1999-2020 Alibaba Group Holding Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from ... import opcodes as OperandDef from ..datasource import tensor as astensor from .core import TensorRealIFFTNMixin, validate_fftn, TensorRealFFTN class TensorIRFFT2(TensorRealFFTN, TensorRealIFFTNMixin): _op_type_ = OperandDef.IRFFT2 def __init__(self, shape=None, axes=None, norm=None, **kw): super().__init__(_shape=shape, _axes=axes, _norm=norm, **kw) # MASKED: irfft2 function (lines 31-67)
def irfft2(a, s=None, axes=(-2, -1), norm=None): """ Compute the 2-dimensional inverse FFT of a real array. Parameters ---------- a : array_like The input tensor s : sequence of ints, optional Shape of the inverse FFT. axes : sequence of ints, optional The axes over which to compute the inverse fft. Default is the last two axes. norm : {None, "ortho"}, optional Normalization mode (see `mt.fft`). Default is None. Returns ------- out : Tensor The result of the inverse real 2-D FFT. See Also -------- irfftn : Compute the inverse of the N-dimensional FFT of real input. Notes ----- This is really `irfftn` with different defaults. For more details see `irfftn`. """ if len(axes) != 2: raise ValueError("axes length should be 2") a = astensor(a) axes = validate_fftn(a, s=s, axes=axes, norm=norm) op = TensorIRFFT2(shape=s, axes=axes, norm=norm, dtype=np.dtype(np.float_)) return op(a)
31
67
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright 1999-2020 Alibaba Group Holding Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from ... import opcodes as OperandDef from ..datasource import tensor as astensor from .core import TensorRealIFFTNMixin, validate_fftn, TensorRealFFTN class TensorIRFFT2(TensorRealFFTN, TensorRealIFFTNMixin): _op_type_ = OperandDef.IRFFT2 def __init__(self, shape=None, axes=None, norm=None, **kw): super().__init__(_shape=shape, _axes=axes, _norm=norm, **kw) def irfft2(a, s=None, axes=(-2, -1), norm=None): """ Compute the 2-dimensional inverse FFT of a real array. Parameters ---------- a : array_like The input tensor s : sequence of ints, optional Shape of the inverse FFT. axes : sequence of ints, optional The axes over which to compute the inverse fft. Default is the last two axes. norm : {None, "ortho"}, optional Normalization mode (see `mt.fft`). Default is None. Returns ------- out : Tensor The result of the inverse real 2-D FFT. See Also -------- irfftn : Compute the inverse of the N-dimensional FFT of real input. Notes ----- This is really `irfftn` with different defaults. For more details see `irfftn`. """ if len(axes) != 2: raise ValueError("axes length should be 2") a = astensor(a) axes = validate_fftn(a, s=s, axes=axes, norm=norm) op = TensorIRFFT2(shape=s, axes=axes, norm=norm, dtype=np.dtype(np.float_)) return op(a)
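The docstring notes that this is really irfftn with 2-D defaults; the semantics follow NumPy's real inverse FFT, so a small NumPy round trip (assuming the tensor version mirrors numpy.fft behaviour, as the docstring implies) illustrates why the s argument matters.

import numpy as np

a = np.random.rand(4, 6)               # real-valued input
spec = np.fft.rfft2(a)                 # shape (4, 4): last axis is halved (6 // 2 + 1)
back = np.fft.irfft2(spec, s=a.shape)  # pass s to recover the original last-axis length
assert np.allclose(a, back)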
_send_notification
Send a notification to a specific node_id This version of the overridden method includes the necessary crypto headers for the notification. :type notification: autopush.utils.WebPushNotification
"""WebPush Style Autopush Router This router handles notifications that should be dispatched to an Autopush node, or stores each individual message, along with its data, in a Message table for retrieval by the client. """ import json import time from StringIO import StringIO from typing import Any # noqa from botocore.exceptions import ClientError from twisted.internet.threads import deferToThread from twisted.web.client import FileBodyProducer from twisted.internet.defer import ( inlineCallbacks, returnValue, CancelledError, ) from twisted.internet.error import ( ConnectError, ConnectionClosed, ConnectionRefusedError, ) from twisted.logger import Logger from twisted.web._newclient import ResponseFailed from twisted.web.http import PotentialDataLoss from autopush.exceptions import ItemNotFound, RouterException from autopush.metrics import make_tags from autopush.protocol import IgnoreBody from autopush.router.interface import RouterResponse from autopush.types import JSONDict # noqa TTL_URL = "https://webpush-wg.github.io/webpush-protocol/#rfc.section.6.2" class WebPushRouter(object): """Implements :class: `autopush.router.interface.IRouter` for internal routing to an autopush node """ log = Logger() def __init__(self, conf, router_conf, db, agent): """Create a new Router""" self.conf = conf self.router_conf = router_conf self.db = db self.agent = agent @property def metrics(self): return self.db.metrics def register(self, uaid, router_data, app_id, *args, **kwargs): # type: (str, JSONDict, str, *Any, **Any) -> None """No additional routing data""" def amend_endpoint_response(self, response, router_data): # type: (JSONDict, JSONDict) -> None """Stubbed out for this router""" @inlineCallbacks def route_notification(self, notification, uaid_data): """Route a notification to an internal node, and store it if the node can't deliver immediately or is no longer a valid node """ # Determine if they're connected at the moment node_id = uaid_data.get("node_id") uaid = uaid_data["uaid"] router = self.db.router # Node_id is present, attempt delivery. # - Send Notification to node # - Success: Done, return 200 # - Error (Node busy): Jump to Save notification below # - Error (Client gone, node gone/dead): Clear node entry for user # - Both: Done, return 503 if node_id: result = None try: result = yield self._send_notification(uaid, node_id, notification) except (ConnectError, ConnectionClosed, ResponseFailed, CancelledError, PotentialDataLoss) as exc: self.metrics.increment("updates.client.host_gone") yield deferToThread(router.clear_node, uaid_data).addErrback(self._eat_db_err) if isinstance(exc, ConnectionRefusedError): # Occurs if an IP record is now used by some other node # in AWS or if the connection timesout. self.log.debug("Could not route message: {exc}", exc=exc) if result and result.code == 200: returnValue(self.delivered_response(notification)) # Save notification, node is not present or busy # - Save notification # - Success (older version): Done, return 202 # - Error (db error): Done, return 503 try: yield self._save_notification(uaid_data, notification) except ClientError as e: log_exception = (e.response["Error"]["Code"] != "ProvisionedThroughputExceededException") raise RouterException("Error saving to database", status_code=503, response_body="Retry Request", log_exception=log_exception, errno=201) # - Lookup client again to get latest node state after save. 
# - Success (node found): Notify node of new notification # - Success: Done, return 200 # - Error (no client): Done, return 202 # - Error (no node): Clear node entry # - Both: Done, return 202 # - Success (no node): Done, return 202 # - Error (db error): Done, return 202 # - Error (no client) : Done, return 404 try: uaid_data = yield deferToThread(router.get_uaid, uaid) except ClientError: returnValue(self.stored_response(notification)) except ItemNotFound: self.metrics.increment("updates.client.deleted") raise RouterException("User was deleted", status_code=410, response_body="Invalid UAID", log_exception=False, errno=105) # Verify there's a node_id in here, if not we're done node_id = uaid_data.get("node_id") if not node_id: returnValue(self.stored_response(notification)) try: result = yield self._send_notification_check(uaid, node_id) except (ConnectError, ConnectionClosed, ResponseFailed) as exc: self.metrics.increment("updates.client.host_gone") if isinstance(exc, ConnectionRefusedError): self.log.debug("Could not route message: {exc}", exc=exc) yield deferToThread( router.clear_node, uaid_data).addErrback(self._eat_db_err) returnValue(self.stored_response(notification)) if result.code == 200: returnValue(self.delivered_response(notification)) else: ret_val = self.stored_response(notification) returnValue(ret_val) def delivered_response(self, notification): self.metrics.increment("notification.message_data", notification.data_length, tags=make_tags(destination='Stored')) location = "%s/m/%s" % (self.conf.endpoint_url, notification.location) return RouterResponse(status_code=201, response_body="", headers={"Location": location, "TTL": notification.ttl or 0}, logged_status=200) def stored_response(self, notification): self.metrics.increment("notification.message_data", notification.data_length, tags=make_tags(destination='Direct')) location = "%s/m/%s" % (self.conf.endpoint_url, notification.location) return RouterResponse(status_code=201, response_body="", headers={"Location": location, "TTL": notification.ttl}, logged_status=202) ############################################################# # Blocking Helper Functions ############################################################# # MASKED: _send_notification function (lines 179-197) def _send_notification_check(self, uaid, node_id): """Send a command to the node to check for notifications""" url = node_id + "/notif/" + uaid return self.agent.request( "PUT", url.encode("utf8"), ).addCallback(IgnoreBody.ignore) def _save_notification(self, uaid_data, notification): """Saves a notification, returns a deferred. This version of the overridden method saves each individual message to the message table along with relevant request headers if available. 
:type uaid_data: dict """ month_table = uaid_data["current_month"] if notification.ttl is None: # Note that this URL is temporary, as well as this warning as # we will 400 all missing TTL's eventually raise RouterException( "Missing TTL Header", response_body="Missing TTL Header, see: %s" % TTL_URL, status_code=400, errno=111, log_exception=False, ) if notification.ttl == 0: location = "%s/m/%s" % (self.conf.endpoint_url, notification.version) raise RouterException("Finished Routing", status_code=201, log_exception=False, headers={"TTL": str(notification.ttl), "Location": location}, logged_status=204) return deferToThread( self.db.message_table(month_table).store_message, notification=notification, ) ############################################################# # Error Callbacks ############################################################# def _eat_db_err(self, fail): """errBack for ignoring provisioned throughput errors""" fail.trap(ClientError)
def _send_notification(self, uaid, node_id, notification): """Send a notification to a specific node_id This version of the overridden method includes the necessary crypto headers for the notification. :type notification: autopush.utils.WebPushNotification """ payload = notification.serialize() payload["timestamp"] = int(time.time()) url = node_id + "/push/" + uaid request = self.agent.request( "PUT", url.encode("utf8"), bodyProducer=FileBodyProducer(StringIO(json.dumps(payload))), ) request.addCallback(IgnoreBody.ignore) return request
179
197
"""WebPush Style Autopush Router This router handles notifications that should be dispatched to an Autopush node, or stores each individual message, along with its data, in a Message table for retrieval by the client. """ import json import time from StringIO import StringIO from typing import Any # noqa from botocore.exceptions import ClientError from twisted.internet.threads import deferToThread from twisted.web.client import FileBodyProducer from twisted.internet.defer import ( inlineCallbacks, returnValue, CancelledError, ) from twisted.internet.error import ( ConnectError, ConnectionClosed, ConnectionRefusedError, ) from twisted.logger import Logger from twisted.web._newclient import ResponseFailed from twisted.web.http import PotentialDataLoss from autopush.exceptions import ItemNotFound, RouterException from autopush.metrics import make_tags from autopush.protocol import IgnoreBody from autopush.router.interface import RouterResponse from autopush.types import JSONDict # noqa TTL_URL = "https://webpush-wg.github.io/webpush-protocol/#rfc.section.6.2" class WebPushRouter(object): """Implements :class: `autopush.router.interface.IRouter` for internal routing to an autopush node """ log = Logger() def __init__(self, conf, router_conf, db, agent): """Create a new Router""" self.conf = conf self.router_conf = router_conf self.db = db self.agent = agent @property def metrics(self): return self.db.metrics def register(self, uaid, router_data, app_id, *args, **kwargs): # type: (str, JSONDict, str, *Any, **Any) -> None """No additional routing data""" def amend_endpoint_response(self, response, router_data): # type: (JSONDict, JSONDict) -> None """Stubbed out for this router""" @inlineCallbacks def route_notification(self, notification, uaid_data): """Route a notification to an internal node, and store it if the node can't deliver immediately or is no longer a valid node """ # Determine if they're connected at the moment node_id = uaid_data.get("node_id") uaid = uaid_data["uaid"] router = self.db.router # Node_id is present, attempt delivery. # - Send Notification to node # - Success: Done, return 200 # - Error (Node busy): Jump to Save notification below # - Error (Client gone, node gone/dead): Clear node entry for user # - Both: Done, return 503 if node_id: result = None try: result = yield self._send_notification(uaid, node_id, notification) except (ConnectError, ConnectionClosed, ResponseFailed, CancelledError, PotentialDataLoss) as exc: self.metrics.increment("updates.client.host_gone") yield deferToThread(router.clear_node, uaid_data).addErrback(self._eat_db_err) if isinstance(exc, ConnectionRefusedError): # Occurs if an IP record is now used by some other node # in AWS or if the connection timesout. self.log.debug("Could not route message: {exc}", exc=exc) if result and result.code == 200: returnValue(self.delivered_response(notification)) # Save notification, node is not present or busy # - Save notification # - Success (older version): Done, return 202 # - Error (db error): Done, return 503 try: yield self._save_notification(uaid_data, notification) except ClientError as e: log_exception = (e.response["Error"]["Code"] != "ProvisionedThroughputExceededException") raise RouterException("Error saving to database", status_code=503, response_body="Retry Request", log_exception=log_exception, errno=201) # - Lookup client again to get latest node state after save. 
# - Success (node found): Notify node of new notification # - Success: Done, return 200 # - Error (no client): Done, return 202 # - Error (no node): Clear node entry # - Both: Done, return 202 # - Success (no node): Done, return 202 # - Error (db error): Done, return 202 # - Error (no client) : Done, return 404 try: uaid_data = yield deferToThread(router.get_uaid, uaid) except ClientError: returnValue(self.stored_response(notification)) except ItemNotFound: self.metrics.increment("updates.client.deleted") raise RouterException("User was deleted", status_code=410, response_body="Invalid UAID", log_exception=False, errno=105) # Verify there's a node_id in here, if not we're done node_id = uaid_data.get("node_id") if not node_id: returnValue(self.stored_response(notification)) try: result = yield self._send_notification_check(uaid, node_id) except (ConnectError, ConnectionClosed, ResponseFailed) as exc: self.metrics.increment("updates.client.host_gone") if isinstance(exc, ConnectionRefusedError): self.log.debug("Could not route message: {exc}", exc=exc) yield deferToThread( router.clear_node, uaid_data).addErrback(self._eat_db_err) returnValue(self.stored_response(notification)) if result.code == 200: returnValue(self.delivered_response(notification)) else: ret_val = self.stored_response(notification) returnValue(ret_val) def delivered_response(self, notification): self.metrics.increment("notification.message_data", notification.data_length, tags=make_tags(destination='Stored')) location = "%s/m/%s" % (self.conf.endpoint_url, notification.location) return RouterResponse(status_code=201, response_body="", headers={"Location": location, "TTL": notification.ttl or 0}, logged_status=200) def stored_response(self, notification): self.metrics.increment("notification.message_data", notification.data_length, tags=make_tags(destination='Direct')) location = "%s/m/%s" % (self.conf.endpoint_url, notification.location) return RouterResponse(status_code=201, response_body="", headers={"Location": location, "TTL": notification.ttl}, logged_status=202) ############################################################# # Blocking Helper Functions ############################################################# def _send_notification(self, uaid, node_id, notification): """Send a notification to a specific node_id This version of the overriden method includes the necessary crypto headers for the notification. :type notification: autopush.utils.WebPushNotification """ payload = notification.serialize() payload["timestamp"] = int(time.time()) url = node_id + "/push/" + uaid request = self.agent.request( "PUT", url.encode("utf8"), bodyProducer=FileBodyProducer(StringIO(json.dumps(payload))), ) request.addCallback(IgnoreBody.ignore) return request def _send_notification_check(self, uaid, node_id): """Send a command to the node to check for notifications""" url = node_id + "/notif/" + uaid return self.agent.request( "PUT", url.encode("utf8"), ).addCallback(IgnoreBody.ignore) def _save_notification(self, uaid_data, notification): """Saves a notification, returns a deferred. This version of the overridden method saves each individual message to the message table along with relevant request headers if available. 
:type uaid_data: dict """ month_table = uaid_data["current_month"] if notification.ttl is None: # Note that this URL is temporary, as well as this warning as # we will 400 all missing TTL's eventually raise RouterException( "Missing TTL Header", response_body="Missing TTL Header, see: %s" % TTL_URL, status_code=400, errno=111, log_exception=False, ) if notification.ttl == 0: location = "%s/m/%s" % (self.conf.endpoint_url, notification.version) raise RouterException("Finished Routing", status_code=201, log_exception=False, headers={"TTL": str(notification.ttl), "Location": location}, logged_status=204) return deferToThread( self.db.message_table(month_table).store_message, notification=notification, ) ############################################################# # Error Callbacks ############################################################# def _eat_db_err(self, fail): """errBack for ignoring provisioned throughput errors""" fail.trap(ClientError)
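A sketch of the body that _send_notification PUTs to <node_id>/push/<uaid>; the field names come from notification.serialize(), which lives in autopush.utils and is not shown here, so the keys below are illustrative placeholders.

import json, time

payload = {                                   # hypothetical serialize() output
    "channelID": "6ba7b810-9dad-11d1-80b4-00c04fd430c8",
    "version": "gAAAAAB...",
    "ttl": 60,
    "data": "aGVsbG8",                        # encrypted body, base64url-encoded
    "headers": {"encoding": "aes128gcm"},
}
payload["timestamp"] = int(time.time())       # the router adds the send time
body = json.dumps(payload)                    # wrapped in a FileBodyProducer for the PUT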
_save_notification
Saves a notification, returns a deferred. This version of the overridden method saves each individual message to the message table along with relevant request headers if available. :type uaid_data: dict
"""WebPush Style Autopush Router This router handles notifications that should be dispatched to an Autopush node, or stores each individual message, along with its data, in a Message table for retrieval by the client. """ import json import time from StringIO import StringIO from typing import Any # noqa from botocore.exceptions import ClientError from twisted.internet.threads import deferToThread from twisted.web.client import FileBodyProducer from twisted.internet.defer import ( inlineCallbacks, returnValue, CancelledError, ) from twisted.internet.error import ( ConnectError, ConnectionClosed, ConnectionRefusedError, ) from twisted.logger import Logger from twisted.web._newclient import ResponseFailed from twisted.web.http import PotentialDataLoss from autopush.exceptions import ItemNotFound, RouterException from autopush.metrics import make_tags from autopush.protocol import IgnoreBody from autopush.router.interface import RouterResponse from autopush.types import JSONDict # noqa TTL_URL = "https://webpush-wg.github.io/webpush-protocol/#rfc.section.6.2" class WebPushRouter(object): """Implements :class: `autopush.router.interface.IRouter` for internal routing to an autopush node """ log = Logger() def __init__(self, conf, router_conf, db, agent): """Create a new Router""" self.conf = conf self.router_conf = router_conf self.db = db self.agent = agent @property def metrics(self): return self.db.metrics def register(self, uaid, router_data, app_id, *args, **kwargs): # type: (str, JSONDict, str, *Any, **Any) -> None """No additional routing data""" def amend_endpoint_response(self, response, router_data): # type: (JSONDict, JSONDict) -> None """Stubbed out for this router""" @inlineCallbacks def route_notification(self, notification, uaid_data): """Route a notification to an internal node, and store it if the node can't deliver immediately or is no longer a valid node """ # Determine if they're connected at the moment node_id = uaid_data.get("node_id") uaid = uaid_data["uaid"] router = self.db.router # Node_id is present, attempt delivery. # - Send Notification to node # - Success: Done, return 200 # - Error (Node busy): Jump to Save notification below # - Error (Client gone, node gone/dead): Clear node entry for user # - Both: Done, return 503 if node_id: result = None try: result = yield self._send_notification(uaid, node_id, notification) except (ConnectError, ConnectionClosed, ResponseFailed, CancelledError, PotentialDataLoss) as exc: self.metrics.increment("updates.client.host_gone") yield deferToThread(router.clear_node, uaid_data).addErrback(self._eat_db_err) if isinstance(exc, ConnectionRefusedError): # Occurs if an IP record is now used by some other node # in AWS or if the connection timesout. self.log.debug("Could not route message: {exc}", exc=exc) if result and result.code == 200: returnValue(self.delivered_response(notification)) # Save notification, node is not present or busy # - Save notification # - Success (older version): Done, return 202 # - Error (db error): Done, return 503 try: yield self._save_notification(uaid_data, notification) except ClientError as e: log_exception = (e.response["Error"]["Code"] != "ProvisionedThroughputExceededException") raise RouterException("Error saving to database", status_code=503, response_body="Retry Request", log_exception=log_exception, errno=201) # - Lookup client again to get latest node state after save. 
# - Success (node found): Notify node of new notification # - Success: Done, return 200 # - Error (no client): Done, return 202 # - Error (no node): Clear node entry # - Both: Done, return 202 # - Success (no node): Done, return 202 # - Error (db error): Done, return 202 # - Error (no client) : Done, return 404 try: uaid_data = yield deferToThread(router.get_uaid, uaid) except ClientError: returnValue(self.stored_response(notification)) except ItemNotFound: self.metrics.increment("updates.client.deleted") raise RouterException("User was deleted", status_code=410, response_body="Invalid UAID", log_exception=False, errno=105) # Verify there's a node_id in here, if not we're done node_id = uaid_data.get("node_id") if not node_id: returnValue(self.stored_response(notification)) try: result = yield self._send_notification_check(uaid, node_id) except (ConnectError, ConnectionClosed, ResponseFailed) as exc: self.metrics.increment("updates.client.host_gone") if isinstance(exc, ConnectionRefusedError): self.log.debug("Could not route message: {exc}", exc=exc) yield deferToThread( router.clear_node, uaid_data).addErrback(self._eat_db_err) returnValue(self.stored_response(notification)) if result.code == 200: returnValue(self.delivered_response(notification)) else: ret_val = self.stored_response(notification) returnValue(ret_val) def delivered_response(self, notification): self.metrics.increment("notification.message_data", notification.data_length, tags=make_tags(destination='Stored')) location = "%s/m/%s" % (self.conf.endpoint_url, notification.location) return RouterResponse(status_code=201, response_body="", headers={"Location": location, "TTL": notification.ttl or 0}, logged_status=200) def stored_response(self, notification): self.metrics.increment("notification.message_data", notification.data_length, tags=make_tags(destination='Direct')) location = "%s/m/%s" % (self.conf.endpoint_url, notification.location) return RouterResponse(status_code=201, response_body="", headers={"Location": location, "TTL": notification.ttl}, logged_status=202) ############################################################# # Blocking Helper Functions ############################################################# def _send_notification(self, uaid, node_id, notification): """Send a notification to a specific node_id This version of the overriden method includes the necessary crypto headers for the notification. :type notification: autopush.utils.WebPushNotification """ payload = notification.serialize() payload["timestamp"] = int(time.time()) url = node_id + "/push/" + uaid request = self.agent.request( "PUT", url.encode("utf8"), bodyProducer=FileBodyProducer(StringIO(json.dumps(payload))), ) request.addCallback(IgnoreBody.ignore) return request def _send_notification_check(self, uaid, node_id): """Send a command to the node to check for notifications""" url = node_id + "/notif/" + uaid return self.agent.request( "PUT", url.encode("utf8"), ).addCallback(IgnoreBody.ignore) # MASKED: _save_notification function (lines 207-239) ############################################################# # Error Callbacks ############################################################# def _eat_db_err(self, fail): """errBack for ignoring provisioned throughput errors""" fail.trap(ClientError)
def _save_notification(self, uaid_data, notification): """Saves a notification, returns a deferred. This version of the overridden method saves each individual message to the message table along with relevant request headers if available. :type uaid_data: dict """ month_table = uaid_data["current_month"] if notification.ttl is None: # Note that this URL is temporary, as well as this warning as # we will 400 all missing TTL's eventually raise RouterException( "Missing TTL Header", response_body="Missing TTL Header, see: %s" % TTL_URL, status_code=400, errno=111, log_exception=False, ) if notification.ttl == 0: location = "%s/m/%s" % (self.conf.endpoint_url, notification.version) raise RouterException("Finished Routing", status_code=201, log_exception=False, headers={"TTL": str(notification.ttl), "Location": location}, logged_status=204) return deferToThread( self.db.message_table(month_table).store_message, notification=notification, )
207
239
"""WebPush Style Autopush Router This router handles notifications that should be dispatched to an Autopush node, or stores each individual message, along with its data, in a Message table for retrieval by the client. """ import json import time from StringIO import StringIO from typing import Any # noqa from botocore.exceptions import ClientError from twisted.internet.threads import deferToThread from twisted.web.client import FileBodyProducer from twisted.internet.defer import ( inlineCallbacks, returnValue, CancelledError, ) from twisted.internet.error import ( ConnectError, ConnectionClosed, ConnectionRefusedError, ) from twisted.logger import Logger from twisted.web._newclient import ResponseFailed from twisted.web.http import PotentialDataLoss from autopush.exceptions import ItemNotFound, RouterException from autopush.metrics import make_tags from autopush.protocol import IgnoreBody from autopush.router.interface import RouterResponse from autopush.types import JSONDict # noqa TTL_URL = "https://webpush-wg.github.io/webpush-protocol/#rfc.section.6.2" class WebPushRouter(object): """Implements :class: `autopush.router.interface.IRouter` for internal routing to an autopush node """ log = Logger() def __init__(self, conf, router_conf, db, agent): """Create a new Router""" self.conf = conf self.router_conf = router_conf self.db = db self.agent = agent @property def metrics(self): return self.db.metrics def register(self, uaid, router_data, app_id, *args, **kwargs): # type: (str, JSONDict, str, *Any, **Any) -> None """No additional routing data""" def amend_endpoint_response(self, response, router_data): # type: (JSONDict, JSONDict) -> None """Stubbed out for this router""" @inlineCallbacks def route_notification(self, notification, uaid_data): """Route a notification to an internal node, and store it if the node can't deliver immediately or is no longer a valid node """ # Determine if they're connected at the moment node_id = uaid_data.get("node_id") uaid = uaid_data["uaid"] router = self.db.router # Node_id is present, attempt delivery. # - Send Notification to node # - Success: Done, return 200 # - Error (Node busy): Jump to Save notification below # - Error (Client gone, node gone/dead): Clear node entry for user # - Both: Done, return 503 if node_id: result = None try: result = yield self._send_notification(uaid, node_id, notification) except (ConnectError, ConnectionClosed, ResponseFailed, CancelledError, PotentialDataLoss) as exc: self.metrics.increment("updates.client.host_gone") yield deferToThread(router.clear_node, uaid_data).addErrback(self._eat_db_err) if isinstance(exc, ConnectionRefusedError): # Occurs if an IP record is now used by some other node # in AWS or if the connection timesout. self.log.debug("Could not route message: {exc}", exc=exc) if result and result.code == 200: returnValue(self.delivered_response(notification)) # Save notification, node is not present or busy # - Save notification # - Success (older version): Done, return 202 # - Error (db error): Done, return 503 try: yield self._save_notification(uaid_data, notification) except ClientError as e: log_exception = (e.response["Error"]["Code"] != "ProvisionedThroughputExceededException") raise RouterException("Error saving to database", status_code=503, response_body="Retry Request", log_exception=log_exception, errno=201) # - Lookup client again to get latest node state after save. 
# - Success (node found): Notify node of new notification # - Success: Done, return 200 # - Error (no client): Done, return 202 # - Error (no node): Clear node entry # - Both: Done, return 202 # - Success (no node): Done, return 202 # - Error (db error): Done, return 202 # - Error (no client) : Done, return 404 try: uaid_data = yield deferToThread(router.get_uaid, uaid) except ClientError: returnValue(self.stored_response(notification)) except ItemNotFound: self.metrics.increment("updates.client.deleted") raise RouterException("User was deleted", status_code=410, response_body="Invalid UAID", log_exception=False, errno=105) # Verify there's a node_id in here, if not we're done node_id = uaid_data.get("node_id") if not node_id: returnValue(self.stored_response(notification)) try: result = yield self._send_notification_check(uaid, node_id) except (ConnectError, ConnectionClosed, ResponseFailed) as exc: self.metrics.increment("updates.client.host_gone") if isinstance(exc, ConnectionRefusedError): self.log.debug("Could not route message: {exc}", exc=exc) yield deferToThread( router.clear_node, uaid_data).addErrback(self._eat_db_err) returnValue(self.stored_response(notification)) if result.code == 200: returnValue(self.delivered_response(notification)) else: ret_val = self.stored_response(notification) returnValue(ret_val) def delivered_response(self, notification): self.metrics.increment("notification.message_data", notification.data_length, tags=make_tags(destination='Stored')) location = "%s/m/%s" % (self.conf.endpoint_url, notification.location) return RouterResponse(status_code=201, response_body="", headers={"Location": location, "TTL": notification.ttl or 0}, logged_status=200) def stored_response(self, notification): self.metrics.increment("notification.message_data", notification.data_length, tags=make_tags(destination='Direct')) location = "%s/m/%s" % (self.conf.endpoint_url, notification.location) return RouterResponse(status_code=201, response_body="", headers={"Location": location, "TTL": notification.ttl}, logged_status=202) ############################################################# # Blocking Helper Functions ############################################################# def _send_notification(self, uaid, node_id, notification): """Send a notification to a specific node_id This version of the overriden method includes the necessary crypto headers for the notification. :type notification: autopush.utils.WebPushNotification """ payload = notification.serialize() payload["timestamp"] = int(time.time()) url = node_id + "/push/" + uaid request = self.agent.request( "PUT", url.encode("utf8"), bodyProducer=FileBodyProducer(StringIO(json.dumps(payload))), ) request.addCallback(IgnoreBody.ignore) return request def _send_notification_check(self, uaid, node_id): """Send a command to the node to check for notifications""" url = node_id + "/notif/" + uaid return self.agent.request( "PUT", url.encode("utf8"), ).addCallback(IgnoreBody.ignore) def _save_notification(self, uaid_data, notification): """Saves a notification, returns a deferred. This version of the overridden method saves each individual message to the message table along with relevant request headers if available. 
:type uaid_data: dict """ month_table = uaid_data["current_month"] if notification.ttl is None: # Note that this URL is temporary, as well as this warning as # we will 400 all missing TTL's eventually raise RouterException( "Missing TTL Header", response_body="Missing TTL Header, see: %s" % TTL_URL, status_code=400, errno=111, log_exception=False, ) if notification.ttl == 0: location = "%s/m/%s" % (self.conf.endpoint_url, notification.version) raise RouterException("Finished Routing", status_code=201, log_exception=False, headers={"TTL": str(notification.ttl), "Location": location}, logged_status=204) return deferToThread( self.db.message_table(month_table).store_message, notification=notification, ) ############################################################# # Error Callbacks ############################################################# def _eat_db_err(self, fail): """errBack for ignoring provisioned throughput errors""" fail.trap(ClientError)
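The route_notification method above follows the decision table spelled out in its comments: try direct delivery to the connected node, otherwise store the message, then re-check the user record and ping any node that has since connected. As a rough, standalone sketch of that flow (not autopush code; the helper callables are hypothetical stand-ins):

# Hypothetical, synchronous sketch of the route_notification decision flow
# described in the comments above (not autopush code; helpers are stand-ins).

def route(notification, uaid_data, send_to_node, save_message, lookup_user, notify_check):
    node_id = uaid_data.get("node_id")

    # 1. If the user looks connected, attempt direct delivery first.
    if node_id and send_to_node(node_id, notification):
        return "delivered"                      # logged as a 200

    # 2. Otherwise persist the message for later client retrieval.
    save_message(uaid_data, notification)

    # 3. Re-read the user record: a node may have (re)connected meanwhile.
    fresh = lookup_user(uaid_data["uaid"])      # may also signal "user deleted" -> 410
    node_id = fresh.get("node_id") if fresh else None

    # 4. If a node is now known, just ping it to check for new messages.
    if node_id and notify_check(node_id):
        return "delivered"
    return "stored"                             # logged as a 202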
_recursive_apply
This function is "applied" to every child in the block. This function in turn registers the forward hook to each module. It helps logging the input output tensors of that module.
# Third Party import mxnet as mx from mxnet.ndarray import NDArray # First Party from smdebug.core.collection import DEFAULT_MXNET_COLLECTIONS, CollectionKeys from smdebug.core.hook import CallbackHook from smdebug.core.json_config import DEFAULT_WORKER_NAME from smdebug.core.utils import FRAMEWORK, error_handling_agent from smdebug.mxnet.collection import CollectionManager from smdebug.mxnet.graph import _net2pb from smdebug.mxnet.singleton_utils import set_hook from smdebug.mxnet.utils import get_reduction_of_data, make_numpy_array from smdebug.profiler.profiler_config_parser import get_profiler_config_parser DEFAULT_INCLUDE_COLLECTIONS = [CollectionKeys.LOSSES] COLLECTIONS_NOT_REQUIRING_RECURSIVE_HOOK = [ CollectionKeys.WEIGHTS, CollectionKeys.BIASES, CollectionKeys.GRADIENTS, CollectionKeys.LOSSES, ] profiler_config_parser = get_profiler_config_parser(FRAMEWORK.PYTORCH) class Hook(CallbackHook): def __init__( self, out_dir=None, export_tensorboard=False, tensorboard_dir=None, dry_run=False, reduction_config=None, save_config=None, include_regex=None, include_collections=None, save_all=False, include_workers="one", ): collection_manager = CollectionManager() super().__init__( collection_manager=collection_manager, default_include_collections=DEFAULT_INCLUDE_COLLECTIONS, profiler_config_parser=profiler_config_parser, data_type_name=mx.ndarray.NDArray.__name__, out_dir=out_dir, export_tensorboard=export_tensorboard, tensorboard_dir=tensorboard_dir, dry_run=dry_run, reduction_config=reduction_config, save_config=save_config, include_regex=include_regex, include_collections=include_collections, save_all=save_all, include_workers=include_workers, ) self.last_block = None self.model = None self.exported_model = False # Keep the set of blocks to which this hook is registered. The blocks include loss blocks as well. self.registered_blocks = set() self.worker = self._get_worker_name() set_hook(self) def _get_worker_name(self): try: import horovod.mxnet as hvd if hvd.size(): return f"worker_{hvd.rank()}" except (ModuleNotFoundError, ValueError, ImportError): pass return DEFAULT_WORKER_NAME def _get_num_workers(self): try: import horovod.mxnet as hvd if hvd.size(): return hvd.size() except (ModuleNotFoundError, ValueError, ImportError): pass return 1 def _cleanup(self): # Write the gradients of the past step if the writer is still available. if self.writer is not None and self.last_block is not None: self._log_params(self.last_block) if self.exported_model is False: self._export_model() super()._cleanup() def _log_params(self, block): params = block.collect_params().values() for param in params: self._log_param(param) def _log_param(self, param): try: self._save_for_tensor( tensor_name=param.name, tensor_value=param.data(param.list_ctx()[0]) ) # If Gradient for this param is available if param.grad_req != "null": self._save_for_tensor( tensor_name=self.GRADIENT_PREFIX + param.name, tensor_value=param.grad(param.list_ctx()[0]), ) except RuntimeError as e: self.logger.warning( f"Could not log parameter {param.name} due to the mxnet exception: {e}" ) def _export_model(self): if self.model is not None: try: tb_writer = self._maybe_get_tb_writer() if tb_writer: tb_writer.write_graph(_net2pb(self.model)) except (RuntimeError, TypeError) as e: self.logger.warning( f"Could not export model graph for tensorboard " f"due to the mxnet exception: {e}" ) def _get_default_collections(self): return DEFAULT_MXNET_COLLECTIONS # This hook is invoked by trainer prior to running the forward pass. 
@error_handling_agent.catch_smdebug_errors() def forward_pre_hook(self, block, inputs): if self.writer is not None: # Write the params and gradients of the # past step if the writer is still available. self._log_params(block) self._close_writers() self._close_tb_writer() if not self.prepared_collections: # at this point we need all collections to be ready # this may not be the case at creation of hook # as user's code after hook might add collections self._prepare_collections() self.prepared_collections = True self._increment_step() if self._get_collections_to_save_for_step(): self._initialize_writers() if self.exported_model is False: self._export_model() self.exported_model = True if self.last_saved_step is not None and not self.exported_collections: self.export_collections() self.exported_collections = True self.last_block = block self._save_custom_tensors_post_step() # This hook is invoked by trainer after running the forward pass. @error_handling_agent.catch_smdebug_errors() def forward_hook(self, block, inputs, outputs): if not self._get_collections_to_save_for_step(): return block_name = block.name # This overwhelms the logs; turn back on if you really need it # logger.debug("Processing the global step {0} for block {1}".format(self.step, block_name)) # Output input tensor self._write_inputs(block_name, inputs) # Output output tensors self._write_outputs(block_name, outputs) self.last_saved_step = self.step # MASKED: _recursive_apply function (lines 185-196) def _is_recursive_needed(self): collections_to_save = self.include_collections # Check if default collection has a regex associated with it. # If it does we would need to apply hook recursively. if ( len(self.collection_manager.get(CollectionKeys.DEFAULT).include_regex) != 0 and CollectionKeys.DEFAULT in collections_to_save ): return True # Get the collections that are to be saved but are not part of default collections # We will need to apply hook recursively to get tensors specified in those collections. extra_coll = [ value for value in collections_to_save if value not in COLLECTIONS_NOT_REQUIRING_RECURSIVE_HOOK ] # extra_coll contains the collections that are not part of default collections. return len(extra_coll) != 0 def register_hook(self, block): # for compatibility with ZCC patches which call this self.register_block(block) @error_handling_agent.catch_smdebug_errors() def register_block(self, block): """ This function registers the forward hook. If user wants to register the hook for every child in the given block, then the function calls "apply" API for registration of the hook. The hook is registered recursively, if user has specified the collections that are more than the default collectors viz. gradients, weight and bias """ if not isinstance(block, mx.gluon.Block): self.logger.error(f"The given block type {block.__class__.__name__} is unsupported.") return # Check if the hook is already registered for this block. if block in self.registered_blocks: self.logger.warning(f"The hook is already registered to block {block.name}") return # Skip the forward pre hook for the Loss blocks. 
if isinstance(block, mx.gluon.loss.Loss): self.logger.info(f"Registering hook for block {block.name}") block.register_forward_hook(self.forward_hook) self.registered_blocks.add(block) return else: self.model = block is_recursive = self._is_recursive_needed() block.register_forward_pre_hook(self.forward_pre_hook) if is_recursive is True: block.apply(self._recursive_apply) else: block.register_forward_hook(self.forward_hook) self.registered_blocks.add(block) @staticmethod def _get_reduction_of_data(reduction_name, tensor_value, tensor_name, abs): return get_reduction_of_data(reduction_name, tensor_value, tensor_name, abs) @staticmethod def _make_numpy_array(tensor_value): if isinstance(tensor_value, NDArray): return tensor_value.asnumpy() return make_numpy_array(tensor_value)
def _recursive_apply(self, block):
        """
        This function is "applied" to every child in the block. It in turn
        registers the forward hook to each module, which helps log the input
        and output tensors of that module.
        """
        # Check if the hook is already registered for this block.
        if block in self.registered_blocks:
            self.logger.warning(f"The hook is already registered to block {block.name}")
            return
        block.register_forward_hook(self.forward_hook)
        self.registered_blocks.add(block)
185
196
# Third Party import mxnet as mx from mxnet.ndarray import NDArray # First Party from smdebug.core.collection import DEFAULT_MXNET_COLLECTIONS, CollectionKeys from smdebug.core.hook import CallbackHook from smdebug.core.json_config import DEFAULT_WORKER_NAME from smdebug.core.utils import FRAMEWORK, error_handling_agent from smdebug.mxnet.collection import CollectionManager from smdebug.mxnet.graph import _net2pb from smdebug.mxnet.singleton_utils import set_hook from smdebug.mxnet.utils import get_reduction_of_data, make_numpy_array from smdebug.profiler.profiler_config_parser import get_profiler_config_parser DEFAULT_INCLUDE_COLLECTIONS = [CollectionKeys.LOSSES] COLLECTIONS_NOT_REQUIRING_RECURSIVE_HOOK = [ CollectionKeys.WEIGHTS, CollectionKeys.BIASES, CollectionKeys.GRADIENTS, CollectionKeys.LOSSES, ] profiler_config_parser = get_profiler_config_parser(FRAMEWORK.PYTORCH) class Hook(CallbackHook): def __init__( self, out_dir=None, export_tensorboard=False, tensorboard_dir=None, dry_run=False, reduction_config=None, save_config=None, include_regex=None, include_collections=None, save_all=False, include_workers="one", ): collection_manager = CollectionManager() super().__init__( collection_manager=collection_manager, default_include_collections=DEFAULT_INCLUDE_COLLECTIONS, profiler_config_parser=profiler_config_parser, data_type_name=mx.ndarray.NDArray.__name__, out_dir=out_dir, export_tensorboard=export_tensorboard, tensorboard_dir=tensorboard_dir, dry_run=dry_run, reduction_config=reduction_config, save_config=save_config, include_regex=include_regex, include_collections=include_collections, save_all=save_all, include_workers=include_workers, ) self.last_block = None self.model = None self.exported_model = False # Keep the set of blocks to which this hook is registered. The blocks include loss blocks as well. self.registered_blocks = set() self.worker = self._get_worker_name() set_hook(self) def _get_worker_name(self): try: import horovod.mxnet as hvd if hvd.size(): return f"worker_{hvd.rank()}" except (ModuleNotFoundError, ValueError, ImportError): pass return DEFAULT_WORKER_NAME def _get_num_workers(self): try: import horovod.mxnet as hvd if hvd.size(): return hvd.size() except (ModuleNotFoundError, ValueError, ImportError): pass return 1 def _cleanup(self): # Write the gradients of the past step if the writer is still available. if self.writer is not None and self.last_block is not None: self._log_params(self.last_block) if self.exported_model is False: self._export_model() super()._cleanup() def _log_params(self, block): params = block.collect_params().values() for param in params: self._log_param(param) def _log_param(self, param): try: self._save_for_tensor( tensor_name=param.name, tensor_value=param.data(param.list_ctx()[0]) ) # If Gradient for this param is available if param.grad_req != "null": self._save_for_tensor( tensor_name=self.GRADIENT_PREFIX + param.name, tensor_value=param.grad(param.list_ctx()[0]), ) except RuntimeError as e: self.logger.warning( f"Could not log parameter {param.name} due to the mxnet exception: {e}" ) def _export_model(self): if self.model is not None: try: tb_writer = self._maybe_get_tb_writer() if tb_writer: tb_writer.write_graph(_net2pb(self.model)) except (RuntimeError, TypeError) as e: self.logger.warning( f"Could not export model graph for tensorboard " f"due to the mxnet exception: {e}" ) def _get_default_collections(self): return DEFAULT_MXNET_COLLECTIONS # This hook is invoked by trainer prior to running the forward pass. 
@error_handling_agent.catch_smdebug_errors() def forward_pre_hook(self, block, inputs): if self.writer is not None: # Write the params and gradients of the # past step if the writer is still available. self._log_params(block) self._close_writers() self._close_tb_writer() if not self.prepared_collections: # at this point we need all collections to be ready # this may not be the case at creation of hook # as user's code after hook might add collections self._prepare_collections() self.prepared_collections = True self._increment_step() if self._get_collections_to_save_for_step(): self._initialize_writers() if self.exported_model is False: self._export_model() self.exported_model = True if self.last_saved_step is not None and not self.exported_collections: self.export_collections() self.exported_collections = True self.last_block = block self._save_custom_tensors_post_step() # This hook is invoked by trainer after running the forward pass. @error_handling_agent.catch_smdebug_errors() def forward_hook(self, block, inputs, outputs): if not self._get_collections_to_save_for_step(): return block_name = block.name # This overwhelms the logs; turn back on if you really need it # logger.debug("Processing the global step {0} for block {1}".format(self.step, block_name)) # Output input tensor self._write_inputs(block_name, inputs) # Output output tensors self._write_outputs(block_name, outputs) self.last_saved_step = self.step def _recursive_apply(self, block): """ This function is "applied" to every child in the block. This function in turn registers the forward hook to each module. It helps logging the input output tensors of that module. """ # Check if the hook is already registered for this block. if block in self.registered_blocks: self.logger.warning(f"The hook is already registered to block {block.name}") return block.register_forward_hook(self.forward_hook) self.registered_blocks.add(block) def _is_recursive_needed(self): collections_to_save = self.include_collections # Check if default collection has a regex associated with it. # If it does we would need to apply hook recursively. if ( len(self.collection_manager.get(CollectionKeys.DEFAULT).include_regex) != 0 and CollectionKeys.DEFAULT in collections_to_save ): return True # Get the collections that are to be saved but are not part of default collections # We will need to apply hook recursively to get tensors specified in those collections. extra_coll = [ value for value in collections_to_save if value not in COLLECTIONS_NOT_REQUIRING_RECURSIVE_HOOK ] # extra_coll contains the collections that are not part of default collections. return len(extra_coll) != 0 def register_hook(self, block): # for compatibility with ZCC patches which call this self.register_block(block) @error_handling_agent.catch_smdebug_errors() def register_block(self, block): """ This function registers the forward hook. If user wants to register the hook for every child in the given block, then the function calls "apply" API for registration of the hook. The hook is registered recursively, if user has specified the collections that are more than the default collectors viz. gradients, weight and bias """ if not isinstance(block, mx.gluon.Block): self.logger.error(f"The given block type {block.__class__.__name__} is unsupported.") return # Check if the hook is already registered for this block. if block in self.registered_blocks: self.logger.warning(f"The hook is already registered to block {block.name}") return # Skip the forward pre hook for the Loss blocks. 
if isinstance(block, mx.gluon.loss.Loss): self.logger.info(f"Registering hook for block {block.name}") block.register_forward_hook(self.forward_hook) self.registered_blocks.add(block) return else: self.model = block is_recursive = self._is_recursive_needed() block.register_forward_pre_hook(self.forward_pre_hook) if is_recursive is True: block.apply(self._recursive_apply) else: block.register_forward_hook(self.forward_hook) self.registered_blocks.add(block) @staticmethod def _get_reduction_of_data(reduction_name, tensor_value, tensor_name, abs): return get_reduction_of_data(reduction_name, tensor_value, tensor_name, abs) @staticmethod def _make_numpy_array(tensor_value): if isinstance(tensor_value, NDArray): return tensor_value.asnumpy() return make_numpy_array(tensor_value)
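For context, a hedged usage sketch of the Hook defined above: registering a Gluon block so its forward inputs and outputs can be captured. The import path, output directory, and arguments are assumptions based on the code shown, not an official example.

# Hedged usage sketch; import path and arguments are assumptions.
import mxnet as mx
from mxnet.gluon import nn

from smdebug.mxnet import Hook  # assumed public import for the class above

net = nn.HybridSequential()
net.add(nn.Dense(64, activation="relu"), nn.Dense(10))
net.initialize()

hook = Hook(out_dir="/tmp/smdebug_demo", save_all=True)
hook.register_block(net)  # attaches forward_pre_hook / forward_hook(s)

out = net(mx.nd.random.uniform(shape=(4, 32)))  # hooks fire on this forward pass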
register_block
This function registers the forward hook. If the user wants to register the hook for every child in the given block, the function calls the "apply" API to register the hook. The hook is registered recursively if the user has specified collections beyond the default ones, viz. gradients, weights, and biases.
# Third Party import mxnet as mx from mxnet.ndarray import NDArray # First Party from smdebug.core.collection import DEFAULT_MXNET_COLLECTIONS, CollectionKeys from smdebug.core.hook import CallbackHook from smdebug.core.json_config import DEFAULT_WORKER_NAME from smdebug.core.utils import FRAMEWORK, error_handling_agent from smdebug.mxnet.collection import CollectionManager from smdebug.mxnet.graph import _net2pb from smdebug.mxnet.singleton_utils import set_hook from smdebug.mxnet.utils import get_reduction_of_data, make_numpy_array from smdebug.profiler.profiler_config_parser import get_profiler_config_parser DEFAULT_INCLUDE_COLLECTIONS = [CollectionKeys.LOSSES] COLLECTIONS_NOT_REQUIRING_RECURSIVE_HOOK = [ CollectionKeys.WEIGHTS, CollectionKeys.BIASES, CollectionKeys.GRADIENTS, CollectionKeys.LOSSES, ] profiler_config_parser = get_profiler_config_parser(FRAMEWORK.PYTORCH) class Hook(CallbackHook): def __init__( self, out_dir=None, export_tensorboard=False, tensorboard_dir=None, dry_run=False, reduction_config=None, save_config=None, include_regex=None, include_collections=None, save_all=False, include_workers="one", ): collection_manager = CollectionManager() super().__init__( collection_manager=collection_manager, default_include_collections=DEFAULT_INCLUDE_COLLECTIONS, profiler_config_parser=profiler_config_parser, data_type_name=mx.ndarray.NDArray.__name__, out_dir=out_dir, export_tensorboard=export_tensorboard, tensorboard_dir=tensorboard_dir, dry_run=dry_run, reduction_config=reduction_config, save_config=save_config, include_regex=include_regex, include_collections=include_collections, save_all=save_all, include_workers=include_workers, ) self.last_block = None self.model = None self.exported_model = False # Keep the set of blocks to which this hook is registered. The blocks include loss blocks as well. self.registered_blocks = set() self.worker = self._get_worker_name() set_hook(self) def _get_worker_name(self): try: import horovod.mxnet as hvd if hvd.size(): return f"worker_{hvd.rank()}" except (ModuleNotFoundError, ValueError, ImportError): pass return DEFAULT_WORKER_NAME def _get_num_workers(self): try: import horovod.mxnet as hvd if hvd.size(): return hvd.size() except (ModuleNotFoundError, ValueError, ImportError): pass return 1 def _cleanup(self): # Write the gradients of the past step if the writer is still available. if self.writer is not None and self.last_block is not None: self._log_params(self.last_block) if self.exported_model is False: self._export_model() super()._cleanup() def _log_params(self, block): params = block.collect_params().values() for param in params: self._log_param(param) def _log_param(self, param): try: self._save_for_tensor( tensor_name=param.name, tensor_value=param.data(param.list_ctx()[0]) ) # If Gradient for this param is available if param.grad_req != "null": self._save_for_tensor( tensor_name=self.GRADIENT_PREFIX + param.name, tensor_value=param.grad(param.list_ctx()[0]), ) except RuntimeError as e: self.logger.warning( f"Could not log parameter {param.name} due to the mxnet exception: {e}" ) def _export_model(self): if self.model is not None: try: tb_writer = self._maybe_get_tb_writer() if tb_writer: tb_writer.write_graph(_net2pb(self.model)) except (RuntimeError, TypeError) as e: self.logger.warning( f"Could not export model graph for tensorboard " f"due to the mxnet exception: {e}" ) def _get_default_collections(self): return DEFAULT_MXNET_COLLECTIONS # This hook is invoked by trainer prior to running the forward pass. 
@error_handling_agent.catch_smdebug_errors() def forward_pre_hook(self, block, inputs): if self.writer is not None: # Write the params and gradients of the # past step if the writer is still available. self._log_params(block) self._close_writers() self._close_tb_writer() if not self.prepared_collections: # at this point we need all collections to be ready # this may not be the case at creation of hook # as user's code after hook might add collections self._prepare_collections() self.prepared_collections = True self._increment_step() if self._get_collections_to_save_for_step(): self._initialize_writers() if self.exported_model is False: self._export_model() self.exported_model = True if self.last_saved_step is not None and not self.exported_collections: self.export_collections() self.exported_collections = True self.last_block = block self._save_custom_tensors_post_step() # This hook is invoked by trainer after running the forward pass. @error_handling_agent.catch_smdebug_errors() def forward_hook(self, block, inputs, outputs): if not self._get_collections_to_save_for_step(): return block_name = block.name # This overwhelms the logs; turn back on if you really need it # logger.debug("Processing the global step {0} for block {1}".format(self.step, block_name)) # Output input tensor self._write_inputs(block_name, inputs) # Output output tensors self._write_outputs(block_name, outputs) self.last_saved_step = self.step def _recursive_apply(self, block): """ This function is "applied" to every child in the block. This function in turn registers the forward hook to each module. It helps logging the input output tensors of that module. """ # Check if the hook is already registered for this block. if block in self.registered_blocks: self.logger.warning(f"The hook is already registered to block {block.name}") return block.register_forward_hook(self.forward_hook) self.registered_blocks.add(block) def _is_recursive_needed(self): collections_to_save = self.include_collections # Check if default collection has a regex associated with it. # If it does we would need to apply hook recursively. if ( len(self.collection_manager.get(CollectionKeys.DEFAULT).include_regex) != 0 and CollectionKeys.DEFAULT in collections_to_save ): return True # Get the collections that are to be saved but are not part of default collections # We will need to apply hook recursively to get tensors specified in those collections. extra_coll = [ value for value in collections_to_save if value not in COLLECTIONS_NOT_REQUIRING_RECURSIVE_HOOK ] # extra_coll contains the collections that are not part of default collections. return len(extra_coll) != 0 def register_hook(self, block): # for compatibility with ZCC patches which call this self.register_block(block) # MASKED: register_block function (lines 224-258) @staticmethod def _get_reduction_of_data(reduction_name, tensor_value, tensor_name, abs): return get_reduction_of_data(reduction_name, tensor_value, tensor_name, abs) @staticmethod def _make_numpy_array(tensor_value): if isinstance(tensor_value, NDArray): return tensor_value.asnumpy() return make_numpy_array(tensor_value)
@error_handling_agent.catch_smdebug_errors()
    def register_block(self, block):
        """
        This function registers the forward hook. If the user wants to
        register the hook for every child in the given block, the function
        calls the "apply" API to register the hook. The hook is registered
        recursively if the user has specified collections beyond the default
        ones, viz. gradients, weights, and biases.
        """

        if not isinstance(block, mx.gluon.Block):
            self.logger.error(f"The given block type {block.__class__.__name__} is unsupported.")
            return

        # Check if the hook is already registered for this block.
        if block in self.registered_blocks:
            self.logger.warning(f"The hook is already registered to block {block.name}")
            return

        # Skip the forward pre hook for the Loss blocks.
        if isinstance(block, mx.gluon.loss.Loss):
            self.logger.info(f"Registering hook for block {block.name}")
            block.register_forward_hook(self.forward_hook)
            self.registered_blocks.add(block)
            return
        else:
            self.model = block

        is_recursive = self._is_recursive_needed()
        block.register_forward_pre_hook(self.forward_pre_hook)
        if is_recursive is True:
            block.apply(self._recursive_apply)
        else:
            block.register_forward_hook(self.forward_hook)

        self.registered_blocks.add(block)
224
258
# Third Party import mxnet as mx from mxnet.ndarray import NDArray # First Party from smdebug.core.collection import DEFAULT_MXNET_COLLECTIONS, CollectionKeys from smdebug.core.hook import CallbackHook from smdebug.core.json_config import DEFAULT_WORKER_NAME from smdebug.core.utils import FRAMEWORK, error_handling_agent from smdebug.mxnet.collection import CollectionManager from smdebug.mxnet.graph import _net2pb from smdebug.mxnet.singleton_utils import set_hook from smdebug.mxnet.utils import get_reduction_of_data, make_numpy_array from smdebug.profiler.profiler_config_parser import get_profiler_config_parser DEFAULT_INCLUDE_COLLECTIONS = [CollectionKeys.LOSSES] COLLECTIONS_NOT_REQUIRING_RECURSIVE_HOOK = [ CollectionKeys.WEIGHTS, CollectionKeys.BIASES, CollectionKeys.GRADIENTS, CollectionKeys.LOSSES, ] profiler_config_parser = get_profiler_config_parser(FRAMEWORK.PYTORCH) class Hook(CallbackHook): def __init__( self, out_dir=None, export_tensorboard=False, tensorboard_dir=None, dry_run=False, reduction_config=None, save_config=None, include_regex=None, include_collections=None, save_all=False, include_workers="one", ): collection_manager = CollectionManager() super().__init__( collection_manager=collection_manager, default_include_collections=DEFAULT_INCLUDE_COLLECTIONS, profiler_config_parser=profiler_config_parser, data_type_name=mx.ndarray.NDArray.__name__, out_dir=out_dir, export_tensorboard=export_tensorboard, tensorboard_dir=tensorboard_dir, dry_run=dry_run, reduction_config=reduction_config, save_config=save_config, include_regex=include_regex, include_collections=include_collections, save_all=save_all, include_workers=include_workers, ) self.last_block = None self.model = None self.exported_model = False # Keep the set of blocks to which this hook is registered. The blocks include loss blocks as well. self.registered_blocks = set() self.worker = self._get_worker_name() set_hook(self) def _get_worker_name(self): try: import horovod.mxnet as hvd if hvd.size(): return f"worker_{hvd.rank()}" except (ModuleNotFoundError, ValueError, ImportError): pass return DEFAULT_WORKER_NAME def _get_num_workers(self): try: import horovod.mxnet as hvd if hvd.size(): return hvd.size() except (ModuleNotFoundError, ValueError, ImportError): pass return 1 def _cleanup(self): # Write the gradients of the past step if the writer is still available. if self.writer is not None and self.last_block is not None: self._log_params(self.last_block) if self.exported_model is False: self._export_model() super()._cleanup() def _log_params(self, block): params = block.collect_params().values() for param in params: self._log_param(param) def _log_param(self, param): try: self._save_for_tensor( tensor_name=param.name, tensor_value=param.data(param.list_ctx()[0]) ) # If Gradient for this param is available if param.grad_req != "null": self._save_for_tensor( tensor_name=self.GRADIENT_PREFIX + param.name, tensor_value=param.grad(param.list_ctx()[0]), ) except RuntimeError as e: self.logger.warning( f"Could not log parameter {param.name} due to the mxnet exception: {e}" ) def _export_model(self): if self.model is not None: try: tb_writer = self._maybe_get_tb_writer() if tb_writer: tb_writer.write_graph(_net2pb(self.model)) except (RuntimeError, TypeError) as e: self.logger.warning( f"Could not export model graph for tensorboard " f"due to the mxnet exception: {e}" ) def _get_default_collections(self): return DEFAULT_MXNET_COLLECTIONS # This hook is invoked by trainer prior to running the forward pass. 
@error_handling_agent.catch_smdebug_errors() def forward_pre_hook(self, block, inputs): if self.writer is not None: # Write the params and gradients of the # past step if the writer is still available. self._log_params(block) self._close_writers() self._close_tb_writer() if not self.prepared_collections: # at this point we need all collections to be ready # this may not be the case at creation of hook # as user's code after hook might add collections self._prepare_collections() self.prepared_collections = True self._increment_step() if self._get_collections_to_save_for_step(): self._initialize_writers() if self.exported_model is False: self._export_model() self.exported_model = True if self.last_saved_step is not None and not self.exported_collections: self.export_collections() self.exported_collections = True self.last_block = block self._save_custom_tensors_post_step() # This hook is invoked by trainer after running the forward pass. @error_handling_agent.catch_smdebug_errors() def forward_hook(self, block, inputs, outputs): if not self._get_collections_to_save_for_step(): return block_name = block.name # This overwhelms the logs; turn back on if you really need it # logger.debug("Processing the global step {0} for block {1}".format(self.step, block_name)) # Output input tensor self._write_inputs(block_name, inputs) # Output output tensors self._write_outputs(block_name, outputs) self.last_saved_step = self.step def _recursive_apply(self, block): """ This function is "applied" to every child in the block. This function in turn registers the forward hook to each module. It helps logging the input output tensors of that module. """ # Check if the hook is already registered for this block. if block in self.registered_blocks: self.logger.warning(f"The hook is already registered to block {block.name}") return block.register_forward_hook(self.forward_hook) self.registered_blocks.add(block) def _is_recursive_needed(self): collections_to_save = self.include_collections # Check if default collection has a regex associated with it. # If it does we would need to apply hook recursively. if ( len(self.collection_manager.get(CollectionKeys.DEFAULT).include_regex) != 0 and CollectionKeys.DEFAULT in collections_to_save ): return True # Get the collections that are to be saved but are not part of default collections # We will need to apply hook recursively to get tensors specified in those collections. extra_coll = [ value for value in collections_to_save if value not in COLLECTIONS_NOT_REQUIRING_RECURSIVE_HOOK ] # extra_coll contains the collections that are not part of default collections. return len(extra_coll) != 0 def register_hook(self, block): # for compatibility with ZCC patches which call this self.register_block(block) @error_handling_agent.catch_smdebug_errors() def register_block(self, block): """ This function registers the forward hook. If user wants to register the hook for every child in the given block, then the function calls "apply" API for registration of the hook. The hook is registered recursively, if user has specified the collections that are more than the default collectors viz. gradients, weight and bias """ if not isinstance(block, mx.gluon.Block): self.logger.error(f"The given block type {block.__class__.__name__} is unsupported.") return # Check if the hook is already registered for this block. if block in self.registered_blocks: self.logger.warning(f"The hook is already registered to block {block.name}") return # Skip the forward pre hook for the Loss blocks. 
if isinstance(block, mx.gluon.loss.Loss): self.logger.info(f"Registering hook for block {block.name}") block.register_forward_hook(self.forward_hook) self.registered_blocks.add(block) return else: self.model = block is_recursive = self._is_recursive_needed() block.register_forward_pre_hook(self.forward_pre_hook) if is_recursive is True: block.apply(self._recursive_apply) else: block.register_forward_hook(self.forward_hook) self.registered_blocks.add(block) @staticmethod def _get_reduction_of_data(reduction_name, tensor_value, tensor_name, abs): return get_reduction_of_data(reduction_name, tensor_value, tensor_name, abs) @staticmethod def _make_numpy_array(tensor_value): if isinstance(tensor_value, NDArray): return tensor_value.asnumpy() return make_numpy_array(tensor_value)
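Whether register_block attaches a single forward hook to the top-level block or recursively hooks every child is decided by _is_recursive_needed. A hedged illustration of the two configurations follows; the custom collection name is a made-up example, and any collection outside the default weights/biases/gradients/losses set triggers the recursive path.

# Hedged illustration of the recursive vs. top-level registration choice.
from smdebug.core.collection import CollectionKeys
from smdebug.mxnet import Hook  # assumed public import for the class above

# Only default collections requested -> a single forward hook on the top block.
shallow_hook = Hook(out_dir="/tmp/smdebug_shallow",
                    include_collections=[CollectionKeys.LOSSES])

# A non-default collection requested -> block.apply(_recursive_apply) hooks
# every child module so its inputs/outputs can be captured as well.
deep_hook = Hook(out_dir="/tmp/smdebug_deep",
                 include_collections=[CollectionKeys.LOSSES, "my_custom_collection"])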
step
Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss.
""" """ from __future__ import division from torch.optim.optimizer import Optimizer, required import numpy as np import torch from typing import NamedTuple, List from dataclasses import dataclass from enum import Enum from typing import Union, Tuple # from scipy.sparse.linalg import svds from scipy.optimize import minimize_scalar class LayerType(Enum): CONV = 1 FC = 2 NON_CONV = 3 @dataclass class LayerMetrics: rank: float KG: float condition: float @dataclass class ConvLayerMetrics: input_channel: LayerMetrics output_channel: LayerMetrics class LRMetrics(NamedTuple): rank_velocity: List[float] r_conv: List[float] def EVBMF(Y, sigma2=None, H=None): """Implementation of the analytical solution to Empirical Variational Bayes Matrix Factorization. This function can be used to calculate the analytical solution to empirical VBMF. This is based on the paper and MatLab code by Nakajima et al.: "Global analytic solution of fully-observed variational Bayesian matrix factorization." Notes ----- If sigma2 is unspecified, it is estimated by minimizing the free energy. If H is unspecified, it is set to the smallest of the sides of the input Y. Attributes ---------- Y : numpy-array Input matrix that is to be factorized. Y has shape (L,M), where L<=M. sigma2 : int or None (default=None) Variance of the noise on Y. H : int or None (default = None) Maximum rank of the factorized matrices. Returns ------- U : numpy-array Left-singular vectors. S : numpy-array Diagonal matrix of singular values. V : numpy-array Right-singular vectors. post : dictionary Dictionary containing the computed posterior values. References ---------- .. [1] Nakajima, Shinichi, et al. "Global analytic solution of fully-observed variational Bayesian matrix factorization." Journal of Machine Learning Research 14.Jan (2013): 1-37. .. [2] Nakajima, Shinichi, et al. "Perfect dimensionality recovery by variational Bayesian PCA." Advances in Neural Information Processing Systems. 2012. """ L, M = Y.shape # has to be L<=M if H is None: H = L alpha = L / M tauubar = 2.5129 * np.sqrt(alpha) # SVD of the input matrix, max rank of H # U, s, V = np.linalg.svd(Y) U, s, V = torch.svd(Y) U = U[:, :H] s = s[:H] V = V[:H].T # Calculate residual residual = 0. if H < L: # residual = np.sum(np.sum(Y**2)-np.sum(s**2)) residual = torch.sum(np.sum(Y**2) - np.sum(s**2)) # Estimation of the variance when sigma2 is unspecified if sigma2 is None: xubar = (1 + tauubar) * (1 + alpha / tauubar) eH_ub = int(np.min([np.ceil(L / (1 + alpha)) - 1, H])) - 1 # upper_bound = (np.sum(s**2)+residual)/(L*M) # lower_bound = np.max( # [s[eH_ub+1]**2/(M*xubar), np.mean(s[eH_ub+1:]**2)/M]) upper_bound = (torch.sum(s**2) + residual) / (L * M) lower_bound = torch.max(torch.stack( [s[eH_ub + 1]**2 / (M * xubar), torch.mean(s[eH_ub + 1:]**2) / M], dim=0)) scale = 1. 
# /lower_bound s = s * np.sqrt(scale) residual = residual * scale lower_bound = lower_bound * scale upper_bound = upper_bound * scale sigma2_opt = minimize_scalar( EVBsigma2, args=(L, M, s.cpu().numpy(), residual, xubar), bounds=[lower_bound.cpu().numpy(), upper_bound.cpu().numpy()], method='Bounded') sigma2 = sigma2_opt.x # Threshold gamma term threshold = np.sqrt(M * sigma2 * (1 + tauubar) * (1 + alpha / tauubar)) # pos = np.sum(s > threshold) pos = torch.sum(s > threshold) # Formula (15) from [2] # d = torch.multiply(s[:pos]/2, # 1-torch.divide( # torch.tensor((L+M)*sigma2, device=s.device), # s[:pos]**2) + torch.sqrt((1-torch.divide( # torch.tensor( # (L+M)*sigma2, device=s.device), # s[:pos]**2))**2 - # 4*L*M*sigma2**2/s[:pos]**4)) # d = np.multiply(s[:pos]/2, 1-np.divide((L+M)*sigma2, s[:pos]**2) + np.sqrt( # (1-np.divide((L+M)*sigma2, s[:pos]**2))**2 - 4*L*M*sigma2**2/s[:pos]**4)) d = (s[:pos] / 2) * (1 - (L + M) * sigma2 / s[:pos]**2 + torch.sqrt((1 - (L + M) * sigma2 / s[:pos]**2)**2 - 4 * L * M * sigma2**2 / s[:pos]**4)) # Computation of the posterior # post = {} # post['ma'] = np.zeros(H) # post['mb'] = np.zeros(H) # post['sa2'] = np.zeros(H) # post['sb2'] = np.zeros(H) # post['cacb'] = np.zeros(H) # tau = np.multiply(d, s[:pos])/(M*sigma2) # delta = np.multiply(np.sqrt(np.divide(M*d, L*s[:pos])), 1+alpha/tau) # post['ma'][:pos] = np.sqrt(np.multiply(d, delta)) # post['mb'][:pos] = np.sqrt(np.divide(d, delta)) # post['sa2'][:pos] = np.divide(sigma2*delta, s[:pos]) # post['sb2'][:pos] = np.divide(sigma2, np.multiply(delta, s[:pos])) # post['cacb'][:pos] = np.sqrt(np.multiply(d, s[:pos])/(L*M)) # post['sigma2'] = sigma2 # post['F'] = 0.5*(L*M*np.log(2*np.pi*sigma2) + # (residual+np.sum(s**2))/sigma2 + np.sum( # M*np.log(tau+1) + L*np.log(tau/alpha + 1) - M*tau)) return U[:, :pos], torch.diag(d), V[:, :pos] # , post def EVBsigma2(sigma2, L, M, s, residual, xubar): H = len(s) alpha = L / M x = s**2 / (M * sigma2) z1 = x[x > xubar] z2 = x[x <= xubar] tau_z1 = tau(z1, alpha) term1 = np.sum(z2 - np.log(z2)) term2 = np.sum(z1 - tau_z1) term3 = np.sum(np.log(np.divide(tau_z1 + 1, z1))) term4 = alpha * np.sum(np.log(tau_z1 / alpha + 1)) obj = term1 + term2 + term3 + term4 + residual / (M * sigma2) + (L - H) * np.log(sigma2) return obj def phi0(x): return x - np.log(x) def phi1(x, alpha): return np.log(tau(x, alpha) + 1) + alpha * np.log(tau(x, alpha) / alpha + 1 ) - tau(x, alpha) def tau(x, alpha): return 0.5 * (x - (1 + alpha) + np.sqrt((x - (1 + alpha))**2 - 4 * alpha)) class Metrics: def __init__(self, params, linear: bool = False) -> None: ''' parameters: list of torch.nn.Module.parameters() ''' self.params = params self.history = list() mask = list() for param_idx, param in enumerate(params): param_shape = param.shape if not linear: if len(param_shape) != 4: mask.append(param_idx) else: if len(param_shape) != 4 and len(param_shape) != 2: mask.append(param_idx) self.mask = set(mask) def compute_low_rank(self, tensor: torch.Tensor, normalizer: float) -> torch.Tensor: if tensor.requires_grad: tensor = tensor.detach() try: tensor_size = tensor.shape if tensor_size[0] > tensor_size[1]: tensor = tensor.T U_approx, S_approx, V_approx = EVBMF(tensor) except RuntimeError: return None, None, None rank = S_approx.shape[0] / tensor_size[0] # normalizer low_rank_eigen = torch.diag(S_approx).data.cpu().numpy() if len(low_rank_eigen) != 0: condition = low_rank_eigen[0] / low_rank_eigen[-1] sum_low_rank_eigen = low_rank_eigen / \ max(low_rank_eigen) sum_low_rank_eigen = np.sum(sum_low_rank_eigen) 
else: condition = 0 sum_low_rank_eigen = 0 KG = sum_low_rank_eigen / tensor_size[0] # normalizer return rank, KG, condition def KG(self, epoch: int) -> np.ndarray: KG_list = list() for i, (index, metric) in enumerate(self.history[epoch]): if isinstance(metric, ConvLayerMetrics): KG_list.append((metric.input_channel.KG + metric.output_channel.KG) / 2) elif isinstance(metric, LayerMetrics): KG_list.append(metric.KG) return np.array(KG_list) def __call__(self) -> List[Tuple[int, Union[LayerMetrics, ConvLayerMetrics]]]: ''' Computes the knowledge gain (S) and mapping condition (condition) ''' metrics: List[Tuple[int, Union[LayerMetrics, ConvLayerMetrics]]] = list() for layer_index, layer in enumerate(self.params): if layer_index in self.mask: metrics.append((layer_index, None)) continue # if np.less(np.prod(layer.shape), 10_000): # metrics.append((layer_index, None)) if len(layer.shape) == 4: layer_tensor = layer.data tensor_size = layer_tensor.shape mode_3_unfold = layer_tensor.permute(1, 0, 2, 3) mode_3_unfold = torch.reshape( mode_3_unfold, [tensor_size[1], tensor_size[0] * tensor_size[2] * tensor_size[3]]) mode_4_unfold = layer_tensor mode_4_unfold = torch.reshape( mode_4_unfold, [tensor_size[0], tensor_size[1] * tensor_size[2] * tensor_size[3]]) in_rank, in_KG, in_condition = self.compute_low_rank( mode_3_unfold, tensor_size[1]) if in_rank is None and in_KG is None and in_condition is None: if len(self.history) > 0: in_rank = self.history[-1][ layer_index][1].input_channel.rank in_KG = self.history[-1][ layer_index][1].input_channel.KG in_condition = self.history[-1][ layer_index][1].input_channel.condition else: in_rank = in_KG = in_condition = 0. out_rank, out_KG, out_condition = self.compute_low_rank( mode_4_unfold, tensor_size[0]) if out_rank is None and out_KG is None and out_condition is None: if len(self.history) > 0: out_rank = self.history[-1][ layer_index][1].output_channel.rank out_KG = self.history[-1][ layer_index][1].output_channel.KG out_condition = self.history[-1][ layer_index][1].output_channel.condition else: out_rank = out_KG = out_condition = 0. metrics.append((layer_index, ConvLayerMetrics( input_channel=LayerMetrics( rank=in_rank, KG=in_KG, condition=in_condition), output_channel=LayerMetrics( rank=out_rank, KG=out_KG, condition=out_condition)))) elif len(layer.shape) == 2: rank, KG, condition = self.compute_low_rank( layer, layer.shape[0]) if rank is None and KG is None and condition is None: if len(self.history) > 0: rank = self.history[-1][layer_index][1].rank KG = self.history[-1][layer_index][1].KG condition = self.history[-1][layer_index][1].condition else: rank = KG = condition = 0. 
metrics.append((layer_index, LayerMetrics( rank=rank, KG=KG, condition=condition))) else: metrics.append((layer_index, None)) self.history.append(metrics) return metrics class Adas(Optimizer): """ Vectorized SGD from torch.optim.SGD """ def __init__(self, params, lr: float = required, beta: float = 0.8, step_size: int = None, linear: bool = True, gamma: float = 1, momentum: float = 0, dampening: float = 0, weight_decay: float = 0, nesterov: bool = False): if lr is not required and lr < 0.0: raise ValueError("Invalid learning rate: {}".format(lr)) if momentum < 0.0: raise ValueError("Invalid momentum value: {}".format(momentum)) if weight_decay < 0.0: raise ValueError( "Invalid weight_decay value: {}".format(weight_decay)) defaults = dict(lr=lr, momentum=momentum, dampening=dampening, weight_decay=weight_decay, nesterov=nesterov) if nesterov and (momentum <= 0 or dampening != 0): raise ValueError( "Nesterov momentum requires a momentum and zero dampening") super(Adas, self).__init__(params[:2], defaults) # Adas Specific stuff (not SGD) if np.less(beta, 0) or np.greater_equal(beta, 1): raise ValueError(f'Invalid beta: {beta}') if np.less(gamma, 0): raise ValueError(f'Invalid gamma: {gamma}') if step_size is not None: if np.less_equal(step_size, 0): raise ValueError(f'Invalid step_size: {step_size}') self.step_size = step_size self.gamma = gamma self.beta = beta self.metrics = metrics = Metrics(params=params[2]["all_params"], linear=linear) self.lr_vector = np.repeat(a=lr, repeats=len(metrics.params)) self.velocity = np.zeros( len(self.metrics.params) - len(self.metrics.mask)) self.not_ready = list(range(len(self.velocity))) self.init_lr = lr self.zeta = 1. self.KG = 0. def __setstate__(self, state): super(Adas, self).__setstate__(state) for group in self.param_groups: group.setdefault('nesterov', False) def epoch_step(self, epoch: int) -> None: self.metrics() if epoch == 0: velocity = self.init_lr * np.ones(len(self.velocity)) self.KG = self.metrics.KG(epoch) else: KG = self.metrics.KG(epoch) velocity = KG - self.KG self.KG = KG for idx in self.not_ready: if np.isclose(KG[idx], 0.): velocity[idx] = self.init_lr - \ self.beta * self.velocity[idx] else: self.not_ready.remove(idx) if self.step_size is not None: if epoch % self.step_size == 0 and epoch > 0: self.lr_vector *= self.gamma self.zeta *= self.gamma self.velocity = np.maximum( self.beta * self.velocity + self.zeta * velocity, 0.) count = 0 for i in range(len(self.metrics.params)): if i in self.metrics.mask: self.lr_vector[i] = self.lr_vector[i - (1 if i > 0 else 0)] else: self.lr_vector[i] = self.velocity[count] count += 1 # MASKED: step function (lines 432-473)
def step(self, closure: callable = None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        iteration_group = 0
        for group in self.param_groups:
            iteration_group += 1
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            dampening = group['dampening']
            nesterov = group['nesterov']

            for p_index, p in enumerate(group['params']):
                if p.grad is None:
                    continue
                d_p = p.grad.data
                if weight_decay != 0:
                    d_p.add_(p.data, alpha=weight_decay)
                if momentum != 0:
                    param_state = self.state[p]
                    if 'momentum_buffer' not in param_state:
                        buf = param_state['momentum_buffer'] = torch.clone(
                            d_p).detach()
                    else:
                        buf = param_state['momentum_buffer']
                        buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
                    if nesterov:
                        d_p = d_p.add(momentum, buf)
                    else:
                        d_p = buf

                # p.data.add_(-group['lr'], d_p)
                p.data.add_(d_p, alpha=-self.lr_vector[p_index])

        return loss
432
473
""" """ from __future__ import division from torch.optim.optimizer import Optimizer, required import numpy as np import torch from typing import NamedTuple, List from dataclasses import dataclass from enum import Enum from typing import Union, Tuple # from scipy.sparse.linalg import svds from scipy.optimize import minimize_scalar class LayerType(Enum): CONV = 1 FC = 2 NON_CONV = 3 @dataclass class LayerMetrics: rank: float KG: float condition: float @dataclass class ConvLayerMetrics: input_channel: LayerMetrics output_channel: LayerMetrics class LRMetrics(NamedTuple): rank_velocity: List[float] r_conv: List[float] def EVBMF(Y, sigma2=None, H=None): """Implementation of the analytical solution to Empirical Variational Bayes Matrix Factorization. This function can be used to calculate the analytical solution to empirical VBMF. This is based on the paper and MatLab code by Nakajima et al.: "Global analytic solution of fully-observed variational Bayesian matrix factorization." Notes ----- If sigma2 is unspecified, it is estimated by minimizing the free energy. If H is unspecified, it is set to the smallest of the sides of the input Y. Attributes ---------- Y : numpy-array Input matrix that is to be factorized. Y has shape (L,M), where L<=M. sigma2 : int or None (default=None) Variance of the noise on Y. H : int or None (default = None) Maximum rank of the factorized matrices. Returns ------- U : numpy-array Left-singular vectors. S : numpy-array Diagonal matrix of singular values. V : numpy-array Right-singular vectors. post : dictionary Dictionary containing the computed posterior values. References ---------- .. [1] Nakajima, Shinichi, et al. "Global analytic solution of fully-observed variational Bayesian matrix factorization." Journal of Machine Learning Research 14.Jan (2013): 1-37. .. [2] Nakajima, Shinichi, et al. "Perfect dimensionality recovery by variational Bayesian PCA." Advances in Neural Information Processing Systems. 2012. """ L, M = Y.shape # has to be L<=M if H is None: H = L alpha = L / M tauubar = 2.5129 * np.sqrt(alpha) # SVD of the input matrix, max rank of H # U, s, V = np.linalg.svd(Y) U, s, V = torch.svd(Y) U = U[:, :H] s = s[:H] V = V[:H].T # Calculate residual residual = 0. if H < L: # residual = np.sum(np.sum(Y**2)-np.sum(s**2)) residual = torch.sum(np.sum(Y**2) - np.sum(s**2)) # Estimation of the variance when sigma2 is unspecified if sigma2 is None: xubar = (1 + tauubar) * (1 + alpha / tauubar) eH_ub = int(np.min([np.ceil(L / (1 + alpha)) - 1, H])) - 1 # upper_bound = (np.sum(s**2)+residual)/(L*M) # lower_bound = np.max( # [s[eH_ub+1]**2/(M*xubar), np.mean(s[eH_ub+1:]**2)/M]) upper_bound = (torch.sum(s**2) + residual) / (L * M) lower_bound = torch.max(torch.stack( [s[eH_ub + 1]**2 / (M * xubar), torch.mean(s[eH_ub + 1:]**2) / M], dim=0)) scale = 1. 
# /lower_bound s = s * np.sqrt(scale) residual = residual * scale lower_bound = lower_bound * scale upper_bound = upper_bound * scale sigma2_opt = minimize_scalar( EVBsigma2, args=(L, M, s.cpu().numpy(), residual, xubar), bounds=[lower_bound.cpu().numpy(), upper_bound.cpu().numpy()], method='Bounded') sigma2 = sigma2_opt.x # Threshold gamma term threshold = np.sqrt(M * sigma2 * (1 + tauubar) * (1 + alpha / tauubar)) # pos = np.sum(s > threshold) pos = torch.sum(s > threshold) # Formula (15) from [2] # d = torch.multiply(s[:pos]/2, # 1-torch.divide( # torch.tensor((L+M)*sigma2, device=s.device), # s[:pos]**2) + torch.sqrt((1-torch.divide( # torch.tensor( # (L+M)*sigma2, device=s.device), # s[:pos]**2))**2 - # 4*L*M*sigma2**2/s[:pos]**4)) # d = np.multiply(s[:pos]/2, 1-np.divide((L+M)*sigma2, s[:pos]**2) + np.sqrt( # (1-np.divide((L+M)*sigma2, s[:pos]**2))**2 - 4*L*M*sigma2**2/s[:pos]**4)) d = (s[:pos] / 2) * (1 - (L + M) * sigma2 / s[:pos]**2 + torch.sqrt((1 - (L + M) * sigma2 / s[:pos]**2)**2 - 4 * L * M * sigma2**2 / s[:pos]**4)) # Computation of the posterior # post = {} # post['ma'] = np.zeros(H) # post['mb'] = np.zeros(H) # post['sa2'] = np.zeros(H) # post['sb2'] = np.zeros(H) # post['cacb'] = np.zeros(H) # tau = np.multiply(d, s[:pos])/(M*sigma2) # delta = np.multiply(np.sqrt(np.divide(M*d, L*s[:pos])), 1+alpha/tau) # post['ma'][:pos] = np.sqrt(np.multiply(d, delta)) # post['mb'][:pos] = np.sqrt(np.divide(d, delta)) # post['sa2'][:pos] = np.divide(sigma2*delta, s[:pos]) # post['sb2'][:pos] = np.divide(sigma2, np.multiply(delta, s[:pos])) # post['cacb'][:pos] = np.sqrt(np.multiply(d, s[:pos])/(L*M)) # post['sigma2'] = sigma2 # post['F'] = 0.5*(L*M*np.log(2*np.pi*sigma2) + # (residual+np.sum(s**2))/sigma2 + np.sum( # M*np.log(tau+1) + L*np.log(tau/alpha + 1) - M*tau)) return U[:, :pos], torch.diag(d), V[:, :pos] # , post def EVBsigma2(sigma2, L, M, s, residual, xubar): H = len(s) alpha = L / M x = s**2 / (M * sigma2) z1 = x[x > xubar] z2 = x[x <= xubar] tau_z1 = tau(z1, alpha) term1 = np.sum(z2 - np.log(z2)) term2 = np.sum(z1 - tau_z1) term3 = np.sum(np.log(np.divide(tau_z1 + 1, z1))) term4 = alpha * np.sum(np.log(tau_z1 / alpha + 1)) obj = term1 + term2 + term3 + term4 + residual / (M * sigma2) + (L - H) * np.log(sigma2) return obj def phi0(x): return x - np.log(x) def phi1(x, alpha): return np.log(tau(x, alpha) + 1) + alpha * np.log(tau(x, alpha) / alpha + 1 ) - tau(x, alpha) def tau(x, alpha): return 0.5 * (x - (1 + alpha) + np.sqrt((x - (1 + alpha))**2 - 4 * alpha)) class Metrics: def __init__(self, params, linear: bool = False) -> None: ''' parameters: list of torch.nn.Module.parameters() ''' self.params = params self.history = list() mask = list() for param_idx, param in enumerate(params): param_shape = param.shape if not linear: if len(param_shape) != 4: mask.append(param_idx) else: if len(param_shape) != 4 and len(param_shape) != 2: mask.append(param_idx) self.mask = set(mask) def compute_low_rank(self, tensor: torch.Tensor, normalizer: float) -> torch.Tensor: if tensor.requires_grad: tensor = tensor.detach() try: tensor_size = tensor.shape if tensor_size[0] > tensor_size[1]: tensor = tensor.T U_approx, S_approx, V_approx = EVBMF(tensor) except RuntimeError: return None, None, None rank = S_approx.shape[0] / tensor_size[0] # normalizer low_rank_eigen = torch.diag(S_approx).data.cpu().numpy() if len(low_rank_eigen) != 0: condition = low_rank_eigen[0] / low_rank_eigen[-1] sum_low_rank_eigen = low_rank_eigen / \ max(low_rank_eigen) sum_low_rank_eigen = np.sum(sum_low_rank_eigen) 
else: condition = 0 sum_low_rank_eigen = 0 KG = sum_low_rank_eigen / tensor_size[0] # normalizer return rank, KG, condition def KG(self, epoch: int) -> np.ndarray: KG_list = list() for i, (index, metric) in enumerate(self.history[epoch]): if isinstance(metric, ConvLayerMetrics): KG_list.append((metric.input_channel.KG + metric.output_channel.KG) / 2) elif isinstance(metric, LayerMetrics): KG_list.append(metric.KG) return np.array(KG_list) def __call__(self) -> List[Tuple[int, Union[LayerMetrics, ConvLayerMetrics]]]: ''' Computes the knowledge gain (S) and mapping condition (condition) ''' metrics: List[Tuple[int, Union[LayerMetrics, ConvLayerMetrics]]] = list() for layer_index, layer in enumerate(self.params): if layer_index in self.mask: metrics.append((layer_index, None)) continue # if np.less(np.prod(layer.shape), 10_000): # metrics.append((layer_index, None)) if len(layer.shape) == 4: layer_tensor = layer.data tensor_size = layer_tensor.shape mode_3_unfold = layer_tensor.permute(1, 0, 2, 3) mode_3_unfold = torch.reshape( mode_3_unfold, [tensor_size[1], tensor_size[0] * tensor_size[2] * tensor_size[3]]) mode_4_unfold = layer_tensor mode_4_unfold = torch.reshape( mode_4_unfold, [tensor_size[0], tensor_size[1] * tensor_size[2] * tensor_size[3]]) in_rank, in_KG, in_condition = self.compute_low_rank( mode_3_unfold, tensor_size[1]) if in_rank is None and in_KG is None and in_condition is None: if len(self.history) > 0: in_rank = self.history[-1][ layer_index][1].input_channel.rank in_KG = self.history[-1][ layer_index][1].input_channel.KG in_condition = self.history[-1][ layer_index][1].input_channel.condition else: in_rank = in_KG = in_condition = 0. out_rank, out_KG, out_condition = self.compute_low_rank( mode_4_unfold, tensor_size[0]) if out_rank is None and out_KG is None and out_condition is None: if len(self.history) > 0: out_rank = self.history[-1][ layer_index][1].output_channel.rank out_KG = self.history[-1][ layer_index][1].output_channel.KG out_condition = self.history[-1][ layer_index][1].output_channel.condition else: out_rank = out_KG = out_condition = 0. metrics.append((layer_index, ConvLayerMetrics( input_channel=LayerMetrics( rank=in_rank, KG=in_KG, condition=in_condition), output_channel=LayerMetrics( rank=out_rank, KG=out_KG, condition=out_condition)))) elif len(layer.shape) == 2: rank, KG, condition = self.compute_low_rank( layer, layer.shape[0]) if rank is None and KG is None and condition is None: if len(self.history) > 0: rank = self.history[-1][layer_index][1].rank KG = self.history[-1][layer_index][1].KG condition = self.history[-1][layer_index][1].condition else: rank = KG = condition = 0. 
metrics.append((layer_index, LayerMetrics( rank=rank, KG=KG, condition=condition))) else: metrics.append((layer_index, None)) self.history.append(metrics) return metrics class Adas(Optimizer): """ Vectorized SGD from torch.optim.SGD """ def __init__(self, params, lr: float = required, beta: float = 0.8, step_size: int = None, linear: bool = True, gamma: float = 1, momentum: float = 0, dampening: float = 0, weight_decay: float = 0, nesterov: bool = False): if lr is not required and lr < 0.0: raise ValueError("Invalid learning rate: {}".format(lr)) if momentum < 0.0: raise ValueError("Invalid momentum value: {}".format(momentum)) if weight_decay < 0.0: raise ValueError( "Invalid weight_decay value: {}".format(weight_decay)) defaults = dict(lr=lr, momentum=momentum, dampening=dampening, weight_decay=weight_decay, nesterov=nesterov) if nesterov and (momentum <= 0 or dampening != 0): raise ValueError( "Nesterov momentum requires a momentum and zero dampening") super(Adas, self).__init__(params[:2], defaults) # Adas Specific stuff (not SGD) if np.less(beta, 0) or np.greater_equal(beta, 1): raise ValueError(f'Invalid beta: {beta}') if np.less(gamma, 0): raise ValueError(f'Invalid gamma: {gamma}') if step_size is not None: if np.less_equal(step_size, 0): raise ValueError(f'Invalid step_size: {step_size}') self.step_size = step_size self.gamma = gamma self.beta = beta self.metrics = metrics = Metrics(params=params[2]["all_params"], linear=linear) self.lr_vector = np.repeat(a=lr, repeats=len(metrics.params)) self.velocity = np.zeros( len(self.metrics.params) - len(self.metrics.mask)) self.not_ready = list(range(len(self.velocity))) self.init_lr = lr self.zeta = 1. self.KG = 0. def __setstate__(self, state): super(Adas, self).__setstate__(state) for group in self.param_groups: group.setdefault('nesterov', False) def epoch_step(self, epoch: int) -> None: self.metrics() if epoch == 0: velocity = self.init_lr * np.ones(len(self.velocity)) self.KG = self.metrics.KG(epoch) else: KG = self.metrics.KG(epoch) velocity = KG - self.KG self.KG = KG for idx in self.not_ready: if np.isclose(KG[idx], 0.): velocity[idx] = self.init_lr - \ self.beta * self.velocity[idx] else: self.not_ready.remove(idx) if self.step_size is not None: if epoch % self.step_size == 0 and epoch > 0: self.lr_vector *= self.gamma self.zeta *= self.gamma self.velocity = np.maximum( self.beta * self.velocity + self.zeta * velocity, 0.) count = 0 for i in range(len(self.metrics.params)): if i in self.metrics.mask: self.lr_vector[i] = self.lr_vector[i - (1 if i > 0 else 0)] else: self.lr_vector[i] = self.velocity[count] count += 1 def step(self, closure: callable = None): """Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. 
""" loss = None if closure is not None: loss = closure() iteration_group = 0 for group in self.param_groups: iteration_group += 1 weight_decay = group['weight_decay'] momentum = group['momentum'] dampening = group['dampening'] nesterov = group['nesterov'] for p_index, p in enumerate(group['params']): if p.grad is None: continue d_p = p.grad.data if weight_decay != 0: d_p.add_(p.data, alpha=weight_decay) if momentum != 0: param_state = self.state[p] if 'momentum_buffer' not in param_state: buf = param_state['momentum_buffer'] = torch.clone( d_p).detach() else: buf = param_state['momentum_buffer'] buf.mul_(momentum).add_(d_p, alpha=1 - dampening) if nesterov: d_p = d_p.add(momentum, buf) else: d_p = buf # p.data.add_(-group['lr'], d_p) p.data.add_(d_p, alpha=-self.lr_vector[p_index]) return loss
get_value
Returns the displayed text for the desired column in this row. The formula or input which generated the displayed value is not accessible through the list feed; to see the user's input, use the cells feed. If a column is not present in this spreadsheet, or there is no value for the column in this row, this method will return None.
#!/usr/bin/env python # # Copyright (C) 2009 Google Inc. # # Licensed under the Apache License 2.0; # This module is used for version 2 of the Google Data APIs. """Provides classes and constants for the XML in the Google Spreadsheets API. Documentation for the raw XML which these classes represent can be found here: http://code.google.com/apis/spreadsheets/docs/3.0/reference.html#Elements """ # __author__ = '[email protected] (Jeff Scudder)' import atom.core import gdata.data GS_TEMPLATE = '{http://schemas.google.com/spreadsheets/2006}%s' GSX_NAMESPACE = 'http://schemas.google.com/spreadsheets/2006/extended' INSERT_MODE = 'insert' OVERWRITE_MODE = 'overwrite' WORKSHEETS_REL = 'http://schemas.google.com/spreadsheets/2006#worksheetsfeed' BATCH_POST_ID_TEMPLATE = ('https://spreadsheets.google.com/feeds/cells' '/%s/%s/private/full') BATCH_ENTRY_ID_TEMPLATE = '%s/R%sC%s' BATCH_EDIT_LINK_TEMPLATE = '%s/batch' class Error(Exception): pass class FieldMissing(Exception): pass class HeaderNotSet(Error): """The desired column header had no value for the row in the list feed.""" class Cell(atom.core.XmlElement): """The gs:cell element. A cell in the worksheet. The <gs:cell> element can appear only as a child of <atom:entry>. """ _qname = GS_TEMPLATE % 'cell' col = 'col' input_value = 'inputValue' numeric_value = 'numericValue' row = 'row' class ColCount(atom.core.XmlElement): """The gs:colCount element. Indicates the number of columns in the worksheet, including columns that contain only empty cells. The <gs:colCount> element can appear as a child of <atom:entry> or <atom:feed> """ _qname = GS_TEMPLATE % 'colCount' class Field(atom.core.XmlElement): """The gs:field element. A field single cell within a record. Contained in an <atom:entry>. """ _qname = GS_TEMPLATE % 'field' index = 'index' name = 'name' class Column(Field): """The gs:column element.""" _qname = GS_TEMPLATE % 'column' class Data(atom.core.XmlElement): """The gs:data element. A data region of a table. Contained in an <atom:entry> element. """ _qname = GS_TEMPLATE % 'data' column = [Column] insertion_mode = 'insertionMode' num_rows = 'numRows' start_row = 'startRow' class Header(atom.core.XmlElement): """The gs:header element. Indicates which row is the header row. Contained in an <atom:entry>. """ _qname = GS_TEMPLATE % 'header' row = 'row' class RowCount(atom.core.XmlElement): """The gs:rowCount element. Indicates the number of total rows in the worksheet, including rows that contain only empty cells. The <gs:rowCount> element can appear as a child of <atom:entry> or <atom:feed>. """ _qname = GS_TEMPLATE % 'rowCount' class Worksheet(atom.core.XmlElement): """The gs:worksheet element. The worksheet where the table lives.Contained in an <atom:entry>. 
""" _qname = GS_TEMPLATE % 'worksheet' name = 'name' class Spreadsheet(gdata.data.GDEntry): """An Atom entry which represents a Google Spreadsheet.""" def find_worksheets_feed(self): return self.find_url(WORKSHEETS_REL) FindWorksheetsFeed = find_worksheets_feed def get_spreadsheet_key(self): """Extracts the spreadsheet key unique to this spreadsheet.""" return self.get_id().split('/')[-1] GetSpreadsheetKey = get_spreadsheet_key class SpreadsheetsFeed(gdata.data.GDFeed): """An Atom feed listing a user's Google Spreadsheets.""" entry = [Spreadsheet] class WorksheetEntry(gdata.data.GDEntry): """An Atom entry representing a single worksheet in a spreadsheet.""" row_count = RowCount col_count = ColCount def get_worksheet_id(self): """The worksheet ID identifies this worksheet in its spreadsheet.""" return self.get_id().split('/')[-1] GetWorksheetId = get_worksheet_id class WorksheetsFeed(gdata.data.GDFeed): """A feed containing the worksheets in a single spreadsheet.""" entry = [WorksheetEntry] class Table(gdata.data.GDEntry): """An Atom entry that represents a subsection of a worksheet. A table allows you to treat part or all of a worksheet somewhat like a table in a database that is, as a set of structured data items. Tables don't exist until you explicitly create them before you can use a table feed, you have to explicitly define where the table data comes from. """ data = Data header = Header worksheet = Worksheet def get_table_id(self): if self.id.text: return self.id.text.split('/')[-1] return None GetTableId = get_table_id class TablesFeed(gdata.data.GDFeed): """An Atom feed containing the tables defined within a worksheet.""" entry = [Table] class Record(gdata.data.GDEntry): """An Atom entry representing a single record in a table. Note that the order of items in each record is the same as the order of columns in the table definition, which may not match the order of columns in the GUI. """ field = [Field] def value_for_index(self, column_index): for field in self.field: if field.index == column_index: return field.text raise FieldMissing('There is no field for %s' % column_index) ValueForIndex = value_for_index def value_for_name(self, name): for field in self.field: if field.name == name: return field.text raise FieldMissing('There is no field for %s' % name) ValueForName = value_for_name def get_record_id(self): if self.id.text: return self.id.text.split('/')[-1] return None class RecordsFeed(gdata.data.GDFeed): """An Atom feed containing the individuals records in a table.""" entry = [Record] class ListRow(atom.core.XmlElement): """A gsx column value within a row. The local tag in the _qname is blank and must be set to the column name. For example, when adding to a ListEntry, do: col_value = ListRow(text='something') col_value._qname = col_value._qname % 'mycolumnname' """ _qname = '{http://schemas.google.com/spreadsheets/2006/extended}%s' class ListEntry(gdata.data.GDEntry): """An Atom entry representing a worksheet row in the list feed. The values for a particular column can be get and set using x.get_value('columnheader') and x.set_value('columnheader', 'value'). See also the explanation of column names in the ListFeed class. """ # MASKED: get_value function (lines 244-256) def set_value(self, column_name, value): """Changes the value of cell in this row under the desired column name. Warning: if the cell contained a formula, it will be wiped out by setting the value using the list feed since the list feed only works with displayed values. 
No client side checking is performed on the column_name, you need to ensure that the column_name is the local tag name in the gsx tag for the column. For example, the column_name will not contain special characters, spaces, uppercase letters, etc. """ # Try to find the column in this row to change an existing value. values = self.get_elements(column_name, GSX_NAMESPACE) if len(values) > 0: values[0].text = value else: # There is no value in this row for the desired column, so add a new # gsx:column_name element. new_value = ListRow(text=value) new_value._qname = new_value._qname % (column_name,) self._other_elements.append(new_value) def to_dict(self): """Converts this row to a mapping of column names to their values.""" result = {} values = self.get_elements(namespace=GSX_NAMESPACE) for item in values: result[item._get_tag()] = item.text return result def from_dict(self, values): """Sets values for this row from the dictionary. Old values which are already in the entry will not be removed unless they are overwritten with new values from the dict. """ for column, value in values.items(): self.set_value(column, value) class ListsFeed(gdata.data.GDFeed): """An Atom feed in which each entry represents a row in a worksheet. The first row in the worksheet is used as the column names for the values in each row. If a header cell is empty, then a unique column ID is used for the gsx element name. Spaces in a column name are removed from the name of the corresponding gsx element. Caution: The columnNames are case-insensitive. For example, if you see a <gsx:e-mail> element in a feed, you can't know whether the column heading in the original worksheet was "e-mail" or "E-Mail". Note: If two or more columns have the same name, then subsequent columns of the same name have _n appended to the columnName. For example, if the first column name is "e-mail", followed by columns named "E-Mail" and "E-mail", then the columnNames will be gsx:e-mail, gsx:e-mail_2, and gsx:e-mail_3 respectively. """ entry = [ListEntry] class CellEntry(gdata.data.BatchEntry): """An Atom entry representing a single cell in a worksheet.""" cell = Cell class CellsFeed(gdata.data.BatchFeed): """An Atom feed contains one entry per cell in a worksheet. The cell feed supports batch operations, you can send multiple cell operations in one HTTP request. """ entry = [CellEntry] def add_set_cell(self, row, col, input_value): """Adds a request to change the contents of a cell to this batch request. Args: row: int, The row number for this cell. Numbering starts at 1. col: int, The column number for this cell. Starts at 1. input_value: str, The desired formula/content this cell should contain. """ self.add_update(CellEntry( id=atom.data.Id(text=BATCH_ENTRY_ID_TEMPLATE % ( self.id.text, row, col)), cell=Cell(col=str(col), row=str(row), input_value=input_value))) return self AddSetCell = add_set_cell def build_batch_cells_update(spreadsheet_key, worksheet_id): """Creates an empty cells feed for adding batch cell updates to. Call batch_set_cell on the resulting CellsFeed instance then send the batch request TODO: fill in Args: spreadsheet_key: The ID of the spreadsheet worksheet_id: """ feed_id_text = BATCH_POST_ID_TEMPLATE % (spreadsheet_key, worksheet_id) return CellsFeed( id=atom.data.Id(text=feed_id_text), link=[atom.data.Link( rel='edit', href=BATCH_EDIT_LINK_TEMPLATE % (feed_id_text,))]) BuildBatchCellsUpdate = build_batch_cells_update
def get_value(self, column_name): """Returns the displayed text for the desired column in this row. The formula or input which generated the displayed value is not accessible through the list feed, to see the user's input, use the cells feed. If a column is not present in this spreadsheet, or there is no value for a column in this row, this method will return None. """ values = self.get_elements(column_name, GSX_NAMESPACE) if len(values) == 0: return None return values[0].text
244
256
#!/usr/bin/env python # # Copyright (C) 2009 Google Inc. # # Licensed under the Apache License 2.0; # This module is used for version 2 of the Google Data APIs. """Provides classes and constants for the XML in the Google Spreadsheets API. Documentation for the raw XML which these classes represent can be found here: http://code.google.com/apis/spreadsheets/docs/3.0/reference.html#Elements """ # __author__ = '[email protected] (Jeff Scudder)' import atom.core import gdata.data GS_TEMPLATE = '{http://schemas.google.com/spreadsheets/2006}%s' GSX_NAMESPACE = 'http://schemas.google.com/spreadsheets/2006/extended' INSERT_MODE = 'insert' OVERWRITE_MODE = 'overwrite' WORKSHEETS_REL = 'http://schemas.google.com/spreadsheets/2006#worksheetsfeed' BATCH_POST_ID_TEMPLATE = ('https://spreadsheets.google.com/feeds/cells' '/%s/%s/private/full') BATCH_ENTRY_ID_TEMPLATE = '%s/R%sC%s' BATCH_EDIT_LINK_TEMPLATE = '%s/batch' class Error(Exception): pass class FieldMissing(Exception): pass class HeaderNotSet(Error): """The desired column header had no value for the row in the list feed.""" class Cell(atom.core.XmlElement): """The gs:cell element. A cell in the worksheet. The <gs:cell> element can appear only as a child of <atom:entry>. """ _qname = GS_TEMPLATE % 'cell' col = 'col' input_value = 'inputValue' numeric_value = 'numericValue' row = 'row' class ColCount(atom.core.XmlElement): """The gs:colCount element. Indicates the number of columns in the worksheet, including columns that contain only empty cells. The <gs:colCount> element can appear as a child of <atom:entry> or <atom:feed> """ _qname = GS_TEMPLATE % 'colCount' class Field(atom.core.XmlElement): """The gs:field element. A field single cell within a record. Contained in an <atom:entry>. """ _qname = GS_TEMPLATE % 'field' index = 'index' name = 'name' class Column(Field): """The gs:column element.""" _qname = GS_TEMPLATE % 'column' class Data(atom.core.XmlElement): """The gs:data element. A data region of a table. Contained in an <atom:entry> element. """ _qname = GS_TEMPLATE % 'data' column = [Column] insertion_mode = 'insertionMode' num_rows = 'numRows' start_row = 'startRow' class Header(atom.core.XmlElement): """The gs:header element. Indicates which row is the header row. Contained in an <atom:entry>. """ _qname = GS_TEMPLATE % 'header' row = 'row' class RowCount(atom.core.XmlElement): """The gs:rowCount element. Indicates the number of total rows in the worksheet, including rows that contain only empty cells. The <gs:rowCount> element can appear as a child of <atom:entry> or <atom:feed>. """ _qname = GS_TEMPLATE % 'rowCount' class Worksheet(atom.core.XmlElement): """The gs:worksheet element. The worksheet where the table lives.Contained in an <atom:entry>. 
""" _qname = GS_TEMPLATE % 'worksheet' name = 'name' class Spreadsheet(gdata.data.GDEntry): """An Atom entry which represents a Google Spreadsheet.""" def find_worksheets_feed(self): return self.find_url(WORKSHEETS_REL) FindWorksheetsFeed = find_worksheets_feed def get_spreadsheet_key(self): """Extracts the spreadsheet key unique to this spreadsheet.""" return self.get_id().split('/')[-1] GetSpreadsheetKey = get_spreadsheet_key class SpreadsheetsFeed(gdata.data.GDFeed): """An Atom feed listing a user's Google Spreadsheets.""" entry = [Spreadsheet] class WorksheetEntry(gdata.data.GDEntry): """An Atom entry representing a single worksheet in a spreadsheet.""" row_count = RowCount col_count = ColCount def get_worksheet_id(self): """The worksheet ID identifies this worksheet in its spreadsheet.""" return self.get_id().split('/')[-1] GetWorksheetId = get_worksheet_id class WorksheetsFeed(gdata.data.GDFeed): """A feed containing the worksheets in a single spreadsheet.""" entry = [WorksheetEntry] class Table(gdata.data.GDEntry): """An Atom entry that represents a subsection of a worksheet. A table allows you to treat part or all of a worksheet somewhat like a table in a database that is, as a set of structured data items. Tables don't exist until you explicitly create them before you can use a table feed, you have to explicitly define where the table data comes from. """ data = Data header = Header worksheet = Worksheet def get_table_id(self): if self.id.text: return self.id.text.split('/')[-1] return None GetTableId = get_table_id class TablesFeed(gdata.data.GDFeed): """An Atom feed containing the tables defined within a worksheet.""" entry = [Table] class Record(gdata.data.GDEntry): """An Atom entry representing a single record in a table. Note that the order of items in each record is the same as the order of columns in the table definition, which may not match the order of columns in the GUI. """ field = [Field] def value_for_index(self, column_index): for field in self.field: if field.index == column_index: return field.text raise FieldMissing('There is no field for %s' % column_index) ValueForIndex = value_for_index def value_for_name(self, name): for field in self.field: if field.name == name: return field.text raise FieldMissing('There is no field for %s' % name) ValueForName = value_for_name def get_record_id(self): if self.id.text: return self.id.text.split('/')[-1] return None class RecordsFeed(gdata.data.GDFeed): """An Atom feed containing the individuals records in a table.""" entry = [Record] class ListRow(atom.core.XmlElement): """A gsx column value within a row. The local tag in the _qname is blank and must be set to the column name. For example, when adding to a ListEntry, do: col_value = ListRow(text='something') col_value._qname = col_value._qname % 'mycolumnname' """ _qname = '{http://schemas.google.com/spreadsheets/2006/extended}%s' class ListEntry(gdata.data.GDEntry): """An Atom entry representing a worksheet row in the list feed. The values for a particular column can be get and set using x.get_value('columnheader') and x.set_value('columnheader', 'value'). See also the explanation of column names in the ListFeed class. """ def get_value(self, column_name): """Returns the displayed text for the desired column in this row. The formula or input which generated the displayed value is not accessible through the list feed, to see the user's input, use the cells feed. 
If a column is not present in this spreadsheet, or there is no value for a column in this row, this method will return None. """ values = self.get_elements(column_name, GSX_NAMESPACE) if len(values) == 0: return None return values[0].text def set_value(self, column_name, value): """Changes the value of cell in this row under the desired column name. Warning: if the cell contained a formula, it will be wiped out by setting the value using the list feed since the list feed only works with displayed values. No client side checking is performed on the column_name, you need to ensure that the column_name is the local tag name in the gsx tag for the column. For example, the column_name will not contain special characters, spaces, uppercase letters, etc. """ # Try to find the column in this row to change an existing value. values = self.get_elements(column_name, GSX_NAMESPACE) if len(values) > 0: values[0].text = value else: # There is no value in this row for the desired column, so add a new # gsx:column_name element. new_value = ListRow(text=value) new_value._qname = new_value._qname % (column_name,) self._other_elements.append(new_value) def to_dict(self): """Converts this row to a mapping of column names to their values.""" result = {} values = self.get_elements(namespace=GSX_NAMESPACE) for item in values: result[item._get_tag()] = item.text return result def from_dict(self, values): """Sets values for this row from the dictionary. Old values which are already in the entry will not be removed unless they are overwritten with new values from the dict. """ for column, value in values.items(): self.set_value(column, value) class ListsFeed(gdata.data.GDFeed): """An Atom feed in which each entry represents a row in a worksheet. The first row in the worksheet is used as the column names for the values in each row. If a header cell is empty, then a unique column ID is used for the gsx element name. Spaces in a column name are removed from the name of the corresponding gsx element. Caution: The columnNames are case-insensitive. For example, if you see a <gsx:e-mail> element in a feed, you can't know whether the column heading in the original worksheet was "e-mail" or "E-Mail". Note: If two or more columns have the same name, then subsequent columns of the same name have _n appended to the columnName. For example, if the first column name is "e-mail", followed by columns named "E-Mail" and "E-mail", then the columnNames will be gsx:e-mail, gsx:e-mail_2, and gsx:e-mail_3 respectively. """ entry = [ListEntry] class CellEntry(gdata.data.BatchEntry): """An Atom entry representing a single cell in a worksheet.""" cell = Cell class CellsFeed(gdata.data.BatchFeed): """An Atom feed contains one entry per cell in a worksheet. The cell feed supports batch operations, you can send multiple cell operations in one HTTP request. """ entry = [CellEntry] def add_set_cell(self, row, col, input_value): """Adds a request to change the contents of a cell to this batch request. Args: row: int, The row number for this cell. Numbering starts at 1. col: int, The column number for this cell. Starts at 1. input_value: str, The desired formula/content this cell should contain. """ self.add_update(CellEntry( id=atom.data.Id(text=BATCH_ENTRY_ID_TEMPLATE % ( self.id.text, row, col)), cell=Cell(col=str(col), row=str(row), input_value=input_value))) return self AddSetCell = add_set_cell def build_batch_cells_update(spreadsheet_key, worksheet_id): """Creates an empty cells feed for adding batch cell updates to. 
Call batch_set_cell on the resulting CellsFeed instance then send the batch request TODO: fill in Args: spreadsheet_key: The ID of the spreadsheet worksheet_id: """ feed_id_text = BATCH_POST_ID_TEMPLATE % (spreadsheet_key, worksheet_id) return CellsFeed( id=atom.data.Id(text=feed_id_text), link=[atom.data.Link( rel='edit', href=BATCH_EDIT_LINK_TEMPLATE % (feed_id_text,))]) BuildBatchCellsUpdate = build_batch_cells_update
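A short, self-contained sketch of how a list-feed row behaves with the accessors above; ListEntry is constructed directly for illustration (in practice rows come back from a spreadsheets client), and it assumes the code runs in, or imports from, the module shown.

# No network access involved; this only exercises the XML wrappers.
row = ListEntry()

row.set_value('status', 'done')     # creates a new gsx:status element
print(row.get_value('status'))      # -> 'done' (the displayed value only)
print(row.get_value('owner'))       # -> None: column absent from this row

row.from_dict({'status': 'blocked', 'owner': 'alice'})
print(row.to_dict())                # -> {'status': 'blocked', 'owner': 'alice'}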
set_value
Changes the value of the cell in this row under the desired column name. Warning: if the cell contained a formula, it will be wiped out by setting the value through the list feed, since the list feed only works with displayed values. No client-side checking is performed on the column_name; you need to ensure that the column_name is the local tag name in the gsx tag for the column. For example, the column_name will not contain special characters, spaces, uppercase letters, etc.
#!/usr/bin/env python # # Copyright (C) 2009 Google Inc. # # Licensed under the Apache License 2.0; # This module is used for version 2 of the Google Data APIs. """Provides classes and constants for the XML in the Google Spreadsheets API. Documentation for the raw XML which these classes represent can be found here: http://code.google.com/apis/spreadsheets/docs/3.0/reference.html#Elements """ # __author__ = '[email protected] (Jeff Scudder)' import atom.core import gdata.data GS_TEMPLATE = '{http://schemas.google.com/spreadsheets/2006}%s' GSX_NAMESPACE = 'http://schemas.google.com/spreadsheets/2006/extended' INSERT_MODE = 'insert' OVERWRITE_MODE = 'overwrite' WORKSHEETS_REL = 'http://schemas.google.com/spreadsheets/2006#worksheetsfeed' BATCH_POST_ID_TEMPLATE = ('https://spreadsheets.google.com/feeds/cells' '/%s/%s/private/full') BATCH_ENTRY_ID_TEMPLATE = '%s/R%sC%s' BATCH_EDIT_LINK_TEMPLATE = '%s/batch' class Error(Exception): pass class FieldMissing(Exception): pass class HeaderNotSet(Error): """The desired column header had no value for the row in the list feed.""" class Cell(atom.core.XmlElement): """The gs:cell element. A cell in the worksheet. The <gs:cell> element can appear only as a child of <atom:entry>. """ _qname = GS_TEMPLATE % 'cell' col = 'col' input_value = 'inputValue' numeric_value = 'numericValue' row = 'row' class ColCount(atom.core.XmlElement): """The gs:colCount element. Indicates the number of columns in the worksheet, including columns that contain only empty cells. The <gs:colCount> element can appear as a child of <atom:entry> or <atom:feed> """ _qname = GS_TEMPLATE % 'colCount' class Field(atom.core.XmlElement): """The gs:field element. A field single cell within a record. Contained in an <atom:entry>. """ _qname = GS_TEMPLATE % 'field' index = 'index' name = 'name' class Column(Field): """The gs:column element.""" _qname = GS_TEMPLATE % 'column' class Data(atom.core.XmlElement): """The gs:data element. A data region of a table. Contained in an <atom:entry> element. """ _qname = GS_TEMPLATE % 'data' column = [Column] insertion_mode = 'insertionMode' num_rows = 'numRows' start_row = 'startRow' class Header(atom.core.XmlElement): """The gs:header element. Indicates which row is the header row. Contained in an <atom:entry>. """ _qname = GS_TEMPLATE % 'header' row = 'row' class RowCount(atom.core.XmlElement): """The gs:rowCount element. Indicates the number of total rows in the worksheet, including rows that contain only empty cells. The <gs:rowCount> element can appear as a child of <atom:entry> or <atom:feed>. """ _qname = GS_TEMPLATE % 'rowCount' class Worksheet(atom.core.XmlElement): """The gs:worksheet element. The worksheet where the table lives.Contained in an <atom:entry>. 
""" _qname = GS_TEMPLATE % 'worksheet' name = 'name' class Spreadsheet(gdata.data.GDEntry): """An Atom entry which represents a Google Spreadsheet.""" def find_worksheets_feed(self): return self.find_url(WORKSHEETS_REL) FindWorksheetsFeed = find_worksheets_feed def get_spreadsheet_key(self): """Extracts the spreadsheet key unique to this spreadsheet.""" return self.get_id().split('/')[-1] GetSpreadsheetKey = get_spreadsheet_key class SpreadsheetsFeed(gdata.data.GDFeed): """An Atom feed listing a user's Google Spreadsheets.""" entry = [Spreadsheet] class WorksheetEntry(gdata.data.GDEntry): """An Atom entry representing a single worksheet in a spreadsheet.""" row_count = RowCount col_count = ColCount def get_worksheet_id(self): """The worksheet ID identifies this worksheet in its spreadsheet.""" return self.get_id().split('/')[-1] GetWorksheetId = get_worksheet_id class WorksheetsFeed(gdata.data.GDFeed): """A feed containing the worksheets in a single spreadsheet.""" entry = [WorksheetEntry] class Table(gdata.data.GDEntry): """An Atom entry that represents a subsection of a worksheet. A table allows you to treat part or all of a worksheet somewhat like a table in a database that is, as a set of structured data items. Tables don't exist until you explicitly create them before you can use a table feed, you have to explicitly define where the table data comes from. """ data = Data header = Header worksheet = Worksheet def get_table_id(self): if self.id.text: return self.id.text.split('/')[-1] return None GetTableId = get_table_id class TablesFeed(gdata.data.GDFeed): """An Atom feed containing the tables defined within a worksheet.""" entry = [Table] class Record(gdata.data.GDEntry): """An Atom entry representing a single record in a table. Note that the order of items in each record is the same as the order of columns in the table definition, which may not match the order of columns in the GUI. """ field = [Field] def value_for_index(self, column_index): for field in self.field: if field.index == column_index: return field.text raise FieldMissing('There is no field for %s' % column_index) ValueForIndex = value_for_index def value_for_name(self, name): for field in self.field: if field.name == name: return field.text raise FieldMissing('There is no field for %s' % name) ValueForName = value_for_name def get_record_id(self): if self.id.text: return self.id.text.split('/')[-1] return None class RecordsFeed(gdata.data.GDFeed): """An Atom feed containing the individuals records in a table.""" entry = [Record] class ListRow(atom.core.XmlElement): """A gsx column value within a row. The local tag in the _qname is blank and must be set to the column name. For example, when adding to a ListEntry, do: col_value = ListRow(text='something') col_value._qname = col_value._qname % 'mycolumnname' """ _qname = '{http://schemas.google.com/spreadsheets/2006/extended}%s' class ListEntry(gdata.data.GDEntry): """An Atom entry representing a worksheet row in the list feed. The values for a particular column can be get and set using x.get_value('columnheader') and x.set_value('columnheader', 'value'). See also the explanation of column names in the ListFeed class. """ def get_value(self, column_name): """Returns the displayed text for the desired column in this row. The formula or input which generated the displayed value is not accessible through the list feed, to see the user's input, use the cells feed. 
If a column is not present in this spreadsheet, or there is no value for a column in this row, this method will return None. """ values = self.get_elements(column_name, GSX_NAMESPACE) if len(values) == 0: return None return values[0].text # MASKED: set_value function (lines 258-279) def to_dict(self): """Converts this row to a mapping of column names to their values.""" result = {} values = self.get_elements(namespace=GSX_NAMESPACE) for item in values: result[item._get_tag()] = item.text return result def from_dict(self, values): """Sets values for this row from the dictionary. Old values which are already in the entry will not be removed unless they are overwritten with new values from the dict. """ for column, value in values.items(): self.set_value(column, value) class ListsFeed(gdata.data.GDFeed): """An Atom feed in which each entry represents a row in a worksheet. The first row in the worksheet is used as the column names for the values in each row. If a header cell is empty, then a unique column ID is used for the gsx element name. Spaces in a column name are removed from the name of the corresponding gsx element. Caution: The columnNames are case-insensitive. For example, if you see a <gsx:e-mail> element in a feed, you can't know whether the column heading in the original worksheet was "e-mail" or "E-Mail". Note: If two or more columns have the same name, then subsequent columns of the same name have _n appended to the columnName. For example, if the first column name is "e-mail", followed by columns named "E-Mail" and "E-mail", then the columnNames will be gsx:e-mail, gsx:e-mail_2, and gsx:e-mail_3 respectively. """ entry = [ListEntry] class CellEntry(gdata.data.BatchEntry): """An Atom entry representing a single cell in a worksheet.""" cell = Cell class CellsFeed(gdata.data.BatchFeed): """An Atom feed contains one entry per cell in a worksheet. The cell feed supports batch operations, you can send multiple cell operations in one HTTP request. """ entry = [CellEntry] def add_set_cell(self, row, col, input_value): """Adds a request to change the contents of a cell to this batch request. Args: row: int, The row number for this cell. Numbering starts at 1. col: int, The column number for this cell. Starts at 1. input_value: str, The desired formula/content this cell should contain. """ self.add_update(CellEntry( id=atom.data.Id(text=BATCH_ENTRY_ID_TEMPLATE % ( self.id.text, row, col)), cell=Cell(col=str(col), row=str(row), input_value=input_value))) return self AddSetCell = add_set_cell def build_batch_cells_update(spreadsheet_key, worksheet_id): """Creates an empty cells feed for adding batch cell updates to. Call batch_set_cell on the resulting CellsFeed instance then send the batch request TODO: fill in Args: spreadsheet_key: The ID of the spreadsheet worksheet_id: """ feed_id_text = BATCH_POST_ID_TEMPLATE % (spreadsheet_key, worksheet_id) return CellsFeed( id=atom.data.Id(text=feed_id_text), link=[atom.data.Link( rel='edit', href=BATCH_EDIT_LINK_TEMPLATE % (feed_id_text,))]) BuildBatchCellsUpdate = build_batch_cells_update
def set_value(self, column_name, value): """Changes the value of cell in this row under the desired column name. Warning: if the cell contained a formula, it will be wiped out by setting the value using the list feed since the list feed only works with displayed values. No client side checking is performed on the column_name, you need to ensure that the column_name is the local tag name in the gsx tag for the column. For example, the column_name will not contain special characters, spaces, uppercase letters, etc. """ # Try to find the column in this row to change an existing value. values = self.get_elements(column_name, GSX_NAMESPACE) if len(values) > 0: values[0].text = value else: # There is no value in this row for the desired column, so add a new # gsx:column_name element. new_value = ListRow(text=value) new_value._qname = new_value._qname % (column_name,) self._other_elements.append(new_value)
258
279
#!/usr/bin/env python # # Copyright (C) 2009 Google Inc. # # Licensed under the Apache License 2.0; # This module is used for version 2 of the Google Data APIs. """Provides classes and constants for the XML in the Google Spreadsheets API. Documentation for the raw XML which these classes represent can be found here: http://code.google.com/apis/spreadsheets/docs/3.0/reference.html#Elements """ # __author__ = '[email protected] (Jeff Scudder)' import atom.core import gdata.data GS_TEMPLATE = '{http://schemas.google.com/spreadsheets/2006}%s' GSX_NAMESPACE = 'http://schemas.google.com/spreadsheets/2006/extended' INSERT_MODE = 'insert' OVERWRITE_MODE = 'overwrite' WORKSHEETS_REL = 'http://schemas.google.com/spreadsheets/2006#worksheetsfeed' BATCH_POST_ID_TEMPLATE = ('https://spreadsheets.google.com/feeds/cells' '/%s/%s/private/full') BATCH_ENTRY_ID_TEMPLATE = '%s/R%sC%s' BATCH_EDIT_LINK_TEMPLATE = '%s/batch' class Error(Exception): pass class FieldMissing(Exception): pass class HeaderNotSet(Error): """The desired column header had no value for the row in the list feed.""" class Cell(atom.core.XmlElement): """The gs:cell element. A cell in the worksheet. The <gs:cell> element can appear only as a child of <atom:entry>. """ _qname = GS_TEMPLATE % 'cell' col = 'col' input_value = 'inputValue' numeric_value = 'numericValue' row = 'row' class ColCount(atom.core.XmlElement): """The gs:colCount element. Indicates the number of columns in the worksheet, including columns that contain only empty cells. The <gs:colCount> element can appear as a child of <atom:entry> or <atom:feed> """ _qname = GS_TEMPLATE % 'colCount' class Field(atom.core.XmlElement): """The gs:field element. A field single cell within a record. Contained in an <atom:entry>. """ _qname = GS_TEMPLATE % 'field' index = 'index' name = 'name' class Column(Field): """The gs:column element.""" _qname = GS_TEMPLATE % 'column' class Data(atom.core.XmlElement): """The gs:data element. A data region of a table. Contained in an <atom:entry> element. """ _qname = GS_TEMPLATE % 'data' column = [Column] insertion_mode = 'insertionMode' num_rows = 'numRows' start_row = 'startRow' class Header(atom.core.XmlElement): """The gs:header element. Indicates which row is the header row. Contained in an <atom:entry>. """ _qname = GS_TEMPLATE % 'header' row = 'row' class RowCount(atom.core.XmlElement): """The gs:rowCount element. Indicates the number of total rows in the worksheet, including rows that contain only empty cells. The <gs:rowCount> element can appear as a child of <atom:entry> or <atom:feed>. """ _qname = GS_TEMPLATE % 'rowCount' class Worksheet(atom.core.XmlElement): """The gs:worksheet element. The worksheet where the table lives.Contained in an <atom:entry>. 
""" _qname = GS_TEMPLATE % 'worksheet' name = 'name' class Spreadsheet(gdata.data.GDEntry): """An Atom entry which represents a Google Spreadsheet.""" def find_worksheets_feed(self): return self.find_url(WORKSHEETS_REL) FindWorksheetsFeed = find_worksheets_feed def get_spreadsheet_key(self): """Extracts the spreadsheet key unique to this spreadsheet.""" return self.get_id().split('/')[-1] GetSpreadsheetKey = get_spreadsheet_key class SpreadsheetsFeed(gdata.data.GDFeed): """An Atom feed listing a user's Google Spreadsheets.""" entry = [Spreadsheet] class WorksheetEntry(gdata.data.GDEntry): """An Atom entry representing a single worksheet in a spreadsheet.""" row_count = RowCount col_count = ColCount def get_worksheet_id(self): """The worksheet ID identifies this worksheet in its spreadsheet.""" return self.get_id().split('/')[-1] GetWorksheetId = get_worksheet_id class WorksheetsFeed(gdata.data.GDFeed): """A feed containing the worksheets in a single spreadsheet.""" entry = [WorksheetEntry] class Table(gdata.data.GDEntry): """An Atom entry that represents a subsection of a worksheet. A table allows you to treat part or all of a worksheet somewhat like a table in a database that is, as a set of structured data items. Tables don't exist until you explicitly create them before you can use a table feed, you have to explicitly define where the table data comes from. """ data = Data header = Header worksheet = Worksheet def get_table_id(self): if self.id.text: return self.id.text.split('/')[-1] return None GetTableId = get_table_id class TablesFeed(gdata.data.GDFeed): """An Atom feed containing the tables defined within a worksheet.""" entry = [Table] class Record(gdata.data.GDEntry): """An Atom entry representing a single record in a table. Note that the order of items in each record is the same as the order of columns in the table definition, which may not match the order of columns in the GUI. """ field = [Field] def value_for_index(self, column_index): for field in self.field: if field.index == column_index: return field.text raise FieldMissing('There is no field for %s' % column_index) ValueForIndex = value_for_index def value_for_name(self, name): for field in self.field: if field.name == name: return field.text raise FieldMissing('There is no field for %s' % name) ValueForName = value_for_name def get_record_id(self): if self.id.text: return self.id.text.split('/')[-1] return None class RecordsFeed(gdata.data.GDFeed): """An Atom feed containing the individuals records in a table.""" entry = [Record] class ListRow(atom.core.XmlElement): """A gsx column value within a row. The local tag in the _qname is blank and must be set to the column name. For example, when adding to a ListEntry, do: col_value = ListRow(text='something') col_value._qname = col_value._qname % 'mycolumnname' """ _qname = '{http://schemas.google.com/spreadsheets/2006/extended}%s' class ListEntry(gdata.data.GDEntry): """An Atom entry representing a worksheet row in the list feed. The values for a particular column can be get and set using x.get_value('columnheader') and x.set_value('columnheader', 'value'). See also the explanation of column names in the ListFeed class. """ def get_value(self, column_name): """Returns the displayed text for the desired column in this row. The formula or input which generated the displayed value is not accessible through the list feed, to see the user's input, use the cells feed. 
If a column is not present in this spreadsheet, or there is no value for a column in this row, this method will return None. """ values = self.get_elements(column_name, GSX_NAMESPACE) if len(values) == 0: return None return values[0].text def set_value(self, column_name, value): """Changes the value of cell in this row under the desired column name. Warning: if the cell contained a formula, it will be wiped out by setting the value using the list feed since the list feed only works with displayed values. No client side checking is performed on the column_name, you need to ensure that the column_name is the local tag name in the gsx tag for the column. For example, the column_name will not contain special characters, spaces, uppercase letters, etc. """ # Try to find the column in this row to change an existing value. values = self.get_elements(column_name, GSX_NAMESPACE) if len(values) > 0: values[0].text = value else: # There is no value in this row for the desired column, so add a new # gsx:column_name element. new_value = ListRow(text=value) new_value._qname = new_value._qname % (column_name,) self._other_elements.append(new_value) def to_dict(self): """Converts this row to a mapping of column names to their values.""" result = {} values = self.get_elements(namespace=GSX_NAMESPACE) for item in values: result[item._get_tag()] = item.text return result def from_dict(self, values): """Sets values for this row from the dictionary. Old values which are already in the entry will not be removed unless they are overwritten with new values from the dict. """ for column, value in values.items(): self.set_value(column, value) class ListsFeed(gdata.data.GDFeed): """An Atom feed in which each entry represents a row in a worksheet. The first row in the worksheet is used as the column names for the values in each row. If a header cell is empty, then a unique column ID is used for the gsx element name. Spaces in a column name are removed from the name of the corresponding gsx element. Caution: The columnNames are case-insensitive. For example, if you see a <gsx:e-mail> element in a feed, you can't know whether the column heading in the original worksheet was "e-mail" or "E-Mail". Note: If two or more columns have the same name, then subsequent columns of the same name have _n appended to the columnName. For example, if the first column name is "e-mail", followed by columns named "E-Mail" and "E-mail", then the columnNames will be gsx:e-mail, gsx:e-mail_2, and gsx:e-mail_3 respectively. """ entry = [ListEntry] class CellEntry(gdata.data.BatchEntry): """An Atom entry representing a single cell in a worksheet.""" cell = Cell class CellsFeed(gdata.data.BatchFeed): """An Atom feed contains one entry per cell in a worksheet. The cell feed supports batch operations, you can send multiple cell operations in one HTTP request. """ entry = [CellEntry] def add_set_cell(self, row, col, input_value): """Adds a request to change the contents of a cell to this batch request. Args: row: int, The row number for this cell. Numbering starts at 1. col: int, The column number for this cell. Starts at 1. input_value: str, The desired formula/content this cell should contain. """ self.add_update(CellEntry( id=atom.data.Id(text=BATCH_ENTRY_ID_TEMPLATE % ( self.id.text, row, col)), cell=Cell(col=str(col), row=str(row), input_value=input_value))) return self AddSetCell = add_set_cell def build_batch_cells_update(spreadsheet_key, worksheet_id): """Creates an empty cells feed for adding batch cell updates to. 
Call batch_set_cell on the resulting CellsFeed instance then send the batch request TODO: fill in Args: spreadsheet_key: The ID of the spreadsheet worksheet_id: """ feed_id_text = BATCH_POST_ID_TEMPLATE % (spreadsheet_key, worksheet_id) return CellsFeed( id=atom.data.Id(text=feed_id_text), link=[atom.data.Link( rel='edit', href=BATCH_EDIT_LINK_TEMPLATE % (feed_id_text,))]) BuildBatchCellsUpdate = build_batch_cells_update
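Because set_value goes through the list feed and therefore wipes formulas, formula edits belong on the cells feed; the batch helpers at the end of the file above build such a request. A sketch with placeholder spreadsheet and worksheet identifiers follows; actually sending the feed needs an authenticated spreadsheets client, which is outside this module.

# 'my-spreadsheet-key' and 'od6' are placeholders, not real identifiers.
batch = build_batch_cells_update('my-spreadsheet-key', 'od6')

# Each call queues one cell update; input_value may be a literal or a formula.
batch.add_set_cell(row=1, col=1, input_value='Item')
batch.add_set_cell(row=1, col=2, input_value='Price')
batch.add_set_cell(row=2, col=2, input_value='=SUM(C2:C10)')

# client.batch(batch, force=True)   # assumed client call; not part of this module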
__getattribute__
Attribute lookup method for custom class attributes. ReFrame test variables are descriptors injected at the class level. If a variable descriptor has already been injected into the class, do not return the descriptor object; instead, return the default value associated with that variable. .. warning:: .. versionchanged:: 3.7.0 Prior versions exposed the variable descriptor object if it was already present in the class, instead of returning the variable's default value.
# Copyright 2016-2021 Swiss National Supercomputing Centre (CSCS/ETH Zurich) # ReFrame Project Developers. See the top-level LICENSE file for details. # # SPDX-License-Identifier: BSD-3-Clause # # Meta-class for creating regression tests. # import functools import types import reframe.core.namespaces as namespaces import reframe.core.parameters as parameters import reframe.core.variables as variables import reframe.core.hooks as hooks import reframe.utility as utils from reframe.core.exceptions import ReframeSyntaxError from reframe.core.deferrable import deferrable, _DeferredPerformanceExpression class RegressionTestMeta(type): class MetaNamespace(namespaces.LocalNamespace): '''Custom namespace to control the cls attribute assignment. Regular Python class attributes can be overridden by either parameters or variables respecting the order of execution. A variable or a parameter may not be declared more than once in the same class body. Overriding a variable with a parameter or the other way around has an undefined behavior. A variable's value may be updated multiple times within the same class body. A parameter's value cannot be updated more than once within the same class body. ''' def __setitem__(self, key, value): if isinstance(value, variables.TestVar): # Insert the attribute in the variable namespace try: self['_rfm_local_var_space'][key] = value value.__set_name__(self, key) except KeyError: raise ReframeSyntaxError( f'variable {key!r} is already declared' ) from None # Override the regular class attribute (if present) and return self._namespace.pop(key, None) return elif isinstance(value, parameters.TestParam): # Insert the attribute in the parameter namespace try: self['_rfm_local_param_space'][key] = value except KeyError: raise ReframeSyntaxError( f'parameter {key!r} is already declared in this class' ) from None # Override the regular class attribute (if present) and return self._namespace.pop(key, None) return elif key in self['_rfm_local_param_space']: raise ReframeSyntaxError( f'cannot override parameter {key!r}' ) else: # Insert the items manually to overide the namespace clash # check from the base namespace. self._namespace[key] = value # Register functions decorated with either @sanity_function or # @performance_variables or @performance_function decorators. if hasattr(value, '_rfm_sanity_fn'): try: super().__setitem__('_rfm_sanity', value) except KeyError: raise ReframeSyntaxError( 'the @sanity_function decorator can only be used ' 'once in the class body' ) from None elif hasattr(value, '_rfm_perf_key'): try: self['_rfm_perf_fns'][key] = value except KeyError: raise ReframeSyntaxError( f'the performance function {key!r} has already been ' f'defined in this class' ) from None # Register the final methods if hasattr(value, '_rfm_final'): self['_rfm_final_methods'].add(key) # Register the hooks - if a value does not meet the conditions # it will be simply ignored self['_rfm_hook_registry'].add(value) def __getitem__(self, key): '''Expose and control access to the local namespaces. Variables may only be retrieved if their value has been previously set. Accessing a parameter in the class body is disallowed (the actual test parameter is set during the class instantiation). 
''' try: return super().__getitem__(key) except KeyError as err: try: # Handle variable access return self['_rfm_local_var_space'][key] except KeyError: # Handle parameter access if key in self['_rfm_local_param_space']: raise ReframeSyntaxError( 'accessing a test parameter from the class ' 'body is disallowed' ) from None else: # As the last resource, look if key is a variable in # any of the base classes. If so, make its value # available in the current class' namespace. for b in self['_rfm_bases']: if key in b._rfm_var_space: # Store a deep-copy of the variable's # value and return. v = b._rfm_var_space[key].default_value self._namespace[key] = v return self._namespace[key] # If 'key' is neither a variable nor a parameter, # raise the exception from the base __getitem__. raise err from None def reset(self, key): '''Reset an item to rerun it through the __setitem__ logic.''' self[key] = self[key] class WrappedFunction: '''Descriptor to wrap a free function as a bound-method. The free function object is wrapped by the constructor. Instances of this class should be inserted into the namespace of the target class with the desired name for the bound-method. Since this class is a descriptor, the `__get__` method will return the right bound-method when accessed from a class instance. :meta private: ''' __slots__ = ('fn') def __init__(self, fn, name=None): @functools.wraps(fn) def _fn(*args, **kwargs): return fn(*args, **kwargs) self.fn = _fn if name: self.fn.__name__ = name def __get__(self, obj, objtype=None): if objtype is None: objtype = type(obj) self.fn.__qualname__ = '.'.join( [objtype.__qualname__, self.fn.__name__] ) if obj is None: return self.fn return types.MethodType(self.fn, obj) def __call__(self, *args, **kwargs): return self.fn(*args, **kwargs) def __getattr__(self, name): if name in self.__slots__: return super().__getattr__(name) else: return getattr(self.fn, name) def __setattr__(self, name, value): if name in self.__slots__: super().__setattr__(name, value) else: setattr(self.fn, name, value) @classmethod def __prepare__(metacls, name, bases, **kwargs): namespace = super().__prepare__(name, bases, **kwargs) # Keep reference to the bases inside the namespace namespace['_rfm_bases'] = [ b for b in bases if hasattr(b, '_rfm_var_space') ] # Regression test parameter space defined at the class level local_param_space = namespaces.LocalNamespace() namespace['_rfm_local_param_space'] = local_param_space # Directive to insert a regression test parameter directly in the # class body as: `P0 = parameter([0,1,2,3])`. namespace['parameter'] = parameters.TestParam # Regression test var space defined at the class level local_var_space = namespaces.LocalNamespace() namespace['_rfm_local_var_space'] = local_var_space # Directives to add/modify a regression test variable namespace['variable'] = variables.TestVar namespace['required'] = variables.Undefined # Utility decorators namespace['_rfm_ext_bound'] = set() def bind(fn, name=None): '''Directive to bind a free function to a class. See online docs for more information. .. note:: Functions bound using this directive must be re-inspected after the class body execution has completed. This directive attaches the external method into the class namespace and returns the associated instance of the :class:`WrappedFunction`. 
However, this instance may be further modified by other ReFrame builtins such as :func:`run_before`, :func:`run_after`, :func:`final` and so on after it was added to the namespace, which would bypass the logic implemented in the :func:`__setitem__` method from the :class:`MetaNamespace` class. Hence, we track the items set by this directive in the ``_rfm_ext_bound`` set, so they can be later re-inspected. ''' inst = metacls.WrappedFunction(fn, name) namespace[inst.__name__] = inst # Track the imported external functions namespace['_rfm_ext_bound'].add(inst.__name__) return inst def final(fn): '''Indicate that a function is final and cannot be overridden.''' fn._rfm_final = True return fn namespace['bind'] = bind namespace['final'] = final namespace['_rfm_final_methods'] = set() # Hook-related functionality def run_before(stage): '''Decorator for attaching a test method to a given stage. See online docs for more information. ''' return hooks.attach_to('pre_' + stage) def run_after(stage): '''Decorator for attaching a test method to a given stage. See online docs for more information. ''' return hooks.attach_to('post_' + stage) namespace['run_before'] = run_before namespace['run_after'] = run_after namespace['require_deps'] = hooks.require_deps namespace['_rfm_hook_registry'] = hooks.HookRegistry() # Machinery to add a sanity function def sanity_function(fn): '''Mark a function as the test's sanity function. Decorated functions must be unary and they will be converted into deferred expressions. ''' _def_fn = deferrable(fn) setattr(_def_fn, '_rfm_sanity_fn', True) return _def_fn namespace['sanity_function'] = sanity_function namespace['deferrable'] = deferrable # Machinery to add performance functions def performance_function(units, *, perf_key=None): '''Decorate a function to extract a performance variable. The ``units`` argument indicates the units of the performance variable to be extracted. The ``perf_key`` optional arg will be used as the name of the performance variable. If not provided, the function name will be used as the performance variable name. ''' if not isinstance(units, str): raise TypeError('performance units must be a string') if perf_key and not isinstance(perf_key, str): raise TypeError("'perf_key' must be a string") def _deco_wrapper(func): if not utils.is_trivially_callable(func, non_def_args=1): raise TypeError( f'performance function {func.__name__!r} has more ' f'than one argument without a default value' ) @functools.wraps(func) def _perf_fn(*args, **kwargs): return _DeferredPerformanceExpression( func, units, *args, **kwargs ) _perf_key = perf_key if perf_key else func.__name__ setattr(_perf_fn, '_rfm_perf_key', _perf_key) return _perf_fn return _deco_wrapper namespace['performance_function'] = performance_function namespace['_rfm_perf_fns'] = namespaces.LocalNamespace() return metacls.MetaNamespace(namespace) def __new__(metacls, name, bases, namespace, **kwargs): '''Remove directives from the class namespace. It does not make sense to have some directives available after the class was created or even at the instance level (e.g. doing ``self.parameter([1, 2, 3])`` does not make sense). So here, we intercept those directives out of the namespace before the class is constructed. ''' directives = [ 'parameter', 'variable', 'bind', 'run_before', 'run_after', 'require_deps', 'required', 'deferrable', 'sanity_function', 'final', 'performance_function' ] for b in directives: namespace.pop(b, None) # Reset the external functions imported through the bind directive. 
for item in namespace.pop('_rfm_ext_bound'): namespace.reset(item) return super().__new__(metacls, name, bases, dict(namespace), **kwargs) def __init__(cls, name, bases, namespace, **kwargs): super().__init__(name, bases, namespace, **kwargs) # Create a set with the attribute names already in use. cls._rfm_dir = set() for base in (b for b in bases if hasattr(b, '_rfm_dir')): cls._rfm_dir.update(base._rfm_dir) used_attribute_names = set(cls._rfm_dir) # Build the var space and extend the target namespace variables.VarSpace(cls, used_attribute_names) used_attribute_names.update(cls._rfm_var_space.vars) # Build the parameter space parameters.ParamSpace(cls, used_attribute_names) # Update used names set with the local __dict__ cls._rfm_dir.update(cls.__dict__) # Update the hook registry with the bases for base in cls._rfm_bases: cls._rfm_hook_registry.update( base._rfm_hook_registry, denied_hooks=namespace ) # Search the bases if no local sanity functions exist. if '_rfm_sanity' not in namespace: for base in cls._rfm_bases: if hasattr(base, '_rfm_sanity'): cls._rfm_sanity = getattr(base, '_rfm_sanity') if cls._rfm_sanity.__name__ in namespace: raise ReframeSyntaxError( f'{cls.__qualname__!r} overrides the candidate ' f'sanity function ' f'{cls._rfm_sanity.__qualname__!r} without ' f'defining an alternative' ) break # Update the performance function dict with the bases. for base in cls._rfm_bases: for k, v in base._rfm_perf_fns.items(): if k not in namespace: try: cls._rfm_perf_fns[k] = v except KeyError: '''Performance function overridden by other class''' # Add the final functions from its parents cls._rfm_final_methods.update( *(b._rfm_final_methods for b in cls._rfm_bases) ) if getattr(cls, '_rfm_override_final', None): return for b in cls._rfm_bases: for key in b._rfm_final_methods: if key in namespace and callable(namespace[key]): msg = (f"'{cls.__qualname__}.{key}' attempts to " f"override final method " f"'{b.__qualname__}.{key}'; " f"you should use the pipeline hooks instead") raise ReframeSyntaxError(msg) def __call__(cls, *args, **kwargs): '''Inject parameter and variable spaces during object construction. When a class is instantiated, this method intercepts the arguments associated to the parameter and variable spaces. This prevents both :func:`__new__` and :func:`__init__` methods from ever seing these arguments. The parameter and variable spaces are injected into the object after construction and before initialization. ''' # Intercept constructor arguments _rfm_use_params = kwargs.pop('_rfm_use_params', False) obj = cls.__new__(cls, *args, **kwargs) # Insert the var & param spaces cls._rfm_var_space.inject(obj, cls) cls._rfm_param_space.inject(obj, cls, _rfm_use_params) obj.__init__(*args, **kwargs) return obj # MASKED: __getattribute__ function (lines 446-471) def __getattr__(cls, name): '''Backup attribute lookup method into custom namespaces. Some ReFrame built-in types are stored under their own sub-namespaces. This method will perform an attribute lookup on these sub-namespaces if a call to the default :func:`__getattribute__` method fails to retrieve the requested class attribute. 
''' try: var_space = super().__getattribute__('_rfm_var_space') return var_space.vars[name] except AttributeError: '''Catch early access attempt to the variable space.''' except KeyError: '''Requested name not in variable space.''' try: param_space = super().__getattribute__('_rfm_param_space') return param_space.params[name] except AttributeError: '''Catch early access attempt to the parameter space.''' except KeyError: '''Requested name not in parameter space.''' raise AttributeError( f'class {cls.__qualname__!r} has no attribute {name!r}' ) from None def setvar(cls, name, value): '''Set the value of a variable. :param name: The name of the variable. :param value: The value of the variable. :returns: :class:`True` if the variable was set. A variable will *not* be set, if it does not exist or when an attempt is made to set it with its underlying descriptor. This happens during the variable injection time and it should be delegated to the class' :func:`__setattr__` method. :raises ReframeSyntaxError: If an attempt is made to override a variable with a descriptor other than its underlying one. ''' try: var_space = super().__getattribute__('_rfm_var_space') if name in var_space: if not hasattr(value, '__get__'): var_space[name].define(value) return True elif var_space[name].field is not value: desc = '.'.join([cls.__qualname__, name]) raise ReframeSyntaxError( f'cannot override variable descriptor {desc!r}' ) else: # Variable is being injected return False except AttributeError: '''Catch early access attempt to the variable space.''' return False def __setattr__(cls, name, value): '''Handle the special treatment required for variables and parameters. A variable's default value can be updated when accessed as a regular class attribute. This behavior does not apply when the assigned value is a descriptor object. In that case, the task of setting the value is delegated to the base :func:`__setattr__` (this is to comply with standard Python behavior). However, since the variables are already descriptors which are injected during class instantiation, we disallow any attempt to override this descriptor (since it would be silently re-overridden in any case). Altering the value of a parameter when accessed as a class attribute is not allowed. This would break the parameter space internals. ''' # Try to treat `name` as variable if cls.setvar(name, value): return # Try to treat `name` as a parameter try: # Catch attempts to override a test parameter param_space = super().__getattribute__('_rfm_param_space') if name in param_space.params: raise ReframeSyntaxError(f'cannot override parameter {name!r}') except AttributeError: '''Catch early access attempt to the parameter space.''' # Treat `name` as normal class attribute super().__setattr__(name, value) @property def param_space(cls): ''' Make the parameter space available as read-only.''' return cls._rfm_param_space def is_abstract(cls): '''Check if the class is an abstract test. This is the case when some parameters are undefined, which results in the length of the parameter space being 0. :return: bool indicating whether the test has undefined parameters. :meta private: ''' return len(cls.param_space) == 0
def __getattribute__(cls, name): '''Attribute lookup method for custom class attributes. ReFrame test variables are descriptors injected at the class level. If a variable descriptor has already been injected into the class, do not return the descriptor object and return the default value associated with that variable instead. .. warning:: .. versionchanged:: 3.7.0 Prior versions exposed the variable descriptor object if this was already present in the class, instead of returning the variable's default value. ''' try: var_space = super().__getattribute__('_rfm_var_space') except AttributeError: var_space = None # If the variable is already injected, delegate lookup to __getattr__. if var_space and name in var_space.injected_vars: raise AttributeError('delegate variable lookup to __getattr__') # Default back to the base method if no special treatment required. return super().__getattribute__(name)
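To make the delegation in the implementation above concrete, here is a minimal, self-contained sketch of the same pattern: a metaclass ``__getattribute__`` that hides selected class attributes by raising ``AttributeError``, which makes Python fall back to the metaclass ``__getattr__``. All names here (``_Meta``, ``Demo``, ``_hidden``) are illustrative and not part of ReFrame.

class _Meta(type):
    def __getattribute__(cls, name):
        hidden = super().__getattribute__('_hidden')
        if name in hidden:
            # Hide the raw class attribute and delegate to __getattr__.
            raise AttributeError('delegate lookup to __getattr__')

        return super().__getattribute__(name)

    def __getattr__(cls, name):
        hidden = super().__getattribute__('_hidden')
        try:
            return hidden[name]
        except KeyError:
            raise AttributeError(
                f'class {cls.__qualname__!r} has no attribute {name!r}'
            ) from None


class Demo(metaclass=_Meta):
    _hidden = {'x': 42}     # stands in for the injected variable space
    x = 'descriptor'        # the raw class attribute that should stay hidden


print(Demo.x)   # -> 42 (the value from _hidden, not the raw class attribute)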
446
471
# Copyright 2016-2021 Swiss National Supercomputing Centre (CSCS/ETH Zurich) # ReFrame Project Developers. See the top-level LICENSE file for details. # # SPDX-License-Identifier: BSD-3-Clause # # Meta-class for creating regression tests. # import functools import types import reframe.core.namespaces as namespaces import reframe.core.parameters as parameters import reframe.core.variables as variables import reframe.core.hooks as hooks import reframe.utility as utils from reframe.core.exceptions import ReframeSyntaxError from reframe.core.deferrable import deferrable, _DeferredPerformanceExpression class RegressionTestMeta(type): class MetaNamespace(namespaces.LocalNamespace): '''Custom namespace to control the cls attribute assignment. Regular Python class attributes can be overridden by either parameters or variables respecting the order of execution. A variable or a parameter may not be declared more than once in the same class body. Overriding a variable with a parameter or the other way around has an undefined behavior. A variable's value may be updated multiple times within the same class body. A parameter's value cannot be updated more than once within the same class body. ''' def __setitem__(self, key, value): if isinstance(value, variables.TestVar): # Insert the attribute in the variable namespace try: self['_rfm_local_var_space'][key] = value value.__set_name__(self, key) except KeyError: raise ReframeSyntaxError( f'variable {key!r} is already declared' ) from None # Override the regular class attribute (if present) and return self._namespace.pop(key, None) return elif isinstance(value, parameters.TestParam): # Insert the attribute in the parameter namespace try: self['_rfm_local_param_space'][key] = value except KeyError: raise ReframeSyntaxError( f'parameter {key!r} is already declared in this class' ) from None # Override the regular class attribute (if present) and return self._namespace.pop(key, None) return elif key in self['_rfm_local_param_space']: raise ReframeSyntaxError( f'cannot override parameter {key!r}' ) else: # Insert the items manually to overide the namespace clash # check from the base namespace. self._namespace[key] = value # Register functions decorated with either @sanity_function or # @performance_variables or @performance_function decorators. if hasattr(value, '_rfm_sanity_fn'): try: super().__setitem__('_rfm_sanity', value) except KeyError: raise ReframeSyntaxError( 'the @sanity_function decorator can only be used ' 'once in the class body' ) from None elif hasattr(value, '_rfm_perf_key'): try: self['_rfm_perf_fns'][key] = value except KeyError: raise ReframeSyntaxError( f'the performance function {key!r} has already been ' f'defined in this class' ) from None # Register the final methods if hasattr(value, '_rfm_final'): self['_rfm_final_methods'].add(key) # Register the hooks - if a value does not meet the conditions # it will be simply ignored self['_rfm_hook_registry'].add(value) def __getitem__(self, key): '''Expose and control access to the local namespaces. Variables may only be retrieved if their value has been previously set. Accessing a parameter in the class body is disallowed (the actual test parameter is set during the class instantiation). 
''' try: return super().__getitem__(key) except KeyError as err: try: # Handle variable access return self['_rfm_local_var_space'][key] except KeyError: # Handle parameter access if key in self['_rfm_local_param_space']: raise ReframeSyntaxError( 'accessing a test parameter from the class ' 'body is disallowed' ) from None else: # As the last resource, look if key is a variable in # any of the base classes. If so, make its value # available in the current class' namespace. for b in self['_rfm_bases']: if key in b._rfm_var_space: # Store a deep-copy of the variable's # value and return. v = b._rfm_var_space[key].default_value self._namespace[key] = v return self._namespace[key] # If 'key' is neither a variable nor a parameter, # raise the exception from the base __getitem__. raise err from None def reset(self, key): '''Reset an item to rerun it through the __setitem__ logic.''' self[key] = self[key] class WrappedFunction: '''Descriptor to wrap a free function as a bound-method. The free function object is wrapped by the constructor. Instances of this class should be inserted into the namespace of the target class with the desired name for the bound-method. Since this class is a descriptor, the `__get__` method will return the right bound-method when accessed from a class instance. :meta private: ''' __slots__ = ('fn') def __init__(self, fn, name=None): @functools.wraps(fn) def _fn(*args, **kwargs): return fn(*args, **kwargs) self.fn = _fn if name: self.fn.__name__ = name def __get__(self, obj, objtype=None): if objtype is None: objtype = type(obj) self.fn.__qualname__ = '.'.join( [objtype.__qualname__, self.fn.__name__] ) if obj is None: return self.fn return types.MethodType(self.fn, obj) def __call__(self, *args, **kwargs): return self.fn(*args, **kwargs) def __getattr__(self, name): if name in self.__slots__: return super().__getattr__(name) else: return getattr(self.fn, name) def __setattr__(self, name, value): if name in self.__slots__: super().__setattr__(name, value) else: setattr(self.fn, name, value) @classmethod def __prepare__(metacls, name, bases, **kwargs): namespace = super().__prepare__(name, bases, **kwargs) # Keep reference to the bases inside the namespace namespace['_rfm_bases'] = [ b for b in bases if hasattr(b, '_rfm_var_space') ] # Regression test parameter space defined at the class level local_param_space = namespaces.LocalNamespace() namespace['_rfm_local_param_space'] = local_param_space # Directive to insert a regression test parameter directly in the # class body as: `P0 = parameter([0,1,2,3])`. namespace['parameter'] = parameters.TestParam # Regression test var space defined at the class level local_var_space = namespaces.LocalNamespace() namespace['_rfm_local_var_space'] = local_var_space # Directives to add/modify a regression test variable namespace['variable'] = variables.TestVar namespace['required'] = variables.Undefined # Utility decorators namespace['_rfm_ext_bound'] = set() def bind(fn, name=None): '''Directive to bind a free function to a class. See online docs for more information. .. note:: Functions bound using this directive must be re-inspected after the class body execution has completed. This directive attaches the external method into the class namespace and returns the associated instance of the :class:`WrappedFunction`. 
However, this instance may be further modified by other ReFrame builtins such as :func:`run_before`, :func:`run_after`, :func:`final` and so on after it was added to the namespace, which would bypass the logic implemented in the :func:`__setitem__` method from the :class:`MetaNamespace` class. Hence, we track the items set by this directive in the ``_rfm_ext_bound`` set, so they can be later re-inspected. ''' inst = metacls.WrappedFunction(fn, name) namespace[inst.__name__] = inst # Track the imported external functions namespace['_rfm_ext_bound'].add(inst.__name__) return inst def final(fn): '''Indicate that a function is final and cannot be overridden.''' fn._rfm_final = True return fn namespace['bind'] = bind namespace['final'] = final namespace['_rfm_final_methods'] = set() # Hook-related functionality def run_before(stage): '''Decorator for attaching a test method to a given stage. See online docs for more information. ''' return hooks.attach_to('pre_' + stage) def run_after(stage): '''Decorator for attaching a test method to a given stage. See online docs for more information. ''' return hooks.attach_to('post_' + stage) namespace['run_before'] = run_before namespace['run_after'] = run_after namespace['require_deps'] = hooks.require_deps namespace['_rfm_hook_registry'] = hooks.HookRegistry() # Machinery to add a sanity function def sanity_function(fn): '''Mark a function as the test's sanity function. Decorated functions must be unary and they will be converted into deferred expressions. ''' _def_fn = deferrable(fn) setattr(_def_fn, '_rfm_sanity_fn', True) return _def_fn namespace['sanity_function'] = sanity_function namespace['deferrable'] = deferrable # Machinery to add performance functions def performance_function(units, *, perf_key=None): '''Decorate a function to extract a performance variable. The ``units`` argument indicates the units of the performance variable to be extracted. The ``perf_key`` optional arg will be used as the name of the performance variable. If not provided, the function name will be used as the performance variable name. ''' if not isinstance(units, str): raise TypeError('performance units must be a string') if perf_key and not isinstance(perf_key, str): raise TypeError("'perf_key' must be a string") def _deco_wrapper(func): if not utils.is_trivially_callable(func, non_def_args=1): raise TypeError( f'performance function {func.__name__!r} has more ' f'than one argument without a default value' ) @functools.wraps(func) def _perf_fn(*args, **kwargs): return _DeferredPerformanceExpression( func, units, *args, **kwargs ) _perf_key = perf_key if perf_key else func.__name__ setattr(_perf_fn, '_rfm_perf_key', _perf_key) return _perf_fn return _deco_wrapper namespace['performance_function'] = performance_function namespace['_rfm_perf_fns'] = namespaces.LocalNamespace() return metacls.MetaNamespace(namespace) def __new__(metacls, name, bases, namespace, **kwargs): '''Remove directives from the class namespace. It does not make sense to have some directives available after the class was created or even at the instance level (e.g. doing ``self.parameter([1, 2, 3])`` does not make sense). So here, we intercept those directives out of the namespace before the class is constructed. ''' directives = [ 'parameter', 'variable', 'bind', 'run_before', 'run_after', 'require_deps', 'required', 'deferrable', 'sanity_function', 'final', 'performance_function' ] for b in directives: namespace.pop(b, None) # Reset the external functions imported through the bind directive. 
for item in namespace.pop('_rfm_ext_bound'): namespace.reset(item) return super().__new__(metacls, name, bases, dict(namespace), **kwargs) def __init__(cls, name, bases, namespace, **kwargs): super().__init__(name, bases, namespace, **kwargs) # Create a set with the attribute names already in use. cls._rfm_dir = set() for base in (b for b in bases if hasattr(b, '_rfm_dir')): cls._rfm_dir.update(base._rfm_dir) used_attribute_names = set(cls._rfm_dir) # Build the var space and extend the target namespace variables.VarSpace(cls, used_attribute_names) used_attribute_names.update(cls._rfm_var_space.vars) # Build the parameter space parameters.ParamSpace(cls, used_attribute_names) # Update used names set with the local __dict__ cls._rfm_dir.update(cls.__dict__) # Update the hook registry with the bases for base in cls._rfm_bases: cls._rfm_hook_registry.update( base._rfm_hook_registry, denied_hooks=namespace ) # Search the bases if no local sanity functions exist. if '_rfm_sanity' not in namespace: for base in cls._rfm_bases: if hasattr(base, '_rfm_sanity'): cls._rfm_sanity = getattr(base, '_rfm_sanity') if cls._rfm_sanity.__name__ in namespace: raise ReframeSyntaxError( f'{cls.__qualname__!r} overrides the candidate ' f'sanity function ' f'{cls._rfm_sanity.__qualname__!r} without ' f'defining an alternative' ) break # Update the performance function dict with the bases. for base in cls._rfm_bases: for k, v in base._rfm_perf_fns.items(): if k not in namespace: try: cls._rfm_perf_fns[k] = v except KeyError: '''Performance function overridden by other class''' # Add the final functions from its parents cls._rfm_final_methods.update( *(b._rfm_final_methods for b in cls._rfm_bases) ) if getattr(cls, '_rfm_override_final', None): return for b in cls._rfm_bases: for key in b._rfm_final_methods: if key in namespace and callable(namespace[key]): msg = (f"'{cls.__qualname__}.{key}' attempts to " f"override final method " f"'{b.__qualname__}.{key}'; " f"you should use the pipeline hooks instead") raise ReframeSyntaxError(msg) def __call__(cls, *args, **kwargs): '''Inject parameter and variable spaces during object construction. When a class is instantiated, this method intercepts the arguments associated to the parameter and variable spaces. This prevents both :func:`__new__` and :func:`__init__` methods from ever seing these arguments. The parameter and variable spaces are injected into the object after construction and before initialization. ''' # Intercept constructor arguments _rfm_use_params = kwargs.pop('_rfm_use_params', False) obj = cls.__new__(cls, *args, **kwargs) # Insert the var & param spaces cls._rfm_var_space.inject(obj, cls) cls._rfm_param_space.inject(obj, cls, _rfm_use_params) obj.__init__(*args, **kwargs) return obj def __getattribute__(cls, name): '''Attribute lookup method for custom class attributes. ReFrame test variables are descriptors injected at the class level. If a variable descriptor has already been injected into the class, do not return the descriptor object and return the default value associated with that variable instead. .. warning:: .. versionchanged:: 3.7.0 Prior versions exposed the variable descriptor object if this was already present in the class, instead of returning the variable's default value. ''' try: var_space = super().__getattribute__('_rfm_var_space') except AttributeError: var_space = None # If the variable is already injected, delegate lookup to __getattr__. 
if var_space and name in var_space.injected_vars: raise AttributeError('delegate variable lookup to __getattr__') # Default back to the base method if no special treatment required. return super().__getattribute__(name) def __getattr__(cls, name): '''Backup attribute lookup method into custom namespaces. Some ReFrame built-in types are stored under their own sub-namespaces. This method will perform an attribute lookup on these sub-namespaces if a call to the default :func:`__getattribute__` method fails to retrieve the requested class attribute. ''' try: var_space = super().__getattribute__('_rfm_var_space') return var_space.vars[name] except AttributeError: '''Catch early access attempt to the variable space.''' except KeyError: '''Requested name not in variable space.''' try: param_space = super().__getattribute__('_rfm_param_space') return param_space.params[name] except AttributeError: '''Catch early access attempt to the parameter space.''' except KeyError: '''Requested name not in parameter space.''' raise AttributeError( f'class {cls.__qualname__!r} has no attribute {name!r}' ) from None def setvar(cls, name, value): '''Set the value of a variable. :param name: The name of the variable. :param value: The value of the variable. :returns: :class:`True` if the variable was set. A variable will *not* be set, if it does not exist or when an attempt is made to set it with its underlying descriptor. This happens during the variable injection time and it should be delegated to the class' :func:`__setattr__` method. :raises ReframeSyntaxError: If an attempt is made to override a variable with a descriptor other than its underlying one. ''' try: var_space = super().__getattribute__('_rfm_var_space') if name in var_space: if not hasattr(value, '__get__'): var_space[name].define(value) return True elif var_space[name].field is not value: desc = '.'.join([cls.__qualname__, name]) raise ReframeSyntaxError( f'cannot override variable descriptor {desc!r}' ) else: # Variable is being injected return False except AttributeError: '''Catch early access attempt to the variable space.''' return False def __setattr__(cls, name, value): '''Handle the special treatment required for variables and parameters. A variable's default value can be updated when accessed as a regular class attribute. This behavior does not apply when the assigned value is a descriptor object. In that case, the task of setting the value is delegated to the base :func:`__setattr__` (this is to comply with standard Python behavior). However, since the variables are already descriptors which are injected during class instantiation, we disallow any attempt to override this descriptor (since it would be silently re-overridden in any case). Altering the value of a parameter when accessed as a class attribute is not allowed. This would break the parameter space internals. ''' # Try to treat `name` as variable if cls.setvar(name, value): return # Try to treat `name` as a parameter try: # Catch attempts to override a test parameter param_space = super().__getattribute__('_rfm_param_space') if name in param_space.params: raise ReframeSyntaxError(f'cannot override parameter {name!r}') except AttributeError: '''Catch early access attempt to the parameter space.''' # Treat `name` as normal class attribute super().__setattr__(name, value) @property def param_space(cls): ''' Make the parameter space available as read-only.''' return cls._rfm_param_space def is_abstract(cls): '''Check if the class is an abstract test. 
This is the case when some parameters are undefined, which results in the length of the parameter space being 0. :return: bool indicating whether the test has undefined parameters. :meta private: ''' return len(cls.param_space) == 0
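The ``__call__`` method shown in the file above injects the variable and parameter spaces after ``__new__`` but before ``__init__``. Below is a minimal standalone sketch of that ordering, with nothing ReFrame-specific; the names (``_Injecting``, ``Check``, ``_injected_defaults``) are made up for illustration.

class _Injecting(type):
    def __call__(cls, *args, **kwargs):
        obj = cls.__new__(cls, *args, **kwargs)

        # Inject attributes before __init__ runs, so __init__ can rely on them.
        for name, value in getattr(cls, '_injected_defaults', {}).items():
            setattr(obj, name, value)

        obj.__init__(*args, **kwargs)
        return obj


class Check(metaclass=_Injecting):
    _injected_defaults = {'num_tasks': 1}

    def __init__(self):
        # The injected attribute is already available during initialization.
        print('num_tasks =', self.num_tasks)


Check()   # prints: num_tasks = 1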
__getattr__
Fallback attribute lookup into the custom namespaces. Some ReFrame built-in types are stored under their own sub-namespaces. This method performs an attribute lookup on these sub-namespaces when a call to the default :func:`__getattribute__` method fails to retrieve the requested class attribute.
# Copyright 2016-2021 Swiss National Supercomputing Centre (CSCS/ETH Zurich) # ReFrame Project Developers. See the top-level LICENSE file for details. # # SPDX-License-Identifier: BSD-3-Clause # # Meta-class for creating regression tests. # import functools import types import reframe.core.namespaces as namespaces import reframe.core.parameters as parameters import reframe.core.variables as variables import reframe.core.hooks as hooks import reframe.utility as utils from reframe.core.exceptions import ReframeSyntaxError from reframe.core.deferrable import deferrable, _DeferredPerformanceExpression class RegressionTestMeta(type): class MetaNamespace(namespaces.LocalNamespace): '''Custom namespace to control the cls attribute assignment. Regular Python class attributes can be overridden by either parameters or variables respecting the order of execution. A variable or a parameter may not be declared more than once in the same class body. Overriding a variable with a parameter or the other way around has an undefined behavior. A variable's value may be updated multiple times within the same class body. A parameter's value cannot be updated more than once within the same class body. ''' def __setitem__(self, key, value): if isinstance(value, variables.TestVar): # Insert the attribute in the variable namespace try: self['_rfm_local_var_space'][key] = value value.__set_name__(self, key) except KeyError: raise ReframeSyntaxError( f'variable {key!r} is already declared' ) from None # Override the regular class attribute (if present) and return self._namespace.pop(key, None) return elif isinstance(value, parameters.TestParam): # Insert the attribute in the parameter namespace try: self['_rfm_local_param_space'][key] = value except KeyError: raise ReframeSyntaxError( f'parameter {key!r} is already declared in this class' ) from None # Override the regular class attribute (if present) and return self._namespace.pop(key, None) return elif key in self['_rfm_local_param_space']: raise ReframeSyntaxError( f'cannot override parameter {key!r}' ) else: # Insert the items manually to overide the namespace clash # check from the base namespace. self._namespace[key] = value # Register functions decorated with either @sanity_function or # @performance_variables or @performance_function decorators. if hasattr(value, '_rfm_sanity_fn'): try: super().__setitem__('_rfm_sanity', value) except KeyError: raise ReframeSyntaxError( 'the @sanity_function decorator can only be used ' 'once in the class body' ) from None elif hasattr(value, '_rfm_perf_key'): try: self['_rfm_perf_fns'][key] = value except KeyError: raise ReframeSyntaxError( f'the performance function {key!r} has already been ' f'defined in this class' ) from None # Register the final methods if hasattr(value, '_rfm_final'): self['_rfm_final_methods'].add(key) # Register the hooks - if a value does not meet the conditions # it will be simply ignored self['_rfm_hook_registry'].add(value) def __getitem__(self, key): '''Expose and control access to the local namespaces. Variables may only be retrieved if their value has been previously set. Accessing a parameter in the class body is disallowed (the actual test parameter is set during the class instantiation). 
''' try: return super().__getitem__(key) except KeyError as err: try: # Handle variable access return self['_rfm_local_var_space'][key] except KeyError: # Handle parameter access if key in self['_rfm_local_param_space']: raise ReframeSyntaxError( 'accessing a test parameter from the class ' 'body is disallowed' ) from None else: # As the last resource, look if key is a variable in # any of the base classes. If so, make its value # available in the current class' namespace. for b in self['_rfm_bases']: if key in b._rfm_var_space: # Store a deep-copy of the variable's # value and return. v = b._rfm_var_space[key].default_value self._namespace[key] = v return self._namespace[key] # If 'key' is neither a variable nor a parameter, # raise the exception from the base __getitem__. raise err from None def reset(self, key): '''Reset an item to rerun it through the __setitem__ logic.''' self[key] = self[key] class WrappedFunction: '''Descriptor to wrap a free function as a bound-method. The free function object is wrapped by the constructor. Instances of this class should be inserted into the namespace of the target class with the desired name for the bound-method. Since this class is a descriptor, the `__get__` method will return the right bound-method when accessed from a class instance. :meta private: ''' __slots__ = ('fn') def __init__(self, fn, name=None): @functools.wraps(fn) def _fn(*args, **kwargs): return fn(*args, **kwargs) self.fn = _fn if name: self.fn.__name__ = name def __get__(self, obj, objtype=None): if objtype is None: objtype = type(obj) self.fn.__qualname__ = '.'.join( [objtype.__qualname__, self.fn.__name__] ) if obj is None: return self.fn return types.MethodType(self.fn, obj) def __call__(self, *args, **kwargs): return self.fn(*args, **kwargs) def __getattr__(self, name): if name in self.__slots__: return super().__getattr__(name) else: return getattr(self.fn, name) def __setattr__(self, name, value): if name in self.__slots__: super().__setattr__(name, value) else: setattr(self.fn, name, value) @classmethod def __prepare__(metacls, name, bases, **kwargs): namespace = super().__prepare__(name, bases, **kwargs) # Keep reference to the bases inside the namespace namespace['_rfm_bases'] = [ b for b in bases if hasattr(b, '_rfm_var_space') ] # Regression test parameter space defined at the class level local_param_space = namespaces.LocalNamespace() namespace['_rfm_local_param_space'] = local_param_space # Directive to insert a regression test parameter directly in the # class body as: `P0 = parameter([0,1,2,3])`. namespace['parameter'] = parameters.TestParam # Regression test var space defined at the class level local_var_space = namespaces.LocalNamespace() namespace['_rfm_local_var_space'] = local_var_space # Directives to add/modify a regression test variable namespace['variable'] = variables.TestVar namespace['required'] = variables.Undefined # Utility decorators namespace['_rfm_ext_bound'] = set() def bind(fn, name=None): '''Directive to bind a free function to a class. See online docs for more information. .. note:: Functions bound using this directive must be re-inspected after the class body execution has completed. This directive attaches the external method into the class namespace and returns the associated instance of the :class:`WrappedFunction`. 
However, this instance may be further modified by other ReFrame builtins such as :func:`run_before`, :func:`run_after`, :func:`final` and so on after it was added to the namespace, which would bypass the logic implemented in the :func:`__setitem__` method from the :class:`MetaNamespace` class. Hence, we track the items set by this directive in the ``_rfm_ext_bound`` set, so they can be later re-inspected. ''' inst = metacls.WrappedFunction(fn, name) namespace[inst.__name__] = inst # Track the imported external functions namespace['_rfm_ext_bound'].add(inst.__name__) return inst def final(fn): '''Indicate that a function is final and cannot be overridden.''' fn._rfm_final = True return fn namespace['bind'] = bind namespace['final'] = final namespace['_rfm_final_methods'] = set() # Hook-related functionality def run_before(stage): '''Decorator for attaching a test method to a given stage. See online docs for more information. ''' return hooks.attach_to('pre_' + stage) def run_after(stage): '''Decorator for attaching a test method to a given stage. See online docs for more information. ''' return hooks.attach_to('post_' + stage) namespace['run_before'] = run_before namespace['run_after'] = run_after namespace['require_deps'] = hooks.require_deps namespace['_rfm_hook_registry'] = hooks.HookRegistry() # Machinery to add a sanity function def sanity_function(fn): '''Mark a function as the test's sanity function. Decorated functions must be unary and they will be converted into deferred expressions. ''' _def_fn = deferrable(fn) setattr(_def_fn, '_rfm_sanity_fn', True) return _def_fn namespace['sanity_function'] = sanity_function namespace['deferrable'] = deferrable # Machinery to add performance functions def performance_function(units, *, perf_key=None): '''Decorate a function to extract a performance variable. The ``units`` argument indicates the units of the performance variable to be extracted. The ``perf_key`` optional arg will be used as the name of the performance variable. If not provided, the function name will be used as the performance variable name. ''' if not isinstance(units, str): raise TypeError('performance units must be a string') if perf_key and not isinstance(perf_key, str): raise TypeError("'perf_key' must be a string") def _deco_wrapper(func): if not utils.is_trivially_callable(func, non_def_args=1): raise TypeError( f'performance function {func.__name__!r} has more ' f'than one argument without a default value' ) @functools.wraps(func) def _perf_fn(*args, **kwargs): return _DeferredPerformanceExpression( func, units, *args, **kwargs ) _perf_key = perf_key if perf_key else func.__name__ setattr(_perf_fn, '_rfm_perf_key', _perf_key) return _perf_fn return _deco_wrapper namespace['performance_function'] = performance_function namespace['_rfm_perf_fns'] = namespaces.LocalNamespace() return metacls.MetaNamespace(namespace) def __new__(metacls, name, bases, namespace, **kwargs): '''Remove directives from the class namespace. It does not make sense to have some directives available after the class was created or even at the instance level (e.g. doing ``self.parameter([1, 2, 3])`` does not make sense). So here, we intercept those directives out of the namespace before the class is constructed. ''' directives = [ 'parameter', 'variable', 'bind', 'run_before', 'run_after', 'require_deps', 'required', 'deferrable', 'sanity_function', 'final', 'performance_function' ] for b in directives: namespace.pop(b, None) # Reset the external functions imported through the bind directive. 
for item in namespace.pop('_rfm_ext_bound'): namespace.reset(item) return super().__new__(metacls, name, bases, dict(namespace), **kwargs) def __init__(cls, name, bases, namespace, **kwargs): super().__init__(name, bases, namespace, **kwargs) # Create a set with the attribute names already in use. cls._rfm_dir = set() for base in (b for b in bases if hasattr(b, '_rfm_dir')): cls._rfm_dir.update(base._rfm_dir) used_attribute_names = set(cls._rfm_dir) # Build the var space and extend the target namespace variables.VarSpace(cls, used_attribute_names) used_attribute_names.update(cls._rfm_var_space.vars) # Build the parameter space parameters.ParamSpace(cls, used_attribute_names) # Update used names set with the local __dict__ cls._rfm_dir.update(cls.__dict__) # Update the hook registry with the bases for base in cls._rfm_bases: cls._rfm_hook_registry.update( base._rfm_hook_registry, denied_hooks=namespace ) # Search the bases if no local sanity functions exist. if '_rfm_sanity' not in namespace: for base in cls._rfm_bases: if hasattr(base, '_rfm_sanity'): cls._rfm_sanity = getattr(base, '_rfm_sanity') if cls._rfm_sanity.__name__ in namespace: raise ReframeSyntaxError( f'{cls.__qualname__!r} overrides the candidate ' f'sanity function ' f'{cls._rfm_sanity.__qualname__!r} without ' f'defining an alternative' ) break # Update the performance function dict with the bases. for base in cls._rfm_bases: for k, v in base._rfm_perf_fns.items(): if k not in namespace: try: cls._rfm_perf_fns[k] = v except KeyError: '''Performance function overridden by other class''' # Add the final functions from its parents cls._rfm_final_methods.update( *(b._rfm_final_methods for b in cls._rfm_bases) ) if getattr(cls, '_rfm_override_final', None): return for b in cls._rfm_bases: for key in b._rfm_final_methods: if key in namespace and callable(namespace[key]): msg = (f"'{cls.__qualname__}.{key}' attempts to " f"override final method " f"'{b.__qualname__}.{key}'; " f"you should use the pipeline hooks instead") raise ReframeSyntaxError(msg) def __call__(cls, *args, **kwargs): '''Inject parameter and variable spaces during object construction. When a class is instantiated, this method intercepts the arguments associated to the parameter and variable spaces. This prevents both :func:`__new__` and :func:`__init__` methods from ever seing these arguments. The parameter and variable spaces are injected into the object after construction and before initialization. ''' # Intercept constructor arguments _rfm_use_params = kwargs.pop('_rfm_use_params', False) obj = cls.__new__(cls, *args, **kwargs) # Insert the var & param spaces cls._rfm_var_space.inject(obj, cls) cls._rfm_param_space.inject(obj, cls, _rfm_use_params) obj.__init__(*args, **kwargs) return obj def __getattribute__(cls, name): '''Attribute lookup method for custom class attributes. ReFrame test variables are descriptors injected at the class level. If a variable descriptor has already been injected into the class, do not return the descriptor object and return the default value associated with that variable instead. .. warning:: .. versionchanged:: 3.7.0 Prior versions exposed the variable descriptor object if this was already present in the class, instead of returning the variable's default value. ''' try: var_space = super().__getattribute__('_rfm_var_space') except AttributeError: var_space = None # If the variable is already injected, delegate lookup to __getattr__. 
if var_space and name in var_space.injected_vars: raise AttributeError('delegate variable lookup to __getattr__') # Default back to the base method if no special treatment required. return super().__getattribute__(name) # MASKED: __getattr__ function (lines 473-500) def setvar(cls, name, value): '''Set the value of a variable. :param name: The name of the variable. :param value: The value of the variable. :returns: :class:`True` if the variable was set. A variable will *not* be set, if it does not exist or when an attempt is made to set it with its underlying descriptor. This happens during the variable injection time and it should be delegated to the class' :func:`__setattr__` method. :raises ReframeSyntaxError: If an attempt is made to override a variable with a descriptor other than its underlying one. ''' try: var_space = super().__getattribute__('_rfm_var_space') if name in var_space: if not hasattr(value, '__get__'): var_space[name].define(value) return True elif var_space[name].field is not value: desc = '.'.join([cls.__qualname__, name]) raise ReframeSyntaxError( f'cannot override variable descriptor {desc!r}' ) else: # Variable is being injected return False except AttributeError: '''Catch early access attempt to the variable space.''' return False def __setattr__(cls, name, value): '''Handle the special treatment required for variables and parameters. A variable's default value can be updated when accessed as a regular class attribute. This behavior does not apply when the assigned value is a descriptor object. In that case, the task of setting the value is delegated to the base :func:`__setattr__` (this is to comply with standard Python behavior). However, since the variables are already descriptors which are injected during class instantiation, we disallow any attempt to override this descriptor (since it would be silently re-overridden in any case). Altering the value of a parameter when accessed as a class attribute is not allowed. This would break the parameter space internals. ''' # Try to treat `name` as variable if cls.setvar(name, value): return # Try to treat `name` as a parameter try: # Catch attempts to override a test parameter param_space = super().__getattribute__('_rfm_param_space') if name in param_space.params: raise ReframeSyntaxError(f'cannot override parameter {name!r}') except AttributeError: '''Catch early access attempt to the parameter space.''' # Treat `name` as normal class attribute super().__setattr__(name, value) @property def param_space(cls): ''' Make the parameter space available as read-only.''' return cls._rfm_param_space def is_abstract(cls): '''Check if the class is an abstract test. This is the case when some parameters are undefined, which results in the length of the parameter space being 0. :return: bool indicating whether the test has undefined parameters. :meta private: ''' return len(cls.param_space) == 0
def __getattr__(cls, name): '''Backup attribute lookup method into custom namespaces. Some ReFrame built-in types are stored under their own sub-namespaces. This method will perform an attribute lookup on these sub-namespaces if a call to the default :func:`__getattribute__` method fails to retrieve the requested class attribute. ''' try: var_space = super().__getattribute__('_rfm_var_space') return var_space.vars[name] except AttributeError: '''Catch early access attempt to the variable space.''' except KeyError: '''Requested name not in variable space.''' try: param_space = super().__getattribute__('_rfm_param_space') return param_space.params[name] except AttributeError: '''Catch early access attempt to the parameter space.''' except KeyError: '''Requested name not in parameter space.''' raise AttributeError( f'class {cls.__qualname__!r} has no attribute {name!r}' ) from None
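One detail worth noting in the implementation above is the final ``raise ... from None``: it suppresses implicit exception chaining, so the internal ``KeyError`` or ``AttributeError`` does not clutter the traceback that a user sees. A tiny, self-contained illustration (the ``lookup`` helper is hypothetical):

def lookup(table, name):
    try:
        return table[name]
    except KeyError:
        # `from None` hides the KeyError from the reported traceback.
        raise AttributeError(f'no attribute {name!r}') from None


try:
    lookup({'x': 1}, 'y')
except AttributeError as err:
    print(err)                       # no attribute 'y'
    print(err.__suppress_context__)  # True: the KeyError is not chained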
473
500
# Copyright 2016-2021 Swiss National Supercomputing Centre (CSCS/ETH Zurich) # ReFrame Project Developers. See the top-level LICENSE file for details. # # SPDX-License-Identifier: BSD-3-Clause # # Meta-class for creating regression tests. # import functools import types import reframe.core.namespaces as namespaces import reframe.core.parameters as parameters import reframe.core.variables as variables import reframe.core.hooks as hooks import reframe.utility as utils from reframe.core.exceptions import ReframeSyntaxError from reframe.core.deferrable import deferrable, _DeferredPerformanceExpression class RegressionTestMeta(type): class MetaNamespace(namespaces.LocalNamespace): '''Custom namespace to control the cls attribute assignment. Regular Python class attributes can be overridden by either parameters or variables respecting the order of execution. A variable or a parameter may not be declared more than once in the same class body. Overriding a variable with a parameter or the other way around has an undefined behavior. A variable's value may be updated multiple times within the same class body. A parameter's value cannot be updated more than once within the same class body. ''' def __setitem__(self, key, value): if isinstance(value, variables.TestVar): # Insert the attribute in the variable namespace try: self['_rfm_local_var_space'][key] = value value.__set_name__(self, key) except KeyError: raise ReframeSyntaxError( f'variable {key!r} is already declared' ) from None # Override the regular class attribute (if present) and return self._namespace.pop(key, None) return elif isinstance(value, parameters.TestParam): # Insert the attribute in the parameter namespace try: self['_rfm_local_param_space'][key] = value except KeyError: raise ReframeSyntaxError( f'parameter {key!r} is already declared in this class' ) from None # Override the regular class attribute (if present) and return self._namespace.pop(key, None) return elif key in self['_rfm_local_param_space']: raise ReframeSyntaxError( f'cannot override parameter {key!r}' ) else: # Insert the items manually to overide the namespace clash # check from the base namespace. self._namespace[key] = value # Register functions decorated with either @sanity_function or # @performance_variables or @performance_function decorators. if hasattr(value, '_rfm_sanity_fn'): try: super().__setitem__('_rfm_sanity', value) except KeyError: raise ReframeSyntaxError( 'the @sanity_function decorator can only be used ' 'once in the class body' ) from None elif hasattr(value, '_rfm_perf_key'): try: self['_rfm_perf_fns'][key] = value except KeyError: raise ReframeSyntaxError( f'the performance function {key!r} has already been ' f'defined in this class' ) from None # Register the final methods if hasattr(value, '_rfm_final'): self['_rfm_final_methods'].add(key) # Register the hooks - if a value does not meet the conditions # it will be simply ignored self['_rfm_hook_registry'].add(value) def __getitem__(self, key): '''Expose and control access to the local namespaces. Variables may only be retrieved if their value has been previously set. Accessing a parameter in the class body is disallowed (the actual test parameter is set during the class instantiation). 
''' try: return super().__getitem__(key) except KeyError as err: try: # Handle variable access return self['_rfm_local_var_space'][key] except KeyError: # Handle parameter access if key in self['_rfm_local_param_space']: raise ReframeSyntaxError( 'accessing a test parameter from the class ' 'body is disallowed' ) from None else: # As the last resource, look if key is a variable in # any of the base classes. If so, make its value # available in the current class' namespace. for b in self['_rfm_bases']: if key in b._rfm_var_space: # Store a deep-copy of the variable's # value and return. v = b._rfm_var_space[key].default_value self._namespace[key] = v return self._namespace[key] # If 'key' is neither a variable nor a parameter, # raise the exception from the base __getitem__. raise err from None def reset(self, key): '''Reset an item to rerun it through the __setitem__ logic.''' self[key] = self[key] class WrappedFunction: '''Descriptor to wrap a free function as a bound-method. The free function object is wrapped by the constructor. Instances of this class should be inserted into the namespace of the target class with the desired name for the bound-method. Since this class is a descriptor, the `__get__` method will return the right bound-method when accessed from a class instance. :meta private: ''' __slots__ = ('fn') def __init__(self, fn, name=None): @functools.wraps(fn) def _fn(*args, **kwargs): return fn(*args, **kwargs) self.fn = _fn if name: self.fn.__name__ = name def __get__(self, obj, objtype=None): if objtype is None: objtype = type(obj) self.fn.__qualname__ = '.'.join( [objtype.__qualname__, self.fn.__name__] ) if obj is None: return self.fn return types.MethodType(self.fn, obj) def __call__(self, *args, **kwargs): return self.fn(*args, **kwargs) def __getattr__(self, name): if name in self.__slots__: return super().__getattr__(name) else: return getattr(self.fn, name) def __setattr__(self, name, value): if name in self.__slots__: super().__setattr__(name, value) else: setattr(self.fn, name, value) @classmethod def __prepare__(metacls, name, bases, **kwargs): namespace = super().__prepare__(name, bases, **kwargs) # Keep reference to the bases inside the namespace namespace['_rfm_bases'] = [ b for b in bases if hasattr(b, '_rfm_var_space') ] # Regression test parameter space defined at the class level local_param_space = namespaces.LocalNamespace() namespace['_rfm_local_param_space'] = local_param_space # Directive to insert a regression test parameter directly in the # class body as: `P0 = parameter([0,1,2,3])`. namespace['parameter'] = parameters.TestParam # Regression test var space defined at the class level local_var_space = namespaces.LocalNamespace() namespace['_rfm_local_var_space'] = local_var_space # Directives to add/modify a regression test variable namespace['variable'] = variables.TestVar namespace['required'] = variables.Undefined # Utility decorators namespace['_rfm_ext_bound'] = set() def bind(fn, name=None): '''Directive to bind a free function to a class. See online docs for more information. .. note:: Functions bound using this directive must be re-inspected after the class body execution has completed. This directive attaches the external method into the class namespace and returns the associated instance of the :class:`WrappedFunction`. 
However, this instance may be further modified by other ReFrame builtins such as :func:`run_before`, :func:`run_after`, :func:`final` and so on after it was added to the namespace, which would bypass the logic implemented in the :func:`__setitem__` method from the :class:`MetaNamespace` class. Hence, we track the items set by this directive in the ``_rfm_ext_bound`` set, so they can be later re-inspected. ''' inst = metacls.WrappedFunction(fn, name) namespace[inst.__name__] = inst # Track the imported external functions namespace['_rfm_ext_bound'].add(inst.__name__) return inst def final(fn): '''Indicate that a function is final and cannot be overridden.''' fn._rfm_final = True return fn namespace['bind'] = bind namespace['final'] = final namespace['_rfm_final_methods'] = set() # Hook-related functionality def run_before(stage): '''Decorator for attaching a test method to a given stage. See online docs for more information. ''' return hooks.attach_to('pre_' + stage) def run_after(stage): '''Decorator for attaching a test method to a given stage. See online docs for more information. ''' return hooks.attach_to('post_' + stage) namespace['run_before'] = run_before namespace['run_after'] = run_after namespace['require_deps'] = hooks.require_deps namespace['_rfm_hook_registry'] = hooks.HookRegistry() # Machinery to add a sanity function def sanity_function(fn): '''Mark a function as the test's sanity function. Decorated functions must be unary and they will be converted into deferred expressions. ''' _def_fn = deferrable(fn) setattr(_def_fn, '_rfm_sanity_fn', True) return _def_fn namespace['sanity_function'] = sanity_function namespace['deferrable'] = deferrable # Machinery to add performance functions def performance_function(units, *, perf_key=None): '''Decorate a function to extract a performance variable. The ``units`` argument indicates the units of the performance variable to be extracted. The ``perf_key`` optional arg will be used as the name of the performance variable. If not provided, the function name will be used as the performance variable name. ''' if not isinstance(units, str): raise TypeError('performance units must be a string') if perf_key and not isinstance(perf_key, str): raise TypeError("'perf_key' must be a string") def _deco_wrapper(func): if not utils.is_trivially_callable(func, non_def_args=1): raise TypeError( f'performance function {func.__name__!r} has more ' f'than one argument without a default value' ) @functools.wraps(func) def _perf_fn(*args, **kwargs): return _DeferredPerformanceExpression( func, units, *args, **kwargs ) _perf_key = perf_key if perf_key else func.__name__ setattr(_perf_fn, '_rfm_perf_key', _perf_key) return _perf_fn return _deco_wrapper namespace['performance_function'] = performance_function namespace['_rfm_perf_fns'] = namespaces.LocalNamespace() return metacls.MetaNamespace(namespace) def __new__(metacls, name, bases, namespace, **kwargs): '''Remove directives from the class namespace. It does not make sense to have some directives available after the class was created or even at the instance level (e.g. doing ``self.parameter([1, 2, 3])`` does not make sense). So here, we intercept those directives out of the namespace before the class is constructed. ''' directives = [ 'parameter', 'variable', 'bind', 'run_before', 'run_after', 'require_deps', 'required', 'deferrable', 'sanity_function', 'final', 'performance_function' ] for b in directives: namespace.pop(b, None) # Reset the external functions imported through the bind directive. 
for item in namespace.pop('_rfm_ext_bound'): namespace.reset(item) return super().__new__(metacls, name, bases, dict(namespace), **kwargs) def __init__(cls, name, bases, namespace, **kwargs): super().__init__(name, bases, namespace, **kwargs) # Create a set with the attribute names already in use. cls._rfm_dir = set() for base in (b for b in bases if hasattr(b, '_rfm_dir')): cls._rfm_dir.update(base._rfm_dir) used_attribute_names = set(cls._rfm_dir) # Build the var space and extend the target namespace variables.VarSpace(cls, used_attribute_names) used_attribute_names.update(cls._rfm_var_space.vars) # Build the parameter space parameters.ParamSpace(cls, used_attribute_names) # Update used names set with the local __dict__ cls._rfm_dir.update(cls.__dict__) # Update the hook registry with the bases for base in cls._rfm_bases: cls._rfm_hook_registry.update( base._rfm_hook_registry, denied_hooks=namespace ) # Search the bases if no local sanity functions exist. if '_rfm_sanity' not in namespace: for base in cls._rfm_bases: if hasattr(base, '_rfm_sanity'): cls._rfm_sanity = getattr(base, '_rfm_sanity') if cls._rfm_sanity.__name__ in namespace: raise ReframeSyntaxError( f'{cls.__qualname__!r} overrides the candidate ' f'sanity function ' f'{cls._rfm_sanity.__qualname__!r} without ' f'defining an alternative' ) break # Update the performance function dict with the bases. for base in cls._rfm_bases: for k, v in base._rfm_perf_fns.items(): if k not in namespace: try: cls._rfm_perf_fns[k] = v except KeyError: '''Performance function overridden by other class''' # Add the final functions from its parents cls._rfm_final_methods.update( *(b._rfm_final_methods for b in cls._rfm_bases) ) if getattr(cls, '_rfm_override_final', None): return for b in cls._rfm_bases: for key in b._rfm_final_methods: if key in namespace and callable(namespace[key]): msg = (f"'{cls.__qualname__}.{key}' attempts to " f"override final method " f"'{b.__qualname__}.{key}'; " f"you should use the pipeline hooks instead") raise ReframeSyntaxError(msg) def __call__(cls, *args, **kwargs): '''Inject parameter and variable spaces during object construction. When a class is instantiated, this method intercepts the arguments associated to the parameter and variable spaces. This prevents both :func:`__new__` and :func:`__init__` methods from ever seing these arguments. The parameter and variable spaces are injected into the object after construction and before initialization. ''' # Intercept constructor arguments _rfm_use_params = kwargs.pop('_rfm_use_params', False) obj = cls.__new__(cls, *args, **kwargs) # Insert the var & param spaces cls._rfm_var_space.inject(obj, cls) cls._rfm_param_space.inject(obj, cls, _rfm_use_params) obj.__init__(*args, **kwargs) return obj def __getattribute__(cls, name): '''Attribute lookup method for custom class attributes. ReFrame test variables are descriptors injected at the class level. If a variable descriptor has already been injected into the class, do not return the descriptor object and return the default value associated with that variable instead. .. warning:: .. versionchanged:: 3.7.0 Prior versions exposed the variable descriptor object if this was already present in the class, instead of returning the variable's default value. ''' try: var_space = super().__getattribute__('_rfm_var_space') except AttributeError: var_space = None # If the variable is already injected, delegate lookup to __getattr__. 
if var_space and name in var_space.injected_vars: raise AttributeError('delegate variable lookup to __getattr__') # Default back to the base method if no special treatment required. return super().__getattribute__(name) def __getattr__(cls, name): '''Backup attribute lookup method into custom namespaces. Some ReFrame built-in types are stored under their own sub-namespaces. This method will perform an attribute lookup on these sub-namespaces if a call to the default :func:`__getattribute__` method fails to retrieve the requested class attribute. ''' try: var_space = super().__getattribute__('_rfm_var_space') return var_space.vars[name] except AttributeError: '''Catch early access attempt to the variable space.''' except KeyError: '''Requested name not in variable space.''' try: param_space = super().__getattribute__('_rfm_param_space') return param_space.params[name] except AttributeError: '''Catch early access attempt to the parameter space.''' except KeyError: '''Requested name not in parameter space.''' raise AttributeError( f'class {cls.__qualname__!r} has no attribute {name!r}' ) from None def setvar(cls, name, value): '''Set the value of a variable. :param name: The name of the variable. :param value: The value of the variable. :returns: :class:`True` if the variable was set. A variable will *not* be set, if it does not exist or when an attempt is made to set it with its underlying descriptor. This happens during the variable injection time and it should be delegated to the class' :func:`__setattr__` method. :raises ReframeSyntaxError: If an attempt is made to override a variable with a descriptor other than its underlying one. ''' try: var_space = super().__getattribute__('_rfm_var_space') if name in var_space: if not hasattr(value, '__get__'): var_space[name].define(value) return True elif var_space[name].field is not value: desc = '.'.join([cls.__qualname__, name]) raise ReframeSyntaxError( f'cannot override variable descriptor {desc!r}' ) else: # Variable is being injected return False except AttributeError: '''Catch early access attempt to the variable space.''' return False def __setattr__(cls, name, value): '''Handle the special treatment required for variables and parameters. A variable's default value can be updated when accessed as a regular class attribute. This behavior does not apply when the assigned value is a descriptor object. In that case, the task of setting the value is delegated to the base :func:`__setattr__` (this is to comply with standard Python behavior). However, since the variables are already descriptors which are injected during class instantiation, we disallow any attempt to override this descriptor (since it would be silently re-overridden in any case). Altering the value of a parameter when accessed as a class attribute is not allowed. This would break the parameter space internals. ''' # Try to treat `name` as variable if cls.setvar(name, value): return # Try to treat `name` as a parameter try: # Catch attempts to override a test parameter param_space = super().__getattribute__('_rfm_param_space') if name in param_space.params: raise ReframeSyntaxError(f'cannot override parameter {name!r}') except AttributeError: '''Catch early access attempt to the parameter space.''' # Treat `name` as normal class attribute super().__setattr__(name, value) @property def param_space(cls): ''' Make the parameter space available as read-only.''' return cls._rfm_param_space def is_abstract(cls): '''Check if the class is an abstract test. 
This is the case when some parameters are undefined, which results in the length of the parameter space being 0. :return: bool indicating whether the test has undefined parameters. :meta private: ''' return len(cls.param_space) == 0
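The ``setvar``/``__setattr__`` pair in the file above lets a plain class-attribute assignment update a variable's default, while descriptor assignments fall through to the normal behaviour. A simplified, self-contained sketch of that idea, ignoring inheritance and the descriptor-override check (``_VarMeta``, ``Base`` and ``_vars`` are illustrative names):

class _VarMeta(type):
    def __setattr__(cls, name, value):
        var_space = cls.__dict__.get('_vars', {})
        if name in var_space and not hasattr(value, '__get__'):
            # Plain values update the variable's default instead of
            # rebinding the class attribute.
            var_space[name] = value
            return

        super().__setattr__(name, value)


class Base(metaclass=_VarMeta):
    _vars = {'timeout': 60}


Base.timeout = 120
print(Base._vars['timeout'])        # 120
print('timeout' in Base.__dict__)   # False: no class attribute was created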
performance_function
Decorate a function to extract a performance variable. The ``units`` argument indicates the units of the performance variable to be extracted. The optional ``perf_key`` argument is used as the name of the performance variable; if not provided, the function name is used as the performance variable name.
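For context, this builtin is used from within a test's class body. Below is an illustrative sketch of such a use, assuming ReFrame 3.8 or later where ``performance_function`` and ``sanity_function`` are available as class-body builtins; the test name, executable, regular expressions and units are all made up.

import reframe as rfm
import reframe.utility.sanity as sn


@rfm.simple_test
class StreamDemoCheck(rfm.RunOnlyRegressionTest):
    valid_systems = ['*']
    valid_prog_environs = ['*']
    executable = './stream.x'

    @sanity_function
    def validate(self):
        return sn.assert_found(r'Solution Validates', self.stdout)

    @performance_function('MB/s', perf_key='triad_bw')
    def triad_bandwidth(self):
        # Extract the Triad bandwidth figure from the test's standard output.
        return sn.extractsingle(r'Triad:\s+(\S+)', self.stdout, 1, float)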
# Copyright 2016-2021 Swiss National Supercomputing Centre (CSCS/ETH Zurich) # ReFrame Project Developers. See the top-level LICENSE file for details. # # SPDX-License-Identifier: BSD-3-Clause # # Meta-class for creating regression tests. # import functools import types import reframe.core.namespaces as namespaces import reframe.core.parameters as parameters import reframe.core.variables as variables import reframe.core.hooks as hooks import reframe.utility as utils from reframe.core.exceptions import ReframeSyntaxError from reframe.core.deferrable import deferrable, _DeferredPerformanceExpression class RegressionTestMeta(type): class MetaNamespace(namespaces.LocalNamespace): '''Custom namespace to control the cls attribute assignment. Regular Python class attributes can be overridden by either parameters or variables respecting the order of execution. A variable or a parameter may not be declared more than once in the same class body. Overriding a variable with a parameter or the other way around has an undefined behavior. A variable's value may be updated multiple times within the same class body. A parameter's value cannot be updated more than once within the same class body. ''' def __setitem__(self, key, value): if isinstance(value, variables.TestVar): # Insert the attribute in the variable namespace try: self['_rfm_local_var_space'][key] = value value.__set_name__(self, key) except KeyError: raise ReframeSyntaxError( f'variable {key!r} is already declared' ) from None # Override the regular class attribute (if present) and return self._namespace.pop(key, None) return elif isinstance(value, parameters.TestParam): # Insert the attribute in the parameter namespace try: self['_rfm_local_param_space'][key] = value except KeyError: raise ReframeSyntaxError( f'parameter {key!r} is already declared in this class' ) from None # Override the regular class attribute (if present) and return self._namespace.pop(key, None) return elif key in self['_rfm_local_param_space']: raise ReframeSyntaxError( f'cannot override parameter {key!r}' ) else: # Insert the items manually to overide the namespace clash # check from the base namespace. self._namespace[key] = value # Register functions decorated with either @sanity_function or # @performance_variables or @performance_function decorators. if hasattr(value, '_rfm_sanity_fn'): try: super().__setitem__('_rfm_sanity', value) except KeyError: raise ReframeSyntaxError( 'the @sanity_function decorator can only be used ' 'once in the class body' ) from None elif hasattr(value, '_rfm_perf_key'): try: self['_rfm_perf_fns'][key] = value except KeyError: raise ReframeSyntaxError( f'the performance function {key!r} has already been ' f'defined in this class' ) from None # Register the final methods if hasattr(value, '_rfm_final'): self['_rfm_final_methods'].add(key) # Register the hooks - if a value does not meet the conditions # it will be simply ignored self['_rfm_hook_registry'].add(value) def __getitem__(self, key): '''Expose and control access to the local namespaces. Variables may only be retrieved if their value has been previously set. Accessing a parameter in the class body is disallowed (the actual test parameter is set during the class instantiation). 
''' try: return super().__getitem__(key) except KeyError as err: try: # Handle variable access return self['_rfm_local_var_space'][key] except KeyError: # Handle parameter access if key in self['_rfm_local_param_space']: raise ReframeSyntaxError( 'accessing a test parameter from the class ' 'body is disallowed' ) from None else: # As the last resource, look if key is a variable in # any of the base classes. If so, make its value # available in the current class' namespace. for b in self['_rfm_bases']: if key in b._rfm_var_space: # Store a deep-copy of the variable's # value and return. v = b._rfm_var_space[key].default_value self._namespace[key] = v return self._namespace[key] # If 'key' is neither a variable nor a parameter, # raise the exception from the base __getitem__. raise err from None def reset(self, key): '''Reset an item to rerun it through the __setitem__ logic.''' self[key] = self[key] class WrappedFunction: '''Descriptor to wrap a free function as a bound-method. The free function object is wrapped by the constructor. Instances of this class should be inserted into the namespace of the target class with the desired name for the bound-method. Since this class is a descriptor, the `__get__` method will return the right bound-method when accessed from a class instance. :meta private: ''' __slots__ = ('fn') def __init__(self, fn, name=None): @functools.wraps(fn) def _fn(*args, **kwargs): return fn(*args, **kwargs) self.fn = _fn if name: self.fn.__name__ = name def __get__(self, obj, objtype=None): if objtype is None: objtype = type(obj) self.fn.__qualname__ = '.'.join( [objtype.__qualname__, self.fn.__name__] ) if obj is None: return self.fn return types.MethodType(self.fn, obj) def __call__(self, *args, **kwargs): return self.fn(*args, **kwargs) def __getattr__(self, name): if name in self.__slots__: return super().__getattr__(name) else: return getattr(self.fn, name) def __setattr__(self, name, value): if name in self.__slots__: super().__setattr__(name, value) else: setattr(self.fn, name, value) @classmethod def __prepare__(metacls, name, bases, **kwargs): namespace = super().__prepare__(name, bases, **kwargs) # Keep reference to the bases inside the namespace namespace['_rfm_bases'] = [ b for b in bases if hasattr(b, '_rfm_var_space') ] # Regression test parameter space defined at the class level local_param_space = namespaces.LocalNamespace() namespace['_rfm_local_param_space'] = local_param_space # Directive to insert a regression test parameter directly in the # class body as: `P0 = parameter([0,1,2,3])`. namespace['parameter'] = parameters.TestParam # Regression test var space defined at the class level local_var_space = namespaces.LocalNamespace() namespace['_rfm_local_var_space'] = local_var_space # Directives to add/modify a regression test variable namespace['variable'] = variables.TestVar namespace['required'] = variables.Undefined # Utility decorators namespace['_rfm_ext_bound'] = set() def bind(fn, name=None): '''Directive to bind a free function to a class. See online docs for more information. .. note:: Functions bound using this directive must be re-inspected after the class body execution has completed. This directive attaches the external method into the class namespace and returns the associated instance of the :class:`WrappedFunction`. 
However, this instance may be further modified by other ReFrame builtins such as :func:`run_before`, :func:`run_after`, :func:`final` and so on after it was added to the namespace, which would bypass the logic implemented in the :func:`__setitem__` method from the :class:`MetaNamespace` class. Hence, we track the items set by this directive in the ``_rfm_ext_bound`` set, so they can be later re-inspected. ''' inst = metacls.WrappedFunction(fn, name) namespace[inst.__name__] = inst # Track the imported external functions namespace['_rfm_ext_bound'].add(inst.__name__) return inst def final(fn): '''Indicate that a function is final and cannot be overridden.''' fn._rfm_final = True return fn namespace['bind'] = bind namespace['final'] = final namespace['_rfm_final_methods'] = set() # Hook-related functionality def run_before(stage): '''Decorator for attaching a test method to a given stage. See online docs for more information. ''' return hooks.attach_to('pre_' + stage) def run_after(stage): '''Decorator for attaching a test method to a given stage. See online docs for more information. ''' return hooks.attach_to('post_' + stage) namespace['run_before'] = run_before namespace['run_after'] = run_after namespace['require_deps'] = hooks.require_deps namespace['_rfm_hook_registry'] = hooks.HookRegistry() # Machinery to add a sanity function def sanity_function(fn): '''Mark a function as the test's sanity function. Decorated functions must be unary and they will be converted into deferred expressions. ''' _def_fn = deferrable(fn) setattr(_def_fn, '_rfm_sanity_fn', True) return _def_fn namespace['sanity_function'] = sanity_function namespace['deferrable'] = deferrable # Machinery to add performance functions # MASKED: performance_function function (lines 293-325) namespace['performance_function'] = performance_function namespace['_rfm_perf_fns'] = namespaces.LocalNamespace() return metacls.MetaNamespace(namespace) def __new__(metacls, name, bases, namespace, **kwargs): '''Remove directives from the class namespace. It does not make sense to have some directives available after the class was created or even at the instance level (e.g. doing ``self.parameter([1, 2, 3])`` does not make sense). So here, we intercept those directives out of the namespace before the class is constructed. ''' directives = [ 'parameter', 'variable', 'bind', 'run_before', 'run_after', 'require_deps', 'required', 'deferrable', 'sanity_function', 'final', 'performance_function' ] for b in directives: namespace.pop(b, None) # Reset the external functions imported through the bind directive. for item in namespace.pop('_rfm_ext_bound'): namespace.reset(item) return super().__new__(metacls, name, bases, dict(namespace), **kwargs) def __init__(cls, name, bases, namespace, **kwargs): super().__init__(name, bases, namespace, **kwargs) # Create a set with the attribute names already in use. 
cls._rfm_dir = set() for base in (b for b in bases if hasattr(b, '_rfm_dir')): cls._rfm_dir.update(base._rfm_dir) used_attribute_names = set(cls._rfm_dir) # Build the var space and extend the target namespace variables.VarSpace(cls, used_attribute_names) used_attribute_names.update(cls._rfm_var_space.vars) # Build the parameter space parameters.ParamSpace(cls, used_attribute_names) # Update used names set with the local __dict__ cls._rfm_dir.update(cls.__dict__) # Update the hook registry with the bases for base in cls._rfm_bases: cls._rfm_hook_registry.update( base._rfm_hook_registry, denied_hooks=namespace ) # Search the bases if no local sanity functions exist. if '_rfm_sanity' not in namespace: for base in cls._rfm_bases: if hasattr(base, '_rfm_sanity'): cls._rfm_sanity = getattr(base, '_rfm_sanity') if cls._rfm_sanity.__name__ in namespace: raise ReframeSyntaxError( f'{cls.__qualname__!r} overrides the candidate ' f'sanity function ' f'{cls._rfm_sanity.__qualname__!r} without ' f'defining an alternative' ) break # Update the performance function dict with the bases. for base in cls._rfm_bases: for k, v in base._rfm_perf_fns.items(): if k not in namespace: try: cls._rfm_perf_fns[k] = v except KeyError: '''Performance function overridden by other class''' # Add the final functions from its parents cls._rfm_final_methods.update( *(b._rfm_final_methods for b in cls._rfm_bases) ) if getattr(cls, '_rfm_override_final', None): return for b in cls._rfm_bases: for key in b._rfm_final_methods: if key in namespace and callable(namespace[key]): msg = (f"'{cls.__qualname__}.{key}' attempts to " f"override final method " f"'{b.__qualname__}.{key}'; " f"you should use the pipeline hooks instead") raise ReframeSyntaxError(msg) def __call__(cls, *args, **kwargs): '''Inject parameter and variable spaces during object construction. When a class is instantiated, this method intercepts the arguments associated to the parameter and variable spaces. This prevents both :func:`__new__` and :func:`__init__` methods from ever seing these arguments. The parameter and variable spaces are injected into the object after construction and before initialization. ''' # Intercept constructor arguments _rfm_use_params = kwargs.pop('_rfm_use_params', False) obj = cls.__new__(cls, *args, **kwargs) # Insert the var & param spaces cls._rfm_var_space.inject(obj, cls) cls._rfm_param_space.inject(obj, cls, _rfm_use_params) obj.__init__(*args, **kwargs) return obj def __getattribute__(cls, name): '''Attribute lookup method for custom class attributes. ReFrame test variables are descriptors injected at the class level. If a variable descriptor has already been injected into the class, do not return the descriptor object and return the default value associated with that variable instead. .. warning:: .. versionchanged:: 3.7.0 Prior versions exposed the variable descriptor object if this was already present in the class, instead of returning the variable's default value. ''' try: var_space = super().__getattribute__('_rfm_var_space') except AttributeError: var_space = None # If the variable is already injected, delegate lookup to __getattr__. if var_space and name in var_space.injected_vars: raise AttributeError('delegate variable lookup to __getattr__') # Default back to the base method if no special treatment required. return super().__getattribute__(name) def __getattr__(cls, name): '''Backup attribute lookup method into custom namespaces. Some ReFrame built-in types are stored under their own sub-namespaces. 
This method will perform an attribute lookup on these sub-namespaces if a call to the default :func:`__getattribute__` method fails to retrieve the requested class attribute. ''' try: var_space = super().__getattribute__('_rfm_var_space') return var_space.vars[name] except AttributeError: '''Catch early access attempt to the variable space.''' except KeyError: '''Requested name not in variable space.''' try: param_space = super().__getattribute__('_rfm_param_space') return param_space.params[name] except AttributeError: '''Catch early access attempt to the parameter space.''' except KeyError: '''Requested name not in parameter space.''' raise AttributeError( f'class {cls.__qualname__!r} has no attribute {name!r}' ) from None def setvar(cls, name, value): '''Set the value of a variable. :param name: The name of the variable. :param value: The value of the variable. :returns: :class:`True` if the variable was set. A variable will *not* be set, if it does not exist or when an attempt is made to set it with its underlying descriptor. This happens during the variable injection time and it should be delegated to the class' :func:`__setattr__` method. :raises ReframeSyntaxError: If an attempt is made to override a variable with a descriptor other than its underlying one. ''' try: var_space = super().__getattribute__('_rfm_var_space') if name in var_space: if not hasattr(value, '__get__'): var_space[name].define(value) return True elif var_space[name].field is not value: desc = '.'.join([cls.__qualname__, name]) raise ReframeSyntaxError( f'cannot override variable descriptor {desc!r}' ) else: # Variable is being injected return False except AttributeError: '''Catch early access attempt to the variable space.''' return False def __setattr__(cls, name, value): '''Handle the special treatment required for variables and parameters. A variable's default value can be updated when accessed as a regular class attribute. This behavior does not apply when the assigned value is a descriptor object. In that case, the task of setting the value is delegated to the base :func:`__setattr__` (this is to comply with standard Python behavior). However, since the variables are already descriptors which are injected during class instantiation, we disallow any attempt to override this descriptor (since it would be silently re-overridden in any case). Altering the value of a parameter when accessed as a class attribute is not allowed. This would break the parameter space internals. ''' # Try to treat `name` as variable if cls.setvar(name, value): return # Try to treat `name` as a parameter try: # Catch attempts to override a test parameter param_space = super().__getattribute__('_rfm_param_space') if name in param_space.params: raise ReframeSyntaxError(f'cannot override parameter {name!r}') except AttributeError: '''Catch early access attempt to the parameter space.''' # Treat `name` as normal class attribute super().__setattr__(name, value) @property def param_space(cls): ''' Make the parameter space available as read-only.''' return cls._rfm_param_space def is_abstract(cls): '''Check if the class is an abstract test. This is the case when some parameters are undefined, which results in the length of the parameter space being 0. :return: bool indicating whether the test has undefined parameters. :meta private: ''' return len(cls.param_space) == 0
def performance_function(units, *, perf_key=None): '''Decorate a function to extract a performance variable. The ``units`` argument indicates the units of the performance variable to be extracted. The ``perf_key`` optional arg will be used as the name of the performance variable. If not provided, the function name will be used as the performance variable name. ''' if not isinstance(units, str): raise TypeError('performance units must be a string') if perf_key and not isinstance(perf_key, str): raise TypeError("'perf_key' must be a string") def _deco_wrapper(func): if not utils.is_trivially_callable(func, non_def_args=1): raise TypeError( f'performance function {func.__name__!r} has more ' f'than one argument without a default value' ) @functools.wraps(func) def _perf_fn(*args, **kwargs): return _DeferredPerformanceExpression( func, units, *args, **kwargs ) _perf_key = perf_key if perf_key else func.__name__ setattr(_perf_fn, '_rfm_perf_key', _perf_key) return _perf_fn return _deco_wrapper
293
325
# Copyright 2016-2021 Swiss National Supercomputing Centre (CSCS/ETH Zurich) # ReFrame Project Developers. See the top-level LICENSE file for details. # # SPDX-License-Identifier: BSD-3-Clause # # Meta-class for creating regression tests. # import functools import types import reframe.core.namespaces as namespaces import reframe.core.parameters as parameters import reframe.core.variables as variables import reframe.core.hooks as hooks import reframe.utility as utils from reframe.core.exceptions import ReframeSyntaxError from reframe.core.deferrable import deferrable, _DeferredPerformanceExpression class RegressionTestMeta(type): class MetaNamespace(namespaces.LocalNamespace): '''Custom namespace to control the cls attribute assignment. Regular Python class attributes can be overridden by either parameters or variables respecting the order of execution. A variable or a parameter may not be declared more than once in the same class body. Overriding a variable with a parameter or the other way around has an undefined behavior. A variable's value may be updated multiple times within the same class body. A parameter's value cannot be updated more than once within the same class body. ''' def __setitem__(self, key, value): if isinstance(value, variables.TestVar): # Insert the attribute in the variable namespace try: self['_rfm_local_var_space'][key] = value value.__set_name__(self, key) except KeyError: raise ReframeSyntaxError( f'variable {key!r} is already declared' ) from None # Override the regular class attribute (if present) and return self._namespace.pop(key, None) return elif isinstance(value, parameters.TestParam): # Insert the attribute in the parameter namespace try: self['_rfm_local_param_space'][key] = value except KeyError: raise ReframeSyntaxError( f'parameter {key!r} is already declared in this class' ) from None # Override the regular class attribute (if present) and return self._namespace.pop(key, None) return elif key in self['_rfm_local_param_space']: raise ReframeSyntaxError( f'cannot override parameter {key!r}' ) else: # Insert the items manually to overide the namespace clash # check from the base namespace. self._namespace[key] = value # Register functions decorated with either @sanity_function or # @performance_variables or @performance_function decorators. if hasattr(value, '_rfm_sanity_fn'): try: super().__setitem__('_rfm_sanity', value) except KeyError: raise ReframeSyntaxError( 'the @sanity_function decorator can only be used ' 'once in the class body' ) from None elif hasattr(value, '_rfm_perf_key'): try: self['_rfm_perf_fns'][key] = value except KeyError: raise ReframeSyntaxError( f'the performance function {key!r} has already been ' f'defined in this class' ) from None # Register the final methods if hasattr(value, '_rfm_final'): self['_rfm_final_methods'].add(key) # Register the hooks - if a value does not meet the conditions # it will be simply ignored self['_rfm_hook_registry'].add(value) def __getitem__(self, key): '''Expose and control access to the local namespaces. Variables may only be retrieved if their value has been previously set. Accessing a parameter in the class body is disallowed (the actual test parameter is set during the class instantiation). 
''' try: return super().__getitem__(key) except KeyError as err: try: # Handle variable access return self['_rfm_local_var_space'][key] except KeyError: # Handle parameter access if key in self['_rfm_local_param_space']: raise ReframeSyntaxError( 'accessing a test parameter from the class ' 'body is disallowed' ) from None else: # As the last resource, look if key is a variable in # any of the base classes. If so, make its value # available in the current class' namespace. for b in self['_rfm_bases']: if key in b._rfm_var_space: # Store a deep-copy of the variable's # value and return. v = b._rfm_var_space[key].default_value self._namespace[key] = v return self._namespace[key] # If 'key' is neither a variable nor a parameter, # raise the exception from the base __getitem__. raise err from None def reset(self, key): '''Reset an item to rerun it through the __setitem__ logic.''' self[key] = self[key] class WrappedFunction: '''Descriptor to wrap a free function as a bound-method. The free function object is wrapped by the constructor. Instances of this class should be inserted into the namespace of the target class with the desired name for the bound-method. Since this class is a descriptor, the `__get__` method will return the right bound-method when accessed from a class instance. :meta private: ''' __slots__ = ('fn') def __init__(self, fn, name=None): @functools.wraps(fn) def _fn(*args, **kwargs): return fn(*args, **kwargs) self.fn = _fn if name: self.fn.__name__ = name def __get__(self, obj, objtype=None): if objtype is None: objtype = type(obj) self.fn.__qualname__ = '.'.join( [objtype.__qualname__, self.fn.__name__] ) if obj is None: return self.fn return types.MethodType(self.fn, obj) def __call__(self, *args, **kwargs): return self.fn(*args, **kwargs) def __getattr__(self, name): if name in self.__slots__: return super().__getattr__(name) else: return getattr(self.fn, name) def __setattr__(self, name, value): if name in self.__slots__: super().__setattr__(name, value) else: setattr(self.fn, name, value) @classmethod def __prepare__(metacls, name, bases, **kwargs): namespace = super().__prepare__(name, bases, **kwargs) # Keep reference to the bases inside the namespace namespace['_rfm_bases'] = [ b for b in bases if hasattr(b, '_rfm_var_space') ] # Regression test parameter space defined at the class level local_param_space = namespaces.LocalNamespace() namespace['_rfm_local_param_space'] = local_param_space # Directive to insert a regression test parameter directly in the # class body as: `P0 = parameter([0,1,2,3])`. namespace['parameter'] = parameters.TestParam # Regression test var space defined at the class level local_var_space = namespaces.LocalNamespace() namespace['_rfm_local_var_space'] = local_var_space # Directives to add/modify a regression test variable namespace['variable'] = variables.TestVar namespace['required'] = variables.Undefined # Utility decorators namespace['_rfm_ext_bound'] = set() def bind(fn, name=None): '''Directive to bind a free function to a class. See online docs for more information. .. note:: Functions bound using this directive must be re-inspected after the class body execution has completed. This directive attaches the external method into the class namespace and returns the associated instance of the :class:`WrappedFunction`. 
However, this instance may be further modified by other ReFrame builtins such as :func:`run_before`, :func:`run_after`, :func:`final` and so on after it was added to the namespace, which would bypass the logic implemented in the :func:`__setitem__` method from the :class:`MetaNamespace` class. Hence, we track the items set by this directive in the ``_rfm_ext_bound`` set, so they can be later re-inspected. ''' inst = metacls.WrappedFunction(fn, name) namespace[inst.__name__] = inst # Track the imported external functions namespace['_rfm_ext_bound'].add(inst.__name__) return inst def final(fn): '''Indicate that a function is final and cannot be overridden.''' fn._rfm_final = True return fn namespace['bind'] = bind namespace['final'] = final namespace['_rfm_final_methods'] = set() # Hook-related functionality def run_before(stage): '''Decorator for attaching a test method to a given stage. See online docs for more information. ''' return hooks.attach_to('pre_' + stage) def run_after(stage): '''Decorator for attaching a test method to a given stage. See online docs for more information. ''' return hooks.attach_to('post_' + stage) namespace['run_before'] = run_before namespace['run_after'] = run_after namespace['require_deps'] = hooks.require_deps namespace['_rfm_hook_registry'] = hooks.HookRegistry() # Machinery to add a sanity function def sanity_function(fn): '''Mark a function as the test's sanity function. Decorated functions must be unary and they will be converted into deferred expressions. ''' _def_fn = deferrable(fn) setattr(_def_fn, '_rfm_sanity_fn', True) return _def_fn namespace['sanity_function'] = sanity_function namespace['deferrable'] = deferrable # Machinery to add performance functions def performance_function(units, *, perf_key=None): '''Decorate a function to extract a performance variable. The ``units`` argument indicates the units of the performance variable to be extracted. The ``perf_key`` optional arg will be used as the name of the performance variable. If not provided, the function name will be used as the performance variable name. ''' if not isinstance(units, str): raise TypeError('performance units must be a string') if perf_key and not isinstance(perf_key, str): raise TypeError("'perf_key' must be a string") def _deco_wrapper(func): if not utils.is_trivially_callable(func, non_def_args=1): raise TypeError( f'performance function {func.__name__!r} has more ' f'than one argument without a default value' ) @functools.wraps(func) def _perf_fn(*args, **kwargs): return _DeferredPerformanceExpression( func, units, *args, **kwargs ) _perf_key = perf_key if perf_key else func.__name__ setattr(_perf_fn, '_rfm_perf_key', _perf_key) return _perf_fn return _deco_wrapper namespace['performance_function'] = performance_function namespace['_rfm_perf_fns'] = namespaces.LocalNamespace() return metacls.MetaNamespace(namespace) def __new__(metacls, name, bases, namespace, **kwargs): '''Remove directives from the class namespace. It does not make sense to have some directives available after the class was created or even at the instance level (e.g. doing ``self.parameter([1, 2, 3])`` does not make sense). So here, we intercept those directives out of the namespace before the class is constructed. ''' directives = [ 'parameter', 'variable', 'bind', 'run_before', 'run_after', 'require_deps', 'required', 'deferrable', 'sanity_function', 'final', 'performance_function' ] for b in directives: namespace.pop(b, None) # Reset the external functions imported through the bind directive. 
for item in namespace.pop('_rfm_ext_bound'): namespace.reset(item) return super().__new__(metacls, name, bases, dict(namespace), **kwargs) def __init__(cls, name, bases, namespace, **kwargs): super().__init__(name, bases, namespace, **kwargs) # Create a set with the attribute names already in use. cls._rfm_dir = set() for base in (b for b in bases if hasattr(b, '_rfm_dir')): cls._rfm_dir.update(base._rfm_dir) used_attribute_names = set(cls._rfm_dir) # Build the var space and extend the target namespace variables.VarSpace(cls, used_attribute_names) used_attribute_names.update(cls._rfm_var_space.vars) # Build the parameter space parameters.ParamSpace(cls, used_attribute_names) # Update used names set with the local __dict__ cls._rfm_dir.update(cls.__dict__) # Update the hook registry with the bases for base in cls._rfm_bases: cls._rfm_hook_registry.update( base._rfm_hook_registry, denied_hooks=namespace ) # Search the bases if no local sanity functions exist. if '_rfm_sanity' not in namespace: for base in cls._rfm_bases: if hasattr(base, '_rfm_sanity'): cls._rfm_sanity = getattr(base, '_rfm_sanity') if cls._rfm_sanity.__name__ in namespace: raise ReframeSyntaxError( f'{cls.__qualname__!r} overrides the candidate ' f'sanity function ' f'{cls._rfm_sanity.__qualname__!r} without ' f'defining an alternative' ) break # Update the performance function dict with the bases. for base in cls._rfm_bases: for k, v in base._rfm_perf_fns.items(): if k not in namespace: try: cls._rfm_perf_fns[k] = v except KeyError: '''Performance function overridden by other class''' # Add the final functions from its parents cls._rfm_final_methods.update( *(b._rfm_final_methods for b in cls._rfm_bases) ) if getattr(cls, '_rfm_override_final', None): return for b in cls._rfm_bases: for key in b._rfm_final_methods: if key in namespace and callable(namespace[key]): msg = (f"'{cls.__qualname__}.{key}' attempts to " f"override final method " f"'{b.__qualname__}.{key}'; " f"you should use the pipeline hooks instead") raise ReframeSyntaxError(msg) def __call__(cls, *args, **kwargs): '''Inject parameter and variable spaces during object construction. When a class is instantiated, this method intercepts the arguments associated to the parameter and variable spaces. This prevents both :func:`__new__` and :func:`__init__` methods from ever seing these arguments. The parameter and variable spaces are injected into the object after construction and before initialization. ''' # Intercept constructor arguments _rfm_use_params = kwargs.pop('_rfm_use_params', False) obj = cls.__new__(cls, *args, **kwargs) # Insert the var & param spaces cls._rfm_var_space.inject(obj, cls) cls._rfm_param_space.inject(obj, cls, _rfm_use_params) obj.__init__(*args, **kwargs) return obj def __getattribute__(cls, name): '''Attribute lookup method for custom class attributes. ReFrame test variables are descriptors injected at the class level. If a variable descriptor has already been injected into the class, do not return the descriptor object and return the default value associated with that variable instead. .. warning:: .. versionchanged:: 3.7.0 Prior versions exposed the variable descriptor object if this was already present in the class, instead of returning the variable's default value. ''' try: var_space = super().__getattribute__('_rfm_var_space') except AttributeError: var_space = None # If the variable is already injected, delegate lookup to __getattr__. 
if var_space and name in var_space.injected_vars: raise AttributeError('delegate variable lookup to __getattr__') # Default back to the base method if no special treatment required. return super().__getattribute__(name) def __getattr__(cls, name): '''Backup attribute lookup method into custom namespaces. Some ReFrame built-in types are stored under their own sub-namespaces. This method will perform an attribute lookup on these sub-namespaces if a call to the default :func:`__getattribute__` method fails to retrieve the requested class attribute. ''' try: var_space = super().__getattribute__('_rfm_var_space') return var_space.vars[name] except AttributeError: '''Catch early access attempt to the variable space.''' except KeyError: '''Requested name not in variable space.''' try: param_space = super().__getattribute__('_rfm_param_space') return param_space.params[name] except AttributeError: '''Catch early access attempt to the parameter space.''' except KeyError: '''Requested name not in parameter space.''' raise AttributeError( f'class {cls.__qualname__!r} has no attribute {name!r}' ) from None def setvar(cls, name, value): '''Set the value of a variable. :param name: The name of the variable. :param value: The value of the variable. :returns: :class:`True` if the variable was set. A variable will *not* be set, if it does not exist or when an attempt is made to set it with its underlying descriptor. This happens during the variable injection time and it should be delegated to the class' :func:`__setattr__` method. :raises ReframeSyntaxError: If an attempt is made to override a variable with a descriptor other than its underlying one. ''' try: var_space = super().__getattribute__('_rfm_var_space') if name in var_space: if not hasattr(value, '__get__'): var_space[name].define(value) return True elif var_space[name].field is not value: desc = '.'.join([cls.__qualname__, name]) raise ReframeSyntaxError( f'cannot override variable descriptor {desc!r}' ) else: # Variable is being injected return False except AttributeError: '''Catch early access attempt to the variable space.''' return False def __setattr__(cls, name, value): '''Handle the special treatment required for variables and parameters. A variable's default value can be updated when accessed as a regular class attribute. This behavior does not apply when the assigned value is a descriptor object. In that case, the task of setting the value is delegated to the base :func:`__setattr__` (this is to comply with standard Python behavior). However, since the variables are already descriptors which are injected during class instantiation, we disallow any attempt to override this descriptor (since it would be silently re-overridden in any case). Altering the value of a parameter when accessed as a class attribute is not allowed. This would break the parameter space internals. ''' # Try to treat `name` as variable if cls.setvar(name, value): return # Try to treat `name` as a parameter try: # Catch attempts to override a test parameter param_space = super().__getattribute__('_rfm_param_space') if name in param_space.params: raise ReframeSyntaxError(f'cannot override parameter {name!r}') except AttributeError: '''Catch early access attempt to the parameter space.''' # Treat `name` as normal class attribute super().__setattr__(name, value) @property def param_space(cls): ''' Make the parameter space available as read-only.''' return cls._rfm_param_space def is_abstract(cls): '''Check if the class is an abstract test. 
This is the case when some parameters are undefined, which results in the length of the parameter space being 0. :return: bool indicating whether the test has undefined parameters. :meta private: ''' return len(cls.param_space) == 0
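For context, the row above masks the `performance_function` builtin that this metaclass injects into the class body of every test, alongside `sanity_function`, `run_before` and the other directives visible in the surrounding code. A minimal, hypothetical sketch of how these builtins are typically consumed in a ReFrame test follows; the benchmark name, executable, options and regular expressions are illustrative only and are not taken from this dataset row.

import reframe as rfm
import reframe.utility.sanity as sn


@rfm.simple_test
class StreamBandwidthTest(rfm.RegressionTest):
    # Hypothetical benchmark settings; only the builtin decorators matter here.
    valid_systems = ['*']
    valid_prog_environs = ['*']
    executable = './stream.x'

    @run_before('run')
    def set_exec_opts(self):
        # Pipeline hook attached via the run_before directive ('pre_run' stage).
        self.executable_opts = ['--iterations', '10']  # hypothetical flag

    @sanity_function
    def validate_run(self):
        # Converted into a deferred expression by the @sanity_function builtin.
        return sn.assert_found(r'Solution Validates', self.stdout)

    @performance_function('MB/s', perf_key='triad')
    def triad_bandwidth(self):
        # Wrapped into a _DeferredPerformanceExpression with units 'MB/s'.
        return sn.extractsingle(r'Triad:\s+(\S+)', self.stdout, 1, float)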
create_test_server
Wrapper utility that returns a test server. This wrapper utility calls the common create test server and returns a test server. The purpose of this wrapper is to minimize the impact on the code of the tests already using this function. :param validatable: Whether the server will be pingable or sshable. :param volume_backed: Whether the instance is volume backed or not. :param validation_resources: Dictionary of validation resources as returned by `get_class_validation_resources`. :param clients: Client manager, defaults to os_primary. :param kwargs: Extra arguments are passed down to the `compute.create_test_server` call.
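A minimal, hypothetical sketch of a Tempest test consuming this wrapper follows; the test class name, attribute decorator and test body are illustrative only and are not part of this dataset row.

from tempest.api.compute import base
from tempest.lib import decorators


class ServersSmokeTest(base.BaseV2ComputeTest):
    """Hypothetical consumer of the create_test_server wrapper described above."""

    @decorators.attr(type='smoke')
    def test_create_and_delete_server(self):
        # The wrapper fills in a random server name, resolves the tenant
        # network and registers class-level cleanup for the created server.
        server = self.create_test_server(wait_until='ACTIVE')
        self.servers_client.delete_server(server['id'])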
# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import time from oslo_log import log as logging from tempest.common import compute from tempest.common import waiters from tempest import config from tempest import exceptions from tempest.lib.common import api_microversion_fixture from tempest.lib.common import api_version_request from tempest.lib.common import api_version_utils from tempest.lib.common.utils import data_utils from tempest.lib.common.utils import test_utils from tempest.lib import exceptions as lib_exc import tempest.test CONF = config.CONF LOG = logging.getLogger(__name__) class BaseV2ComputeTest(api_version_utils.BaseMicroversionTest, tempest.test.BaseTestCase): """Base test case class for all Compute API tests.""" force_tenant_isolation = False # Set this to True in subclasses to create a default network. See # https://bugs.launchpad.net/tempest/+bug/1844568 create_default_network = False # TODO(andreaf) We should care also for the alt_manager here # but only once client lazy load in the manager is done credentials = ['primary'] @classmethod def skip_checks(cls): super(BaseV2ComputeTest, cls).skip_checks() if not CONF.service_available.nova: raise cls.skipException("Nova is not available") api_version_utils.check_skip_with_microversion( cls.min_microversion, cls.max_microversion, CONF.compute.min_microversion, CONF.compute.max_microversion) api_version_utils.check_skip_with_microversion( cls.volume_min_microversion, cls.volume_max_microversion, CONF.volume.min_microversion, CONF.volume.max_microversion) api_version_utils.check_skip_with_microversion( cls.placement_min_microversion, cls.placement_max_microversion, CONF.placement.min_microversion, CONF.placement.max_microversion) @classmethod def setup_credentials(cls): # Setting network=True, subnet=True creates a default network cls.set_network_resources( network=cls.create_default_network, subnet=cls.create_default_network) super(BaseV2ComputeTest, cls).setup_credentials() @classmethod def setup_clients(cls): super(BaseV2ComputeTest, cls).setup_clients() cls.servers_client = cls.os_primary.servers_client cls.server_groups_client = cls.os_primary.server_groups_client cls.flavors_client = cls.os_primary.flavors_client cls.compute_images_client = cls.os_primary.compute_images_client cls.extensions_client = cls.os_primary.extensions_client cls.floating_ip_pools_client = cls.os_primary.floating_ip_pools_client cls.floating_ips_client = cls.os_primary.compute_floating_ips_client cls.keypairs_client = cls.os_primary.keypairs_client cls.security_group_rules_client = ( cls.os_primary.compute_security_group_rules_client) cls.security_groups_client =\ cls.os_primary.compute_security_groups_client cls.quotas_client = cls.os_primary.quotas_client cls.compute_networks_client = cls.os_primary.compute_networks_client cls.limits_client = cls.os_primary.limits_client cls.volumes_extensions_client =\ cls.os_primary.volumes_extensions_client cls.snapshots_extensions_client =\ 
cls.os_primary.snapshots_extensions_client cls.interfaces_client = cls.os_primary.interfaces_client cls.fixed_ips_client = cls.os_primary.fixed_ips_client cls.availability_zone_client = cls.os_primary.availability_zone_client cls.agents_client = cls.os_primary.agents_client cls.aggregates_client = cls.os_primary.aggregates_client cls.services_client = cls.os_primary.services_client cls.instance_usages_audit_log_client = ( cls.os_primary.instance_usages_audit_log_client) cls.hypervisor_client = cls.os_primary.hypervisor_client cls.certificates_client = cls.os_primary.certificates_client cls.migrations_client = cls.os_primary.migrations_client cls.security_group_default_rules_client = ( cls.os_primary.security_group_default_rules_client) cls.versions_client = cls.os_primary.compute_versions_client if CONF.service_available.cinder: cls.volumes_client = cls.os_primary.volumes_client_latest cls.attachments_client = cls.os_primary.attachments_client_latest cls.snapshots_client = cls.os_primary.snapshots_client_latest if CONF.service_available.glance: if CONF.image_feature_enabled.api_v1: cls.images_client = cls.os_primary.image_client elif CONF.image_feature_enabled.api_v2: cls.images_client = cls.os_primary.image_client_v2 else: raise lib_exc.InvalidConfiguration( 'Either api_v1 or api_v2 must be True in ' '[image-feature-enabled].') cls._check_depends_on_nova_network() @classmethod def _check_depends_on_nova_network(cls): # Since nova-network APIs were removed from Nova in the Rocky release, # determine, based on the max version from the version document, if # the compute API is >Queens and if so, skip tests that rely on # nova-network. if not getattr(cls, 'depends_on_nova_network', False): return versions = cls.versions_client.list_versions()['versions'] # Find the v2.1 version which will tell us our max version for the # compute API we're testing against. for version in versions: if version['id'] == 'v2.1': max_version = api_version_request.APIVersionRequest( version['version']) break else: LOG.warning( 'Unable to determine max v2.1 compute API version: %s', versions) return # The max compute API version in Queens is 2.60 so we cap # at that version. 
queens = api_version_request.APIVersionRequest('2.60') if max_version > queens: raise cls.skipException('nova-network is gone') @classmethod def resource_setup(cls): super(BaseV2ComputeTest, cls).resource_setup() cls.request_microversion = ( api_version_utils.select_request_microversion( cls.min_microversion, CONF.compute.min_microversion)) cls.volume_request_microversion = ( api_version_utils.select_request_microversion( cls.volume_min_microversion, CONF.volume.min_microversion)) cls.placement_request_microversion = ( api_version_utils.select_request_microversion( cls.placement_min_microversion, CONF.placement.min_microversion)) cls.build_interval = CONF.compute.build_interval cls.build_timeout = CONF.compute.build_timeout cls.image_ref = CONF.compute.image_ref cls.image_ref_alt = CONF.compute.image_ref_alt cls.flavor_ref = CONF.compute.flavor_ref cls.flavor_ref_alt = CONF.compute.flavor_ref_alt cls.ssh_user = CONF.validation.image_ssh_user cls.ssh_alt_user = CONF.validation.image_alt_ssh_user cls.image_ssh_user = CONF.validation.image_ssh_user cls.image_alt_ssh_user = CONF.validation.image_alt_ssh_user cls.image_ssh_password = CONF.validation.image_ssh_password cls.image_alt_ssh_password = CONF.validation.image_alt_ssh_password @classmethod def is_requested_microversion_compatible(cls, max_version): """Check the compatibility of selected request microversion This method will check if selected request microversion (cls.request_microversion) for test is compatible with respect to 'max_version'. Compatible means if selected request microversion is in the range(<=) of 'max_version'. :param max_version: maximum microversion to compare for compatibility. Example: '2.30' :returns: True if selected request microversion is compatible with 'max_version'. False in other case. """ try: req_version_obj = api_version_request.APIVersionRequest( cls.request_microversion) # NOTE(gmann): This is case where this method is used before calling # resource_setup(), where cls.request_microversion is set. There may # not be any such case but still we can handle this case. except AttributeError: request_microversion = ( api_version_utils.select_request_microversion( cls.min_microversion, CONF.compute.min_microversion)) req_version_obj = api_version_request.APIVersionRequest( request_microversion) max_version_obj = api_version_request.APIVersionRequest(max_version) return req_version_obj <= max_version_obj @classmethod def server_check_teardown(cls): """Checks is the shared server clean enough for subsequent test. Method will delete the server when it's dirty. The setUp method is responsible for creating a new server. Exceptions raised in tearDown class are fails the test case, This method supposed to use only by tearDown methods, when the shared server_id is stored in the server_id of the class. 
""" if getattr(cls, 'server_id', None) is not None: try: waiters.wait_for_server_status(cls.servers_client, cls.server_id, 'ACTIVE') except Exception as exc: LOG.exception(exc) cls.servers_client.delete_server(cls.server_id) waiters.wait_for_server_termination(cls.servers_client, cls.server_id) cls.server_id = None raise # MASKED: create_test_server function (lines 232-284) @classmethod def create_security_group(cls, name=None, description=None): if name is None: name = data_utils.rand_name(cls.__name__ + "-securitygroup") if description is None: description = data_utils.rand_name('description') body = cls.security_groups_client.create_security_group( name=name, description=description)['security_group'] cls.addClassResourceCleanup( test_utils.call_and_ignore_notfound_exc, cls.security_groups_client.delete_security_group, body['id']) return body @classmethod def create_test_server_group(cls, name="", policy=None): if not name: name = data_utils.rand_name(cls.__name__ + "-Server-Group") if policy is None: policy = ['affinity'] body = cls.server_groups_client.create_server_group( name=name, policies=policy)['server_group'] cls.addClassResourceCleanup( test_utils.call_and_ignore_notfound_exc, cls.server_groups_client.delete_server_group, body['id']) return body def wait_for(self, condition): """Repeatedly calls condition() until a timeout.""" start_time = int(time.time()) while True: try: condition() except Exception: pass else: return if int(time.time()) - start_time >= self.build_timeout: condition() return time.sleep(self.build_interval) @classmethod def prepare_instance_network(cls): if (CONF.validation.auth_method != 'disabled' and CONF.validation.connect_method == 'floating'): cls.set_network_resources(network=True, subnet=True, router=True, dhcp=True) @classmethod def create_image_from_server(cls, server_id, **kwargs): """Wrapper utility that returns an image created from the server. If compute microversion >= 2.36, the returned image response will be from the image service API rather than the compute image proxy API. """ name = kwargs.pop('name', data_utils.rand_name(cls.__name__ + "-image")) wait_until = kwargs.pop('wait_until', None) wait_for_server = kwargs.pop('wait_for_server', True) image = cls.compute_images_client.create_image(server_id, name=name, **kwargs) if api_version_utils.compare_version_header_to_response( "OpenStack-API-Version", "compute 2.45", image.response, "lt"): image_id = image['image_id'] else: image_id = data_utils.parse_image_id(image.response['location']) # The compute image proxy APIs were deprecated in 2.35 so # use the images client directly if the API microversion being # used is >=2.36. if not cls.is_requested_microversion_compatible('2.35'): client = cls.images_client else: client = cls.compute_images_client cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc, client.delete_image, image_id) if wait_until is not None: try: wait_until = wait_until.upper() if not cls.is_requested_microversion_compatible('2.35'): wait_until = wait_until.lower() waiters.wait_for_image_status(client, image_id, wait_until) except lib_exc.NotFound: if wait_until.upper() == 'ACTIVE': # If the image is not found after create_image returned # that means the snapshot failed in nova-compute and nova # deleted the image. There should be a compute fault # recorded with the server in that case, so get the server # and dump some details. 
server = ( cls.servers_client.show_server(server_id)['server']) if 'fault' in server: raise exceptions.SnapshotNotFoundException( server['fault'], image_id=image_id) else: raise exceptions.SnapshotNotFoundException( image_id=image_id) else: raise image = client.show_image(image_id) # Compute image client returns response wrapped in 'image' element # which is not the case with Glance image client. if 'image' in image: image = image['image'] if wait_until.upper() == 'ACTIVE': if wait_for_server: waiters.wait_for_server_status(cls.servers_client, server_id, 'ACTIVE') return image @classmethod def recreate_server(cls, server_id, validatable=False, **kwargs): """Destroy an existing class level server and creates a new one Some test classes use a test server that can be used by multiple tests. This is done to optimise runtime and test load. If something goes wrong with the test server, it can be rebuilt using this helper. This helper can also be used for the initial provisioning if no server_id is specified. :param server_id: UUID of the server to be rebuilt. If None is specified, a new server is provisioned. :param validatable: whether to the server needs to be validatable. When True, validation resources are acquired via the `get_class_validation_resources` helper. :param kwargs: extra paramaters are passed through to the `create_test_server` call. :return: the UUID of the created server. """ if server_id: cls.delete_server(server_id) cls.password = data_utils.rand_password() server = cls.create_test_server( validatable, validation_resources=cls.get_class_validation_resources( cls.os_primary), wait_until='ACTIVE', adminPass=cls.password, **kwargs) return server['id'] @classmethod def delete_server(cls, server_id): """Deletes an existing server and waits for it to be gone.""" try: cls.servers_client.delete_server(server_id) waiters.wait_for_server_termination(cls.servers_client, server_id) except Exception: LOG.exception('Failed to delete server %s', server_id) def resize_server(self, server_id, new_flavor_id, **kwargs): """resize and confirm_resize an server, waits for it to be ACTIVE.""" self.servers_client.resize_server(server_id, new_flavor_id, **kwargs) waiters.wait_for_server_status(self.servers_client, server_id, 'VERIFY_RESIZE') self.servers_client.confirm_resize_server(server_id) waiters.wait_for_server_status( self.servers_client, server_id, 'ACTIVE') server = self.servers_client.show_server(server_id)['server'] self.assert_flavor_equal(new_flavor_id, server['flavor']) @classmethod def delete_volume(cls, volume_id): """Deletes the given volume and waits for it to be gone.""" try: cls.volumes_client.delete_volume(volume_id) # TODO(mriedem): We should move the wait_for_resource_deletion # into the delete_volume method as a convenience to the caller. cls.volumes_client.wait_for_resource_deletion(volume_id) except lib_exc.NotFound: LOG.warning("Unable to delete volume '%s' since it was not found. " "Maybe it was already deleted?", volume_id) @classmethod def get_server_ip(cls, server, validation_resources=None): """Get the server fixed or floating IP. Based on the configuration we're in, return a correct ip address for validating that a guest is up. :param server: The server dict as returned by the API :param validation_resources: The dict of validation resources provisioned for the server. 
""" if CONF.validation.connect_method == 'floating': if validation_resources: return validation_resources['floating_ip']['ip'] else: msg = ('When validation.connect_method equals floating, ' 'validation_resources cannot be None') raise lib_exc.InvalidParam(invalid_param=msg) elif CONF.validation.connect_method == 'fixed': addresses = server['addresses'][CONF.validation.network_for_ssh] for address in addresses: if address['version'] == CONF.validation.ip_version_for_ssh: return address['addr'] raise exceptions.ServerUnreachable(server_id=server['id']) else: raise lib_exc.InvalidConfiguration() def setUp(self): super(BaseV2ComputeTest, self).setUp() self.useFixture(api_microversion_fixture.APIMicroversionFixture( compute_microversion=self.request_microversion, volume_microversion=self.volume_request_microversion, placement_microversion=self.placement_request_microversion)) @classmethod def create_volume(cls, image_ref=None, **kwargs): """Create a volume and wait for it to become 'available'. :param image_ref: Specify an image id to create a bootable volume. :param kwargs: other parameters to create volume. :returns: The available volume. """ if 'size' not in kwargs: kwargs['size'] = CONF.volume.volume_size if 'display_name' not in kwargs: vol_name = data_utils.rand_name(cls.__name__ + '-volume') kwargs['display_name'] = vol_name if image_ref is not None: kwargs['imageRef'] = image_ref if CONF.compute.compute_volume_common_az: kwargs.setdefault('availability_zone', CONF.compute.compute_volume_common_az) volume = cls.volumes_client.create_volume(**kwargs)['volume'] cls.addClassResourceCleanup( cls.volumes_client.wait_for_resource_deletion, volume['id']) cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc, cls.volumes_client.delete_volume, volume['id']) waiters.wait_for_volume_resource_status(cls.volumes_client, volume['id'], 'available') return volume def _detach_volume(self, server, volume): """Helper method to detach a volume. Ignores 404 responses if the volume or server do not exist, or the volume is already detached from the server. """ try: volume = self.volumes_client.show_volume(volume['id'])['volume'] # Check the status. You can only detach an in-use volume, otherwise # the compute API will return a 400 response. if volume['status'] == 'in-use': self.servers_client.detach_volume(server['id'], volume['id']) except lib_exc.NotFound: # Ignore 404s on detach in case the server is deleted or the volume # is already detached. pass def attach_volume(self, server, volume, device=None, tag=None): """Attaches volume to server and waits for 'in-use' volume status. The volume will be detached when the test tears down. :param server: The server to which the volume will be attached. :param volume: The volume to attach. :param device: Optional mountpoint for the attached volume. Note that this is not guaranteed for all hypervisors and is not recommended. :param tag: Optional device role tag to apply to the volume. """ attach_kwargs = dict(volumeId=volume['id']) if device: attach_kwargs['device'] = device if tag: attach_kwargs['tag'] = tag attachment = self.servers_client.attach_volume( server['id'], **attach_kwargs)['volumeAttachment'] # On teardown detach the volume and for multiattach volumes wait for # the attachment to be removed. For non-multiattach volumes wait for # the state of the volume to change to available. This is so we don't # error out when trying to delete the volume during teardown. 
if volume['multiattach']: att = waiters.wait_for_volume_attachment_create( self.volumes_client, volume['id'], server['id']) self.addCleanup(waiters.wait_for_volume_attachment_remove, self.volumes_client, volume['id'], att['attachment_id']) else: self.addCleanup(waiters.wait_for_volume_resource_status, self.volumes_client, volume['id'], 'available') waiters.wait_for_volume_resource_status(self.volumes_client, volume['id'], 'in-use') # Ignore 404s on detach in case the server is deleted or the volume # is already detached. self.addCleanup(self._detach_volume, server, volume) return attachment def create_volume_snapshot(self, volume_id, name=None, description=None, metadata=None, force=False): name = name or data_utils.rand_name( self.__class__.__name__ + '-snapshot') snapshot = self.snapshots_client.create_snapshot( volume_id=volume_id, force=force, display_name=name, description=description, metadata=metadata)['snapshot'] self.addCleanup(self.snapshots_client.wait_for_resource_deletion, snapshot['id']) self.addCleanup(self.snapshots_client.delete_snapshot, snapshot['id']) waiters.wait_for_volume_resource_status(self.snapshots_client, snapshot['id'], 'available') snapshot = self.snapshots_client.show_snapshot( snapshot['id'])['snapshot'] return snapshot def assert_flavor_equal(self, flavor_id, server_flavor): """Check whether server_flavor equals to flavor. :param flavor_id: flavor id :param server_flavor: flavor info returned by show_server. """ # Nova API > 2.46 no longer includes flavor.id, and schema check # will cover whether 'id' should be in flavor if server_flavor.get('id'): msg = ('server flavor is not same as flavor!') self.assertEqual(flavor_id, server_flavor['id'], msg) else: flavor = self.flavors_client.show_flavor(flavor_id)['flavor'] self.assertEqual(flavor['name'], server_flavor['original_name'], "original_name in server flavor is not same as " "flavor name!") for key in ['ram', 'vcpus', 'disk']: msg = ('attribute %s in server flavor is not same as ' 'flavor!' 
% key) self.assertEqual(flavor[key], server_flavor[key], msg) class BaseV2ComputeAdminTest(BaseV2ComputeTest): """Base test case class for Compute Admin API tests.""" credentials = ['primary', 'admin'] @classmethod def setup_clients(cls): super(BaseV2ComputeAdminTest, cls).setup_clients() cls.availability_zone_admin_client = ( cls.os_admin.availability_zone_client) cls.admin_flavors_client = cls.os_admin.flavors_client cls.admin_servers_client = cls.os_admin.servers_client cls.image_client = cls.os_admin.image_client_v2 cls.admin_assisted_volume_snapshots_client = \ cls.os_admin.assisted_volume_snapshots_client def create_flavor(self, ram, vcpus, disk, name=None, is_public='True', **kwargs): if name is None: name = data_utils.rand_name(self.__class__.__name__ + "-flavor") id = kwargs.pop('id', data_utils.rand_int_id(start=1000)) client = self.admin_flavors_client flavor = client.create_flavor( ram=ram, vcpus=vcpus, disk=disk, name=name, id=id, is_public=is_public, **kwargs)['flavor'] self.addCleanup(client.wait_for_resource_deletion, flavor['id']) self.addCleanup(client.delete_flavor, flavor['id']) return flavor @classmethod def get_host_for_server(cls, server_id): server_details = cls.admin_servers_client.show_server(server_id) return server_details['server']['OS-EXT-SRV-ATTR:host'] def get_host_other_than(self, server_id): source_host = self.get_host_for_server(server_id) svcs = self.os_admin.services_client.list_services( binary='nova-compute')['services'] hosts = [] for svc in svcs: if svc['state'] == 'up' and svc['status'] == 'enabled': if CONF.compute.compute_volume_common_az: if svc['zone'] == CONF.compute.compute_volume_common_az: hosts.append(svc['host']) else: hosts.append(svc['host']) for target_host in hosts: if source_host != target_host: return target_host
@classmethod def create_test_server(cls, validatable=False, volume_backed=False, validation_resources=None, clients=None, **kwargs): """Wrapper utility that returns a test server. This wrapper utility calls the common create test server and returns a test server. The purpose of this wrapper is to minimize the impact on the code of the tests already using this function. :param validatable: Whether the server will be pingable or sshable. :param volume_backed: Whether the instance is volume backed or not. :param validation_resources: Dictionary of validation resources as returned by `get_class_validation_resources`. :param clients: Client manager, defaults to os_primary. :param kwargs: Extra arguments are passed down to the `compute.create_test_server` call. """ if 'name' not in kwargs: kwargs['name'] = data_utils.rand_name(cls.__name__ + "-server") request_version = api_version_request.APIVersionRequest( cls.request_microversion) v2_37_version = api_version_request.APIVersionRequest('2.37') tenant_network = cls.get_tenant_network() # NOTE(snikitin): since microversion v2.37 'networks' field is required if (request_version >= v2_37_version and 'networks' not in kwargs and not tenant_network): kwargs['networks'] = 'none' if clients is None: clients = cls.os_primary body, servers = compute.create_test_server( clients, validatable, validation_resources=validation_resources, tenant_network=tenant_network, volume_backed=volume_backed, **kwargs) # For each server schedule wait and delete, so we first delete all # and then wait for all for server in servers: cls.addClassResourceCleanup(waiters.wait_for_server_termination, clients.servers_client, server['id']) for server in servers: cls.addClassResourceCleanup( test_utils.call_and_ignore_notfound_exc, clients.servers_client.delete_server, server['id']) return body
232
284
# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import time from oslo_log import log as logging from tempest.common import compute from tempest.common import waiters from tempest import config from tempest import exceptions from tempest.lib.common import api_microversion_fixture from tempest.lib.common import api_version_request from tempest.lib.common import api_version_utils from tempest.lib.common.utils import data_utils from tempest.lib.common.utils import test_utils from tempest.lib import exceptions as lib_exc import tempest.test CONF = config.CONF LOG = logging.getLogger(__name__) class BaseV2ComputeTest(api_version_utils.BaseMicroversionTest, tempest.test.BaseTestCase): """Base test case class for all Compute API tests.""" force_tenant_isolation = False # Set this to True in subclasses to create a default network. See # https://bugs.launchpad.net/tempest/+bug/1844568 create_default_network = False # TODO(andreaf) We should care also for the alt_manager here # but only once client lazy load in the manager is done credentials = ['primary'] @classmethod def skip_checks(cls): super(BaseV2ComputeTest, cls).skip_checks() if not CONF.service_available.nova: raise cls.skipException("Nova is not available") api_version_utils.check_skip_with_microversion( cls.min_microversion, cls.max_microversion, CONF.compute.min_microversion, CONF.compute.max_microversion) api_version_utils.check_skip_with_microversion( cls.volume_min_microversion, cls.volume_max_microversion, CONF.volume.min_microversion, CONF.volume.max_microversion) api_version_utils.check_skip_with_microversion( cls.placement_min_microversion, cls.placement_max_microversion, CONF.placement.min_microversion, CONF.placement.max_microversion) @classmethod def setup_credentials(cls): # Setting network=True, subnet=True creates a default network cls.set_network_resources( network=cls.create_default_network, subnet=cls.create_default_network) super(BaseV2ComputeTest, cls).setup_credentials() @classmethod def setup_clients(cls): super(BaseV2ComputeTest, cls).setup_clients() cls.servers_client = cls.os_primary.servers_client cls.server_groups_client = cls.os_primary.server_groups_client cls.flavors_client = cls.os_primary.flavors_client cls.compute_images_client = cls.os_primary.compute_images_client cls.extensions_client = cls.os_primary.extensions_client cls.floating_ip_pools_client = cls.os_primary.floating_ip_pools_client cls.floating_ips_client = cls.os_primary.compute_floating_ips_client cls.keypairs_client = cls.os_primary.keypairs_client cls.security_group_rules_client = ( cls.os_primary.compute_security_group_rules_client) cls.security_groups_client =\ cls.os_primary.compute_security_groups_client cls.quotas_client = cls.os_primary.quotas_client cls.compute_networks_client = cls.os_primary.compute_networks_client cls.limits_client = cls.os_primary.limits_client cls.volumes_extensions_client =\ cls.os_primary.volumes_extensions_client cls.snapshots_extensions_client =\ 
cls.os_primary.snapshots_extensions_client cls.interfaces_client = cls.os_primary.interfaces_client cls.fixed_ips_client = cls.os_primary.fixed_ips_client cls.availability_zone_client = cls.os_primary.availability_zone_client cls.agents_client = cls.os_primary.agents_client cls.aggregates_client = cls.os_primary.aggregates_client cls.services_client = cls.os_primary.services_client cls.instance_usages_audit_log_client = ( cls.os_primary.instance_usages_audit_log_client) cls.hypervisor_client = cls.os_primary.hypervisor_client cls.certificates_client = cls.os_primary.certificates_client cls.migrations_client = cls.os_primary.migrations_client cls.security_group_default_rules_client = ( cls.os_primary.security_group_default_rules_client) cls.versions_client = cls.os_primary.compute_versions_client if CONF.service_available.cinder: cls.volumes_client = cls.os_primary.volumes_client_latest cls.attachments_client = cls.os_primary.attachments_client_latest cls.snapshots_client = cls.os_primary.snapshots_client_latest if CONF.service_available.glance: if CONF.image_feature_enabled.api_v1: cls.images_client = cls.os_primary.image_client elif CONF.image_feature_enabled.api_v2: cls.images_client = cls.os_primary.image_client_v2 else: raise lib_exc.InvalidConfiguration( 'Either api_v1 or api_v2 must be True in ' '[image-feature-enabled].') cls._check_depends_on_nova_network() @classmethod def _check_depends_on_nova_network(cls): # Since nova-network APIs were removed from Nova in the Rocky release, # determine, based on the max version from the version document, if # the compute API is >Queens and if so, skip tests that rely on # nova-network. if not getattr(cls, 'depends_on_nova_network', False): return versions = cls.versions_client.list_versions()['versions'] # Find the v2.1 version which will tell us our max version for the # compute API we're testing against. for version in versions: if version['id'] == 'v2.1': max_version = api_version_request.APIVersionRequest( version['version']) break else: LOG.warning( 'Unable to determine max v2.1 compute API version: %s', versions) return # The max compute API version in Queens is 2.60 so we cap # at that version. 
queens = api_version_request.APIVersionRequest('2.60') if max_version > queens: raise cls.skipException('nova-network is gone') @classmethod def resource_setup(cls): super(BaseV2ComputeTest, cls).resource_setup() cls.request_microversion = ( api_version_utils.select_request_microversion( cls.min_microversion, CONF.compute.min_microversion)) cls.volume_request_microversion = ( api_version_utils.select_request_microversion( cls.volume_min_microversion, CONF.volume.min_microversion)) cls.placement_request_microversion = ( api_version_utils.select_request_microversion( cls.placement_min_microversion, CONF.placement.min_microversion)) cls.build_interval = CONF.compute.build_interval cls.build_timeout = CONF.compute.build_timeout cls.image_ref = CONF.compute.image_ref cls.image_ref_alt = CONF.compute.image_ref_alt cls.flavor_ref = CONF.compute.flavor_ref cls.flavor_ref_alt = CONF.compute.flavor_ref_alt cls.ssh_user = CONF.validation.image_ssh_user cls.ssh_alt_user = CONF.validation.image_alt_ssh_user cls.image_ssh_user = CONF.validation.image_ssh_user cls.image_alt_ssh_user = CONF.validation.image_alt_ssh_user cls.image_ssh_password = CONF.validation.image_ssh_password cls.image_alt_ssh_password = CONF.validation.image_alt_ssh_password @classmethod def is_requested_microversion_compatible(cls, max_version): """Check the compatibility of selected request microversion This method will check if selected request microversion (cls.request_microversion) for test is compatible with respect to 'max_version'. Compatible means if selected request microversion is in the range(<=) of 'max_version'. :param max_version: maximum microversion to compare for compatibility. Example: '2.30' :returns: True if selected request microversion is compatible with 'max_version'. False in other case. """ try: req_version_obj = api_version_request.APIVersionRequest( cls.request_microversion) # NOTE(gmann): This is case where this method is used before calling # resource_setup(), where cls.request_microversion is set. There may # not be any such case but still we can handle this case. except AttributeError: request_microversion = ( api_version_utils.select_request_microversion( cls.min_microversion, CONF.compute.min_microversion)) req_version_obj = api_version_request.APIVersionRequest( request_microversion) max_version_obj = api_version_request.APIVersionRequest(max_version) return req_version_obj <= max_version_obj @classmethod def server_check_teardown(cls): """Checks is the shared server clean enough for subsequent test. Method will delete the server when it's dirty. The setUp method is responsible for creating a new server. Exceptions raised in tearDown class are fails the test case, This method supposed to use only by tearDown methods, when the shared server_id is stored in the server_id of the class. """ if getattr(cls, 'server_id', None) is not None: try: waiters.wait_for_server_status(cls.servers_client, cls.server_id, 'ACTIVE') except Exception as exc: LOG.exception(exc) cls.servers_client.delete_server(cls.server_id) waiters.wait_for_server_termination(cls.servers_client, cls.server_id) cls.server_id = None raise @classmethod def create_test_server(cls, validatable=False, volume_backed=False, validation_resources=None, clients=None, **kwargs): """Wrapper utility that returns a test server. This wrapper utility calls the common create test server and returns a test server. The purpose of this wrapper is to minimize the impact on the code of the tests already using this function. 
:param validatable: Whether the server will be pingable or sshable. :param volume_backed: Whether the instance is volume backed or not. :param validation_resources: Dictionary of validation resources as returned by `get_class_validation_resources`. :param clients: Client manager, defaults to os_primary. :param kwargs: Extra arguments are passed down to the `compute.create_test_server` call. """ if 'name' not in kwargs: kwargs['name'] = data_utils.rand_name(cls.__name__ + "-server") request_version = api_version_request.APIVersionRequest( cls.request_microversion) v2_37_version = api_version_request.APIVersionRequest('2.37') tenant_network = cls.get_tenant_network() # NOTE(snikitin): since microversion v2.37 'networks' field is required if (request_version >= v2_37_version and 'networks' not in kwargs and not tenant_network): kwargs['networks'] = 'none' if clients is None: clients = cls.os_primary body, servers = compute.create_test_server( clients, validatable, validation_resources=validation_resources, tenant_network=tenant_network, volume_backed=volume_backed, **kwargs) # For each server schedule wait and delete, so we first delete all # and then wait for all for server in servers: cls.addClassResourceCleanup(waiters.wait_for_server_termination, clients.servers_client, server['id']) for server in servers: cls.addClassResourceCleanup( test_utils.call_and_ignore_notfound_exc, clients.servers_client.delete_server, server['id']) return body @classmethod def create_security_group(cls, name=None, description=None): if name is None: name = data_utils.rand_name(cls.__name__ + "-securitygroup") if description is None: description = data_utils.rand_name('description') body = cls.security_groups_client.create_security_group( name=name, description=description)['security_group'] cls.addClassResourceCleanup( test_utils.call_and_ignore_notfound_exc, cls.security_groups_client.delete_security_group, body['id']) return body @classmethod def create_test_server_group(cls, name="", policy=None): if not name: name = data_utils.rand_name(cls.__name__ + "-Server-Group") if policy is None: policy = ['affinity'] body = cls.server_groups_client.create_server_group( name=name, policies=policy)['server_group'] cls.addClassResourceCleanup( test_utils.call_and_ignore_notfound_exc, cls.server_groups_client.delete_server_group, body['id']) return body def wait_for(self, condition): """Repeatedly calls condition() until a timeout.""" start_time = int(time.time()) while True: try: condition() except Exception: pass else: return if int(time.time()) - start_time >= self.build_timeout: condition() return time.sleep(self.build_interval) @classmethod def prepare_instance_network(cls): if (CONF.validation.auth_method != 'disabled' and CONF.validation.connect_method == 'floating'): cls.set_network_resources(network=True, subnet=True, router=True, dhcp=True) @classmethod def create_image_from_server(cls, server_id, **kwargs): """Wrapper utility that returns an image created from the server. If compute microversion >= 2.36, the returned image response will be from the image service API rather than the compute image proxy API. 
""" name = kwargs.pop('name', data_utils.rand_name(cls.__name__ + "-image")) wait_until = kwargs.pop('wait_until', None) wait_for_server = kwargs.pop('wait_for_server', True) image = cls.compute_images_client.create_image(server_id, name=name, **kwargs) if api_version_utils.compare_version_header_to_response( "OpenStack-API-Version", "compute 2.45", image.response, "lt"): image_id = image['image_id'] else: image_id = data_utils.parse_image_id(image.response['location']) # The compute image proxy APIs were deprecated in 2.35 so # use the images client directly if the API microversion being # used is >=2.36. if not cls.is_requested_microversion_compatible('2.35'): client = cls.images_client else: client = cls.compute_images_client cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc, client.delete_image, image_id) if wait_until is not None: try: wait_until = wait_until.upper() if not cls.is_requested_microversion_compatible('2.35'): wait_until = wait_until.lower() waiters.wait_for_image_status(client, image_id, wait_until) except lib_exc.NotFound: if wait_until.upper() == 'ACTIVE': # If the image is not found after create_image returned # that means the snapshot failed in nova-compute and nova # deleted the image. There should be a compute fault # recorded with the server in that case, so get the server # and dump some details. server = ( cls.servers_client.show_server(server_id)['server']) if 'fault' in server: raise exceptions.SnapshotNotFoundException( server['fault'], image_id=image_id) else: raise exceptions.SnapshotNotFoundException( image_id=image_id) else: raise image = client.show_image(image_id) # Compute image client returns response wrapped in 'image' element # which is not the case with Glance image client. if 'image' in image: image = image['image'] if wait_until.upper() == 'ACTIVE': if wait_for_server: waiters.wait_for_server_status(cls.servers_client, server_id, 'ACTIVE') return image @classmethod def recreate_server(cls, server_id, validatable=False, **kwargs): """Destroy an existing class level server and creates a new one Some test classes use a test server that can be used by multiple tests. This is done to optimise runtime and test load. If something goes wrong with the test server, it can be rebuilt using this helper. This helper can also be used for the initial provisioning if no server_id is specified. :param server_id: UUID of the server to be rebuilt. If None is specified, a new server is provisioned. :param validatable: whether to the server needs to be validatable. When True, validation resources are acquired via the `get_class_validation_resources` helper. :param kwargs: extra paramaters are passed through to the `create_test_server` call. :return: the UUID of the created server. 
""" if server_id: cls.delete_server(server_id) cls.password = data_utils.rand_password() server = cls.create_test_server( validatable, validation_resources=cls.get_class_validation_resources( cls.os_primary), wait_until='ACTIVE', adminPass=cls.password, **kwargs) return server['id'] @classmethod def delete_server(cls, server_id): """Deletes an existing server and waits for it to be gone.""" try: cls.servers_client.delete_server(server_id) waiters.wait_for_server_termination(cls.servers_client, server_id) except Exception: LOG.exception('Failed to delete server %s', server_id) def resize_server(self, server_id, new_flavor_id, **kwargs): """resize and confirm_resize an server, waits for it to be ACTIVE.""" self.servers_client.resize_server(server_id, new_flavor_id, **kwargs) waiters.wait_for_server_status(self.servers_client, server_id, 'VERIFY_RESIZE') self.servers_client.confirm_resize_server(server_id) waiters.wait_for_server_status( self.servers_client, server_id, 'ACTIVE') server = self.servers_client.show_server(server_id)['server'] self.assert_flavor_equal(new_flavor_id, server['flavor']) @classmethod def delete_volume(cls, volume_id): """Deletes the given volume and waits for it to be gone.""" try: cls.volumes_client.delete_volume(volume_id) # TODO(mriedem): We should move the wait_for_resource_deletion # into the delete_volume method as a convenience to the caller. cls.volumes_client.wait_for_resource_deletion(volume_id) except lib_exc.NotFound: LOG.warning("Unable to delete volume '%s' since it was not found. " "Maybe it was already deleted?", volume_id) @classmethod def get_server_ip(cls, server, validation_resources=None): """Get the server fixed or floating IP. Based on the configuration we're in, return a correct ip address for validating that a guest is up. :param server: The server dict as returned by the API :param validation_resources: The dict of validation resources provisioned for the server. """ if CONF.validation.connect_method == 'floating': if validation_resources: return validation_resources['floating_ip']['ip'] else: msg = ('When validation.connect_method equals floating, ' 'validation_resources cannot be None') raise lib_exc.InvalidParam(invalid_param=msg) elif CONF.validation.connect_method == 'fixed': addresses = server['addresses'][CONF.validation.network_for_ssh] for address in addresses: if address['version'] == CONF.validation.ip_version_for_ssh: return address['addr'] raise exceptions.ServerUnreachable(server_id=server['id']) else: raise lib_exc.InvalidConfiguration() def setUp(self): super(BaseV2ComputeTest, self).setUp() self.useFixture(api_microversion_fixture.APIMicroversionFixture( compute_microversion=self.request_microversion, volume_microversion=self.volume_request_microversion, placement_microversion=self.placement_request_microversion)) @classmethod def create_volume(cls, image_ref=None, **kwargs): """Create a volume and wait for it to become 'available'. :param image_ref: Specify an image id to create a bootable volume. :param kwargs: other parameters to create volume. :returns: The available volume. 
""" if 'size' not in kwargs: kwargs['size'] = CONF.volume.volume_size if 'display_name' not in kwargs: vol_name = data_utils.rand_name(cls.__name__ + '-volume') kwargs['display_name'] = vol_name if image_ref is not None: kwargs['imageRef'] = image_ref if CONF.compute.compute_volume_common_az: kwargs.setdefault('availability_zone', CONF.compute.compute_volume_common_az) volume = cls.volumes_client.create_volume(**kwargs)['volume'] cls.addClassResourceCleanup( cls.volumes_client.wait_for_resource_deletion, volume['id']) cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc, cls.volumes_client.delete_volume, volume['id']) waiters.wait_for_volume_resource_status(cls.volumes_client, volume['id'], 'available') return volume def _detach_volume(self, server, volume): """Helper method to detach a volume. Ignores 404 responses if the volume or server do not exist, or the volume is already detached from the server. """ try: volume = self.volumes_client.show_volume(volume['id'])['volume'] # Check the status. You can only detach an in-use volume, otherwise # the compute API will return a 400 response. if volume['status'] == 'in-use': self.servers_client.detach_volume(server['id'], volume['id']) except lib_exc.NotFound: # Ignore 404s on detach in case the server is deleted or the volume # is already detached. pass def attach_volume(self, server, volume, device=None, tag=None): """Attaches volume to server and waits for 'in-use' volume status. The volume will be detached when the test tears down. :param server: The server to which the volume will be attached. :param volume: The volume to attach. :param device: Optional mountpoint for the attached volume. Note that this is not guaranteed for all hypervisors and is not recommended. :param tag: Optional device role tag to apply to the volume. """ attach_kwargs = dict(volumeId=volume['id']) if device: attach_kwargs['device'] = device if tag: attach_kwargs['tag'] = tag attachment = self.servers_client.attach_volume( server['id'], **attach_kwargs)['volumeAttachment'] # On teardown detach the volume and for multiattach volumes wait for # the attachment to be removed. For non-multiattach volumes wait for # the state of the volume to change to available. This is so we don't # error out when trying to delete the volume during teardown. if volume['multiattach']: att = waiters.wait_for_volume_attachment_create( self.volumes_client, volume['id'], server['id']) self.addCleanup(waiters.wait_for_volume_attachment_remove, self.volumes_client, volume['id'], att['attachment_id']) else: self.addCleanup(waiters.wait_for_volume_resource_status, self.volumes_client, volume['id'], 'available') waiters.wait_for_volume_resource_status(self.volumes_client, volume['id'], 'in-use') # Ignore 404s on detach in case the server is deleted or the volume # is already detached. 
self.addCleanup(self._detach_volume, server, volume) return attachment def create_volume_snapshot(self, volume_id, name=None, description=None, metadata=None, force=False): name = name or data_utils.rand_name( self.__class__.__name__ + '-snapshot') snapshot = self.snapshots_client.create_snapshot( volume_id=volume_id, force=force, display_name=name, description=description, metadata=metadata)['snapshot'] self.addCleanup(self.snapshots_client.wait_for_resource_deletion, snapshot['id']) self.addCleanup(self.snapshots_client.delete_snapshot, snapshot['id']) waiters.wait_for_volume_resource_status(self.snapshots_client, snapshot['id'], 'available') snapshot = self.snapshots_client.show_snapshot( snapshot['id'])['snapshot'] return snapshot def assert_flavor_equal(self, flavor_id, server_flavor): """Check whether server_flavor equals to flavor. :param flavor_id: flavor id :param server_flavor: flavor info returned by show_server. """ # Nova API > 2.46 no longer includes flavor.id, and schema check # will cover whether 'id' should be in flavor if server_flavor.get('id'): msg = ('server flavor is not same as flavor!') self.assertEqual(flavor_id, server_flavor['id'], msg) else: flavor = self.flavors_client.show_flavor(flavor_id)['flavor'] self.assertEqual(flavor['name'], server_flavor['original_name'], "original_name in server flavor is not same as " "flavor name!") for key in ['ram', 'vcpus', 'disk']: msg = ('attribute %s in server flavor is not same as ' 'flavor!' % key) self.assertEqual(flavor[key], server_flavor[key], msg) class BaseV2ComputeAdminTest(BaseV2ComputeTest): """Base test case class for Compute Admin API tests.""" credentials = ['primary', 'admin'] @classmethod def setup_clients(cls): super(BaseV2ComputeAdminTest, cls).setup_clients() cls.availability_zone_admin_client = ( cls.os_admin.availability_zone_client) cls.admin_flavors_client = cls.os_admin.flavors_client cls.admin_servers_client = cls.os_admin.servers_client cls.image_client = cls.os_admin.image_client_v2 cls.admin_assisted_volume_snapshots_client = \ cls.os_admin.assisted_volume_snapshots_client def create_flavor(self, ram, vcpus, disk, name=None, is_public='True', **kwargs): if name is None: name = data_utils.rand_name(self.__class__.__name__ + "-flavor") id = kwargs.pop('id', data_utils.rand_int_id(start=1000)) client = self.admin_flavors_client flavor = client.create_flavor( ram=ram, vcpus=vcpus, disk=disk, name=name, id=id, is_public=is_public, **kwargs)['flavor'] self.addCleanup(client.wait_for_resource_deletion, flavor['id']) self.addCleanup(client.delete_flavor, flavor['id']) return flavor @classmethod def get_host_for_server(cls, server_id): server_details = cls.admin_servers_client.show_server(server_id) return server_details['server']['OS-EXT-SRV-ATTR:host'] def get_host_other_than(self, server_id): source_host = self.get_host_for_server(server_id) svcs = self.os_admin.services_client.list_services( binary='nova-compute')['services'] hosts = [] for svc in svcs: if svc['state'] == 'up' and svc['status'] == 'enabled': if CONF.compute.compute_volume_common_az: if svc['zone'] == CONF.compute.compute_volume_common_az: hosts.append(svc['host']) else: hosts.append(svc['host']) for target_host in hosts: if source_host != target_host: return target_host
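The admin base class above exposes get_host_for_server and get_host_other_than for host-aware tests. A minimal usage sketch follows (the class and test names are invented for illustration and are not part of this dataset row):

class HostPlacementExampleTest(BaseV2ComputeAdminTest):

    def test_pick_destination_host(self):
        # Boot a throwaway server, find its current host, then look for a
        # different enabled nova-compute host to act as a migration target.
        server = self.create_test_server(wait_until='ACTIVE')
        source = self.get_host_for_server(server['id'])
        target = self.get_host_other_than(server['id'])
        if target is None:
            self.skipTest('only one usable nova-compute host is available')
        self.assertNotEqual(source, target)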
recreate_server
Destroy an existing class level server and create a new one.

Some test classes use a test server that can be used by multiple tests. This is done to optimise runtime and test load. If something goes wrong with the test server, it can be rebuilt using this helper. This helper can also be used for the initial provisioning if no server_id is specified.

:param server_id: UUID of the server to be rebuilt. If None is specified, a new server is provisioned.
:param validatable: whether the server needs to be validatable. When True, validation resources are acquired via the `get_class_validation_resources` helper.
:param kwargs: extra parameters are passed through to the `create_test_server` call.
:return: the UUID of the created server.
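A usage sketch for the helper this docstring describes (the test class name is hypothetical; recreate_server itself appears in the implementation field below): a class-level shared server is provisioned once and rebuilt whenever a previous test leaves it unusable.

class SharedServerExampleTest(BaseV2ComputeTest):

    @classmethod
    def resource_setup(cls):
        super(SharedServerExampleTest, cls).resource_setup()
        # Passing no server_id performs the initial provisioning.
        cls.server_id = cls.recreate_server(None)

    def setUp(self):
        super(SharedServerExampleTest, self).setUp()
        try:
            waiters.wait_for_server_status(self.servers_client,
                                           self.server_id, 'ACTIVE')
        except Exception:
            # The shared server is dirty or gone; destroy and rebuild it.
            self.__class__.server_id = self.recreate_server(self.server_id)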
# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import time from oslo_log import log as logging from tempest.common import compute from tempest.common import waiters from tempest import config from tempest import exceptions from tempest.lib.common import api_microversion_fixture from tempest.lib.common import api_version_request from tempest.lib.common import api_version_utils from tempest.lib.common.utils import data_utils from tempest.lib.common.utils import test_utils from tempest.lib import exceptions as lib_exc import tempest.test CONF = config.CONF LOG = logging.getLogger(__name__) class BaseV2ComputeTest(api_version_utils.BaseMicroversionTest, tempest.test.BaseTestCase): """Base test case class for all Compute API tests.""" force_tenant_isolation = False # Set this to True in subclasses to create a default network. See # https://bugs.launchpad.net/tempest/+bug/1844568 create_default_network = False # TODO(andreaf) We should care also for the alt_manager here # but only once client lazy load in the manager is done credentials = ['primary'] @classmethod def skip_checks(cls): super(BaseV2ComputeTest, cls).skip_checks() if not CONF.service_available.nova: raise cls.skipException("Nova is not available") api_version_utils.check_skip_with_microversion( cls.min_microversion, cls.max_microversion, CONF.compute.min_microversion, CONF.compute.max_microversion) api_version_utils.check_skip_with_microversion( cls.volume_min_microversion, cls.volume_max_microversion, CONF.volume.min_microversion, CONF.volume.max_microversion) api_version_utils.check_skip_with_microversion( cls.placement_min_microversion, cls.placement_max_microversion, CONF.placement.min_microversion, CONF.placement.max_microversion) @classmethod def setup_credentials(cls): # Setting network=True, subnet=True creates a default network cls.set_network_resources( network=cls.create_default_network, subnet=cls.create_default_network) super(BaseV2ComputeTest, cls).setup_credentials() @classmethod def setup_clients(cls): super(BaseV2ComputeTest, cls).setup_clients() cls.servers_client = cls.os_primary.servers_client cls.server_groups_client = cls.os_primary.server_groups_client cls.flavors_client = cls.os_primary.flavors_client cls.compute_images_client = cls.os_primary.compute_images_client cls.extensions_client = cls.os_primary.extensions_client cls.floating_ip_pools_client = cls.os_primary.floating_ip_pools_client cls.floating_ips_client = cls.os_primary.compute_floating_ips_client cls.keypairs_client = cls.os_primary.keypairs_client cls.security_group_rules_client = ( cls.os_primary.compute_security_group_rules_client) cls.security_groups_client =\ cls.os_primary.compute_security_groups_client cls.quotas_client = cls.os_primary.quotas_client cls.compute_networks_client = cls.os_primary.compute_networks_client cls.limits_client = cls.os_primary.limits_client cls.volumes_extensions_client =\ cls.os_primary.volumes_extensions_client cls.snapshots_extensions_client =\ 
cls.os_primary.snapshots_extensions_client cls.interfaces_client = cls.os_primary.interfaces_client cls.fixed_ips_client = cls.os_primary.fixed_ips_client cls.availability_zone_client = cls.os_primary.availability_zone_client cls.agents_client = cls.os_primary.agents_client cls.aggregates_client = cls.os_primary.aggregates_client cls.services_client = cls.os_primary.services_client cls.instance_usages_audit_log_client = ( cls.os_primary.instance_usages_audit_log_client) cls.hypervisor_client = cls.os_primary.hypervisor_client cls.certificates_client = cls.os_primary.certificates_client cls.migrations_client = cls.os_primary.migrations_client cls.security_group_default_rules_client = ( cls.os_primary.security_group_default_rules_client) cls.versions_client = cls.os_primary.compute_versions_client if CONF.service_available.cinder: cls.volumes_client = cls.os_primary.volumes_client_latest cls.attachments_client = cls.os_primary.attachments_client_latest cls.snapshots_client = cls.os_primary.snapshots_client_latest if CONF.service_available.glance: if CONF.image_feature_enabled.api_v1: cls.images_client = cls.os_primary.image_client elif CONF.image_feature_enabled.api_v2: cls.images_client = cls.os_primary.image_client_v2 else: raise lib_exc.InvalidConfiguration( 'Either api_v1 or api_v2 must be True in ' '[image-feature-enabled].') cls._check_depends_on_nova_network() @classmethod def _check_depends_on_nova_network(cls): # Since nova-network APIs were removed from Nova in the Rocky release, # determine, based on the max version from the version document, if # the compute API is >Queens and if so, skip tests that rely on # nova-network. if not getattr(cls, 'depends_on_nova_network', False): return versions = cls.versions_client.list_versions()['versions'] # Find the v2.1 version which will tell us our max version for the # compute API we're testing against. for version in versions: if version['id'] == 'v2.1': max_version = api_version_request.APIVersionRequest( version['version']) break else: LOG.warning( 'Unable to determine max v2.1 compute API version: %s', versions) return # The max compute API version in Queens is 2.60 so we cap # at that version. 
queens = api_version_request.APIVersionRequest('2.60') if max_version > queens: raise cls.skipException('nova-network is gone') @classmethod def resource_setup(cls): super(BaseV2ComputeTest, cls).resource_setup() cls.request_microversion = ( api_version_utils.select_request_microversion( cls.min_microversion, CONF.compute.min_microversion)) cls.volume_request_microversion = ( api_version_utils.select_request_microversion( cls.volume_min_microversion, CONF.volume.min_microversion)) cls.placement_request_microversion = ( api_version_utils.select_request_microversion( cls.placement_min_microversion, CONF.placement.min_microversion)) cls.build_interval = CONF.compute.build_interval cls.build_timeout = CONF.compute.build_timeout cls.image_ref = CONF.compute.image_ref cls.image_ref_alt = CONF.compute.image_ref_alt cls.flavor_ref = CONF.compute.flavor_ref cls.flavor_ref_alt = CONF.compute.flavor_ref_alt cls.ssh_user = CONF.validation.image_ssh_user cls.ssh_alt_user = CONF.validation.image_alt_ssh_user cls.image_ssh_user = CONF.validation.image_ssh_user cls.image_alt_ssh_user = CONF.validation.image_alt_ssh_user cls.image_ssh_password = CONF.validation.image_ssh_password cls.image_alt_ssh_password = CONF.validation.image_alt_ssh_password @classmethod def is_requested_microversion_compatible(cls, max_version): """Check the compatibility of selected request microversion This method will check if selected request microversion (cls.request_microversion) for test is compatible with respect to 'max_version'. Compatible means if selected request microversion is in the range(<=) of 'max_version'. :param max_version: maximum microversion to compare for compatibility. Example: '2.30' :returns: True if selected request microversion is compatible with 'max_version'. False in other case. """ try: req_version_obj = api_version_request.APIVersionRequest( cls.request_microversion) # NOTE(gmann): This is case where this method is used before calling # resource_setup(), where cls.request_microversion is set. There may # not be any such case but still we can handle this case. except AttributeError: request_microversion = ( api_version_utils.select_request_microversion( cls.min_microversion, CONF.compute.min_microversion)) req_version_obj = api_version_request.APIVersionRequest( request_microversion) max_version_obj = api_version_request.APIVersionRequest(max_version) return req_version_obj <= max_version_obj @classmethod def server_check_teardown(cls): """Checks is the shared server clean enough for subsequent test. Method will delete the server when it's dirty. The setUp method is responsible for creating a new server. Exceptions raised in tearDown class are fails the test case, This method supposed to use only by tearDown methods, when the shared server_id is stored in the server_id of the class. """ if getattr(cls, 'server_id', None) is not None: try: waiters.wait_for_server_status(cls.servers_client, cls.server_id, 'ACTIVE') except Exception as exc: LOG.exception(exc) cls.servers_client.delete_server(cls.server_id) waiters.wait_for_server_termination(cls.servers_client, cls.server_id) cls.server_id = None raise @classmethod def create_test_server(cls, validatable=False, volume_backed=False, validation_resources=None, clients=None, **kwargs): """Wrapper utility that returns a test server. This wrapper utility calls the common create test server and returns a test server. The purpose of this wrapper is to minimize the impact on the code of the tests already using this function. 
:param validatable: Whether the server will be pingable or sshable. :param volume_backed: Whether the instance is volume backed or not. :param validation_resources: Dictionary of validation resources as returned by `get_class_validation_resources`. :param clients: Client manager, defaults to os_primary. :param kwargs: Extra arguments are passed down to the `compute.create_test_server` call. """ if 'name' not in kwargs: kwargs['name'] = data_utils.rand_name(cls.__name__ + "-server") request_version = api_version_request.APIVersionRequest( cls.request_microversion) v2_37_version = api_version_request.APIVersionRequest('2.37') tenant_network = cls.get_tenant_network() # NOTE(snikitin): since microversion v2.37 'networks' field is required if (request_version >= v2_37_version and 'networks' not in kwargs and not tenant_network): kwargs['networks'] = 'none' if clients is None: clients = cls.os_primary body, servers = compute.create_test_server( clients, validatable, validation_resources=validation_resources, tenant_network=tenant_network, volume_backed=volume_backed, **kwargs) # For each server schedule wait and delete, so we first delete all # and then wait for all for server in servers: cls.addClassResourceCleanup(waiters.wait_for_server_termination, clients.servers_client, server['id']) for server in servers: cls.addClassResourceCleanup( test_utils.call_and_ignore_notfound_exc, clients.servers_client.delete_server, server['id']) return body @classmethod def create_security_group(cls, name=None, description=None): if name is None: name = data_utils.rand_name(cls.__name__ + "-securitygroup") if description is None: description = data_utils.rand_name('description') body = cls.security_groups_client.create_security_group( name=name, description=description)['security_group'] cls.addClassResourceCleanup( test_utils.call_and_ignore_notfound_exc, cls.security_groups_client.delete_security_group, body['id']) return body @classmethod def create_test_server_group(cls, name="", policy=None): if not name: name = data_utils.rand_name(cls.__name__ + "-Server-Group") if policy is None: policy = ['affinity'] body = cls.server_groups_client.create_server_group( name=name, policies=policy)['server_group'] cls.addClassResourceCleanup( test_utils.call_and_ignore_notfound_exc, cls.server_groups_client.delete_server_group, body['id']) return body def wait_for(self, condition): """Repeatedly calls condition() until a timeout.""" start_time = int(time.time()) while True: try: condition() except Exception: pass else: return if int(time.time()) - start_time >= self.build_timeout: condition() return time.sleep(self.build_interval) @classmethod def prepare_instance_network(cls): if (CONF.validation.auth_method != 'disabled' and CONF.validation.connect_method == 'floating'): cls.set_network_resources(network=True, subnet=True, router=True, dhcp=True) @classmethod def create_image_from_server(cls, server_id, **kwargs): """Wrapper utility that returns an image created from the server. If compute microversion >= 2.36, the returned image response will be from the image service API rather than the compute image proxy API. 
""" name = kwargs.pop('name', data_utils.rand_name(cls.__name__ + "-image")) wait_until = kwargs.pop('wait_until', None) wait_for_server = kwargs.pop('wait_for_server', True) image = cls.compute_images_client.create_image(server_id, name=name, **kwargs) if api_version_utils.compare_version_header_to_response( "OpenStack-API-Version", "compute 2.45", image.response, "lt"): image_id = image['image_id'] else: image_id = data_utils.parse_image_id(image.response['location']) # The compute image proxy APIs were deprecated in 2.35 so # use the images client directly if the API microversion being # used is >=2.36. if not cls.is_requested_microversion_compatible('2.35'): client = cls.images_client else: client = cls.compute_images_client cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc, client.delete_image, image_id) if wait_until is not None: try: wait_until = wait_until.upper() if not cls.is_requested_microversion_compatible('2.35'): wait_until = wait_until.lower() waiters.wait_for_image_status(client, image_id, wait_until) except lib_exc.NotFound: if wait_until.upper() == 'ACTIVE': # If the image is not found after create_image returned # that means the snapshot failed in nova-compute and nova # deleted the image. There should be a compute fault # recorded with the server in that case, so get the server # and dump some details. server = ( cls.servers_client.show_server(server_id)['server']) if 'fault' in server: raise exceptions.SnapshotNotFoundException( server['fault'], image_id=image_id) else: raise exceptions.SnapshotNotFoundException( image_id=image_id) else: raise image = client.show_image(image_id) # Compute image client returns response wrapped in 'image' element # which is not the case with Glance image client. if 'image' in image: image = image['image'] if wait_until.upper() == 'ACTIVE': if wait_for_server: waiters.wait_for_server_status(cls.servers_client, server_id, 'ACTIVE') return image # MASKED: recreate_server function (lines 402-434) @classmethod def delete_server(cls, server_id): """Deletes an existing server and waits for it to be gone.""" try: cls.servers_client.delete_server(server_id) waiters.wait_for_server_termination(cls.servers_client, server_id) except Exception: LOG.exception('Failed to delete server %s', server_id) def resize_server(self, server_id, new_flavor_id, **kwargs): """resize and confirm_resize an server, waits for it to be ACTIVE.""" self.servers_client.resize_server(server_id, new_flavor_id, **kwargs) waiters.wait_for_server_status(self.servers_client, server_id, 'VERIFY_RESIZE') self.servers_client.confirm_resize_server(server_id) waiters.wait_for_server_status( self.servers_client, server_id, 'ACTIVE') server = self.servers_client.show_server(server_id)['server'] self.assert_flavor_equal(new_flavor_id, server['flavor']) @classmethod def delete_volume(cls, volume_id): """Deletes the given volume and waits for it to be gone.""" try: cls.volumes_client.delete_volume(volume_id) # TODO(mriedem): We should move the wait_for_resource_deletion # into the delete_volume method as a convenience to the caller. cls.volumes_client.wait_for_resource_deletion(volume_id) except lib_exc.NotFound: LOG.warning("Unable to delete volume '%s' since it was not found. " "Maybe it was already deleted?", volume_id) @classmethod def get_server_ip(cls, server, validation_resources=None): """Get the server fixed or floating IP. Based on the configuration we're in, return a correct ip address for validating that a guest is up. 
:param server: The server dict as returned by the API :param validation_resources: The dict of validation resources provisioned for the server. """ if CONF.validation.connect_method == 'floating': if validation_resources: return validation_resources['floating_ip']['ip'] else: msg = ('When validation.connect_method equals floating, ' 'validation_resources cannot be None') raise lib_exc.InvalidParam(invalid_param=msg) elif CONF.validation.connect_method == 'fixed': addresses = server['addresses'][CONF.validation.network_for_ssh] for address in addresses: if address['version'] == CONF.validation.ip_version_for_ssh: return address['addr'] raise exceptions.ServerUnreachable(server_id=server['id']) else: raise lib_exc.InvalidConfiguration() def setUp(self): super(BaseV2ComputeTest, self).setUp() self.useFixture(api_microversion_fixture.APIMicroversionFixture( compute_microversion=self.request_microversion, volume_microversion=self.volume_request_microversion, placement_microversion=self.placement_request_microversion)) @classmethod def create_volume(cls, image_ref=None, **kwargs): """Create a volume and wait for it to become 'available'. :param image_ref: Specify an image id to create a bootable volume. :param kwargs: other parameters to create volume. :returns: The available volume. """ if 'size' not in kwargs: kwargs['size'] = CONF.volume.volume_size if 'display_name' not in kwargs: vol_name = data_utils.rand_name(cls.__name__ + '-volume') kwargs['display_name'] = vol_name if image_ref is not None: kwargs['imageRef'] = image_ref if CONF.compute.compute_volume_common_az: kwargs.setdefault('availability_zone', CONF.compute.compute_volume_common_az) volume = cls.volumes_client.create_volume(**kwargs)['volume'] cls.addClassResourceCleanup( cls.volumes_client.wait_for_resource_deletion, volume['id']) cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc, cls.volumes_client.delete_volume, volume['id']) waiters.wait_for_volume_resource_status(cls.volumes_client, volume['id'], 'available') return volume def _detach_volume(self, server, volume): """Helper method to detach a volume. Ignores 404 responses if the volume or server do not exist, or the volume is already detached from the server. """ try: volume = self.volumes_client.show_volume(volume['id'])['volume'] # Check the status. You can only detach an in-use volume, otherwise # the compute API will return a 400 response. if volume['status'] == 'in-use': self.servers_client.detach_volume(server['id'], volume['id']) except lib_exc.NotFound: # Ignore 404s on detach in case the server is deleted or the volume # is already detached. pass def attach_volume(self, server, volume, device=None, tag=None): """Attaches volume to server and waits for 'in-use' volume status. The volume will be detached when the test tears down. :param server: The server to which the volume will be attached. :param volume: The volume to attach. :param device: Optional mountpoint for the attached volume. Note that this is not guaranteed for all hypervisors and is not recommended. :param tag: Optional device role tag to apply to the volume. """ attach_kwargs = dict(volumeId=volume['id']) if device: attach_kwargs['device'] = device if tag: attach_kwargs['tag'] = tag attachment = self.servers_client.attach_volume( server['id'], **attach_kwargs)['volumeAttachment'] # On teardown detach the volume and for multiattach volumes wait for # the attachment to be removed. For non-multiattach volumes wait for # the state of the volume to change to available. 
This is so we don't # error out when trying to delete the volume during teardown. if volume['multiattach']: att = waiters.wait_for_volume_attachment_create( self.volumes_client, volume['id'], server['id']) self.addCleanup(waiters.wait_for_volume_attachment_remove, self.volumes_client, volume['id'], att['attachment_id']) else: self.addCleanup(waiters.wait_for_volume_resource_status, self.volumes_client, volume['id'], 'available') waiters.wait_for_volume_resource_status(self.volumes_client, volume['id'], 'in-use') # Ignore 404s on detach in case the server is deleted or the volume # is already detached. self.addCleanup(self._detach_volume, server, volume) return attachment def create_volume_snapshot(self, volume_id, name=None, description=None, metadata=None, force=False): name = name or data_utils.rand_name( self.__class__.__name__ + '-snapshot') snapshot = self.snapshots_client.create_snapshot( volume_id=volume_id, force=force, display_name=name, description=description, metadata=metadata)['snapshot'] self.addCleanup(self.snapshots_client.wait_for_resource_deletion, snapshot['id']) self.addCleanup(self.snapshots_client.delete_snapshot, snapshot['id']) waiters.wait_for_volume_resource_status(self.snapshots_client, snapshot['id'], 'available') snapshot = self.snapshots_client.show_snapshot( snapshot['id'])['snapshot'] return snapshot def assert_flavor_equal(self, flavor_id, server_flavor): """Check whether server_flavor equals to flavor. :param flavor_id: flavor id :param server_flavor: flavor info returned by show_server. """ # Nova API > 2.46 no longer includes flavor.id, and schema check # will cover whether 'id' should be in flavor if server_flavor.get('id'): msg = ('server flavor is not same as flavor!') self.assertEqual(flavor_id, server_flavor['id'], msg) else: flavor = self.flavors_client.show_flavor(flavor_id)['flavor'] self.assertEqual(flavor['name'], server_flavor['original_name'], "original_name in server flavor is not same as " "flavor name!") for key in ['ram', 'vcpus', 'disk']: msg = ('attribute %s in server flavor is not same as ' 'flavor!' 
% key) self.assertEqual(flavor[key], server_flavor[key], msg) class BaseV2ComputeAdminTest(BaseV2ComputeTest): """Base test case class for Compute Admin API tests.""" credentials = ['primary', 'admin'] @classmethod def setup_clients(cls): super(BaseV2ComputeAdminTest, cls).setup_clients() cls.availability_zone_admin_client = ( cls.os_admin.availability_zone_client) cls.admin_flavors_client = cls.os_admin.flavors_client cls.admin_servers_client = cls.os_admin.servers_client cls.image_client = cls.os_admin.image_client_v2 cls.admin_assisted_volume_snapshots_client = \ cls.os_admin.assisted_volume_snapshots_client def create_flavor(self, ram, vcpus, disk, name=None, is_public='True', **kwargs): if name is None: name = data_utils.rand_name(self.__class__.__name__ + "-flavor") id = kwargs.pop('id', data_utils.rand_int_id(start=1000)) client = self.admin_flavors_client flavor = client.create_flavor( ram=ram, vcpus=vcpus, disk=disk, name=name, id=id, is_public=is_public, **kwargs)['flavor'] self.addCleanup(client.wait_for_resource_deletion, flavor['id']) self.addCleanup(client.delete_flavor, flavor['id']) return flavor @classmethod def get_host_for_server(cls, server_id): server_details = cls.admin_servers_client.show_server(server_id) return server_details['server']['OS-EXT-SRV-ATTR:host'] def get_host_other_than(self, server_id): source_host = self.get_host_for_server(server_id) svcs = self.os_admin.services_client.list_services( binary='nova-compute')['services'] hosts = [] for svc in svcs: if svc['state'] == 'up' and svc['status'] == 'enabled': if CONF.compute.compute_volume_common_az: if svc['zone'] == CONF.compute.compute_volume_common_az: hosts.append(svc['host']) else: hosts.append(svc['host']) for target_host in hosts: if source_host != target_host: return target_host
@classmethod
def recreate_server(cls, server_id, validatable=False, **kwargs):
    """Destroy an existing class level server and create a new one.

    Some test classes use a test server that can be used by multiple
    tests. This is done to optimise runtime and test load. If something
    goes wrong with the test server, it can be rebuilt using this helper.

    This helper can also be used for the initial provisioning if no
    server_id is specified.

    :param server_id: UUID of the server to be rebuilt. If None is
        specified, a new server is provisioned.
    :param validatable: whether the server needs to be validatable. When
        True, validation resources are acquired via the
        `get_class_validation_resources` helper.
    :param kwargs: extra parameters are passed through to the
        `create_test_server` call.
    :return: the UUID of the created server.
    """
    if server_id:
        cls.delete_server(server_id)

    cls.password = data_utils.rand_password()
    server = cls.create_test_server(
        validatable,
        validation_resources=cls.get_class_validation_resources(
            cls.os_primary),
        wait_until='ACTIVE',
        adminPass=cls.password,
        **kwargs)
    return server['id']
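An illustrative follow-up to the implementation above (not part of the dataset row; it assumes tempest's RemoteClient helper, imported as `from tempest.common.utils.linux import remote_client`, and a guest reachable under the configured validation settings): the adminPass stored on cls.password is what a validation-enabled subclass could use to log into the rebuilt server.

    def _check_rebuilt_server_login(self, server, validation_resources):
        # self.password was set by recreate_server via adminPass at boot time.
        ip = self.get_server_ip(server, validation_resources)
        linux_client = remote_client.RemoteClient(
            ip, self.ssh_user, password=self.password)
        # Running any trivial command proves the injected password works.
        linux_client.exec_command('hostname')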
402
434
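The two integers above are the start and end lines of the span named in the masked_code marker. A small illustrative sketch (assuming, as the marker text suggests, that they are 1-based and inclusive) of how a consumer of this dataset could pull that span out of file_content:

def extract_masked_span(file_content, start_line, end_line):
    # Return the lines the MASKED marker refers to; for this row the span
    # should correspond to the recreate_server implementation above.
    lines = file_content.splitlines()
    return '\n'.join(lines[start_line - 1:end_line])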
# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import time from oslo_log import log as logging from tempest.common import compute from tempest.common import waiters from tempest import config from tempest import exceptions from tempest.lib.common import api_microversion_fixture from tempest.lib.common import api_version_request from tempest.lib.common import api_version_utils from tempest.lib.common.utils import data_utils from tempest.lib.common.utils import test_utils from tempest.lib import exceptions as lib_exc import tempest.test CONF = config.CONF LOG = logging.getLogger(__name__) class BaseV2ComputeTest(api_version_utils.BaseMicroversionTest, tempest.test.BaseTestCase): """Base test case class for all Compute API tests.""" force_tenant_isolation = False # Set this to True in subclasses to create a default network. See # https://bugs.launchpad.net/tempest/+bug/1844568 create_default_network = False # TODO(andreaf) We should care also for the alt_manager here # but only once client lazy load in the manager is done credentials = ['primary'] @classmethod def skip_checks(cls): super(BaseV2ComputeTest, cls).skip_checks() if not CONF.service_available.nova: raise cls.skipException("Nova is not available") api_version_utils.check_skip_with_microversion( cls.min_microversion, cls.max_microversion, CONF.compute.min_microversion, CONF.compute.max_microversion) api_version_utils.check_skip_with_microversion( cls.volume_min_microversion, cls.volume_max_microversion, CONF.volume.min_microversion, CONF.volume.max_microversion) api_version_utils.check_skip_with_microversion( cls.placement_min_microversion, cls.placement_max_microversion, CONF.placement.min_microversion, CONF.placement.max_microversion) @classmethod def setup_credentials(cls): # Setting network=True, subnet=True creates a default network cls.set_network_resources( network=cls.create_default_network, subnet=cls.create_default_network) super(BaseV2ComputeTest, cls).setup_credentials() @classmethod def setup_clients(cls): super(BaseV2ComputeTest, cls).setup_clients() cls.servers_client = cls.os_primary.servers_client cls.server_groups_client = cls.os_primary.server_groups_client cls.flavors_client = cls.os_primary.flavors_client cls.compute_images_client = cls.os_primary.compute_images_client cls.extensions_client = cls.os_primary.extensions_client cls.floating_ip_pools_client = cls.os_primary.floating_ip_pools_client cls.floating_ips_client = cls.os_primary.compute_floating_ips_client cls.keypairs_client = cls.os_primary.keypairs_client cls.security_group_rules_client = ( cls.os_primary.compute_security_group_rules_client) cls.security_groups_client =\ cls.os_primary.compute_security_groups_client cls.quotas_client = cls.os_primary.quotas_client cls.compute_networks_client = cls.os_primary.compute_networks_client cls.limits_client = cls.os_primary.limits_client cls.volumes_extensions_client =\ cls.os_primary.volumes_extensions_client cls.snapshots_extensions_client =\ 
cls.os_primary.snapshots_extensions_client cls.interfaces_client = cls.os_primary.interfaces_client cls.fixed_ips_client = cls.os_primary.fixed_ips_client cls.availability_zone_client = cls.os_primary.availability_zone_client cls.agents_client = cls.os_primary.agents_client cls.aggregates_client = cls.os_primary.aggregates_client cls.services_client = cls.os_primary.services_client cls.instance_usages_audit_log_client = ( cls.os_primary.instance_usages_audit_log_client) cls.hypervisor_client = cls.os_primary.hypervisor_client cls.certificates_client = cls.os_primary.certificates_client cls.migrations_client = cls.os_primary.migrations_client cls.security_group_default_rules_client = ( cls.os_primary.security_group_default_rules_client) cls.versions_client = cls.os_primary.compute_versions_client if CONF.service_available.cinder: cls.volumes_client = cls.os_primary.volumes_client_latest cls.attachments_client = cls.os_primary.attachments_client_latest cls.snapshots_client = cls.os_primary.snapshots_client_latest if CONF.service_available.glance: if CONF.image_feature_enabled.api_v1: cls.images_client = cls.os_primary.image_client elif CONF.image_feature_enabled.api_v2: cls.images_client = cls.os_primary.image_client_v2 else: raise lib_exc.InvalidConfiguration( 'Either api_v1 or api_v2 must be True in ' '[image-feature-enabled].') cls._check_depends_on_nova_network() @classmethod def _check_depends_on_nova_network(cls): # Since nova-network APIs were removed from Nova in the Rocky release, # determine, based on the max version from the version document, if # the compute API is >Queens and if so, skip tests that rely on # nova-network. if not getattr(cls, 'depends_on_nova_network', False): return versions = cls.versions_client.list_versions()['versions'] # Find the v2.1 version which will tell us our max version for the # compute API we're testing against. for version in versions: if version['id'] == 'v2.1': max_version = api_version_request.APIVersionRequest( version['version']) break else: LOG.warning( 'Unable to determine max v2.1 compute API version: %s', versions) return # The max compute API version in Queens is 2.60 so we cap # at that version. 
queens = api_version_request.APIVersionRequest('2.60') if max_version > queens: raise cls.skipException('nova-network is gone') @classmethod def resource_setup(cls): super(BaseV2ComputeTest, cls).resource_setup() cls.request_microversion = ( api_version_utils.select_request_microversion( cls.min_microversion, CONF.compute.min_microversion)) cls.volume_request_microversion = ( api_version_utils.select_request_microversion( cls.volume_min_microversion, CONF.volume.min_microversion)) cls.placement_request_microversion = ( api_version_utils.select_request_microversion( cls.placement_min_microversion, CONF.placement.min_microversion)) cls.build_interval = CONF.compute.build_interval cls.build_timeout = CONF.compute.build_timeout cls.image_ref = CONF.compute.image_ref cls.image_ref_alt = CONF.compute.image_ref_alt cls.flavor_ref = CONF.compute.flavor_ref cls.flavor_ref_alt = CONF.compute.flavor_ref_alt cls.ssh_user = CONF.validation.image_ssh_user cls.ssh_alt_user = CONF.validation.image_alt_ssh_user cls.image_ssh_user = CONF.validation.image_ssh_user cls.image_alt_ssh_user = CONF.validation.image_alt_ssh_user cls.image_ssh_password = CONF.validation.image_ssh_password cls.image_alt_ssh_password = CONF.validation.image_alt_ssh_password @classmethod def is_requested_microversion_compatible(cls, max_version): """Check the compatibility of selected request microversion This method will check if selected request microversion (cls.request_microversion) for test is compatible with respect to 'max_version'. Compatible means if selected request microversion is in the range(<=) of 'max_version'. :param max_version: maximum microversion to compare for compatibility. Example: '2.30' :returns: True if selected request microversion is compatible with 'max_version'. False in other case. """ try: req_version_obj = api_version_request.APIVersionRequest( cls.request_microversion) # NOTE(gmann): This is case where this method is used before calling # resource_setup(), where cls.request_microversion is set. There may # not be any such case but still we can handle this case. except AttributeError: request_microversion = ( api_version_utils.select_request_microversion( cls.min_microversion, CONF.compute.min_microversion)) req_version_obj = api_version_request.APIVersionRequest( request_microversion) max_version_obj = api_version_request.APIVersionRequest(max_version) return req_version_obj <= max_version_obj @classmethod def server_check_teardown(cls): """Checks is the shared server clean enough for subsequent test. Method will delete the server when it's dirty. The setUp method is responsible for creating a new server. Exceptions raised in tearDown class are fails the test case, This method supposed to use only by tearDown methods, when the shared server_id is stored in the server_id of the class. """ if getattr(cls, 'server_id', None) is not None: try: waiters.wait_for_server_status(cls.servers_client, cls.server_id, 'ACTIVE') except Exception as exc: LOG.exception(exc) cls.servers_client.delete_server(cls.server_id) waiters.wait_for_server_termination(cls.servers_client, cls.server_id) cls.server_id = None raise @classmethod def create_test_server(cls, validatable=False, volume_backed=False, validation_resources=None, clients=None, **kwargs): """Wrapper utility that returns a test server. This wrapper utility calls the common create test server and returns a test server. The purpose of this wrapper is to minimize the impact on the code of the tests already using this function. 
:param validatable: Whether the server will be pingable or sshable. :param volume_backed: Whether the instance is volume backed or not. :param validation_resources: Dictionary of validation resources as returned by `get_class_validation_resources`. :param clients: Client manager, defaults to os_primary. :param kwargs: Extra arguments are passed down to the `compute.create_test_server` call. """ if 'name' not in kwargs: kwargs['name'] = data_utils.rand_name(cls.__name__ + "-server") request_version = api_version_request.APIVersionRequest( cls.request_microversion) v2_37_version = api_version_request.APIVersionRequest('2.37') tenant_network = cls.get_tenant_network() # NOTE(snikitin): since microversion v2.37 'networks' field is required if (request_version >= v2_37_version and 'networks' not in kwargs and not tenant_network): kwargs['networks'] = 'none' if clients is None: clients = cls.os_primary body, servers = compute.create_test_server( clients, validatable, validation_resources=validation_resources, tenant_network=tenant_network, volume_backed=volume_backed, **kwargs) # For each server schedule wait and delete, so we first delete all # and then wait for all for server in servers: cls.addClassResourceCleanup(waiters.wait_for_server_termination, clients.servers_client, server['id']) for server in servers: cls.addClassResourceCleanup( test_utils.call_and_ignore_notfound_exc, clients.servers_client.delete_server, server['id']) return body @classmethod def create_security_group(cls, name=None, description=None): if name is None: name = data_utils.rand_name(cls.__name__ + "-securitygroup") if description is None: description = data_utils.rand_name('description') body = cls.security_groups_client.create_security_group( name=name, description=description)['security_group'] cls.addClassResourceCleanup( test_utils.call_and_ignore_notfound_exc, cls.security_groups_client.delete_security_group, body['id']) return body @classmethod def create_test_server_group(cls, name="", policy=None): if not name: name = data_utils.rand_name(cls.__name__ + "-Server-Group") if policy is None: policy = ['affinity'] body = cls.server_groups_client.create_server_group( name=name, policies=policy)['server_group'] cls.addClassResourceCleanup( test_utils.call_and_ignore_notfound_exc, cls.server_groups_client.delete_server_group, body['id']) return body def wait_for(self, condition): """Repeatedly calls condition() until a timeout.""" start_time = int(time.time()) while True: try: condition() except Exception: pass else: return if int(time.time()) - start_time >= self.build_timeout: condition() return time.sleep(self.build_interval) @classmethod def prepare_instance_network(cls): if (CONF.validation.auth_method != 'disabled' and CONF.validation.connect_method == 'floating'): cls.set_network_resources(network=True, subnet=True, router=True, dhcp=True) @classmethod def create_image_from_server(cls, server_id, **kwargs): """Wrapper utility that returns an image created from the server. If compute microversion >= 2.36, the returned image response will be from the image service API rather than the compute image proxy API. 
""" name = kwargs.pop('name', data_utils.rand_name(cls.__name__ + "-image")) wait_until = kwargs.pop('wait_until', None) wait_for_server = kwargs.pop('wait_for_server', True) image = cls.compute_images_client.create_image(server_id, name=name, **kwargs) if api_version_utils.compare_version_header_to_response( "OpenStack-API-Version", "compute 2.45", image.response, "lt"): image_id = image['image_id'] else: image_id = data_utils.parse_image_id(image.response['location']) # The compute image proxy APIs were deprecated in 2.35 so # use the images client directly if the API microversion being # used is >=2.36. if not cls.is_requested_microversion_compatible('2.35'): client = cls.images_client else: client = cls.compute_images_client cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc, client.delete_image, image_id) if wait_until is not None: try: wait_until = wait_until.upper() if not cls.is_requested_microversion_compatible('2.35'): wait_until = wait_until.lower() waiters.wait_for_image_status(client, image_id, wait_until) except lib_exc.NotFound: if wait_until.upper() == 'ACTIVE': # If the image is not found after create_image returned # that means the snapshot failed in nova-compute and nova # deleted the image. There should be a compute fault # recorded with the server in that case, so get the server # and dump some details. server = ( cls.servers_client.show_server(server_id)['server']) if 'fault' in server: raise exceptions.SnapshotNotFoundException( server['fault'], image_id=image_id) else: raise exceptions.SnapshotNotFoundException( image_id=image_id) else: raise image = client.show_image(image_id) # Compute image client returns response wrapped in 'image' element # which is not the case with Glance image client. if 'image' in image: image = image['image'] if wait_until.upper() == 'ACTIVE': if wait_for_server: waiters.wait_for_server_status(cls.servers_client, server_id, 'ACTIVE') return image @classmethod def recreate_server(cls, server_id, validatable=False, **kwargs): """Destroy an existing class level server and creates a new one Some test classes use a test server that can be used by multiple tests. This is done to optimise runtime and test load. If something goes wrong with the test server, it can be rebuilt using this helper. This helper can also be used for the initial provisioning if no server_id is specified. :param server_id: UUID of the server to be rebuilt. If None is specified, a new server is provisioned. :param validatable: whether to the server needs to be validatable. When True, validation resources are acquired via the `get_class_validation_resources` helper. :param kwargs: extra paramaters are passed through to the `create_test_server` call. :return: the UUID of the created server. 
""" if server_id: cls.delete_server(server_id) cls.password = data_utils.rand_password() server = cls.create_test_server( validatable, validation_resources=cls.get_class_validation_resources( cls.os_primary), wait_until='ACTIVE', adminPass=cls.password, **kwargs) return server['id'] @classmethod def delete_server(cls, server_id): """Deletes an existing server and waits for it to be gone.""" try: cls.servers_client.delete_server(server_id) waiters.wait_for_server_termination(cls.servers_client, server_id) except Exception: LOG.exception('Failed to delete server %s', server_id) def resize_server(self, server_id, new_flavor_id, **kwargs): """resize and confirm_resize an server, waits for it to be ACTIVE.""" self.servers_client.resize_server(server_id, new_flavor_id, **kwargs) waiters.wait_for_server_status(self.servers_client, server_id, 'VERIFY_RESIZE') self.servers_client.confirm_resize_server(server_id) waiters.wait_for_server_status( self.servers_client, server_id, 'ACTIVE') server = self.servers_client.show_server(server_id)['server'] self.assert_flavor_equal(new_flavor_id, server['flavor']) @classmethod def delete_volume(cls, volume_id): """Deletes the given volume and waits for it to be gone.""" try: cls.volumes_client.delete_volume(volume_id) # TODO(mriedem): We should move the wait_for_resource_deletion # into the delete_volume method as a convenience to the caller. cls.volumes_client.wait_for_resource_deletion(volume_id) except lib_exc.NotFound: LOG.warning("Unable to delete volume '%s' since it was not found. " "Maybe it was already deleted?", volume_id) @classmethod def get_server_ip(cls, server, validation_resources=None): """Get the server fixed or floating IP. Based on the configuration we're in, return a correct ip address for validating that a guest is up. :param server: The server dict as returned by the API :param validation_resources: The dict of validation resources provisioned for the server. """ if CONF.validation.connect_method == 'floating': if validation_resources: return validation_resources['floating_ip']['ip'] else: msg = ('When validation.connect_method equals floating, ' 'validation_resources cannot be None') raise lib_exc.InvalidParam(invalid_param=msg) elif CONF.validation.connect_method == 'fixed': addresses = server['addresses'][CONF.validation.network_for_ssh] for address in addresses: if address['version'] == CONF.validation.ip_version_for_ssh: return address['addr'] raise exceptions.ServerUnreachable(server_id=server['id']) else: raise lib_exc.InvalidConfiguration() def setUp(self): super(BaseV2ComputeTest, self).setUp() self.useFixture(api_microversion_fixture.APIMicroversionFixture( compute_microversion=self.request_microversion, volume_microversion=self.volume_request_microversion, placement_microversion=self.placement_request_microversion)) @classmethod def create_volume(cls, image_ref=None, **kwargs): """Create a volume and wait for it to become 'available'. :param image_ref: Specify an image id to create a bootable volume. :param kwargs: other parameters to create volume. :returns: The available volume. 
""" if 'size' not in kwargs: kwargs['size'] = CONF.volume.volume_size if 'display_name' not in kwargs: vol_name = data_utils.rand_name(cls.__name__ + '-volume') kwargs['display_name'] = vol_name if image_ref is not None: kwargs['imageRef'] = image_ref if CONF.compute.compute_volume_common_az: kwargs.setdefault('availability_zone', CONF.compute.compute_volume_common_az) volume = cls.volumes_client.create_volume(**kwargs)['volume'] cls.addClassResourceCleanup( cls.volumes_client.wait_for_resource_deletion, volume['id']) cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc, cls.volumes_client.delete_volume, volume['id']) waiters.wait_for_volume_resource_status(cls.volumes_client, volume['id'], 'available') return volume def _detach_volume(self, server, volume): """Helper method to detach a volume. Ignores 404 responses if the volume or server do not exist, or the volume is already detached from the server. """ try: volume = self.volumes_client.show_volume(volume['id'])['volume'] # Check the status. You can only detach an in-use volume, otherwise # the compute API will return a 400 response. if volume['status'] == 'in-use': self.servers_client.detach_volume(server['id'], volume['id']) except lib_exc.NotFound: # Ignore 404s on detach in case the server is deleted or the volume # is already detached. pass def attach_volume(self, server, volume, device=None, tag=None): """Attaches volume to server and waits for 'in-use' volume status. The volume will be detached when the test tears down. :param server: The server to which the volume will be attached. :param volume: The volume to attach. :param device: Optional mountpoint for the attached volume. Note that this is not guaranteed for all hypervisors and is not recommended. :param tag: Optional device role tag to apply to the volume. """ attach_kwargs = dict(volumeId=volume['id']) if device: attach_kwargs['device'] = device if tag: attach_kwargs['tag'] = tag attachment = self.servers_client.attach_volume( server['id'], **attach_kwargs)['volumeAttachment'] # On teardown detach the volume and for multiattach volumes wait for # the attachment to be removed. For non-multiattach volumes wait for # the state of the volume to change to available. This is so we don't # error out when trying to delete the volume during teardown. if volume['multiattach']: att = waiters.wait_for_volume_attachment_create( self.volumes_client, volume['id'], server['id']) self.addCleanup(waiters.wait_for_volume_attachment_remove, self.volumes_client, volume['id'], att['attachment_id']) else: self.addCleanup(waiters.wait_for_volume_resource_status, self.volumes_client, volume['id'], 'available') waiters.wait_for_volume_resource_status(self.volumes_client, volume['id'], 'in-use') # Ignore 404s on detach in case the server is deleted or the volume # is already detached. 
self.addCleanup(self._detach_volume, server, volume) return attachment def create_volume_snapshot(self, volume_id, name=None, description=None, metadata=None, force=False): name = name or data_utils.rand_name( self.__class__.__name__ + '-snapshot') snapshot = self.snapshots_client.create_snapshot( volume_id=volume_id, force=force, display_name=name, description=description, metadata=metadata)['snapshot'] self.addCleanup(self.snapshots_client.wait_for_resource_deletion, snapshot['id']) self.addCleanup(self.snapshots_client.delete_snapshot, snapshot['id']) waiters.wait_for_volume_resource_status(self.snapshots_client, snapshot['id'], 'available') snapshot = self.snapshots_client.show_snapshot( snapshot['id'])['snapshot'] return snapshot def assert_flavor_equal(self, flavor_id, server_flavor): """Check whether server_flavor equals to flavor. :param flavor_id: flavor id :param server_flavor: flavor info returned by show_server. """ # Nova API > 2.46 no longer includes flavor.id, and schema check # will cover whether 'id' should be in flavor if server_flavor.get('id'): msg = ('server flavor is not same as flavor!') self.assertEqual(flavor_id, server_flavor['id'], msg) else: flavor = self.flavors_client.show_flavor(flavor_id)['flavor'] self.assertEqual(flavor['name'], server_flavor['original_name'], "original_name in server flavor is not same as " "flavor name!") for key in ['ram', 'vcpus', 'disk']: msg = ('attribute %s in server flavor is not same as ' 'flavor!' % key) self.assertEqual(flavor[key], server_flavor[key], msg) class BaseV2ComputeAdminTest(BaseV2ComputeTest): """Base test case class for Compute Admin API tests.""" credentials = ['primary', 'admin'] @classmethod def setup_clients(cls): super(BaseV2ComputeAdminTest, cls).setup_clients() cls.availability_zone_admin_client = ( cls.os_admin.availability_zone_client) cls.admin_flavors_client = cls.os_admin.flavors_client cls.admin_servers_client = cls.os_admin.servers_client cls.image_client = cls.os_admin.image_client_v2 cls.admin_assisted_volume_snapshots_client = \ cls.os_admin.assisted_volume_snapshots_client def create_flavor(self, ram, vcpus, disk, name=None, is_public='True', **kwargs): if name is None: name = data_utils.rand_name(self.__class__.__name__ + "-flavor") id = kwargs.pop('id', data_utils.rand_int_id(start=1000)) client = self.admin_flavors_client flavor = client.create_flavor( ram=ram, vcpus=vcpus, disk=disk, name=name, id=id, is_public=is_public, **kwargs)['flavor'] self.addCleanup(client.wait_for_resource_deletion, flavor['id']) self.addCleanup(client.delete_flavor, flavor['id']) return flavor @classmethod def get_host_for_server(cls, server_id): server_details = cls.admin_servers_client.show_server(server_id) return server_details['server']['OS-EXT-SRV-ATTR:host'] def get_host_other_than(self, server_id): source_host = self.get_host_for_server(server_id) svcs = self.os_admin.services_client.list_services( binary='nova-compute')['services'] hosts = [] for svc in svcs: if svc['state'] == 'up' and svc['status'] == 'enabled': if CONF.compute.compute_volume_common_az: if svc['zone'] == CONF.compute.compute_volume_common_az: hosts.append(svc['host']) else: hosts.append(svc['host']) for target_host in hosts: if source_host != target_host: return target_host
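The base class in the file above is meant to be subclassed by concrete API tests, which lean on helpers such as create_test_server, get_server_ip and the scheduled class-level cleanups. A minimal sketch of such a subclass follows; the module path, class name and test name are illustrative assumptions, not part of this dataset entry:

from tempest.api.compute import base


class ExampleServersTest(base.BaseV2ComputeTest):
    """Hypothetical subclass illustrating the helpers defined above."""

    @classmethod
    def resource_setup(cls):
        super(ExampleServersTest, cls).resource_setup()
        # create_test_server schedules delete + wait-for-termination cleanups
        # at class scope, so no explicit teardown is needed here.
        cls.server = cls.create_test_server(wait_until='ACTIVE')

    def test_server_is_active(self):
        server = self.servers_client.show_server(self.server['id'])['server']
        self.assertEqual('ACTIVE', server['status'])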
create_volume
Create a volume and wait for it to become 'available'. :param image_ref: Specify an image id to create a bootable volume. :param kwargs: other parameters to create volume. :returns: The available volume.
# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import time from oslo_log import log as logging from tempest.common import compute from tempest.common import waiters from tempest import config from tempest import exceptions from tempest.lib.common import api_microversion_fixture from tempest.lib.common import api_version_request from tempest.lib.common import api_version_utils from tempest.lib.common.utils import data_utils from tempest.lib.common.utils import test_utils from tempest.lib import exceptions as lib_exc import tempest.test CONF = config.CONF LOG = logging.getLogger(__name__) class BaseV2ComputeTest(api_version_utils.BaseMicroversionTest, tempest.test.BaseTestCase): """Base test case class for all Compute API tests.""" force_tenant_isolation = False # Set this to True in subclasses to create a default network. See # https://bugs.launchpad.net/tempest/+bug/1844568 create_default_network = False # TODO(andreaf) We should care also for the alt_manager here # but only once client lazy load in the manager is done credentials = ['primary'] @classmethod def skip_checks(cls): super(BaseV2ComputeTest, cls).skip_checks() if not CONF.service_available.nova: raise cls.skipException("Nova is not available") api_version_utils.check_skip_with_microversion( cls.min_microversion, cls.max_microversion, CONF.compute.min_microversion, CONF.compute.max_microversion) api_version_utils.check_skip_with_microversion( cls.volume_min_microversion, cls.volume_max_microversion, CONF.volume.min_microversion, CONF.volume.max_microversion) api_version_utils.check_skip_with_microversion( cls.placement_min_microversion, cls.placement_max_microversion, CONF.placement.min_microversion, CONF.placement.max_microversion) @classmethod def setup_credentials(cls): # Setting network=True, subnet=True creates a default network cls.set_network_resources( network=cls.create_default_network, subnet=cls.create_default_network) super(BaseV2ComputeTest, cls).setup_credentials() @classmethod def setup_clients(cls): super(BaseV2ComputeTest, cls).setup_clients() cls.servers_client = cls.os_primary.servers_client cls.server_groups_client = cls.os_primary.server_groups_client cls.flavors_client = cls.os_primary.flavors_client cls.compute_images_client = cls.os_primary.compute_images_client cls.extensions_client = cls.os_primary.extensions_client cls.floating_ip_pools_client = cls.os_primary.floating_ip_pools_client cls.floating_ips_client = cls.os_primary.compute_floating_ips_client cls.keypairs_client = cls.os_primary.keypairs_client cls.security_group_rules_client = ( cls.os_primary.compute_security_group_rules_client) cls.security_groups_client =\ cls.os_primary.compute_security_groups_client cls.quotas_client = cls.os_primary.quotas_client cls.compute_networks_client = cls.os_primary.compute_networks_client cls.limits_client = cls.os_primary.limits_client cls.volumes_extensions_client =\ cls.os_primary.volumes_extensions_client cls.snapshots_extensions_client =\ 
cls.os_primary.snapshots_extensions_client cls.interfaces_client = cls.os_primary.interfaces_client cls.fixed_ips_client = cls.os_primary.fixed_ips_client cls.availability_zone_client = cls.os_primary.availability_zone_client cls.agents_client = cls.os_primary.agents_client cls.aggregates_client = cls.os_primary.aggregates_client cls.services_client = cls.os_primary.services_client cls.instance_usages_audit_log_client = ( cls.os_primary.instance_usages_audit_log_client) cls.hypervisor_client = cls.os_primary.hypervisor_client cls.certificates_client = cls.os_primary.certificates_client cls.migrations_client = cls.os_primary.migrations_client cls.security_group_default_rules_client = ( cls.os_primary.security_group_default_rules_client) cls.versions_client = cls.os_primary.compute_versions_client if CONF.service_available.cinder: cls.volumes_client = cls.os_primary.volumes_client_latest cls.attachments_client = cls.os_primary.attachments_client_latest cls.snapshots_client = cls.os_primary.snapshots_client_latest if CONF.service_available.glance: if CONF.image_feature_enabled.api_v1: cls.images_client = cls.os_primary.image_client elif CONF.image_feature_enabled.api_v2: cls.images_client = cls.os_primary.image_client_v2 else: raise lib_exc.InvalidConfiguration( 'Either api_v1 or api_v2 must be True in ' '[image-feature-enabled].') cls._check_depends_on_nova_network() @classmethod def _check_depends_on_nova_network(cls): # Since nova-network APIs were removed from Nova in the Rocky release, # determine, based on the max version from the version document, if # the compute API is >Queens and if so, skip tests that rely on # nova-network. if not getattr(cls, 'depends_on_nova_network', False): return versions = cls.versions_client.list_versions()['versions'] # Find the v2.1 version which will tell us our max version for the # compute API we're testing against. for version in versions: if version['id'] == 'v2.1': max_version = api_version_request.APIVersionRequest( version['version']) break else: LOG.warning( 'Unable to determine max v2.1 compute API version: %s', versions) return # The max compute API version in Queens is 2.60 so we cap # at that version. 
queens = api_version_request.APIVersionRequest('2.60') if max_version > queens: raise cls.skipException('nova-network is gone') @classmethod def resource_setup(cls): super(BaseV2ComputeTest, cls).resource_setup() cls.request_microversion = ( api_version_utils.select_request_microversion( cls.min_microversion, CONF.compute.min_microversion)) cls.volume_request_microversion = ( api_version_utils.select_request_microversion( cls.volume_min_microversion, CONF.volume.min_microversion)) cls.placement_request_microversion = ( api_version_utils.select_request_microversion( cls.placement_min_microversion, CONF.placement.min_microversion)) cls.build_interval = CONF.compute.build_interval cls.build_timeout = CONF.compute.build_timeout cls.image_ref = CONF.compute.image_ref cls.image_ref_alt = CONF.compute.image_ref_alt cls.flavor_ref = CONF.compute.flavor_ref cls.flavor_ref_alt = CONF.compute.flavor_ref_alt cls.ssh_user = CONF.validation.image_ssh_user cls.ssh_alt_user = CONF.validation.image_alt_ssh_user cls.image_ssh_user = CONF.validation.image_ssh_user cls.image_alt_ssh_user = CONF.validation.image_alt_ssh_user cls.image_ssh_password = CONF.validation.image_ssh_password cls.image_alt_ssh_password = CONF.validation.image_alt_ssh_password @classmethod def is_requested_microversion_compatible(cls, max_version): """Check the compatibility of selected request microversion This method will check if selected request microversion (cls.request_microversion) for test is compatible with respect to 'max_version'. Compatible means if selected request microversion is in the range(<=) of 'max_version'. :param max_version: maximum microversion to compare for compatibility. Example: '2.30' :returns: True if selected request microversion is compatible with 'max_version'. False in other case. """ try: req_version_obj = api_version_request.APIVersionRequest( cls.request_microversion) # NOTE(gmann): This is case where this method is used before calling # resource_setup(), where cls.request_microversion is set. There may # not be any such case but still we can handle this case. except AttributeError: request_microversion = ( api_version_utils.select_request_microversion( cls.min_microversion, CONF.compute.min_microversion)) req_version_obj = api_version_request.APIVersionRequest( request_microversion) max_version_obj = api_version_request.APIVersionRequest(max_version) return req_version_obj <= max_version_obj @classmethod def server_check_teardown(cls): """Checks is the shared server clean enough for subsequent test. Method will delete the server when it's dirty. The setUp method is responsible for creating a new server. Exceptions raised in tearDown class are fails the test case, This method supposed to use only by tearDown methods, when the shared server_id is stored in the server_id of the class. """ if getattr(cls, 'server_id', None) is not None: try: waiters.wait_for_server_status(cls.servers_client, cls.server_id, 'ACTIVE') except Exception as exc: LOG.exception(exc) cls.servers_client.delete_server(cls.server_id) waiters.wait_for_server_termination(cls.servers_client, cls.server_id) cls.server_id = None raise @classmethod def create_test_server(cls, validatable=False, volume_backed=False, validation_resources=None, clients=None, **kwargs): """Wrapper utility that returns a test server. This wrapper utility calls the common create test server and returns a test server. The purpose of this wrapper is to minimize the impact on the code of the tests already using this function. 
:param validatable: Whether the server will be pingable or sshable. :param volume_backed: Whether the instance is volume backed or not. :param validation_resources: Dictionary of validation resources as returned by `get_class_validation_resources`. :param clients: Client manager, defaults to os_primary. :param kwargs: Extra arguments are passed down to the `compute.create_test_server` call. """ if 'name' not in kwargs: kwargs['name'] = data_utils.rand_name(cls.__name__ + "-server") request_version = api_version_request.APIVersionRequest( cls.request_microversion) v2_37_version = api_version_request.APIVersionRequest('2.37') tenant_network = cls.get_tenant_network() # NOTE(snikitin): since microversion v2.37 'networks' field is required if (request_version >= v2_37_version and 'networks' not in kwargs and not tenant_network): kwargs['networks'] = 'none' if clients is None: clients = cls.os_primary body, servers = compute.create_test_server( clients, validatable, validation_resources=validation_resources, tenant_network=tenant_network, volume_backed=volume_backed, **kwargs) # For each server schedule wait and delete, so we first delete all # and then wait for all for server in servers: cls.addClassResourceCleanup(waiters.wait_for_server_termination, clients.servers_client, server['id']) for server in servers: cls.addClassResourceCleanup( test_utils.call_and_ignore_notfound_exc, clients.servers_client.delete_server, server['id']) return body @classmethod def create_security_group(cls, name=None, description=None): if name is None: name = data_utils.rand_name(cls.__name__ + "-securitygroup") if description is None: description = data_utils.rand_name('description') body = cls.security_groups_client.create_security_group( name=name, description=description)['security_group'] cls.addClassResourceCleanup( test_utils.call_and_ignore_notfound_exc, cls.security_groups_client.delete_security_group, body['id']) return body @classmethod def create_test_server_group(cls, name="", policy=None): if not name: name = data_utils.rand_name(cls.__name__ + "-Server-Group") if policy is None: policy = ['affinity'] body = cls.server_groups_client.create_server_group( name=name, policies=policy)['server_group'] cls.addClassResourceCleanup( test_utils.call_and_ignore_notfound_exc, cls.server_groups_client.delete_server_group, body['id']) return body def wait_for(self, condition): """Repeatedly calls condition() until a timeout.""" start_time = int(time.time()) while True: try: condition() except Exception: pass else: return if int(time.time()) - start_time >= self.build_timeout: condition() return time.sleep(self.build_interval) @classmethod def prepare_instance_network(cls): if (CONF.validation.auth_method != 'disabled' and CONF.validation.connect_method == 'floating'): cls.set_network_resources(network=True, subnet=True, router=True, dhcp=True) @classmethod def create_image_from_server(cls, server_id, **kwargs): """Wrapper utility that returns an image created from the server. If compute microversion >= 2.36, the returned image response will be from the image service API rather than the compute image proxy API. 
""" name = kwargs.pop('name', data_utils.rand_name(cls.__name__ + "-image")) wait_until = kwargs.pop('wait_until', None) wait_for_server = kwargs.pop('wait_for_server', True) image = cls.compute_images_client.create_image(server_id, name=name, **kwargs) if api_version_utils.compare_version_header_to_response( "OpenStack-API-Version", "compute 2.45", image.response, "lt"): image_id = image['image_id'] else: image_id = data_utils.parse_image_id(image.response['location']) # The compute image proxy APIs were deprecated in 2.35 so # use the images client directly if the API microversion being # used is >=2.36. if not cls.is_requested_microversion_compatible('2.35'): client = cls.images_client else: client = cls.compute_images_client cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc, client.delete_image, image_id) if wait_until is not None: try: wait_until = wait_until.upper() if not cls.is_requested_microversion_compatible('2.35'): wait_until = wait_until.lower() waiters.wait_for_image_status(client, image_id, wait_until) except lib_exc.NotFound: if wait_until.upper() == 'ACTIVE': # If the image is not found after create_image returned # that means the snapshot failed in nova-compute and nova # deleted the image. There should be a compute fault # recorded with the server in that case, so get the server # and dump some details. server = ( cls.servers_client.show_server(server_id)['server']) if 'fault' in server: raise exceptions.SnapshotNotFoundException( server['fault'], image_id=image_id) else: raise exceptions.SnapshotNotFoundException( image_id=image_id) else: raise image = client.show_image(image_id) # Compute image client returns response wrapped in 'image' element # which is not the case with Glance image client. if 'image' in image: image = image['image'] if wait_until.upper() == 'ACTIVE': if wait_for_server: waiters.wait_for_server_status(cls.servers_client, server_id, 'ACTIVE') return image @classmethod def recreate_server(cls, server_id, validatable=False, **kwargs): """Destroy an existing class level server and creates a new one Some test classes use a test server that can be used by multiple tests. This is done to optimise runtime and test load. If something goes wrong with the test server, it can be rebuilt using this helper. This helper can also be used for the initial provisioning if no server_id is specified. :param server_id: UUID of the server to be rebuilt. If None is specified, a new server is provisioned. :param validatable: whether to the server needs to be validatable. When True, validation resources are acquired via the `get_class_validation_resources` helper. :param kwargs: extra paramaters are passed through to the `create_test_server` call. :return: the UUID of the created server. 
""" if server_id: cls.delete_server(server_id) cls.password = data_utils.rand_password() server = cls.create_test_server( validatable, validation_resources=cls.get_class_validation_resources( cls.os_primary), wait_until='ACTIVE', adminPass=cls.password, **kwargs) return server['id'] @classmethod def delete_server(cls, server_id): """Deletes an existing server and waits for it to be gone.""" try: cls.servers_client.delete_server(server_id) waiters.wait_for_server_termination(cls.servers_client, server_id) except Exception: LOG.exception('Failed to delete server %s', server_id) def resize_server(self, server_id, new_flavor_id, **kwargs): """resize and confirm_resize an server, waits for it to be ACTIVE.""" self.servers_client.resize_server(server_id, new_flavor_id, **kwargs) waiters.wait_for_server_status(self.servers_client, server_id, 'VERIFY_RESIZE') self.servers_client.confirm_resize_server(server_id) waiters.wait_for_server_status( self.servers_client, server_id, 'ACTIVE') server = self.servers_client.show_server(server_id)['server'] self.assert_flavor_equal(new_flavor_id, server['flavor']) @classmethod def delete_volume(cls, volume_id): """Deletes the given volume and waits for it to be gone.""" try: cls.volumes_client.delete_volume(volume_id) # TODO(mriedem): We should move the wait_for_resource_deletion # into the delete_volume method as a convenience to the caller. cls.volumes_client.wait_for_resource_deletion(volume_id) except lib_exc.NotFound: LOG.warning("Unable to delete volume '%s' since it was not found. " "Maybe it was already deleted?", volume_id) @classmethod def get_server_ip(cls, server, validation_resources=None): """Get the server fixed or floating IP. Based on the configuration we're in, return a correct ip address for validating that a guest is up. :param server: The server dict as returned by the API :param validation_resources: The dict of validation resources provisioned for the server. """ if CONF.validation.connect_method == 'floating': if validation_resources: return validation_resources['floating_ip']['ip'] else: msg = ('When validation.connect_method equals floating, ' 'validation_resources cannot be None') raise lib_exc.InvalidParam(invalid_param=msg) elif CONF.validation.connect_method == 'fixed': addresses = server['addresses'][CONF.validation.network_for_ssh] for address in addresses: if address['version'] == CONF.validation.ip_version_for_ssh: return address['addr'] raise exceptions.ServerUnreachable(server_id=server['id']) else: raise lib_exc.InvalidConfiguration() def setUp(self): super(BaseV2ComputeTest, self).setUp() self.useFixture(api_microversion_fixture.APIMicroversionFixture( compute_microversion=self.request_microversion, volume_microversion=self.volume_request_microversion, placement_microversion=self.placement_request_microversion)) # MASKED: create_volume function (lines 503-529) def _detach_volume(self, server, volume): """Helper method to detach a volume. Ignores 404 responses if the volume or server do not exist, or the volume is already detached from the server. """ try: volume = self.volumes_client.show_volume(volume['id'])['volume'] # Check the status. You can only detach an in-use volume, otherwise # the compute API will return a 400 response. if volume['status'] == 'in-use': self.servers_client.detach_volume(server['id'], volume['id']) except lib_exc.NotFound: # Ignore 404s on detach in case the server is deleted or the volume # is already detached. 
pass def attach_volume(self, server, volume, device=None, tag=None): """Attaches volume to server and waits for 'in-use' volume status. The volume will be detached when the test tears down. :param server: The server to which the volume will be attached. :param volume: The volume to attach. :param device: Optional mountpoint for the attached volume. Note that this is not guaranteed for all hypervisors and is not recommended. :param tag: Optional device role tag to apply to the volume. """ attach_kwargs = dict(volumeId=volume['id']) if device: attach_kwargs['device'] = device if tag: attach_kwargs['tag'] = tag attachment = self.servers_client.attach_volume( server['id'], **attach_kwargs)['volumeAttachment'] # On teardown detach the volume and for multiattach volumes wait for # the attachment to be removed. For non-multiattach volumes wait for # the state of the volume to change to available. This is so we don't # error out when trying to delete the volume during teardown. if volume['multiattach']: att = waiters.wait_for_volume_attachment_create( self.volumes_client, volume['id'], server['id']) self.addCleanup(waiters.wait_for_volume_attachment_remove, self.volumes_client, volume['id'], att['attachment_id']) else: self.addCleanup(waiters.wait_for_volume_resource_status, self.volumes_client, volume['id'], 'available') waiters.wait_for_volume_resource_status(self.volumes_client, volume['id'], 'in-use') # Ignore 404s on detach in case the server is deleted or the volume # is already detached. self.addCleanup(self._detach_volume, server, volume) return attachment def create_volume_snapshot(self, volume_id, name=None, description=None, metadata=None, force=False): name = name or data_utils.rand_name( self.__class__.__name__ + '-snapshot') snapshot = self.snapshots_client.create_snapshot( volume_id=volume_id, force=force, display_name=name, description=description, metadata=metadata)['snapshot'] self.addCleanup(self.snapshots_client.wait_for_resource_deletion, snapshot['id']) self.addCleanup(self.snapshots_client.delete_snapshot, snapshot['id']) waiters.wait_for_volume_resource_status(self.snapshots_client, snapshot['id'], 'available') snapshot = self.snapshots_client.show_snapshot( snapshot['id'])['snapshot'] return snapshot def assert_flavor_equal(self, flavor_id, server_flavor): """Check whether server_flavor equals to flavor. :param flavor_id: flavor id :param server_flavor: flavor info returned by show_server. """ # Nova API > 2.46 no longer includes flavor.id, and schema check # will cover whether 'id' should be in flavor if server_flavor.get('id'): msg = ('server flavor is not same as flavor!') self.assertEqual(flavor_id, server_flavor['id'], msg) else: flavor = self.flavors_client.show_flavor(flavor_id)['flavor'] self.assertEqual(flavor['name'], server_flavor['original_name'], "original_name in server flavor is not same as " "flavor name!") for key in ['ram', 'vcpus', 'disk']: msg = ('attribute %s in server flavor is not same as ' 'flavor!' 
% key) self.assertEqual(flavor[key], server_flavor[key], msg) class BaseV2ComputeAdminTest(BaseV2ComputeTest): """Base test case class for Compute Admin API tests.""" credentials = ['primary', 'admin'] @classmethod def setup_clients(cls): super(BaseV2ComputeAdminTest, cls).setup_clients() cls.availability_zone_admin_client = ( cls.os_admin.availability_zone_client) cls.admin_flavors_client = cls.os_admin.flavors_client cls.admin_servers_client = cls.os_admin.servers_client cls.image_client = cls.os_admin.image_client_v2 cls.admin_assisted_volume_snapshots_client = \ cls.os_admin.assisted_volume_snapshots_client def create_flavor(self, ram, vcpus, disk, name=None, is_public='True', **kwargs): if name is None: name = data_utils.rand_name(self.__class__.__name__ + "-flavor") id = kwargs.pop('id', data_utils.rand_int_id(start=1000)) client = self.admin_flavors_client flavor = client.create_flavor( ram=ram, vcpus=vcpus, disk=disk, name=name, id=id, is_public=is_public, **kwargs)['flavor'] self.addCleanup(client.wait_for_resource_deletion, flavor['id']) self.addCleanup(client.delete_flavor, flavor['id']) return flavor @classmethod def get_host_for_server(cls, server_id): server_details = cls.admin_servers_client.show_server(server_id) return server_details['server']['OS-EXT-SRV-ATTR:host'] def get_host_other_than(self, server_id): source_host = self.get_host_for_server(server_id) svcs = self.os_admin.services_client.list_services( binary='nova-compute')['services'] hosts = [] for svc in svcs: if svc['state'] == 'up' and svc['status'] == 'enabled': if CONF.compute.compute_volume_common_az: if svc['zone'] == CONF.compute.compute_volume_common_az: hosts.append(svc['host']) else: hosts.append(svc['host']) for target_host in hosts: if source_host != target_host: return target_host
@classmethod def create_volume(cls, image_ref=None, **kwargs): """Create a volume and wait for it to become 'available'. :param image_ref: Specify an image id to create a bootable volume. :param kwargs: other parameters to create volume. :returns: The available volume. """ if 'size' not in kwargs: kwargs['size'] = CONF.volume.volume_size if 'display_name' not in kwargs: vol_name = data_utils.rand_name(cls.__name__ + '-volume') kwargs['display_name'] = vol_name if image_ref is not None: kwargs['imageRef'] = image_ref if CONF.compute.compute_volume_common_az: kwargs.setdefault('availability_zone', CONF.compute.compute_volume_common_az) volume = cls.volumes_client.create_volume(**kwargs)['volume'] cls.addClassResourceCleanup( cls.volumes_client.wait_for_resource_deletion, volume['id']) cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc, cls.volumes_client.delete_volume, volume['id']) waiters.wait_for_volume_resource_status(cls.volumes_client, volume['id'], 'available') return volume
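A hypothetical usage sketch for the helper above (the test class and method names are assumptions; it presumes Cinder is enabled so the volumes client is available):

from tempest.api.compute import base


class ExampleVolumeTest(base.BaseV2ComputeTest):
    """Hypothetical subclass showing how create_volume is typically called."""

    def test_bootable_volume_becomes_available(self):
        # create_volume registers the deletion cleanups and waits for the
        # 'available' status itself; re-fetch the volume to inspect its state.
        volume = self.create_volume(image_ref=self.image_ref)
        fetched = self.volumes_client.show_volume(volume['id'])['volume']
        self.assertEqual('available', fetched['status'])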
503
529
# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import time from oslo_log import log as logging from tempest.common import compute from tempest.common import waiters from tempest import config from tempest import exceptions from tempest.lib.common import api_microversion_fixture from tempest.lib.common import api_version_request from tempest.lib.common import api_version_utils from tempest.lib.common.utils import data_utils from tempest.lib.common.utils import test_utils from tempest.lib import exceptions as lib_exc import tempest.test CONF = config.CONF LOG = logging.getLogger(__name__) class BaseV2ComputeTest(api_version_utils.BaseMicroversionTest, tempest.test.BaseTestCase): """Base test case class for all Compute API tests.""" force_tenant_isolation = False # Set this to True in subclasses to create a default network. See # https://bugs.launchpad.net/tempest/+bug/1844568 create_default_network = False # TODO(andreaf) We should care also for the alt_manager here # but only once client lazy load in the manager is done credentials = ['primary'] @classmethod def skip_checks(cls): super(BaseV2ComputeTest, cls).skip_checks() if not CONF.service_available.nova: raise cls.skipException("Nova is not available") api_version_utils.check_skip_with_microversion( cls.min_microversion, cls.max_microversion, CONF.compute.min_microversion, CONF.compute.max_microversion) api_version_utils.check_skip_with_microversion( cls.volume_min_microversion, cls.volume_max_microversion, CONF.volume.min_microversion, CONF.volume.max_microversion) api_version_utils.check_skip_with_microversion( cls.placement_min_microversion, cls.placement_max_microversion, CONF.placement.min_microversion, CONF.placement.max_microversion) @classmethod def setup_credentials(cls): # Setting network=True, subnet=True creates a default network cls.set_network_resources( network=cls.create_default_network, subnet=cls.create_default_network) super(BaseV2ComputeTest, cls).setup_credentials() @classmethod def setup_clients(cls): super(BaseV2ComputeTest, cls).setup_clients() cls.servers_client = cls.os_primary.servers_client cls.server_groups_client = cls.os_primary.server_groups_client cls.flavors_client = cls.os_primary.flavors_client cls.compute_images_client = cls.os_primary.compute_images_client cls.extensions_client = cls.os_primary.extensions_client cls.floating_ip_pools_client = cls.os_primary.floating_ip_pools_client cls.floating_ips_client = cls.os_primary.compute_floating_ips_client cls.keypairs_client = cls.os_primary.keypairs_client cls.security_group_rules_client = ( cls.os_primary.compute_security_group_rules_client) cls.security_groups_client =\ cls.os_primary.compute_security_groups_client cls.quotas_client = cls.os_primary.quotas_client cls.compute_networks_client = cls.os_primary.compute_networks_client cls.limits_client = cls.os_primary.limits_client cls.volumes_extensions_client =\ cls.os_primary.volumes_extensions_client cls.snapshots_extensions_client =\ 
cls.os_primary.snapshots_extensions_client cls.interfaces_client = cls.os_primary.interfaces_client cls.fixed_ips_client = cls.os_primary.fixed_ips_client cls.availability_zone_client = cls.os_primary.availability_zone_client cls.agents_client = cls.os_primary.agents_client cls.aggregates_client = cls.os_primary.aggregates_client cls.services_client = cls.os_primary.services_client cls.instance_usages_audit_log_client = ( cls.os_primary.instance_usages_audit_log_client) cls.hypervisor_client = cls.os_primary.hypervisor_client cls.certificates_client = cls.os_primary.certificates_client cls.migrations_client = cls.os_primary.migrations_client cls.security_group_default_rules_client = ( cls.os_primary.security_group_default_rules_client) cls.versions_client = cls.os_primary.compute_versions_client if CONF.service_available.cinder: cls.volumes_client = cls.os_primary.volumes_client_latest cls.attachments_client = cls.os_primary.attachments_client_latest cls.snapshots_client = cls.os_primary.snapshots_client_latest if CONF.service_available.glance: if CONF.image_feature_enabled.api_v1: cls.images_client = cls.os_primary.image_client elif CONF.image_feature_enabled.api_v2: cls.images_client = cls.os_primary.image_client_v2 else: raise lib_exc.InvalidConfiguration( 'Either api_v1 or api_v2 must be True in ' '[image-feature-enabled].') cls._check_depends_on_nova_network() @classmethod def _check_depends_on_nova_network(cls): # Since nova-network APIs were removed from Nova in the Rocky release, # determine, based on the max version from the version document, if # the compute API is >Queens and if so, skip tests that rely on # nova-network. if not getattr(cls, 'depends_on_nova_network', False): return versions = cls.versions_client.list_versions()['versions'] # Find the v2.1 version which will tell us our max version for the # compute API we're testing against. for version in versions: if version['id'] == 'v2.1': max_version = api_version_request.APIVersionRequest( version['version']) break else: LOG.warning( 'Unable to determine max v2.1 compute API version: %s', versions) return # The max compute API version in Queens is 2.60 so we cap # at that version. 
queens = api_version_request.APIVersionRequest('2.60') if max_version > queens: raise cls.skipException('nova-network is gone') @classmethod def resource_setup(cls): super(BaseV2ComputeTest, cls).resource_setup() cls.request_microversion = ( api_version_utils.select_request_microversion( cls.min_microversion, CONF.compute.min_microversion)) cls.volume_request_microversion = ( api_version_utils.select_request_microversion( cls.volume_min_microversion, CONF.volume.min_microversion)) cls.placement_request_microversion = ( api_version_utils.select_request_microversion( cls.placement_min_microversion, CONF.placement.min_microversion)) cls.build_interval = CONF.compute.build_interval cls.build_timeout = CONF.compute.build_timeout cls.image_ref = CONF.compute.image_ref cls.image_ref_alt = CONF.compute.image_ref_alt cls.flavor_ref = CONF.compute.flavor_ref cls.flavor_ref_alt = CONF.compute.flavor_ref_alt cls.ssh_user = CONF.validation.image_ssh_user cls.ssh_alt_user = CONF.validation.image_alt_ssh_user cls.image_ssh_user = CONF.validation.image_ssh_user cls.image_alt_ssh_user = CONF.validation.image_alt_ssh_user cls.image_ssh_password = CONF.validation.image_ssh_password cls.image_alt_ssh_password = CONF.validation.image_alt_ssh_password @classmethod def is_requested_microversion_compatible(cls, max_version): """Check the compatibility of selected request microversion This method will check if selected request microversion (cls.request_microversion) for test is compatible with respect to 'max_version'. Compatible means if selected request microversion is in the range(<=) of 'max_version'. :param max_version: maximum microversion to compare for compatibility. Example: '2.30' :returns: True if selected request microversion is compatible with 'max_version'. False in other case. """ try: req_version_obj = api_version_request.APIVersionRequest( cls.request_microversion) # NOTE(gmann): This is case where this method is used before calling # resource_setup(), where cls.request_microversion is set. There may # not be any such case but still we can handle this case. except AttributeError: request_microversion = ( api_version_utils.select_request_microversion( cls.min_microversion, CONF.compute.min_microversion)) req_version_obj = api_version_request.APIVersionRequest( request_microversion) max_version_obj = api_version_request.APIVersionRequest(max_version) return req_version_obj <= max_version_obj @classmethod def server_check_teardown(cls): """Checks is the shared server clean enough for subsequent test. Method will delete the server when it's dirty. The setUp method is responsible for creating a new server. Exceptions raised in tearDown class are fails the test case, This method supposed to use only by tearDown methods, when the shared server_id is stored in the server_id of the class. """ if getattr(cls, 'server_id', None) is not None: try: waiters.wait_for_server_status(cls.servers_client, cls.server_id, 'ACTIVE') except Exception as exc: LOG.exception(exc) cls.servers_client.delete_server(cls.server_id) waiters.wait_for_server_termination(cls.servers_client, cls.server_id) cls.server_id = None raise @classmethod def create_test_server(cls, validatable=False, volume_backed=False, validation_resources=None, clients=None, **kwargs): """Wrapper utility that returns a test server. This wrapper utility calls the common create test server and returns a test server. The purpose of this wrapper is to minimize the impact on the code of the tests already using this function. 
:param validatable: Whether the server will be pingable or sshable. :param volume_backed: Whether the instance is volume backed or not. :param validation_resources: Dictionary of validation resources as returned by `get_class_validation_resources`. :param clients: Client manager, defaults to os_primary. :param kwargs: Extra arguments are passed down to the `compute.create_test_server` call. """ if 'name' not in kwargs: kwargs['name'] = data_utils.rand_name(cls.__name__ + "-server") request_version = api_version_request.APIVersionRequest( cls.request_microversion) v2_37_version = api_version_request.APIVersionRequest('2.37') tenant_network = cls.get_tenant_network() # NOTE(snikitin): since microversion v2.37 'networks' field is required if (request_version >= v2_37_version and 'networks' not in kwargs and not tenant_network): kwargs['networks'] = 'none' if clients is None: clients = cls.os_primary body, servers = compute.create_test_server( clients, validatable, validation_resources=validation_resources, tenant_network=tenant_network, volume_backed=volume_backed, **kwargs) # For each server schedule wait and delete, so we first delete all # and then wait for all for server in servers: cls.addClassResourceCleanup(waiters.wait_for_server_termination, clients.servers_client, server['id']) for server in servers: cls.addClassResourceCleanup( test_utils.call_and_ignore_notfound_exc, clients.servers_client.delete_server, server['id']) return body @classmethod def create_security_group(cls, name=None, description=None): if name is None: name = data_utils.rand_name(cls.__name__ + "-securitygroup") if description is None: description = data_utils.rand_name('description') body = cls.security_groups_client.create_security_group( name=name, description=description)['security_group'] cls.addClassResourceCleanup( test_utils.call_and_ignore_notfound_exc, cls.security_groups_client.delete_security_group, body['id']) return body @classmethod def create_test_server_group(cls, name="", policy=None): if not name: name = data_utils.rand_name(cls.__name__ + "-Server-Group") if policy is None: policy = ['affinity'] body = cls.server_groups_client.create_server_group( name=name, policies=policy)['server_group'] cls.addClassResourceCleanup( test_utils.call_and_ignore_notfound_exc, cls.server_groups_client.delete_server_group, body['id']) return body def wait_for(self, condition): """Repeatedly calls condition() until a timeout.""" start_time = int(time.time()) while True: try: condition() except Exception: pass else: return if int(time.time()) - start_time >= self.build_timeout: condition() return time.sleep(self.build_interval) @classmethod def prepare_instance_network(cls): if (CONF.validation.auth_method != 'disabled' and CONF.validation.connect_method == 'floating'): cls.set_network_resources(network=True, subnet=True, router=True, dhcp=True) @classmethod def create_image_from_server(cls, server_id, **kwargs): """Wrapper utility that returns an image created from the server. If compute microversion >= 2.36, the returned image response will be from the image service API rather than the compute image proxy API. 
""" name = kwargs.pop('name', data_utils.rand_name(cls.__name__ + "-image")) wait_until = kwargs.pop('wait_until', None) wait_for_server = kwargs.pop('wait_for_server', True) image = cls.compute_images_client.create_image(server_id, name=name, **kwargs) if api_version_utils.compare_version_header_to_response( "OpenStack-API-Version", "compute 2.45", image.response, "lt"): image_id = image['image_id'] else: image_id = data_utils.parse_image_id(image.response['location']) # The compute image proxy APIs were deprecated in 2.35 so # use the images client directly if the API microversion being # used is >=2.36. if not cls.is_requested_microversion_compatible('2.35'): client = cls.images_client else: client = cls.compute_images_client cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc, client.delete_image, image_id) if wait_until is not None: try: wait_until = wait_until.upper() if not cls.is_requested_microversion_compatible('2.35'): wait_until = wait_until.lower() waiters.wait_for_image_status(client, image_id, wait_until) except lib_exc.NotFound: if wait_until.upper() == 'ACTIVE': # If the image is not found after create_image returned # that means the snapshot failed in nova-compute and nova # deleted the image. There should be a compute fault # recorded with the server in that case, so get the server # and dump some details. server = ( cls.servers_client.show_server(server_id)['server']) if 'fault' in server: raise exceptions.SnapshotNotFoundException( server['fault'], image_id=image_id) else: raise exceptions.SnapshotNotFoundException( image_id=image_id) else: raise image = client.show_image(image_id) # Compute image client returns response wrapped in 'image' element # which is not the case with Glance image client. if 'image' in image: image = image['image'] if wait_until.upper() == 'ACTIVE': if wait_for_server: waiters.wait_for_server_status(cls.servers_client, server_id, 'ACTIVE') return image @classmethod def recreate_server(cls, server_id, validatable=False, **kwargs): """Destroy an existing class level server and creates a new one Some test classes use a test server that can be used by multiple tests. This is done to optimise runtime and test load. If something goes wrong with the test server, it can be rebuilt using this helper. This helper can also be used for the initial provisioning if no server_id is specified. :param server_id: UUID of the server to be rebuilt. If None is specified, a new server is provisioned. :param validatable: whether to the server needs to be validatable. When True, validation resources are acquired via the `get_class_validation_resources` helper. :param kwargs: extra paramaters are passed through to the `create_test_server` call. :return: the UUID of the created server. 
""" if server_id: cls.delete_server(server_id) cls.password = data_utils.rand_password() server = cls.create_test_server( validatable, validation_resources=cls.get_class_validation_resources( cls.os_primary), wait_until='ACTIVE', adminPass=cls.password, **kwargs) return server['id'] @classmethod def delete_server(cls, server_id): """Deletes an existing server and waits for it to be gone.""" try: cls.servers_client.delete_server(server_id) waiters.wait_for_server_termination(cls.servers_client, server_id) except Exception: LOG.exception('Failed to delete server %s', server_id) def resize_server(self, server_id, new_flavor_id, **kwargs): """resize and confirm_resize an server, waits for it to be ACTIVE.""" self.servers_client.resize_server(server_id, new_flavor_id, **kwargs) waiters.wait_for_server_status(self.servers_client, server_id, 'VERIFY_RESIZE') self.servers_client.confirm_resize_server(server_id) waiters.wait_for_server_status( self.servers_client, server_id, 'ACTIVE') server = self.servers_client.show_server(server_id)['server'] self.assert_flavor_equal(new_flavor_id, server['flavor']) @classmethod def delete_volume(cls, volume_id): """Deletes the given volume and waits for it to be gone.""" try: cls.volumes_client.delete_volume(volume_id) # TODO(mriedem): We should move the wait_for_resource_deletion # into the delete_volume method as a convenience to the caller. cls.volumes_client.wait_for_resource_deletion(volume_id) except lib_exc.NotFound: LOG.warning("Unable to delete volume '%s' since it was not found. " "Maybe it was already deleted?", volume_id) @classmethod def get_server_ip(cls, server, validation_resources=None): """Get the server fixed or floating IP. Based on the configuration we're in, return a correct ip address for validating that a guest is up. :param server: The server dict as returned by the API :param validation_resources: The dict of validation resources provisioned for the server. """ if CONF.validation.connect_method == 'floating': if validation_resources: return validation_resources['floating_ip']['ip'] else: msg = ('When validation.connect_method equals floating, ' 'validation_resources cannot be None') raise lib_exc.InvalidParam(invalid_param=msg) elif CONF.validation.connect_method == 'fixed': addresses = server['addresses'][CONF.validation.network_for_ssh] for address in addresses: if address['version'] == CONF.validation.ip_version_for_ssh: return address['addr'] raise exceptions.ServerUnreachable(server_id=server['id']) else: raise lib_exc.InvalidConfiguration() def setUp(self): super(BaseV2ComputeTest, self).setUp() self.useFixture(api_microversion_fixture.APIMicroversionFixture( compute_microversion=self.request_microversion, volume_microversion=self.volume_request_microversion, placement_microversion=self.placement_request_microversion)) @classmethod def create_volume(cls, image_ref=None, **kwargs): """Create a volume and wait for it to become 'available'. :param image_ref: Specify an image id to create a bootable volume. :param kwargs: other parameters to create volume. :returns: The available volume. 
""" if 'size' not in kwargs: kwargs['size'] = CONF.volume.volume_size if 'display_name' not in kwargs: vol_name = data_utils.rand_name(cls.__name__ + '-volume') kwargs['display_name'] = vol_name if image_ref is not None: kwargs['imageRef'] = image_ref if CONF.compute.compute_volume_common_az: kwargs.setdefault('availability_zone', CONF.compute.compute_volume_common_az) volume = cls.volumes_client.create_volume(**kwargs)['volume'] cls.addClassResourceCleanup( cls.volumes_client.wait_for_resource_deletion, volume['id']) cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc, cls.volumes_client.delete_volume, volume['id']) waiters.wait_for_volume_resource_status(cls.volumes_client, volume['id'], 'available') return volume def _detach_volume(self, server, volume): """Helper method to detach a volume. Ignores 404 responses if the volume or server do not exist, or the volume is already detached from the server. """ try: volume = self.volumes_client.show_volume(volume['id'])['volume'] # Check the status. You can only detach an in-use volume, otherwise # the compute API will return a 400 response. if volume['status'] == 'in-use': self.servers_client.detach_volume(server['id'], volume['id']) except lib_exc.NotFound: # Ignore 404s on detach in case the server is deleted or the volume # is already detached. pass def attach_volume(self, server, volume, device=None, tag=None): """Attaches volume to server and waits for 'in-use' volume status. The volume will be detached when the test tears down. :param server: The server to which the volume will be attached. :param volume: The volume to attach. :param device: Optional mountpoint for the attached volume. Note that this is not guaranteed for all hypervisors and is not recommended. :param tag: Optional device role tag to apply to the volume. """ attach_kwargs = dict(volumeId=volume['id']) if device: attach_kwargs['device'] = device if tag: attach_kwargs['tag'] = tag attachment = self.servers_client.attach_volume( server['id'], **attach_kwargs)['volumeAttachment'] # On teardown detach the volume and for multiattach volumes wait for # the attachment to be removed. For non-multiattach volumes wait for # the state of the volume to change to available. This is so we don't # error out when trying to delete the volume during teardown. if volume['multiattach']: att = waiters.wait_for_volume_attachment_create( self.volumes_client, volume['id'], server['id']) self.addCleanup(waiters.wait_for_volume_attachment_remove, self.volumes_client, volume['id'], att['attachment_id']) else: self.addCleanup(waiters.wait_for_volume_resource_status, self.volumes_client, volume['id'], 'available') waiters.wait_for_volume_resource_status(self.volumes_client, volume['id'], 'in-use') # Ignore 404s on detach in case the server is deleted or the volume # is already detached. 
self.addCleanup(self._detach_volume, server, volume) return attachment def create_volume_snapshot(self, volume_id, name=None, description=None, metadata=None, force=False): name = name or data_utils.rand_name( self.__class__.__name__ + '-snapshot') snapshot = self.snapshots_client.create_snapshot( volume_id=volume_id, force=force, display_name=name, description=description, metadata=metadata)['snapshot'] self.addCleanup(self.snapshots_client.wait_for_resource_deletion, snapshot['id']) self.addCleanup(self.snapshots_client.delete_snapshot, snapshot['id']) waiters.wait_for_volume_resource_status(self.snapshots_client, snapshot['id'], 'available') snapshot = self.snapshots_client.show_snapshot( snapshot['id'])['snapshot'] return snapshot def assert_flavor_equal(self, flavor_id, server_flavor): """Check whether server_flavor equals to flavor. :param flavor_id: flavor id :param server_flavor: flavor info returned by show_server. """ # Nova API > 2.46 no longer includes flavor.id, and schema check # will cover whether 'id' should be in flavor if server_flavor.get('id'): msg = ('server flavor is not same as flavor!') self.assertEqual(flavor_id, server_flavor['id'], msg) else: flavor = self.flavors_client.show_flavor(flavor_id)['flavor'] self.assertEqual(flavor['name'], server_flavor['original_name'], "original_name in server flavor is not same as " "flavor name!") for key in ['ram', 'vcpus', 'disk']: msg = ('attribute %s in server flavor is not same as ' 'flavor!' % key) self.assertEqual(flavor[key], server_flavor[key], msg) class BaseV2ComputeAdminTest(BaseV2ComputeTest): """Base test case class for Compute Admin API tests.""" credentials = ['primary', 'admin'] @classmethod def setup_clients(cls): super(BaseV2ComputeAdminTest, cls).setup_clients() cls.availability_zone_admin_client = ( cls.os_admin.availability_zone_client) cls.admin_flavors_client = cls.os_admin.flavors_client cls.admin_servers_client = cls.os_admin.servers_client cls.image_client = cls.os_admin.image_client_v2 cls.admin_assisted_volume_snapshots_client = \ cls.os_admin.assisted_volume_snapshots_client def create_flavor(self, ram, vcpus, disk, name=None, is_public='True', **kwargs): if name is None: name = data_utils.rand_name(self.__class__.__name__ + "-flavor") id = kwargs.pop('id', data_utils.rand_int_id(start=1000)) client = self.admin_flavors_client flavor = client.create_flavor( ram=ram, vcpus=vcpus, disk=disk, name=name, id=id, is_public=is_public, **kwargs)['flavor'] self.addCleanup(client.wait_for_resource_deletion, flavor['id']) self.addCleanup(client.delete_flavor, flavor['id']) return flavor @classmethod def get_host_for_server(cls, server_id): server_details = cls.admin_servers_client.show_server(server_id) return server_details['server']['OS-EXT-SRV-ATTR:host'] def get_host_other_than(self, server_id): source_host = self.get_host_for_server(server_id) svcs = self.os_admin.services_client.list_services( binary='nova-compute')['services'] hosts = [] for svc in svcs: if svc['state'] == 'up' and svc['status'] == 'enabled': if CONF.compute.compute_volume_common_az: if svc['zone'] == CONF.compute.compute_volume_common_az: hosts.append(svc['host']) else: hosts.append(svc['host']) for target_host in hosts: if source_host != target_host: return target_host
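One detail worth noting in the record above: create_volume (and the server helpers) register the wait-for-deletion cleanup before the delete call on purpose, because class resource cleanups run last-in first-out, so the delete executes first and the wait runs afterwards (the comment in create_test_server states the same intent). A small standalone sketch of that ordering, independent of tempest, with illustrative names only:

import unittest


class LifoCleanupOrder(unittest.TestCase):
    """Shows that cleanups registered later run earlier (LIFO)."""

    def test_delete_runs_before_wait(self):
        calls = []
        self.addCleanup(calls.append, 'wait_for_deletion')  # registered first -> runs last
        self.addCleanup(calls.append, 'delete_resource')    # registered last -> runs first
        self.doCleanups()
        self.assertEqual(['delete_resource', 'wait_for_deletion'], calls)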
attach_volume
Attaches volume to server and waits for 'in-use' volume status. The volume will be detached when the test tears down. :param server: The server to which the volume will be attached. :param volume: The volume to attach. :param device: Optional mountpoint for the attached volume. Note that this is not guaranteed for all hypervisors and is not recommended. :param tag: Optional device role tag to apply to the volume.
# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import time from oslo_log import log as logging from tempest.common import compute from tempest.common import waiters from tempest import config from tempest import exceptions from tempest.lib.common import api_microversion_fixture from tempest.lib.common import api_version_request from tempest.lib.common import api_version_utils from tempest.lib.common.utils import data_utils from tempest.lib.common.utils import test_utils from tempest.lib import exceptions as lib_exc import tempest.test CONF = config.CONF LOG = logging.getLogger(__name__) class BaseV2ComputeTest(api_version_utils.BaseMicroversionTest, tempest.test.BaseTestCase): """Base test case class for all Compute API tests.""" force_tenant_isolation = False # Set this to True in subclasses to create a default network. See # https://bugs.launchpad.net/tempest/+bug/1844568 create_default_network = False # TODO(andreaf) We should care also for the alt_manager here # but only once client lazy load in the manager is done credentials = ['primary'] @classmethod def skip_checks(cls): super(BaseV2ComputeTest, cls).skip_checks() if not CONF.service_available.nova: raise cls.skipException("Nova is not available") api_version_utils.check_skip_with_microversion( cls.min_microversion, cls.max_microversion, CONF.compute.min_microversion, CONF.compute.max_microversion) api_version_utils.check_skip_with_microversion( cls.volume_min_microversion, cls.volume_max_microversion, CONF.volume.min_microversion, CONF.volume.max_microversion) api_version_utils.check_skip_with_microversion( cls.placement_min_microversion, cls.placement_max_microversion, CONF.placement.min_microversion, CONF.placement.max_microversion) @classmethod def setup_credentials(cls): # Setting network=True, subnet=True creates a default network cls.set_network_resources( network=cls.create_default_network, subnet=cls.create_default_network) super(BaseV2ComputeTest, cls).setup_credentials() @classmethod def setup_clients(cls): super(BaseV2ComputeTest, cls).setup_clients() cls.servers_client = cls.os_primary.servers_client cls.server_groups_client = cls.os_primary.server_groups_client cls.flavors_client = cls.os_primary.flavors_client cls.compute_images_client = cls.os_primary.compute_images_client cls.extensions_client = cls.os_primary.extensions_client cls.floating_ip_pools_client = cls.os_primary.floating_ip_pools_client cls.floating_ips_client = cls.os_primary.compute_floating_ips_client cls.keypairs_client = cls.os_primary.keypairs_client cls.security_group_rules_client = ( cls.os_primary.compute_security_group_rules_client) cls.security_groups_client =\ cls.os_primary.compute_security_groups_client cls.quotas_client = cls.os_primary.quotas_client cls.compute_networks_client = cls.os_primary.compute_networks_client cls.limits_client = cls.os_primary.limits_client cls.volumes_extensions_client =\ cls.os_primary.volumes_extensions_client cls.snapshots_extensions_client =\ 
cls.os_primary.snapshots_extensions_client cls.interfaces_client = cls.os_primary.interfaces_client cls.fixed_ips_client = cls.os_primary.fixed_ips_client cls.availability_zone_client = cls.os_primary.availability_zone_client cls.agents_client = cls.os_primary.agents_client cls.aggregates_client = cls.os_primary.aggregates_client cls.services_client = cls.os_primary.services_client cls.instance_usages_audit_log_client = ( cls.os_primary.instance_usages_audit_log_client) cls.hypervisor_client = cls.os_primary.hypervisor_client cls.certificates_client = cls.os_primary.certificates_client cls.migrations_client = cls.os_primary.migrations_client cls.security_group_default_rules_client = ( cls.os_primary.security_group_default_rules_client) cls.versions_client = cls.os_primary.compute_versions_client if CONF.service_available.cinder: cls.volumes_client = cls.os_primary.volumes_client_latest cls.attachments_client = cls.os_primary.attachments_client_latest cls.snapshots_client = cls.os_primary.snapshots_client_latest if CONF.service_available.glance: if CONF.image_feature_enabled.api_v1: cls.images_client = cls.os_primary.image_client elif CONF.image_feature_enabled.api_v2: cls.images_client = cls.os_primary.image_client_v2 else: raise lib_exc.InvalidConfiguration( 'Either api_v1 or api_v2 must be True in ' '[image-feature-enabled].') cls._check_depends_on_nova_network() @classmethod def _check_depends_on_nova_network(cls): # Since nova-network APIs were removed from Nova in the Rocky release, # determine, based on the max version from the version document, if # the compute API is >Queens and if so, skip tests that rely on # nova-network. if not getattr(cls, 'depends_on_nova_network', False): return versions = cls.versions_client.list_versions()['versions'] # Find the v2.1 version which will tell us our max version for the # compute API we're testing against. for version in versions: if version['id'] == 'v2.1': max_version = api_version_request.APIVersionRequest( version['version']) break else: LOG.warning( 'Unable to determine max v2.1 compute API version: %s', versions) return # The max compute API version in Queens is 2.60 so we cap # at that version. 
queens = api_version_request.APIVersionRequest('2.60') if max_version > queens: raise cls.skipException('nova-network is gone') @classmethod def resource_setup(cls): super(BaseV2ComputeTest, cls).resource_setup() cls.request_microversion = ( api_version_utils.select_request_microversion( cls.min_microversion, CONF.compute.min_microversion)) cls.volume_request_microversion = ( api_version_utils.select_request_microversion( cls.volume_min_microversion, CONF.volume.min_microversion)) cls.placement_request_microversion = ( api_version_utils.select_request_microversion( cls.placement_min_microversion, CONF.placement.min_microversion)) cls.build_interval = CONF.compute.build_interval cls.build_timeout = CONF.compute.build_timeout cls.image_ref = CONF.compute.image_ref cls.image_ref_alt = CONF.compute.image_ref_alt cls.flavor_ref = CONF.compute.flavor_ref cls.flavor_ref_alt = CONF.compute.flavor_ref_alt cls.ssh_user = CONF.validation.image_ssh_user cls.ssh_alt_user = CONF.validation.image_alt_ssh_user cls.image_ssh_user = CONF.validation.image_ssh_user cls.image_alt_ssh_user = CONF.validation.image_alt_ssh_user cls.image_ssh_password = CONF.validation.image_ssh_password cls.image_alt_ssh_password = CONF.validation.image_alt_ssh_password @classmethod def is_requested_microversion_compatible(cls, max_version): """Check the compatibility of selected request microversion This method will check if selected request microversion (cls.request_microversion) for test is compatible with respect to 'max_version'. Compatible means if selected request microversion is in the range(<=) of 'max_version'. :param max_version: maximum microversion to compare for compatibility. Example: '2.30' :returns: True if selected request microversion is compatible with 'max_version'. False in other case. """ try: req_version_obj = api_version_request.APIVersionRequest( cls.request_microversion) # NOTE(gmann): This is case where this method is used before calling # resource_setup(), where cls.request_microversion is set. There may # not be any such case but still we can handle this case. except AttributeError: request_microversion = ( api_version_utils.select_request_microversion( cls.min_microversion, CONF.compute.min_microversion)) req_version_obj = api_version_request.APIVersionRequest( request_microversion) max_version_obj = api_version_request.APIVersionRequest(max_version) return req_version_obj <= max_version_obj @classmethod def server_check_teardown(cls): """Checks is the shared server clean enough for subsequent test. Method will delete the server when it's dirty. The setUp method is responsible for creating a new server. Exceptions raised in tearDown class are fails the test case, This method supposed to use only by tearDown methods, when the shared server_id is stored in the server_id of the class. """ if getattr(cls, 'server_id', None) is not None: try: waiters.wait_for_server_status(cls.servers_client, cls.server_id, 'ACTIVE') except Exception as exc: LOG.exception(exc) cls.servers_client.delete_server(cls.server_id) waiters.wait_for_server_termination(cls.servers_client, cls.server_id) cls.server_id = None raise @classmethod def create_test_server(cls, validatable=False, volume_backed=False, validation_resources=None, clients=None, **kwargs): """Wrapper utility that returns a test server. This wrapper utility calls the common create test server and returns a test server. The purpose of this wrapper is to minimize the impact on the code of the tests already using this function. 
:param validatable: Whether the server will be pingable or sshable. :param volume_backed: Whether the instance is volume backed or not. :param validation_resources: Dictionary of validation resources as returned by `get_class_validation_resources`. :param clients: Client manager, defaults to os_primary. :param kwargs: Extra arguments are passed down to the `compute.create_test_server` call. """ if 'name' not in kwargs: kwargs['name'] = data_utils.rand_name(cls.__name__ + "-server") request_version = api_version_request.APIVersionRequest( cls.request_microversion) v2_37_version = api_version_request.APIVersionRequest('2.37') tenant_network = cls.get_tenant_network() # NOTE(snikitin): since microversion v2.37 'networks' field is required if (request_version >= v2_37_version and 'networks' not in kwargs and not tenant_network): kwargs['networks'] = 'none' if clients is None: clients = cls.os_primary body, servers = compute.create_test_server( clients, validatable, validation_resources=validation_resources, tenant_network=tenant_network, volume_backed=volume_backed, **kwargs) # For each server schedule wait and delete, so we first delete all # and then wait for all for server in servers: cls.addClassResourceCleanup(waiters.wait_for_server_termination, clients.servers_client, server['id']) for server in servers: cls.addClassResourceCleanup( test_utils.call_and_ignore_notfound_exc, clients.servers_client.delete_server, server['id']) return body @classmethod def create_security_group(cls, name=None, description=None): if name is None: name = data_utils.rand_name(cls.__name__ + "-securitygroup") if description is None: description = data_utils.rand_name('description') body = cls.security_groups_client.create_security_group( name=name, description=description)['security_group'] cls.addClassResourceCleanup( test_utils.call_and_ignore_notfound_exc, cls.security_groups_client.delete_security_group, body['id']) return body @classmethod def create_test_server_group(cls, name="", policy=None): if not name: name = data_utils.rand_name(cls.__name__ + "-Server-Group") if policy is None: policy = ['affinity'] body = cls.server_groups_client.create_server_group( name=name, policies=policy)['server_group'] cls.addClassResourceCleanup( test_utils.call_and_ignore_notfound_exc, cls.server_groups_client.delete_server_group, body['id']) return body def wait_for(self, condition): """Repeatedly calls condition() until a timeout.""" start_time = int(time.time()) while True: try: condition() except Exception: pass else: return if int(time.time()) - start_time >= self.build_timeout: condition() return time.sleep(self.build_interval) @classmethod def prepare_instance_network(cls): if (CONF.validation.auth_method != 'disabled' and CONF.validation.connect_method == 'floating'): cls.set_network_resources(network=True, subnet=True, router=True, dhcp=True) @classmethod def create_image_from_server(cls, server_id, **kwargs): """Wrapper utility that returns an image created from the server. If compute microversion >= 2.36, the returned image response will be from the image service API rather than the compute image proxy API. 
""" name = kwargs.pop('name', data_utils.rand_name(cls.__name__ + "-image")) wait_until = kwargs.pop('wait_until', None) wait_for_server = kwargs.pop('wait_for_server', True) image = cls.compute_images_client.create_image(server_id, name=name, **kwargs) if api_version_utils.compare_version_header_to_response( "OpenStack-API-Version", "compute 2.45", image.response, "lt"): image_id = image['image_id'] else: image_id = data_utils.parse_image_id(image.response['location']) # The compute image proxy APIs were deprecated in 2.35 so # use the images client directly if the API microversion being # used is >=2.36. if not cls.is_requested_microversion_compatible('2.35'): client = cls.images_client else: client = cls.compute_images_client cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc, client.delete_image, image_id) if wait_until is not None: try: wait_until = wait_until.upper() if not cls.is_requested_microversion_compatible('2.35'): wait_until = wait_until.lower() waiters.wait_for_image_status(client, image_id, wait_until) except lib_exc.NotFound: if wait_until.upper() == 'ACTIVE': # If the image is not found after create_image returned # that means the snapshot failed in nova-compute and nova # deleted the image. There should be a compute fault # recorded with the server in that case, so get the server # and dump some details. server = ( cls.servers_client.show_server(server_id)['server']) if 'fault' in server: raise exceptions.SnapshotNotFoundException( server['fault'], image_id=image_id) else: raise exceptions.SnapshotNotFoundException( image_id=image_id) else: raise image = client.show_image(image_id) # Compute image client returns response wrapped in 'image' element # which is not the case with Glance image client. if 'image' in image: image = image['image'] if wait_until.upper() == 'ACTIVE': if wait_for_server: waiters.wait_for_server_status(cls.servers_client, server_id, 'ACTIVE') return image @classmethod def recreate_server(cls, server_id, validatable=False, **kwargs): """Destroy an existing class level server and creates a new one Some test classes use a test server that can be used by multiple tests. This is done to optimise runtime and test load. If something goes wrong with the test server, it can be rebuilt using this helper. This helper can also be used for the initial provisioning if no server_id is specified. :param server_id: UUID of the server to be rebuilt. If None is specified, a new server is provisioned. :param validatable: whether to the server needs to be validatable. When True, validation resources are acquired via the `get_class_validation_resources` helper. :param kwargs: extra paramaters are passed through to the `create_test_server` call. :return: the UUID of the created server. 
""" if server_id: cls.delete_server(server_id) cls.password = data_utils.rand_password() server = cls.create_test_server( validatable, validation_resources=cls.get_class_validation_resources( cls.os_primary), wait_until='ACTIVE', adminPass=cls.password, **kwargs) return server['id'] @classmethod def delete_server(cls, server_id): """Deletes an existing server and waits for it to be gone.""" try: cls.servers_client.delete_server(server_id) waiters.wait_for_server_termination(cls.servers_client, server_id) except Exception: LOG.exception('Failed to delete server %s', server_id) def resize_server(self, server_id, new_flavor_id, **kwargs): """resize and confirm_resize an server, waits for it to be ACTIVE.""" self.servers_client.resize_server(server_id, new_flavor_id, **kwargs) waiters.wait_for_server_status(self.servers_client, server_id, 'VERIFY_RESIZE') self.servers_client.confirm_resize_server(server_id) waiters.wait_for_server_status( self.servers_client, server_id, 'ACTIVE') server = self.servers_client.show_server(server_id)['server'] self.assert_flavor_equal(new_flavor_id, server['flavor']) @classmethod def delete_volume(cls, volume_id): """Deletes the given volume and waits for it to be gone.""" try: cls.volumes_client.delete_volume(volume_id) # TODO(mriedem): We should move the wait_for_resource_deletion # into the delete_volume method as a convenience to the caller. cls.volumes_client.wait_for_resource_deletion(volume_id) except lib_exc.NotFound: LOG.warning("Unable to delete volume '%s' since it was not found. " "Maybe it was already deleted?", volume_id) @classmethod def get_server_ip(cls, server, validation_resources=None): """Get the server fixed or floating IP. Based on the configuration we're in, return a correct ip address for validating that a guest is up. :param server: The server dict as returned by the API :param validation_resources: The dict of validation resources provisioned for the server. """ if CONF.validation.connect_method == 'floating': if validation_resources: return validation_resources['floating_ip']['ip'] else: msg = ('When validation.connect_method equals floating, ' 'validation_resources cannot be None') raise lib_exc.InvalidParam(invalid_param=msg) elif CONF.validation.connect_method == 'fixed': addresses = server['addresses'][CONF.validation.network_for_ssh] for address in addresses: if address['version'] == CONF.validation.ip_version_for_ssh: return address['addr'] raise exceptions.ServerUnreachable(server_id=server['id']) else: raise lib_exc.InvalidConfiguration() def setUp(self): super(BaseV2ComputeTest, self).setUp() self.useFixture(api_microversion_fixture.APIMicroversionFixture( compute_microversion=self.request_microversion, volume_microversion=self.volume_request_microversion, placement_microversion=self.placement_request_microversion)) @classmethod def create_volume(cls, image_ref=None, **kwargs): """Create a volume and wait for it to become 'available'. :param image_ref: Specify an image id to create a bootable volume. :param kwargs: other parameters to create volume. :returns: The available volume. 
""" if 'size' not in kwargs: kwargs['size'] = CONF.volume.volume_size if 'display_name' not in kwargs: vol_name = data_utils.rand_name(cls.__name__ + '-volume') kwargs['display_name'] = vol_name if image_ref is not None: kwargs['imageRef'] = image_ref if CONF.compute.compute_volume_common_az: kwargs.setdefault('availability_zone', CONF.compute.compute_volume_common_az) volume = cls.volumes_client.create_volume(**kwargs)['volume'] cls.addClassResourceCleanup( cls.volumes_client.wait_for_resource_deletion, volume['id']) cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc, cls.volumes_client.delete_volume, volume['id']) waiters.wait_for_volume_resource_status(cls.volumes_client, volume['id'], 'available') return volume def _detach_volume(self, server, volume): """Helper method to detach a volume. Ignores 404 responses if the volume or server do not exist, or the volume is already detached from the server. """ try: volume = self.volumes_client.show_volume(volume['id'])['volume'] # Check the status. You can only detach an in-use volume, otherwise # the compute API will return a 400 response. if volume['status'] == 'in-use': self.servers_client.detach_volume(server['id'], volume['id']) except lib_exc.NotFound: # Ignore 404s on detach in case the server is deleted or the volume # is already detached. pass # MASKED: attach_volume function (lines 548-585) def create_volume_snapshot(self, volume_id, name=None, description=None, metadata=None, force=False): name = name or data_utils.rand_name( self.__class__.__name__ + '-snapshot') snapshot = self.snapshots_client.create_snapshot( volume_id=volume_id, force=force, display_name=name, description=description, metadata=metadata)['snapshot'] self.addCleanup(self.snapshots_client.wait_for_resource_deletion, snapshot['id']) self.addCleanup(self.snapshots_client.delete_snapshot, snapshot['id']) waiters.wait_for_volume_resource_status(self.snapshots_client, snapshot['id'], 'available') snapshot = self.snapshots_client.show_snapshot( snapshot['id'])['snapshot'] return snapshot def assert_flavor_equal(self, flavor_id, server_flavor): """Check whether server_flavor equals to flavor. :param flavor_id: flavor id :param server_flavor: flavor info returned by show_server. """ # Nova API > 2.46 no longer includes flavor.id, and schema check # will cover whether 'id' should be in flavor if server_flavor.get('id'): msg = ('server flavor is not same as flavor!') self.assertEqual(flavor_id, server_flavor['id'], msg) else: flavor = self.flavors_client.show_flavor(flavor_id)['flavor'] self.assertEqual(flavor['name'], server_flavor['original_name'], "original_name in server flavor is not same as " "flavor name!") for key in ['ram', 'vcpus', 'disk']: msg = ('attribute %s in server flavor is not same as ' 'flavor!' 
% key) self.assertEqual(flavor[key], server_flavor[key], msg) class BaseV2ComputeAdminTest(BaseV2ComputeTest): """Base test case class for Compute Admin API tests.""" credentials = ['primary', 'admin'] @classmethod def setup_clients(cls): super(BaseV2ComputeAdminTest, cls).setup_clients() cls.availability_zone_admin_client = ( cls.os_admin.availability_zone_client) cls.admin_flavors_client = cls.os_admin.flavors_client cls.admin_servers_client = cls.os_admin.servers_client cls.image_client = cls.os_admin.image_client_v2 cls.admin_assisted_volume_snapshots_client = \ cls.os_admin.assisted_volume_snapshots_client def create_flavor(self, ram, vcpus, disk, name=None, is_public='True', **kwargs): if name is None: name = data_utils.rand_name(self.__class__.__name__ + "-flavor") id = kwargs.pop('id', data_utils.rand_int_id(start=1000)) client = self.admin_flavors_client flavor = client.create_flavor( ram=ram, vcpus=vcpus, disk=disk, name=name, id=id, is_public=is_public, **kwargs)['flavor'] self.addCleanup(client.wait_for_resource_deletion, flavor['id']) self.addCleanup(client.delete_flavor, flavor['id']) return flavor @classmethod def get_host_for_server(cls, server_id): server_details = cls.admin_servers_client.show_server(server_id) return server_details['server']['OS-EXT-SRV-ATTR:host'] def get_host_other_than(self, server_id): source_host = self.get_host_for_server(server_id) svcs = self.os_admin.services_client.list_services( binary='nova-compute')['services'] hosts = [] for svc in svcs: if svc['state'] == 'up' and svc['status'] == 'enabled': if CONF.compute.compute_volume_common_az: if svc['zone'] == CONF.compute.compute_volume_common_az: hosts.append(svc['host']) else: hosts.append(svc['host']) for target_host in hosts: if source_host != target_host: return target_host
def attach_volume(self, server, volume, device=None, tag=None): """Attaches volume to server and waits for 'in-use' volume status. The volume will be detached when the test tears down. :param server: The server to which the volume will be attached. :param volume: The volume to attach. :param device: Optional mountpoint for the attached volume. Note that this is not guaranteed for all hypervisors and is not recommended. :param tag: Optional device role tag to apply to the volume. """ attach_kwargs = dict(volumeId=volume['id']) if device: attach_kwargs['device'] = device if tag: attach_kwargs['tag'] = tag attachment = self.servers_client.attach_volume( server['id'], **attach_kwargs)['volumeAttachment'] # On teardown detach the volume and for multiattach volumes wait for # the attachment to be removed. For non-multiattach volumes wait for # the state of the volume to change to available. This is so we don't # error out when trying to delete the volume during teardown. if volume['multiattach']: att = waiters.wait_for_volume_attachment_create( self.volumes_client, volume['id'], server['id']) self.addCleanup(waiters.wait_for_volume_attachment_remove, self.volumes_client, volume['id'], att['attachment_id']) else: self.addCleanup(waiters.wait_for_volume_resource_status, self.volumes_client, volume['id'], 'available') waiters.wait_for_volume_resource_status(self.volumes_client, volume['id'], 'in-use') # Ignore 404s on detach in case the server is deleted or the volume # is already detached. self.addCleanup(self._detach_volume, server, volume) return attachment
548
585
# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import time from oslo_log import log as logging from tempest.common import compute from tempest.common import waiters from tempest import config from tempest import exceptions from tempest.lib.common import api_microversion_fixture from tempest.lib.common import api_version_request from tempest.lib.common import api_version_utils from tempest.lib.common.utils import data_utils from tempest.lib.common.utils import test_utils from tempest.lib import exceptions as lib_exc import tempest.test CONF = config.CONF LOG = logging.getLogger(__name__) class BaseV2ComputeTest(api_version_utils.BaseMicroversionTest, tempest.test.BaseTestCase): """Base test case class for all Compute API tests.""" force_tenant_isolation = False # Set this to True in subclasses to create a default network. See # https://bugs.launchpad.net/tempest/+bug/1844568 create_default_network = False # TODO(andreaf) We should care also for the alt_manager here # but only once client lazy load in the manager is done credentials = ['primary'] @classmethod def skip_checks(cls): super(BaseV2ComputeTest, cls).skip_checks() if not CONF.service_available.nova: raise cls.skipException("Nova is not available") api_version_utils.check_skip_with_microversion( cls.min_microversion, cls.max_microversion, CONF.compute.min_microversion, CONF.compute.max_microversion) api_version_utils.check_skip_with_microversion( cls.volume_min_microversion, cls.volume_max_microversion, CONF.volume.min_microversion, CONF.volume.max_microversion) api_version_utils.check_skip_with_microversion( cls.placement_min_microversion, cls.placement_max_microversion, CONF.placement.min_microversion, CONF.placement.max_microversion) @classmethod def setup_credentials(cls): # Setting network=True, subnet=True creates a default network cls.set_network_resources( network=cls.create_default_network, subnet=cls.create_default_network) super(BaseV2ComputeTest, cls).setup_credentials() @classmethod def setup_clients(cls): super(BaseV2ComputeTest, cls).setup_clients() cls.servers_client = cls.os_primary.servers_client cls.server_groups_client = cls.os_primary.server_groups_client cls.flavors_client = cls.os_primary.flavors_client cls.compute_images_client = cls.os_primary.compute_images_client cls.extensions_client = cls.os_primary.extensions_client cls.floating_ip_pools_client = cls.os_primary.floating_ip_pools_client cls.floating_ips_client = cls.os_primary.compute_floating_ips_client cls.keypairs_client = cls.os_primary.keypairs_client cls.security_group_rules_client = ( cls.os_primary.compute_security_group_rules_client) cls.security_groups_client =\ cls.os_primary.compute_security_groups_client cls.quotas_client = cls.os_primary.quotas_client cls.compute_networks_client = cls.os_primary.compute_networks_client cls.limits_client = cls.os_primary.limits_client cls.volumes_extensions_client =\ cls.os_primary.volumes_extensions_client cls.snapshots_extensions_client =\ 
cls.os_primary.snapshots_extensions_client cls.interfaces_client = cls.os_primary.interfaces_client cls.fixed_ips_client = cls.os_primary.fixed_ips_client cls.availability_zone_client = cls.os_primary.availability_zone_client cls.agents_client = cls.os_primary.agents_client cls.aggregates_client = cls.os_primary.aggregates_client cls.services_client = cls.os_primary.services_client cls.instance_usages_audit_log_client = ( cls.os_primary.instance_usages_audit_log_client) cls.hypervisor_client = cls.os_primary.hypervisor_client cls.certificates_client = cls.os_primary.certificates_client cls.migrations_client = cls.os_primary.migrations_client cls.security_group_default_rules_client = ( cls.os_primary.security_group_default_rules_client) cls.versions_client = cls.os_primary.compute_versions_client if CONF.service_available.cinder: cls.volumes_client = cls.os_primary.volumes_client_latest cls.attachments_client = cls.os_primary.attachments_client_latest cls.snapshots_client = cls.os_primary.snapshots_client_latest if CONF.service_available.glance: if CONF.image_feature_enabled.api_v1: cls.images_client = cls.os_primary.image_client elif CONF.image_feature_enabled.api_v2: cls.images_client = cls.os_primary.image_client_v2 else: raise lib_exc.InvalidConfiguration( 'Either api_v1 or api_v2 must be True in ' '[image-feature-enabled].') cls._check_depends_on_nova_network() @classmethod def _check_depends_on_nova_network(cls): # Since nova-network APIs were removed from Nova in the Rocky release, # determine, based on the max version from the version document, if # the compute API is >Queens and if so, skip tests that rely on # nova-network. if not getattr(cls, 'depends_on_nova_network', False): return versions = cls.versions_client.list_versions()['versions'] # Find the v2.1 version which will tell us our max version for the # compute API we're testing against. for version in versions: if version['id'] == 'v2.1': max_version = api_version_request.APIVersionRequest( version['version']) break else: LOG.warning( 'Unable to determine max v2.1 compute API version: %s', versions) return # The max compute API version in Queens is 2.60 so we cap # at that version. 
queens = api_version_request.APIVersionRequest('2.60') if max_version > queens: raise cls.skipException('nova-network is gone') @classmethod def resource_setup(cls): super(BaseV2ComputeTest, cls).resource_setup() cls.request_microversion = ( api_version_utils.select_request_microversion( cls.min_microversion, CONF.compute.min_microversion)) cls.volume_request_microversion = ( api_version_utils.select_request_microversion( cls.volume_min_microversion, CONF.volume.min_microversion)) cls.placement_request_microversion = ( api_version_utils.select_request_microversion( cls.placement_min_microversion, CONF.placement.min_microversion)) cls.build_interval = CONF.compute.build_interval cls.build_timeout = CONF.compute.build_timeout cls.image_ref = CONF.compute.image_ref cls.image_ref_alt = CONF.compute.image_ref_alt cls.flavor_ref = CONF.compute.flavor_ref cls.flavor_ref_alt = CONF.compute.flavor_ref_alt cls.ssh_user = CONF.validation.image_ssh_user cls.ssh_alt_user = CONF.validation.image_alt_ssh_user cls.image_ssh_user = CONF.validation.image_ssh_user cls.image_alt_ssh_user = CONF.validation.image_alt_ssh_user cls.image_ssh_password = CONF.validation.image_ssh_password cls.image_alt_ssh_password = CONF.validation.image_alt_ssh_password @classmethod def is_requested_microversion_compatible(cls, max_version): """Check the compatibility of selected request microversion This method will check if selected request microversion (cls.request_microversion) for test is compatible with respect to 'max_version'. Compatible means if selected request microversion is in the range(<=) of 'max_version'. :param max_version: maximum microversion to compare for compatibility. Example: '2.30' :returns: True if selected request microversion is compatible with 'max_version'. False in other case. """ try: req_version_obj = api_version_request.APIVersionRequest( cls.request_microversion) # NOTE(gmann): This is case where this method is used before calling # resource_setup(), where cls.request_microversion is set. There may # not be any such case but still we can handle this case. except AttributeError: request_microversion = ( api_version_utils.select_request_microversion( cls.min_microversion, CONF.compute.min_microversion)) req_version_obj = api_version_request.APIVersionRequest( request_microversion) max_version_obj = api_version_request.APIVersionRequest(max_version) return req_version_obj <= max_version_obj @classmethod def server_check_teardown(cls): """Checks is the shared server clean enough for subsequent test. Method will delete the server when it's dirty. The setUp method is responsible for creating a new server. Exceptions raised in tearDown class are fails the test case, This method supposed to use only by tearDown methods, when the shared server_id is stored in the server_id of the class. """ if getattr(cls, 'server_id', None) is not None: try: waiters.wait_for_server_status(cls.servers_client, cls.server_id, 'ACTIVE') except Exception as exc: LOG.exception(exc) cls.servers_client.delete_server(cls.server_id) waiters.wait_for_server_termination(cls.servers_client, cls.server_id) cls.server_id = None raise @classmethod def create_test_server(cls, validatable=False, volume_backed=False, validation_resources=None, clients=None, **kwargs): """Wrapper utility that returns a test server. This wrapper utility calls the common create test server and returns a test server. The purpose of this wrapper is to minimize the impact on the code of the tests already using this function. 
:param validatable: Whether the server will be pingable or sshable. :param volume_backed: Whether the instance is volume backed or not. :param validation_resources: Dictionary of validation resources as returned by `get_class_validation_resources`. :param clients: Client manager, defaults to os_primary. :param kwargs: Extra arguments are passed down to the `compute.create_test_server` call. """ if 'name' not in kwargs: kwargs['name'] = data_utils.rand_name(cls.__name__ + "-server") request_version = api_version_request.APIVersionRequest( cls.request_microversion) v2_37_version = api_version_request.APIVersionRequest('2.37') tenant_network = cls.get_tenant_network() # NOTE(snikitin): since microversion v2.37 'networks' field is required if (request_version >= v2_37_version and 'networks' not in kwargs and not tenant_network): kwargs['networks'] = 'none' if clients is None: clients = cls.os_primary body, servers = compute.create_test_server( clients, validatable, validation_resources=validation_resources, tenant_network=tenant_network, volume_backed=volume_backed, **kwargs) # For each server schedule wait and delete, so we first delete all # and then wait for all for server in servers: cls.addClassResourceCleanup(waiters.wait_for_server_termination, clients.servers_client, server['id']) for server in servers: cls.addClassResourceCleanup( test_utils.call_and_ignore_notfound_exc, clients.servers_client.delete_server, server['id']) return body @classmethod def create_security_group(cls, name=None, description=None): if name is None: name = data_utils.rand_name(cls.__name__ + "-securitygroup") if description is None: description = data_utils.rand_name('description') body = cls.security_groups_client.create_security_group( name=name, description=description)['security_group'] cls.addClassResourceCleanup( test_utils.call_and_ignore_notfound_exc, cls.security_groups_client.delete_security_group, body['id']) return body @classmethod def create_test_server_group(cls, name="", policy=None): if not name: name = data_utils.rand_name(cls.__name__ + "-Server-Group") if policy is None: policy = ['affinity'] body = cls.server_groups_client.create_server_group( name=name, policies=policy)['server_group'] cls.addClassResourceCleanup( test_utils.call_and_ignore_notfound_exc, cls.server_groups_client.delete_server_group, body['id']) return body def wait_for(self, condition): """Repeatedly calls condition() until a timeout.""" start_time = int(time.time()) while True: try: condition() except Exception: pass else: return if int(time.time()) - start_time >= self.build_timeout: condition() return time.sleep(self.build_interval) @classmethod def prepare_instance_network(cls): if (CONF.validation.auth_method != 'disabled' and CONF.validation.connect_method == 'floating'): cls.set_network_resources(network=True, subnet=True, router=True, dhcp=True) @classmethod def create_image_from_server(cls, server_id, **kwargs): """Wrapper utility that returns an image created from the server. If compute microversion >= 2.36, the returned image response will be from the image service API rather than the compute image proxy API. 
""" name = kwargs.pop('name', data_utils.rand_name(cls.__name__ + "-image")) wait_until = kwargs.pop('wait_until', None) wait_for_server = kwargs.pop('wait_for_server', True) image = cls.compute_images_client.create_image(server_id, name=name, **kwargs) if api_version_utils.compare_version_header_to_response( "OpenStack-API-Version", "compute 2.45", image.response, "lt"): image_id = image['image_id'] else: image_id = data_utils.parse_image_id(image.response['location']) # The compute image proxy APIs were deprecated in 2.35 so # use the images client directly if the API microversion being # used is >=2.36. if not cls.is_requested_microversion_compatible('2.35'): client = cls.images_client else: client = cls.compute_images_client cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc, client.delete_image, image_id) if wait_until is not None: try: wait_until = wait_until.upper() if not cls.is_requested_microversion_compatible('2.35'): wait_until = wait_until.lower() waiters.wait_for_image_status(client, image_id, wait_until) except lib_exc.NotFound: if wait_until.upper() == 'ACTIVE': # If the image is not found after create_image returned # that means the snapshot failed in nova-compute and nova # deleted the image. There should be a compute fault # recorded with the server in that case, so get the server # and dump some details. server = ( cls.servers_client.show_server(server_id)['server']) if 'fault' in server: raise exceptions.SnapshotNotFoundException( server['fault'], image_id=image_id) else: raise exceptions.SnapshotNotFoundException( image_id=image_id) else: raise image = client.show_image(image_id) # Compute image client returns response wrapped in 'image' element # which is not the case with Glance image client. if 'image' in image: image = image['image'] if wait_until.upper() == 'ACTIVE': if wait_for_server: waiters.wait_for_server_status(cls.servers_client, server_id, 'ACTIVE') return image @classmethod def recreate_server(cls, server_id, validatable=False, **kwargs): """Destroy an existing class level server and creates a new one Some test classes use a test server that can be used by multiple tests. This is done to optimise runtime and test load. If something goes wrong with the test server, it can be rebuilt using this helper. This helper can also be used for the initial provisioning if no server_id is specified. :param server_id: UUID of the server to be rebuilt. If None is specified, a new server is provisioned. :param validatable: whether to the server needs to be validatable. When True, validation resources are acquired via the `get_class_validation_resources` helper. :param kwargs: extra paramaters are passed through to the `create_test_server` call. :return: the UUID of the created server. 
""" if server_id: cls.delete_server(server_id) cls.password = data_utils.rand_password() server = cls.create_test_server( validatable, validation_resources=cls.get_class_validation_resources( cls.os_primary), wait_until='ACTIVE', adminPass=cls.password, **kwargs) return server['id'] @classmethod def delete_server(cls, server_id): """Deletes an existing server and waits for it to be gone.""" try: cls.servers_client.delete_server(server_id) waiters.wait_for_server_termination(cls.servers_client, server_id) except Exception: LOG.exception('Failed to delete server %s', server_id) def resize_server(self, server_id, new_flavor_id, **kwargs): """resize and confirm_resize an server, waits for it to be ACTIVE.""" self.servers_client.resize_server(server_id, new_flavor_id, **kwargs) waiters.wait_for_server_status(self.servers_client, server_id, 'VERIFY_RESIZE') self.servers_client.confirm_resize_server(server_id) waiters.wait_for_server_status( self.servers_client, server_id, 'ACTIVE') server = self.servers_client.show_server(server_id)['server'] self.assert_flavor_equal(new_flavor_id, server['flavor']) @classmethod def delete_volume(cls, volume_id): """Deletes the given volume and waits for it to be gone.""" try: cls.volumes_client.delete_volume(volume_id) # TODO(mriedem): We should move the wait_for_resource_deletion # into the delete_volume method as a convenience to the caller. cls.volumes_client.wait_for_resource_deletion(volume_id) except lib_exc.NotFound: LOG.warning("Unable to delete volume '%s' since it was not found. " "Maybe it was already deleted?", volume_id) @classmethod def get_server_ip(cls, server, validation_resources=None): """Get the server fixed or floating IP. Based on the configuration we're in, return a correct ip address for validating that a guest is up. :param server: The server dict as returned by the API :param validation_resources: The dict of validation resources provisioned for the server. """ if CONF.validation.connect_method == 'floating': if validation_resources: return validation_resources['floating_ip']['ip'] else: msg = ('When validation.connect_method equals floating, ' 'validation_resources cannot be None') raise lib_exc.InvalidParam(invalid_param=msg) elif CONF.validation.connect_method == 'fixed': addresses = server['addresses'][CONF.validation.network_for_ssh] for address in addresses: if address['version'] == CONF.validation.ip_version_for_ssh: return address['addr'] raise exceptions.ServerUnreachable(server_id=server['id']) else: raise lib_exc.InvalidConfiguration() def setUp(self): super(BaseV2ComputeTest, self).setUp() self.useFixture(api_microversion_fixture.APIMicroversionFixture( compute_microversion=self.request_microversion, volume_microversion=self.volume_request_microversion, placement_microversion=self.placement_request_microversion)) @classmethod def create_volume(cls, image_ref=None, **kwargs): """Create a volume and wait for it to become 'available'. :param image_ref: Specify an image id to create a bootable volume. :param kwargs: other parameters to create volume. :returns: The available volume. 
""" if 'size' not in kwargs: kwargs['size'] = CONF.volume.volume_size if 'display_name' not in kwargs: vol_name = data_utils.rand_name(cls.__name__ + '-volume') kwargs['display_name'] = vol_name if image_ref is not None: kwargs['imageRef'] = image_ref if CONF.compute.compute_volume_common_az: kwargs.setdefault('availability_zone', CONF.compute.compute_volume_common_az) volume = cls.volumes_client.create_volume(**kwargs)['volume'] cls.addClassResourceCleanup( cls.volumes_client.wait_for_resource_deletion, volume['id']) cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc, cls.volumes_client.delete_volume, volume['id']) waiters.wait_for_volume_resource_status(cls.volumes_client, volume['id'], 'available') return volume def _detach_volume(self, server, volume): """Helper method to detach a volume. Ignores 404 responses if the volume or server do not exist, or the volume is already detached from the server. """ try: volume = self.volumes_client.show_volume(volume['id'])['volume'] # Check the status. You can only detach an in-use volume, otherwise # the compute API will return a 400 response. if volume['status'] == 'in-use': self.servers_client.detach_volume(server['id'], volume['id']) except lib_exc.NotFound: # Ignore 404s on detach in case the server is deleted or the volume # is already detached. pass def attach_volume(self, server, volume, device=None, tag=None): """Attaches volume to server and waits for 'in-use' volume status. The volume will be detached when the test tears down. :param server: The server to which the volume will be attached. :param volume: The volume to attach. :param device: Optional mountpoint for the attached volume. Note that this is not guaranteed for all hypervisors and is not recommended. :param tag: Optional device role tag to apply to the volume. """ attach_kwargs = dict(volumeId=volume['id']) if device: attach_kwargs['device'] = device if tag: attach_kwargs['tag'] = tag attachment = self.servers_client.attach_volume( server['id'], **attach_kwargs)['volumeAttachment'] # On teardown detach the volume and for multiattach volumes wait for # the attachment to be removed. For non-multiattach volumes wait for # the state of the volume to change to available. This is so we don't # error out when trying to delete the volume during teardown. if volume['multiattach']: att = waiters.wait_for_volume_attachment_create( self.volumes_client, volume['id'], server['id']) self.addCleanup(waiters.wait_for_volume_attachment_remove, self.volumes_client, volume['id'], att['attachment_id']) else: self.addCleanup(waiters.wait_for_volume_resource_status, self.volumes_client, volume['id'], 'available') waiters.wait_for_volume_resource_status(self.volumes_client, volume['id'], 'in-use') # Ignore 404s on detach in case the server is deleted or the volume # is already detached. 
self.addCleanup(self._detach_volume, server, volume) return attachment def create_volume_snapshot(self, volume_id, name=None, description=None, metadata=None, force=False): name = name or data_utils.rand_name( self.__class__.__name__ + '-snapshot') snapshot = self.snapshots_client.create_snapshot( volume_id=volume_id, force=force, display_name=name, description=description, metadata=metadata)['snapshot'] self.addCleanup(self.snapshots_client.wait_for_resource_deletion, snapshot['id']) self.addCleanup(self.snapshots_client.delete_snapshot, snapshot['id']) waiters.wait_for_volume_resource_status(self.snapshots_client, snapshot['id'], 'available') snapshot = self.snapshots_client.show_snapshot( snapshot['id'])['snapshot'] return snapshot def assert_flavor_equal(self, flavor_id, server_flavor): """Check whether server_flavor equals to flavor. :param flavor_id: flavor id :param server_flavor: flavor info returned by show_server. """ # Nova API > 2.46 no longer includes flavor.id, and schema check # will cover whether 'id' should be in flavor if server_flavor.get('id'): msg = ('server flavor is not same as flavor!') self.assertEqual(flavor_id, server_flavor['id'], msg) else: flavor = self.flavors_client.show_flavor(flavor_id)['flavor'] self.assertEqual(flavor['name'], server_flavor['original_name'], "original_name in server flavor is not same as " "flavor name!") for key in ['ram', 'vcpus', 'disk']: msg = ('attribute %s in server flavor is not same as ' 'flavor!' % key) self.assertEqual(flavor[key], server_flavor[key], msg) class BaseV2ComputeAdminTest(BaseV2ComputeTest): """Base test case class for Compute Admin API tests.""" credentials = ['primary', 'admin'] @classmethod def setup_clients(cls): super(BaseV2ComputeAdminTest, cls).setup_clients() cls.availability_zone_admin_client = ( cls.os_admin.availability_zone_client) cls.admin_flavors_client = cls.os_admin.flavors_client cls.admin_servers_client = cls.os_admin.servers_client cls.image_client = cls.os_admin.image_client_v2 cls.admin_assisted_volume_snapshots_client = \ cls.os_admin.assisted_volume_snapshots_client def create_flavor(self, ram, vcpus, disk, name=None, is_public='True', **kwargs): if name is None: name = data_utils.rand_name(self.__class__.__name__ + "-flavor") id = kwargs.pop('id', data_utils.rand_int_id(start=1000)) client = self.admin_flavors_client flavor = client.create_flavor( ram=ram, vcpus=vcpus, disk=disk, name=name, id=id, is_public=is_public, **kwargs)['flavor'] self.addCleanup(client.wait_for_resource_deletion, flavor['id']) self.addCleanup(client.delete_flavor, flavor['id']) return flavor @classmethod def get_host_for_server(cls, server_id): server_details = cls.admin_servers_client.show_server(server_id) return server_details['server']['OS-EXT-SRV-ATTR:host'] def get_host_other_than(self, server_id): source_host = self.get_host_for_server(server_id) svcs = self.os_admin.services_client.list_services( binary='nova-compute')['services'] hosts = [] for svc in svcs: if svc['state'] == 'up' and svc['status'] == 'enabled': if CONF.compute.compute_volume_common_az: if svc['zone'] == CONF.compute.compute_volume_common_az: hosts.append(svc['host']) else: hosts.append(svc['host']) for target_host in hosts: if source_host != target_host: return target_host
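For orientation, here is a minimal, hypothetical sketch of how a test built on BaseV2ComputeTest would combine the helpers in this record. The test class and method names are invented for illustration; create_test_server, create_volume and attach_volume are the helpers defined above, and attach_volume registers the detach-and-wait cleanups itself, so the test body needs no teardown logic of its own.

class VolumeAttachExampleTest(BaseV2ComputeTest):
    """Hypothetical example test; not part of the Tempest source above."""

    def test_attach_volume_to_server(self):
        # Both helpers schedule their own cleanups for teardown.
        server = self.create_test_server(wait_until='ACTIVE')
        volume = self.create_volume()
        # attach_volume waits for the volume to reach 'in-use' status and
        # adds cleanups that detach the volume when the test tears down.
        attachment = self.attach_volume(server, volume)
        # The returned Nova volumeAttachment document echoes the volume id.
        self.assertEqual(volume['id'], attachment['volumeId'])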
list
This method is a generator which yields queue objects. It is almost a copy of the list method of the resource.Resource class. The only difference is that the request header now includes the `Client-ID` and `X-PROJECT-ID` fields, which are required by the Zaqar v2 API.
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from openstack import resource class Queue(resource.Resource): # FIXME(anyone): The name string of `location` field of Zaqar API response # is lower case. That is inconsistent with the guide from API-WG. This is # a workaround for this issue. location = resource.Header("location") resources_key = "queues" base_path = "/queues" # capabilities allow_create = True allow_list = True allow_fetch = True allow_delete = True # Properties #: The default TTL of messages defined for a queue, which will effect for #: any messages posted to the queue. default_message_ttl = resource.Body("_default_message_ttl") #: Description of the queue. description = resource.Body("description") #: The max post size of messages defined for a queue, which will effect #: for any messages posted to the queue. max_messages_post_size = resource.Body("_max_messages_post_size") #: Name of the queue. The name is the unique identity of a queue. It #: must not exceed 64 bytes in length, and it is limited to US-ASCII #: letters, digits, underscores, and hyphens. name = resource.Body("name", alternate_id=True) #: The ID to identify the client accessing Zaqar API. Must be specified #: in header for each API request. client_id = resource.Header("Client-ID") #: The ID to identify the project accessing Zaqar API. Must be specified #: in case keystone auth is not enabled in Zaqar service. project_id = resource.Header("X-PROJECT-ID") def create(self, session, prepend_key=True): request = self._prepare_request(requires_id=True, prepend_key=prepend_key) headers = { "Client-ID": self.client_id or str(uuid.uuid4()), "X-PROJECT-ID": self.project_id or session.get_project_id() } request.headers.update(headers) response = session.put(request.url, json=request.body, headers=request.headers) self._translate_response(response, has_body=False) return self # MASKED: list function (lines 67-106) def fetch(self, session, requires_id=True, error_message=None): request = self._prepare_request(requires_id=requires_id) headers = { "Client-ID": self.client_id or str(uuid.uuid4()), "X-PROJECT-ID": self.project_id or session.get_project_id() } request.headers.update(headers) response = session.get(request.url, headers=headers) self._translate_response(response) return self def delete(self, session): request = self._prepare_request() headers = { "Client-ID": self.client_id or str(uuid.uuid4()), "X-PROJECT-ID": self.project_id or session.get_project_id() } request.headers.update(headers) response = session.delete(request.url, headers=headers) self._translate_response(response, has_body=False) return self
@classmethod def list(cls, session, paginated=False, **params): """This method is a generator which yields queue objects. This is almost the copy of list method of resource.Resource class. The only difference is the request header now includes `Client-ID` and `X-PROJECT-ID` fields which are required by Zaqar v2 API. """ more_data = True query_params = cls._query_mapping._transpose(params) uri = cls.base_path % params headers = { "Client-ID": params.get('client_id', None) or str(uuid.uuid4()), "X-PROJECT-ID": params.get('project_id', None ) or session.get_project_id() } while more_data: resp = session.get(uri, headers=headers, params=query_params) resp = resp.json() resp = resp[cls.resources_key] if not resp: more_data = False yielded = 0 new_marker = None for data in resp: value = cls.existing(**data) new_marker = value.id yielded += 1 yield value if not paginated: return if "limit" in query_params and yielded < query_params["limit"]: return query_params["limit"] = yielded query_params["marker"] = new_marker
67
106
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from openstack import resource class Queue(resource.Resource): # FIXME(anyone): The name string of `location` field of Zaqar API response # is lower case. That is inconsistent with the guide from API-WG. This is # a workaround for this issue. location = resource.Header("location") resources_key = "queues" base_path = "/queues" # capabilities allow_create = True allow_list = True allow_fetch = True allow_delete = True # Properties #: The default TTL of messages defined for a queue, which will effect for #: any messages posted to the queue. default_message_ttl = resource.Body("_default_message_ttl") #: Description of the queue. description = resource.Body("description") #: The max post size of messages defined for a queue, which will effect #: for any messages posted to the queue. max_messages_post_size = resource.Body("_max_messages_post_size") #: Name of the queue. The name is the unique identity of a queue. It #: must not exceed 64 bytes in length, and it is limited to US-ASCII #: letters, digits, underscores, and hyphens. name = resource.Body("name", alternate_id=True) #: The ID to identify the client accessing Zaqar API. Must be specified #: in header for each API request. client_id = resource.Header("Client-ID") #: The ID to identify the project accessing Zaqar API. Must be specified #: in case keystone auth is not enabled in Zaqar service. project_id = resource.Header("X-PROJECT-ID") def create(self, session, prepend_key=True): request = self._prepare_request(requires_id=True, prepend_key=prepend_key) headers = { "Client-ID": self.client_id or str(uuid.uuid4()), "X-PROJECT-ID": self.project_id or session.get_project_id() } request.headers.update(headers) response = session.put(request.url, json=request.body, headers=request.headers) self._translate_response(response, has_body=False) return self @classmethod def list(cls, session, paginated=False, **params): """This method is a generator which yields queue objects. This is almost the copy of list method of resource.Resource class. The only difference is the request header now includes `Client-ID` and `X-PROJECT-ID` fields which are required by Zaqar v2 API. 
""" more_data = True query_params = cls._query_mapping._transpose(params) uri = cls.base_path % params headers = { "Client-ID": params.get('client_id', None) or str(uuid.uuid4()), "X-PROJECT-ID": params.get('project_id', None ) or session.get_project_id() } while more_data: resp = session.get(uri, headers=headers, params=query_params) resp = resp.json() resp = resp[cls.resources_key] if not resp: more_data = False yielded = 0 new_marker = None for data in resp: value = cls.existing(**data) new_marker = value.id yielded += 1 yield value if not paginated: return if "limit" in query_params and yielded < query_params["limit"]: return query_params["limit"] = yielded query_params["marker"] = new_marker def fetch(self, session, requires_id=True, error_message=None): request = self._prepare_request(requires_id=requires_id) headers = { "Client-ID": self.client_id or str(uuid.uuid4()), "X-PROJECT-ID": self.project_id or session.get_project_id() } request.headers.update(headers) response = session.get(request.url, headers=headers) self._translate_response(response) return self def delete(self, session): request = self._prepare_request() headers = { "Client-ID": self.client_id or str(uuid.uuid4()), "X-PROJECT-ID": self.project_id or session.get_project_id() } request.headers.update(headers) response = session.delete(request.url, headers=headers) self._translate_response(response, has_body=False) return self
ComputeConvOutputShape
Computes output shape for convolution and pooling layers. If `in_shape` is a dynamic shape, the output will be Tensors, while if `in_shape` is a list of ints then the output will also be a list of ints. Args: in_shape: A length 4 Tensor or list representing the input shape. t_stride: The stride along the time dimension. f_stride: The stride along the frequency dimension. outc: The expected output channel. If None, will use the input channel. padding: 'SAME' or 'VALID'. Returns: The expected output shape.
# Lint as: python2, python3 # Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Common conv layers.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from lingvo.core import base_layer from lingvo.core import bn_layers from lingvo.core import py_utils from lingvo.core import tshape # MASKED: ComputeConvOutputShape function (lines 30-67) def ComputeConvOutputPadding(paddings, window, stride, padding_algorithm='SAME'): """Computes paddings for convolution and pooling output. out_padding[i] == 1 iff any in_padding corresponding to that output is 1. Args: paddings: The paddings tensor. It is expected to be of shape [batch, time]. window: The size of the windows. stride: The time-stride between adjacent windows. padding_algorithm: 'SAME' or 'VALID'. Returns: out_padding, The new padding tensor of size [batch, ceil(time / stride)]. """ if stride == 1: return paddings # Pad so input_length divides stride. input_length = py_utils.GetShape(paddings)[1] pad_len = (input_length + stride - 1) // stride * stride - input_length paddings = tf.pad(paddings, [[0, 0], [0, pad_len]], constant_values=1.0) out_padding = tf.nn.pool( tf.expand_dims(paddings, -1), [window], 'MAX', padding_algorithm, strides=[stride], ) return tf.squeeze(out_padding, -1) class BaseConv2DLayerWithPadding(base_layer.BaseLayer): """Base class for 2D convolution layers.""" @classmethod def Params(cls): p = super(BaseConv2DLayerWithPadding, cls).Params() p.Define( 'filter_shape', (0, 0, 0, 0), 'Filter shape. Must be a sequence of length 4. Elements are in' ' the order of height (time), width (frequency), in_channel,' ' out_channel. For causal convolution, filter_shape[0]' ' is the actual number of trained weights in the time dimension' ' of the kernel.') p.Define( 'filter_stride', (1, 1), 'Filter stride to use. Must be a pair of ints. The first int' ' specifies the stride on the time dimension. The second int' ' specifies the stride on the frequency dimension.') p.Define( 'dilation_rate', (1, 1), 'If > 1, dilation rate for atrous convolution. ' 'Must be a pair of ints. ' 'The first int specifies the dilation rate on the time dimension. ' 'The second int specifies the dilation rate on the frequency ' 'dimension. 
' 'If any value of dilation_rate is > 1, then all values of strides ' 'must be 1.') p.Define( 'weight_norm', False, 'If true, apply weight normalization to weights as proposed by' ' Salimans and Kingma, 2016: https://arxiv.org/abs/1602.07868') return p @base_layer.initializer def __init__(self, params): super(BaseConv2DLayerWithPadding, self).__init__(params) p = self.params assert p.name assert len(p.filter_shape) == 4 assert len(p.filter_stride) == 2 assert all(x > 0 for x in p.filter_shape) assert all(x > 0 for x in p.filter_stride) assert len(p.dilation_rate) == 2 assert all(x > 0 for x in p.dilation_rate) # Dilation and stride can't be combined. if any(x > 1 for x in p.dilation_rate): assert all(x == 1 for x in p.filter_stride) @property def output_channels(self): """The number of output channels for this conv layer.""" raise NotImplementedError() @property def input_channels(self): """The number of input channels for this conv layer.""" return self.params.filter_shape[2] def OutShape(self, in_shape): """Compute the output shape given the input shape.""" p = self.params return ComputeConvOutputShape(in_shape, p.filter_stride[0], p.filter_stride[1], self.output_channels) def FProp(self, theta, inputs, paddings): """Apply convolution to inputs. Args: theta: A `.NestedMap` object containing weights' values of this layer and its children layers. inputs: The inputs tensor. It is expected to be of shape [batch, time, frequency, channel]. The time dimension corresponds to the height dimension as in images and the frequency dimension corresponds to the width dimension as in images. paddings: The paddings tensor, expected to be of shape [batch, time]. Returns: outputs, out_paddings pair. """ p = self.params with tf.name_scope(p.name): inputs = py_utils.with_dependencies([ py_utils.assert_shape_match(tf.shape(paddings), [-1, -1]), py_utils.assert_shape_match( tf.shape(inputs), tf.concat([tf.shape(paddings), [-1, self.input_channels]], 0)) ], inputs) def _ApplyPadding(tensor_in, padding_in): padding_expanded = tf.expand_dims(tf.expand_dims(padding_in, -1), -1) return tensor_in * (1.0 - padding_expanded) # Zeroing out padded inputs. inputs = _ApplyPadding(inputs, paddings) # Evaluate the conv kernel on 'inputs'. out = self._EvaluateConvKernel(theta, inputs) # NOTE: this may be slightly inaccurate when p.dilation_rate[0] > 1. # But there's likely no real problems. Trying to set it gives an error: # pooling with SAME padding is not implemented for dilation_rate > 1. # NOTE: we use window=p.filter_stride[0] to be compatible with legacy # implementation. Consider updating it to be the actual shape. conv_padding = ComputeConvOutputPadding( paddings, window=p.filter_stride[0], stride=p.filter_stride[0]) # Assuming padded nodes will be properly zero-ed out if necessary by # sub-sequent layers. 
# out = _ApplyPadding(out, conv_padding) out = py_utils.HasShape(out, self.OutShape(tf.shape(inputs))) return out, conv_padding def _EvaluateConvKernel(self, theta, conv_input): """Evaluate the convolution kernel on input 'conv_input'.""" raise NotImplementedError class Conv2DLayerWithPadding(BaseConv2DLayerWithPadding): """Conv2D layer.""" @base_layer.initializer def __init__(self, params): super(Conv2DLayerWithPadding, self).__init__(params) p = self.params assert p.name w_pc = py_utils.WeightParams( shape=p.filter_shape, init=p.params_init, dtype=p.dtype, collections=[self.__class__.__name__ + '_vars']) with tf.variable_scope(p.name): self.CreateVariable('w', w_pc) if p.weight_norm: self.CreateVariable( 'g', py_utils.WeightParams( shape=[p.filter_shape[-1]], init=py_utils.WeightInit.Constant(0.0), dtype=p.dtype, collections=[self.__class__.__name__ + '_vars'])) @property def output_channels(self): """The number of output channels for this conv layer.""" p = self.params return p.filter_shape[-1] def _GetWeight(self, theta): p = self.params if p.weight_norm: # Normalize along the last dim (standard conv). filter_w = tf.nn.l2_normalize(theta.w, [0, 1, 2]) * tf.reshape( (theta.g + 1.0), [1, 1, 1, p.filter_shape[-1]]) else: filter_w = theta.w return filter_w def _EvaluateConvKernel(self, theta, inputs): """Apply convolution to inputs.""" p = self.params filter_w = self._GetWeight(theta) return tf.nn.convolution( inputs, filter_w, strides=p.filter_stride, dilation_rate=p.dilation_rate, data_format='NHWC', padding='SAME') class CausalConv2DLayerWithPadding(Conv2DLayerWithPadding): """2D conv layer with causal dependency on the time axis.""" @base_layer.initializer def __init__(self, params): super(CausalConv2DLayerWithPadding, self).__init__(params) p = self.params assert p.filter_shape[1] == 1, 'Only 1d causal convolution is supported.' def _EvaluateConvKernel(self, theta, inputs): """Apply convolution to inputs.""" p = self.params assert p.filter_shape[1] == 1, 'Only 1D causal convolutions supported.' # Use VALID padding and shift the inputs to the right to ensure that the # first output only depends on the first input and so on. The output is # the same size as the input, as if the convolution used SAME padding. padding_algorithm = 'VALID' # The effective spatial filter width for dilated convolutions is # (kernel_width - 1) * dilation_rate + 1 as according to # https://www.tensorflow.org/api_docs/python/tf/nn/convolution. causal_pad_size = (p.filter_shape[0] - 1) * p.dilation_rate[0] inputs = tf.pad(inputs, [[0, 0], [causal_pad_size, 0], [0, 0], [0, 0]]) filter_w = self._GetWeight(theta) return tf.nn.convolution( inputs, filter_w, strides=p.filter_stride, dilation_rate=p.dilation_rate, data_format='NHWC', padding=padding_algorithm) class DepthwiseConv2DLayer(BaseConv2DLayerWithPadding): """Depthwise conv 2D layer. paper: https://arxiv.org/abs/1610.02357 """ @classmethod def Params(cls): p = super(DepthwiseConv2DLayer, cls).Params() # Redefine 'filter_shape' since the semantic of shape elements is different # from regular Conv2D. p.Delete('filter_shape') p.Define( 'filter_shape', (0, 0, 0, 0), 'Filter shape. Must be a sequence of length 4. Elements are in' ' the order of height (time), width (frequency), in_channel,' ' channel_multipliers. 
') return p @base_layer.initializer def __init__(self, params): super(DepthwiseConv2DLayer, self).__init__(params) p = self.params assert p.name w_pc = py_utils.WeightParams( shape=p.filter_shape, init=p.params_init, dtype=p.dtype, collections=[self.__class__.__name__ + '_vars']) with tf.variable_scope(p.name): self.CreateVariable('w', w_pc) if p.weight_norm: self.CreateVariable( 'g', py_utils.WeightParams( shape=[p.filter_shape[2], p.filter_shape[3]], init=py_utils.WeightInit.Constant(0.0), dtype=p.dtype, collections=[self.__class__.__name__ + '_vars'])) @property def output_channels(self): """The number of output channels for this conv layer.""" p = self.params # Depthwise convolution filter shape is: # [..., in_channels, channel_multiplier]. return p.filter_shape[2] * p.filter_shape[3] def _GetWeight(self, theta): p = self.params if p.weight_norm: # Normalize along the last two dims. filter_w = tf.nn.l2_normalize(theta.w, [0, 1]) * tf.reshape( (theta.g + 1.0), [1, 1, p.filter_shape[2], p.filter_shape[3]]) else: filter_w = theta.w return filter_w def _EvaluateConvKernel(self, theta, inputs): """Apply convolution to inputs.""" p = self.params filter_w = self._GetWeight(theta) return tf.nn.depthwise_conv2d( inputs, filter_w, strides=[1, p.filter_stride[0], p.filter_stride[1], 1], rate=p.dilation_rate, data_format='NHWC', padding='SAME') class CausalDepthwiseConv2DLayer(DepthwiseConv2DLayer): """Depthwise conv layer with causal dependency on the time axis.""" @base_layer.initializer def __init__(self, params): super(CausalDepthwiseConv2DLayer, self).__init__(params) p = self.params assert p.filter_shape[1] == 1, 'Only 1d causal convolution is supported.' def _EvaluateConvKernel(self, theta, inputs): """Apply convolution to inputs.""" p = self.params assert p.filter_shape[1] == 1, 'Only 1D causal convolutions supported.' # Use VALID padding and shift the inputs to the right to ensure that the # first output only depends on the first input and so on. The output is # the same size as the input, as if the convolution used SAME padding. padding_algorithm = 'VALID' # The effective spatial filter width for dilated convolutions is # (kernel_width - 1) * dilation_rate + 1 as according to # https://www.tensorflow.org/api_docs/python/tf/nn/convolution. causal_pad_size = (p.filter_shape[0] - 1) * p.dilation_rate[0] inputs = tf.pad(inputs, [[0, 0], [causal_pad_size, 0], [0, 0], [0, 0]]) filter_w = self._GetWeight(theta) return tf.nn.depthwise_conv2d( inputs, filter_w, strides=[1, p.filter_stride[0], p.filter_stride[1], 1], rate=p.dilation_rate, data_format='NHWC', padding=padding_algorithm) class NormalizedDepthwiseConv2DLayer(DepthwiseConv2DLayer): """DepthwiseConv2DLayer where weights are normalized over the time dim. https://arxiv.org/abs/1901.10430 """ @classmethod def Params(cls): p = super(NormalizedDepthwiseConv2DLayer, cls).Params() p.Define('dropconnect_prob', 0.0, 'Prob at which DropConnect regularization is performed.') p.Define('deterministic_dropout', False, 'Use determnisitc dropout or not.') p.Define('temperature', 1.0, 'Temperature for the softmax normalization of the weights.') p.Define('weight_tiling_factor', 1, 'Number of times weights are tiled over the input channels.') return p @base_layer.initializer def __init__(self, params): super(NormalizedDepthwiseConv2DLayer, self).__init__(params) p = self.params assert p.filter_shape[1] == 1, 'Only 1d convolution is supported.' assert p.temperature > 0.0, 'Absolute zero temperature is not possible.' 
@property def output_channels(self): """The number of output channels for this conv layer.""" p = self.params # Depthwise convolution filter shape is: # [kernel_size, 1, in_channels, channel_multiplier]. return p.filter_shape[2] * p.filter_shape[3] * p.weight_tiling_factor @property def input_channels(self): """The number of output channels for this conv layer.""" p = self.params return p.filter_shape[2] * p.weight_tiling_factor def _GetWeight(self, theta): p = self.params filter_w = theta.w # First normalize filter_w over the temporal dimension here. filter_w = tf.nn.softmax(filter_w / p.temperature, axis=0) # Add dropconnect on the weights for regularization. if p.dropconnect_prob > 0.0 and not p.is_eval: if p.deterministic_dropout: filter_w = py_utils.DeterministicDropout( filter_w, 1.0 - p.dropconnect_prob, py_utils.GenerateStepSeedPair(p, theta.global_step)) else: filter_w = tf.nn.dropout( filter_w, 1.0 - p.dropconnect_prob, seed=p.random_seed) # Tie the parameters of every subsequent number of weight_tiling_factor # channels. filter_w = tf.tile(filter_w, [1, 1, p.weight_tiling_factor, 1]) return filter_w @classmethod def FPropMeta(cls, p, inputs, paddings): py_utils.CheckShapes((inputs, paddings)) b, t, f, ic = inputs assert f == 1 oc = p.filter_shape[2] * p.filter_shape[3] * p.weight_tiling_factor outputs = tshape.Shape([b, t, f, oc]) flops = b * t * f * p.filter_shape[0] * ic * oc * 5 return py_utils.NestedMap(flops=flops, out_shapes=(outputs, paddings)) class CausalNormalizedDepthwiseConv2DLayer(NormalizedDepthwiseConv2DLayer): """Depthwise conv layer with causal dependency on the time axis.""" def _EvaluateConvKernel(self, theta, inputs): """Apply convolution to inputs.""" # Same as CausalDepthwiseConv2DLayer. p = self.params assert p.filter_shape[1] == 1, 'Only 1D causal convolutions supported.' padding_algorithm = 'VALID' causal_pad_size = (p.filter_shape[0] - 1) * p.dilation_rate[0] inputs = tf.pad(inputs, [[0, 0], [causal_pad_size, 0], [0, 0], [0, 0]]) filter_w = self._GetWeight(theta) return tf.nn.depthwise_conv2d( inputs, filter_w, strides=[1, p.filter_stride[0], p.filter_stride[1], 1], rate=p.dilation_rate, data_format='NHWC', padding=padding_algorithm) class ConvBatchNormLayer(bn_layers.BatchNormLayer): """A wrapper around regular BatchNormLayer that pass around the ... paddings layers. """ def FProp(self, theta, inputs, paddings): paddings_expanded = tf.expand_dims(tf.expand_dims(paddings, -1), -1) bned = super(ConvBatchNormLayer, self).FProp( theta, inputs, paddings_expanded) return bned, paddings # Supported activation functions. _ACTIVATIONS = { 'RELU': tf.nn.relu, 'RELU6': tf.nn.relu6, 'SIGMOID': tf.sigmoid, 'TANH': tf.tanh, 'SWISH': tf.nn.swish, 'NONE': tf.identity, } class ActivationLayer(base_layer.BaseLayer): """Applies activation function to the inputs.""" @classmethod def Params(cls): p = super(ActivationLayer, cls).Params() p.Define('activation', 'RELU', 'The activation function to apply') return p def FProp(self, theta, inputs, paddings): p = self.params out = _ACTIVATIONS[p.activation](inputs) return out, paddings class PaddingLayer(base_layer.BaseLayer): """Zeros out padded positions.""" def FProp(self, theta, inputs, paddings): paddings_expanded = tf.expand_dims(tf.expand_dims(paddings, -1), -1) return inputs * (1.0 - paddings_expanded), paddings
def ComputeConvOutputShape(in_shape, t_stride, f_stride, outc=None, padding='SAME'): """Computes output shape for convolution and pooling layers. If `in_shape` is a dynamic shape, the output will be Tensors, while if `in_shape` is a list of ints then the output will also be a list of ints. Args: in_shape: A length 4 Tensor or list representing the input shape. t_stride: The stride along the time dimension. f_stride: The stride along the frequency dimension. outc: The expected output channel. If None, will use the input channel. padding: 'SAME' or 'VALID'. Returns: The expected output shape. """ # In the order of batch, time, frequency, channel n = in_shape[0] t = in_shape[1] f = in_shape[2] c = in_shape[3] # Last two dimensions has to be specified. assert f is not None and c is not None if padding == 'VALID': if t: t -= t_stride - 1 f -= f_stride - 1 ot = t if ot is not None: ot = (ot + t_stride - 1) // t_stride of = (f + f_stride - 1) // f_stride if outc is None: outc = c return [n, ot, of, outc]
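Because ComputeConvOutputShape accepts plain Python ints, its behaviour is easy to sanity-check without building a graph. A small illustrative example (the shapes are made up for demonstration):

# [batch, time, frequency, channel] input, stride 2 along time and frequency.
print(ComputeConvOutputShape([8, 100, 80, 3], t_stride=2, f_stride=2, outc=32))
# -> [8, 50, 40, 32]   (SAME padding: each strided dim becomes ceil(dim / stride))

# With 'VALID' padding the time/frequency dims first shrink by (stride - 1):
print(ComputeConvOutputShape([8, 101, 80, 3], t_stride=2, f_stride=2, outc=32,
                             padding='VALID'))
# -> [8, 50, 40, 32]   (time: ceil((101 - 1) / 2) = 50 rather than ceil(101 / 2) = 51)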
30
67
# Lint as: python2, python3 # Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Common conv layers.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from lingvo.core import base_layer from lingvo.core import bn_layers from lingvo.core import py_utils from lingvo.core import tshape def ComputeConvOutputShape(in_shape, t_stride, f_stride, outc=None, padding='SAME'): """Computes output shape for convolution and pooling layers. If `in_shape` is a dynamic shape, the output will be Tensors, while if `in_shape` is a list of ints then the output will also be a list of ints. Args: in_shape: A length 4 Tensor or list representing the input shape. t_stride: The stride along the time dimension. f_stride: The stride along the frequency dimension. outc: The expected output channel. If None, will use the input channel. padding: 'SAME' or 'VALID'. Returns: The expected output shape. """ # In the order of batch, time, frequency, channel n = in_shape[0] t = in_shape[1] f = in_shape[2] c = in_shape[3] # Last two dimensions has to be specified. assert f is not None and c is not None if padding == 'VALID': if t: t -= t_stride - 1 f -= f_stride - 1 ot = t if ot is not None: ot = (ot + t_stride - 1) // t_stride of = (f + f_stride - 1) // f_stride if outc is None: outc = c return [n, ot, of, outc] def ComputeConvOutputPadding(paddings, window, stride, padding_algorithm='SAME'): """Computes paddings for convolution and pooling output. out_padding[i] == 1 iff any in_padding corresponding to that output is 1. Args: paddings: The paddings tensor. It is expected to be of shape [batch, time]. window: The size of the windows. stride: The time-stride between adjacent windows. padding_algorithm: 'SAME' or 'VALID'. Returns: out_padding, The new padding tensor of size [batch, ceil(time / stride)]. """ if stride == 1: return paddings # Pad so input_length divides stride. input_length = py_utils.GetShape(paddings)[1] pad_len = (input_length + stride - 1) // stride * stride - input_length paddings = tf.pad(paddings, [[0, 0], [0, pad_len]], constant_values=1.0) out_padding = tf.nn.pool( tf.expand_dims(paddings, -1), [window], 'MAX', padding_algorithm, strides=[stride], ) return tf.squeeze(out_padding, -1) class BaseConv2DLayerWithPadding(base_layer.BaseLayer): """Base class for 2D convolution layers.""" @classmethod def Params(cls): p = super(BaseConv2DLayerWithPadding, cls).Params() p.Define( 'filter_shape', (0, 0, 0, 0), 'Filter shape. Must be a sequence of length 4. Elements are in' ' the order of height (time), width (frequency), in_channel,' ' out_channel. For causal convolution, filter_shape[0]' ' is the actual number of trained weights in the time dimension' ' of the kernel.') p.Define( 'filter_stride', (1, 1), 'Filter stride to use. Must be a pair of ints. 
The first int' ' specifies the stride on the time dimension. The second int' ' specifies the stride on the frequency dimension.') p.Define( 'dilation_rate', (1, 1), 'If > 1, dilation rate for atrous convolution. ' 'Must be a pair of ints. ' 'The first int specifies the dilation rate on the time dimension. ' 'The second int specifies the dilation rate on the frequency ' 'dimension. ' 'If any value of dilation_rate is > 1, then all values of strides ' 'must be 1.') p.Define( 'weight_norm', False, 'If true, apply weight normalization to weights as proposed by' ' Salimans and Kingma, 2016: https://arxiv.org/abs/1602.07868') return p @base_layer.initializer def __init__(self, params): super(BaseConv2DLayerWithPadding, self).__init__(params) p = self.params assert p.name assert len(p.filter_shape) == 4 assert len(p.filter_stride) == 2 assert all(x > 0 for x in p.filter_shape) assert all(x > 0 for x in p.filter_stride) assert len(p.dilation_rate) == 2 assert all(x > 0 for x in p.dilation_rate) # Dilation and stride can't be combined. if any(x > 1 for x in p.dilation_rate): assert all(x == 1 for x in p.filter_stride) @property def output_channels(self): """The number of output channels for this conv layer.""" raise NotImplementedError() @property def input_channels(self): """The number of input channels for this conv layer.""" return self.params.filter_shape[2] def OutShape(self, in_shape): """Compute the output shape given the input shape.""" p = self.params return ComputeConvOutputShape(in_shape, p.filter_stride[0], p.filter_stride[1], self.output_channels) def FProp(self, theta, inputs, paddings): """Apply convolution to inputs. Args: theta: A `.NestedMap` object containing weights' values of this layer and its children layers. inputs: The inputs tensor. It is expected to be of shape [batch, time, frequency, channel]. The time dimension corresponds to the height dimension as in images and the frequency dimension corresponds to the width dimension as in images. paddings: The paddings tensor, expected to be of shape [batch, time]. Returns: outputs, out_paddings pair. """ p = self.params with tf.name_scope(p.name): inputs = py_utils.with_dependencies([ py_utils.assert_shape_match(tf.shape(paddings), [-1, -1]), py_utils.assert_shape_match( tf.shape(inputs), tf.concat([tf.shape(paddings), [-1, self.input_channels]], 0)) ], inputs) def _ApplyPadding(tensor_in, padding_in): padding_expanded = tf.expand_dims(tf.expand_dims(padding_in, -1), -1) return tensor_in * (1.0 - padding_expanded) # Zeroing out padded inputs. inputs = _ApplyPadding(inputs, paddings) # Evaluate the conv kernel on 'inputs'. out = self._EvaluateConvKernel(theta, inputs) # NOTE: this may be slightly inaccurate when p.dilation_rate[0] > 1. # But there's likely no real problems. Trying to set it gives an error: # pooling with SAME padding is not implemented for dilation_rate > 1. # NOTE: we use window=p.filter_stride[0] to be compatible with legacy # implementation. Consider updating it to be the actual shape. conv_padding = ComputeConvOutputPadding( paddings, window=p.filter_stride[0], stride=p.filter_stride[0]) # Assuming padded nodes will be properly zero-ed out if necessary by # sub-sequent layers. 
# out = _ApplyPadding(out, conv_padding) out = py_utils.HasShape(out, self.OutShape(tf.shape(inputs))) return out, conv_padding def _EvaluateConvKernel(self, theta, conv_input): """Evaluate the convolution kernel on input 'conv_input'.""" raise NotImplementedError class Conv2DLayerWithPadding(BaseConv2DLayerWithPadding): """Conv2D layer.""" @base_layer.initializer def __init__(self, params): super(Conv2DLayerWithPadding, self).__init__(params) p = self.params assert p.name w_pc = py_utils.WeightParams( shape=p.filter_shape, init=p.params_init, dtype=p.dtype, collections=[self.__class__.__name__ + '_vars']) with tf.variable_scope(p.name): self.CreateVariable('w', w_pc) if p.weight_norm: self.CreateVariable( 'g', py_utils.WeightParams( shape=[p.filter_shape[-1]], init=py_utils.WeightInit.Constant(0.0), dtype=p.dtype, collections=[self.__class__.__name__ + '_vars'])) @property def output_channels(self): """The number of output channels for this conv layer.""" p = self.params return p.filter_shape[-1] def _GetWeight(self, theta): p = self.params if p.weight_norm: # Normalize along the last dim (standard conv). filter_w = tf.nn.l2_normalize(theta.w, [0, 1, 2]) * tf.reshape( (theta.g + 1.0), [1, 1, 1, p.filter_shape[-1]]) else: filter_w = theta.w return filter_w def _EvaluateConvKernel(self, theta, inputs): """Apply convolution to inputs.""" p = self.params filter_w = self._GetWeight(theta) return tf.nn.convolution( inputs, filter_w, strides=p.filter_stride, dilation_rate=p.dilation_rate, data_format='NHWC', padding='SAME') class CausalConv2DLayerWithPadding(Conv2DLayerWithPadding): """2D conv layer with causal dependency on the time axis.""" @base_layer.initializer def __init__(self, params): super(CausalConv2DLayerWithPadding, self).__init__(params) p = self.params assert p.filter_shape[1] == 1, 'Only 1d causal convolution is supported.' def _EvaluateConvKernel(self, theta, inputs): """Apply convolution to inputs.""" p = self.params assert p.filter_shape[1] == 1, 'Only 1D causal convolutions supported.' # Use VALID padding and shift the inputs to the right to ensure that the # first output only depends on the first input and so on. The output is # the same size as the input, as if the convolution used SAME padding. padding_algorithm = 'VALID' # The effective spatial filter width for dilated convolutions is # (kernel_width - 1) * dilation_rate + 1 as according to # https://www.tensorflow.org/api_docs/python/tf/nn/convolution. causal_pad_size = (p.filter_shape[0] - 1) * p.dilation_rate[0] inputs = tf.pad(inputs, [[0, 0], [causal_pad_size, 0], [0, 0], [0, 0]]) filter_w = self._GetWeight(theta) return tf.nn.convolution( inputs, filter_w, strides=p.filter_stride, dilation_rate=p.dilation_rate, data_format='NHWC', padding=padding_algorithm) class DepthwiseConv2DLayer(BaseConv2DLayerWithPadding): """Depthwise conv 2D layer. paper: https://arxiv.org/abs/1610.02357 """ @classmethod def Params(cls): p = super(DepthwiseConv2DLayer, cls).Params() # Redefine 'filter_shape' since the semantic of shape elements is different # from regular Conv2D. p.Delete('filter_shape') p.Define( 'filter_shape', (0, 0, 0, 0), 'Filter shape. Must be a sequence of length 4. Elements are in' ' the order of height (time), width (frequency), in_channel,' ' channel_multipliers. 
') return p @base_layer.initializer def __init__(self, params): super(DepthwiseConv2DLayer, self).__init__(params) p = self.params assert p.name w_pc = py_utils.WeightParams( shape=p.filter_shape, init=p.params_init, dtype=p.dtype, collections=[self.__class__.__name__ + '_vars']) with tf.variable_scope(p.name): self.CreateVariable('w', w_pc) if p.weight_norm: self.CreateVariable( 'g', py_utils.WeightParams( shape=[p.filter_shape[2], p.filter_shape[3]], init=py_utils.WeightInit.Constant(0.0), dtype=p.dtype, collections=[self.__class__.__name__ + '_vars'])) @property def output_channels(self): """The number of output channels for this conv layer.""" p = self.params # Depthwise convolution filter shape is: # [..., in_channels, channel_multiplier]. return p.filter_shape[2] * p.filter_shape[3] def _GetWeight(self, theta): p = self.params if p.weight_norm: # Normalize along the last two dims. filter_w = tf.nn.l2_normalize(theta.w, [0, 1]) * tf.reshape( (theta.g + 1.0), [1, 1, p.filter_shape[2], p.filter_shape[3]]) else: filter_w = theta.w return filter_w def _EvaluateConvKernel(self, theta, inputs): """Apply convolution to inputs.""" p = self.params filter_w = self._GetWeight(theta) return tf.nn.depthwise_conv2d( inputs, filter_w, strides=[1, p.filter_stride[0], p.filter_stride[1], 1], rate=p.dilation_rate, data_format='NHWC', padding='SAME') class CausalDepthwiseConv2DLayer(DepthwiseConv2DLayer): """Depthwise conv layer with causal dependency on the time axis.""" @base_layer.initializer def __init__(self, params): super(CausalDepthwiseConv2DLayer, self).__init__(params) p = self.params assert p.filter_shape[1] == 1, 'Only 1d causal convolution is supported.' def _EvaluateConvKernel(self, theta, inputs): """Apply convolution to inputs.""" p = self.params assert p.filter_shape[1] == 1, 'Only 1D causal convolutions supported.' # Use VALID padding and shift the inputs to the right to ensure that the # first output only depends on the first input and so on. The output is # the same size as the input, as if the convolution used SAME padding. padding_algorithm = 'VALID' # The effective spatial filter width for dilated convolutions is # (kernel_width - 1) * dilation_rate + 1 as according to # https://www.tensorflow.org/api_docs/python/tf/nn/convolution. causal_pad_size = (p.filter_shape[0] - 1) * p.dilation_rate[0] inputs = tf.pad(inputs, [[0, 0], [causal_pad_size, 0], [0, 0], [0, 0]]) filter_w = self._GetWeight(theta) return tf.nn.depthwise_conv2d( inputs, filter_w, strides=[1, p.filter_stride[0], p.filter_stride[1], 1], rate=p.dilation_rate, data_format='NHWC', padding=padding_algorithm) class NormalizedDepthwiseConv2DLayer(DepthwiseConv2DLayer): """DepthwiseConv2DLayer where weights are normalized over the time dim. https://arxiv.org/abs/1901.10430 """ @classmethod def Params(cls): p = super(NormalizedDepthwiseConv2DLayer, cls).Params() p.Define('dropconnect_prob', 0.0, 'Prob at which DropConnect regularization is performed.') p.Define('deterministic_dropout', False, 'Use determnisitc dropout or not.') p.Define('temperature', 1.0, 'Temperature for the softmax normalization of the weights.') p.Define('weight_tiling_factor', 1, 'Number of times weights are tiled over the input channels.') return p @base_layer.initializer def __init__(self, params): super(NormalizedDepthwiseConv2DLayer, self).__init__(params) p = self.params assert p.filter_shape[1] == 1, 'Only 1d convolution is supported.' assert p.temperature > 0.0, 'Absolute zero temperature is not possible.' 
@property def output_channels(self): """The number of output channels for this conv layer.""" p = self.params # Depthwise convolution filter shape is: # [kernel_size, 1, in_channels, channel_multiplier]. return p.filter_shape[2] * p.filter_shape[3] * p.weight_tiling_factor @property def input_channels(self): """The number of output channels for this conv layer.""" p = self.params return p.filter_shape[2] * p.weight_tiling_factor def _GetWeight(self, theta): p = self.params filter_w = theta.w # First normalize filter_w over the temporal dimension here. filter_w = tf.nn.softmax(filter_w / p.temperature, axis=0) # Add dropconnect on the weights for regularization. if p.dropconnect_prob > 0.0 and not p.is_eval: if p.deterministic_dropout: filter_w = py_utils.DeterministicDropout( filter_w, 1.0 - p.dropconnect_prob, py_utils.GenerateStepSeedPair(p, theta.global_step)) else: filter_w = tf.nn.dropout( filter_w, 1.0 - p.dropconnect_prob, seed=p.random_seed) # Tie the parameters of every subsequent number of weight_tiling_factor # channels. filter_w = tf.tile(filter_w, [1, 1, p.weight_tiling_factor, 1]) return filter_w @classmethod def FPropMeta(cls, p, inputs, paddings): py_utils.CheckShapes((inputs, paddings)) b, t, f, ic = inputs assert f == 1 oc = p.filter_shape[2] * p.filter_shape[3] * p.weight_tiling_factor outputs = tshape.Shape([b, t, f, oc]) flops = b * t * f * p.filter_shape[0] * ic * oc * 5 return py_utils.NestedMap(flops=flops, out_shapes=(outputs, paddings)) class CausalNormalizedDepthwiseConv2DLayer(NormalizedDepthwiseConv2DLayer): """Depthwise conv layer with causal dependency on the time axis.""" def _EvaluateConvKernel(self, theta, inputs): """Apply convolution to inputs.""" # Same as CausalDepthwiseConv2DLayer. p = self.params assert p.filter_shape[1] == 1, 'Only 1D causal convolutions supported.' padding_algorithm = 'VALID' causal_pad_size = (p.filter_shape[0] - 1) * p.dilation_rate[0] inputs = tf.pad(inputs, [[0, 0], [causal_pad_size, 0], [0, 0], [0, 0]]) filter_w = self._GetWeight(theta) return tf.nn.depthwise_conv2d( inputs, filter_w, strides=[1, p.filter_stride[0], p.filter_stride[1], 1], rate=p.dilation_rate, data_format='NHWC', padding=padding_algorithm) class ConvBatchNormLayer(bn_layers.BatchNormLayer): """A wrapper around regular BatchNormLayer that pass around the ... paddings layers. """ def FProp(self, theta, inputs, paddings): paddings_expanded = tf.expand_dims(tf.expand_dims(paddings, -1), -1) bned = super(ConvBatchNormLayer, self).FProp( theta, inputs, paddings_expanded) return bned, paddings # Supported activation functions. _ACTIVATIONS = { 'RELU': tf.nn.relu, 'RELU6': tf.nn.relu6, 'SIGMOID': tf.sigmoid, 'TANH': tf.tanh, 'SWISH': tf.nn.swish, 'NONE': tf.identity, } class ActivationLayer(base_layer.BaseLayer): """Applies activation function to the inputs.""" @classmethod def Params(cls): p = super(ActivationLayer, cls).Params() p.Define('activation', 'RELU', 'The activation function to apply') return p def FProp(self, theta, inputs, paddings): p = self.params out = _ACTIVATIONS[p.activation](inputs) return out, paddings class PaddingLayer(base_layer.BaseLayer): """Zeros out padded positions.""" def FProp(self, theta, inputs, paddings): paddings_expanded = tf.expand_dims(tf.expand_dims(paddings, -1), -1) return inputs * (1.0 - paddings_expanded), paddings
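One detail worth calling out from the causal layers in this file: they left-pad the time axis by (kernel_width - 1) * dilation_rate and then run a VALID convolution, so each output frame depends only on current and past frames while the output keeps the same length as the input. A quick numeric check (kernel sizes chosen only for illustration):

def causal_pad_size(kernel_width, dilation_rate):
  # Mirrors `(p.filter_shape[0] - 1) * p.dilation_rate[0]` in the causal layers above.
  return (kernel_width - 1) * dilation_rate

print(causal_pad_size(3, 1))  # 2: a width-3 kernel needs 2 past frames on the left.
print(causal_pad_size(3, 2))  # 4: dilation 2 spreads the kernel over 5 frames, so 4 past frames.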
ComputeConvOutputPadding
Computes paddings for convolution and pooling output. out_padding[i] == 1 iff any in_padding corresponding to that output is 1. Args: paddings: The paddings tensor. It is expected to be of shape [batch, time]. window: The size of the windows. stride: The time-stride between adjacent windows. padding_algorithm: 'SAME' or 'VALID'. Returns: out_padding, The new padding tensor of size [batch, ceil(time / stride)].
# Lint as: python2, python3 # Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Common conv layers.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from lingvo.core import base_layer from lingvo.core import bn_layers from lingvo.core import py_utils from lingvo.core import tshape def ComputeConvOutputShape(in_shape, t_stride, f_stride, outc=None, padding='SAME'): """Computes output shape for convolution and pooling layers. If `in_shape` is a dynamic shape, the output will be Tensors, while if `in_shape` is a list of ints then the output will also be a list of ints. Args: in_shape: A length 4 Tensor or list representing the input shape. t_stride: The stride along the time dimension. f_stride: The stride along the frequency dimension. outc: The expected output channel. If None, will use the input channel. padding: 'SAME' or 'VALID'. Returns: The expected output shape. """ # In the order of batch, time, frequency, channel n = in_shape[0] t = in_shape[1] f = in_shape[2] c = in_shape[3] # Last two dimensions has to be specified. assert f is not None and c is not None if padding == 'VALID': if t: t -= t_stride - 1 f -= f_stride - 1 ot = t if ot is not None: ot = (ot + t_stride - 1) // t_stride of = (f + f_stride - 1) // f_stride if outc is None: outc = c return [n, ot, of, outc] # MASKED: ComputeConvOutputPadding function (lines 70-99) class BaseConv2DLayerWithPadding(base_layer.BaseLayer): """Base class for 2D convolution layers.""" @classmethod def Params(cls): p = super(BaseConv2DLayerWithPadding, cls).Params() p.Define( 'filter_shape', (0, 0, 0, 0), 'Filter shape. Must be a sequence of length 4. Elements are in' ' the order of height (time), width (frequency), in_channel,' ' out_channel. For causal convolution, filter_shape[0]' ' is the actual number of trained weights in the time dimension' ' of the kernel.') p.Define( 'filter_stride', (1, 1), 'Filter stride to use. Must be a pair of ints. The first int' ' specifies the stride on the time dimension. The second int' ' specifies the stride on the frequency dimension.') p.Define( 'dilation_rate', (1, 1), 'If > 1, dilation rate for atrous convolution. ' 'Must be a pair of ints. ' 'The first int specifies the dilation rate on the time dimension. ' 'The second int specifies the dilation rate on the frequency ' 'dimension. 
' 'If any value of dilation_rate is > 1, then all values of strides ' 'must be 1.') p.Define( 'weight_norm', False, 'If true, apply weight normalization to weights as proposed by' ' Salimans and Kingma, 2016: https://arxiv.org/abs/1602.07868') return p @base_layer.initializer def __init__(self, params): super(BaseConv2DLayerWithPadding, self).__init__(params) p = self.params assert p.name assert len(p.filter_shape) == 4 assert len(p.filter_stride) == 2 assert all(x > 0 for x in p.filter_shape) assert all(x > 0 for x in p.filter_stride) assert len(p.dilation_rate) == 2 assert all(x > 0 for x in p.dilation_rate) # Dilation and stride can't be combined. if any(x > 1 for x in p.dilation_rate): assert all(x == 1 for x in p.filter_stride) @property def output_channels(self): """The number of output channels for this conv layer.""" raise NotImplementedError() @property def input_channels(self): """The number of input channels for this conv layer.""" return self.params.filter_shape[2] def OutShape(self, in_shape): """Compute the output shape given the input shape.""" p = self.params return ComputeConvOutputShape(in_shape, p.filter_stride[0], p.filter_stride[1], self.output_channels) def FProp(self, theta, inputs, paddings): """Apply convolution to inputs. Args: theta: A `.NestedMap` object containing weights' values of this layer and its children layers. inputs: The inputs tensor. It is expected to be of shape [batch, time, frequency, channel]. The time dimension corresponds to the height dimension as in images and the frequency dimension corresponds to the width dimension as in images. paddings: The paddings tensor, expected to be of shape [batch, time]. Returns: outputs, out_paddings pair. """ p = self.params with tf.name_scope(p.name): inputs = py_utils.with_dependencies([ py_utils.assert_shape_match(tf.shape(paddings), [-1, -1]), py_utils.assert_shape_match( tf.shape(inputs), tf.concat([tf.shape(paddings), [-1, self.input_channels]], 0)) ], inputs) def _ApplyPadding(tensor_in, padding_in): padding_expanded = tf.expand_dims(tf.expand_dims(padding_in, -1), -1) return tensor_in * (1.0 - padding_expanded) # Zeroing out padded inputs. inputs = _ApplyPadding(inputs, paddings) # Evaluate the conv kernel on 'inputs'. out = self._EvaluateConvKernel(theta, inputs) # NOTE: this may be slightly inaccurate when p.dilation_rate[0] > 1. # But there's likely no real problems. Trying to set it gives an error: # pooling with SAME padding is not implemented for dilation_rate > 1. # NOTE: we use window=p.filter_stride[0] to be compatible with legacy # implementation. Consider updating it to be the actual shape. conv_padding = ComputeConvOutputPadding( paddings, window=p.filter_stride[0], stride=p.filter_stride[0]) # Assuming padded nodes will be properly zero-ed out if necessary by # sub-sequent layers. 
# out = _ApplyPadding(out, conv_padding) out = py_utils.HasShape(out, self.OutShape(tf.shape(inputs))) return out, conv_padding def _EvaluateConvKernel(self, theta, conv_input): """Evaluate the convolution kernel on input 'conv_input'.""" raise NotImplementedError class Conv2DLayerWithPadding(BaseConv2DLayerWithPadding): """Conv2D layer.""" @base_layer.initializer def __init__(self, params): super(Conv2DLayerWithPadding, self).__init__(params) p = self.params assert p.name w_pc = py_utils.WeightParams( shape=p.filter_shape, init=p.params_init, dtype=p.dtype, collections=[self.__class__.__name__ + '_vars']) with tf.variable_scope(p.name): self.CreateVariable('w', w_pc) if p.weight_norm: self.CreateVariable( 'g', py_utils.WeightParams( shape=[p.filter_shape[-1]], init=py_utils.WeightInit.Constant(0.0), dtype=p.dtype, collections=[self.__class__.__name__ + '_vars'])) @property def output_channels(self): """The number of output channels for this conv layer.""" p = self.params return p.filter_shape[-1] def _GetWeight(self, theta): p = self.params if p.weight_norm: # Normalize along the last dim (standard conv). filter_w = tf.nn.l2_normalize(theta.w, [0, 1, 2]) * tf.reshape( (theta.g + 1.0), [1, 1, 1, p.filter_shape[-1]]) else: filter_w = theta.w return filter_w def _EvaluateConvKernel(self, theta, inputs): """Apply convolution to inputs.""" p = self.params filter_w = self._GetWeight(theta) return tf.nn.convolution( inputs, filter_w, strides=p.filter_stride, dilation_rate=p.dilation_rate, data_format='NHWC', padding='SAME') class CausalConv2DLayerWithPadding(Conv2DLayerWithPadding): """2D conv layer with causal dependency on the time axis.""" @base_layer.initializer def __init__(self, params): super(CausalConv2DLayerWithPadding, self).__init__(params) p = self.params assert p.filter_shape[1] == 1, 'Only 1d causal convolution is supported.' def _EvaluateConvKernel(self, theta, inputs): """Apply convolution to inputs.""" p = self.params assert p.filter_shape[1] == 1, 'Only 1D causal convolutions supported.' # Use VALID padding and shift the inputs to the right to ensure that the # first output only depends on the first input and so on. The output is # the same size as the input, as if the convolution used SAME padding. padding_algorithm = 'VALID' # The effective spatial filter width for dilated convolutions is # (kernel_width - 1) * dilation_rate + 1 as according to # https://www.tensorflow.org/api_docs/python/tf/nn/convolution. causal_pad_size = (p.filter_shape[0] - 1) * p.dilation_rate[0] inputs = tf.pad(inputs, [[0, 0], [causal_pad_size, 0], [0, 0], [0, 0]]) filter_w = self._GetWeight(theta) return tf.nn.convolution( inputs, filter_w, strides=p.filter_stride, dilation_rate=p.dilation_rate, data_format='NHWC', padding=padding_algorithm) class DepthwiseConv2DLayer(BaseConv2DLayerWithPadding): """Depthwise conv 2D layer. paper: https://arxiv.org/abs/1610.02357 """ @classmethod def Params(cls): p = super(DepthwiseConv2DLayer, cls).Params() # Redefine 'filter_shape' since the semantic of shape elements is different # from regular Conv2D. p.Delete('filter_shape') p.Define( 'filter_shape', (0, 0, 0, 0), 'Filter shape. Must be a sequence of length 4. Elements are in' ' the order of height (time), width (frequency), in_channel,' ' channel_multipliers. 
') return p @base_layer.initializer def __init__(self, params): super(DepthwiseConv2DLayer, self).__init__(params) p = self.params assert p.name w_pc = py_utils.WeightParams( shape=p.filter_shape, init=p.params_init, dtype=p.dtype, collections=[self.__class__.__name__ + '_vars']) with tf.variable_scope(p.name): self.CreateVariable('w', w_pc) if p.weight_norm: self.CreateVariable( 'g', py_utils.WeightParams( shape=[p.filter_shape[2], p.filter_shape[3]], init=py_utils.WeightInit.Constant(0.0), dtype=p.dtype, collections=[self.__class__.__name__ + '_vars'])) @property def output_channels(self): """The number of output channels for this conv layer.""" p = self.params # Depthwise convolution filter shape is: # [..., in_channels, channel_multiplier]. return p.filter_shape[2] * p.filter_shape[3] def _GetWeight(self, theta): p = self.params if p.weight_norm: # Normalize along the last two dims. filter_w = tf.nn.l2_normalize(theta.w, [0, 1]) * tf.reshape( (theta.g + 1.0), [1, 1, p.filter_shape[2], p.filter_shape[3]]) else: filter_w = theta.w return filter_w def _EvaluateConvKernel(self, theta, inputs): """Apply convolution to inputs.""" p = self.params filter_w = self._GetWeight(theta) return tf.nn.depthwise_conv2d( inputs, filter_w, strides=[1, p.filter_stride[0], p.filter_stride[1], 1], rate=p.dilation_rate, data_format='NHWC', padding='SAME') class CausalDepthwiseConv2DLayer(DepthwiseConv2DLayer): """Depthwise conv layer with causal dependency on the time axis.""" @base_layer.initializer def __init__(self, params): super(CausalDepthwiseConv2DLayer, self).__init__(params) p = self.params assert p.filter_shape[1] == 1, 'Only 1d causal convolution is supported.' def _EvaluateConvKernel(self, theta, inputs): """Apply convolution to inputs.""" p = self.params assert p.filter_shape[1] == 1, 'Only 1D causal convolutions supported.' # Use VALID padding and shift the inputs to the right to ensure that the # first output only depends on the first input and so on. The output is # the same size as the input, as if the convolution used SAME padding. padding_algorithm = 'VALID' # The effective spatial filter width for dilated convolutions is # (kernel_width - 1) * dilation_rate + 1 as according to # https://www.tensorflow.org/api_docs/python/tf/nn/convolution. causal_pad_size = (p.filter_shape[0] - 1) * p.dilation_rate[0] inputs = tf.pad(inputs, [[0, 0], [causal_pad_size, 0], [0, 0], [0, 0]]) filter_w = self._GetWeight(theta) return tf.nn.depthwise_conv2d( inputs, filter_w, strides=[1, p.filter_stride[0], p.filter_stride[1], 1], rate=p.dilation_rate, data_format='NHWC', padding=padding_algorithm) class NormalizedDepthwiseConv2DLayer(DepthwiseConv2DLayer): """DepthwiseConv2DLayer where weights are normalized over the time dim. https://arxiv.org/abs/1901.10430 """ @classmethod def Params(cls): p = super(NormalizedDepthwiseConv2DLayer, cls).Params() p.Define('dropconnect_prob', 0.0, 'Prob at which DropConnect regularization is performed.') p.Define('deterministic_dropout', False, 'Use determnisitc dropout or not.') p.Define('temperature', 1.0, 'Temperature for the softmax normalization of the weights.') p.Define('weight_tiling_factor', 1, 'Number of times weights are tiled over the input channels.') return p @base_layer.initializer def __init__(self, params): super(NormalizedDepthwiseConv2DLayer, self).__init__(params) p = self.params assert p.filter_shape[1] == 1, 'Only 1d convolution is supported.' assert p.temperature > 0.0, 'Absolute zero temperature is not possible.' 
@property def output_channels(self): """The number of output channels for this conv layer.""" p = self.params # Depthwise convolution filter shape is: # [kernel_size, 1, in_channels, channel_multiplier]. return p.filter_shape[2] * p.filter_shape[3] * p.weight_tiling_factor @property def input_channels(self): """The number of output channels for this conv layer.""" p = self.params return p.filter_shape[2] * p.weight_tiling_factor def _GetWeight(self, theta): p = self.params filter_w = theta.w # First normalize filter_w over the temporal dimension here. filter_w = tf.nn.softmax(filter_w / p.temperature, axis=0) # Add dropconnect on the weights for regularization. if p.dropconnect_prob > 0.0 and not p.is_eval: if p.deterministic_dropout: filter_w = py_utils.DeterministicDropout( filter_w, 1.0 - p.dropconnect_prob, py_utils.GenerateStepSeedPair(p, theta.global_step)) else: filter_w = tf.nn.dropout( filter_w, 1.0 - p.dropconnect_prob, seed=p.random_seed) # Tie the parameters of every subsequent number of weight_tiling_factor # channels. filter_w = tf.tile(filter_w, [1, 1, p.weight_tiling_factor, 1]) return filter_w @classmethod def FPropMeta(cls, p, inputs, paddings): py_utils.CheckShapes((inputs, paddings)) b, t, f, ic = inputs assert f == 1 oc = p.filter_shape[2] * p.filter_shape[3] * p.weight_tiling_factor outputs = tshape.Shape([b, t, f, oc]) flops = b * t * f * p.filter_shape[0] * ic * oc * 5 return py_utils.NestedMap(flops=flops, out_shapes=(outputs, paddings)) class CausalNormalizedDepthwiseConv2DLayer(NormalizedDepthwiseConv2DLayer): """Depthwise conv layer with causal dependency on the time axis.""" def _EvaluateConvKernel(self, theta, inputs): """Apply convolution to inputs.""" # Same as CausalDepthwiseConv2DLayer. p = self.params assert p.filter_shape[1] == 1, 'Only 1D causal convolutions supported.' padding_algorithm = 'VALID' causal_pad_size = (p.filter_shape[0] - 1) * p.dilation_rate[0] inputs = tf.pad(inputs, [[0, 0], [causal_pad_size, 0], [0, 0], [0, 0]]) filter_w = self._GetWeight(theta) return tf.nn.depthwise_conv2d( inputs, filter_w, strides=[1, p.filter_stride[0], p.filter_stride[1], 1], rate=p.dilation_rate, data_format='NHWC', padding=padding_algorithm) class ConvBatchNormLayer(bn_layers.BatchNormLayer): """A wrapper around regular BatchNormLayer that pass around the ... paddings layers. """ def FProp(self, theta, inputs, paddings): paddings_expanded = tf.expand_dims(tf.expand_dims(paddings, -1), -1) bned = super(ConvBatchNormLayer, self).FProp( theta, inputs, paddings_expanded) return bned, paddings # Supported activation functions. _ACTIVATIONS = { 'RELU': tf.nn.relu, 'RELU6': tf.nn.relu6, 'SIGMOID': tf.sigmoid, 'TANH': tf.tanh, 'SWISH': tf.nn.swish, 'NONE': tf.identity, } class ActivationLayer(base_layer.BaseLayer): """Applies activation function to the inputs.""" @classmethod def Params(cls): p = super(ActivationLayer, cls).Params() p.Define('activation', 'RELU', 'The activation function to apply') return p def FProp(self, theta, inputs, paddings): p = self.params out = _ACTIVATIONS[p.activation](inputs) return out, paddings class PaddingLayer(base_layer.BaseLayer): """Zeros out padded positions.""" def FProp(self, theta, inputs, paddings): paddings_expanded = tf.expand_dims(tf.expand_dims(paddings, -1), -1) return inputs * (1.0 - paddings_expanded), paddings
def ComputeConvOutputPadding(paddings, window, stride, padding_algorithm='SAME'): """Computes paddings for convolution and pooling output. out_padding[i] == 1 iff any in_padding corresponding to that output is 1. Args: paddings: The paddings tensor. It is expected to be of shape [batch, time]. window: The size of the windows. stride: The time-stride between adjacent windows. padding_algorithm: 'SAME' or 'VALID'. Returns: out_padding, The new padding tensor of size [batch, ceil(time / stride)]. """ if stride == 1: return paddings # Pad so input_length divides stride. input_length = py_utils.GetShape(paddings)[1] pad_len = (input_length + stride - 1) // stride * stride - input_length paddings = tf.pad(paddings, [[0, 0], [0, pad_len]], constant_values=1.0) out_padding = tf.nn.pool( tf.expand_dims(paddings, -1), [window], 'MAX', padding_algorithm, strides=[stride], ) return tf.squeeze(out_padding, -1)
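The pooling trick above can be hard to picture from the TF call alone. Below is a NumPy restatement of the same idea, written for the window == stride case that the conv layers' FProp actually uses; for window > stride the SAME-pooling window alignment differs slightly, so treat this purely as an illustrative sketch:

import numpy as np

def compute_conv_output_padding_np(paddings, window, stride):
  """NumPy sketch of the padding propagation above (window == stride case)."""
  if stride == 1:
    return paddings
  batch, time = paddings.shape
  out_len = -(-time // stride)          # ceil(time / stride)
  pad_len = out_len * stride - time
  # Appended frames count as padding (1.0), matching constant_values=1.0 above.
  padded = np.pad(paddings, [(0, 0), (0, pad_len)], constant_values=1.0)
  out = np.empty((batch, out_len), dtype=paddings.dtype)
  for i in range(out_len):
    # An output step is padded iff any input step in its window is padded.
    out[:, i] = padded[:, i * stride:i * stride + window].max(axis=1)
  return out

# 5 time steps, last one padded; window=2, stride=2 -> 3 output steps.
print(compute_conv_output_padding_np(np.array([[0., 0., 0., 0., 1.]]),
                                     window=2, stride=2))
# [[0. 0. 1.]]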
70
99
# Lint as: python2, python3 # Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Common conv layers.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from lingvo.core import base_layer from lingvo.core import bn_layers from lingvo.core import py_utils from lingvo.core import tshape def ComputeConvOutputShape(in_shape, t_stride, f_stride, outc=None, padding='SAME'): """Computes output shape for convolution and pooling layers. If `in_shape` is a dynamic shape, the output will be Tensors, while if `in_shape` is a list of ints then the output will also be a list of ints. Args: in_shape: A length 4 Tensor or list representing the input shape. t_stride: The stride along the time dimension. f_stride: The stride along the frequency dimension. outc: The expected output channel. If None, will use the input channel. padding: 'SAME' or 'VALID'. Returns: The expected output shape. """ # In the order of batch, time, frequency, channel n = in_shape[0] t = in_shape[1] f = in_shape[2] c = in_shape[3] # Last two dimensions has to be specified. assert f is not None and c is not None if padding == 'VALID': if t: t -= t_stride - 1 f -= f_stride - 1 ot = t if ot is not None: ot = (ot + t_stride - 1) // t_stride of = (f + f_stride - 1) // f_stride if outc is None: outc = c return [n, ot, of, outc] def ComputeConvOutputPadding(paddings, window, stride, padding_algorithm='SAME'): """Computes paddings for convolution and pooling output. out_padding[i] == 1 iff any in_padding corresponding to that output is 1. Args: paddings: The paddings tensor. It is expected to be of shape [batch, time]. window: The size of the windows. stride: The time-stride between adjacent windows. padding_algorithm: 'SAME' or 'VALID'. Returns: out_padding, The new padding tensor of size [batch, ceil(time / stride)]. """ if stride == 1: return paddings # Pad so input_length divides stride. input_length = py_utils.GetShape(paddings)[1] pad_len = (input_length + stride - 1) // stride * stride - input_length paddings = tf.pad(paddings, [[0, 0], [0, pad_len]], constant_values=1.0) out_padding = tf.nn.pool( tf.expand_dims(paddings, -1), [window], 'MAX', padding_algorithm, strides=[stride], ) return tf.squeeze(out_padding, -1) class BaseConv2DLayerWithPadding(base_layer.BaseLayer): """Base class for 2D convolution layers.""" @classmethod def Params(cls): p = super(BaseConv2DLayerWithPadding, cls).Params() p.Define( 'filter_shape', (0, 0, 0, 0), 'Filter shape. Must be a sequence of length 4. Elements are in' ' the order of height (time), width (frequency), in_channel,' ' out_channel. For causal convolution, filter_shape[0]' ' is the actual number of trained weights in the time dimension' ' of the kernel.') p.Define( 'filter_stride', (1, 1), 'Filter stride to use. Must be a pair of ints. 
The first int' ' specifies the stride on the time dimension. The second int' ' specifies the stride on the frequency dimension.') p.Define( 'dilation_rate', (1, 1), 'If > 1, dilation rate for atrous convolution. ' 'Must be a pair of ints. ' 'The first int specifies the dilation rate on the time dimension. ' 'The second int specifies the dilation rate on the frequency ' 'dimension. ' 'If any value of dilation_rate is > 1, then all values of strides ' 'must be 1.') p.Define( 'weight_norm', False, 'If true, apply weight normalization to weights as proposed by' ' Salimans and Kingma, 2016: https://arxiv.org/abs/1602.07868') return p @base_layer.initializer def __init__(self, params): super(BaseConv2DLayerWithPadding, self).__init__(params) p = self.params assert p.name assert len(p.filter_shape) == 4 assert len(p.filter_stride) == 2 assert all(x > 0 for x in p.filter_shape) assert all(x > 0 for x in p.filter_stride) assert len(p.dilation_rate) == 2 assert all(x > 0 for x in p.dilation_rate) # Dilation and stride can't be combined. if any(x > 1 for x in p.dilation_rate): assert all(x == 1 for x in p.filter_stride) @property def output_channels(self): """The number of output channels for this conv layer.""" raise NotImplementedError() @property def input_channels(self): """The number of input channels for this conv layer.""" return self.params.filter_shape[2] def OutShape(self, in_shape): """Compute the output shape given the input shape.""" p = self.params return ComputeConvOutputShape(in_shape, p.filter_stride[0], p.filter_stride[1], self.output_channels) def FProp(self, theta, inputs, paddings): """Apply convolution to inputs. Args: theta: A `.NestedMap` object containing weights' values of this layer and its children layers. inputs: The inputs tensor. It is expected to be of shape [batch, time, frequency, channel]. The time dimension corresponds to the height dimension as in images and the frequency dimension corresponds to the width dimension as in images. paddings: The paddings tensor, expected to be of shape [batch, time]. Returns: outputs, out_paddings pair. """ p = self.params with tf.name_scope(p.name): inputs = py_utils.with_dependencies([ py_utils.assert_shape_match(tf.shape(paddings), [-1, -1]), py_utils.assert_shape_match( tf.shape(inputs), tf.concat([tf.shape(paddings), [-1, self.input_channels]], 0)) ], inputs) def _ApplyPadding(tensor_in, padding_in): padding_expanded = tf.expand_dims(tf.expand_dims(padding_in, -1), -1) return tensor_in * (1.0 - padding_expanded) # Zeroing out padded inputs. inputs = _ApplyPadding(inputs, paddings) # Evaluate the conv kernel on 'inputs'. out = self._EvaluateConvKernel(theta, inputs) # NOTE: this may be slightly inaccurate when p.dilation_rate[0] > 1. # But there's likely no real problems. Trying to set it gives an error: # pooling with SAME padding is not implemented for dilation_rate > 1. # NOTE: we use window=p.filter_stride[0] to be compatible with legacy # implementation. Consider updating it to be the actual shape. conv_padding = ComputeConvOutputPadding( paddings, window=p.filter_stride[0], stride=p.filter_stride[0]) # Assuming padded nodes will be properly zero-ed out if necessary by # sub-sequent layers. 
# out = _ApplyPadding(out, conv_padding) out = py_utils.HasShape(out, self.OutShape(tf.shape(inputs))) return out, conv_padding def _EvaluateConvKernel(self, theta, conv_input): """Evaluate the convolution kernel on input 'conv_input'.""" raise NotImplementedError class Conv2DLayerWithPadding(BaseConv2DLayerWithPadding): """Conv2D layer.""" @base_layer.initializer def __init__(self, params): super(Conv2DLayerWithPadding, self).__init__(params) p = self.params assert p.name w_pc = py_utils.WeightParams( shape=p.filter_shape, init=p.params_init, dtype=p.dtype, collections=[self.__class__.__name__ + '_vars']) with tf.variable_scope(p.name): self.CreateVariable('w', w_pc) if p.weight_norm: self.CreateVariable( 'g', py_utils.WeightParams( shape=[p.filter_shape[-1]], init=py_utils.WeightInit.Constant(0.0), dtype=p.dtype, collections=[self.__class__.__name__ + '_vars'])) @property def output_channels(self): """The number of output channels for this conv layer.""" p = self.params return p.filter_shape[-1] def _GetWeight(self, theta): p = self.params if p.weight_norm: # Normalize along the last dim (standard conv). filter_w = tf.nn.l2_normalize(theta.w, [0, 1, 2]) * tf.reshape( (theta.g + 1.0), [1, 1, 1, p.filter_shape[-1]]) else: filter_w = theta.w return filter_w def _EvaluateConvKernel(self, theta, inputs): """Apply convolution to inputs.""" p = self.params filter_w = self._GetWeight(theta) return tf.nn.convolution( inputs, filter_w, strides=p.filter_stride, dilation_rate=p.dilation_rate, data_format='NHWC', padding='SAME') class CausalConv2DLayerWithPadding(Conv2DLayerWithPadding): """2D conv layer with causal dependency on the time axis.""" @base_layer.initializer def __init__(self, params): super(CausalConv2DLayerWithPadding, self).__init__(params) p = self.params assert p.filter_shape[1] == 1, 'Only 1d causal convolution is supported.' def _EvaluateConvKernel(self, theta, inputs): """Apply convolution to inputs.""" p = self.params assert p.filter_shape[1] == 1, 'Only 1D causal convolutions supported.' # Use VALID padding and shift the inputs to the right to ensure that the # first output only depends on the first input and so on. The output is # the same size as the input, as if the convolution used SAME padding. padding_algorithm = 'VALID' # The effective spatial filter width for dilated convolutions is # (kernel_width - 1) * dilation_rate + 1 as according to # https://www.tensorflow.org/api_docs/python/tf/nn/convolution. causal_pad_size = (p.filter_shape[0] - 1) * p.dilation_rate[0] inputs = tf.pad(inputs, [[0, 0], [causal_pad_size, 0], [0, 0], [0, 0]]) filter_w = self._GetWeight(theta) return tf.nn.convolution( inputs, filter_w, strides=p.filter_stride, dilation_rate=p.dilation_rate, data_format='NHWC', padding=padding_algorithm) class DepthwiseConv2DLayer(BaseConv2DLayerWithPadding): """Depthwise conv 2D layer. paper: https://arxiv.org/abs/1610.02357 """ @classmethod def Params(cls): p = super(DepthwiseConv2DLayer, cls).Params() # Redefine 'filter_shape' since the semantic of shape elements is different # from regular Conv2D. p.Delete('filter_shape') p.Define( 'filter_shape', (0, 0, 0, 0), 'Filter shape. Must be a sequence of length 4. Elements are in' ' the order of height (time), width (frequency), in_channel,' ' channel_multipliers. 
') return p @base_layer.initializer def __init__(self, params): super(DepthwiseConv2DLayer, self).__init__(params) p = self.params assert p.name w_pc = py_utils.WeightParams( shape=p.filter_shape, init=p.params_init, dtype=p.dtype, collections=[self.__class__.__name__ + '_vars']) with tf.variable_scope(p.name): self.CreateVariable('w', w_pc) if p.weight_norm: self.CreateVariable( 'g', py_utils.WeightParams( shape=[p.filter_shape[2], p.filter_shape[3]], init=py_utils.WeightInit.Constant(0.0), dtype=p.dtype, collections=[self.__class__.__name__ + '_vars'])) @property def output_channels(self): """The number of output channels for this conv layer.""" p = self.params # Depthwise convolution filter shape is: # [..., in_channels, channel_multiplier]. return p.filter_shape[2] * p.filter_shape[3] def _GetWeight(self, theta): p = self.params if p.weight_norm: # Normalize along the last two dims. filter_w = tf.nn.l2_normalize(theta.w, [0, 1]) * tf.reshape( (theta.g + 1.0), [1, 1, p.filter_shape[2], p.filter_shape[3]]) else: filter_w = theta.w return filter_w def _EvaluateConvKernel(self, theta, inputs): """Apply convolution to inputs.""" p = self.params filter_w = self._GetWeight(theta) return tf.nn.depthwise_conv2d( inputs, filter_w, strides=[1, p.filter_stride[0], p.filter_stride[1], 1], rate=p.dilation_rate, data_format='NHWC', padding='SAME') class CausalDepthwiseConv2DLayer(DepthwiseConv2DLayer): """Depthwise conv layer with causal dependency on the time axis.""" @base_layer.initializer def __init__(self, params): super(CausalDepthwiseConv2DLayer, self).__init__(params) p = self.params assert p.filter_shape[1] == 1, 'Only 1d causal convolution is supported.' def _EvaluateConvKernel(self, theta, inputs): """Apply convolution to inputs.""" p = self.params assert p.filter_shape[1] == 1, 'Only 1D causal convolutions supported.' # Use VALID padding and shift the inputs to the right to ensure that the # first output only depends on the first input and so on. The output is # the same size as the input, as if the convolution used SAME padding. padding_algorithm = 'VALID' # The effective spatial filter width for dilated convolutions is # (kernel_width - 1) * dilation_rate + 1 as according to # https://www.tensorflow.org/api_docs/python/tf/nn/convolution. causal_pad_size = (p.filter_shape[0] - 1) * p.dilation_rate[0] inputs = tf.pad(inputs, [[0, 0], [causal_pad_size, 0], [0, 0], [0, 0]]) filter_w = self._GetWeight(theta) return tf.nn.depthwise_conv2d( inputs, filter_w, strides=[1, p.filter_stride[0], p.filter_stride[1], 1], rate=p.dilation_rate, data_format='NHWC', padding=padding_algorithm) class NormalizedDepthwiseConv2DLayer(DepthwiseConv2DLayer): """DepthwiseConv2DLayer where weights are normalized over the time dim. https://arxiv.org/abs/1901.10430 """ @classmethod def Params(cls): p = super(NormalizedDepthwiseConv2DLayer, cls).Params() p.Define('dropconnect_prob', 0.0, 'Prob at which DropConnect regularization is performed.') p.Define('deterministic_dropout', False, 'Use determnisitc dropout or not.') p.Define('temperature', 1.0, 'Temperature for the softmax normalization of the weights.') p.Define('weight_tiling_factor', 1, 'Number of times weights are tiled over the input channels.') return p @base_layer.initializer def __init__(self, params): super(NormalizedDepthwiseConv2DLayer, self).__init__(params) p = self.params assert p.filter_shape[1] == 1, 'Only 1d convolution is supported.' assert p.temperature > 0.0, 'Absolute zero temperature is not possible.' 
@property def output_channels(self): """The number of output channels for this conv layer.""" p = self.params # Depthwise convolution filter shape is: # [kernel_size, 1, in_channels, channel_multiplier]. return p.filter_shape[2] * p.filter_shape[3] * p.weight_tiling_factor @property def input_channels(self): """The number of output channels for this conv layer.""" p = self.params return p.filter_shape[2] * p.weight_tiling_factor def _GetWeight(self, theta): p = self.params filter_w = theta.w # First normalize filter_w over the temporal dimension here. filter_w = tf.nn.softmax(filter_w / p.temperature, axis=0) # Add dropconnect on the weights for regularization. if p.dropconnect_prob > 0.0 and not p.is_eval: if p.deterministic_dropout: filter_w = py_utils.DeterministicDropout( filter_w, 1.0 - p.dropconnect_prob, py_utils.GenerateStepSeedPair(p, theta.global_step)) else: filter_w = tf.nn.dropout( filter_w, 1.0 - p.dropconnect_prob, seed=p.random_seed) # Tie the parameters of every subsequent number of weight_tiling_factor # channels. filter_w = tf.tile(filter_w, [1, 1, p.weight_tiling_factor, 1]) return filter_w @classmethod def FPropMeta(cls, p, inputs, paddings): py_utils.CheckShapes((inputs, paddings)) b, t, f, ic = inputs assert f == 1 oc = p.filter_shape[2] * p.filter_shape[3] * p.weight_tiling_factor outputs = tshape.Shape([b, t, f, oc]) flops = b * t * f * p.filter_shape[0] * ic * oc * 5 return py_utils.NestedMap(flops=flops, out_shapes=(outputs, paddings)) class CausalNormalizedDepthwiseConv2DLayer(NormalizedDepthwiseConv2DLayer): """Depthwise conv layer with causal dependency on the time axis.""" def _EvaluateConvKernel(self, theta, inputs): """Apply convolution to inputs.""" # Same as CausalDepthwiseConv2DLayer. p = self.params assert p.filter_shape[1] == 1, 'Only 1D causal convolutions supported.' padding_algorithm = 'VALID' causal_pad_size = (p.filter_shape[0] - 1) * p.dilation_rate[0] inputs = tf.pad(inputs, [[0, 0], [causal_pad_size, 0], [0, 0], [0, 0]]) filter_w = self._GetWeight(theta) return tf.nn.depthwise_conv2d( inputs, filter_w, strides=[1, p.filter_stride[0], p.filter_stride[1], 1], rate=p.dilation_rate, data_format='NHWC', padding=padding_algorithm) class ConvBatchNormLayer(bn_layers.BatchNormLayer): """A wrapper around regular BatchNormLayer that pass around the ... paddings layers. """ def FProp(self, theta, inputs, paddings): paddings_expanded = tf.expand_dims(tf.expand_dims(paddings, -1), -1) bned = super(ConvBatchNormLayer, self).FProp( theta, inputs, paddings_expanded) return bned, paddings # Supported activation functions. _ACTIVATIONS = { 'RELU': tf.nn.relu, 'RELU6': tf.nn.relu6, 'SIGMOID': tf.sigmoid, 'TANH': tf.tanh, 'SWISH': tf.nn.swish, 'NONE': tf.identity, } class ActivationLayer(base_layer.BaseLayer): """Applies activation function to the inputs.""" @classmethod def Params(cls): p = super(ActivationLayer, cls).Params() p.Define('activation', 'RELU', 'The activation function to apply') return p def FProp(self, theta, inputs, paddings): p = self.params out = _ACTIVATIONS[p.activation](inputs) return out, paddings class PaddingLayer(base_layer.BaseLayer): """Zeros out padded positions.""" def FProp(self, theta, inputs, paddings): paddings_expanded = tf.expand_dims(tf.expand_dims(paddings, -1), -1) return inputs * (1.0 - paddings_expanded), paddings
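The stride and causal-padding arithmetic used by the layers above is easy to verify by hand. Below is a small, framework-free sketch of the two quantities involved: the SAME-padding output length computed by ComputeConvOutputShape, and the causal left-pad applied by the causal conv layers. The helper names are mine, for illustration only, and are not part of the library.

# Illustrative sketch (hypothetical helper names, plain Python, no TensorFlow needed).

def same_padding_out_len(t, stride):
    # Mirrors ComputeConvOutputShape for padding='SAME': ceil(t / stride).
    return (t + stride - 1) // stride

def causal_left_pad(kernel_size, dilation_rate):
    # Effective filter width is (kernel_size - 1) * dilation_rate + 1, so shifting
    # the input right by (kernel_size - 1) * dilation_rate makes output[i] depend
    # only on inputs[0..i], as done in the causal layers above.
    return (kernel_size - 1) * dilation_rate

assert same_padding_out_len(t=100, stride=2) == 50
assert same_padding_out_len(t=101, stride=2) == 51
assert causal_left_pad(kernel_size=3, dilation_rate=2) == 4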
list_topic_keys
Namespace/ServiceBus Connection String
API Version: 2017-04-01.

:param str authorization_rule_name: The authorization rule name.
:param str namespace_name: The namespace name
:param str resource_group_name: Name of the Resource group within the Azure subscription.
:param str topic_name: The topic name.
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union from .. import _utilities, _tables __all__ = [ 'ListTopicKeysResult', 'AwaitableListTopicKeysResult', 'list_topic_keys', ] @pulumi.output_type class ListTopicKeysResult: """ Namespace/ServiceBus Connection String """ def __init__(__self__, alias_primary_connection_string=None, alias_secondary_connection_string=None, key_name=None, primary_connection_string=None, primary_key=None, secondary_connection_string=None, secondary_key=None): if alias_primary_connection_string and not isinstance(alias_primary_connection_string, str): raise TypeError("Expected argument 'alias_primary_connection_string' to be a str") pulumi.set(__self__, "alias_primary_connection_string", alias_primary_connection_string) if alias_secondary_connection_string and not isinstance(alias_secondary_connection_string, str): raise TypeError("Expected argument 'alias_secondary_connection_string' to be a str") pulumi.set(__self__, "alias_secondary_connection_string", alias_secondary_connection_string) if key_name and not isinstance(key_name, str): raise TypeError("Expected argument 'key_name' to be a str") pulumi.set(__self__, "key_name", key_name) if primary_connection_string and not isinstance(primary_connection_string, str): raise TypeError("Expected argument 'primary_connection_string' to be a str") pulumi.set(__self__, "primary_connection_string", primary_connection_string) if primary_key and not isinstance(primary_key, str): raise TypeError("Expected argument 'primary_key' to be a str") pulumi.set(__self__, "primary_key", primary_key) if secondary_connection_string and not isinstance(secondary_connection_string, str): raise TypeError("Expected argument 'secondary_connection_string' to be a str") pulumi.set(__self__, "secondary_connection_string", secondary_connection_string) if secondary_key and not isinstance(secondary_key, str): raise TypeError("Expected argument 'secondary_key' to be a str") pulumi.set(__self__, "secondary_key", secondary_key) @property @pulumi.getter(name="aliasPrimaryConnectionString") def alias_primary_connection_string(self) -> str: """ Primary connection string of the alias if GEO DR is enabled """ return pulumi.get(self, "alias_primary_connection_string") @property @pulumi.getter(name="aliasSecondaryConnectionString") def alias_secondary_connection_string(self) -> str: """ Secondary connection string of the alias if GEO DR is enabled """ return pulumi.get(self, "alias_secondary_connection_string") @property @pulumi.getter(name="keyName") def key_name(self) -> str: """ A string that describes the authorization rule. """ return pulumi.get(self, "key_name") @property @pulumi.getter(name="primaryConnectionString") def primary_connection_string(self) -> str: """ Primary connection string of the created namespace authorization rule. """ return pulumi.get(self, "primary_connection_string") @property @pulumi.getter(name="primaryKey") def primary_key(self) -> str: """ A base64-encoded 256-bit primary key for signing and validating the SAS token. """ return pulumi.get(self, "primary_key") @property @pulumi.getter(name="secondaryConnectionString") def secondary_connection_string(self) -> str: """ Secondary connection string of the created namespace authorization rule. 
""" return pulumi.get(self, "secondary_connection_string") @property @pulumi.getter(name="secondaryKey") def secondary_key(self) -> str: """ A base64-encoded 256-bit primary key for signing and validating the SAS token. """ return pulumi.get(self, "secondary_key") class AwaitableListTopicKeysResult(ListTopicKeysResult): # pylint: disable=using-constant-test def __await__(self): if False: yield self return ListTopicKeysResult( alias_primary_connection_string=self.alias_primary_connection_string, alias_secondary_connection_string=self.alias_secondary_connection_string, key_name=self.key_name, primary_connection_string=self.primary_connection_string, primary_key=self.primary_key, secondary_connection_string=self.secondary_connection_string, secondary_key=self.secondary_key) # MASKED: list_topic_keys function (lines 117-150)
def list_topic_keys(authorization_rule_name: Optional[str] = None,
                    namespace_name: Optional[str] = None,
                    resource_group_name: Optional[str] = None,
                    topic_name: Optional[str] = None,
                    opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListTopicKeysResult:
    """
    Namespace/ServiceBus Connection String
    API Version: 2017-04-01.

    :param str authorization_rule_name: The authorization rule name.
    :param str namespace_name: The namespace name
    :param str resource_group_name: Name of the Resource group within the Azure subscription.
    :param str topic_name: The topic name.
    """
    __args__ = dict()
    __args__['authorizationRuleName'] = authorization_rule_name
    __args__['namespaceName'] = namespace_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['topicName'] = topic_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-nextgen:servicebus:listTopicKeys', __args__, opts=opts, typ=ListTopicKeysResult).value

    return AwaitableListTopicKeysResult(
        alias_primary_connection_string=__ret__.alias_primary_connection_string,
        alias_secondary_connection_string=__ret__.alias_secondary_connection_string,
        key_name=__ret__.key_name,
        primary_connection_string=__ret__.primary_connection_string,
        primary_key=__ret__.primary_key,
        secondary_connection_string=__ret__.secondary_connection_string,
        secondary_key=__ret__.secondary_key)
117
150
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union from .. import _utilities, _tables __all__ = [ 'ListTopicKeysResult', 'AwaitableListTopicKeysResult', 'list_topic_keys', ] @pulumi.output_type class ListTopicKeysResult: """ Namespace/ServiceBus Connection String """ def __init__(__self__, alias_primary_connection_string=None, alias_secondary_connection_string=None, key_name=None, primary_connection_string=None, primary_key=None, secondary_connection_string=None, secondary_key=None): if alias_primary_connection_string and not isinstance(alias_primary_connection_string, str): raise TypeError("Expected argument 'alias_primary_connection_string' to be a str") pulumi.set(__self__, "alias_primary_connection_string", alias_primary_connection_string) if alias_secondary_connection_string and not isinstance(alias_secondary_connection_string, str): raise TypeError("Expected argument 'alias_secondary_connection_string' to be a str") pulumi.set(__self__, "alias_secondary_connection_string", alias_secondary_connection_string) if key_name and not isinstance(key_name, str): raise TypeError("Expected argument 'key_name' to be a str") pulumi.set(__self__, "key_name", key_name) if primary_connection_string and not isinstance(primary_connection_string, str): raise TypeError("Expected argument 'primary_connection_string' to be a str") pulumi.set(__self__, "primary_connection_string", primary_connection_string) if primary_key and not isinstance(primary_key, str): raise TypeError("Expected argument 'primary_key' to be a str") pulumi.set(__self__, "primary_key", primary_key) if secondary_connection_string and not isinstance(secondary_connection_string, str): raise TypeError("Expected argument 'secondary_connection_string' to be a str") pulumi.set(__self__, "secondary_connection_string", secondary_connection_string) if secondary_key and not isinstance(secondary_key, str): raise TypeError("Expected argument 'secondary_key' to be a str") pulumi.set(__self__, "secondary_key", secondary_key) @property @pulumi.getter(name="aliasPrimaryConnectionString") def alias_primary_connection_string(self) -> str: """ Primary connection string of the alias if GEO DR is enabled """ return pulumi.get(self, "alias_primary_connection_string") @property @pulumi.getter(name="aliasSecondaryConnectionString") def alias_secondary_connection_string(self) -> str: """ Secondary connection string of the alias if GEO DR is enabled """ return pulumi.get(self, "alias_secondary_connection_string") @property @pulumi.getter(name="keyName") def key_name(self) -> str: """ A string that describes the authorization rule. """ return pulumi.get(self, "key_name") @property @pulumi.getter(name="primaryConnectionString") def primary_connection_string(self) -> str: """ Primary connection string of the created namespace authorization rule. """ return pulumi.get(self, "primary_connection_string") @property @pulumi.getter(name="primaryKey") def primary_key(self) -> str: """ A base64-encoded 256-bit primary key for signing and validating the SAS token. """ return pulumi.get(self, "primary_key") @property @pulumi.getter(name="secondaryConnectionString") def secondary_connection_string(self) -> str: """ Secondary connection string of the created namespace authorization rule. 
""" return pulumi.get(self, "secondary_connection_string") @property @pulumi.getter(name="secondaryKey") def secondary_key(self) -> str: """ A base64-encoded 256-bit primary key for signing and validating the SAS token. """ return pulumi.get(self, "secondary_key") class AwaitableListTopicKeysResult(ListTopicKeysResult): # pylint: disable=using-constant-test def __await__(self): if False: yield self return ListTopicKeysResult( alias_primary_connection_string=self.alias_primary_connection_string, alias_secondary_connection_string=self.alias_secondary_connection_string, key_name=self.key_name, primary_connection_string=self.primary_connection_string, primary_key=self.primary_key, secondary_connection_string=self.secondary_connection_string, secondary_key=self.secondary_key) def list_topic_keys(authorization_rule_name: Optional[str] = None, namespace_name: Optional[str] = None, resource_group_name: Optional[str] = None, topic_name: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListTopicKeysResult: """ Namespace/ServiceBus Connection String API Version: 2017-04-01. :param str authorization_rule_name: The authorization rule name. :param str namespace_name: The namespace name :param str resource_group_name: Name of the Resource group within the Azure subscription. :param str topic_name: The topic name. """ __args__ = dict() __args__['authorizationRuleName'] = authorization_rule_name __args__['namespaceName'] = namespace_name __args__['resourceGroupName'] = resource_group_name __args__['topicName'] = topic_name if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('azure-nextgen:servicebus:listTopicKeys', __args__, opts=opts, typ=ListTopicKeysResult).value return AwaitableListTopicKeysResult( alias_primary_connection_string=__ret__.alias_primary_connection_string, alias_secondary_connection_string=__ret__.alias_secondary_connection_string, key_name=__ret__.key_name, primary_connection_string=__ret__.primary_connection_string, primary_key=__ret__.primary_key, secondary_connection_string=__ret__.secondary_connection_string, secondary_key=__ret__.secondary_key)
__init__
Instantiate the pager.

Args:
    method (Callable): The method that was originally called, and
        which instantiated this pager.
    request (google.cloud.compute_v1.types.AggregatedListTargetInstancesRequest):
        The initial request object.
    response (google.cloud.compute_v1.types.TargetInstanceAggregatedList):
        The initial response object.
    metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with
        the request as metadata.
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from typing import ( Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional, ) from google.cloud.compute_v1.types import compute class AggregatedListPager: """A pager for iterating through ``aggregated_list`` requests. This class thinly wraps an initial :class:`google.cloud.compute_v1.types.TargetInstanceAggregatedList` object, and provides an ``__iter__`` method to iterate through its ``items`` field. If there are more pages, the ``__iter__`` method will make additional ``AggregatedList`` requests and continue to iterate through the ``items`` field on the corresponding responses. All the usual :class:`google.cloud.compute_v1.types.TargetInstanceAggregatedList` attributes are available on the pager. If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. """ # MASKED: __init__ function (lines 48-71) def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property def pages(self) -> Iterable[compute.TargetInstanceAggregatedList]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response def __iter__(self) -> Iterable[Tuple[str, compute.TargetInstancesScopedList]]: for page in self.pages: yield from page.items.items() def get(self, key: str) -> Optional[compute.TargetInstancesScopedList]: return self._response.items.get(key) def __repr__(self) -> str: return "{0}<{1!r}>".format(self.__class__.__name__, self._response) class ListPager: """A pager for iterating through ``list`` requests. This class thinly wraps an initial :class:`google.cloud.compute_v1.types.TargetInstanceList` object, and provides an ``__iter__`` method to iterate through its ``items`` field. If there are more pages, the ``__iter__`` method will make additional ``List`` requests and continue to iterate through the ``items`` field on the corresponding responses. All the usual :class:`google.cloud.compute_v1.types.TargetInstanceList` attributes are available on the pager. If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. """ def __init__( self, method: Callable[..., compute.TargetInstanceList], request: compute.ListTargetInstancesRequest, response: compute.TargetInstanceList, *, metadata: Sequence[Tuple[str, str]] = () ): """Instantiate the pager. Args: method (Callable): The method that was originally called, and which instantiated this pager. request (google.cloud.compute_v1.types.ListTargetInstancesRequest): The initial request object. response (google.cloud.compute_v1.types.TargetInstanceList): The initial response object. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. 
""" self._method = method self._request = compute.ListTargetInstancesRequest(request) self._response = response self._metadata = metadata def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property def pages(self) -> Iterable[compute.TargetInstanceList]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response def __iter__(self) -> Iterable[compute.TargetInstance]: for page in self.pages: yield from page.items def __repr__(self) -> str: return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
def __init__(
    self,
    method: Callable[..., compute.TargetInstanceAggregatedList],
    request: compute.AggregatedListTargetInstancesRequest,
    response: compute.TargetInstanceAggregatedList,
    *,
    metadata: Sequence[Tuple[str, str]] = ()
):
    """Instantiate the pager.

    Args:
        method (Callable): The method that was originally called, and
            which instantiated this pager.
        request (google.cloud.compute_v1.types.AggregatedListTargetInstancesRequest):
            The initial request object.
        response (google.cloud.compute_v1.types.TargetInstanceAggregatedList):
            The initial response object.
        metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with
            the request as metadata.
    """
    self._method = method
    self._request = compute.AggregatedListTargetInstancesRequest(request)
    self._response = response
    self._metadata = metadata
48
71
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from typing import ( Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional, ) from google.cloud.compute_v1.types import compute class AggregatedListPager: """A pager for iterating through ``aggregated_list`` requests. This class thinly wraps an initial :class:`google.cloud.compute_v1.types.TargetInstanceAggregatedList` object, and provides an ``__iter__`` method to iterate through its ``items`` field. If there are more pages, the ``__iter__`` method will make additional ``AggregatedList`` requests and continue to iterate through the ``items`` field on the corresponding responses. All the usual :class:`google.cloud.compute_v1.types.TargetInstanceAggregatedList` attributes are available on the pager. If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. """ def __init__( self, method: Callable[..., compute.TargetInstanceAggregatedList], request: compute.AggregatedListTargetInstancesRequest, response: compute.TargetInstanceAggregatedList, *, metadata: Sequence[Tuple[str, str]] = () ): """Instantiate the pager. Args: method (Callable): The method that was originally called, and which instantiated this pager. request (google.cloud.compute_v1.types.AggregatedListTargetInstancesRequest): The initial request object. response (google.cloud.compute_v1.types.TargetInstanceAggregatedList): The initial response object. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ self._method = method self._request = compute.AggregatedListTargetInstancesRequest(request) self._response = response self._metadata = metadata def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property def pages(self) -> Iterable[compute.TargetInstanceAggregatedList]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response def __iter__(self) -> Iterable[Tuple[str, compute.TargetInstancesScopedList]]: for page in self.pages: yield from page.items.items() def get(self, key: str) -> Optional[compute.TargetInstancesScopedList]: return self._response.items.get(key) def __repr__(self) -> str: return "{0}<{1!r}>".format(self.__class__.__name__, self._response) class ListPager: """A pager for iterating through ``list`` requests. This class thinly wraps an initial :class:`google.cloud.compute_v1.types.TargetInstanceList` object, and provides an ``__iter__`` method to iterate through its ``items`` field. If there are more pages, the ``__iter__`` method will make additional ``List`` requests and continue to iterate through the ``items`` field on the corresponding responses. All the usual :class:`google.cloud.compute_v1.types.TargetInstanceList` attributes are available on the pager. 
If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. """ def __init__( self, method: Callable[..., compute.TargetInstanceList], request: compute.ListTargetInstancesRequest, response: compute.TargetInstanceList, *, metadata: Sequence[Tuple[str, str]] = () ): """Instantiate the pager. Args: method (Callable): The method that was originally called, and which instantiated this pager. request (google.cloud.compute_v1.types.ListTargetInstancesRequest): The initial request object. response (google.cloud.compute_v1.types.TargetInstanceList): The initial response object. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ self._method = method self._request = compute.ListTargetInstancesRequest(request) self._response = response self._metadata = metadata def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property def pages(self) -> Iterable[compute.TargetInstanceList]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response def __iter__(self) -> Iterable[compute.TargetInstance]: for page in self.pages: yield from page.items def __repr__(self) -> str: return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
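A brief usage sketch for the aggregated pager above. In practice it is returned by the generated TargetInstancesClient.aggregated_list call from google-cloud-compute; item-level iteration yields (scope, TargetInstancesScopedList) pairs and transparently fetches further pages. The project id below is a placeholder.

from google.cloud import compute_v1

# Assumed context: the generated client whose aggregated_list() returns this pager.
client = compute_v1.TargetInstancesClient()
pager = client.aggregated_list(project="my-project")  # placeholder project id

for scope, scoped_list in pager:  # __iter__ yields (key, TargetInstancesScopedList)
    for target_instance in scoped_list.target_instances:
        print(scope, target_instance.name)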
__init__
Instantiate the pager.

Args:
    method (Callable): The method that was originally called, and
        which instantiated this pager.
    request (google.cloud.compute_v1.types.ListTargetInstancesRequest):
        The initial request object.
    response (google.cloud.compute_v1.types.TargetInstanceList):
        The initial response object.
    metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with
        the request as metadata.
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from typing import ( Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional, ) from google.cloud.compute_v1.types import compute class AggregatedListPager: """A pager for iterating through ``aggregated_list`` requests. This class thinly wraps an initial :class:`google.cloud.compute_v1.types.TargetInstanceAggregatedList` object, and provides an ``__iter__`` method to iterate through its ``items`` field. If there are more pages, the ``__iter__`` method will make additional ``AggregatedList`` requests and continue to iterate through the ``items`` field on the corresponding responses. All the usual :class:`google.cloud.compute_v1.types.TargetInstanceAggregatedList` attributes are available on the pager. If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. """ def __init__( self, method: Callable[..., compute.TargetInstanceAggregatedList], request: compute.AggregatedListTargetInstancesRequest, response: compute.TargetInstanceAggregatedList, *, metadata: Sequence[Tuple[str, str]] = () ): """Instantiate the pager. Args: method (Callable): The method that was originally called, and which instantiated this pager. request (google.cloud.compute_v1.types.AggregatedListTargetInstancesRequest): The initial request object. response (google.cloud.compute_v1.types.TargetInstanceAggregatedList): The initial response object. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ self._method = method self._request = compute.AggregatedListTargetInstancesRequest(request) self._response = response self._metadata = metadata def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property def pages(self) -> Iterable[compute.TargetInstanceAggregatedList]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response def __iter__(self) -> Iterable[Tuple[str, compute.TargetInstancesScopedList]]: for page in self.pages: yield from page.items.items() def get(self, key: str) -> Optional[compute.TargetInstancesScopedList]: return self._response.items.get(key) def __repr__(self) -> str: return "{0}<{1!r}>".format(self.__class__.__name__, self._response) class ListPager: """A pager for iterating through ``list`` requests. This class thinly wraps an initial :class:`google.cloud.compute_v1.types.TargetInstanceList` object, and provides an ``__iter__`` method to iterate through its ``items`` field. If there are more pages, the ``__iter__`` method will make additional ``List`` requests and continue to iterate through the ``items`` field on the corresponding responses. All the usual :class:`google.cloud.compute_v1.types.TargetInstanceList` attributes are available on the pager. 
If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. """ # MASKED: __init__ function (lines 113-136) def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property def pages(self) -> Iterable[compute.TargetInstanceList]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response def __iter__(self) -> Iterable[compute.TargetInstance]: for page in self.pages: yield from page.items def __repr__(self) -> str: return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
def __init__(
    self,
    method: Callable[..., compute.TargetInstanceList],
    request: compute.ListTargetInstancesRequest,
    response: compute.TargetInstanceList,
    *,
    metadata: Sequence[Tuple[str, str]] = ()
):
    """Instantiate the pager.

    Args:
        method (Callable): The method that was originally called, and
            which instantiated this pager.
        request (google.cloud.compute_v1.types.ListTargetInstancesRequest):
            The initial request object.
        response (google.cloud.compute_v1.types.TargetInstanceList):
            The initial response object.
        metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with
            the request as metadata.
    """
    self._method = method
    self._request = compute.ListTargetInstancesRequest(request)
    self._response = response
    self._metadata = metadata
113
136
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from typing import ( Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional, ) from google.cloud.compute_v1.types import compute class AggregatedListPager: """A pager for iterating through ``aggregated_list`` requests. This class thinly wraps an initial :class:`google.cloud.compute_v1.types.TargetInstanceAggregatedList` object, and provides an ``__iter__`` method to iterate through its ``items`` field. If there are more pages, the ``__iter__`` method will make additional ``AggregatedList`` requests and continue to iterate through the ``items`` field on the corresponding responses. All the usual :class:`google.cloud.compute_v1.types.TargetInstanceAggregatedList` attributes are available on the pager. If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. """ def __init__( self, method: Callable[..., compute.TargetInstanceAggregatedList], request: compute.AggregatedListTargetInstancesRequest, response: compute.TargetInstanceAggregatedList, *, metadata: Sequence[Tuple[str, str]] = () ): """Instantiate the pager. Args: method (Callable): The method that was originally called, and which instantiated this pager. request (google.cloud.compute_v1.types.AggregatedListTargetInstancesRequest): The initial request object. response (google.cloud.compute_v1.types.TargetInstanceAggregatedList): The initial response object. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ self._method = method self._request = compute.AggregatedListTargetInstancesRequest(request) self._response = response self._metadata = metadata def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property def pages(self) -> Iterable[compute.TargetInstanceAggregatedList]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response def __iter__(self) -> Iterable[Tuple[str, compute.TargetInstancesScopedList]]: for page in self.pages: yield from page.items.items() def get(self, key: str) -> Optional[compute.TargetInstancesScopedList]: return self._response.items.get(key) def __repr__(self) -> str: return "{0}<{1!r}>".format(self.__class__.__name__, self._response) class ListPager: """A pager for iterating through ``list`` requests. This class thinly wraps an initial :class:`google.cloud.compute_v1.types.TargetInstanceList` object, and provides an ``__iter__`` method to iterate through its ``items`` field. If there are more pages, the ``__iter__`` method will make additional ``List`` requests and continue to iterate through the ``items`` field on the corresponding responses. All the usual :class:`google.cloud.compute_v1.types.TargetInstanceList` attributes are available on the pager. 
If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. """ def __init__( self, method: Callable[..., compute.TargetInstanceList], request: compute.ListTargetInstancesRequest, response: compute.TargetInstanceList, *, metadata: Sequence[Tuple[str, str]] = () ): """Instantiate the pager. Args: method (Callable): The method that was originally called, and which instantiated this pager. request (google.cloud.compute_v1.types.ListTargetInstancesRequest): The initial request object. response (google.cloud.compute_v1.types.TargetInstanceList): The initial response object. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ self._method = method self._request = compute.ListTargetInstancesRequest(request) self._response = response self._metadata = metadata def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property def pages(self) -> Iterable[compute.TargetInstanceList]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response def __iter__(self) -> Iterable[compute.TargetInstance]: for page in self.pages: yield from page.items def __repr__(self) -> str: return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
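A corresponding sketch for the non-aggregated ListPager above: it is returned by the client's zonal list call and can be consumed item by item or page by page via the pages property. Project and zone ids are placeholders.

from google.cloud import compute_v1

client = compute_v1.TargetInstancesClient()
pager = client.list(project="my-project", zone="us-central1-a")  # placeholder ids

# Item-level iteration crosses page boundaries transparently.
names = [target_instance.name for target_instance in pager]

# Page-level iteration exposes each raw TargetInstanceList response.
for page in pager.pages:
    print(len(page.items))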
__init__
Dataset that loads tensors via a csv containing file paths to audio files and transcripts separated by
a comma. Each new line is a different sample. Example below:

/path/to/audio.wav,/path/to/audio.txt
...

:param audio_conf: Dictionary containing the sample rate, window and the window length/stride in seconds
:param manifest_filepath: Path to manifest csv as described above
:param labels: String containing all the possible characters to map to
:param normalize: Apply mean and standard deviation normalization to audio tensor
:param speed_volume_perturb(default False): Apply random tempo and gain perturbations
:param spec_augment(default False): Apply simple spectral augmentation to mel spectrograms
import os import subprocess from tempfile import NamedTemporaryFile from torch.distributed import get_rank from torch.distributed import get_world_size from torch.utils.data.sampler import Sampler import librosa import numpy as np import scipy.signal import torch from scipy.io.wavfile import read import math from torch.utils.data import DataLoader from torch.utils.data import Dataset from .spec_augment import spec_augment from hangul_utils import split_syllable_char, split_syllables, join_jamos windows = {'hamming': scipy.signal.hamming, 'hann': scipy.signal.hann, 'blackman': scipy.signal.blackman, 'bartlett': scipy.signal.bartlett} def load_audio(path): # sample_rate, sound = read(path) sound, sr = librosa.load(path, sr=16000) # librosa.output.write_wav('org.wav', sound, sr) # print('save 1') # sound = sound.astype('float32') / 32767 # normalize audio sound = librosa.util.normalize(sound) # normalize audio sound = sound.astype('float32') # librosa.output.write_wav('norm.wav', sound, sr) # print('save 2') if len(sound.shape) > 1: if sound.shape[1] == 1: sound = sound.squeeze() else: sound = sound.mean(axis=1) # multiple channels, average return sound class AudioParser(object): def parse_transcript(self, transcript_path): """ :param transcript_path: Path where transcript is stored from the manifest file :return: Transcript in training/testing format """ raise NotImplementedError def parse_audio(self, audio_path): """ :param audio_path: Path where audio is stored from the manifest file :return: Audio in training/testing format """ raise NotImplementedError class NoiseInjection(object): def __init__(self, path=None, sample_rate=16000, noise_levels=(0, 0.5)): """ Adds noise to an input signal with specific SNR. Higher the noise level, the more noise added. 
Modified code from https://github.com/willfrey/audio/blob/master/torchaudio/transforms.py """ if path is not None and not os.path.exists(path): print("Directory doesn't exist: {}".format(path)) raise IOError self.paths = path is not None and librosa.util.find_files(path) self.sample_rate = sample_rate self.noise_levels = noise_levels def inject_noise(self, data): noise_path = np.random.choice(self.paths) noise_level = np.random.uniform(*self.noise_levels) return self.inject_noise_sample(data, noise_path, noise_level) def inject_noise_sample(self, data, noise_path, noise_level): noise_len = get_audio_length(noise_path) data_len = len(data) / self.sample_rate noise_start = np.random.rand() * (noise_len - data_len) noise_end = noise_start + data_len noise_dst = audio_with_sox(noise_path, self.sample_rate, noise_start, noise_end) assert len(data) == len(noise_dst) noise_energy = np.sqrt(noise_dst.dot(noise_dst) / noise_dst.size) data_energy = np.sqrt(data.dot(data) / data.size) data += noise_level * noise_dst * data_energy / noise_energy return data class SpectrogramParser(AudioParser): def __init__(self, audio_conf, normalize=False, speed_volume_perturb=False, spec_augment=False): """ Parses audio file into spectrogram with optional normalization and various augmentations :param audio_conf: Dictionary containing the sample rate, window and the window length/stride in seconds :param normalize(default False): Apply standard mean and deviation normalization to audio tensor :param speed_volume_perturb(default False): Apply random tempo and gain perturbations :param spec_augment(default False): Apply simple spectral augmentation to mel spectograms """ super(SpectrogramParser, self).__init__() self.window_stride = audio_conf['window_stride'] self.window_size = audio_conf['window_size'] self.sample_rate = audio_conf['sample_rate'] self.window = windows.get(audio_conf['window'], windows['hamming']) self.normalize = normalize self.speed_volume_perturb = speed_volume_perturb self.spec_augment = spec_augment self.noiseInjector = NoiseInjection(audio_conf['noise_dir'], self.sample_rate, audio_conf['noise_levels']) if audio_conf.get( 'noise_dir') is not None else None self.noise_prob = audio_conf.get('noise_prob') def parse_audio(self, audio_path,audio=None,change_speed=None): if audio is not None: y = audio elif self.speed_volume_perturb: y = load_randomly_augmented_audio(audio_path, self.sample_rate) # librosa.output.write_wav('test.wav', y, sr=16000, norm=False) # print('test') else: y = load_audio(audio_path) # librosa.output.write_wav('y1.wav', y, sr=16000) # print('save@@@@@@@@@@@@') # change audio speed if change_speed is not None: y = librosa.effects.time_stretch(y, change_speed) if self.noiseInjector: add_noise = np.random.binomial(1, self.noise_prob) if add_noise: y = self.noiseInjector.inject_noise(y) # librosa.output.write_wav('y2.wav', y, sr=16000) # print('save@@@@@@@@@@@@') # import sys # sys.exit() n_fft = int(self.sample_rate * self.window_size) win_length = n_fft hop_length = int(self.sample_rate * self.window_stride) # STFT D = librosa.stft(y, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=self.window) spect, phase = librosa.magphase(D) # S = log(S+1) spect = np.log1p(spect) spect = torch.FloatTensor(spect) if self.normalize: mean = spect.mean() std = spect.std() spect.add_(-mean) spect.div_(std) if self.spec_augment: spect = spec_augment(spect) return spect def parse_transcript(self, transcript_path): raise NotImplementedError class SpectrogramDataset(Dataset, 
SpectrogramParser): # MASKED: __init__ function (lines 172-199) def __getitem__(self, index): sample = self.ids[index] audio_path, transcript_path = sample[0], sample[1] spect = self.parse_audio(audio_path) transcript = self.parse_transcript(transcript_path) return spect, transcript def parse_transcript(self, transcript_path): with open(transcript_path, 'r', encoding='utf8') as transcript_file: # with open(transcript_path, 'r', encoding='utf-16') as transcript_file: transcript = transcript_file.read().replace('\n', '') if self.use_jamo: transcript = split_syllables(transcript) transcript = list(filter(None, [self.labels_map.get(x) for x in list(transcript)])) return transcript def __len__(self): return self.size def _collate_fn(batch): def func(p): return p[0].size(1) batch = sorted(batch, key=lambda sample: sample[0].size(1), reverse=True) longest_sample = max(batch, key=func)[0] freq_size = longest_sample.size(0) minibatch_size = len(batch) max_seqlength = longest_sample.size(1) inputs = torch.zeros(minibatch_size, 1, freq_size, max_seqlength) input_percentages = torch.FloatTensor(minibatch_size) target_sizes = torch.IntTensor(minibatch_size) targets = [] for x in range(minibatch_size): sample = batch[x] tensor = sample[0] target = sample[1] seq_length = tensor.size(1) inputs[x][0].narrow(1, 0, seq_length).copy_(tensor) input_percentages[x] = seq_length / float(max_seqlength) target_sizes[x] = len(target) targets.extend(target) targets = torch.IntTensor(targets) return inputs, targets, input_percentages, target_sizes class AudioDataLoader(DataLoader): def __init__(self, *args, **kwargs): """ Creates a data loader for AudioDatasets. """ super(AudioDataLoader, self).__init__(*args, **kwargs) self.collate_fn = _collate_fn class BucketingSampler(Sampler): def __init__(self, data_source, batch_size=1): """ Samples batches assuming they are in order of size to batch similarly sized samples together. """ super(BucketingSampler, self).__init__(data_source) self.data_source = data_source ids = list(range(0, len(data_source))) self.bins = [ids[i:i + batch_size] for i in range(0, len(ids), batch_size)] def __iter__(self): for ids in self.bins: np.random.shuffle(ids) yield ids def __len__(self): return len(self.bins) def shuffle(self, epoch): np.random.shuffle(self.bins) class DistributedBucketingSampler(Sampler): def __init__(self, data_source, batch_size=1, num_replicas=None, rank=None): """ Samples batches assuming they are in order of size to batch similarly sized samples together. 
""" super(DistributedBucketingSampler, self).__init__(data_source) if num_replicas is None: num_replicas = get_world_size() if rank is None: rank = get_rank() self.data_source = data_source self.ids = list(range(0, len(data_source))) self.batch_size = batch_size self.bins = [self.ids[i:i + batch_size] for i in range(0, len(self.ids), batch_size)] self.num_replicas = num_replicas self.rank = rank self.num_samples = int(math.ceil(len(self.bins) * 1.0 / self.num_replicas)) self.total_size = self.num_samples * self.num_replicas def __iter__(self): offset = self.rank # add extra samples to make it evenly divisible bins = self.bins + self.bins[:(self.total_size - len(self.bins))] assert len(bins) == self.total_size samples = bins[offset::self.num_replicas] # Get every Nth bin, starting from rank return iter(samples) def __len__(self): return self.num_samples def shuffle(self, epoch): # deterministically shuffle based on epoch g = torch.Generator() g.manual_seed(epoch) bin_ids = list(torch.randperm(len(self.bins), generator=g)) self.bins = [self.bins[i] for i in bin_ids] def get_audio_length(path): output = subprocess.check_output(['soxi -D \"%s\"' % path.strip()], shell=True) return float(output) def audio_with_sox(path, sample_rate, start_time, end_time): """ crop and resample the recording with sox and loads it. """ with NamedTemporaryFile(suffix=".wav") as tar_file: tar_filename = tar_file.name sox_params = "sox \"{}\" -r {} -c 1 -b 16 -e si {} trim {} ={} >/dev/null 2>&1".format(path, sample_rate, tar_filename, start_time, end_time) os.system(sox_params) y = load_audio(tar_filename) return y def augment_audio_with_sox(path, sample_rate, tempo, gain): """ Changes tempo and gain of the recording with sox and loads it. """ with NamedTemporaryFile(suffix=".wav") as augmented_file: augmented_filename = augmented_file.name sox_augment_params = ["tempo", "{:.3f}".format(tempo), "gain", "{:.3f}".format(gain)] sox_params = "sox \"{}\" -r {} -c 1 -b 16 -e si {} {} >/dev/null 2>&1".format(path, sample_rate, augmented_filename, " ".join(sox_augment_params)) os.system(sox_params) y = load_audio(augmented_filename) return y # original tempo_range=(0.85,1.15) # original gain_range=(-6,8) def load_randomly_augmented_audio(path, sample_rate=16000, tempo_range=(0.85,1.15), gain_range=(-6, 8)): """ Picks tempo and gain uniformly, applies it to the utterance by using sox utility. Returns the augmented utterance. """ low_tempo, high_tempo = tempo_range tempo_value = np.random.uniform(low=low_tempo, high=high_tempo) low_gain, high_gain = gain_range gain_value = np.random.uniform(low=low_gain, high=high_gain) audio = augment_audio_with_sox(path=path, sample_rate=sample_rate, tempo=tempo_value, gain=gain_value) return audio
def __init__(self, audio_conf, manifest_filepath, labels, normalize=False, speed_volume_perturb=False, spec_augment=False):
    """
    Dataset that loads tensors via a csv containing file paths to audio files and transcripts separated by
    a comma. Each new line is a different sample. Example below:

    /path/to/audio.wav,/path/to/audio.txt
    ...

    :param audio_conf: Dictionary containing the sample rate, window and the window length/stride in seconds
    :param manifest_filepath: Path to manifest csv as described above
    :param labels: String containing all the possible characters to map to
    :param normalize: Apply mean and standard deviation normalization to audio tensor
    :param speed_volume_perturb(default False): Apply random tempo and gain perturbations
    :param spec_augment(default False): Apply simple spectral augmentation to mel spectrograms
    """
    with open(manifest_filepath) as f:
        ids = f.readlines()
    ids = [x.strip().split(',') for x in ids]
    self.ids = ids
    self.size = len(ids)
    self.labels_map = dict([(labels[i], i) for i in range(len(labels))])
    try:
        self.use_jamo = audio_conf['use_jamo']
    except:
        self.use_jamo = False
    super(SpectrogramDataset, self).__init__(audio_conf, normalize, speed_volume_perturb, spec_augment)
172
199
import os import subprocess from tempfile import NamedTemporaryFile from torch.distributed import get_rank from torch.distributed import get_world_size from torch.utils.data.sampler import Sampler import librosa import numpy as np import scipy.signal import torch from scipy.io.wavfile import read import math from torch.utils.data import DataLoader from torch.utils.data import Dataset from .spec_augment import spec_augment from hangul_utils import split_syllable_char, split_syllables, join_jamos windows = {'hamming': scipy.signal.hamming, 'hann': scipy.signal.hann, 'blackman': scipy.signal.blackman, 'bartlett': scipy.signal.bartlett} def load_audio(path): # sample_rate, sound = read(path) sound, sr = librosa.load(path, sr=16000) # librosa.output.write_wav('org.wav', sound, sr) # print('save 1') # sound = sound.astype('float32') / 32767 # normalize audio sound = librosa.util.normalize(sound) # normalize audio sound = sound.astype('float32') # librosa.output.write_wav('norm.wav', sound, sr) # print('save 2') if len(sound.shape) > 1: if sound.shape[1] == 1: sound = sound.squeeze() else: sound = sound.mean(axis=1) # multiple channels, average return sound class AudioParser(object): def parse_transcript(self, transcript_path): """ :param transcript_path: Path where transcript is stored from the manifest file :return: Transcript in training/testing format """ raise NotImplementedError def parse_audio(self, audio_path): """ :param audio_path: Path where audio is stored from the manifest file :return: Audio in training/testing format """ raise NotImplementedError class NoiseInjection(object): def __init__(self, path=None, sample_rate=16000, noise_levels=(0, 0.5)): """ Adds noise to an input signal with specific SNR. Higher the noise level, the more noise added. 
Modified code from https://github.com/willfrey/audio/blob/master/torchaudio/transforms.py """ if path is not None and not os.path.exists(path): print("Directory doesn't exist: {}".format(path)) raise IOError self.paths = path is not None and librosa.util.find_files(path) self.sample_rate = sample_rate self.noise_levels = noise_levels def inject_noise(self, data): noise_path = np.random.choice(self.paths) noise_level = np.random.uniform(*self.noise_levels) return self.inject_noise_sample(data, noise_path, noise_level) def inject_noise_sample(self, data, noise_path, noise_level): noise_len = get_audio_length(noise_path) data_len = len(data) / self.sample_rate noise_start = np.random.rand() * (noise_len - data_len) noise_end = noise_start + data_len noise_dst = audio_with_sox(noise_path, self.sample_rate, noise_start, noise_end) assert len(data) == len(noise_dst) noise_energy = np.sqrt(noise_dst.dot(noise_dst) / noise_dst.size) data_energy = np.sqrt(data.dot(data) / data.size) data += noise_level * noise_dst * data_energy / noise_energy return data class SpectrogramParser(AudioParser): def __init__(self, audio_conf, normalize=False, speed_volume_perturb=False, spec_augment=False): """ Parses audio file into spectrogram with optional normalization and various augmentations :param audio_conf: Dictionary containing the sample rate, window and the window length/stride in seconds :param normalize(default False): Apply standard mean and deviation normalization to audio tensor :param speed_volume_perturb(default False): Apply random tempo and gain perturbations :param spec_augment(default False): Apply simple spectral augmentation to mel spectograms """ super(SpectrogramParser, self).__init__() self.window_stride = audio_conf['window_stride'] self.window_size = audio_conf['window_size'] self.sample_rate = audio_conf['sample_rate'] self.window = windows.get(audio_conf['window'], windows['hamming']) self.normalize = normalize self.speed_volume_perturb = speed_volume_perturb self.spec_augment = spec_augment self.noiseInjector = NoiseInjection(audio_conf['noise_dir'], self.sample_rate, audio_conf['noise_levels']) if audio_conf.get( 'noise_dir') is not None else None self.noise_prob = audio_conf.get('noise_prob') def parse_audio(self, audio_path,audio=None,change_speed=None): if audio is not None: y = audio elif self.speed_volume_perturb: y = load_randomly_augmented_audio(audio_path, self.sample_rate) # librosa.output.write_wav('test.wav', y, sr=16000, norm=False) # print('test') else: y = load_audio(audio_path) # librosa.output.write_wav('y1.wav', y, sr=16000) # print('save@@@@@@@@@@@@') # change audio speed if change_speed is not None: y = librosa.effects.time_stretch(y, change_speed) if self.noiseInjector: add_noise = np.random.binomial(1, self.noise_prob) if add_noise: y = self.noiseInjector.inject_noise(y) # librosa.output.write_wav('y2.wav', y, sr=16000) # print('save@@@@@@@@@@@@') # import sys # sys.exit() n_fft = int(self.sample_rate * self.window_size) win_length = n_fft hop_length = int(self.sample_rate * self.window_stride) # STFT D = librosa.stft(y, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=self.window) spect, phase = librosa.magphase(D) # S = log(S+1) spect = np.log1p(spect) spect = torch.FloatTensor(spect) if self.normalize: mean = spect.mean() std = spect.std() spect.add_(-mean) spect.div_(std) if self.spec_augment: spect = spec_augment(spect) return spect def parse_transcript(self, transcript_path): raise NotImplementedError class SpectrogramDataset(Dataset, 
SpectrogramParser): def __init__(self, audio_conf, manifest_filepath, labels, normalize=False, speed_volume_perturb=False, spec_augment=False): """ Dataset that loads tensors via a csv containing file paths to audio files and transcripts separated by a comma. Each new line is a different sample. Example below: /path/to/audio.wav,/path/to/audio.txt ... :param audio_conf: Dictionary containing the sample rate, window and the window length/stride in seconds :param manifest_filepath: Path to manifest csv as describe above :param labels: String containing all the possible characters to map to :param normalize: Apply standard mean and deviation normalization to audio tensor :param speed_volume_perturb(default False): Apply random tempo and gain perturbations :param spec_augment(default False): Apply simple spectral augmentation to mel spectograms """ with open(manifest_filepath) as f: ids = f.readlines() ids = [x.strip().split(',') for x in ids] self.ids = ids self.size = len(ids) self.labels_map = dict([(labels[i], i) for i in range(len(labels))]) try: self.use_jamo = audio_conf['use_jamo'] except: self.use_jamo = False super(SpectrogramDataset, self).__init__(audio_conf, normalize, speed_volume_perturb, spec_augment) def __getitem__(self, index): sample = self.ids[index] audio_path, transcript_path = sample[0], sample[1] spect = self.parse_audio(audio_path) transcript = self.parse_transcript(transcript_path) return spect, transcript def parse_transcript(self, transcript_path): with open(transcript_path, 'r', encoding='utf8') as transcript_file: # with open(transcript_path, 'r', encoding='utf-16') as transcript_file: transcript = transcript_file.read().replace('\n', '') if self.use_jamo: transcript = split_syllables(transcript) transcript = list(filter(None, [self.labels_map.get(x) for x in list(transcript)])) return transcript def __len__(self): return self.size def _collate_fn(batch): def func(p): return p[0].size(1) batch = sorted(batch, key=lambda sample: sample[0].size(1), reverse=True) longest_sample = max(batch, key=func)[0] freq_size = longest_sample.size(0) minibatch_size = len(batch) max_seqlength = longest_sample.size(1) inputs = torch.zeros(minibatch_size, 1, freq_size, max_seqlength) input_percentages = torch.FloatTensor(minibatch_size) target_sizes = torch.IntTensor(minibatch_size) targets = [] for x in range(minibatch_size): sample = batch[x] tensor = sample[0] target = sample[1] seq_length = tensor.size(1) inputs[x][0].narrow(1, 0, seq_length).copy_(tensor) input_percentages[x] = seq_length / float(max_seqlength) target_sizes[x] = len(target) targets.extend(target) targets = torch.IntTensor(targets) return inputs, targets, input_percentages, target_sizes class AudioDataLoader(DataLoader): def __init__(self, *args, **kwargs): """ Creates a data loader for AudioDatasets. """ super(AudioDataLoader, self).__init__(*args, **kwargs) self.collate_fn = _collate_fn class BucketingSampler(Sampler): def __init__(self, data_source, batch_size=1): """ Samples batches assuming they are in order of size to batch similarly sized samples together. 
""" super(BucketingSampler, self).__init__(data_source) self.data_source = data_source ids = list(range(0, len(data_source))) self.bins = [ids[i:i + batch_size] for i in range(0, len(ids), batch_size)] def __iter__(self): for ids in self.bins: np.random.shuffle(ids) yield ids def __len__(self): return len(self.bins) def shuffle(self, epoch): np.random.shuffle(self.bins) class DistributedBucketingSampler(Sampler): def __init__(self, data_source, batch_size=1, num_replicas=None, rank=None): """ Samples batches assuming they are in order of size to batch similarly sized samples together. """ super(DistributedBucketingSampler, self).__init__(data_source) if num_replicas is None: num_replicas = get_world_size() if rank is None: rank = get_rank() self.data_source = data_source self.ids = list(range(0, len(data_source))) self.batch_size = batch_size self.bins = [self.ids[i:i + batch_size] for i in range(0, len(self.ids), batch_size)] self.num_replicas = num_replicas self.rank = rank self.num_samples = int(math.ceil(len(self.bins) * 1.0 / self.num_replicas)) self.total_size = self.num_samples * self.num_replicas def __iter__(self): offset = self.rank # add extra samples to make it evenly divisible bins = self.bins + self.bins[:(self.total_size - len(self.bins))] assert len(bins) == self.total_size samples = bins[offset::self.num_replicas] # Get every Nth bin, starting from rank return iter(samples) def __len__(self): return self.num_samples def shuffle(self, epoch): # deterministically shuffle based on epoch g = torch.Generator() g.manual_seed(epoch) bin_ids = list(torch.randperm(len(self.bins), generator=g)) self.bins = [self.bins[i] for i in bin_ids] def get_audio_length(path): output = subprocess.check_output(['soxi -D \"%s\"' % path.strip()], shell=True) return float(output) def audio_with_sox(path, sample_rate, start_time, end_time): """ crop and resample the recording with sox and loads it. """ with NamedTemporaryFile(suffix=".wav") as tar_file: tar_filename = tar_file.name sox_params = "sox \"{}\" -r {} -c 1 -b 16 -e si {} trim {} ={} >/dev/null 2>&1".format(path, sample_rate, tar_filename, start_time, end_time) os.system(sox_params) y = load_audio(tar_filename) return y def augment_audio_with_sox(path, sample_rate, tempo, gain): """ Changes tempo and gain of the recording with sox and loads it. """ with NamedTemporaryFile(suffix=".wav") as augmented_file: augmented_filename = augmented_file.name sox_augment_params = ["tempo", "{:.3f}".format(tempo), "gain", "{:.3f}".format(gain)] sox_params = "sox \"{}\" -r {} -c 1 -b 16 -e si {} {} >/dev/null 2>&1".format(path, sample_rate, augmented_filename, " ".join(sox_augment_params)) os.system(sox_params) y = load_audio(augmented_filename) return y # original tempo_range=(0.85,1.15) # original gain_range=(-6,8) def load_randomly_augmented_audio(path, sample_rate=16000, tempo_range=(0.85,1.15), gain_range=(-6, 8)): """ Picks tempo and gain uniformly, applies it to the utterance by using sox utility. Returns the augmented utterance. """ low_tempo, high_tempo = tempo_range tempo_value = np.random.uniform(low=low_tempo, high=high_tempo) low_gain, high_gain = gain_range gain_value = np.random.uniform(low=low_gain, high=high_gain) audio = augment_audio_with_sox(path=path, sample_rate=sample_rate, tempo=tempo_value, gain=gain_value) return audio
validate_meta_info
Validate meta information

Adds 'BIDS.NA' if no BIDS info is present.
Adds 'BIDS.valid' and 'BIDS.error_message' to communicate to the user whether values are valid.

Currently, validation only checks that mandatory properties are non-empty strings.
Could add the following checks: are the values alphanumeric?
import argparse import logging import json import os import tempfile import sys import re import flywheel from .supporting_files import bidsify_flywheel, utils, templates from .supporting_files.project_tree import get_project_tree logging.basicConfig(level=logging.INFO) logger = logging.getLogger('curate-bids') def clear_meta_info(context, template): if 'info' in context and template.namespace in context['info']: del context['info'][template.namespace] def format_validation_error(err): path = '/'.join(err.path) if path: return path + ' ' + err.message return err.message # MASKED: validate_meta_info function (lines 27-78) def update_meta_info(fw, context): """ Update file information """ # Modify file if context['container_type'] == 'file': # Modify acquisition file if context['parent_container_type'] == 'acquisition': fw.set_acquisition_file_info( context['acquisition']['id'], context['file']['name'], context['file']['info'] ) # Modify project file elif context['parent_container_type'] == 'project': fw.set_project_file_info( context['project']['id'], context['file']['name'], context['file']['info'] ) # Modify session file elif context['parent_container_type'] == 'session': fw.set_session_file_info( context['session']['id'], context['file']['name'], context['file']['info'] ) else: logger.info('Cannot determine file parent container type: ' + context['parent_container_type']) # Modify project elif context['container_type'] == 'project': fw.replace_project_info(context['project']['id'], context['project']['info']) # Modify session elif context['container_type'] == 'session': fw.replace_session_info(context['session']['id'], context['session']['info']) # Modify acquisition elif context['container_type'] == 'acquisition': fw.replace_acquisition_info(context['acquisition']['id'], context['acquisition']['info']) # Cannot determine container type else: logger.info('Cannot determine container type: ' + context['container_type']) def curate_bids_dir(fw, project_id, session_id=None, reset=False, template_file=None, session_only=False): """ fw: Flywheel client project_id: project id of project to curate session_id: The optional session id to curate reset: Whether or not to reset bids info before curation template_file: The template file to use session_only: If true, then only curate the provided session """ project = get_project_tree(fw, project_id, session_id=session_id, session_only=session_only) curate_bids_tree(fw, project, reset, template_file, True) def curate_bids_tree(fw, project, reset=False, template_file=None, update=True): # Get project project_files = project.get('files', []) # Get template (for now, just use default) template = templates.DEFAULT_TEMPLATE # Check for project file if not template_file: template_filename = utils.find_custom_template(project_files) if template_filename: fd, path = tempfile.mkstemp('.json') os.close(fd) logger.info('Using project template: {0}'.format(template_filename)) fw.download_file_from_project(project['id'], template_filename, path) template_file = path if template_file: template = templates.loadTemplate(template_file) ## # Curation is now a 3-pass process # 1. Do initial template matching and updating # 2. Perform any path resolutions # 3. Send updates to server ## # 1. 
Do initial template matching and updating for context in project.context_iter(): ctype = context['container_type'] parent_ctype = context['parent_container_type'] if reset: clear_meta_info(context[ctype], template) elif context[ctype].get('info',{}).get('BIDS') == 'NA': continue if ctype == 'project': bidsify_flywheel.process_matching_templates(context, template) # Validate meta information # TODO: Improve the validator to understand what is valid for dataset_description file... # validate_meta_info(context['project']) elif ctype == 'session': bidsify_flywheel.process_matching_templates(context, template) # Add run_counter context['run_counters'] = utils.RunCounterMap() elif ctype == 'acquisition': bidsify_flywheel.process_matching_templates(context, template) elif ctype == 'file': if parent_ctype == 'project' and PROJECT_TEMPLATE_FILE_NAME_REGEX.search(context['file']['name']): # Don't BIDSIFY project template continue # Process matching context['file'] = bidsify_flywheel.process_matching_templates(context, template) # Validate meta information validate_meta_info(context['file'], template) # 2. Perform any path resolutions session = None for context in project.context_iter(): # Resolution bidsify_flywheel.process_resolvers(context, template) # 3. Send updates to server if update: for context in project.context_iter(): ctype = context['container_type'] node = context[ctype] if node.is_dirty(): update_meta_info(fw, context) def main_with_args(api_key, session_id, reset, session_only): ### Prep # Check API key - raises Error if key is invalid fw = flywheel.Flywheel(api_key) if session_id: project_id = utils.get_project_id_from_session_id(fw, session_id) else: print('Session id is required!') sys.exit(1) ### Curate BIDS project curate_bids_dir(fw, project_id, session_id, reset=reset, session_only=session_only) def main(): ### Read in arguments parser = argparse.ArgumentParser(description='BIDS Curation') parser.add_argument('--api-key', dest='api_key', action='store', required=True, help='API key') parser.add_argument('-p', dest='project_label', action='store', required=False, default=None, help='Project Label on Flywheel instance') parser.add_argument('--session', dest='session_id', action='store', required=False, default=None, help='Session ID, used to look up project if project label is not readily available') parser.add_argument('--reset', dest='reset', action='store_true', default=False, help='Reset BIDS data before running') parser.add_argument('--session-only', dest='session_only', action='store_true', default=False, help='Only curate the session identified by --session') parser.add_argument('--template-file', dest='template_file', action='store', default=None, help='Template file to use') args = parser.parse_args() ### Prep # Check API key - raises Error if key is invalid fw = flywheel.Flywheel(args.api_key) # Get project id from label if args.project_label: project_id = utils.validate_project_label(fw, args.project_label) elif args.session_id: project_id = utils.get_project_id_from_session_id(fw, args.session_id) else: print('Either project label or session id is required!') sys.exit(1) ### Curate BIDS project curate_bids_dir(fw, project_id, args.session_id, reset=args.reset, template_file=args.template_file, session_only=args.session_only) if __name__ == '__main__': main()
def validate_meta_info(container, template): """ Validate meta information Adds 'BIDS.NA' if no BIDS info present Adds 'BIDS.valid' and 'BIDS.error_message' to communicate to user if values are valid Currently, validation is only checking if mandatory properties are non-empty strings Could add the following checks: Are the values alpha numeric? """ # Get namespace namespace = template.namespace # If 'info' is NOT in container, then must not # have matched to a template, create 'info' # field with object {'BIDS': 'NA'} if 'info' not in container: container['info'] = {namespace: 'NA'} # if the namespace ('BIDS') is NOT in 'info', # then must not have matched to a template, # add {'BIDS': 'NA'} to the meta info elif namespace not in container['info']: container['info'][namespace] = 'NA' # If already assigned BIDS 'NA', then break elif container['info'][namespace] == 'NA': pass # Otherwise, iterate over keys within container else: valid = True error_message = '' # Find template templateName = container['info'][namespace].get('template') if templateName: templateDef = template.definitions.get(templateName) if templateDef: errors = template.validate(templateDef, container['info'][namespace]) if errors: valid = False error_message = '\n'.join([format_validation_error(err) for err in errors]) else: valid = False error_message += 'Unknown template: %s. ' % templateName # Assign 'valid' and 'error_message' values container['info'][namespace]['valid'] = valid container['info'][namespace]['error_message'] = error_message
27
78
import argparse import logging import json import os import tempfile import sys import re import flywheel from .supporting_files import bidsify_flywheel, utils, templates from .supporting_files.project_tree import get_project_tree logging.basicConfig(level=logging.INFO) logger = logging.getLogger('curate-bids') def clear_meta_info(context, template): if 'info' in context and template.namespace in context['info']: del context['info'][template.namespace] def format_validation_error(err): path = '/'.join(err.path) if path: return path + ' ' + err.message return err.message def validate_meta_info(container, template): """ Validate meta information Adds 'BIDS.NA' if no BIDS info present Adds 'BIDS.valid' and 'BIDS.error_message' to communicate to user if values are valid Currently, validation is only checking if mandatory properties are non-empty strings Could add the following checks: Are the values alpha numeric? """ # Get namespace namespace = template.namespace # If 'info' is NOT in container, then must not # have matched to a template, create 'info' # field with object {'BIDS': 'NA'} if 'info' not in container: container['info'] = {namespace: 'NA'} # if the namespace ('BIDS') is NOT in 'info', # then must not have matched to a template, # add {'BIDS': 'NA'} to the meta info elif namespace not in container['info']: container['info'][namespace] = 'NA' # If already assigned BIDS 'NA', then break elif container['info'][namespace] == 'NA': pass # Otherwise, iterate over keys within container else: valid = True error_message = '' # Find template templateName = container['info'][namespace].get('template') if templateName: templateDef = template.definitions.get(templateName) if templateDef: errors = template.validate(templateDef, container['info'][namespace]) if errors: valid = False error_message = '\n'.join([format_validation_error(err) for err in errors]) else: valid = False error_message += 'Unknown template: %s. 
' % templateName # Assign 'valid' and 'error_message' values container['info'][namespace]['valid'] = valid container['info'][namespace]['error_message'] = error_message def update_meta_info(fw, context): """ Update file information """ # Modify file if context['container_type'] == 'file': # Modify acquisition file if context['parent_container_type'] == 'acquisition': fw.set_acquisition_file_info( context['acquisition']['id'], context['file']['name'], context['file']['info'] ) # Modify project file elif context['parent_container_type'] == 'project': fw.set_project_file_info( context['project']['id'], context['file']['name'], context['file']['info'] ) # Modify session file elif context['parent_container_type'] == 'session': fw.set_session_file_info( context['session']['id'], context['file']['name'], context['file']['info'] ) else: logger.info('Cannot determine file parent container type: ' + context['parent_container_type']) # Modify project elif context['container_type'] == 'project': fw.replace_project_info(context['project']['id'], context['project']['info']) # Modify session elif context['container_type'] == 'session': fw.replace_session_info(context['session']['id'], context['session']['info']) # Modify acquisition elif context['container_type'] == 'acquisition': fw.replace_acquisition_info(context['acquisition']['id'], context['acquisition']['info']) # Cannot determine container type else: logger.info('Cannot determine container type: ' + context['container_type']) def curate_bids_dir(fw, project_id, session_id=None, reset=False, template_file=None, session_only=False): """ fw: Flywheel client project_id: project id of project to curate session_id: The optional session id to curate reset: Whether or not to reset bids info before curation template_file: The template file to use session_only: If true, then only curate the provided session """ project = get_project_tree(fw, project_id, session_id=session_id, session_only=session_only) curate_bids_tree(fw, project, reset, template_file, True) def curate_bids_tree(fw, project, reset=False, template_file=None, update=True): # Get project project_files = project.get('files', []) # Get template (for now, just use default) template = templates.DEFAULT_TEMPLATE # Check for project file if not template_file: template_filename = utils.find_custom_template(project_files) if template_filename: fd, path = tempfile.mkstemp('.json') os.close(fd) logger.info('Using project template: {0}'.format(template_filename)) fw.download_file_from_project(project['id'], template_filename, path) template_file = path if template_file: template = templates.loadTemplate(template_file) ## # Curation is now a 3-pass process # 1. Do initial template matching and updating # 2. Perform any path resolutions # 3. Send updates to server ## # 1. Do initial template matching and updating for context in project.context_iter(): ctype = context['container_type'] parent_ctype = context['parent_container_type'] if reset: clear_meta_info(context[ctype], template) elif context[ctype].get('info',{}).get('BIDS') == 'NA': continue if ctype == 'project': bidsify_flywheel.process_matching_templates(context, template) # Validate meta information # TODO: Improve the validator to understand what is valid for dataset_description file... 
# validate_meta_info(context['project']) elif ctype == 'session': bidsify_flywheel.process_matching_templates(context, template) # Add run_counter context['run_counters'] = utils.RunCounterMap() elif ctype == 'acquisition': bidsify_flywheel.process_matching_templates(context, template) elif ctype == 'file': if parent_ctype == 'project' and PROJECT_TEMPLATE_FILE_NAME_REGEX.search(context['file']['name']): # Don't BIDSIFY project template continue # Process matching context['file'] = bidsify_flywheel.process_matching_templates(context, template) # Validate meta information validate_meta_info(context['file'], template) # 2. Perform any path resolutions session = None for context in project.context_iter(): # Resolution bidsify_flywheel.process_resolvers(context, template) # 3. Send updates to server if update: for context in project.context_iter(): ctype = context['container_type'] node = context[ctype] if node.is_dirty(): update_meta_info(fw, context) def main_with_args(api_key, session_id, reset, session_only): ### Prep # Check API key - raises Error if key is invalid fw = flywheel.Flywheel(api_key) if session_id: project_id = utils.get_project_id_from_session_id(fw, session_id) else: print('Session id is required!') sys.exit(1) ### Curate BIDS project curate_bids_dir(fw, project_id, session_id, reset=reset, session_only=session_only) def main(): ### Read in arguments parser = argparse.ArgumentParser(description='BIDS Curation') parser.add_argument('--api-key', dest='api_key', action='store', required=True, help='API key') parser.add_argument('-p', dest='project_label', action='store', required=False, default=None, help='Project Label on Flywheel instance') parser.add_argument('--session', dest='session_id', action='store', required=False, default=None, help='Session ID, used to look up project if project label is not readily available') parser.add_argument('--reset', dest='reset', action='store_true', default=False, help='Reset BIDS data before running') parser.add_argument('--session-only', dest='session_only', action='store_true', default=False, help='Only curate the session identified by --session') parser.add_argument('--template-file', dest='template_file', action='store', default=None, help='Template file to use') args = parser.parse_args() ### Prep # Check API key - raises Error if key is invalid fw = flywheel.Flywheel(args.api_key) # Get project id from label if args.project_label: project_id = utils.validate_project_label(fw, args.project_label) elif args.session_id: project_id = utils.get_project_id_from_session_id(fw, args.session_id) else: print('Either project label or session id is required!') sys.exit(1) ### Curate BIDS project curate_bids_dir(fw, project_id, args.session_id, reset=args.reset, template_file=args.template_file, session_only=args.session_only) if __name__ == '__main__': main()
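As a quick illustration of what the validation above writes back, here is a hypothetical sketch of the metadata shapes involved; the 'BIDS' namespace matches the code, but the template name and filename are made-up values and no Flywheel template object is constructed.

namespace = 'BIDS'

# A container that never matched a template is marked 'NA' so later passes skip it.
unmatched = {}
unmatched.setdefault('info', {})[namespace] = 'NA'

# A container that matched a template gains 'valid'/'error_message' next to its BIDS
# fields, which is how problems are communicated back to the user.
matched = {'info': {namespace: {'template': 'anat_file', 'Filename': 'sub-01_T1w.nii.gz'}}}
matched['info'][namespace].update(valid=True, error_message='')

print(unmatched['info'][namespace])          # 'NA'
print(matched['info'][namespace]['valid'])   # True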
__add
:param name: series name, used for tooltip display and legend filtering.
:param x_axis: data for the x axis.
:param y_axis: data for the y axis. Each row is one data item and each column is one dimension.
    A data item is [open, close, lowest, highest] (i.e. [opening, closing, lowest, highest value]).
:param kwargs:
# coding=utf-8 from pyecharts.chart import Chart def kline_tooltip_formatter(params): text = ( params[0].seriesName + "<br/>" + "- open:" + params[0].data[1] + "<br/>" + "- close:" + params[0].data[2] + "<br/>" + "- lowest:" + params[0].data[3] + "<br/>" + "- highest:" + params[0].data[4] ) return text class Kline(Chart): """ <<< K 线图 >>> 红涨蓝跌 """ def __init__(self, title="", subtitle="", **kwargs): super(Kline, self).__init__(title, subtitle, **kwargs) def add(self, *args, **kwargs): self.__add(*args, **kwargs) return self # MASKED: __add function (lines 39-77)
def __add(self, name, x_axis, y_axis, **kwargs): """ :param name: 系列名称,用于 tooltip 的显示,legend 的图例筛选。 :param x_axis: x 坐标轴数据。 :param y_axis: y 坐标轴数据。数据中,每一行是一个『数据项』,每一列属于一个『维度』。 数据项具体为 [open, close, lowest, highest] (即:[开盘值, 收盘值, 最低值, 最高值])。 :param kwargs: """ kwargs.update(type="candlestick", x_axis=x_axis) if "tooltip_formatter" not in kwargs: kwargs["tooltip_formatter"] = kline_tooltip_formatter if "tooltip_trigger" not in kwargs: kwargs["tooltip_trigger"] = "axis" chart = self._get_all_options(**kwargs) xaxis, yaxis = chart["xy_axis"] self._option.update(xAxis=xaxis, yAxis=yaxis) self._option.get("xAxis")[0]["scale"] = True self._option.get("yAxis")[0]["scale"] = True self._option.get("yAxis")[0]["splitArea"] = {"show": True} self._option.get("legend")[0].get("data").append(name) self._option.get("series").append( { "type": "candlestick", "name": name, "data": y_axis, "markPoint": chart["mark_point"], "markLine": chart["mark_line"], "seriesId": self._option.get("series_id"), } ) self._config_components(**kwargs)
39
77
# coding=utf-8 from pyecharts.chart import Chart def kline_tooltip_formatter(params): text = ( params[0].seriesName + "<br/>" + "- open:" + params[0].data[1] + "<br/>" + "- close:" + params[0].data[2] + "<br/>" + "- lowest:" + params[0].data[3] + "<br/>" + "- highest:" + params[0].data[4] ) return text class Kline(Chart): """ <<< K 线图 >>> 红涨蓝跌 """ def __init__(self, title="", subtitle="", **kwargs): super(Kline, self).__init__(title, subtitle, **kwargs) def add(self, *args, **kwargs): self.__add(*args, **kwargs) return self def __add(self, name, x_axis, y_axis, **kwargs): """ :param name: 系列名称,用于 tooltip 的显示,legend 的图例筛选。 :param x_axis: x 坐标轴数据。 :param y_axis: y 坐标轴数据。数据中,每一行是一个『数据项』,每一列属于一个『维度』。 数据项具体为 [open, close, lowest, highest] (即:[开盘值, 收盘值, 最低值, 最高值])。 :param kwargs: """ kwargs.update(type="candlestick", x_axis=x_axis) if "tooltip_formatter" not in kwargs: kwargs["tooltip_formatter"] = kline_tooltip_formatter if "tooltip_trigger" not in kwargs: kwargs["tooltip_trigger"] = "axis" chart = self._get_all_options(**kwargs) xaxis, yaxis = chart["xy_axis"] self._option.update(xAxis=xaxis, yAxis=yaxis) self._option.get("xAxis")[0]["scale"] = True self._option.get("yAxis")[0]["scale"] = True self._option.get("yAxis")[0]["splitArea"] = {"show": True} self._option.get("legend")[0].get("data").append(name) self._option.get("series").append( { "type": "candlestick", "name": name, "data": y_axis, "markPoint": chart["mark_point"], "markLine": chart["mark_line"], "seriesId": self._option.get("series_id"), } ) self._config_components(**kwargs)
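A minimal usage sketch for the add() wrapper above, in the pyecharts 0.x style this file targets; the import path is an assumption, and the dates and OHLC values are invented for illustration.

from pyecharts import Kline  # 0.x import path (assumption)

# Each data item is [open, close, lowest, highest], as the docstring describes.
dates = ['2017/7/{}'.format(d) for d in range(1, 6)]
ohlc = [
    [2320.26, 2320.26, 2287.30, 2362.94],
    [2300.00, 2291.30, 2288.26, 2308.38],
    [2295.35, 2346.50, 2295.35, 2346.92],
    [2347.22, 2358.98, 2337.35, 2363.80],
    [2360.75, 2382.48, 2347.89, 2383.76],
]

kline = Kline('Kline example')
kline.add('daily', dates, ohlc)   # dispatches to __add via add()
kline.render('kline.html')        # writes the chart to an HTML file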
PsdArray2Noise_1d
Generates a noise pattern whose Power Spectral Density is given by Psd.

Parameters
---------------------
Psd : 1d array
    Contains the numeric Psd (treated as an evenly spaced array)
Semiaxis :
    0 : does nothing
    1 : halves Psd, then mirrors the halved part onto the left (negative) frequencies, producing an output as long as Psd
    2 : replicates all of Psd for the left (negative) frequencies as well, producing an output twice as long as Psd
Real : boolean
    If True, the real part of the output is returned (default)

Returns:
---------------------
An array of the same length as Psd
# -*- coding: utf-8 -*- """ Created on Thu Jul 07 14:08:31 2016 @author: Mic """ from __future__ import division from wiselib2.must import * import numpy as np import wiselib2.Rayman as rm Gauss1d = lambda x ,y : None from scipy import interpolate as interpolate from matplotlib import pyplot as plt class PsdFuns: ''' Ensemble of possible Psd Functions. Each element is a callable Psd. Most used are PsdFuns.PowerLaw(x,a,b) PsdFuns.Interp(x, xData, yData) ''' @staticmethod def Flat(x, *args): N = len(x) return np.zeros([1,N]) +1 @staticmethod def PowerLaw(x,a,b): return a*x**b @staticmethod def Gaussian(x,sigma, x0=0): return np.exp(-0.5 * (x-x0)**2/sigma**2) @staticmethod def Interp(x, xData, yData): f = interpolate.interp1d(xData, yData) return f(x) def PsdFun2Noise_1d(N,dx, PsdFun, PsdArgs): ''' Generates a noise pattern based an the Power spectral density returned by PsdFun ''' x = np.arange(0,N//2+1, dx) yHalf = PsdFun(x, *PsdArgs) y = Psd2NoisePattern_1d(yHalf, Semiaxis = True ) return x,y #============================================================================ # FUN: PsdArray2Noise_1d_v2 #============================================================================ def PsdArray2Noise_1d_v2(f_in, Psd_in, L_mm,N): ''' Returns meters ''' from scipy import interpolate log=np.log fft = np.fft.fft fftshift = np.fft.fftshift ff = f_in yy = Psd_in L = L_mm N = int(N) N2 = int(N//2) L =300 # (mm) L_um = L*1e3 L_nm = L*1e6 fMin = 1/L_um ##vecchia riga ##fSpline = (np.array(range(N2))+1)/L_um # um^-1 fSpline = np.arange(N2)/N2 * (max(ff) - min(ff)) + min(ff) fun = interpolate.splrep(log(ff), log(yy), s=2) yPsd_log = interpolate.splev(log(fSpline), fun) ySpline = np.exp(yPsd_log) yPsd = ySpline # tolgo yPsd[fSpline<ff[0]] = 200 n = len(yPsd) plt.plot(fSpline, yPsd,'-') plt.plot(ff, yy,'x') plt.legend(['ySpline','Data']) ax = plt.axes() #ax.set_yscale('log') #ax.set_xscale('log') #% controllo RMS integrando la yPsd import scipy.integrate as integrate RMS = np.sqrt(integrate.trapz(yPsd, fSpline/1000)) #% Modo Manfredda style #yPsdNorm = np.sqrt(yPsd/L_um/1000) #yPsdNorm_reverse = yPsdNorm[::-1] yPsd_reverse = yPsd[::-1] ell= 1/(fSpline[1] - fSpline[0]) if N%2 == 0: yPsd2 = np.hstack((yPsd_reverse ,0,yPsd[0:-1])) else: yPsd2 = np.hstack((yPsd_reverse ,0,yPsd)) ##yPsd2Norm = np.sqrt(yPsd2/ell/1000/2) yPsd2Norm = np.sqrt(yPsd2/ell/1000) n_ = len(yPsd2) print('len(yPsd2) = %0.2d' % len(yPsd2Norm)) phi = 2*np.pi * np.random.rand(n_) r = np.exp(1j*phi) yPsd2Norm_ = fftshift(yPsd2Norm) #yPsd2Norm_[len(yPsd2Norm_)//2] = 0 yRaf = np.fft.fft(r*yPsd2Norm_) yRaf = np.real(yRaf) print('Rms = %0.2e nm' % np.std(yRaf)) plt.plot(yPsd2Norm_) print('max yPsd_ = %d nm' % max(yPsd2)) print('max yPsd2Norm = %0.4f nm' % max(yPsd2Norm)) print('Rms yRaf2 = %0.2e nm' % np.std(yRaf)) return yRaf * 1e-9 #============================================================================ # FUN: Psd2Noise #============================================================================ # MASKED: PsdArray2Noise_1d function (lines 136-177) Psd2Noise_1d = PsdArray2Noise_1d #============================================================================ # FUN: NoNoise_1d #============================================================================ def NoNoise_1d(N, *args): return np.zeros([1,N]) #============================================================================ # FUN: GaussianNoise_1d #============================================================================ def GaussianNoise_1d(N,dx, Sigma): ''' PSD(f) = np.exp(-0.5^f/Sigma^2) 
''' x = np.linspace( - N//2 *dx, N//2-1 * dx,N) y = np.exp(-0.5*x**2/Sigma**2) return Psd2NoisePattern_1d(y) #============================================================================ # FUN: PowerLawNoise_1d #============================================================================ def PowerLawNoise_1d(N, dx, a, b): ''' PSD(x) = a*x^b ''' x = np.arange(0,N//2+1, dx) yHalf = a * x**b # y = np.hstack((yHalf[-1:0:-1], 0, yHalf[1:-1])) return Psd2NoisePattern_1d(y, Semiaxis = True) #============================================================================ # FUN: CustomNoise_1d #============================================================================ def CustomNoise_1d(N, dx, xPsd, yPsd): xPsd_, yPsd_ = rm.FastResample1d(xPsd, yPsd,N) return Psd2NoisePattern_1d(yPsd_, Semiaxis = True) #============================================================================ # CLASS: NoiseGenerator #============================================================================ class PsdGenerator: NoNoise = staticmethod(NoNoise_1d) Gauss = staticmethod(GaussianNoise_1d) PowerLaw = staticmethod(PowerLawNoise_1d) NumericArray = staticmethod(CustomNoise_1d) #============================================================================ # FUN: FitPowerLaw #============================================================================ def FitPowerLaw(x,y): ''' Fits the input data in the form y = a*x^b returns a,b ''' import scipy.optimize as optimize fFit = lambda p, x: p[0] * x ** p[1] fErr = lambda p, x, y: (y - fFit(p, x)) p0 = [max(y), -1.0] out = optimize.leastsq(fErr, p0, args=(x, y), full_output=1) pOut = out[0] b = pOut[1] a = pOut[0] # indexErr = np.np.sqrt( covar[0][0] ) # ampErr = np.np.sqrt( covar[1][1] ) * amp return a,b #============================================================================== # CLASS: RoughnessMaker #============================================================================== class RoughnessMaker(object): class Options(): FIT_NUMERIC_DATA_WITH_POWER_LAW = True AUTO_ZERO_MEAN_FOR_NUMERIC_DATA = True AUTO_FILL_NUMERIC_DATA_WITH_ZERO = True AUTO_RESET_CUTOFF_ON_PSDTYPE_CHANGE = True def __init__(self): self.PsdType = PsdFuns.PowerLaw self.PsdParams = np.array([1,1]) self._IsNumericPsdInFreq = None self.CutoffLowHigh = [None, None] self.ProfileScaling = 1 return None @property def PsdType(self): return self._PsdType @PsdType.setter def PsdType(self, Val): ''' Note: each time that the Property value is set, self.CutoffLowHigh is reset, is specified by options ''' self. _PsdType = Val if self.Options.AUTO_RESET_CUTOFF_ON_PSDTYPE_CHANGE == True: self.PsdCutoffLowHigh = [None, None] #====================================================================== # FUN: PdfEval #====================================================================== def PsdEval(self, N, df, CutoffLowHigh = [None, None]): ''' Evals the PSD in the range [0 - N*df] It's good custom to have PSD[0] = 0, so that the noise pattern is zero-mean. Parameters: ---------------------- N : int #of samples df : float spacing of spatial frequencies (df=1/TotalLength) CutoffLowHigh : [LowCutoff, HighCutoff] if >0, then Psd(f<Cutoff) is set to 0. 
if None, then LowCutoff = min() Returns : fAll, yPsdAll ---------------------- fAll : 1d array contains the spatial frequencies yPsd : 1d array contains the Psd ''' ''' The Pdf is evaluated only within LowCutoff and HoghCutoff If the Pdf is PsdFuns.Interp, then LowCutoff and HighCutoff are automatically set to min and max values of the experimental data ''' StrMessage = '' def GetInRange(fAll, LowCutoff, HighCutoff): _tmpa = fAll >= LowCutoff _tmpb = fAll <= HighCutoff fMid_Pos = np.all([_tmpa, _tmpb],0) fMid = fAll[fMid_Pos] return fMid_Pos, fMid LowCutoff, HighCutoff = CutoffLowHigh fMin = 0 fMax = (N-1)*df fAll = np.linspace(0, fMax, N) yPsdAll = fAll* 0 # init LowCutoff = 0 if LowCutoff is None else LowCutoff HighCutoff = N*df if HighCutoff is None else HighCutoff # Numeric PSD # Note: by default returned yPsd is always 0 outside the input data range if self.PsdType == PsdFuns.Interp: # Use Auto-Fit + PowerLaw if self.Options.FIT_NUMERIC_DATA_WITH_POWER_LAW == True: xFreq,y = self.NumericPsdGetXY() p = FitPowerLaw(1/xFreq,y) _PsdParams = p[0], -p[1] LowCutoff = np.amin(self._PsdNumericX) HighCutoff = np.amin(self._PsdNumericX) fMid_Pos, fMid = GetInRange(fAll, LowCutoff, HighCutoff) yPsd = PsdFuns.PowerLaw(fMid, *_PsdParams ) # Use Interpolation else: # check Cutoff LowVal = np.amin(self._PsdNumericX) HighVal = np.amax(self._PsdNumericX) LowCutoff = LowVal if LowCutoff <= LowVal else LowCutoff HighCutoff = HighVal if HighCutoff >= HighVal else HighCutoff # Get the list of good frequency values (fMid) and their positions # (fMid_Pos) fMid_Pos, fMid = GetInRange(fAll, LowCutoff, HighCutoff) ##yPsd = self.PsdType(fMid, *self.PsdParams) ## non funziona, rimpiazzo a mano yPsd = PsdFuns.Interp(fMid, self._PsdNumericX, self._PsdNumericY) # Analytical Psd else: fMid_Pos, fMid = GetInRange(fAll, LowCutoff, HighCutoff) yPsd = self.PsdType(fMid, *self.PsdParams) # copying array subset yPsdAll[fMid_Pos] = yPsd return fAll, yPsdAll #====================================================================== # FUN: _FitNumericPsdWithPowerLaw #====================================================================== # in disusos def _FitNumericPsdWithPowerLaw(self): x,y = self.NumericPsdGetXY() if self._IsNumericPsdInFreq == True: p = FitPowerLaw(1/x,y) self.PsdParams = p[0], -p[1] else: p = FitPowerLaw(x,y) self.PsdParams = p[0], p[1] #====================================================================== # FUN: MakeProfile #====================================================================== def MakeProfile(self, L,N): ''' Evaluates the psd according to .PsdType, .PsdParams and .Options directives Returns an evenly-spaced array. If PsdType = NumericArray, linear interpolation is performed. :PARAM: N: # of samples :PARAM: dx: grid spacing (spatial frequency) returns: 1d arr ''' if self.PsdType == PsdFuns.Interp: # chiama codice ad hoc L_mm = L*1e3 yRoughness = PsdArray2Noise_1d_v2(self._PsdNumericX, self._PsdNumericY, L_mm, N) else: print('Irreversible error. 
The code was not completed to handle this instance') return yRoughness * self.ProfileScaling # f, yPsd = self.PsdEval(N//2 + 1,df) # Special case # if self.Options.FIT_NUMERIC_DATA_WITH_POWER_LAW == True: # self.PsdParams = list(FitPowerLaw(*self.NumericPsdGetXY())) # yPsd = PsdFuns.PowerLaw(x, *self.PsdParams) # else: # general calse # yPsd = self.PsdType(x, *self.PsdParams) # yRoughness = Psd2Noise_1d(yPsd, N, Semiaxis = True) # x = np.linspace(0, N*dx,N) # # Special case # if self.Options.FIT_NUMERIC_DATA_WITH_POWER_LAW == True: # self.PsdParams = list(FitPowerLaw(*self.NumericPsdGetXY())) # y = PowerLawNoise_1d(N, dx, *self.PsdParams) # else: # general calse # y = self.PsdType(N,dx, *self.PsdParams) # return y Generate = MakeProfile #====================================================================== # FUN: NumericPsdSetXY #====================================================================== def NumericPsdSetXY(self,x,y): self._PsdNumericX = x self._PsdNumericY = y #====================================================================== # FUN: NumericPsdGetXY #====================================================================== def NumericPsdGetXY(self): try: return self._PsdNumericX, self._PsdNumericY except: print('Error in RoughnessMaker.NumericPsdGetXY. Maybe the data file was not properly loaded') #====================================================================== # FUN: NumericPsdLoadXY #====================================================================== def NumericPsdLoadXY(self, FilePath, xScaling = 1, yScaling = 1 , xIsSpatialFreq = True): ''' @TODO: specificare formati e tipi di file Parameters ---------------------------- xIsSpatialFreq : bool true If the first column (Read_x_values) contains spatial frequencies. False if it contains lenghts. Default = True xScaling, yScaling: floats Read_x_values => Read_x_values * xScaling Read_y_values => Read_y_values * yScaling Sometimes, properly setting the x and y scaling values may be confusing (although just matter of high-school considerations). On this purpose, the property .RoughnessMaker.ProfileScaling property can be used also..ProfileScaling is the scale factor that acts on the output of MakeProfile() function only. 
remarks -------- pippo ''' try: self._IsNumericPsdInFreq = xIsSpatialFreq s = np.loadtxt(FilePath) x = s[:,0] y = s[:,1] x = x * xScaling y = y * yScaling # inversion of x-axis if not spatial frequencies if xIsSpatialFreq == False: f = 1/x else: f = x # array sorting i = np.argsort(f) f = f[i] y = y[i] # I set the Cutoff value of the class according to available data self.PsdCutoffLowHigh = [np.amin, np.amax(f)] # I set class operating variables self.PsdType = PsdFuns.Interp self.PsdParams = [f,y] # Auto-set # fill 0-value (DC Component) # if self.Options.AUTO_FILL_NUMERIC_DATA_WITH_ZERO == True: # if np.amin(x >0): # x = np.insert(x,0,0) # y = np.insert(y,0,0) # 0 in psd => 0-mean value in the noise pattern # sync other class values self.NumericPsdSetXY(f, y) except: pass def Generate(self, N = None, dx = None, CutoffLowHigh = [None, None]): ''' Parameters N: # of output samples dx: step of the x axis Note: generates an evenly spaced array ''' L = dx * N df = 1/L fPsd, yPsd = self.PsdEval(N//2 +1 , df = df, CutoffLowHigh = CutoffLowHigh ) h = Psd2Noise_1d(yPsd, Semiaxis = True) return h #====================================================================== # FUN: NumericPsdCheck #====================================================================== def NumericPsdCheck(self, N, L): df = 1/L # Stored data ff,yy = self.NumericPsdGetXY() # Evaluated data fPsd, yPsd = self.PsdEval(N, df) plt.plot(fPsd, np.log10(yPsd),'x') plt.plot(ff, np.log10(yy),'.r') plt.legend(['Evaluated data', 'Stored data']) plt.suptitle('Usage of stored data (PSD)') fMax = df*(N//2) fMin = df StrMsg = '' _max = np.max(ff) _min = np.min(ff) print('fMax query = %0.1e m^-1' % fMax ) print('fMax data= %0.1e m^-1 = %0.2e um^-1' % (_max, (_max * 1e6) )) print('fMin query= %0.1e m^-1' % fMin ) print('fMin data= %0.1e m^-1 = %0.2e um^-1' % (_min, (_min * 1e6) )) return StrMsg
def PsdArray2Noise_1d(PsdArray, N, Semiaxis = True, Real = True): ''' Generates a noise pattern whose Power Spectral density is given by Psd. Parameters --------------------- Psd : 1d array Contains the numeric Psd (treated as evenly spaced array) Semiaxis : 0 : does nothing 1 : halvens Pds, then replicates the halven part for left frequencies, producing an output as long as Psd 2 : replicates all Pds for lef frequencies as well, producing an output twice as long as Psd Real : boolean If True, the real part of the output is returned (default) Returns: --------------------- An array of the same length of Psd ''' if Semiaxis == True: yHalf = PsdArray PsdArrayNew = np.hstack((yHalf[-1:0:-1], yHalf)) idelta = len(PsdArrayNew) - N if idelta == 1:# piu lungo PsdArrayNew = PsdArrayNew[0:-1] # uguale elif idelta == 0: pass else: print('Error! len(PsdArrayNew) - len(PsdArray) = %0d' % idelta) y = np.fft.fftshift(PsdArrayNew) r = 2*np.pi * np.random.rand(len(PsdArrayNew)) f = np.fft.ifft(y * np.exp(1j*r)) if Real: return np.real(f) else: return f
136
177
# -*- coding: utf-8 -*- """ Created on Thu Jul 07 14:08:31 2016 @author: Mic """ from __future__ import division from wiselib2.must import * import numpy as np import wiselib2.Rayman as rm Gauss1d = lambda x ,y : None from scipy import interpolate as interpolate from matplotlib import pyplot as plt class PsdFuns: ''' Ensemble of possible Psd Functions. Each element is a callable Psd. Most used are PsdFuns.PowerLaw(x,a,b) PsdFuns.Interp(x, xData, yData) ''' @staticmethod def Flat(x, *args): N = len(x) return np.zeros([1,N]) +1 @staticmethod def PowerLaw(x,a,b): return a*x**b @staticmethod def Gaussian(x,sigma, x0=0): return np.exp(-0.5 * (x-x0)**2/sigma**2) @staticmethod def Interp(x, xData, yData): f = interpolate.interp1d(xData, yData) return f(x) def PsdFun2Noise_1d(N,dx, PsdFun, PsdArgs): ''' Generates a noise pattern based an the Power spectral density returned by PsdFun ''' x = np.arange(0,N//2+1, dx) yHalf = PsdFun(x, *PsdArgs) y = Psd2NoisePattern_1d(yHalf, Semiaxis = True ) return x,y #============================================================================ # FUN: PsdArray2Noise_1d_v2 #============================================================================ def PsdArray2Noise_1d_v2(f_in, Psd_in, L_mm,N): ''' Returns meters ''' from scipy import interpolate log=np.log fft = np.fft.fft fftshift = np.fft.fftshift ff = f_in yy = Psd_in L = L_mm N = int(N) N2 = int(N//2) L =300 # (mm) L_um = L*1e3 L_nm = L*1e6 fMin = 1/L_um ##vecchia riga ##fSpline = (np.array(range(N2))+1)/L_um # um^-1 fSpline = np.arange(N2)/N2 * (max(ff) - min(ff)) + min(ff) fun = interpolate.splrep(log(ff), log(yy), s=2) yPsd_log = interpolate.splev(log(fSpline), fun) ySpline = np.exp(yPsd_log) yPsd = ySpline # tolgo yPsd[fSpline<ff[0]] = 200 n = len(yPsd) plt.plot(fSpline, yPsd,'-') plt.plot(ff, yy,'x') plt.legend(['ySpline','Data']) ax = plt.axes() #ax.set_yscale('log') #ax.set_xscale('log') #% controllo RMS integrando la yPsd import scipy.integrate as integrate RMS = np.sqrt(integrate.trapz(yPsd, fSpline/1000)) #% Modo Manfredda style #yPsdNorm = np.sqrt(yPsd/L_um/1000) #yPsdNorm_reverse = yPsdNorm[::-1] yPsd_reverse = yPsd[::-1] ell= 1/(fSpline[1] - fSpline[0]) if N%2 == 0: yPsd2 = np.hstack((yPsd_reverse ,0,yPsd[0:-1])) else: yPsd2 = np.hstack((yPsd_reverse ,0,yPsd)) ##yPsd2Norm = np.sqrt(yPsd2/ell/1000/2) yPsd2Norm = np.sqrt(yPsd2/ell/1000) n_ = len(yPsd2) print('len(yPsd2) = %0.2d' % len(yPsd2Norm)) phi = 2*np.pi * np.random.rand(n_) r = np.exp(1j*phi) yPsd2Norm_ = fftshift(yPsd2Norm) #yPsd2Norm_[len(yPsd2Norm_)//2] = 0 yRaf = np.fft.fft(r*yPsd2Norm_) yRaf = np.real(yRaf) print('Rms = %0.2e nm' % np.std(yRaf)) plt.plot(yPsd2Norm_) print('max yPsd_ = %d nm' % max(yPsd2)) print('max yPsd2Norm = %0.4f nm' % max(yPsd2Norm)) print('Rms yRaf2 = %0.2e nm' % np.std(yRaf)) return yRaf * 1e-9 #============================================================================ # FUN: Psd2Noise #============================================================================ def PsdArray2Noise_1d(PsdArray, N, Semiaxis = True, Real = True): ''' Generates a noise pattern whose Power Spectral density is given by Psd. 
Parameters --------------------- Psd : 1d array Contains the numeric Psd (treated as evenly spaced array) Semiaxis : 0 : does nothing 1 : halvens Pds, then replicates the halven part for left frequencies, producing an output as long as Psd 2 : replicates all Pds for lef frequencies as well, producing an output twice as long as Psd Real : boolean If True, the real part of the output is returned (default) Returns: --------------------- An array of the same length of Psd ''' if Semiaxis == True: yHalf = PsdArray PsdArrayNew = np.hstack((yHalf[-1:0:-1], yHalf)) idelta = len(PsdArrayNew) - N if idelta == 1:# piu lungo PsdArrayNew = PsdArrayNew[0:-1] # uguale elif idelta == 0: pass else: print('Error! len(PsdArrayNew) - len(PsdArray) = %0d' % idelta) y = np.fft.fftshift(PsdArrayNew) r = 2*np.pi * np.random.rand(len(PsdArrayNew)) f = np.fft.ifft(y * np.exp(1j*r)) if Real: return np.real(f) else: return f Psd2Noise_1d = PsdArray2Noise_1d #============================================================================ # FUN: NoNoise_1d #============================================================================ def NoNoise_1d(N, *args): return np.zeros([1,N]) #============================================================================ # FUN: GaussianNoise_1d #============================================================================ def GaussianNoise_1d(N,dx, Sigma): ''' PSD(f) = np.exp(-0.5^f/Sigma^2) ''' x = np.linspace( - N//2 *dx, N//2-1 * dx,N) y = np.exp(-0.5*x**2/Sigma**2) return Psd2NoisePattern_1d(y) #============================================================================ # FUN: PowerLawNoise_1d #============================================================================ def PowerLawNoise_1d(N, dx, a, b): ''' PSD(x) = a*x^b ''' x = np.arange(0,N//2+1, dx) yHalf = a * x**b # y = np.hstack((yHalf[-1:0:-1], 0, yHalf[1:-1])) return Psd2NoisePattern_1d(y, Semiaxis = True) #============================================================================ # FUN: CustomNoise_1d #============================================================================ def CustomNoise_1d(N, dx, xPsd, yPsd): xPsd_, yPsd_ = rm.FastResample1d(xPsd, yPsd,N) return Psd2NoisePattern_1d(yPsd_, Semiaxis = True) #============================================================================ # CLASS: NoiseGenerator #============================================================================ class PsdGenerator: NoNoise = staticmethod(NoNoise_1d) Gauss = staticmethod(GaussianNoise_1d) PowerLaw = staticmethod(PowerLawNoise_1d) NumericArray = staticmethod(CustomNoise_1d) #============================================================================ # FUN: FitPowerLaw #============================================================================ def FitPowerLaw(x,y): ''' Fits the input data in the form y = a*x^b returns a,b ''' import scipy.optimize as optimize fFit = lambda p, x: p[0] * x ** p[1] fErr = lambda p, x, y: (y - fFit(p, x)) p0 = [max(y), -1.0] out = optimize.leastsq(fErr, p0, args=(x, y), full_output=1) pOut = out[0] b = pOut[1] a = pOut[0] # indexErr = np.np.sqrt( covar[0][0] ) # ampErr = np.np.sqrt( covar[1][1] ) * amp return a,b #============================================================================== # CLASS: RoughnessMaker #============================================================================== class RoughnessMaker(object): class Options(): FIT_NUMERIC_DATA_WITH_POWER_LAW = True AUTO_ZERO_MEAN_FOR_NUMERIC_DATA = True AUTO_FILL_NUMERIC_DATA_WITH_ZERO = True AUTO_RESET_CUTOFF_ON_PSDTYPE_CHANGE = 
True def __init__(self): self.PsdType = PsdFuns.PowerLaw self.PsdParams = np.array([1,1]) self._IsNumericPsdInFreq = None self.CutoffLowHigh = [None, None] self.ProfileScaling = 1 return None @property def PsdType(self): return self._PsdType @PsdType.setter def PsdType(self, Val): ''' Note: each time that the Property value is set, self.CutoffLowHigh is reset, is specified by options ''' self. _PsdType = Val if self.Options.AUTO_RESET_CUTOFF_ON_PSDTYPE_CHANGE == True: self.PsdCutoffLowHigh = [None, None] #====================================================================== # FUN: PdfEval #====================================================================== def PsdEval(self, N, df, CutoffLowHigh = [None, None]): ''' Evals the PSD in the range [0 - N*df] It's good custom to have PSD[0] = 0, so that the noise pattern is zero-mean. Parameters: ---------------------- N : int #of samples df : float spacing of spatial frequencies (df=1/TotalLength) CutoffLowHigh : [LowCutoff, HighCutoff] if >0, then Psd(f<Cutoff) is set to 0. if None, then LowCutoff = min() Returns : fAll, yPsdAll ---------------------- fAll : 1d array contains the spatial frequencies yPsd : 1d array contains the Psd ''' ''' The Pdf is evaluated only within LowCutoff and HoghCutoff If the Pdf is PsdFuns.Interp, then LowCutoff and HighCutoff are automatically set to min and max values of the experimental data ''' StrMessage = '' def GetInRange(fAll, LowCutoff, HighCutoff): _tmpa = fAll >= LowCutoff _tmpb = fAll <= HighCutoff fMid_Pos = np.all([_tmpa, _tmpb],0) fMid = fAll[fMid_Pos] return fMid_Pos, fMid LowCutoff, HighCutoff = CutoffLowHigh fMin = 0 fMax = (N-1)*df fAll = np.linspace(0, fMax, N) yPsdAll = fAll* 0 # init LowCutoff = 0 if LowCutoff is None else LowCutoff HighCutoff = N*df if HighCutoff is None else HighCutoff # Numeric PSD # Note: by default returned yPsd is always 0 outside the input data range if self.PsdType == PsdFuns.Interp: # Use Auto-Fit + PowerLaw if self.Options.FIT_NUMERIC_DATA_WITH_POWER_LAW == True: xFreq,y = self.NumericPsdGetXY() p = FitPowerLaw(1/xFreq,y) _PsdParams = p[0], -p[1] LowCutoff = np.amin(self._PsdNumericX) HighCutoff = np.amin(self._PsdNumericX) fMid_Pos, fMid = GetInRange(fAll, LowCutoff, HighCutoff) yPsd = PsdFuns.PowerLaw(fMid, *_PsdParams ) # Use Interpolation else: # check Cutoff LowVal = np.amin(self._PsdNumericX) HighVal = np.amax(self._PsdNumericX) LowCutoff = LowVal if LowCutoff <= LowVal else LowCutoff HighCutoff = HighVal if HighCutoff >= HighVal else HighCutoff # Get the list of good frequency values (fMid) and their positions # (fMid_Pos) fMid_Pos, fMid = GetInRange(fAll, LowCutoff, HighCutoff) ##yPsd = self.PsdType(fMid, *self.PsdParams) ## non funziona, rimpiazzo a mano yPsd = PsdFuns.Interp(fMid, self._PsdNumericX, self._PsdNumericY) # Analytical Psd else: fMid_Pos, fMid = GetInRange(fAll, LowCutoff, HighCutoff) yPsd = self.PsdType(fMid, *self.PsdParams) # copying array subset yPsdAll[fMid_Pos] = yPsd return fAll, yPsdAll #====================================================================== # FUN: _FitNumericPsdWithPowerLaw #====================================================================== # in disusos def _FitNumericPsdWithPowerLaw(self): x,y = self.NumericPsdGetXY() if self._IsNumericPsdInFreq == True: p = FitPowerLaw(1/x,y) self.PsdParams = p[0], -p[1] else: p = FitPowerLaw(x,y) self.PsdParams = p[0], p[1] #====================================================================== # FUN: MakeProfile 
#====================================================================== def MakeProfile(self, L,N): ''' Evaluates the psd according to .PsdType, .PsdParams and .Options directives Returns an evenly-spaced array. If PsdType = NumericArray, linear interpolation is performed. :PARAM: N: # of samples :PARAM: dx: grid spacing (spatial frequency) returns: 1d arr ''' if self.PsdType == PsdFuns.Interp: # chiama codice ad hoc L_mm = L*1e3 yRoughness = PsdArray2Noise_1d_v2(self._PsdNumericX, self._PsdNumericY, L_mm, N) else: print('Irreversible error. The code was not completed to handle this instance') return yRoughness * self.ProfileScaling # f, yPsd = self.PsdEval(N//2 + 1,df) # Special case # if self.Options.FIT_NUMERIC_DATA_WITH_POWER_LAW == True: # self.PsdParams = list(FitPowerLaw(*self.NumericPsdGetXY())) # yPsd = PsdFuns.PowerLaw(x, *self.PsdParams) # else: # general calse # yPsd = self.PsdType(x, *self.PsdParams) # yRoughness = Psd2Noise_1d(yPsd, N, Semiaxis = True) # x = np.linspace(0, N*dx,N) # # Special case # if self.Options.FIT_NUMERIC_DATA_WITH_POWER_LAW == True: # self.PsdParams = list(FitPowerLaw(*self.NumericPsdGetXY())) # y = PowerLawNoise_1d(N, dx, *self.PsdParams) # else: # general calse # y = self.PsdType(N,dx, *self.PsdParams) # return y Generate = MakeProfile #====================================================================== # FUN: NumericPsdSetXY #====================================================================== def NumericPsdSetXY(self,x,y): self._PsdNumericX = x self._PsdNumericY = y #====================================================================== # FUN: NumericPsdGetXY #====================================================================== def NumericPsdGetXY(self): try: return self._PsdNumericX, self._PsdNumericY except: print('Error in RoughnessMaker.NumericPsdGetXY. Maybe the data file was not properly loaded') #====================================================================== # FUN: NumericPsdLoadXY #====================================================================== def NumericPsdLoadXY(self, FilePath, xScaling = 1, yScaling = 1 , xIsSpatialFreq = True): ''' @TODO: specificare formati e tipi di file Parameters ---------------------------- xIsSpatialFreq : bool true If the first column (Read_x_values) contains spatial frequencies. False if it contains lenghts. Default = True xScaling, yScaling: floats Read_x_values => Read_x_values * xScaling Read_y_values => Read_y_values * yScaling Sometimes, properly setting the x and y scaling values may be confusing (although just matter of high-school considerations). On this purpose, the property .RoughnessMaker.ProfileScaling property can be used also..ProfileScaling is the scale factor that acts on the output of MakeProfile() function only. 
remarks -------- pippo ''' try: self._IsNumericPsdInFreq = xIsSpatialFreq s = np.loadtxt(FilePath) x = s[:,0] y = s[:,1] x = x * xScaling y = y * yScaling # inversion of x-axis if not spatial frequencies if xIsSpatialFreq == False: f = 1/x else: f = x # array sorting i = np.argsort(f) f = f[i] y = y[i] # I set the Cutoff value of the class according to available data self.PsdCutoffLowHigh = [np.amin, np.amax(f)] # I set class operating variables self.PsdType = PsdFuns.Interp self.PsdParams = [f,y] # Auto-set # fill 0-value (DC Component) # if self.Options.AUTO_FILL_NUMERIC_DATA_WITH_ZERO == True: # if np.amin(x >0): # x = np.insert(x,0,0) # y = np.insert(y,0,0) # 0 in psd => 0-mean value in the noise pattern # sync other class values self.NumericPsdSetXY(f, y) except: pass def Generate(self, N = None, dx = None, CutoffLowHigh = [None, None]): ''' Parameters N: # of output samples dx: step of the x axis Note: generates an evenly spaced array ''' L = dx * N df = 1/L fPsd, yPsd = self.PsdEval(N//2 +1 , df = df, CutoffLowHigh = CutoffLowHigh ) h = Psd2Noise_1d(yPsd, Semiaxis = True) return h #====================================================================== # FUN: NumericPsdCheck #====================================================================== def NumericPsdCheck(self, N, L): df = 1/L # Stored data ff,yy = self.NumericPsdGetXY() # Evaluated data fPsd, yPsd = self.PsdEval(N, df) plt.plot(fPsd, np.log10(yPsd),'x') plt.plot(ff, np.log10(yy),'.r') plt.legend(['Evaluated data', 'Stored data']) plt.suptitle('Usage of stored data (PSD)') fMax = df*(N//2) fMin = df StrMsg = '' _max = np.max(ff) _min = np.min(ff) print('fMax query = %0.1e m^-1' % fMax ) print('fMax data= %0.1e m^-1 = %0.2e um^-1' % (_max, (_max * 1e6) )) print('fMin query= %0.1e m^-1' % fMin ) print('fMin data= %0.1e m^-1 = %0.2e um^-1' % (_min, (_min * 1e6) )) return StrMsg
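The Semiaxis=True branch of PsdArray2Noise_1d boils down to mirroring the half-axis PSD, attaching random phases and taking an inverse FFT. Below is a minimal numpy sketch of that recipe, using a hypothetical power-law PSD (a=1e-3, b=-2) rather than measured data.

import numpy as np

N = 1024
f = np.arange(1, N // 2 + 2)        # positive spatial frequencies (arbitrary units)
psd_half = 1e-3 * f**-2.0           # hypothetical power law a*f**b

# Mirror the half-axis PSD onto negative frequencies, then trim to N samples.
psd_full = np.hstack((psd_half[-1:0:-1], psd_half))[:N]

# Random phase per bin, inverse FFT, keep the real part (Real=True behaviour).
phases = 2 * np.pi * np.random.rand(N)
profile = np.real(np.fft.ifft(np.fft.fftshift(psd_full) * np.exp(1j * phases)))

print(profile.shape, profile.std())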
MakeProfile
Evaluates the psd according to .PsdType, .PsdParams and .Options directives.
Returns an evenly-spaced array. If PsdType = NumericArray, linear interpolation is performed.

:PARAM: N: # of samples
:PARAM: dx: grid spacing (spatial frequency)

returns: 1d arr
# -*- coding: utf-8 -*- """ Created on Thu Jul 07 14:08:31 2016 @author: Mic """ from __future__ import division from wiselib2.must import * import numpy as np import wiselib2.Rayman as rm Gauss1d = lambda x ,y : None from scipy import interpolate as interpolate from matplotlib import pyplot as plt class PsdFuns: ''' Ensemble of possible Psd Functions. Each element is a callable Psd. Most used are PsdFuns.PowerLaw(x,a,b) PsdFuns.Interp(x, xData, yData) ''' @staticmethod def Flat(x, *args): N = len(x) return np.zeros([1,N]) +1 @staticmethod def PowerLaw(x,a,b): return a*x**b @staticmethod def Gaussian(x,sigma, x0=0): return np.exp(-0.5 * (x-x0)**2/sigma**2) @staticmethod def Interp(x, xData, yData): f = interpolate.interp1d(xData, yData) return f(x) def PsdFun2Noise_1d(N,dx, PsdFun, PsdArgs): ''' Generates a noise pattern based an the Power spectral density returned by PsdFun ''' x = np.arange(0,N//2+1, dx) yHalf = PsdFun(x, *PsdArgs) y = Psd2NoisePattern_1d(yHalf, Semiaxis = True ) return x,y #============================================================================ # FUN: PsdArray2Noise_1d_v2 #============================================================================ def PsdArray2Noise_1d_v2(f_in, Psd_in, L_mm,N): ''' Returns meters ''' from scipy import interpolate log=np.log fft = np.fft.fft fftshift = np.fft.fftshift ff = f_in yy = Psd_in L = L_mm N = int(N) N2 = int(N//2) L =300 # (mm) L_um = L*1e3 L_nm = L*1e6 fMin = 1/L_um ##vecchia riga ##fSpline = (np.array(range(N2))+1)/L_um # um^-1 fSpline = np.arange(N2)/N2 * (max(ff) - min(ff)) + min(ff) fun = interpolate.splrep(log(ff), log(yy), s=2) yPsd_log = interpolate.splev(log(fSpline), fun) ySpline = np.exp(yPsd_log) yPsd = ySpline # tolgo yPsd[fSpline<ff[0]] = 200 n = len(yPsd) plt.plot(fSpline, yPsd,'-') plt.plot(ff, yy,'x') plt.legend(['ySpline','Data']) ax = plt.axes() #ax.set_yscale('log') #ax.set_xscale('log') #% controllo RMS integrando la yPsd import scipy.integrate as integrate RMS = np.sqrt(integrate.trapz(yPsd, fSpline/1000)) #% Modo Manfredda style #yPsdNorm = np.sqrt(yPsd/L_um/1000) #yPsdNorm_reverse = yPsdNorm[::-1] yPsd_reverse = yPsd[::-1] ell= 1/(fSpline[1] - fSpline[0]) if N%2 == 0: yPsd2 = np.hstack((yPsd_reverse ,0,yPsd[0:-1])) else: yPsd2 = np.hstack((yPsd_reverse ,0,yPsd)) ##yPsd2Norm = np.sqrt(yPsd2/ell/1000/2) yPsd2Norm = np.sqrt(yPsd2/ell/1000) n_ = len(yPsd2) print('len(yPsd2) = %0.2d' % len(yPsd2Norm)) phi = 2*np.pi * np.random.rand(n_) r = np.exp(1j*phi) yPsd2Norm_ = fftshift(yPsd2Norm) #yPsd2Norm_[len(yPsd2Norm_)//2] = 0 yRaf = np.fft.fft(r*yPsd2Norm_) yRaf = np.real(yRaf) print('Rms = %0.2e nm' % np.std(yRaf)) plt.plot(yPsd2Norm_) print('max yPsd_ = %d nm' % max(yPsd2)) print('max yPsd2Norm = %0.4f nm' % max(yPsd2Norm)) print('Rms yRaf2 = %0.2e nm' % np.std(yRaf)) return yRaf * 1e-9 #============================================================================ # FUN: Psd2Noise #============================================================================ def PsdArray2Noise_1d(PsdArray, N, Semiaxis = True, Real = True): ''' Generates a noise pattern whose Power Spectral density is given by Psd. 
Parameters --------------------- Psd : 1d array Contains the numeric Psd (treated as evenly spaced array) Semiaxis : 0 : does nothing 1 : halvens Pds, then replicates the halven part for left frequencies, producing an output as long as Psd 2 : replicates all Pds for lef frequencies as well, producing an output twice as long as Psd Real : boolean If True, the real part of the output is returned (default) Returns: --------------------- An array of the same length of Psd ''' if Semiaxis == True: yHalf = PsdArray PsdArrayNew = np.hstack((yHalf[-1:0:-1], yHalf)) idelta = len(PsdArrayNew) - N if idelta == 1:# piu lungo PsdArrayNew = PsdArrayNew[0:-1] # uguale elif idelta == 0: pass else: print('Error! len(PsdArrayNew) - len(PsdArray) = %0d' % idelta) y = np.fft.fftshift(PsdArrayNew) r = 2*np.pi * np.random.rand(len(PsdArrayNew)) f = np.fft.ifft(y * np.exp(1j*r)) if Real: return np.real(f) else: return f Psd2Noise_1d = PsdArray2Noise_1d #============================================================================ # FUN: NoNoise_1d #============================================================================ def NoNoise_1d(N, *args): return np.zeros([1,N]) #============================================================================ # FUN: GaussianNoise_1d #============================================================================ def GaussianNoise_1d(N,dx, Sigma): ''' PSD(f) = np.exp(-0.5^f/Sigma^2) ''' x = np.linspace( - N//2 *dx, N//2-1 * dx,N) y = np.exp(-0.5*x**2/Sigma**2) return Psd2NoisePattern_1d(y) #============================================================================ # FUN: PowerLawNoise_1d #============================================================================ def PowerLawNoise_1d(N, dx, a, b): ''' PSD(x) = a*x^b ''' x = np.arange(0,N//2+1, dx) yHalf = a * x**b # y = np.hstack((yHalf[-1:0:-1], 0, yHalf[1:-1])) return Psd2NoisePattern_1d(y, Semiaxis = True) #============================================================================ # FUN: CustomNoise_1d #============================================================================ def CustomNoise_1d(N, dx, xPsd, yPsd): xPsd_, yPsd_ = rm.FastResample1d(xPsd, yPsd,N) return Psd2NoisePattern_1d(yPsd_, Semiaxis = True) #============================================================================ # CLASS: NoiseGenerator #============================================================================ class PsdGenerator: NoNoise = staticmethod(NoNoise_1d) Gauss = staticmethod(GaussianNoise_1d) PowerLaw = staticmethod(PowerLawNoise_1d) NumericArray = staticmethod(CustomNoise_1d) #============================================================================ # FUN: FitPowerLaw #============================================================================ def FitPowerLaw(x,y): ''' Fits the input data in the form y = a*x^b returns a,b ''' import scipy.optimize as optimize fFit = lambda p, x: p[0] * x ** p[1] fErr = lambda p, x, y: (y - fFit(p, x)) p0 = [max(y), -1.0] out = optimize.leastsq(fErr, p0, args=(x, y), full_output=1) pOut = out[0] b = pOut[1] a = pOut[0] # indexErr = np.np.sqrt( covar[0][0] ) # ampErr = np.np.sqrt( covar[1][1] ) * amp return a,b #============================================================================== # CLASS: RoughnessMaker #============================================================================== class RoughnessMaker(object): class Options(): FIT_NUMERIC_DATA_WITH_POWER_LAW = True AUTO_ZERO_MEAN_FOR_NUMERIC_DATA = True AUTO_FILL_NUMERIC_DATA_WITH_ZERO = True AUTO_RESET_CUTOFF_ON_PSDTYPE_CHANGE = 
True def __init__(self): self.PsdType = PsdFuns.PowerLaw self.PsdParams = np.array([1,1]) self._IsNumericPsdInFreq = None self.CutoffLowHigh = [None, None] self.ProfileScaling = 1 return None @property def PsdType(self): return self._PsdType @PsdType.setter def PsdType(self, Val): ''' Note: each time that the Property value is set, self.CutoffLowHigh is reset, is specified by options ''' self. _PsdType = Val if self.Options.AUTO_RESET_CUTOFF_ON_PSDTYPE_CHANGE == True: self.PsdCutoffLowHigh = [None, None] #====================================================================== # FUN: PdfEval #====================================================================== def PsdEval(self, N, df, CutoffLowHigh = [None, None]): ''' Evals the PSD in the range [0 - N*df] It's good custom to have PSD[0] = 0, so that the noise pattern is zero-mean. Parameters: ---------------------- N : int #of samples df : float spacing of spatial frequencies (df=1/TotalLength) CutoffLowHigh : [LowCutoff, HighCutoff] if >0, then Psd(f<Cutoff) is set to 0. if None, then LowCutoff = min() Returns : fAll, yPsdAll ---------------------- fAll : 1d array contains the spatial frequencies yPsd : 1d array contains the Psd ''' ''' The Pdf is evaluated only within LowCutoff and HoghCutoff If the Pdf is PsdFuns.Interp, then LowCutoff and HighCutoff are automatically set to min and max values of the experimental data ''' StrMessage = '' def GetInRange(fAll, LowCutoff, HighCutoff): _tmpa = fAll >= LowCutoff _tmpb = fAll <= HighCutoff fMid_Pos = np.all([_tmpa, _tmpb],0) fMid = fAll[fMid_Pos] return fMid_Pos, fMid LowCutoff, HighCutoff = CutoffLowHigh fMin = 0 fMax = (N-1)*df fAll = np.linspace(0, fMax, N) yPsdAll = fAll* 0 # init LowCutoff = 0 if LowCutoff is None else LowCutoff HighCutoff = N*df if HighCutoff is None else HighCutoff # Numeric PSD # Note: by default returned yPsd is always 0 outside the input data range if self.PsdType == PsdFuns.Interp: # Use Auto-Fit + PowerLaw if self.Options.FIT_NUMERIC_DATA_WITH_POWER_LAW == True: xFreq,y = self.NumericPsdGetXY() p = FitPowerLaw(1/xFreq,y) _PsdParams = p[0], -p[1] LowCutoff = np.amin(self._PsdNumericX) HighCutoff = np.amin(self._PsdNumericX) fMid_Pos, fMid = GetInRange(fAll, LowCutoff, HighCutoff) yPsd = PsdFuns.PowerLaw(fMid, *_PsdParams ) # Use Interpolation else: # check Cutoff LowVal = np.amin(self._PsdNumericX) HighVal = np.amax(self._PsdNumericX) LowCutoff = LowVal if LowCutoff <= LowVal else LowCutoff HighCutoff = HighVal if HighCutoff >= HighVal else HighCutoff # Get the list of good frequency values (fMid) and their positions # (fMid_Pos) fMid_Pos, fMid = GetInRange(fAll, LowCutoff, HighCutoff) ##yPsd = self.PsdType(fMid, *self.PsdParams) ## non funziona, rimpiazzo a mano yPsd = PsdFuns.Interp(fMid, self._PsdNumericX, self._PsdNumericY) # Analytical Psd else: fMid_Pos, fMid = GetInRange(fAll, LowCutoff, HighCutoff) yPsd = self.PsdType(fMid, *self.PsdParams) # copying array subset yPsdAll[fMid_Pos] = yPsd return fAll, yPsdAll #====================================================================== # FUN: _FitNumericPsdWithPowerLaw #====================================================================== # in disusos def _FitNumericPsdWithPowerLaw(self): x,y = self.NumericPsdGetXY() if self._IsNumericPsdInFreq == True: p = FitPowerLaw(1/x,y) self.PsdParams = p[0], -p[1] else: p = FitPowerLaw(x,y) self.PsdParams = p[0], p[1] #====================================================================== # FUN: MakeProfile 
#====================================================================== # MASKED: MakeProfile function (lines 393-413) # f, yPsd = self.PsdEval(N//2 + 1,df) # Special case # if self.Options.FIT_NUMERIC_DATA_WITH_POWER_LAW == True: # self.PsdParams = list(FitPowerLaw(*self.NumericPsdGetXY())) # yPsd = PsdFuns.PowerLaw(x, *self.PsdParams) # else: # general calse # yPsd = self.PsdType(x, *self.PsdParams) # yRoughness = Psd2Noise_1d(yPsd, N, Semiaxis = True) # x = np.linspace(0, N*dx,N) # # Special case # if self.Options.FIT_NUMERIC_DATA_WITH_POWER_LAW == True: # self.PsdParams = list(FitPowerLaw(*self.NumericPsdGetXY())) # y = PowerLawNoise_1d(N, dx, *self.PsdParams) # else: # general calse # y = self.PsdType(N,dx, *self.PsdParams) # return y Generate = MakeProfile #====================================================================== # FUN: NumericPsdSetXY #====================================================================== def NumericPsdSetXY(self,x,y): self._PsdNumericX = x self._PsdNumericY = y #====================================================================== # FUN: NumericPsdGetXY #====================================================================== def NumericPsdGetXY(self): try: return self._PsdNumericX, self._PsdNumericY except: print('Error in RoughnessMaker.NumericPsdGetXY. Maybe the data file was not properly loaded') #====================================================================== # FUN: NumericPsdLoadXY #====================================================================== def NumericPsdLoadXY(self, FilePath, xScaling = 1, yScaling = 1 , xIsSpatialFreq = True): ''' @TODO: specificare formati e tipi di file Parameters ---------------------------- xIsSpatialFreq : bool true If the first column (Read_x_values) contains spatial frequencies. False if it contains lenghts. Default = True xScaling, yScaling: floats Read_x_values => Read_x_values * xScaling Read_y_values => Read_y_values * yScaling Sometimes, properly setting the x and y scaling values may be confusing (although just matter of high-school considerations). On this purpose, the property .RoughnessMaker.ProfileScaling property can be used also..ProfileScaling is the scale factor that acts on the output of MakeProfile() function only. 
remarks -------- pippo ''' try: self._IsNumericPsdInFreq = xIsSpatialFreq s = np.loadtxt(FilePath) x = s[:,0] y = s[:,1] x = x * xScaling y = y * yScaling # inversion of x-axis if not spatial frequencies if xIsSpatialFreq == False: f = 1/x else: f = x # array sorting i = np.argsort(f) f = f[i] y = y[i] # I set the Cutoff value of the class according to available data self.PsdCutoffLowHigh = [np.amin, np.amax(f)] # I set class operating variables self.PsdType = PsdFuns.Interp self.PsdParams = [f,y] # Auto-set # fill 0-value (DC Component) # if self.Options.AUTO_FILL_NUMERIC_DATA_WITH_ZERO == True: # if np.amin(x >0): # x = np.insert(x,0,0) # y = np.insert(y,0,0) # 0 in psd => 0-mean value in the noise pattern # sync other class values self.NumericPsdSetXY(f, y) except: pass def Generate(self, N = None, dx = None, CutoffLowHigh = [None, None]): ''' Parameters N: # of output samples dx: step of the x axis Note: generates an evenly spaced array ''' L = dx * N df = 1/L fPsd, yPsd = self.PsdEval(N//2 +1 , df = df, CutoffLowHigh = CutoffLowHigh ) h = Psd2Noise_1d(yPsd, Semiaxis = True) return h #====================================================================== # FUN: NumericPsdCheck #====================================================================== def NumericPsdCheck(self, N, L): df = 1/L # Stored data ff,yy = self.NumericPsdGetXY() # Evaluated data fPsd, yPsd = self.PsdEval(N, df) plt.plot(fPsd, np.log10(yPsd),'x') plt.plot(ff, np.log10(yy),'.r') plt.legend(['Evaluated data', 'Stored data']) plt.suptitle('Usage of stored data (PSD)') fMax = df*(N//2) fMin = df StrMsg = '' _max = np.max(ff) _min = np.min(ff) print('fMax query = %0.1e m^-1' % fMax ) print('fMax data= %0.1e m^-1 = %0.2e um^-1' % (_max, (_max * 1e6) )) print('fMin query= %0.1e m^-1' % fMin ) print('fMin data= %0.1e m^-1 = %0.2e um^-1' % (_min, (_min * 1e6) )) return StrMsg
def MakeProfile(self, L, N):
		'''
		Evaluates the psd according to .PsdType, .PsdParams and .Options directives
		Returns an evenly-spaced array.
		If PsdType = NumericArray, linear interpolation is performed.

		:PARAM: N: # of samples
		:PARAM: dx: grid spacing (spatial frequency)

		returns: 1d arr
		'''
		if self.PsdType == PsdFuns.Interp:
			# calls ad hoc code for the numeric (interpolated) PSD
			L_mm = L * 1e3
			yRoughness = PsdArray2Noise_1d_v2(self._PsdNumericX, self._PsdNumericY, L_mm, N)
		else:
			print('Irreversible error. The code was not completed to handle this instance')
		return yRoughness * self.ProfileScaling
393
413
# -*- coding: utf-8 -*- """ Created on Thu Jul 07 14:08:31 2016 @author: Mic """ from __future__ import division from wiselib2.must import * import numpy as np import wiselib2.Rayman as rm Gauss1d = lambda x ,y : None from scipy import interpolate as interpolate from matplotlib import pyplot as plt class PsdFuns: ''' Ensemble of possible Psd Functions. Each element is a callable Psd. Most used are PsdFuns.PowerLaw(x,a,b) PsdFuns.Interp(x, xData, yData) ''' @staticmethod def Flat(x, *args): N = len(x) return np.zeros([1,N]) +1 @staticmethod def PowerLaw(x,a,b): return a*x**b @staticmethod def Gaussian(x,sigma, x0=0): return np.exp(-0.5 * (x-x0)**2/sigma**2) @staticmethod def Interp(x, xData, yData): f = interpolate.interp1d(xData, yData) return f(x) def PsdFun2Noise_1d(N,dx, PsdFun, PsdArgs): ''' Generates a noise pattern based an the Power spectral density returned by PsdFun ''' x = np.arange(0,N//2+1, dx) yHalf = PsdFun(x, *PsdArgs) y = Psd2NoisePattern_1d(yHalf, Semiaxis = True ) return x,y #============================================================================ # FUN: PsdArray2Noise_1d_v2 #============================================================================ def PsdArray2Noise_1d_v2(f_in, Psd_in, L_mm,N): ''' Returns meters ''' from scipy import interpolate log=np.log fft = np.fft.fft fftshift = np.fft.fftshift ff = f_in yy = Psd_in L = L_mm N = int(N) N2 = int(N//2) L =300 # (mm) L_um = L*1e3 L_nm = L*1e6 fMin = 1/L_um ##vecchia riga ##fSpline = (np.array(range(N2))+1)/L_um # um^-1 fSpline = np.arange(N2)/N2 * (max(ff) - min(ff)) + min(ff) fun = interpolate.splrep(log(ff), log(yy), s=2) yPsd_log = interpolate.splev(log(fSpline), fun) ySpline = np.exp(yPsd_log) yPsd = ySpline # tolgo yPsd[fSpline<ff[0]] = 200 n = len(yPsd) plt.plot(fSpline, yPsd,'-') plt.plot(ff, yy,'x') plt.legend(['ySpline','Data']) ax = plt.axes() #ax.set_yscale('log') #ax.set_xscale('log') #% controllo RMS integrando la yPsd import scipy.integrate as integrate RMS = np.sqrt(integrate.trapz(yPsd, fSpline/1000)) #% Modo Manfredda style #yPsdNorm = np.sqrt(yPsd/L_um/1000) #yPsdNorm_reverse = yPsdNorm[::-1] yPsd_reverse = yPsd[::-1] ell= 1/(fSpline[1] - fSpline[0]) if N%2 == 0: yPsd2 = np.hstack((yPsd_reverse ,0,yPsd[0:-1])) else: yPsd2 = np.hstack((yPsd_reverse ,0,yPsd)) ##yPsd2Norm = np.sqrt(yPsd2/ell/1000/2) yPsd2Norm = np.sqrt(yPsd2/ell/1000) n_ = len(yPsd2) print('len(yPsd2) = %0.2d' % len(yPsd2Norm)) phi = 2*np.pi * np.random.rand(n_) r = np.exp(1j*phi) yPsd2Norm_ = fftshift(yPsd2Norm) #yPsd2Norm_[len(yPsd2Norm_)//2] = 0 yRaf = np.fft.fft(r*yPsd2Norm_) yRaf = np.real(yRaf) print('Rms = %0.2e nm' % np.std(yRaf)) plt.plot(yPsd2Norm_) print('max yPsd_ = %d nm' % max(yPsd2)) print('max yPsd2Norm = %0.4f nm' % max(yPsd2Norm)) print('Rms yRaf2 = %0.2e nm' % np.std(yRaf)) return yRaf * 1e-9 #============================================================================ # FUN: Psd2Noise #============================================================================ def PsdArray2Noise_1d(PsdArray, N, Semiaxis = True, Real = True): ''' Generates a noise pattern whose Power Spectral density is given by Psd. 
Parameters --------------------- Psd : 1d array Contains the numeric Psd (treated as evenly spaced array) Semiaxis : 0 : does nothing 1 : halvens Pds, then replicates the halven part for left frequencies, producing an output as long as Psd 2 : replicates all Pds for lef frequencies as well, producing an output twice as long as Psd Real : boolean If True, the real part of the output is returned (default) Returns: --------------------- An array of the same length of Psd ''' if Semiaxis == True: yHalf = PsdArray PsdArrayNew = np.hstack((yHalf[-1:0:-1], yHalf)) idelta = len(PsdArrayNew) - N if idelta == 1:# piu lungo PsdArrayNew = PsdArrayNew[0:-1] # uguale elif idelta == 0: pass else: print('Error! len(PsdArrayNew) - len(PsdArray) = %0d' % idelta) y = np.fft.fftshift(PsdArrayNew) r = 2*np.pi * np.random.rand(len(PsdArrayNew)) f = np.fft.ifft(y * np.exp(1j*r)) if Real: return np.real(f) else: return f Psd2Noise_1d = PsdArray2Noise_1d #============================================================================ # FUN: NoNoise_1d #============================================================================ def NoNoise_1d(N, *args): return np.zeros([1,N]) #============================================================================ # FUN: GaussianNoise_1d #============================================================================ def GaussianNoise_1d(N,dx, Sigma): ''' PSD(f) = np.exp(-0.5^f/Sigma^2) ''' x = np.linspace( - N//2 *dx, N//2-1 * dx,N) y = np.exp(-0.5*x**2/Sigma**2) return Psd2NoisePattern_1d(y) #============================================================================ # FUN: PowerLawNoise_1d #============================================================================ def PowerLawNoise_1d(N, dx, a, b): ''' PSD(x) = a*x^b ''' x = np.arange(0,N//2+1, dx) yHalf = a * x**b # y = np.hstack((yHalf[-1:0:-1], 0, yHalf[1:-1])) return Psd2NoisePattern_1d(y, Semiaxis = True) #============================================================================ # FUN: CustomNoise_1d #============================================================================ def CustomNoise_1d(N, dx, xPsd, yPsd): xPsd_, yPsd_ = rm.FastResample1d(xPsd, yPsd,N) return Psd2NoisePattern_1d(yPsd_, Semiaxis = True) #============================================================================ # CLASS: NoiseGenerator #============================================================================ class PsdGenerator: NoNoise = staticmethod(NoNoise_1d) Gauss = staticmethod(GaussianNoise_1d) PowerLaw = staticmethod(PowerLawNoise_1d) NumericArray = staticmethod(CustomNoise_1d) #============================================================================ # FUN: FitPowerLaw #============================================================================ def FitPowerLaw(x,y): ''' Fits the input data in the form y = a*x^b returns a,b ''' import scipy.optimize as optimize fFit = lambda p, x: p[0] * x ** p[1] fErr = lambda p, x, y: (y - fFit(p, x)) p0 = [max(y), -1.0] out = optimize.leastsq(fErr, p0, args=(x, y), full_output=1) pOut = out[0] b = pOut[1] a = pOut[0] # indexErr = np.np.sqrt( covar[0][0] ) # ampErr = np.np.sqrt( covar[1][1] ) * amp return a,b #============================================================================== # CLASS: RoughnessMaker #============================================================================== class RoughnessMaker(object): class Options(): FIT_NUMERIC_DATA_WITH_POWER_LAW = True AUTO_ZERO_MEAN_FOR_NUMERIC_DATA = True AUTO_FILL_NUMERIC_DATA_WITH_ZERO = True AUTO_RESET_CUTOFF_ON_PSDTYPE_CHANGE = 
True def __init__(self): self.PsdType = PsdFuns.PowerLaw self.PsdParams = np.array([1,1]) self._IsNumericPsdInFreq = None self.CutoffLowHigh = [None, None] self.ProfileScaling = 1 return None @property def PsdType(self): return self._PsdType @PsdType.setter def PsdType(self, Val): ''' Note: each time that the Property value is set, self.CutoffLowHigh is reset, is specified by options ''' self. _PsdType = Val if self.Options.AUTO_RESET_CUTOFF_ON_PSDTYPE_CHANGE == True: self.PsdCutoffLowHigh = [None, None] #====================================================================== # FUN: PdfEval #====================================================================== def PsdEval(self, N, df, CutoffLowHigh = [None, None]): ''' Evals the PSD in the range [0 - N*df] It's good custom to have PSD[0] = 0, so that the noise pattern is zero-mean. Parameters: ---------------------- N : int #of samples df : float spacing of spatial frequencies (df=1/TotalLength) CutoffLowHigh : [LowCutoff, HighCutoff] if >0, then Psd(f<Cutoff) is set to 0. if None, then LowCutoff = min() Returns : fAll, yPsdAll ---------------------- fAll : 1d array contains the spatial frequencies yPsd : 1d array contains the Psd ''' ''' The Pdf is evaluated only within LowCutoff and HoghCutoff If the Pdf is PsdFuns.Interp, then LowCutoff and HighCutoff are automatically set to min and max values of the experimental data ''' StrMessage = '' def GetInRange(fAll, LowCutoff, HighCutoff): _tmpa = fAll >= LowCutoff _tmpb = fAll <= HighCutoff fMid_Pos = np.all([_tmpa, _tmpb],0) fMid = fAll[fMid_Pos] return fMid_Pos, fMid LowCutoff, HighCutoff = CutoffLowHigh fMin = 0 fMax = (N-1)*df fAll = np.linspace(0, fMax, N) yPsdAll = fAll* 0 # init LowCutoff = 0 if LowCutoff is None else LowCutoff HighCutoff = N*df if HighCutoff is None else HighCutoff # Numeric PSD # Note: by default returned yPsd is always 0 outside the input data range if self.PsdType == PsdFuns.Interp: # Use Auto-Fit + PowerLaw if self.Options.FIT_NUMERIC_DATA_WITH_POWER_LAW == True: xFreq,y = self.NumericPsdGetXY() p = FitPowerLaw(1/xFreq,y) _PsdParams = p[0], -p[1] LowCutoff = np.amin(self._PsdNumericX) HighCutoff = np.amin(self._PsdNumericX) fMid_Pos, fMid = GetInRange(fAll, LowCutoff, HighCutoff) yPsd = PsdFuns.PowerLaw(fMid, *_PsdParams ) # Use Interpolation else: # check Cutoff LowVal = np.amin(self._PsdNumericX) HighVal = np.amax(self._PsdNumericX) LowCutoff = LowVal if LowCutoff <= LowVal else LowCutoff HighCutoff = HighVal if HighCutoff >= HighVal else HighCutoff # Get the list of good frequency values (fMid) and their positions # (fMid_Pos) fMid_Pos, fMid = GetInRange(fAll, LowCutoff, HighCutoff) ##yPsd = self.PsdType(fMid, *self.PsdParams) ## non funziona, rimpiazzo a mano yPsd = PsdFuns.Interp(fMid, self._PsdNumericX, self._PsdNumericY) # Analytical Psd else: fMid_Pos, fMid = GetInRange(fAll, LowCutoff, HighCutoff) yPsd = self.PsdType(fMid, *self.PsdParams) # copying array subset yPsdAll[fMid_Pos] = yPsd return fAll, yPsdAll #====================================================================== # FUN: _FitNumericPsdWithPowerLaw #====================================================================== # in disusos def _FitNumericPsdWithPowerLaw(self): x,y = self.NumericPsdGetXY() if self._IsNumericPsdInFreq == True: p = FitPowerLaw(1/x,y) self.PsdParams = p[0], -p[1] else: p = FitPowerLaw(x,y) self.PsdParams = p[0], p[1] #====================================================================== # FUN: MakeProfile 
#====================================================================== def MakeProfile(self, L,N): ''' Evaluates the psd according to .PsdType, .PsdParams and .Options directives Returns an evenly-spaced array. If PsdType = NumericArray, linear interpolation is performed. :PARAM: N: # of samples :PARAM: dx: grid spacing (spatial frequency) returns: 1d arr ''' if self.PsdType == PsdFuns.Interp: # chiama codice ad hoc L_mm = L*1e3 yRoughness = PsdArray2Noise_1d_v2(self._PsdNumericX, self._PsdNumericY, L_mm, N) else: print('Irreversible error. The code was not completed to handle this instance') return yRoughness * self.ProfileScaling # f, yPsd = self.PsdEval(N//2 + 1,df) # Special case # if self.Options.FIT_NUMERIC_DATA_WITH_POWER_LAW == True: # self.PsdParams = list(FitPowerLaw(*self.NumericPsdGetXY())) # yPsd = PsdFuns.PowerLaw(x, *self.PsdParams) # else: # general calse # yPsd = self.PsdType(x, *self.PsdParams) # yRoughness = Psd2Noise_1d(yPsd, N, Semiaxis = True) # x = np.linspace(0, N*dx,N) # # Special case # if self.Options.FIT_NUMERIC_DATA_WITH_POWER_LAW == True: # self.PsdParams = list(FitPowerLaw(*self.NumericPsdGetXY())) # y = PowerLawNoise_1d(N, dx, *self.PsdParams) # else: # general calse # y = self.PsdType(N,dx, *self.PsdParams) # return y Generate = MakeProfile #====================================================================== # FUN: NumericPsdSetXY #====================================================================== def NumericPsdSetXY(self,x,y): self._PsdNumericX = x self._PsdNumericY = y #====================================================================== # FUN: NumericPsdGetXY #====================================================================== def NumericPsdGetXY(self): try: return self._PsdNumericX, self._PsdNumericY except: print('Error in RoughnessMaker.NumericPsdGetXY. Maybe the data file was not properly loaded') #====================================================================== # FUN: NumericPsdLoadXY #====================================================================== def NumericPsdLoadXY(self, FilePath, xScaling = 1, yScaling = 1 , xIsSpatialFreq = True): ''' @TODO: specificare formati e tipi di file Parameters ---------------------------- xIsSpatialFreq : bool true If the first column (Read_x_values) contains spatial frequencies. False if it contains lenghts. Default = True xScaling, yScaling: floats Read_x_values => Read_x_values * xScaling Read_y_values => Read_y_values * yScaling Sometimes, properly setting the x and y scaling values may be confusing (although just matter of high-school considerations). On this purpose, the property .RoughnessMaker.ProfileScaling property can be used also..ProfileScaling is the scale factor that acts on the output of MakeProfile() function only. 
remarks -------- pippo ''' try: self._IsNumericPsdInFreq = xIsSpatialFreq s = np.loadtxt(FilePath) x = s[:,0] y = s[:,1] x = x * xScaling y = y * yScaling # inversion of x-axis if not spatial frequencies if xIsSpatialFreq == False: f = 1/x else: f = x # array sorting i = np.argsort(f) f = f[i] y = y[i] # I set the Cutoff value of the class according to available data self.PsdCutoffLowHigh = [np.amin, np.amax(f)] # I set class operating variables self.PsdType = PsdFuns.Interp self.PsdParams = [f,y] # Auto-set # fill 0-value (DC Component) # if self.Options.AUTO_FILL_NUMERIC_DATA_WITH_ZERO == True: # if np.amin(x >0): # x = np.insert(x,0,0) # y = np.insert(y,0,0) # 0 in psd => 0-mean value in the noise pattern # sync other class values self.NumericPsdSetXY(f, y) except: pass def Generate(self, N = None, dx = None, CutoffLowHigh = [None, None]): ''' Parameters N: # of output samples dx: step of the x axis Note: generates an evenly spaced array ''' L = dx * N df = 1/L fPsd, yPsd = self.PsdEval(N//2 +1 , df = df, CutoffLowHigh = CutoffLowHigh ) h = Psd2Noise_1d(yPsd, Semiaxis = True) return h #====================================================================== # FUN: NumericPsdCheck #====================================================================== def NumericPsdCheck(self, N, L): df = 1/L # Stored data ff,yy = self.NumericPsdGetXY() # Evaluated data fPsd, yPsd = self.PsdEval(N, df) plt.plot(fPsd, np.log10(yPsd),'x') plt.plot(ff, np.log10(yy),'.r') plt.legend(['Evaluated data', 'Stored data']) plt.suptitle('Usage of stored data (PSD)') fMax = df*(N//2) fMin = df StrMsg = '' _max = np.max(ff) _min = np.min(ff) print('fMax query = %0.1e m^-1' % fMax ) print('fMax data= %0.1e m^-1 = %0.2e um^-1' % (_max, (_max * 1e6) )) print('fMin query= %0.1e m^-1' % fMin ) print('fMin data= %0.1e m^-1 = %0.2e um^-1' % (_min, (_min * 1e6) )) return StrMsg
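For the numeric branch used by MakeProfile above, a hedged sketch is shown below; the file name, scaling factors and lengths are placeholders (not from the source), and the classes are assumed to be in scope as in the listing.

import numpy as np

maker = RoughnessMaker()
# 'psd_data.txt' is a placeholder two-column file: spatial frequency, PSD value.
maker.NumericPsdLoadXY('psd_data.txt', xScaling=1.0, yScaling=1.0, xIsSpatialFreq=True)
h = maker.MakeProfile(L=0.3, N=4096)    # L in metres; converted to mm inside MakeProfile
print('rms roughness = %.3e m' % np.std(h))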
__getitem__
Get the procedure params according to the index. Create the register when it does not exist.

        :param index:
        :return: ProcedureParamStorage
#!/usr/bin/python3 # -*- coding: utf8 -*- # Copyright (c) 2020 Baidu, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Procedure Params """ class ProcedureParams: """ The procedure params dict """ def __init__(self): """ The constructor of the ProcedureParams class """ self.paramsDict = {} # the inner data for procedure params dict # MASKED: __getitem__ function (lines 35-50) class ProcedureParamStorage: """ The storage for procedure param """ def __init__(self, index): """ The quantum param object needs to know its index. :param index: the quantum register index """ self.index = index
    def __getitem__(self, index):
        """
        Get the procedure params according to the index. Create the register when it does not exist.

        :param index:
        :return: ProcedureParamStorage
        """
        value = self.paramsDict.get(index)
        if value is not None:
            return value

        value = ProcedureParamStorage(index)
        self.paramsDict[index] = value
        return value
35
50
#!/usr/bin/python3 # -*- coding: utf8 -*- # Copyright (c) 2020 Baidu, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Procedure Params """ class ProcedureParams: """ The procedure params dict """ def __init__(self): """ The constructor of the ProcedureParams class """ self.paramsDict = {} # the inner data for procedure params dict def __getitem__(self, index): """ Get the procedure params according to the index. Create the register when it does not exist. :param index: :return: ProcedureParamStorage """ value = self.paramsDict.get(index) if value is not None: return value value = ProcedureParamStorage(index) self.paramsDict[index] = value return value class ProcedureParamStorage: """ The storage for procedure param """ def __init__(self, index): """ The quantum param object needs to know its index. :param index: the quantum register index """ self.index = index
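A minimal sketch of the lazy-creation behaviour implemented by __getitem__ above, assuming ProcedureParams and ProcedureParamStorage from this file are in scope.

params = ProcedureParams()
p0 = params[0]            # first access creates a ProcedureParamStorage for index 0
assert isinstance(p0, ProcedureParamStorage) and p0.index == 0
assert params[0] is p0    # repeated access returns the cached object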
build_model
Build DCE using the initialized attributes

        Args:
            norm: boolean, whether to add a normalization layer at the beginning
                  of the autoencoder
            act: string, keras activation function name for autoencoder
""" DeepChEmbed (DCE) Models """ from dimreducer import DeepAutoEncoder from cluster import KMeansLayer from cluster import KMeans from keras import Model from keras import optimizers from keras.utils import normalize import numpy as np class DCE(): """ The class to build a deep chemical embedding model. Attributes: autoencoder_dims: a list of dimensions for encoder, the first element as input dimension, and the last one as hidden layer dimension. n_clusters: int, number of clusters for clustering layer. alpha: float, parameters for soft label assigning. update_interval: int, indicating every number of epoches, the harhened labels will be upadated and/or convergence cretia will be examed. max_iteration: int, maximum iteration for the combined training clustering_tol: float, convergence cretia for clustering layer model: keras Model variable HARDENING_FUNCS: smoothsetp hardening functions for unsupervised DCE training, up to 9th order """ HARDENING_FUNCS = { 1: lambda x: x, 3: lambda x: (-2*x + 3) * x**2, 5: lambda x: ((6*x - 15)*x + 10) * x**3, 7: lambda x: (((-20*x + 70)*x - 84)*x + 35) * x**4, 9: lambda x: ((((70*x - 315)*x + 540)*x -420)*x + 126) * x**5} def __init__(self, autoencoder_dims, n_clusters, update_interval=50, max_iteration=1e4, clustering_tol=1e-4, alpha=1.0): """Construtor of DCE. """ self.autoencoder_dims = autoencoder_dims self.n_clusters = n_clusters self.alpha = alpha self.update_interval = update_interval self.max_iteration = max_iteration self.clustering_tol = clustering_tol self.model = None return # MASKED: build_model function (lines 52-68) def train_model(self, data_train, labels_train=None, data_test=None, labels_test=None, verbose=1, compiled=False, clustering_loss='kld', decoder_loss='mse',clustering_loss_weight=0.5, hardening_order=1, hardening_strength=2.0, compiled=False, optimizer='adam', lr=0.001, decay=0.0): """Train DCE Model: If labels_train are not present, train DCE model in a unsupervised learning process; otherwise, train DCE model in a supervised learning process. 
Args: data_train: input training data labels_train: true labels of traning data data_test: input test data labels_test: true lables of testing data verbose: 0, turn off the screen prints clustering_loss: string, clustering layer loss function decoder_loss:, string, decoder loss function clustering_loss_weight: float in [0,1], w_c, harderning_order: odd int, the order of hardening function harderning_strength: float >=1.0, the streng of the harderning compiled: boolean, indicating if the model is compiled or not optmizer: string, keras optimizers lr: learning rate dacay: learning rate dacay Returns: train_loss: training loss test_loss: only if data_test and labels_test are not None in supervised learning process """ if (not compiled): assert clustering_loss_weight <= 1 and clustering_loss_weight >= 0 if optimizer == 'adam': dce_optimizer = optimizers.Adam(lr=lr,decay=decay) elif optimizer == 'sgd': dce_optimizer = optimizers.sgd(lr=lr,decay=decay) else: raise Exception('Input optimizer was not found') self.model.compile(loss={'clustering': clustering_loss, 'decoder_output': decoder_loss}, loss_weights=[clustering_loss_weight, 1 - clustering_loss_weight], optimizer=dce_optimizer) if (labels_train is not None): supervised_learning = True if verbose >= 1: print('Starting supervised learning') else: supervised_learning = False if verbose >= 1: print('Starting unsupervised learning') # initializing model by using sklean-Kmeans as guess kmeans_init = KMeans(n_clusters=self.n_clusters) kmeans_init.build_model() encoder = Model(inputs=self.model.input, outputs=self.model.get_layer(\ name='embedding_layer').output) kmeans_init.model.fit(encoder.predict(data_train)) y_pred_last = kmeans_init.model.labels_ self.model.get_layer(name='clustering').\ set_weights([kmeans_init.model.cluster_centers_]) # Prepare training: p disctribution methods if not supervised_learning: # Unsupervised Learning assert hardening_order in DCE.HARDENING_FUNCS.keys() assert hardening_strength >= 1.0 h_func = DCE.HARDENING_FUNCS[hardening_order] else: # Supervised Learning assert len(labels_train) == len(data_train) assert len(np.unique(labels_train)) == self.n_clusters p = np.zeros(shape=(len(labels_train), self.n_clusters)) for i in range(len(labels_train)): p[i][labels_train[i]] = 1.0 if data_test is not None: assert len(labels_test) == len(data_test) assert len(np.unique(labels_test)) == self.n_clusters p_test = np.zeros(shape=(len(labels_test), self.n_clusters)) for i in range(len(labels_test)): p_test[i][labels_test[i]] = 1.0 validation_loss = [] # training start: loss = [] for iteration in range(int(self.max_iteration)): if iteration % self.update_interval == 0: # updating p for unsupervised learning process q, _ = self.model.predict(data_train) if not supervised_learning: p = DCE.hardening(q, h_func, hardening_strength) # get label change i y_pred = q.argmax(1) delta_label_i = np.sum(y_pred != y_pred_last).\ astype(np.float32) / y_pred.shape[0] y_pred_last = y_pred # exam convergence if iteration > 0 and delta_label_i < self.clustering_tol: print(str(delta_label_i) +' < ' + str(self.clustering_tol)) print('Reached tolerance threshold. 
Stopping training.') break loss.append(self.model.train_on_batch(x=data_train, y=[p,data_train])) if supervised_learning and data_test is not None: validation_loss.append(self.model.test_on_batch( x=data_test, y=[p_test,data_test])) if verbose > 0 and iteration % self.update_interval == 0: print('Epoch: ' + str(iteration)) if verbose == 1: print(' Total_loss = ' + str(loss[iteration][0]) + ';Delta_label = ' + str(delta_label_i)) print(' Clustering_loss = ' + str(loss[iteration][1]) + '; Decoder_loss = ' + str(loss[iteration][2])) if iteration == self.max_iteration - 1: print('Reached maximum iteration. Stopping training.') if data_test is None: return np.array(loss).T else: return [np.array(loss).T, np.array(validation_loss).T] @staticmethod def hardening(q, h_func, stength): """hardening distribution P and return Q Args: q: input distributions. h_func: input harderning function. strength: hardening strength. returns: p: hardened and normatlized distributions. """ q = h_func(q) weight = q ** stength / q.sum(0) return (weight.T / weight.sum(1)).T
    def build_model(self, norm=True, act='relu'):
        """Build DCE using the initialized attributes

        Args:
            norm: boolean, whether to add a normalization layer at the beginning
                  of the autoencoder
            act: string, keras activation function name for autoencoder
        """
        autoencoder = DeepAutoEncoder(self.autoencoder_dims, act)
        autoencoder.build_model(norm=norm)
        embeding = autoencoder.model.get_layer(name='embedding_layer').output
        clustering = KMeansLayer(self.n_clusters, alpha=self.alpha,
                                 name='clustering')(embeding)

        self.model = Model(inputs=autoencoder.model.input,
                           outputs=[clustering, autoencoder.model.output])

        return
52
68
""" DeepChEmbed (DCE) Models """ from dimreducer import DeepAutoEncoder from cluster import KMeansLayer from cluster import KMeans from keras import Model from keras import optimizers from keras.utils import normalize import numpy as np class DCE(): """ The class to build a deep chemical embedding model. Attributes: autoencoder_dims: a list of dimensions for encoder, the first element as input dimension, and the last one as hidden layer dimension. n_clusters: int, number of clusters for clustering layer. alpha: float, parameters for soft label assigning. update_interval: int, indicating every number of epoches, the harhened labels will be upadated and/or convergence cretia will be examed. max_iteration: int, maximum iteration for the combined training clustering_tol: float, convergence cretia for clustering layer model: keras Model variable HARDENING_FUNCS: smoothsetp hardening functions for unsupervised DCE training, up to 9th order """ HARDENING_FUNCS = { 1: lambda x: x, 3: lambda x: (-2*x + 3) * x**2, 5: lambda x: ((6*x - 15)*x + 10) * x**3, 7: lambda x: (((-20*x + 70)*x - 84)*x + 35) * x**4, 9: lambda x: ((((70*x - 315)*x + 540)*x -420)*x + 126) * x**5} def __init__(self, autoencoder_dims, n_clusters, update_interval=50, max_iteration=1e4, clustering_tol=1e-4, alpha=1.0): """Construtor of DCE. """ self.autoencoder_dims = autoencoder_dims self.n_clusters = n_clusters self.alpha = alpha self.update_interval = update_interval self.max_iteration = max_iteration self.clustering_tol = clustering_tol self.model = None return def build_model(self, norm=True, act='relu'): """Build DCE using the initialized attributes Args: norm: boolean, wheher to add a normalization layer at the begining of the autoencoder act: string, keras activation function name for autoencoder """ autoencoder = DeepAutoEncoder(self.autoencoder_dims, act) autoencoder.build_model(norm=norm) embeding = autoencoder.model.get_layer(name='embedding_layer').output clustering = KMeansLayer(self.n_clusters, alpha=self.alpha, name='clustering')(embeding) self.model = Model(inputs=autoencoder.model.input, outputs=[clustering,autoencoder.model.output]) return def train_model(self, data_train, labels_train=None, data_test=None, labels_test=None, verbose=1, compiled=False, clustering_loss='kld', decoder_loss='mse',clustering_loss_weight=0.5, hardening_order=1, hardening_strength=2.0, compiled=False, optimizer='adam', lr=0.001, decay=0.0): """Train DCE Model: If labels_train are not present, train DCE model in a unsupervised learning process; otherwise, train DCE model in a supervised learning process. 
Args: data_train: input training data labels_train: true labels of traning data data_test: input test data labels_test: true lables of testing data verbose: 0, turn off the screen prints clustering_loss: string, clustering layer loss function decoder_loss:, string, decoder loss function clustering_loss_weight: float in [0,1], w_c, harderning_order: odd int, the order of hardening function harderning_strength: float >=1.0, the streng of the harderning compiled: boolean, indicating if the model is compiled or not optmizer: string, keras optimizers lr: learning rate dacay: learning rate dacay Returns: train_loss: training loss test_loss: only if data_test and labels_test are not None in supervised learning process """ if (not compiled): assert clustering_loss_weight <= 1 and clustering_loss_weight >= 0 if optimizer == 'adam': dce_optimizer = optimizers.Adam(lr=lr,decay=decay) elif optimizer == 'sgd': dce_optimizer = optimizers.sgd(lr=lr,decay=decay) else: raise Exception('Input optimizer was not found') self.model.compile(loss={'clustering': clustering_loss, 'decoder_output': decoder_loss}, loss_weights=[clustering_loss_weight, 1 - clustering_loss_weight], optimizer=dce_optimizer) if (labels_train is not None): supervised_learning = True if verbose >= 1: print('Starting supervised learning') else: supervised_learning = False if verbose >= 1: print('Starting unsupervised learning') # initializing model by using sklean-Kmeans as guess kmeans_init = KMeans(n_clusters=self.n_clusters) kmeans_init.build_model() encoder = Model(inputs=self.model.input, outputs=self.model.get_layer(\ name='embedding_layer').output) kmeans_init.model.fit(encoder.predict(data_train)) y_pred_last = kmeans_init.model.labels_ self.model.get_layer(name='clustering').\ set_weights([kmeans_init.model.cluster_centers_]) # Prepare training: p disctribution methods if not supervised_learning: # Unsupervised Learning assert hardening_order in DCE.HARDENING_FUNCS.keys() assert hardening_strength >= 1.0 h_func = DCE.HARDENING_FUNCS[hardening_order] else: # Supervised Learning assert len(labels_train) == len(data_train) assert len(np.unique(labels_train)) == self.n_clusters p = np.zeros(shape=(len(labels_train), self.n_clusters)) for i in range(len(labels_train)): p[i][labels_train[i]] = 1.0 if data_test is not None: assert len(labels_test) == len(data_test) assert len(np.unique(labels_test)) == self.n_clusters p_test = np.zeros(shape=(len(labels_test), self.n_clusters)) for i in range(len(labels_test)): p_test[i][labels_test[i]] = 1.0 validation_loss = [] # training start: loss = [] for iteration in range(int(self.max_iteration)): if iteration % self.update_interval == 0: # updating p for unsupervised learning process q, _ = self.model.predict(data_train) if not supervised_learning: p = DCE.hardening(q, h_func, hardening_strength) # get label change i y_pred = q.argmax(1) delta_label_i = np.sum(y_pred != y_pred_last).\ astype(np.float32) / y_pred.shape[0] y_pred_last = y_pred # exam convergence if iteration > 0 and delta_label_i < self.clustering_tol: print(str(delta_label_i) +' < ' + str(self.clustering_tol)) print('Reached tolerance threshold. 
Stopping training.') break loss.append(self.model.train_on_batch(x=data_train, y=[p,data_train])) if supervised_learning and data_test is not None: validation_loss.append(self.model.test_on_batch( x=data_test, y=[p_test,data_test])) if verbose > 0 and iteration % self.update_interval == 0: print('Epoch: ' + str(iteration)) if verbose == 1: print(' Total_loss = ' + str(loss[iteration][0]) + ';Delta_label = ' + str(delta_label_i)) print(' Clustering_loss = ' + str(loss[iteration][1]) + '; Decoder_loss = ' + str(loss[iteration][2])) if iteration == self.max_iteration - 1: print('Reached maximum iteration. Stopping training.') if data_test is None: return np.array(loss).T else: return [np.array(loss).T, np.array(validation_loss).T] @staticmethod def hardening(q, h_func, stength): """hardening distribution P and return Q Args: q: input distributions. h_func: input harderning function. strength: hardening strength. returns: p: hardened and normatlized distributions. """ q = h_func(q) weight = q ** stength / q.sum(0) return (weight.T / weight.sum(1)).T
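A hedged sketch of building the combined clustering/autoencoder model with build_model above; it requires the project's dimreducer and cluster modules plus Keras, and the layer sizes and cluster count below are illustrative only.

dce = DCE(autoencoder_dims=[128, 64, 10], n_clusters=5)   # input dim 128, embedding dim 10
dce.build_model(norm=True, act='relu')
dce.model.summary()   # two outputs: 'clustering' soft assignments and the decoder reconstruction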
hardening
hardening distribution P and return Q

        Args:
            q: input distributions.
            h_func: input hardening function.
            strength: hardening strength.

        returns:
            p: hardened and normalized distributions.
""" DeepChEmbed (DCE) Models """ from dimreducer import DeepAutoEncoder from cluster import KMeansLayer from cluster import KMeans from keras import Model from keras import optimizers from keras.utils import normalize import numpy as np class DCE(): """ The class to build a deep chemical embedding model. Attributes: autoencoder_dims: a list of dimensions for encoder, the first element as input dimension, and the last one as hidden layer dimension. n_clusters: int, number of clusters for clustering layer. alpha: float, parameters for soft label assigning. update_interval: int, indicating every number of epoches, the harhened labels will be upadated and/or convergence cretia will be examed. max_iteration: int, maximum iteration for the combined training clustering_tol: float, convergence cretia for clustering layer model: keras Model variable HARDENING_FUNCS: smoothsetp hardening functions for unsupervised DCE training, up to 9th order """ HARDENING_FUNCS = { 1: lambda x: x, 3: lambda x: (-2*x + 3) * x**2, 5: lambda x: ((6*x - 15)*x + 10) * x**3, 7: lambda x: (((-20*x + 70)*x - 84)*x + 35) * x**4, 9: lambda x: ((((70*x - 315)*x + 540)*x -420)*x + 126) * x**5} def __init__(self, autoencoder_dims, n_clusters, update_interval=50, max_iteration=1e4, clustering_tol=1e-4, alpha=1.0): """Construtor of DCE. """ self.autoencoder_dims = autoencoder_dims self.n_clusters = n_clusters self.alpha = alpha self.update_interval = update_interval self.max_iteration = max_iteration self.clustering_tol = clustering_tol self.model = None return def build_model(self, norm=True, act='relu'): """Build DCE using the initialized attributes Args: norm: boolean, wheher to add a normalization layer at the begining of the autoencoder act: string, keras activation function name for autoencoder """ autoencoder = DeepAutoEncoder(self.autoencoder_dims, act) autoencoder.build_model(norm=norm) embeding = autoencoder.model.get_layer(name='embedding_layer').output clustering = KMeansLayer(self.n_clusters, alpha=self.alpha, name='clustering')(embeding) self.model = Model(inputs=autoencoder.model.input, outputs=[clustering,autoencoder.model.output]) return def train_model(self, data_train, labels_train=None, data_test=None, labels_test=None, verbose=1, compiled=False, clustering_loss='kld', decoder_loss='mse',clustering_loss_weight=0.5, hardening_order=1, hardening_strength=2.0, compiled=False, optimizer='adam', lr=0.001, decay=0.0): """Train DCE Model: If labels_train are not present, train DCE model in a unsupervised learning process; otherwise, train DCE model in a supervised learning process. 
Args: data_train: input training data labels_train: true labels of traning data data_test: input test data labels_test: true lables of testing data verbose: 0, turn off the screen prints clustering_loss: string, clustering layer loss function decoder_loss:, string, decoder loss function clustering_loss_weight: float in [0,1], w_c, harderning_order: odd int, the order of hardening function harderning_strength: float >=1.0, the streng of the harderning compiled: boolean, indicating if the model is compiled or not optmizer: string, keras optimizers lr: learning rate dacay: learning rate dacay Returns: train_loss: training loss test_loss: only if data_test and labels_test are not None in supervised learning process """ if (not compiled): assert clustering_loss_weight <= 1 and clustering_loss_weight >= 0 if optimizer == 'adam': dce_optimizer = optimizers.Adam(lr=lr,decay=decay) elif optimizer == 'sgd': dce_optimizer = optimizers.sgd(lr=lr,decay=decay) else: raise Exception('Input optimizer was not found') self.model.compile(loss={'clustering': clustering_loss, 'decoder_output': decoder_loss}, loss_weights=[clustering_loss_weight, 1 - clustering_loss_weight], optimizer=dce_optimizer) if (labels_train is not None): supervised_learning = True if verbose >= 1: print('Starting supervised learning') else: supervised_learning = False if verbose >= 1: print('Starting unsupervised learning') # initializing model by using sklean-Kmeans as guess kmeans_init = KMeans(n_clusters=self.n_clusters) kmeans_init.build_model() encoder = Model(inputs=self.model.input, outputs=self.model.get_layer(\ name='embedding_layer').output) kmeans_init.model.fit(encoder.predict(data_train)) y_pred_last = kmeans_init.model.labels_ self.model.get_layer(name='clustering').\ set_weights([kmeans_init.model.cluster_centers_]) # Prepare training: p disctribution methods if not supervised_learning: # Unsupervised Learning assert hardening_order in DCE.HARDENING_FUNCS.keys() assert hardening_strength >= 1.0 h_func = DCE.HARDENING_FUNCS[hardening_order] else: # Supervised Learning assert len(labels_train) == len(data_train) assert len(np.unique(labels_train)) == self.n_clusters p = np.zeros(shape=(len(labels_train), self.n_clusters)) for i in range(len(labels_train)): p[i][labels_train[i]] = 1.0 if data_test is not None: assert len(labels_test) == len(data_test) assert len(np.unique(labels_test)) == self.n_clusters p_test = np.zeros(shape=(len(labels_test), self.n_clusters)) for i in range(len(labels_test)): p_test[i][labels_test[i]] = 1.0 validation_loss = [] # training start: loss = [] for iteration in range(int(self.max_iteration)): if iteration % self.update_interval == 0: # updating p for unsupervised learning process q, _ = self.model.predict(data_train) if not supervised_learning: p = DCE.hardening(q, h_func, hardening_strength) # get label change i y_pred = q.argmax(1) delta_label_i = np.sum(y_pred != y_pred_last).\ astype(np.float32) / y_pred.shape[0] y_pred_last = y_pred # exam convergence if iteration > 0 and delta_label_i < self.clustering_tol: print(str(delta_label_i) +' < ' + str(self.clustering_tol)) print('Reached tolerance threshold. 
Stopping training.') break loss.append(self.model.train_on_batch(x=data_train, y=[p,data_train])) if supervised_learning and data_test is not None: validation_loss.append(self.model.test_on_batch( x=data_test, y=[p_test,data_test])) if verbose > 0 and iteration % self.update_interval == 0: print('Epoch: ' + str(iteration)) if verbose == 1: print(' Total_loss = ' + str(loss[iteration][0]) + ';Delta_label = ' + str(delta_label_i)) print(' Clustering_loss = ' + str(loss[iteration][1]) + '; Decoder_loss = ' + str(loss[iteration][2])) if iteration == self.max_iteration - 1: print('Reached maximum iteration. Stopping training.') if data_test is None: return np.array(loss).T else: return [np.array(loss).T, np.array(validation_loss).T] # MASKED: hardening function (lines 207-222)
    @staticmethod
    def hardening(q, h_func, stength):
        """hardening distribution P and return Q

        Args:
            q: input distributions.
            h_func: input hardening function.
            strength: hardening strength.

        returns:
            p: hardened and normalized distributions.
        """
        q = h_func(q)
        weight = q ** stength / q.sum(0)
        return (weight.T / weight.sum(1)).T
207
222
""" DeepChEmbed (DCE) Models """ from dimreducer import DeepAutoEncoder from cluster import KMeansLayer from cluster import KMeans from keras import Model from keras import optimizers from keras.utils import normalize import numpy as np class DCE(): """ The class to build a deep chemical embedding model. Attributes: autoencoder_dims: a list of dimensions for encoder, the first element as input dimension, and the last one as hidden layer dimension. n_clusters: int, number of clusters for clustering layer. alpha: float, parameters for soft label assigning. update_interval: int, indicating every number of epoches, the harhened labels will be upadated and/or convergence cretia will be examed. max_iteration: int, maximum iteration for the combined training clustering_tol: float, convergence cretia for clustering layer model: keras Model variable HARDENING_FUNCS: smoothsetp hardening functions for unsupervised DCE training, up to 9th order """ HARDENING_FUNCS = { 1: lambda x: x, 3: lambda x: (-2*x + 3) * x**2, 5: lambda x: ((6*x - 15)*x + 10) * x**3, 7: lambda x: (((-20*x + 70)*x - 84)*x + 35) * x**4, 9: lambda x: ((((70*x - 315)*x + 540)*x -420)*x + 126) * x**5} def __init__(self, autoencoder_dims, n_clusters, update_interval=50, max_iteration=1e4, clustering_tol=1e-4, alpha=1.0): """Construtor of DCE. """ self.autoencoder_dims = autoencoder_dims self.n_clusters = n_clusters self.alpha = alpha self.update_interval = update_interval self.max_iteration = max_iteration self.clustering_tol = clustering_tol self.model = None return def build_model(self, norm=True, act='relu'): """Build DCE using the initialized attributes Args: norm: boolean, wheher to add a normalization layer at the begining of the autoencoder act: string, keras activation function name for autoencoder """ autoencoder = DeepAutoEncoder(self.autoencoder_dims, act) autoencoder.build_model(norm=norm) embeding = autoencoder.model.get_layer(name='embedding_layer').output clustering = KMeansLayer(self.n_clusters, alpha=self.alpha, name='clustering')(embeding) self.model = Model(inputs=autoencoder.model.input, outputs=[clustering,autoencoder.model.output]) return def train_model(self, data_train, labels_train=None, data_test=None, labels_test=None, verbose=1, compiled=False, clustering_loss='kld', decoder_loss='mse',clustering_loss_weight=0.5, hardening_order=1, hardening_strength=2.0, compiled=False, optimizer='adam', lr=0.001, decay=0.0): """Train DCE Model: If labels_train are not present, train DCE model in a unsupervised learning process; otherwise, train DCE model in a supervised learning process. 
Args: data_train: input training data labels_train: true labels of traning data data_test: input test data labels_test: true lables of testing data verbose: 0, turn off the screen prints clustering_loss: string, clustering layer loss function decoder_loss:, string, decoder loss function clustering_loss_weight: float in [0,1], w_c, harderning_order: odd int, the order of hardening function harderning_strength: float >=1.0, the streng of the harderning compiled: boolean, indicating if the model is compiled or not optmizer: string, keras optimizers lr: learning rate dacay: learning rate dacay Returns: train_loss: training loss test_loss: only if data_test and labels_test are not None in supervised learning process """ if (not compiled): assert clustering_loss_weight <= 1 and clustering_loss_weight >= 0 if optimizer == 'adam': dce_optimizer = optimizers.Adam(lr=lr,decay=decay) elif optimizer == 'sgd': dce_optimizer = optimizers.sgd(lr=lr,decay=decay) else: raise Exception('Input optimizer was not found') self.model.compile(loss={'clustering': clustering_loss, 'decoder_output': decoder_loss}, loss_weights=[clustering_loss_weight, 1 - clustering_loss_weight], optimizer=dce_optimizer) if (labels_train is not None): supervised_learning = True if verbose >= 1: print('Starting supervised learning') else: supervised_learning = False if verbose >= 1: print('Starting unsupervised learning') # initializing model by using sklean-Kmeans as guess kmeans_init = KMeans(n_clusters=self.n_clusters) kmeans_init.build_model() encoder = Model(inputs=self.model.input, outputs=self.model.get_layer(\ name='embedding_layer').output) kmeans_init.model.fit(encoder.predict(data_train)) y_pred_last = kmeans_init.model.labels_ self.model.get_layer(name='clustering').\ set_weights([kmeans_init.model.cluster_centers_]) # Prepare training: p disctribution methods if not supervised_learning: # Unsupervised Learning assert hardening_order in DCE.HARDENING_FUNCS.keys() assert hardening_strength >= 1.0 h_func = DCE.HARDENING_FUNCS[hardening_order] else: # Supervised Learning assert len(labels_train) == len(data_train) assert len(np.unique(labels_train)) == self.n_clusters p = np.zeros(shape=(len(labels_train), self.n_clusters)) for i in range(len(labels_train)): p[i][labels_train[i]] = 1.0 if data_test is not None: assert len(labels_test) == len(data_test) assert len(np.unique(labels_test)) == self.n_clusters p_test = np.zeros(shape=(len(labels_test), self.n_clusters)) for i in range(len(labels_test)): p_test[i][labels_test[i]] = 1.0 validation_loss = [] # training start: loss = [] for iteration in range(int(self.max_iteration)): if iteration % self.update_interval == 0: # updating p for unsupervised learning process q, _ = self.model.predict(data_train) if not supervised_learning: p = DCE.hardening(q, h_func, hardening_strength) # get label change i y_pred = q.argmax(1) delta_label_i = np.sum(y_pred != y_pred_last).\ astype(np.float32) / y_pred.shape[0] y_pred_last = y_pred # exam convergence if iteration > 0 and delta_label_i < self.clustering_tol: print(str(delta_label_i) +' < ' + str(self.clustering_tol)) print('Reached tolerance threshold. 
Stopping training.') break loss.append(self.model.train_on_batch(x=data_train, y=[p,data_train])) if supervised_learning and data_test is not None: validation_loss.append(self.model.test_on_batch( x=data_test, y=[p_test,data_test])) if verbose > 0 and iteration % self.update_interval == 0: print('Epoch: ' + str(iteration)) if verbose == 1: print(' Total_loss = ' + str(loss[iteration][0]) + ';Delta_label = ' + str(delta_label_i)) print(' Clustering_loss = ' + str(loss[iteration][1]) + '; Decoder_loss = ' + str(loss[iteration][2])) if iteration == self.max_iteration - 1: print('Reached maximum iteration. Stopping training.') if data_test is None: return np.array(loss).T else: return [np.array(loss).T, np.array(validation_loss).T] @staticmethod def hardening(q, h_func, stength): """hardening distribution P and return Q Args: q: input distributions. h_func: input harderning function. strength: hardening strength. returns: p: hardened and normatlized distributions. """ q = h_func(q) weight = q ** stength / q.sum(0) return (weight.T / weight.sum(1)).T
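The heart of the DCE record above is the hardening step: soft cluster assignments q are pushed toward 0/1 with an odd-order smoothstep polynomial and then renormalized into the target distribution p that the 'kld' clustering loss trains against. Below is a minimal, self-contained sketch of that step, reusing the 3rd-order polynomial from HARDENING_FUNCS; the toy q matrix is invented purely for illustration.

import numpy as np

# 3rd-order smoothstep, the same polynomial as DCE.HARDENING_FUNCS[3]
h3 = lambda x: (-2*x + 3) * x**2

def harden(q, h_func, strength):
    """Sharpen soft assignments q and renormalize each row into a distribution."""
    q = h_func(q)
    weight = q ** strength / q.sum(0)    # divide by soft cluster frequencies
    return (weight.T / weight.sum(1)).T  # rows sum to 1 again

# toy soft assignments: 3 samples over 2 clusters (illustrative values only)
q = np.array([[0.6, 0.4],
              [0.9, 0.1],
              [0.2, 0.8]])
print(harden(q, h3, strength=2.0).round(3))  # entries pushed toward 0 and 1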
authenticate_active
Generate a WLS 'success' response based on interaction with the user This function creates a WLS response specifying that the principal was authenticated based on 'fresh' interaction with the user (e.g. input of a username and password). Args: request (AuthRequest): the original WAA request principal (AuthPrincipal): the principal authenticated by the WLS auth (str): the authentication method used by the principal. life (int): if specified, the validity (in seconds) of the principal's session with the WLS. sign (bool): whether to sign the response or not. Recommended to leave this at the default value of `True` (see warning below). *args: passed to `AuthResponse.respond_to_request` **kwargs: passed to `AuthResponse.respond_to_request` Returns: An `AuthResponse` instance matching the given arguments. Warning: Responses indicating successful authentication *MUST* be signed by the WLS. It is recommended that you leave `sign` set to `True`, or make sure to sign the response manually afterwards.
import datetime from . import status from .errors import InvalidAuthRequest, ProtocolVersionUnsupported, NoMutualAuthType from .signing import Key from .response import AuthResponse class AuthPrincipal: def __init__(self, userid, auth_methods, ptags=None, session_expiry=None): self.userid = userid self.auth_methods = auth_methods if ptags is None: ptags = [] self.ptags = ptags self.session_expiry = session_expiry class LoginService: """High-level interface to implement a web login service (WLS). This class provides a convenient interface for implementing a WLS with any authentication backend. It is intended to be instantiated with a single private key, which is used to sign the responses it generates. Mechanisms deemed useful for WLS implementation are provided: - storing the list of supported authentication methods, and checking whether the WLS and a WAA's request have an method in common - checking whether the protocol version specified in the WAA request is supported by `ucam_wls` These mechanisms can optionally be turned off. Attributes: key (ucam_wls.signing.Key): a private key to be used to sign responses auth_methods (list): a list of supported authentication methods """ def __init__(self, key, auth_methods): if not isinstance(key, Key): raise TypeError("key must be a ucam_wls.signing.Key instance") self.key = key self.auth_methods = auth_methods def have_mutual_auth_type(self, request): if request.aauth and any(request.aauth): return set(request.aauth) & set(self.auth_methods) != set() else: return True def _pre_response(self, request, skip_handling_check, check_auth_types=True): if not skip_handling_check: if not request.data_valid: raise InvalidAuthRequest if check_auth_types and not self.have_mutual_auth_type(request): raise NoMutualAuthType( "WLS supports %s; WAA wants one of %s" % ( self.auth_methods, request.aauth ) ) if not request.version_supported: raise ProtocolVersionUnsupported(request.ver) def _finish_response(self, response, sign=True, force_signature=False): if sign or response.requires_signature: if not response.is_signed or force_signature: self.key.sign(response) return response # MASKED: authenticate_active function (lines 68-108) def authenticate_passive(self, request, principal, sso=[], sign=True, skip_handling_check=False, *args, **kwargs): """Generate a WLS 'success' response based on a pre-existing identity This function creates a WLS response specifying that the principal was authenticated based on previous successful authentication (e.g. an existing WLS session cookie). Args: request (AuthRequest): the original WAA request principal (AuthPrincipal): the principal authenticated by the WLS sso (list): a list of strings indicating the authentication methods previously used for authentication by the principal. If an empty list is passed, `principal.auth_methods` will be used. sign (bool): whether to sign the response or not. Recommended to leave this at the default value of `True` (see warning below). *args: passed to `AuthResponse.respond_to_request` **kwargs: passed to `AuthResponse.respond_to_request` Returns: An `AuthResponse` instance matching the given arguments. Warning: Responses indicating successful authentication *MUST* be signed by the WLS. It is recommended that you leave `sign` set to `True`, or make sure to sign the response manually afterwards. 
""" self._pre_response(request, skip_handling_check) if request.iact == True: raise ValueError("WAA demanded active authentication (iact == 'yes')") if len(sso) == 0: sso = principal.auth_methods if len(sso) == 0: raise ValueError("no authentication methods specified for `sso`") if principal.session_expiry is not None: life = int((principal.session_expiry - datetime.datetime.utcnow()).total_seconds()) else: life = None response = AuthResponse.respond_to_request( request=request, code=status.SUCCESS, principal=principal.userid, sso=sso, ptags=principal.ptags, life=life, *args, **kwargs ) return self._finish_response(response=response, sign=sign) def generate_failure(self, code, request, msg='', sign=True, skip_handling_check=False, *args, **kwargs): """Generate a response indicating failure. This is to be used in all cases where the outcome of user interaction is not success. This function will refuse to handle a request where the 'fail' parameter is 'yes' (in which case the WLS must not redirect back to the WAA). Args: code (int): the response status code. Values specified in the protocol are available as constants under `ucam_wls.status`. request (AuthRequest): the original WAA request msg (str): an optional message that could be shown to the end user by the WAA sign (bool): whether to sign the response or not. *args: passed to `AuthResponse.respond_to_request` **kwargs: passed to `AuthResponse.respond_to_request` Returns: An `AuthResponse` instance matching the given arguments. Note: Signatures on WLS responses indicating a non-success can optionally be signed. In the interests of security, the default in this function is to go ahead and sign anyway, but this can be turned off if really desired. """ self._pre_response(request, skip_handling_check, check_auth_types=False) if request.fail: raise ValueError("WAA specified that WLS must not redirect " "back to it on failure") if code == status.SUCCESS: raise ValueError("Failure responses must not have success status") response = AuthResponse.respond_to_request( request=request, code=code, *args, **kwargs ) return self._finish_response(response=response, sign=sign)
    def authenticate_active(self, request, principal, auth, life=None,
                            sign=True, skip_handling_check=False,
                            *args, **kwargs):
        """Generate a WLS 'success' response based on interaction with the user

        This function creates a WLS response specifying that the principal
        was authenticated based on 'fresh' interaction with the user (e.g.
        input of a username and password).

        Args:
            request (AuthRequest): the original WAA request
            principal (AuthPrincipal): the principal authenticated by the WLS
            auth (str): the authentication method used by the principal.
            life (int): if specified, the validity (in seconds) of the
                principal's session with the WLS.
            sign (bool): whether to sign the response or not.  Recommended to
                leave this at the default value of `True` (see warning below).
            *args: passed to `AuthResponse.respond_to_request`
            **kwargs: passed to `AuthResponse.respond_to_request`

        Returns:
            An `AuthResponse` instance matching the given arguments.

        Warning:
            Responses indicating successful authentication *MUST* be signed
            by the WLS.  It is recommended that you leave `sign` set to
            `True`, or make sure to sign the response manually afterwards.
        """
        self._pre_response(request, skip_handling_check)

        if request.iact == False:
            raise ValueError("WAA demanded passive authentication (iact == 'no')")

        if life is None and principal.session_expiry is not None:
            life = int((principal.session_expiry - datetime.datetime.utcnow()).total_seconds())

        response = AuthResponse.respond_to_request(
            request=request, code=status.SUCCESS,
            principal=principal.userid, auth=auth, ptags=principal.ptags,
            life=life, *args, **kwargs
        )
        return self._finish_response(response=response, sign=sign)
68
108
import datetime from . import status from .errors import InvalidAuthRequest, ProtocolVersionUnsupported, NoMutualAuthType from .signing import Key from .response import AuthResponse class AuthPrincipal: def __init__(self, userid, auth_methods, ptags=None, session_expiry=None): self.userid = userid self.auth_methods = auth_methods if ptags is None: ptags = [] self.ptags = ptags self.session_expiry = session_expiry class LoginService: """High-level interface to implement a web login service (WLS). This class provides a convenient interface for implementing a WLS with any authentication backend. It is intended to be instantiated with a single private key, which is used to sign the responses it generates. Mechanisms deemed useful for WLS implementation are provided: - storing the list of supported authentication methods, and checking whether the WLS and a WAA's request have an method in common - checking whether the protocol version specified in the WAA request is supported by `ucam_wls` These mechanisms can optionally be turned off. Attributes: key (ucam_wls.signing.Key): a private key to be used to sign responses auth_methods (list): a list of supported authentication methods """ def __init__(self, key, auth_methods): if not isinstance(key, Key): raise TypeError("key must be a ucam_wls.signing.Key instance") self.key = key self.auth_methods = auth_methods def have_mutual_auth_type(self, request): if request.aauth and any(request.aauth): return set(request.aauth) & set(self.auth_methods) != set() else: return True def _pre_response(self, request, skip_handling_check, check_auth_types=True): if not skip_handling_check: if not request.data_valid: raise InvalidAuthRequest if check_auth_types and not self.have_mutual_auth_type(request): raise NoMutualAuthType( "WLS supports %s; WAA wants one of %s" % ( self.auth_methods, request.aauth ) ) if not request.version_supported: raise ProtocolVersionUnsupported(request.ver) def _finish_response(self, response, sign=True, force_signature=False): if sign or response.requires_signature: if not response.is_signed or force_signature: self.key.sign(response) return response def authenticate_active(self, request, principal, auth, life=None, sign=True, skip_handling_check=False, *args, **kwargs): """Generate a WLS 'success' response based on interaction with the user This function creates a WLS response specifying that the principal was authenticated based on 'fresh' interaction with the user (e.g. input of a username and password). Args: request (AuthRequest): the original WAA request principal (AuthPrincipal): the principal authenticated by the WLS auth (str): the authentication method used by the principal. life (int): if specified, the validity (in seconds) of the principal's session with the WLS. sign (bool): whether to sign the response or not. Recommended to leave this at the default value of `True` (see warning below). *args: passed to `AuthResponse.respond_to_request` **kwargs: passed to `AuthResponse.respond_to_request` Returns: An `AuthResponse` instance matching the given arguments. Warning: Responses indicating successful authentication *MUST* be signed by the WLS. It is recommended that you leave `sign` set to `True`, or make sure to sign the response manually afterwards. 
""" self._pre_response(request, skip_handling_check) if request.iact == False: raise ValueError("WAA demanded passive authentication (iact == 'no')") if life is None and principal.session_expiry is not None: life = int((principal.session_expiry - datetime.datetime.utcnow()).total_seconds()) response = AuthResponse.respond_to_request( request=request, code=status.SUCCESS, principal=principal.userid, auth=auth, ptags=principal.ptags, life=life, *args, **kwargs ) return self._finish_response(response=response, sign=sign) def authenticate_passive(self, request, principal, sso=[], sign=True, skip_handling_check=False, *args, **kwargs): """Generate a WLS 'success' response based on a pre-existing identity This function creates a WLS response specifying that the principal was authenticated based on previous successful authentication (e.g. an existing WLS session cookie). Args: request (AuthRequest): the original WAA request principal (AuthPrincipal): the principal authenticated by the WLS sso (list): a list of strings indicating the authentication methods previously used for authentication by the principal. If an empty list is passed, `principal.auth_methods` will be used. sign (bool): whether to sign the response or not. Recommended to leave this at the default value of `True` (see warning below). *args: passed to `AuthResponse.respond_to_request` **kwargs: passed to `AuthResponse.respond_to_request` Returns: An `AuthResponse` instance matching the given arguments. Warning: Responses indicating successful authentication *MUST* be signed by the WLS. It is recommended that you leave `sign` set to `True`, or make sure to sign the response manually afterwards. """ self._pre_response(request, skip_handling_check) if request.iact == True: raise ValueError("WAA demanded active authentication (iact == 'yes')") if len(sso) == 0: sso = principal.auth_methods if len(sso) == 0: raise ValueError("no authentication methods specified for `sso`") if principal.session_expiry is not None: life = int((principal.session_expiry - datetime.datetime.utcnow()).total_seconds()) else: life = None response = AuthResponse.respond_to_request( request=request, code=status.SUCCESS, principal=principal.userid, sso=sso, ptags=principal.ptags, life=life, *args, **kwargs ) return self._finish_response(response=response, sign=sign) def generate_failure(self, code, request, msg='', sign=True, skip_handling_check=False, *args, **kwargs): """Generate a response indicating failure. This is to be used in all cases where the outcome of user interaction is not success. This function will refuse to handle a request where the 'fail' parameter is 'yes' (in which case the WLS must not redirect back to the WAA). Args: code (int): the response status code. Values specified in the protocol are available as constants under `ucam_wls.status`. request (AuthRequest): the original WAA request msg (str): an optional message that could be shown to the end user by the WAA sign (bool): whether to sign the response or not. *args: passed to `AuthResponse.respond_to_request` **kwargs: passed to `AuthResponse.respond_to_request` Returns: An `AuthResponse` instance matching the given arguments. Note: Signatures on WLS responses indicating a non-success can optionally be signed. In the interests of security, the default in this function is to go ahead and sign anyway, but this can be turned off if really desired. 
""" self._pre_response(request, skip_handling_check, check_auth_types=False) if request.fail: raise ValueError("WAA specified that WLS must not redirect " "back to it on failure") if code == status.SUCCESS: raise ValueError("Failure responses must not have success status") response = AuthResponse.respond_to_request( request=request, code=code, *args, **kwargs ) return self._finish_response(response=response, sign=sign)
authenticate_passive
Generate a WLS 'success' response based on a pre-existing identity This function creates a WLS response specifying that the principal was authenticated based on previous successful authentication (e.g. an existing WLS session cookie). Args: request (AuthRequest): the original WAA request principal (AuthPrincipal): the principal authenticated by the WLS sso (list): a list of strings indicating the authentication methods previously used for authentication by the principal. If an empty list is passed, `principal.auth_methods` will be used. sign (bool): whether to sign the response or not. Recommended to leave this at the default value of `True` (see warning below). *args: passed to `AuthResponse.respond_to_request` **kwargs: passed to `AuthResponse.respond_to_request` Returns: An `AuthResponse` instance matching the given arguments. Warning: Responses indicating successful authentication *MUST* be signed by the WLS. It is recommended that you leave `sign` set to `True`, or make sure to sign the response manually afterwards.
import datetime from . import status from .errors import InvalidAuthRequest, ProtocolVersionUnsupported, NoMutualAuthType from .signing import Key from .response import AuthResponse class AuthPrincipal: def __init__(self, userid, auth_methods, ptags=None, session_expiry=None): self.userid = userid self.auth_methods = auth_methods if ptags is None: ptags = [] self.ptags = ptags self.session_expiry = session_expiry class LoginService: """High-level interface to implement a web login service (WLS). This class provides a convenient interface for implementing a WLS with any authentication backend. It is intended to be instantiated with a single private key, which is used to sign the responses it generates. Mechanisms deemed useful for WLS implementation are provided: - storing the list of supported authentication methods, and checking whether the WLS and a WAA's request have an method in common - checking whether the protocol version specified in the WAA request is supported by `ucam_wls` These mechanisms can optionally be turned off. Attributes: key (ucam_wls.signing.Key): a private key to be used to sign responses auth_methods (list): a list of supported authentication methods """ def __init__(self, key, auth_methods): if not isinstance(key, Key): raise TypeError("key must be a ucam_wls.signing.Key instance") self.key = key self.auth_methods = auth_methods def have_mutual_auth_type(self, request): if request.aauth and any(request.aauth): return set(request.aauth) & set(self.auth_methods) != set() else: return True def _pre_response(self, request, skip_handling_check, check_auth_types=True): if not skip_handling_check: if not request.data_valid: raise InvalidAuthRequest if check_auth_types and not self.have_mutual_auth_type(request): raise NoMutualAuthType( "WLS supports %s; WAA wants one of %s" % ( self.auth_methods, request.aauth ) ) if not request.version_supported: raise ProtocolVersionUnsupported(request.ver) def _finish_response(self, response, sign=True, force_signature=False): if sign or response.requires_signature: if not response.is_signed or force_signature: self.key.sign(response) return response def authenticate_active(self, request, principal, auth, life=None, sign=True, skip_handling_check=False, *args, **kwargs): """Generate a WLS 'success' response based on interaction with the user This function creates a WLS response specifying that the principal was authenticated based on 'fresh' interaction with the user (e.g. input of a username and password). Args: request (AuthRequest): the original WAA request principal (AuthPrincipal): the principal authenticated by the WLS auth (str): the authentication method used by the principal. life (int): if specified, the validity (in seconds) of the principal's session with the WLS. sign (bool): whether to sign the response or not. Recommended to leave this at the default value of `True` (see warning below). *args: passed to `AuthResponse.respond_to_request` **kwargs: passed to `AuthResponse.respond_to_request` Returns: An `AuthResponse` instance matching the given arguments. Warning: Responses indicating successful authentication *MUST* be signed by the WLS. It is recommended that you leave `sign` set to `True`, or make sure to sign the response manually afterwards. 
""" self._pre_response(request, skip_handling_check) if request.iact == False: raise ValueError("WAA demanded passive authentication (iact == 'no')") if life is None and principal.session_expiry is not None: life = int((principal.session_expiry - datetime.datetime.utcnow()).total_seconds()) response = AuthResponse.respond_to_request( request=request, code=status.SUCCESS, principal=principal.userid, auth=auth, ptags=principal.ptags, life=life, *args, **kwargs ) return self._finish_response(response=response, sign=sign) # MASKED: authenticate_passive function (lines 110-158) def generate_failure(self, code, request, msg='', sign=True, skip_handling_check=False, *args, **kwargs): """Generate a response indicating failure. This is to be used in all cases where the outcome of user interaction is not success. This function will refuse to handle a request where the 'fail' parameter is 'yes' (in which case the WLS must not redirect back to the WAA). Args: code (int): the response status code. Values specified in the protocol are available as constants under `ucam_wls.status`. request (AuthRequest): the original WAA request msg (str): an optional message that could be shown to the end user by the WAA sign (bool): whether to sign the response or not. *args: passed to `AuthResponse.respond_to_request` **kwargs: passed to `AuthResponse.respond_to_request` Returns: An `AuthResponse` instance matching the given arguments. Note: Signatures on WLS responses indicating a non-success can optionally be signed. In the interests of security, the default in this function is to go ahead and sign anyway, but this can be turned off if really desired. """ self._pre_response(request, skip_handling_check, check_auth_types=False) if request.fail: raise ValueError("WAA specified that WLS must not redirect " "back to it on failure") if code == status.SUCCESS: raise ValueError("Failure responses must not have success status") response = AuthResponse.respond_to_request( request=request, code=code, *args, **kwargs ) return self._finish_response(response=response, sign=sign)
    def authenticate_passive(self, request, principal, sso=[], sign=True,
                             skip_handling_check=False, *args, **kwargs):
        """Generate a WLS 'success' response based on a pre-existing identity

        This function creates a WLS response specifying that the principal
        was authenticated based on previous successful authentication (e.g.
        an existing WLS session cookie).

        Args:
            request (AuthRequest): the original WAA request
            principal (AuthPrincipal): the principal authenticated by the WLS
            sso (list): a list of strings indicating the authentication
                methods previously used for authentication by the principal.
                If an empty list is passed, `principal.auth_methods` will be
                used.
            sign (bool): whether to sign the response or not.  Recommended to
                leave this at the default value of `True` (see warning below).
            *args: passed to `AuthResponse.respond_to_request`
            **kwargs: passed to `AuthResponse.respond_to_request`

        Returns:
            An `AuthResponse` instance matching the given arguments.

        Warning:
            Responses indicating successful authentication *MUST* be signed
            by the WLS.  It is recommended that you leave `sign` set to
            `True`, or make sure to sign the response manually afterwards.
        """
        self._pre_response(request, skip_handling_check)

        if request.iact == True:
            raise ValueError("WAA demanded active authentication (iact == 'yes')")

        if len(sso) == 0:
            sso = principal.auth_methods
        if len(sso) == 0:
            raise ValueError("no authentication methods specified for `sso`")

        if principal.session_expiry is not None:
            life = int((principal.session_expiry - datetime.datetime.utcnow()).total_seconds())
        else:
            life = None

        response = AuthResponse.respond_to_request(
            request=request, code=status.SUCCESS,
            principal=principal.userid, sso=sso, ptags=principal.ptags,
            life=life, *args, **kwargs
        )
        return self._finish_response(response=response, sign=sign)
110
158
import datetime from . import status from .errors import InvalidAuthRequest, ProtocolVersionUnsupported, NoMutualAuthType from .signing import Key from .response import AuthResponse class AuthPrincipal: def __init__(self, userid, auth_methods, ptags=None, session_expiry=None): self.userid = userid self.auth_methods = auth_methods if ptags is None: ptags = [] self.ptags = ptags self.session_expiry = session_expiry class LoginService: """High-level interface to implement a web login service (WLS). This class provides a convenient interface for implementing a WLS with any authentication backend. It is intended to be instantiated with a single private key, which is used to sign the responses it generates. Mechanisms deemed useful for WLS implementation are provided: - storing the list of supported authentication methods, and checking whether the WLS and a WAA's request have an method in common - checking whether the protocol version specified in the WAA request is supported by `ucam_wls` These mechanisms can optionally be turned off. Attributes: key (ucam_wls.signing.Key): a private key to be used to sign responses auth_methods (list): a list of supported authentication methods """ def __init__(self, key, auth_methods): if not isinstance(key, Key): raise TypeError("key must be a ucam_wls.signing.Key instance") self.key = key self.auth_methods = auth_methods def have_mutual_auth_type(self, request): if request.aauth and any(request.aauth): return set(request.aauth) & set(self.auth_methods) != set() else: return True def _pre_response(self, request, skip_handling_check, check_auth_types=True): if not skip_handling_check: if not request.data_valid: raise InvalidAuthRequest if check_auth_types and not self.have_mutual_auth_type(request): raise NoMutualAuthType( "WLS supports %s; WAA wants one of %s" % ( self.auth_methods, request.aauth ) ) if not request.version_supported: raise ProtocolVersionUnsupported(request.ver) def _finish_response(self, response, sign=True, force_signature=False): if sign or response.requires_signature: if not response.is_signed or force_signature: self.key.sign(response) return response def authenticate_active(self, request, principal, auth, life=None, sign=True, skip_handling_check=False, *args, **kwargs): """Generate a WLS 'success' response based on interaction with the user This function creates a WLS response specifying that the principal was authenticated based on 'fresh' interaction with the user (e.g. input of a username and password). Args: request (AuthRequest): the original WAA request principal (AuthPrincipal): the principal authenticated by the WLS auth (str): the authentication method used by the principal. life (int): if specified, the validity (in seconds) of the principal's session with the WLS. sign (bool): whether to sign the response or not. Recommended to leave this at the default value of `True` (see warning below). *args: passed to `AuthResponse.respond_to_request` **kwargs: passed to `AuthResponse.respond_to_request` Returns: An `AuthResponse` instance matching the given arguments. Warning: Responses indicating successful authentication *MUST* be signed by the WLS. It is recommended that you leave `sign` set to `True`, or make sure to sign the response manually afterwards. 
""" self._pre_response(request, skip_handling_check) if request.iact == False: raise ValueError("WAA demanded passive authentication (iact == 'no')") if life is None and principal.session_expiry is not None: life = int((principal.session_expiry - datetime.datetime.utcnow()).total_seconds()) response = AuthResponse.respond_to_request( request=request, code=status.SUCCESS, principal=principal.userid, auth=auth, ptags=principal.ptags, life=life, *args, **kwargs ) return self._finish_response(response=response, sign=sign) def authenticate_passive(self, request, principal, sso=[], sign=True, skip_handling_check=False, *args, **kwargs): """Generate a WLS 'success' response based on a pre-existing identity This function creates a WLS response specifying that the principal was authenticated based on previous successful authentication (e.g. an existing WLS session cookie). Args: request (AuthRequest): the original WAA request principal (AuthPrincipal): the principal authenticated by the WLS sso (list): a list of strings indicating the authentication methods previously used for authentication by the principal. If an empty list is passed, `principal.auth_methods` will be used. sign (bool): whether to sign the response or not. Recommended to leave this at the default value of `True` (see warning below). *args: passed to `AuthResponse.respond_to_request` **kwargs: passed to `AuthResponse.respond_to_request` Returns: An `AuthResponse` instance matching the given arguments. Warning: Responses indicating successful authentication *MUST* be signed by the WLS. It is recommended that you leave `sign` set to `True`, or make sure to sign the response manually afterwards. """ self._pre_response(request, skip_handling_check) if request.iact == True: raise ValueError("WAA demanded active authentication (iact == 'yes')") if len(sso) == 0: sso = principal.auth_methods if len(sso) == 0: raise ValueError("no authentication methods specified for `sso`") if principal.session_expiry is not None: life = int((principal.session_expiry - datetime.datetime.utcnow()).total_seconds()) else: life = None response = AuthResponse.respond_to_request( request=request, code=status.SUCCESS, principal=principal.userid, sso=sso, ptags=principal.ptags, life=life, *args, **kwargs ) return self._finish_response(response=response, sign=sign) def generate_failure(self, code, request, msg='', sign=True, skip_handling_check=False, *args, **kwargs): """Generate a response indicating failure. This is to be used in all cases where the outcome of user interaction is not success. This function will refuse to handle a request where the 'fail' parameter is 'yes' (in which case the WLS must not redirect back to the WAA). Args: code (int): the response status code. Values specified in the protocol are available as constants under `ucam_wls.status`. request (AuthRequest): the original WAA request msg (str): an optional message that could be shown to the end user by the WAA sign (bool): whether to sign the response or not. *args: passed to `AuthResponse.respond_to_request` **kwargs: passed to `AuthResponse.respond_to_request` Returns: An `AuthResponse` instance matching the given arguments. Note: Signatures on WLS responses indicating a non-success can optionally be signed. In the interests of security, the default in this function is to go ahead and sign anyway, but this can be turned off if really desired. 
""" self._pre_response(request, skip_handling_check, check_auth_types=False) if request.fail: raise ValueError("WAA specified that WLS must not redirect " "back to it on failure") if code == status.SUCCESS: raise ValueError("Failure responses must not have success status") response = AuthResponse.respond_to_request( request=request, code=code, *args, **kwargs ) return self._finish_response(response=response, sign=sign)
__init__
:param bool enable_magnetic_store_writes: A flag to enable magnetic store writes. :param 'TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationArgs' magnetic_store_rejected_data_location: The location to write error reports for records rejected asynchronously during magnetic store writes. See Magnetic Store Rejected Data Location below for more details.
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from .. import _utilities from . import outputs __all__ = [ 'TableMagneticStoreWriteProperties', 'TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation', 'TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration', 'TableRetentionProperties', ] @pulumi.output_type class TableMagneticStoreWriteProperties(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "enableMagneticStoreWrites": suggest = "enable_magnetic_store_writes" elif key == "magneticStoreRejectedDataLocation": suggest = "magnetic_store_rejected_data_location" if suggest: pulumi.log.warn(f"Key '{key}' not found in TableMagneticStoreWriteProperties. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: TableMagneticStoreWriteProperties.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: TableMagneticStoreWriteProperties.__key_warning(key) return super().get(key, default) # MASKED: __init__ function (lines 40-50) @property @pulumi.getter(name="enableMagneticStoreWrites") def enable_magnetic_store_writes(self) -> Optional[bool]: """ A flag to enable magnetic store writes. """ return pulumi.get(self, "enable_magnetic_store_writes") @property @pulumi.getter(name="magneticStoreRejectedDataLocation") def magnetic_store_rejected_data_location(self) -> Optional['outputs.TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation']: """ The location to write error reports for records rejected asynchronously during magnetic store writes. See Magnetic Store Rejected Data Location below for more details. """ return pulumi.get(self, "magnetic_store_rejected_data_location") @pulumi.output_type class TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "s3Configuration": suggest = "s3_configuration" if suggest: pulumi.log.warn(f"Key '{key}' not found in TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation.__key_warning(key) return super().get(key, default) def __init__(__self__, *, s3_configuration: Optional['outputs.TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration'] = None): """ :param 'TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3ConfigurationArgs' s3_configuration: Configuration of an S3 location to write error reports for records rejected, asynchronously, during magnetic store writes. See S3 Configuration below for more details. 
""" if s3_configuration is not None: pulumi.set(__self__, "s3_configuration", s3_configuration) @property @pulumi.getter(name="s3Configuration") def s3_configuration(self) -> Optional['outputs.TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration']: """ Configuration of an S3 location to write error reports for records rejected, asynchronously, during magnetic store writes. See S3 Configuration below for more details. """ return pulumi.get(self, "s3_configuration") @pulumi.output_type class TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "bucketName": suggest = "bucket_name" elif key == "encryptionOption": suggest = "encryption_option" elif key == "kmsKeyId": suggest = "kms_key_id" elif key == "objectKeyPrefix": suggest = "object_key_prefix" if suggest: pulumi.log.warn(f"Key '{key}' not found in TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration.__key_warning(key) return super().get(key, default) def __init__(__self__, *, bucket_name: Optional[str] = None, encryption_option: Optional[str] = None, kms_key_id: Optional[str] = None, object_key_prefix: Optional[str] = None): """ :param str bucket_name: Bucket name of the customer S3 bucket. :param str encryption_option: Encryption option for the customer s3 location. Options are S3 server side encryption with an S3-managed key or KMS managed key. Valid values are `SSE_KMS` and `SSE_S3`. :param str kms_key_id: KMS key arn for the customer s3 location when encrypting with a KMS managed key. :param str object_key_prefix: Object key prefix for the customer S3 location. """ if bucket_name is not None: pulumi.set(__self__, "bucket_name", bucket_name) if encryption_option is not None: pulumi.set(__self__, "encryption_option", encryption_option) if kms_key_id is not None: pulumi.set(__self__, "kms_key_id", kms_key_id) if object_key_prefix is not None: pulumi.set(__self__, "object_key_prefix", object_key_prefix) @property @pulumi.getter(name="bucketName") def bucket_name(self) -> Optional[str]: """ Bucket name of the customer S3 bucket. """ return pulumi.get(self, "bucket_name") @property @pulumi.getter(name="encryptionOption") def encryption_option(self) -> Optional[str]: """ Encryption option for the customer s3 location. Options are S3 server side encryption with an S3-managed key or KMS managed key. Valid values are `SSE_KMS` and `SSE_S3`. """ return pulumi.get(self, "encryption_option") @property @pulumi.getter(name="kmsKeyId") def kms_key_id(self) -> Optional[str]: """ KMS key arn for the customer s3 location when encrypting with a KMS managed key. """ return pulumi.get(self, "kms_key_id") @property @pulumi.getter(name="objectKeyPrefix") def object_key_prefix(self) -> Optional[str]: """ Object key prefix for the customer S3 location. 
""" return pulumi.get(self, "object_key_prefix") @pulumi.output_type class TableRetentionProperties(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "magneticStoreRetentionPeriodInDays": suggest = "magnetic_store_retention_period_in_days" elif key == "memoryStoreRetentionPeriodInHours": suggest = "memory_store_retention_period_in_hours" if suggest: pulumi.log.warn(f"Key '{key}' not found in TableRetentionProperties. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: TableRetentionProperties.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: TableRetentionProperties.__key_warning(key) return super().get(key, default) def __init__(__self__, *, magnetic_store_retention_period_in_days: int, memory_store_retention_period_in_hours: int): """ :param int magnetic_store_retention_period_in_days: The duration for which data must be stored in the magnetic store. Minimum value of 1. Maximum value of 73000. :param int memory_store_retention_period_in_hours: The duration for which data must be stored in the memory store. Minimum value of 1. Maximum value of 8766. """ pulumi.set(__self__, "magnetic_store_retention_period_in_days", magnetic_store_retention_period_in_days) pulumi.set(__self__, "memory_store_retention_period_in_hours", memory_store_retention_period_in_hours) @property @pulumi.getter(name="magneticStoreRetentionPeriodInDays") def magnetic_store_retention_period_in_days(self) -> int: """ The duration for which data must be stored in the magnetic store. Minimum value of 1. Maximum value of 73000. """ return pulumi.get(self, "magnetic_store_retention_period_in_days") @property @pulumi.getter(name="memoryStoreRetentionPeriodInHours") def memory_store_retention_period_in_hours(self) -> int: """ The duration for which data must be stored in the memory store. Minimum value of 1. Maximum value of 8766. """ return pulumi.get(self, "memory_store_retention_period_in_hours")
    def __init__(__self__, *,
                 enable_magnetic_store_writes: Optional[bool] = None,
                 magnetic_store_rejected_data_location: Optional['outputs.TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation'] = None):
        """
        :param bool enable_magnetic_store_writes: A flag to enable magnetic store writes.
        :param 'TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationArgs' magnetic_store_rejected_data_location: The location to write error reports for records rejected asynchronously during magnetic store writes. See Magnetic Store Rejected Data Location below for more details.
        """
        if enable_magnetic_store_writes is not None:
            pulumi.set(__self__, "enable_magnetic_store_writes", enable_magnetic_store_writes)
        if magnetic_store_rejected_data_location is not None:
            pulumi.set(__self__, "magnetic_store_rejected_data_location", magnetic_store_rejected_data_location)
40
50
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from .. import _utilities from . import outputs __all__ = [ 'TableMagneticStoreWriteProperties', 'TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation', 'TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration', 'TableRetentionProperties', ] @pulumi.output_type class TableMagneticStoreWriteProperties(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "enableMagneticStoreWrites": suggest = "enable_magnetic_store_writes" elif key == "magneticStoreRejectedDataLocation": suggest = "magnetic_store_rejected_data_location" if suggest: pulumi.log.warn(f"Key '{key}' not found in TableMagneticStoreWriteProperties. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: TableMagneticStoreWriteProperties.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: TableMagneticStoreWriteProperties.__key_warning(key) return super().get(key, default) def __init__(__self__, *, enable_magnetic_store_writes: Optional[bool] = None, magnetic_store_rejected_data_location: Optional['outputs.TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation'] = None): """ :param bool enable_magnetic_store_writes: A flag to enable magnetic store writes. :param 'TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationArgs' magnetic_store_rejected_data_location: The location to write error reports for records rejected asynchronously during magnetic store writes. See Magnetic Store Rejected Data Location below for more details. """ if enable_magnetic_store_writes is not None: pulumi.set(__self__, "enable_magnetic_store_writes", enable_magnetic_store_writes) if magnetic_store_rejected_data_location is not None: pulumi.set(__self__, "magnetic_store_rejected_data_location", magnetic_store_rejected_data_location) @property @pulumi.getter(name="enableMagneticStoreWrites") def enable_magnetic_store_writes(self) -> Optional[bool]: """ A flag to enable magnetic store writes. """ return pulumi.get(self, "enable_magnetic_store_writes") @property @pulumi.getter(name="magneticStoreRejectedDataLocation") def magnetic_store_rejected_data_location(self) -> Optional['outputs.TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation']: """ The location to write error reports for records rejected asynchronously during magnetic store writes. See Magnetic Store Rejected Data Location below for more details. """ return pulumi.get(self, "magnetic_store_rejected_data_location") @pulumi.output_type class TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "s3Configuration": suggest = "s3_configuration" if suggest: pulumi.log.warn(f"Key '{key}' not found in TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation. 
Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation.__key_warning(key) return super().get(key, default) def __init__(__self__, *, s3_configuration: Optional['outputs.TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration'] = None): """ :param 'TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3ConfigurationArgs' s3_configuration: Configuration of an S3 location to write error reports for records rejected, asynchronously, during magnetic store writes. See S3 Configuration below for more details. """ if s3_configuration is not None: pulumi.set(__self__, "s3_configuration", s3_configuration) @property @pulumi.getter(name="s3Configuration") def s3_configuration(self) -> Optional['outputs.TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration']: """ Configuration of an S3 location to write error reports for records rejected, asynchronously, during magnetic store writes. See S3 Configuration below for more details. """ return pulumi.get(self, "s3_configuration") @pulumi.output_type class TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "bucketName": suggest = "bucket_name" elif key == "encryptionOption": suggest = "encryption_option" elif key == "kmsKeyId": suggest = "kms_key_id" elif key == "objectKeyPrefix": suggest = "object_key_prefix" if suggest: pulumi.log.warn(f"Key '{key}' not found in TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration.__key_warning(key) return super().get(key, default) def __init__(__self__, *, bucket_name: Optional[str] = None, encryption_option: Optional[str] = None, kms_key_id: Optional[str] = None, object_key_prefix: Optional[str] = None): """ :param str bucket_name: Bucket name of the customer S3 bucket. :param str encryption_option: Encryption option for the customer s3 location. Options are S3 server side encryption with an S3-managed key or KMS managed key. Valid values are `SSE_KMS` and `SSE_S3`. :param str kms_key_id: KMS key arn for the customer s3 location when encrypting with a KMS managed key. :param str object_key_prefix: Object key prefix for the customer S3 location. """ if bucket_name is not None: pulumi.set(__self__, "bucket_name", bucket_name) if encryption_option is not None: pulumi.set(__self__, "encryption_option", encryption_option) if kms_key_id is not None: pulumi.set(__self__, "kms_key_id", kms_key_id) if object_key_prefix is not None: pulumi.set(__self__, "object_key_prefix", object_key_prefix) @property @pulumi.getter(name="bucketName") def bucket_name(self) -> Optional[str]: """ Bucket name of the customer S3 bucket. 
""" return pulumi.get(self, "bucket_name") @property @pulumi.getter(name="encryptionOption") def encryption_option(self) -> Optional[str]: """ Encryption option for the customer s3 location. Options are S3 server side encryption with an S3-managed key or KMS managed key. Valid values are `SSE_KMS` and `SSE_S3`. """ return pulumi.get(self, "encryption_option") @property @pulumi.getter(name="kmsKeyId") def kms_key_id(self) -> Optional[str]: """ KMS key arn for the customer s3 location when encrypting with a KMS managed key. """ return pulumi.get(self, "kms_key_id") @property @pulumi.getter(name="objectKeyPrefix") def object_key_prefix(self) -> Optional[str]: """ Object key prefix for the customer S3 location. """ return pulumi.get(self, "object_key_prefix") @pulumi.output_type class TableRetentionProperties(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "magneticStoreRetentionPeriodInDays": suggest = "magnetic_store_retention_period_in_days" elif key == "memoryStoreRetentionPeriodInHours": suggest = "memory_store_retention_period_in_hours" if suggest: pulumi.log.warn(f"Key '{key}' not found in TableRetentionProperties. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: TableRetentionProperties.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: TableRetentionProperties.__key_warning(key) return super().get(key, default) def __init__(__self__, *, magnetic_store_retention_period_in_days: int, memory_store_retention_period_in_hours: int): """ :param int magnetic_store_retention_period_in_days: The duration for which data must be stored in the magnetic store. Minimum value of 1. Maximum value of 73000. :param int memory_store_retention_period_in_hours: The duration for which data must be stored in the memory store. Minimum value of 1. Maximum value of 8766. """ pulumi.set(__self__, "magnetic_store_retention_period_in_days", magnetic_store_retention_period_in_days) pulumi.set(__self__, "memory_store_retention_period_in_hours", memory_store_retention_period_in_hours) @property @pulumi.getter(name="magneticStoreRetentionPeriodInDays") def magnetic_store_retention_period_in_days(self) -> int: """ The duration for which data must be stored in the magnetic store. Minimum value of 1. Maximum value of 73000. """ return pulumi.get(self, "magnetic_store_retention_period_in_days") @property @pulumi.getter(name="memoryStoreRetentionPeriodInHours") def memory_store_retention_period_in_hours(self) -> int: """ The duration for which data must be stored in the memory store. Minimum value of 1. Maximum value of 8766. """ return pulumi.get(self, "memory_store_retention_period_in_hours")
__init__
:param str bucket_name: Bucket name of the customer S3 bucket. :param str encryption_option: Encryption option for the customer s3 location. Options are S3 server side encryption with an S3-managed key or KMS managed key. Valid values are `SSE_KMS` and `SSE_S3`. :param str kms_key_id: KMS key arn for the customer s3 location when encrypting with a KMS managed key. :param str object_key_prefix: Object key prefix for the customer S3 location.
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from .. import _utilities from . import outputs __all__ = [ 'TableMagneticStoreWriteProperties', 'TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation', 'TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration', 'TableRetentionProperties', ] @pulumi.output_type class TableMagneticStoreWriteProperties(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "enableMagneticStoreWrites": suggest = "enable_magnetic_store_writes" elif key == "magneticStoreRejectedDataLocation": suggest = "magnetic_store_rejected_data_location" if suggest: pulumi.log.warn(f"Key '{key}' not found in TableMagneticStoreWriteProperties. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: TableMagneticStoreWriteProperties.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: TableMagneticStoreWriteProperties.__key_warning(key) return super().get(key, default) def __init__(__self__, *, enable_magnetic_store_writes: Optional[bool] = None, magnetic_store_rejected_data_location: Optional['outputs.TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation'] = None): """ :param bool enable_magnetic_store_writes: A flag to enable magnetic store writes. :param 'TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationArgs' magnetic_store_rejected_data_location: The location to write error reports for records rejected asynchronously during magnetic store writes. See Magnetic Store Rejected Data Location below for more details. """ if enable_magnetic_store_writes is not None: pulumi.set(__self__, "enable_magnetic_store_writes", enable_magnetic_store_writes) if magnetic_store_rejected_data_location is not None: pulumi.set(__self__, "magnetic_store_rejected_data_location", magnetic_store_rejected_data_location) @property @pulumi.getter(name="enableMagneticStoreWrites") def enable_magnetic_store_writes(self) -> Optional[bool]: """ A flag to enable magnetic store writes. """ return pulumi.get(self, "enable_magnetic_store_writes") @property @pulumi.getter(name="magneticStoreRejectedDataLocation") def magnetic_store_rejected_data_location(self) -> Optional['outputs.TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation']: """ The location to write error reports for records rejected asynchronously during magnetic store writes. See Magnetic Store Rejected Data Location below for more details. """ return pulumi.get(self, "magnetic_store_rejected_data_location") @pulumi.output_type class TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "s3Configuration": suggest = "s3_configuration" if suggest: pulumi.log.warn(f"Key '{key}' not found in TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation. 
Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation.__key_warning(key) return super().get(key, default) def __init__(__self__, *, s3_configuration: Optional['outputs.TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration'] = None): """ :param 'TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3ConfigurationArgs' s3_configuration: Configuration of an S3 location to write error reports for records rejected, asynchronously, during magnetic store writes. See S3 Configuration below for more details. """ if s3_configuration is not None: pulumi.set(__self__, "s3_configuration", s3_configuration) @property @pulumi.getter(name="s3Configuration") def s3_configuration(self) -> Optional['outputs.TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration']: """ Configuration of an S3 location to write error reports for records rejected, asynchronously, during magnetic store writes. See S3 Configuration below for more details. """ return pulumi.get(self, "s3_configuration") @pulumi.output_type class TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "bucketName": suggest = "bucket_name" elif key == "encryptionOption": suggest = "encryption_option" elif key == "kmsKeyId": suggest = "kms_key_id" elif key == "objectKeyPrefix": suggest = "object_key_prefix" if suggest: pulumi.log.warn(f"Key '{key}' not found in TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration.__key_warning(key) return super().get(key, default) # MASKED: __init__ function (lines 130-148) @property @pulumi.getter(name="bucketName") def bucket_name(self) -> Optional[str]: """ Bucket name of the customer S3 bucket. """ return pulumi.get(self, "bucket_name") @property @pulumi.getter(name="encryptionOption") def encryption_option(self) -> Optional[str]: """ Encryption option for the customer s3 location. Options are S3 server side encryption with an S3-managed key or KMS managed key. Valid values are `SSE_KMS` and `SSE_S3`. """ return pulumi.get(self, "encryption_option") @property @pulumi.getter(name="kmsKeyId") def kms_key_id(self) -> Optional[str]: """ KMS key arn for the customer s3 location when encrypting with a KMS managed key. """ return pulumi.get(self, "kms_key_id") @property @pulumi.getter(name="objectKeyPrefix") def object_key_prefix(self) -> Optional[str]: """ Object key prefix for the customer S3 location. 
""" return pulumi.get(self, "object_key_prefix") @pulumi.output_type class TableRetentionProperties(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "magneticStoreRetentionPeriodInDays": suggest = "magnetic_store_retention_period_in_days" elif key == "memoryStoreRetentionPeriodInHours": suggest = "memory_store_retention_period_in_hours" if suggest: pulumi.log.warn(f"Key '{key}' not found in TableRetentionProperties. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: TableRetentionProperties.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: TableRetentionProperties.__key_warning(key) return super().get(key, default) def __init__(__self__, *, magnetic_store_retention_period_in_days: int, memory_store_retention_period_in_hours: int): """ :param int magnetic_store_retention_period_in_days: The duration for which data must be stored in the magnetic store. Minimum value of 1. Maximum value of 73000. :param int memory_store_retention_period_in_hours: The duration for which data must be stored in the memory store. Minimum value of 1. Maximum value of 8766. """ pulumi.set(__self__, "magnetic_store_retention_period_in_days", magnetic_store_retention_period_in_days) pulumi.set(__self__, "memory_store_retention_period_in_hours", memory_store_retention_period_in_hours) @property @pulumi.getter(name="magneticStoreRetentionPeriodInDays") def magnetic_store_retention_period_in_days(self) -> int: """ The duration for which data must be stored in the magnetic store. Minimum value of 1. Maximum value of 73000. """ return pulumi.get(self, "magnetic_store_retention_period_in_days") @property @pulumi.getter(name="memoryStoreRetentionPeriodInHours") def memory_store_retention_period_in_hours(self) -> int: """ The duration for which data must be stored in the memory store. Minimum value of 1. Maximum value of 8766. """ return pulumi.get(self, "memory_store_retention_period_in_hours")
def __init__(__self__, *, bucket_name: Optional[str] = None, encryption_option: Optional[str] = None, kms_key_id: Optional[str] = None, object_key_prefix: Optional[str] = None): """ :param str bucket_name: Bucket name of the customer S3 bucket. :param str encryption_option: Encryption option for the customer s3 location. Options are S3 server side encryption with an S3-managed key or KMS managed key. Valid values are `SSE_KMS` and `SSE_S3`. :param str kms_key_id: KMS key arn for the customer s3 location when encrypting with a KMS managed key. :param str object_key_prefix: Object key prefix for the customer S3 location. """ if bucket_name is not None: pulumi.set(__self__, "bucket_name", bucket_name) if encryption_option is not None: pulumi.set(__self__, "encryption_option", encryption_option) if kms_key_id is not None: pulumi.set(__self__, "kms_key_id", kms_key_id) if object_key_prefix is not None: pulumi.set(__self__, "object_key_prefix", object_key_prefix)
130
148
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from .. import _utilities from . import outputs __all__ = [ 'TableMagneticStoreWriteProperties', 'TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation', 'TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration', 'TableRetentionProperties', ] @pulumi.output_type class TableMagneticStoreWriteProperties(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "enableMagneticStoreWrites": suggest = "enable_magnetic_store_writes" elif key == "magneticStoreRejectedDataLocation": suggest = "magnetic_store_rejected_data_location" if suggest: pulumi.log.warn(f"Key '{key}' not found in TableMagneticStoreWriteProperties. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: TableMagneticStoreWriteProperties.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: TableMagneticStoreWriteProperties.__key_warning(key) return super().get(key, default) def __init__(__self__, *, enable_magnetic_store_writes: Optional[bool] = None, magnetic_store_rejected_data_location: Optional['outputs.TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation'] = None): """ :param bool enable_magnetic_store_writes: A flag to enable magnetic store writes. :param 'TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationArgs' magnetic_store_rejected_data_location: The location to write error reports for records rejected asynchronously during magnetic store writes. See Magnetic Store Rejected Data Location below for more details. """ if enable_magnetic_store_writes is not None: pulumi.set(__self__, "enable_magnetic_store_writes", enable_magnetic_store_writes) if magnetic_store_rejected_data_location is not None: pulumi.set(__self__, "magnetic_store_rejected_data_location", magnetic_store_rejected_data_location) @property @pulumi.getter(name="enableMagneticStoreWrites") def enable_magnetic_store_writes(self) -> Optional[bool]: """ A flag to enable magnetic store writes. """ return pulumi.get(self, "enable_magnetic_store_writes") @property @pulumi.getter(name="magneticStoreRejectedDataLocation") def magnetic_store_rejected_data_location(self) -> Optional['outputs.TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation']: """ The location to write error reports for records rejected asynchronously during magnetic store writes. See Magnetic Store Rejected Data Location below for more details. """ return pulumi.get(self, "magnetic_store_rejected_data_location") @pulumi.output_type class TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "s3Configuration": suggest = "s3_configuration" if suggest: pulumi.log.warn(f"Key '{key}' not found in TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation. 
Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocation.__key_warning(key) return super().get(key, default) def __init__(__self__, *, s3_configuration: Optional['outputs.TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration'] = None): """ :param 'TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3ConfigurationArgs' s3_configuration: Configuration of an S3 location to write error reports for records rejected, asynchronously, during magnetic store writes. See S3 Configuration below for more details. """ if s3_configuration is not None: pulumi.set(__self__, "s3_configuration", s3_configuration) @property @pulumi.getter(name="s3Configuration") def s3_configuration(self) -> Optional['outputs.TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration']: """ Configuration of an S3 location to write error reports for records rejected, asynchronously, during magnetic store writes. See S3 Configuration below for more details. """ return pulumi.get(self, "s3_configuration") @pulumi.output_type class TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "bucketName": suggest = "bucket_name" elif key == "encryptionOption": suggest = "encryption_option" elif key == "kmsKeyId": suggest = "kms_key_id" elif key == "objectKeyPrefix": suggest = "object_key_prefix" if suggest: pulumi.log.warn(f"Key '{key}' not found in TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: TableMagneticStoreWritePropertiesMagneticStoreRejectedDataLocationS3Configuration.__key_warning(key) return super().get(key, default) def __init__(__self__, *, bucket_name: Optional[str] = None, encryption_option: Optional[str] = None, kms_key_id: Optional[str] = None, object_key_prefix: Optional[str] = None): """ :param str bucket_name: Bucket name of the customer S3 bucket. :param str encryption_option: Encryption option for the customer s3 location. Options are S3 server side encryption with an S3-managed key or KMS managed key. Valid values are `SSE_KMS` and `SSE_S3`. :param str kms_key_id: KMS key arn for the customer s3 location when encrypting with a KMS managed key. :param str object_key_prefix: Object key prefix for the customer S3 location. """ if bucket_name is not None: pulumi.set(__self__, "bucket_name", bucket_name) if encryption_option is not None: pulumi.set(__self__, "encryption_option", encryption_option) if kms_key_id is not None: pulumi.set(__self__, "kms_key_id", kms_key_id) if object_key_prefix is not None: pulumi.set(__self__, "object_key_prefix", object_key_prefix) @property @pulumi.getter(name="bucketName") def bucket_name(self) -> Optional[str]: """ Bucket name of the customer S3 bucket. 
""" return pulumi.get(self, "bucket_name") @property @pulumi.getter(name="encryptionOption") def encryption_option(self) -> Optional[str]: """ Encryption option for the customer s3 location. Options are S3 server side encryption with an S3-managed key or KMS managed key. Valid values are `SSE_KMS` and `SSE_S3`. """ return pulumi.get(self, "encryption_option") @property @pulumi.getter(name="kmsKeyId") def kms_key_id(self) -> Optional[str]: """ KMS key arn for the customer s3 location when encrypting with a KMS managed key. """ return pulumi.get(self, "kms_key_id") @property @pulumi.getter(name="objectKeyPrefix") def object_key_prefix(self) -> Optional[str]: """ Object key prefix for the customer S3 location. """ return pulumi.get(self, "object_key_prefix") @pulumi.output_type class TableRetentionProperties(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "magneticStoreRetentionPeriodInDays": suggest = "magnetic_store_retention_period_in_days" elif key == "memoryStoreRetentionPeriodInHours": suggest = "memory_store_retention_period_in_hours" if suggest: pulumi.log.warn(f"Key '{key}' not found in TableRetentionProperties. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: TableRetentionProperties.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: TableRetentionProperties.__key_warning(key) return super().get(key, default) def __init__(__self__, *, magnetic_store_retention_period_in_days: int, memory_store_retention_period_in_hours: int): """ :param int magnetic_store_retention_period_in_days: The duration for which data must be stored in the magnetic store. Minimum value of 1. Maximum value of 73000. :param int memory_store_retention_period_in_hours: The duration for which data must be stored in the memory store. Minimum value of 1. Maximum value of 8766. """ pulumi.set(__self__, "magnetic_store_retention_period_in_days", magnetic_store_retention_period_in_days) pulumi.set(__self__, "memory_store_retention_period_in_hours", memory_store_retention_period_in_hours) @property @pulumi.getter(name="magneticStoreRetentionPeriodInDays") def magnetic_store_retention_period_in_days(self) -> int: """ The duration for which data must be stored in the magnetic store. Minimum value of 1. Maximum value of 73000. """ return pulumi.get(self, "magnetic_store_retention_period_in_days") @property @pulumi.getter(name="memoryStoreRetentionPeriodInHours") def memory_store_retention_period_in_hours(self) -> int: """ The duration for which data must be stored in the memory store. Minimum value of 1. Maximum value of 8766. """ return pulumi.get(self, "memory_store_retention_period_in_hours")
post
Handle the POST request and sign up the user if form validation passes Returns: A redirect or a template with the validation errors
from flask import render_template, flash, redirect, url_for, request from flask.views import MethodView from app.middleware import auth from app.models.user import User from app.validators.register_form import RegisterForm from app.services import avatar_service class RegisterController(MethodView): @auth.optional def get(self): """ Show register form Returns: Register template with form """ return render_template('auth/register.html', form=RegisterForm()) # MASKED: post function (lines 21-45)
@auth.optional def post(self): """ Handle the POST request and sign up the user if form validation passes Returns: A redirect or a template with the validation errors """ form = RegisterForm() if form.validate_on_submit(): form.validate_username(form.username) avatar = 'no-image.png' if 'avatar' in request.files and request.files['avatar']: avatar = avatar_service.save(form.avatar.data) User.create(form.username.data, form.password.data, avatar) flash('Your account has been created. You may now login.', 'info') return redirect(url_for('login')) return render_template('auth/register.html', form=form)
21
45
from flask import render_template, flash, redirect, url_for, request from flask.views import MethodView from app.middleware import auth from app.models.user import User from app.validators.register_form import RegisterForm from app.services import avatar_service class RegisterController(MethodView): @auth.optional def get(self): """ Show register form Returns: Register template with form """ return render_template('auth/register.html', form=RegisterForm()) @auth.optional def post(self): """ Handle the POST request and sign up the user if form validation passes Returns: A redirect or a template with the validation errors """ form = RegisterForm() if form.validate_on_submit(): form.validate_username(form.username) avatar = 'no-image.png' if 'avatar' in request.files and request.files['avatar']: avatar = avatar_service.save(form.avatar.data) User.create(form.username.data, form.password.data, avatar) flash('Your account has been created. You may now login.', 'info') return redirect(url_for('login')) return render_template('auth/register.html', form=form)
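Editor's note: a minimal sketch of how a `MethodView` such as `RegisterController` is typically wired into a Flask app. The import path, URL rule, and endpoint name are assumptions, not taken from this repository.

```python
# Hypothetical registration of the RegisterController MethodView shown above.
# The module path, URL rule, and endpoint name ("register") are assumptions.
from flask import Flask
from app.controllers.register import RegisterController  # hypothetical import path

app = Flask(__name__)
app.add_url_rule(
    "/register",
    view_func=RegisterController.as_view("register"),
    methods=["GET", "POST"],  # dispatched to get() and post() on the view class
)
```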
__init__
Initializer for the neural network model. Args: num_inputs: the dimension of the input data. num_hidden_layers: the number of hidden layers. num_inner_features: the number of features in the hidden layers.
"""Polynomial model class used by agents for building stuff. """ from torch import nn, optim import torch import torch.nn.functional as F from stock_trading_backend.agent.model import Model class NNModel(nn.Module): """Torch neural network model. """ # MASKED: __init__ function (lines 14-29) def forward(self, input_tensor): """Forward pass on the neural network model. Args: input_tensor: the input tensor. Returns: Tensor with model results. """ output = F.relu(self.input_layer(input_tensor)) output = self.hidden_layers(output) output = self.output_layer(output) return output class NeuralNetworkModel(Model): """Neural netowrk model class. """ name = "neural_network_model" def __init__(self, learning_rate=1e-3, num_hidden_layers=1, num_inner_features=100): """Initializer for model class. Args: learning_rate: the learning rate of the model. num_hidden_layers: number of hidden layers in the network. num_inner_features: number of features in the hidden layers. """ super(NeuralNetworkModel, self).__init__() self.model = None self.optimizer = None self.criterion = nn.MSELoss() self.learning_rate = learning_rate self.num_hidden_layers = num_hidden_layers self.num_inner_features = num_inner_features self.id_str = "{}_{}_{}_{}".format(self.name, learning_rate, num_hidden_layers, num_inner_features) def _init_model(self, num_inputs): """Initializes internal linear model. Args: num_inputs: number of inputs that model will have. """ self.model = NNModel(num_inputs, self.num_hidden_layers, self.num_inner_features) self.optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate) def _predict(self, state_action_tensor): """Use provided information to make a prediction. Args: state_action_tensor: pytorch tensor with state-action values. Returns: Predicted values for observation-action tensors. """ if self.model is None: self._init_model(state_action_tensor.shape[1]) return self.model(state_action_tensor).detach().reshape(-1) def _train(self, state_action_tensor, expected_values_tensor): """Train the model for 1 epoch. Args: state_action_tensor: pytorch tensor with state-action expected_values. expected_values: pytorch tensor with expected values for each state-action. Returns: The loss before trainig. """ if self.model is None: self._init_model(state_action_tensor.shape[1]) self.optimizer.zero_grad() output = self.model(state_action_tensor) loss = self.criterion(output, expected_values_tensor) loss_value = loss.data.item() loss.backward() self.optimizer.step() return loss_value
def __init__(self, num_inputs, num_hidden_layers, num_inner_features): """Initializer for the neural network model. Args: num_inputs: the dimension of the input data. num_hidden_layers: the number of hidden layers. num_inner_features: the number of features in the hidden layers. """ super(NNModel, self).__init__() self.input_layer = nn.Linear(num_inputs, num_inner_features) hidden_layers = [] for _ in range(num_hidden_layers): hidden_layers.append(nn.Linear(num_inner_features, num_inner_features)) hidden_layers.append(nn.ReLU()) self.hidden_layers = nn.Sequential(*hidden_layers) self.output_layer = nn.Linear(num_inner_features, 1)
14
29
"""Polynomial model class used by agents for building stuff. """ from torch import nn, optim import torch import torch.nn.functional as F from stock_trading_backend.agent.model import Model class NNModel(nn.Module): """Torch neural network model. """ def __init__(self, num_inputs, num_hidden_layers, num_inner_features): """Initializer for linear model. Args: num_inputs: the dimension of input data. num_hidden_layers: the number of hidden layers. num_inner_features: the number of features in the hidden layers """ super(NNModel, self).__init__() self.input_layer = nn.Linear(num_inputs, num_inner_features) hidden_layers = [] for _ in range(num_hidden_layers): hidden_layers.append(nn.Linear(num_inner_features, num_inner_features)) hidden_layers.append(nn.ReLU()) self.hidden_layers = nn.Sequential(*hidden_layers) self.output_layer = nn.Linear(num_inner_features, 1) def forward(self, input_tensor): """Forward pass on the neural network model. Args: input_tensor: the input tensor. Returns: Tensor with model results. """ output = F.relu(self.input_layer(input_tensor)) output = self.hidden_layers(output) output = self.output_layer(output) return output class NeuralNetworkModel(Model): """Neural netowrk model class. """ name = "neural_network_model" def __init__(self, learning_rate=1e-3, num_hidden_layers=1, num_inner_features=100): """Initializer for model class. Args: learning_rate: the learning rate of the model. num_hidden_layers: number of hidden layers in the network. num_inner_features: number of features in the hidden layers. """ super(NeuralNetworkModel, self).__init__() self.model = None self.optimizer = None self.criterion = nn.MSELoss() self.learning_rate = learning_rate self.num_hidden_layers = num_hidden_layers self.num_inner_features = num_inner_features self.id_str = "{}_{}_{}_{}".format(self.name, learning_rate, num_hidden_layers, num_inner_features) def _init_model(self, num_inputs): """Initializes internal linear model. Args: num_inputs: number of inputs that model will have. """ self.model = NNModel(num_inputs, self.num_hidden_layers, self.num_inner_features) self.optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate) def _predict(self, state_action_tensor): """Use provided information to make a prediction. Args: state_action_tensor: pytorch tensor with state-action values. Returns: Predicted values for observation-action tensors. """ if self.model is None: self._init_model(state_action_tensor.shape[1]) return self.model(state_action_tensor).detach().reshape(-1) def _train(self, state_action_tensor, expected_values_tensor): """Train the model for 1 epoch. Args: state_action_tensor: pytorch tensor with state-action expected_values. expected_values: pytorch tensor with expected values for each state-action. Returns: The loss before trainig. """ if self.model is None: self._init_model(state_action_tensor.shape[1]) self.optimizer.zero_grad() output = self.model(state_action_tensor) loss = self.criterion(output, expected_values_tensor) loss_value = loss.data.item() loss.backward() self.optimizer.step() return loss_value
forward
Forward pass on the neural network model. Args: input_tensor: the input tensor. Returns: Tensor with model results.
"""Polynomial model class used by agents for building stuff. """ from torch import nn, optim import torch import torch.nn.functional as F from stock_trading_backend.agent.model import Model class NNModel(nn.Module): """Torch neural network model. """ def __init__(self, num_inputs, num_hidden_layers, num_inner_features): """Initializer for linear model. Args: num_inputs: the dimension of input data. num_hidden_layers: the number of hidden layers. num_inner_features: the number of features in the hidden layers """ super(NNModel, self).__init__() self.input_layer = nn.Linear(num_inputs, num_inner_features) hidden_layers = [] for _ in range(num_hidden_layers): hidden_layers.append(nn.Linear(num_inner_features, num_inner_features)) hidden_layers.append(nn.ReLU()) self.hidden_layers = nn.Sequential(*hidden_layers) self.output_layer = nn.Linear(num_inner_features, 1) # MASKED: forward function (lines 31-43) class NeuralNetworkModel(Model): """Neural netowrk model class. """ name = "neural_network_model" def __init__(self, learning_rate=1e-3, num_hidden_layers=1, num_inner_features=100): """Initializer for model class. Args: learning_rate: the learning rate of the model. num_hidden_layers: number of hidden layers in the network. num_inner_features: number of features in the hidden layers. """ super(NeuralNetworkModel, self).__init__() self.model = None self.optimizer = None self.criterion = nn.MSELoss() self.learning_rate = learning_rate self.num_hidden_layers = num_hidden_layers self.num_inner_features = num_inner_features self.id_str = "{}_{}_{}_{}".format(self.name, learning_rate, num_hidden_layers, num_inner_features) def _init_model(self, num_inputs): """Initializes internal linear model. Args: num_inputs: number of inputs that model will have. """ self.model = NNModel(num_inputs, self.num_hidden_layers, self.num_inner_features) self.optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate) def _predict(self, state_action_tensor): """Use provided information to make a prediction. Args: state_action_tensor: pytorch tensor with state-action values. Returns: Predicted values for observation-action tensors. """ if self.model is None: self._init_model(state_action_tensor.shape[1]) return self.model(state_action_tensor).detach().reshape(-1) def _train(self, state_action_tensor, expected_values_tensor): """Train the model for 1 epoch. Args: state_action_tensor: pytorch tensor with state-action expected_values. expected_values: pytorch tensor with expected values for each state-action. Returns: The loss before trainig. """ if self.model is None: self._init_model(state_action_tensor.shape[1]) self.optimizer.zero_grad() output = self.model(state_action_tensor) loss = self.criterion(output, expected_values_tensor) loss_value = loss.data.item() loss.backward() self.optimizer.step() return loss_value
def forward(self, input_tensor): """Forward pass on the neural network model. Args: input_tensor: the input tensor. Returns: Tensor with model results. """ output = F.relu(self.input_layer(input_tensor)) output = self.hidden_layers(output) output = self.output_layer(output) return output
31
43
"""Polynomial model class used by agents for building stuff. """ from torch import nn, optim import torch import torch.nn.functional as F from stock_trading_backend.agent.model import Model class NNModel(nn.Module): """Torch neural network model. """ def __init__(self, num_inputs, num_hidden_layers, num_inner_features): """Initializer for linear model. Args: num_inputs: the dimension of input data. num_hidden_layers: the number of hidden layers. num_inner_features: the number of features in the hidden layers """ super(NNModel, self).__init__() self.input_layer = nn.Linear(num_inputs, num_inner_features) hidden_layers = [] for _ in range(num_hidden_layers): hidden_layers.append(nn.Linear(num_inner_features, num_inner_features)) hidden_layers.append(nn.ReLU()) self.hidden_layers = nn.Sequential(*hidden_layers) self.output_layer = nn.Linear(num_inner_features, 1) def forward(self, input_tensor): """Forward pass on the neural network model. Args: input_tensor: the input tensor. Returns: Tensor with model results. """ output = F.relu(self.input_layer(input_tensor)) output = self.hidden_layers(output) output = self.output_layer(output) return output class NeuralNetworkModel(Model): """Neural netowrk model class. """ name = "neural_network_model" def __init__(self, learning_rate=1e-3, num_hidden_layers=1, num_inner_features=100): """Initializer for model class. Args: learning_rate: the learning rate of the model. num_hidden_layers: number of hidden layers in the network. num_inner_features: number of features in the hidden layers. """ super(NeuralNetworkModel, self).__init__() self.model = None self.optimizer = None self.criterion = nn.MSELoss() self.learning_rate = learning_rate self.num_hidden_layers = num_hidden_layers self.num_inner_features = num_inner_features self.id_str = "{}_{}_{}_{}".format(self.name, learning_rate, num_hidden_layers, num_inner_features) def _init_model(self, num_inputs): """Initializes internal linear model. Args: num_inputs: number of inputs that model will have. """ self.model = NNModel(num_inputs, self.num_hidden_layers, self.num_inner_features) self.optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate) def _predict(self, state_action_tensor): """Use provided information to make a prediction. Args: state_action_tensor: pytorch tensor with state-action values. Returns: Predicted values for observation-action tensors. """ if self.model is None: self._init_model(state_action_tensor.shape[1]) return self.model(state_action_tensor).detach().reshape(-1) def _train(self, state_action_tensor, expected_values_tensor): """Train the model for 1 epoch. Args: state_action_tensor: pytorch tensor with state-action expected_values. expected_values: pytorch tensor with expected values for each state-action. Returns: The loss before trainig. """ if self.model is None: self._init_model(state_action_tensor.shape[1]) self.optimizer.zero_grad() output = self.model(state_action_tensor) loss = self.criterion(output, expected_values_tensor) loss_value = loss.data.item() loss.backward() self.optimizer.step() return loss_value
__init__
Initializer for model class. Args: learning_rate: the learning rate of the model. num_hidden_layers: number of hidden layers in the network. num_inner_features: number of features in the hidden layers.
"""Polynomial model class used by agents for building stuff. """ from torch import nn, optim import torch import torch.nn.functional as F from stock_trading_backend.agent.model import Model class NNModel(nn.Module): """Torch neural network model. """ def __init__(self, num_inputs, num_hidden_layers, num_inner_features): """Initializer for linear model. Args: num_inputs: the dimension of input data. num_hidden_layers: the number of hidden layers. num_inner_features: the number of features in the hidden layers """ super(NNModel, self).__init__() self.input_layer = nn.Linear(num_inputs, num_inner_features) hidden_layers = [] for _ in range(num_hidden_layers): hidden_layers.append(nn.Linear(num_inner_features, num_inner_features)) hidden_layers.append(nn.ReLU()) self.hidden_layers = nn.Sequential(*hidden_layers) self.output_layer = nn.Linear(num_inner_features, 1) def forward(self, input_tensor): """Forward pass on the neural network model. Args: input_tensor: the input tensor. Returns: Tensor with model results. """ output = F.relu(self.input_layer(input_tensor)) output = self.hidden_layers(output) output = self.output_layer(output) return output class NeuralNetworkModel(Model): """Neural netowrk model class. """ name = "neural_network_model" # MASKED: __init__ function (lines 51-67) def _init_model(self, num_inputs): """Initializes internal linear model. Args: num_inputs: number of inputs that model will have. """ self.model = NNModel(num_inputs, self.num_hidden_layers, self.num_inner_features) self.optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate) def _predict(self, state_action_tensor): """Use provided information to make a prediction. Args: state_action_tensor: pytorch tensor with state-action values. Returns: Predicted values for observation-action tensors. """ if self.model is None: self._init_model(state_action_tensor.shape[1]) return self.model(state_action_tensor).detach().reshape(-1) def _train(self, state_action_tensor, expected_values_tensor): """Train the model for 1 epoch. Args: state_action_tensor: pytorch tensor with state-action expected_values. expected_values: pytorch tensor with expected values for each state-action. Returns: The loss before trainig. """ if self.model is None: self._init_model(state_action_tensor.shape[1]) self.optimizer.zero_grad() output = self.model(state_action_tensor) loss = self.criterion(output, expected_values_tensor) loss_value = loss.data.item() loss.backward() self.optimizer.step() return loss_value
def __init__(self, learning_rate=1e-3, num_hidden_layers=1, num_inner_features=100): """Initializer for model class. Args: learning_rate: the learning rate of the model. num_hidden_layers: number of hidden layers in the network. num_inner_features: number of features in the hidden layers. """ super(NeuralNetworkModel, self).__init__() self.model = None self.optimizer = None self.criterion = nn.MSELoss() self.learning_rate = learning_rate self.num_hidden_layers = num_hidden_layers self.num_inner_features = num_inner_features self.id_str = "{}_{}_{}_{}".format(self.name, learning_rate, num_hidden_layers, num_inner_features)
51
67
"""Polynomial model class used by agents for building stuff. """ from torch import nn, optim import torch import torch.nn.functional as F from stock_trading_backend.agent.model import Model class NNModel(nn.Module): """Torch neural network model. """ def __init__(self, num_inputs, num_hidden_layers, num_inner_features): """Initializer for linear model. Args: num_inputs: the dimension of input data. num_hidden_layers: the number of hidden layers. num_inner_features: the number of features in the hidden layers """ super(NNModel, self).__init__() self.input_layer = nn.Linear(num_inputs, num_inner_features) hidden_layers = [] for _ in range(num_hidden_layers): hidden_layers.append(nn.Linear(num_inner_features, num_inner_features)) hidden_layers.append(nn.ReLU()) self.hidden_layers = nn.Sequential(*hidden_layers) self.output_layer = nn.Linear(num_inner_features, 1) def forward(self, input_tensor): """Forward pass on the neural network model. Args: input_tensor: the input tensor. Returns: Tensor with model results. """ output = F.relu(self.input_layer(input_tensor)) output = self.hidden_layers(output) output = self.output_layer(output) return output class NeuralNetworkModel(Model): """Neural netowrk model class. """ name = "neural_network_model" def __init__(self, learning_rate=1e-3, num_hidden_layers=1, num_inner_features=100): """Initializer for model class. Args: learning_rate: the learning rate of the model. num_hidden_layers: number of hidden layers in the network. num_inner_features: number of features in the hidden layers. """ super(NeuralNetworkModel, self).__init__() self.model = None self.optimizer = None self.criterion = nn.MSELoss() self.learning_rate = learning_rate self.num_hidden_layers = num_hidden_layers self.num_inner_features = num_inner_features self.id_str = "{}_{}_{}_{}".format(self.name, learning_rate, num_hidden_layers, num_inner_features) def _init_model(self, num_inputs): """Initializes internal linear model. Args: num_inputs: number of inputs that model will have. """ self.model = NNModel(num_inputs, self.num_hidden_layers, self.num_inner_features) self.optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate) def _predict(self, state_action_tensor): """Use provided information to make a prediction. Args: state_action_tensor: pytorch tensor with state-action values. Returns: Predicted values for observation-action tensors. """ if self.model is None: self._init_model(state_action_tensor.shape[1]) return self.model(state_action_tensor).detach().reshape(-1) def _train(self, state_action_tensor, expected_values_tensor): """Train the model for 1 epoch. Args: state_action_tensor: pytorch tensor with state-action expected_values. expected_values: pytorch tensor with expected values for each state-action. Returns: The loss before trainig. """ if self.model is None: self._init_model(state_action_tensor.shape[1]) self.optimizer.zero_grad() output = self.model(state_action_tensor) loss = self.criterion(output, expected_values_tensor) loss_value = loss.data.item() loss.backward() self.optimizer.step() return loss_value
_predict
Use provided information to make a prediction. Args: state_action_tensor: pytorch tensor with state-action values. Returns: Predicted values for observation-action tensors.
"""Polynomial model class used by agents for building stuff. """ from torch import nn, optim import torch import torch.nn.functional as F from stock_trading_backend.agent.model import Model class NNModel(nn.Module): """Torch neural network model. """ def __init__(self, num_inputs, num_hidden_layers, num_inner_features): """Initializer for linear model. Args: num_inputs: the dimension of input data. num_hidden_layers: the number of hidden layers. num_inner_features: the number of features in the hidden layers """ super(NNModel, self).__init__() self.input_layer = nn.Linear(num_inputs, num_inner_features) hidden_layers = [] for _ in range(num_hidden_layers): hidden_layers.append(nn.Linear(num_inner_features, num_inner_features)) hidden_layers.append(nn.ReLU()) self.hidden_layers = nn.Sequential(*hidden_layers) self.output_layer = nn.Linear(num_inner_features, 1) def forward(self, input_tensor): """Forward pass on the neural network model. Args: input_tensor: the input tensor. Returns: Tensor with model results. """ output = F.relu(self.input_layer(input_tensor)) output = self.hidden_layers(output) output = self.output_layer(output) return output class NeuralNetworkModel(Model): """Neural netowrk model class. """ name = "neural_network_model" def __init__(self, learning_rate=1e-3, num_hidden_layers=1, num_inner_features=100): """Initializer for model class. Args: learning_rate: the learning rate of the model. num_hidden_layers: number of hidden layers in the network. num_inner_features: number of features in the hidden layers. """ super(NeuralNetworkModel, self).__init__() self.model = None self.optimizer = None self.criterion = nn.MSELoss() self.learning_rate = learning_rate self.num_hidden_layers = num_hidden_layers self.num_inner_features = num_inner_features self.id_str = "{}_{}_{}_{}".format(self.name, learning_rate, num_hidden_layers, num_inner_features) def _init_model(self, num_inputs): """Initializes internal linear model. Args: num_inputs: number of inputs that model will have. """ self.model = NNModel(num_inputs, self.num_hidden_layers, self.num_inner_features) self.optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate) # MASKED: _predict function (lines 78-89) def _train(self, state_action_tensor, expected_values_tensor): """Train the model for 1 epoch. Args: state_action_tensor: pytorch tensor with state-action expected_values. expected_values: pytorch tensor with expected values for each state-action. Returns: The loss before trainig. """ if self.model is None: self._init_model(state_action_tensor.shape[1]) self.optimizer.zero_grad() output = self.model(state_action_tensor) loss = self.criterion(output, expected_values_tensor) loss_value = loss.data.item() loss.backward() self.optimizer.step() return loss_value
def _predict(self, state_action_tensor): """Use provided information to make a prediction. Args: state_action_tensor: pytorch tensor with state-action values. Returns: Predicted values for observation-action tensors. """ if self.model is None: self._init_model(state_action_tensor.shape[1]) return self.model(state_action_tensor).detach().reshape(-1)
78
89
"""Polynomial model class used by agents for building stuff. """ from torch import nn, optim import torch import torch.nn.functional as F from stock_trading_backend.agent.model import Model class NNModel(nn.Module): """Torch neural network model. """ def __init__(self, num_inputs, num_hidden_layers, num_inner_features): """Initializer for linear model. Args: num_inputs: the dimension of input data. num_hidden_layers: the number of hidden layers. num_inner_features: the number of features in the hidden layers """ super(NNModel, self).__init__() self.input_layer = nn.Linear(num_inputs, num_inner_features) hidden_layers = [] for _ in range(num_hidden_layers): hidden_layers.append(nn.Linear(num_inner_features, num_inner_features)) hidden_layers.append(nn.ReLU()) self.hidden_layers = nn.Sequential(*hidden_layers) self.output_layer = nn.Linear(num_inner_features, 1) def forward(self, input_tensor): """Forward pass on the neural network model. Args: input_tensor: the input tensor. Returns: Tensor with model results. """ output = F.relu(self.input_layer(input_tensor)) output = self.hidden_layers(output) output = self.output_layer(output) return output class NeuralNetworkModel(Model): """Neural netowrk model class. """ name = "neural_network_model" def __init__(self, learning_rate=1e-3, num_hidden_layers=1, num_inner_features=100): """Initializer for model class. Args: learning_rate: the learning rate of the model. num_hidden_layers: number of hidden layers in the network. num_inner_features: number of features in the hidden layers. """ super(NeuralNetworkModel, self).__init__() self.model = None self.optimizer = None self.criterion = nn.MSELoss() self.learning_rate = learning_rate self.num_hidden_layers = num_hidden_layers self.num_inner_features = num_inner_features self.id_str = "{}_{}_{}_{}".format(self.name, learning_rate, num_hidden_layers, num_inner_features) def _init_model(self, num_inputs): """Initializes internal linear model. Args: num_inputs: number of inputs that model will have. """ self.model = NNModel(num_inputs, self.num_hidden_layers, self.num_inner_features) self.optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate) def _predict(self, state_action_tensor): """Use provided information to make a prediction. Args: state_action_tensor: pytorch tensor with state-action values. Returns: Predicted values for observation-action tensors. """ if self.model is None: self._init_model(state_action_tensor.shape[1]) return self.model(state_action_tensor).detach().reshape(-1) def _train(self, state_action_tensor, expected_values_tensor): """Train the model for 1 epoch. Args: state_action_tensor: pytorch tensor with state-action expected_values. expected_values: pytorch tensor with expected values for each state-action. Returns: The loss before trainig. """ if self.model is None: self._init_model(state_action_tensor.shape[1]) self.optimizer.zero_grad() output = self.model(state_action_tensor) loss = self.criterion(output, expected_values_tensor) loss_value = loss.data.item() loss.backward() self.optimizer.step() return loss_value
_train
Train the model for 1 epoch. Args: state_action_tensor: pytorch tensor with state-action values. expected_values_tensor: pytorch tensor with expected values for each state-action. Returns: The loss before training.
"""Polynomial model class used by agents for building stuff. """ from torch import nn, optim import torch import torch.nn.functional as F from stock_trading_backend.agent.model import Model class NNModel(nn.Module): """Torch neural network model. """ def __init__(self, num_inputs, num_hidden_layers, num_inner_features): """Initializer for linear model. Args: num_inputs: the dimension of input data. num_hidden_layers: the number of hidden layers. num_inner_features: the number of features in the hidden layers """ super(NNModel, self).__init__() self.input_layer = nn.Linear(num_inputs, num_inner_features) hidden_layers = [] for _ in range(num_hidden_layers): hidden_layers.append(nn.Linear(num_inner_features, num_inner_features)) hidden_layers.append(nn.ReLU()) self.hidden_layers = nn.Sequential(*hidden_layers) self.output_layer = nn.Linear(num_inner_features, 1) def forward(self, input_tensor): """Forward pass on the neural network model. Args: input_tensor: the input tensor. Returns: Tensor with model results. """ output = F.relu(self.input_layer(input_tensor)) output = self.hidden_layers(output) output = self.output_layer(output) return output class NeuralNetworkModel(Model): """Neural netowrk model class. """ name = "neural_network_model" def __init__(self, learning_rate=1e-3, num_hidden_layers=1, num_inner_features=100): """Initializer for model class. Args: learning_rate: the learning rate of the model. num_hidden_layers: number of hidden layers in the network. num_inner_features: number of features in the hidden layers. """ super(NeuralNetworkModel, self).__init__() self.model = None self.optimizer = None self.criterion = nn.MSELoss() self.learning_rate = learning_rate self.num_hidden_layers = num_hidden_layers self.num_inner_features = num_inner_features self.id_str = "{}_{}_{}_{}".format(self.name, learning_rate, num_hidden_layers, num_inner_features) def _init_model(self, num_inputs): """Initializes internal linear model. Args: num_inputs: number of inputs that model will have. """ self.model = NNModel(num_inputs, self.num_hidden_layers, self.num_inner_features) self.optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate) def _predict(self, state_action_tensor): """Use provided information to make a prediction. Args: state_action_tensor: pytorch tensor with state-action values. Returns: Predicted values for observation-action tensors. """ if self.model is None: self._init_model(state_action_tensor.shape[1]) return self.model(state_action_tensor).detach().reshape(-1) # MASKED: _train function (lines 91-110)
def _train(self, state_action_tensor, expected_values_tensor): """Train the model for 1 epoch. Args: state_action_tensor: pytorch tensor with state-action values. expected_values_tensor: pytorch tensor with expected values for each state-action. Returns: The loss before training. """ if self.model is None: self._init_model(state_action_tensor.shape[1]) self.optimizer.zero_grad() output = self.model(state_action_tensor) loss = self.criterion(output, expected_values_tensor) loss_value = loss.data.item() loss.backward() self.optimizer.step() return loss_value
91
110
"""Polynomial model class used by agents for building stuff. """ from torch import nn, optim import torch import torch.nn.functional as F from stock_trading_backend.agent.model import Model class NNModel(nn.Module): """Torch neural network model. """ def __init__(self, num_inputs, num_hidden_layers, num_inner_features): """Initializer for linear model. Args: num_inputs: the dimension of input data. num_hidden_layers: the number of hidden layers. num_inner_features: the number of features in the hidden layers """ super(NNModel, self).__init__() self.input_layer = nn.Linear(num_inputs, num_inner_features) hidden_layers = [] for _ in range(num_hidden_layers): hidden_layers.append(nn.Linear(num_inner_features, num_inner_features)) hidden_layers.append(nn.ReLU()) self.hidden_layers = nn.Sequential(*hidden_layers) self.output_layer = nn.Linear(num_inner_features, 1) def forward(self, input_tensor): """Forward pass on the neural network model. Args: input_tensor: the input tensor. Returns: Tensor with model results. """ output = F.relu(self.input_layer(input_tensor)) output = self.hidden_layers(output) output = self.output_layer(output) return output class NeuralNetworkModel(Model): """Neural netowrk model class. """ name = "neural_network_model" def __init__(self, learning_rate=1e-3, num_hidden_layers=1, num_inner_features=100): """Initializer for model class. Args: learning_rate: the learning rate of the model. num_hidden_layers: number of hidden layers in the network. num_inner_features: number of features in the hidden layers. """ super(NeuralNetworkModel, self).__init__() self.model = None self.optimizer = None self.criterion = nn.MSELoss() self.learning_rate = learning_rate self.num_hidden_layers = num_hidden_layers self.num_inner_features = num_inner_features self.id_str = "{}_{}_{}_{}".format(self.name, learning_rate, num_hidden_layers, num_inner_features) def _init_model(self, num_inputs): """Initializes internal linear model. Args: num_inputs: number of inputs that model will have. """ self.model = NNModel(num_inputs, self.num_hidden_layers, self.num_inner_features) self.optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate) def _predict(self, state_action_tensor): """Use provided information to make a prediction. Args: state_action_tensor: pytorch tensor with state-action values. Returns: Predicted values for observation-action tensors. """ if self.model is None: self._init_model(state_action_tensor.shape[1]) return self.model(state_action_tensor).detach().reshape(-1) def _train(self, state_action_tensor, expected_values_tensor): """Train the model for 1 epoch. Args: state_action_tensor: pytorch tensor with state-action expected_values. expected_values: pytorch tensor with expected values for each state-action. Returns: The loss before trainig. """ if self.model is None: self._init_model(state_action_tensor.shape[1]) self.optimizer.zero_grad() output = self.model(state_action_tensor) loss = self.criterion(output, expected_values_tensor) loss_value = loss.data.item() loss.backward() self.optimizer.step() return loss_value
getNetworkCellularGatewaySettingsDhcp
**List common DHCP settings of MGs** https://developer.cisco.com/meraki/api/#!get-network-cellular-gateway-settings-dhcp - networkId (string)
class MGDHCPSettings(object): def __init__(self, session): super(MGDHCPSettings, self).__init__() self._session = session # MASKED: getNetworkCellularGatewaySettingsDhcp function (lines 6-20) def updateNetworkCellularGatewaySettingsDhcp(self, networkId: str, **kwargs): """ **Update common DHCP settings of MGs** https://developer.cisco.com/meraki/api/#!update-network-cellular-gateway-settings-dhcp - networkId (string) - dhcpLeaseTime (string): DHCP Lease time for all MG of the network. It can be '30 minutes', '1 hour', '4 hours', '12 hours', '1 day' or '1 week'. - dnsNameservers (string): DNS name servers mode for all MG of the network. It can take 4 different values: 'upstream_dns', 'google_dns', 'opendns', 'custom'. - dnsCustomNameservers (array): list of fixed IPs representing the DNS name servers when the mode is 'custom' """ kwargs.update(locals()) metadata = { 'tags': ['MG DHCP settings'], 'operation': 'updateNetworkCellularGatewaySettingsDhcp', } resource = f'/networks/{networkId}/cellularGateway/settings/dhcp' body_params = ['dhcpLeaseTime', 'dnsNameservers', 'dnsCustomNameservers'] payload = {k: v for (k, v) in kwargs.items() if k in body_params} return self._session.put(metadata, resource, payload)
def getNetworkCellularGatewaySettingsDhcp(self, networkId: str): """ **List common DHCP settings of MGs** https://developer.cisco.com/meraki/api/#!get-network-cellular-gateway-settings-dhcp - networkId (string) """ metadata = { 'tags': ['MG DHCP settings'], 'operation': 'getNetworkCellularGatewaySettingsDhcp', } resource = f'/networks/{networkId}/cellularGateway/settings/dhcp' return self._session.get(metadata, resource)
6
20
class MGDHCPSettings(object): def __init__(self, session): super(MGDHCPSettings, self).__init__() self._session = session def getNetworkCellularGatewaySettingsDhcp(self, networkId: str): """ **List common DHCP settings of MGs** https://developer.cisco.com/meraki/api/#!get-network-cellular-gateway-settings-dhcp - networkId (string) """ metadata = { 'tags': ['MG DHCP settings'], 'operation': 'getNetworkCellularGatewaySettingsDhcp', } resource = f'/networks/{networkId}/cellularGateway/settings/dhcp' return self._session.get(metadata, resource) def updateNetworkCellularGatewaySettingsDhcp(self, networkId: str, **kwargs): """ **Update common DHCP settings of MGs** https://developer.cisco.com/meraki/api/#!update-network-cellular-gateway-settings-dhcp - networkId (string) - dhcpLeaseTime (string): DHCP Lease time for all MG of the network. It can be '30 minutes', '1 hour', '4 hours', '12 hours', '1 day' or '1 week'. - dnsNameservers (string): DNS name servers mode for all MG of the network. It can take 4 different values: 'upstream_dns', 'google_dns', 'opendns', 'custom'. - dnsCustomNameservers (array): list of fixed IPs representing the DNS name servers when the mode is 'custom' """ kwargs.update(locals()) metadata = { 'tags': ['MG DHCP settings'], 'operation': 'updateNetworkCellularGatewaySettingsDhcp', } resource = f'/networks/{networkId}/cellularGateway/settings/dhcp' body_params = ['dhcpLeaseTime', 'dnsNameservers', 'dnsCustomNameservers'] payload = {k: v for (k, v) in kwargs.items() if k in body_params} return self._session.put(metadata, resource, payload)
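Editor's note: `self._session.get` wraps an authenticated HTTP call against the Meraki Dashboard API, so the DHCP settings endpoint can also be exercised directly. The sketch below uses plain `requests`; the base URL and the `X-Cisco-Meraki-API-Key` header follow the public Dashboard API documentation and should be treated as assumptions, and the key and network id are placeholders.

```python
# Rough manual equivalent of getNetworkCellularGatewaySettingsDhcp above.
import requests

API_KEY = "YOUR_MERAKI_API_KEY"   # placeholder -- never hard-code a real key
NETWORK_ID = "N_1234567890"       # placeholder network id

resp = requests.get(
    f"https://api.meraki.com/api/v0/networks/{NETWORK_ID}/cellularGateway/settings/dhcp",
    headers={"X-Cisco-Meraki-API-Key": API_KEY, "Accept": "application/json"},
    timeout=30,
)
resp.raise_for_status()
print(resp.json())  # e.g. {'dhcpLeaseTime': '1 day', 'dnsNameservers': 'upstream_dns', ...}
```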
blend
Blend image1 and image2 using 'factor'. A value of factor 0.0 means only image1 is used. A value of 1.0 means only image2 is used. A value between 0.0 and 1.0 means we linearly interpolate the pixel values between the two images. A value greater than 1.0 "extrapolates" the difference between the two pixel values, and we clip the results to values between 0 and 255. Args: image1: An image Tensor. image2: An image Tensor. factor: A floating point value above 0.0. Returns: A blended image Tensor.
# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Various ops for augmentation.""" import math import tensorflow as tf from tensorflow_addons import image as tfa_image # Default replace value REPLACE_VALUE = 128 # MASKED: blend function (lines 25-45) def wrap(image): """Returns 'image' with an extra channel set to all 1s.""" shape = tf.shape(image) extended_channel = tf.ones([shape[0], shape[1], 1], image.dtype) extended = tf.concat([image, extended_channel], 2) return extended def unwrap(image): """Unwraps an image produced by wrap. Where there is a 0 in the last channel for every spatial position, the rest of the three channels in that spatial dimension are grayed (set to 128). Operations like translate and shear on a wrapped Tensor will leave 0s in empty locations. Some transformations look at the intensity of values to do preprocessing, and we want these empty pixels to assume the 'average' value, rather than pure black. Args: image: A 3D Image Tensor with 4 channels. Returns: image: A 3D image Tensor with 3 channels. """ image_shape = tf.shape(image) # Flatten the spatial dimensions. flattened_image = tf.reshape(image, [-1, image_shape[2]]) # Find all pixels where the last channel is zero. alpha_channel = tf.expand_dims(flattened_image[:, image_shape[2] - 1], 1) replace = tf.constant([REPLACE_VALUE, REPLACE_VALUE, REPLACE_VALUE, 1], image.dtype) # Where they are zero, fill them in with 'replace'. flattened_image = tf.where( tf.equal(alpha_channel, 0), tf.ones_like(flattened_image, dtype=image.dtype) * replace, flattened_image) image = tf.reshape(flattened_image, image_shape) image = tf.slice(image, [0, 0, 0], [image_shape[0], image_shape[1], image_shape[2] - 1]) return image def solarize(image, threshold=128): # For each pixel in the image, select the pixel # if the value is less than the threshold. # Otherwise, subtract 255 from the pixel. threshold = tf.saturate_cast(threshold, image.dtype) return tf.where(image < threshold, image, 255 - image) def solarize_add(image, addition=0, threshold=128): # For each pixel in the image less than threshold # we add 'addition' amount to it and then clip the # pixel value to be between 0 and 255. 
The value # of 'addition' is between -128 and 128 threshold = tf.saturate_cast(threshold, image.dtype) added_im = tf.cast(image, tf.int32) + tf.cast(addition, tf.int32) added_im = tf.saturate_cast(added_im, tf.uint8) return tf.where(image < threshold, added_im, image) def invert(image): """Inverts the image pixels.""" return 255 - tf.convert_to_tensor(image) def invert_blend(image, factor): """Implements blend of invert with original image.""" return blend(invert(image), image, factor) def color(image, factor): """Equivalent of PIL Color.""" degenerate = tf.image.grayscale_to_rgb(tf.image.rgb_to_grayscale(image)) return blend(degenerate, image, factor) def contrast(image, factor): """Equivalent of PIL Contrast.""" grayscale_im = tf.image.rgb_to_grayscale(image) mean = tf.reduce_mean(tf.cast(grayscale_im, tf.float32)) mean = tf.saturate_cast(mean + 0.5, tf.uint8) degenerate = tf.ones_like(grayscale_im, dtype=tf.uint8) * mean degenerate = tf.image.grayscale_to_rgb(degenerate) return blend(degenerate, image, factor) def brightness(image, factor): """Equivalent of PIL Brightness.""" degenerate = tf.zeros_like(image) return blend(degenerate, image, factor) def posterize(image, bits): """Equivalent of PIL Posterize.""" shift = tf.cast(8 - bits, image.dtype) return tf.bitwise.left_shift(tf.bitwise.right_shift(image, shift), shift) def rotate(image, degrees): """Equivalent of PIL Rotation.""" # Convert from degrees to radians degrees_to_radians = math.pi / 180.0 radians = degrees * degrees_to_radians # In practice, we should randomize the rotation degrees by flipping # it negatively half the time, but that's done on 'degrees' outside # of the function. image = tfa_image.transform_ops.rotate(wrap(image), radians) return unwrap(image) def translate_x(image, pixels): """Equivalent of PIL Translate in X dimension.""" image = tfa_image.translate_ops.translate(wrap(image), [-pixels, 0]) return unwrap(image) def translate_y(image, pixels): """Equivalent of PIL Translate in Y dimension.""" image = tfa_image.translate_ops.translate(wrap(image), [0, -pixels]) return unwrap(image) def shear_x(image, level): """Equivalent of PIL Shearing in X dimension.""" # Shear parallel to x axis is a projective transform # with a matrix form of: # [1 level # 0 1] image = tfa_image.transform_ops.transform( wrap(image), [1., level, 0., 0., 1., 0., 0., 0.]) return unwrap(image) def shear_y(image, level): """Equivalent of PIL Shearing in Y dimension.""" # Shear parallel to y axis is a projective transform # with a matrix form of: # [1 0 # level 1] image = tfa_image.transform_ops.transform( wrap(image), [1., 0., 0., level, 1., 0., 0., 0.]) return unwrap(image) def autocontrast(image): """Implements Autocontrast function from PIL using TF ops.""" def scale_channel(channel): """Scale the 2D image using the autocontrast rule.""" # A possibly cheaper version can be done using cumsum/unique_with_counts # over the histogram values, rather than iterating over the entire image. # to compute mins and maxes. lo = tf.cast(tf.reduce_min(channel), tf.float32) hi = tf.cast(tf.reduce_max(channel), tf.float32) # Scale the image, making the lowest value 0 and the highest value 255. def scale_values(im): scale = 255.0 / (hi - lo) offset = -lo * scale im = tf.cast(im, tf.float32) * scale + offset return tf.saturate_cast(im, tf.uint8) result = tf.cond(hi > lo, lambda: scale_values(channel), lambda: channel) return result # Assumes RGB for now. Scales each channel independently # and then stacks the result. 
s1 = scale_channel(image[:, :, 0]) s2 = scale_channel(image[:, :, 1]) s3 = scale_channel(image[:, :, 2]) image = tf.stack([s1, s2, s3], 2) return image def autocontrast_blend(image, factor): """Implements blend of autocontrast with original image.""" return blend(autocontrast(image), image, factor) def sharpness(image, factor): """Implements Sharpness function from PIL using TF ops.""" orig_im = image image = tf.cast(image, tf.float32) # Make image 4D for conv operation image = tf.expand_dims(image, 0) # SMOOTH PIL Kernel kernel = tf.constant([[1, 1, 1], [1, 5, 1], [1, 1, 1]], dtype=tf.float32, shape=[3, 3, 1, 1]) / 13. # Tile across channel dimension kernel = tf.tile(kernel, [1, 1, 3, 1]) strides = [1, 1, 1, 1] degenerate = tf.nn.depthwise_conv2d( image, kernel, strides, padding='VALID', dilations=[1, 1]) degenerate = tf.squeeze(tf.saturate_cast(degenerate, tf.uint8), [0]) # For the borders of the resulting image, fill in the values of the # original image. mask = tf.ones_like(degenerate) padded_mask = tf.pad(mask, [[1, 1], [1, 1], [0, 0]]) padded_degenerate = tf.pad(degenerate, [[1, 1], [1, 1], [0, 0]]) result = tf.where(tf.equal(padded_mask, 1), padded_degenerate, orig_im) # Blend the final result return blend(result, orig_im, factor) def equalize(image): """Implements Equalize function from PIL using TF ops.""" def scale_channel(im, c): """Scale the data in the channel to implement equalize.""" im = tf.cast(im[:, :, c], tf.int32) # Compute the histogram of the image channel. histo = tf.histogram_fixed_width(im, [0, 255], nbins=256) # For the purposes of computing the step, filter out the nonzeros. nonzero = tf.where(tf.not_equal(histo, 0)) nonzero_histo = tf.reshape(tf.gather(histo, nonzero), [-1]) step = (tf.reduce_sum(nonzero_histo) - nonzero_histo[-1]) // 255 def build_lut(histo, step): # Compute the cumulative sum, shifting by step // 2 # and then normalization by step. lut = (tf.cumsum(histo) + (step // 2)) // step # Shift lut, prepending with 0. lut = tf.concat([[0], lut[:-1]], 0) # Clip the counts to be in range. This is done # in the C code for image.point. return tf.clip_by_value(lut, 0, 255) # If step is zero, return the original image. Otherwise, build # lut from the full histogram and step and then index from it. result = tf.cond( tf.equal(step, 0), lambda: im, lambda: tf.gather(build_lut(histo, step), im)) return tf.cast(result, tf.uint8) # Assumes RGB for now. Scales each channel independently # and then stacks the result. 
s1 = scale_channel(image, 0) s2 = scale_channel(image, 1) s3 = scale_channel(image, 2) image = tf.stack([s1, s2, s3], 2) return image def equalize_blend(image, factor): """Implements blend of equalize with original image.""" return blend(equalize(image), image, factor) def _convolve_image_with_kernel(image, kernel): num_channels = tf.shape(image)[-1] kernel = tf.tile(kernel, [1, 1, num_channels, 1]) image = tf.expand_dims(image, axis=0) convolved_im = tf.nn.depthwise_conv2d( tf.cast(image, tf.float32), kernel, strides=[1, 1, 1, 1], padding='SAME') # adding 0.5 for future rounding, same as in PIL: # https://github.com/python-pillow/Pillow/blob/555e305a60d7fcefd1ad4aa6c8fd879e2f474192/src/libImaging/Filter.c#L101 # pylint: disable=line-too-long convolved_im = convolved_im + 0.5 return tf.squeeze(convolved_im, axis=0) def blur(image, factor): """Blur with the same kernel as ImageFilter.BLUR.""" # See https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageFilter.py # pylint: disable=line-too-long # class BLUR(BuiltinFilter): # name = "Blur" # # fmt: off # filterargs = (5, 5), 16, 0, ( # 1, 1, 1, 1, 1, # 1, 0, 0, 0, 1, # 1, 0, 0, 0, 1, # 1, 0, 0, 0, 1, # 1, 1, 1, 1, 1, # ) # # fmt: on # # filterargs are following: # (kernel_size_x, kernel_size_y), divisor, offset, kernel # blur_kernel = tf.constant( [[1., 1., 1., 1., 1.], [1., 0., 0., 0., 1.], [1., 0., 0., 0., 1.], [1., 0., 0., 0., 1.], [1., 1., 1., 1., 1.]], dtype=tf.float32, shape=[5, 5, 1, 1]) / 16.0 blurred_im = _convolve_image_with_kernel(image, blur_kernel) return blend(image, blurred_im, factor) def smooth(image, factor): """Smooth with the same kernel as ImageFilter.SMOOTH.""" # See https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageFilter.py # pylint: disable=line-too-long # class SMOOTH(BuiltinFilter): # name = "Smooth" # # fmt: off # filterargs = (3, 3), 13, 0, ( # 1, 1, 1, # 1, 5, 1, # 1, 1, 1, # ) # # fmt: on # # filterargs are following: # (kernel_size_x, kernel_size_y), divisor, offset, kernel # smooth_kernel = tf.constant([[1., 1., 1.], [1., 5., 1.], [1., 1., 1.]], dtype=tf.float32, shape=[3, 3, 1, 1]) / 13.0 smoothed_im = _convolve_image_with_kernel(image, smooth_kernel) return blend(image, smoothed_im, factor) def rescale(image, level): """Rescales image and enlarged cornet.""" # See tf.image.ResizeMethod for full list size = image.shape[:2] scale = level * 0.25 scale_height = tf.cast(scale * size[0], tf.int32) scale_width = tf.cast(scale * size[1], tf.int32) cropped_image = tf.image.crop_to_bounding_box( image, offset_height=scale_height, offset_width=scale_width, target_height=size[0] - scale_height, target_width=size[1] - scale_width) rescaled = tf.image.resize(cropped_image, size, tf.image.ResizeMethod.BICUBIC) return tf.saturate_cast(rescaled, tf.uint8) NAME_TO_FUNC = { 'Identity': tf.identity, 'AutoContrast': autocontrast, 'AutoContrastBlend': autocontrast_blend, 'Equalize': equalize, 'EqualizeBlend': equalize_blend, 'Invert': invert, 'InvertBlend': invert_blend, 'Rotate': rotate, 'Posterize': posterize, 'Solarize': solarize, 'SolarizeAdd': solarize_add, 'Color': color, 'Contrast': contrast, 'Brightness': brightness, 'Sharpness': sharpness, 'ShearX': shear_x, 'ShearY': shear_y, 'TranslateX': translate_x, 'TranslateY': translate_y, 'Blur': blur, 'Smooth': smooth, 'Rescale': rescale, }
def blend(image1, image2, factor):
  """Blend image1 and image2 using 'factor'.

  A value of factor 0.0 means only image1 is used. A value of 1.0 means only
  image2 is used. A value between 0.0 and 1.0 means we linearly interpolate
  the pixel values between the two images. A value greater than 1.0
  "extrapolates" the difference between the two pixel values, and we clip the
  results to values between 0 and 255.

  Args:
    image1: An image Tensor.
    image2: An image Tensor.
    factor: A floating point value of 0.0 or greater.

  Returns:
    A blended image Tensor.
  """
  image1 = tf.cast(image1, tf.float32)
  image2 = tf.cast(image2, tf.float32)
  return tf.saturate_cast(image1 + factor * (image2 - image1), tf.uint8)
25
45
# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Various ops for augmentation.""" import math import tensorflow as tf from tensorflow_addons import image as tfa_image # Default replace value REPLACE_VALUE = 128 def blend(image1, image2, factor): """Blend image1 and image2 using 'factor'. A value of factor 0.0 means only image1 is used. A value of 1.0 means only image2 is used. A value between 0.0 and 1.0 means we linearly interpolate the pixel values between the two images. A value greater than 1.0 "extrapolates" the difference between the two pixel values, and we clip the results to values between 0 and 255. Args: image1: An image Tensor. image2: An image Tensor. factor: A floating point value above 0.0. Returns: A blended image Tensor. """ image1 = tf.cast(image1, tf.float32) image2 = tf.cast(image2, tf.float32) return tf.saturate_cast(image1 + factor * (image2 - image1), tf.uint8) def wrap(image): """Returns 'image' with an extra channel set to all 1s.""" shape = tf.shape(image) extended_channel = tf.ones([shape[0], shape[1], 1], image.dtype) extended = tf.concat([image, extended_channel], 2) return extended def unwrap(image): """Unwraps an image produced by wrap. Where there is a 0 in the last channel for every spatial position, the rest of the three channels in that spatial dimension are grayed (set to 128). Operations like translate and shear on a wrapped Tensor will leave 0s in empty locations. Some transformations look at the intensity of values to do preprocessing, and we want these empty pixels to assume the 'average' value, rather than pure black. Args: image: A 3D Image Tensor with 4 channels. Returns: image: A 3D image Tensor with 3 channels. """ image_shape = tf.shape(image) # Flatten the spatial dimensions. flattened_image = tf.reshape(image, [-1, image_shape[2]]) # Find all pixels where the last channel is zero. alpha_channel = tf.expand_dims(flattened_image[:, image_shape[2] - 1], 1) replace = tf.constant([REPLACE_VALUE, REPLACE_VALUE, REPLACE_VALUE, 1], image.dtype) # Where they are zero, fill them in with 'replace'. flattened_image = tf.where( tf.equal(alpha_channel, 0), tf.ones_like(flattened_image, dtype=image.dtype) * replace, flattened_image) image = tf.reshape(flattened_image, image_shape) image = tf.slice(image, [0, 0, 0], [image_shape[0], image_shape[1], image_shape[2] - 1]) return image def solarize(image, threshold=128): # For each pixel in the image, select the pixel # if the value is less than the threshold. # Otherwise, subtract 255 from the pixel. threshold = tf.saturate_cast(threshold, image.dtype) return tf.where(image < threshold, image, 255 - image) def solarize_add(image, addition=0, threshold=128): # For each pixel in the image less than threshold # we add 'addition' amount to it and then clip the # pixel value to be between 0 and 255. 
The value # of 'addition' is between -128 and 128 threshold = tf.saturate_cast(threshold, image.dtype) added_im = tf.cast(image, tf.int32) + tf.cast(addition, tf.int32) added_im = tf.saturate_cast(added_im, tf.uint8) return tf.where(image < threshold, added_im, image) def invert(image): """Inverts the image pixels.""" return 255 - tf.convert_to_tensor(image) def invert_blend(image, factor): """Implements blend of invert with original image.""" return blend(invert(image), image, factor) def color(image, factor): """Equivalent of PIL Color.""" degenerate = tf.image.grayscale_to_rgb(tf.image.rgb_to_grayscale(image)) return blend(degenerate, image, factor) def contrast(image, factor): """Equivalent of PIL Contrast.""" grayscale_im = tf.image.rgb_to_grayscale(image) mean = tf.reduce_mean(tf.cast(grayscale_im, tf.float32)) mean = tf.saturate_cast(mean + 0.5, tf.uint8) degenerate = tf.ones_like(grayscale_im, dtype=tf.uint8) * mean degenerate = tf.image.grayscale_to_rgb(degenerate) return blend(degenerate, image, factor) def brightness(image, factor): """Equivalent of PIL Brightness.""" degenerate = tf.zeros_like(image) return blend(degenerate, image, factor) def posterize(image, bits): """Equivalent of PIL Posterize.""" shift = tf.cast(8 - bits, image.dtype) return tf.bitwise.left_shift(tf.bitwise.right_shift(image, shift), shift) def rotate(image, degrees): """Equivalent of PIL Rotation.""" # Convert from degrees to radians degrees_to_radians = math.pi / 180.0 radians = degrees * degrees_to_radians # In practice, we should randomize the rotation degrees by flipping # it negatively half the time, but that's done on 'degrees' outside # of the function. image = tfa_image.transform_ops.rotate(wrap(image), radians) return unwrap(image) def translate_x(image, pixels): """Equivalent of PIL Translate in X dimension.""" image = tfa_image.translate_ops.translate(wrap(image), [-pixels, 0]) return unwrap(image) def translate_y(image, pixels): """Equivalent of PIL Translate in Y dimension.""" image = tfa_image.translate_ops.translate(wrap(image), [0, -pixels]) return unwrap(image) def shear_x(image, level): """Equivalent of PIL Shearing in X dimension.""" # Shear parallel to x axis is a projective transform # with a matrix form of: # [1 level # 0 1] image = tfa_image.transform_ops.transform( wrap(image), [1., level, 0., 0., 1., 0., 0., 0.]) return unwrap(image) def shear_y(image, level): """Equivalent of PIL Shearing in Y dimension.""" # Shear parallel to y axis is a projective transform # with a matrix form of: # [1 0 # level 1] image = tfa_image.transform_ops.transform( wrap(image), [1., 0., 0., level, 1., 0., 0., 0.]) return unwrap(image) def autocontrast(image): """Implements Autocontrast function from PIL using TF ops.""" def scale_channel(channel): """Scale the 2D image using the autocontrast rule.""" # A possibly cheaper version can be done using cumsum/unique_with_counts # over the histogram values, rather than iterating over the entire image. # to compute mins and maxes. lo = tf.cast(tf.reduce_min(channel), tf.float32) hi = tf.cast(tf.reduce_max(channel), tf.float32) # Scale the image, making the lowest value 0 and the highest value 255. def scale_values(im): scale = 255.0 / (hi - lo) offset = -lo * scale im = tf.cast(im, tf.float32) * scale + offset return tf.saturate_cast(im, tf.uint8) result = tf.cond(hi > lo, lambda: scale_values(channel), lambda: channel) return result # Assumes RGB for now. Scales each channel independently # and then stacks the result. 
s1 = scale_channel(image[:, :, 0]) s2 = scale_channel(image[:, :, 1]) s3 = scale_channel(image[:, :, 2]) image = tf.stack([s1, s2, s3], 2) return image def autocontrast_blend(image, factor): """Implements blend of autocontrast with original image.""" return blend(autocontrast(image), image, factor) def sharpness(image, factor): """Implements Sharpness function from PIL using TF ops.""" orig_im = image image = tf.cast(image, tf.float32) # Make image 4D for conv operation image = tf.expand_dims(image, 0) # SMOOTH PIL Kernel kernel = tf.constant([[1, 1, 1], [1, 5, 1], [1, 1, 1]], dtype=tf.float32, shape=[3, 3, 1, 1]) / 13. # Tile across channel dimension kernel = tf.tile(kernel, [1, 1, 3, 1]) strides = [1, 1, 1, 1] degenerate = tf.nn.depthwise_conv2d( image, kernel, strides, padding='VALID', dilations=[1, 1]) degenerate = tf.squeeze(tf.saturate_cast(degenerate, tf.uint8), [0]) # For the borders of the resulting image, fill in the values of the # original image. mask = tf.ones_like(degenerate) padded_mask = tf.pad(mask, [[1, 1], [1, 1], [0, 0]]) padded_degenerate = tf.pad(degenerate, [[1, 1], [1, 1], [0, 0]]) result = tf.where(tf.equal(padded_mask, 1), padded_degenerate, orig_im) # Blend the final result return blend(result, orig_im, factor) def equalize(image): """Implements Equalize function from PIL using TF ops.""" def scale_channel(im, c): """Scale the data in the channel to implement equalize.""" im = tf.cast(im[:, :, c], tf.int32) # Compute the histogram of the image channel. histo = tf.histogram_fixed_width(im, [0, 255], nbins=256) # For the purposes of computing the step, filter out the nonzeros. nonzero = tf.where(tf.not_equal(histo, 0)) nonzero_histo = tf.reshape(tf.gather(histo, nonzero), [-1]) step = (tf.reduce_sum(nonzero_histo) - nonzero_histo[-1]) // 255 def build_lut(histo, step): # Compute the cumulative sum, shifting by step // 2 # and then normalization by step. lut = (tf.cumsum(histo) + (step // 2)) // step # Shift lut, prepending with 0. lut = tf.concat([[0], lut[:-1]], 0) # Clip the counts to be in range. This is done # in the C code for image.point. return tf.clip_by_value(lut, 0, 255) # If step is zero, return the original image. Otherwise, build # lut from the full histogram and step and then index from it. result = tf.cond( tf.equal(step, 0), lambda: im, lambda: tf.gather(build_lut(histo, step), im)) return tf.cast(result, tf.uint8) # Assumes RGB for now. Scales each channel independently # and then stacks the result. 
s1 = scale_channel(image, 0) s2 = scale_channel(image, 1) s3 = scale_channel(image, 2) image = tf.stack([s1, s2, s3], 2) return image def equalize_blend(image, factor): """Implements blend of equalize with original image.""" return blend(equalize(image), image, factor) def _convolve_image_with_kernel(image, kernel): num_channels = tf.shape(image)[-1] kernel = tf.tile(kernel, [1, 1, num_channels, 1]) image = tf.expand_dims(image, axis=0) convolved_im = tf.nn.depthwise_conv2d( tf.cast(image, tf.float32), kernel, strides=[1, 1, 1, 1], padding='SAME') # adding 0.5 for future rounding, same as in PIL: # https://github.com/python-pillow/Pillow/blob/555e305a60d7fcefd1ad4aa6c8fd879e2f474192/src/libImaging/Filter.c#L101 # pylint: disable=line-too-long convolved_im = convolved_im + 0.5 return tf.squeeze(convolved_im, axis=0) def blur(image, factor): """Blur with the same kernel as ImageFilter.BLUR.""" # See https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageFilter.py # pylint: disable=line-too-long # class BLUR(BuiltinFilter): # name = "Blur" # # fmt: off # filterargs = (5, 5), 16, 0, ( # 1, 1, 1, 1, 1, # 1, 0, 0, 0, 1, # 1, 0, 0, 0, 1, # 1, 0, 0, 0, 1, # 1, 1, 1, 1, 1, # ) # # fmt: on # # filterargs are following: # (kernel_size_x, kernel_size_y), divisor, offset, kernel # blur_kernel = tf.constant( [[1., 1., 1., 1., 1.], [1., 0., 0., 0., 1.], [1., 0., 0., 0., 1.], [1., 0., 0., 0., 1.], [1., 1., 1., 1., 1.]], dtype=tf.float32, shape=[5, 5, 1, 1]) / 16.0 blurred_im = _convolve_image_with_kernel(image, blur_kernel) return blend(image, blurred_im, factor) def smooth(image, factor): """Smooth with the same kernel as ImageFilter.SMOOTH.""" # See https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageFilter.py # pylint: disable=line-too-long # class SMOOTH(BuiltinFilter): # name = "Smooth" # # fmt: off # filterargs = (3, 3), 13, 0, ( # 1, 1, 1, # 1, 5, 1, # 1, 1, 1, # ) # # fmt: on # # filterargs are following: # (kernel_size_x, kernel_size_y), divisor, offset, kernel # smooth_kernel = tf.constant([[1., 1., 1.], [1., 5., 1.], [1., 1., 1.]], dtype=tf.float32, shape=[3, 3, 1, 1]) / 13.0 smoothed_im = _convolve_image_with_kernel(image, smooth_kernel) return blend(image, smoothed_im, factor) def rescale(image, level): """Rescales image and enlarged cornet.""" # See tf.image.ResizeMethod for full list size = image.shape[:2] scale = level * 0.25 scale_height = tf.cast(scale * size[0], tf.int32) scale_width = tf.cast(scale * size[1], tf.int32) cropped_image = tf.image.crop_to_bounding_box( image, offset_height=scale_height, offset_width=scale_width, target_height=size[0] - scale_height, target_width=size[1] - scale_width) rescaled = tf.image.resize(cropped_image, size, tf.image.ResizeMethod.BICUBIC) return tf.saturate_cast(rescaled, tf.uint8) NAME_TO_FUNC = { 'Identity': tf.identity, 'AutoContrast': autocontrast, 'AutoContrastBlend': autocontrast_blend, 'Equalize': equalize, 'EqualizeBlend': equalize_blend, 'Invert': invert, 'InvertBlend': invert_blend, 'Rotate': rotate, 'Posterize': posterize, 'Solarize': solarize, 'SolarizeAdd': solarize_add, 'Color': color, 'Contrast': contrast, 'Brightness': brightness, 'Sharpness': sharpness, 'ShearX': shear_x, 'ShearY': shear_y, 'TranslateX': translate_x, 'TranslateY': translate_y, 'Blur': blur, 'Smooth': smooth, 'Rescale': rescale, }
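As a quick illustration of the `blend` record above: it interpolates between the two images in float32 and saturates back to uint8, so factors above 1.0 extrapolate and clip. The following sketch is illustrative only; the tensor shapes and values are assumptions, and it expects TensorFlow 2.x (eager mode) plus the `blend` definition above to be in scope.

import tensorflow as tf

# Two tiny constant "images", purely for illustration.
dark = tf.fill([2, 2, 3], tf.constant(50, dtype=tf.uint8))
bright = tf.fill([2, 2, 3], tf.constant(200, dtype=tf.uint8))

halfway = blend(dark, bright, 0.5)   # 50 + 0.5 * (200 - 50) = 125 everywhere
pushed = blend(dark, bright, 1.7)    # 50 + 1.7 * 150 = 305, saturates to 255
print(halfway.numpy()[0, 0], pushed.numpy()[0, 0])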
get_book_quote_page
Find the ``<a>`` element pointing to the quote page of a book.

Args:
    soup (bs4.element.Tag): connection to the book page.

Returns:
    bs4.element.Tag: ``<a>`` element linking to the book's quote page,
        or ``None`` if the page has no quote section.
""" Scrape quotes, books and authors from ``Good Reads`` website. """ import bs4 from .utils import * def get_author_name(soup): """Get the author's name from its main page. Args: soup (bs4.element.Tag): connection to the author page. Returns: string: name of the author. Examples:: >>> from scrapereads import connect >>> url = 'https://www.goodreads.com/author/show/1077326' >>> soup = connect(url) >>> get_author_name(soup) J.K. Rowling """ author_h1 = soup.find('h1', attrs={'class': 'authorName'}) return author_h1.find('span').text def get_author_desc(soup): """Get the author description / biography. Args: soup (bs4.element.Tag): connection to the author page. Returns: str: long description of the author. Examples:: >>> from scrapereads import connect >>> url = 'https://www.goodreads.com/author/show/1077326' >>> soup = connect(url) >>> get_author_desc(soup) See also: Robert Galbraith Although she writes under the pen name J.K. Rowling, pronounced like rolling, her name when her first Harry Potter book was published was simply Joanne Rowling. ... """ author_info_desc = soup.find('div', attrs={'class': 'aboutAuthorInfo'}) author_info_long = author_info_desc.findAll('span')[-1] long_desc = "" for sentence in author_info_long.children: if isinstance(sentence, bs4.element.Tag): if sentence.name == 'br': long_desc += '\n' else: long_desc += sentence.text else: long_desc += sentence long_desc = long_desc.replace('’', "'") return long_desc def get_author_info(soup): """Get all information from an author (genres, influences, website etc.). Args: soup (bs4.element.Tag): author page connection. Returns: dict """ container = soup.find('div', attrs={'class': 'rightContainer'}) author_info = {} data_div = container.find('br', attrs={'class': 'clear'}) while data_div: if data_div.name: data_class = data_div.get('class')[0] # Information section is finished if data_class == 'aboutAuthorInfo': break # Key elements elif data_class == 'dataTitle': key = data_div.text.strip() author_info[key] = [] # Born section if data_div.text == 'Born': data_div = data_div.next_sibling author_info[key].append(data_div.strip()) # Influences section elif data_div.text == 'Influences': data_div = data_div.next_sibling.next_sibling data_items = data_div.findAll('span')[-1].findAll('a') for data_a in data_items: author_info[key].append(data_a.text.strip()) # Member since section elif data_div.text == 'Member Since': data_div = data_div.next_sibling.next_sibling author_info[key].append(data_div.text.strip()) # Genre, website and other sections else: data_items = data_div.findAll('a') for data_a in data_items: author_info[key].append(data_a.text.strip()) data_div = data_div.next_sibling author_info.update({'Description': get_author_desc(soup)}) return author_info def scrape_quotes_container(soup): """Get the quote container from a quote page. Args: soup (bs4.element.Tag): connection to the quote page. Returns: bs4.element.Tag """ return soup.findAll('div', attrs={'class': 'quotes'}) def scrape_quotes(soup): """Retrieve all ``<div>`` quote element from a quote page. Args: soup (bs4.element.Tag): connection to the quote page. Returns: yield bs4.element.Tag """ for container_div in scrape_quotes_container(soup): quote_div = container_div.find('div', attrs={'class': 'quote'}) while quote_div: if quote_div.name == 'div' and quote_div.get('class') and 'quote' in quote_div.get('class'): yield quote_div quote_div = quote_div.next_sibling def get_quote_text(quote_div): """Get the text from a ``<div>`` quote element. 
Args: quote_div (bs4.element.Tag): ``<div>`` quote element to extract the text. Returns: string """ quote_text = '' text_iterator = quote_div.find('div', attrs={'class': 'quoteText'}).children for text in text_iterator: if text.name == 'br': quote_text += '\n' elif not text.name: quote_text += text.strip() quote_text = process_quote_text(quote_text) return quote_text def scrape_quote_tags(quote_div): """Scrape tags from a ``<div>`` quote element. Args: quote_div (bs4.element.Tag): ``<div>`` quote element from a quote page. Returns: yield ``<a>`` tags """ tags_container = quote_div.find('div', attrs={'class': 'greyText smallText left'}) if tags_container: for tag in tags_container.children: if tag.name == 'a': yield tag return None def get_quote_book(quote_div): """Get the reference (book) from a ``<div>`` quote element. Args: quote_div (bs4.element.Tag): ``<div>`` quote element from a quote page. Returns: bs4.element.Tag """ quote_details = quote_div.find('div', attrs={'class': 'quoteText'}) return quote_details.find('a', attrs={'class': 'authorOrTitle'}) def get_quote_author_name(quote_div): """Get the author's name from a ``<div>`` quote element. Args: quote_div (bs4.element.Tag): ``<div>`` quote element from a quote page. Returns: string """ quote_text = quote_div.find('div', attrs={'class': 'quoteText '}) author_name = quote_text.find('span', attrs={'class': 'authorOrTitle'}).text return remove_punctuation(author_name).title() def get_quote_likes(quote_div): """Get the likes ``<a>`` tag from a ``<div>`` quote element. Args: quote_div (bs4.element.Tag): ``<div>`` quote element from a quote page. Returns: bs4.element.Tag: ``<a>`` tag for likes. """ quote_footer = quote_div.find('div', attrs={'class': 'quoteFooter'}) return quote_footer.find('a', attrs={'class': 'smallText'}) # TODO: deprecate this def get_quote_name_id(quote_div): """Get the name and id of a ``<div>`` quote element. Args: quote_div (bs4.element.Tag): ``<div>`` quote element from a quote page. Returns: tuple: id and name. """ quote_href = get_quote_likes(quote_div).get('href') quote_id = quote_href.split('/')[-1].split('-')[0] quote_name = '-'.join(quote_href.split('/')[-1].split('-')[1:]) return quote_id, quote_name def scrape_author_books(soup): """Retrieve books from an author's page. Args: soup (bs4.element.Tag): connection to an author books page. Returns: yield bs4.element.Tag: ``<tr>`` element. """ table_tr = soup.find('tr') while table_tr: if table_tr.name == 'tr': yield table_tr table_tr = table_tr.next_sibling def get_author_book_title(book_tr): """Get the book title ``<a>`` element from a table ``<tr>`` element from an author page. Args: book_tr (bs4.element.Tag): ``<tr>`` book element. Returns: bs4.element.Tag: book title ``<a>`` element. Examples:: >>> for book_tr in scrape_author_books(soup): ... book_title = get_author_book_title(book_tr) ... print(book_title.text.strip(), book_title.get('href')) The Bell Jar /book/show/6514.The_Bell_Jar Ariel /book/show/395090.Ariel The Collected Poems /book/show/31426.The_Collected_Poems The Unabridged Journals of Sylvia Plath /book/show/11623.The_Unabridged_Journals_of_Sylvia_Plath """ return book_tr.find('a', attrs={'class': 'bookTitle'}) def get_author_book_author(book_tr): """Get the author ``<a>`` element from a table ``<tr>`` element. Args: book_tr (bs4.element.Tag): ``<tr>`` book element. Returns: bs4.element.Tag: author name ``<a>`` element. Examples:: >>> for book_tr in scrape_author_books(soup): ... book_author = get_author_book_author(book_tr) ... 
print(book_author.text, book_author.get('href')) Sylvia Plath https://www.goodreads.com/author/show/4379.Sylvia_Plath Sylvia Plath https://www.goodreads.com/author/show/4379.Sylvia_Plath Sylvia Plath https://www.goodreads.com/author/show/4379.Sylvia_Plath Sylvia Plath https://www.goodreads.com/author/show/4379.Sylvia_Plath Sylvia Plath https://www.goodreads.com/author/show/4379.Sylvia_Plath """ return book_tr.find('a', attrs={'class': 'authorName'}) def get_author_book_ratings(book_tr): """Get the ratings ``<span>`` element from a table ``<tr>`` element from an author page. Args: book_tr (bs4.element.Tag): ``<tr>`` book element. Returns: bs4.element.Tag: ratings ``<span>`` element. Examples:: >>> for book_tr in scrape_author_books(soup): ... ratings_span = get_author_book_ratings(book_tr) ... print(ratings_span.contents[-1]) 4.55 avg rating — 2,414 ratings 3.77 avg rating — 1,689 ratings 4.28 avg rating — 892 ratings 4.54 avg rating — 490 ratings ... """ return book_tr.find('span', attrs={'class': 'minirating'}) def get_author_book_edition(book_tr): """Get the edition ``<a>`` element from a table ``<tr>`` element from an author page. Args: book_tr (bs4.element.Tag): ``<tr>`` book element. Returns: bs4.element.Tag: book edition ``<a>`` element. Examples:: >>> for book_tr in scrape_author_books(soup): ... book_edition = get_author_book_edition(book_tr) ... if book_edition: ... print(book_edition.text, book_edition.get('href')) ... print() 493 editions /work/editions/1385044-the-bell-jar 80 editions /work/editions/1185316-ariel 30 editions /work/editions/1003095-the-collected-poems 45 editions /work/editions/3094683-the-unabridged-journals-of-sylvia-plath ... """ book_details = book_tr.find('span', attrs={'class': 'greyText smallText uitext'}) return book_details.find('a', attrs={'class': 'greyText'}) def get_author_book_date(book_tr): """Get the published date from a table ``<tr>`` element from an author page. Args: book_tr (bs4.element.Tag): ``<tr>`` book element. Returns: int: date of publication Examples:: >>> for book_tr in scrape_author_books(soup): ... book_date = get_author_book_date(book_tr) ... print(book_date) None None 1958 2009 ... """ book_details = book_tr.find('span', attrs={'class': 'greyText smallText uitext'}) book_publish = book_details.contents[-1].replace('—', '').replace('\n', '') book_date = book_publish.replace('published', '').strip() book_date = eval(book_date) if book_date != '' else None return book_date # MASKED: get_book_quote_page function (lines 380-392)
def get_book_quote_page(soup):
    """Find the ``<a>`` element pointing to the quote page of a book.

    Args:
        soup (bs4.element.Tag): connection to the book page.

    Returns:
        bs4.element.Tag: ``<a>`` element linking to the book's quote page,
            or ``None`` if the page has no quote section.

    """
    quote_div = soup.findAll('div', attrs={'class': ' clearFloats bigBox'})
    if quote_div:
        return quote_div[-1].find('a')
    return None
380
392
""" Scrape quotes, books and authors from ``Good Reads`` website. """ import bs4 from .utils import * def get_author_name(soup): """Get the author's name from its main page. Args: soup (bs4.element.Tag): connection to the author page. Returns: string: name of the author. Examples:: >>> from scrapereads import connect >>> url = 'https://www.goodreads.com/author/show/1077326' >>> soup = connect(url) >>> get_author_name(soup) J.K. Rowling """ author_h1 = soup.find('h1', attrs={'class': 'authorName'}) return author_h1.find('span').text def get_author_desc(soup): """Get the author description / biography. Args: soup (bs4.element.Tag): connection to the author page. Returns: str: long description of the author. Examples:: >>> from scrapereads import connect >>> url = 'https://www.goodreads.com/author/show/1077326' >>> soup = connect(url) >>> get_author_desc(soup) See also: Robert Galbraith Although she writes under the pen name J.K. Rowling, pronounced like rolling, her name when her first Harry Potter book was published was simply Joanne Rowling. ... """ author_info_desc = soup.find('div', attrs={'class': 'aboutAuthorInfo'}) author_info_long = author_info_desc.findAll('span')[-1] long_desc = "" for sentence in author_info_long.children: if isinstance(sentence, bs4.element.Tag): if sentence.name == 'br': long_desc += '\n' else: long_desc += sentence.text else: long_desc += sentence long_desc = long_desc.replace('’', "'") return long_desc def get_author_info(soup): """Get all information from an author (genres, influences, website etc.). Args: soup (bs4.element.Tag): author page connection. Returns: dict """ container = soup.find('div', attrs={'class': 'rightContainer'}) author_info = {} data_div = container.find('br', attrs={'class': 'clear'}) while data_div: if data_div.name: data_class = data_div.get('class')[0] # Information section is finished if data_class == 'aboutAuthorInfo': break # Key elements elif data_class == 'dataTitle': key = data_div.text.strip() author_info[key] = [] # Born section if data_div.text == 'Born': data_div = data_div.next_sibling author_info[key].append(data_div.strip()) # Influences section elif data_div.text == 'Influences': data_div = data_div.next_sibling.next_sibling data_items = data_div.findAll('span')[-1].findAll('a') for data_a in data_items: author_info[key].append(data_a.text.strip()) # Member since section elif data_div.text == 'Member Since': data_div = data_div.next_sibling.next_sibling author_info[key].append(data_div.text.strip()) # Genre, website and other sections else: data_items = data_div.findAll('a') for data_a in data_items: author_info[key].append(data_a.text.strip()) data_div = data_div.next_sibling author_info.update({'Description': get_author_desc(soup)}) return author_info def scrape_quotes_container(soup): """Get the quote container from a quote page. Args: soup (bs4.element.Tag): connection to the quote page. Returns: bs4.element.Tag """ return soup.findAll('div', attrs={'class': 'quotes'}) def scrape_quotes(soup): """Retrieve all ``<div>`` quote element from a quote page. Args: soup (bs4.element.Tag): connection to the quote page. Returns: yield bs4.element.Tag """ for container_div in scrape_quotes_container(soup): quote_div = container_div.find('div', attrs={'class': 'quote'}) while quote_div: if quote_div.name == 'div' and quote_div.get('class') and 'quote' in quote_div.get('class'): yield quote_div quote_div = quote_div.next_sibling def get_quote_text(quote_div): """Get the text from a ``<div>`` quote element. 
Args: quote_div (bs4.element.Tag): ``<div>`` quote element to extract the text. Returns: string """ quote_text = '' text_iterator = quote_div.find('div', attrs={'class': 'quoteText'}).children for text in text_iterator: if text.name == 'br': quote_text += '\n' elif not text.name: quote_text += text.strip() quote_text = process_quote_text(quote_text) return quote_text def scrape_quote_tags(quote_div): """Scrape tags from a ``<div>`` quote element. Args: quote_div (bs4.element.Tag): ``<div>`` quote element from a quote page. Returns: yield ``<a>`` tags """ tags_container = quote_div.find('div', attrs={'class': 'greyText smallText left'}) if tags_container: for tag in tags_container.children: if tag.name == 'a': yield tag return None def get_quote_book(quote_div): """Get the reference (book) from a ``<div>`` quote element. Args: quote_div (bs4.element.Tag): ``<div>`` quote element from a quote page. Returns: bs4.element.Tag """ quote_details = quote_div.find('div', attrs={'class': 'quoteText'}) return quote_details.find('a', attrs={'class': 'authorOrTitle'}) def get_quote_author_name(quote_div): """Get the author's name from a ``<div>`` quote element. Args: quote_div (bs4.element.Tag): ``<div>`` quote element from a quote page. Returns: string """ quote_text = quote_div.find('div', attrs={'class': 'quoteText '}) author_name = quote_text.find('span', attrs={'class': 'authorOrTitle'}).text return remove_punctuation(author_name).title() def get_quote_likes(quote_div): """Get the likes ``<a>`` tag from a ``<div>`` quote element. Args: quote_div (bs4.element.Tag): ``<div>`` quote element from a quote page. Returns: bs4.element.Tag: ``<a>`` tag for likes. """ quote_footer = quote_div.find('div', attrs={'class': 'quoteFooter'}) return quote_footer.find('a', attrs={'class': 'smallText'}) # TODO: deprecate this def get_quote_name_id(quote_div): """Get the name and id of a ``<div>`` quote element. Args: quote_div (bs4.element.Tag): ``<div>`` quote element from a quote page. Returns: tuple: id and name. """ quote_href = get_quote_likes(quote_div).get('href') quote_id = quote_href.split('/')[-1].split('-')[0] quote_name = '-'.join(quote_href.split('/')[-1].split('-')[1:]) return quote_id, quote_name def scrape_author_books(soup): """Retrieve books from an author's page. Args: soup (bs4.element.Tag): connection to an author books page. Returns: yield bs4.element.Tag: ``<tr>`` element. """ table_tr = soup.find('tr') while table_tr: if table_tr.name == 'tr': yield table_tr table_tr = table_tr.next_sibling def get_author_book_title(book_tr): """Get the book title ``<a>`` element from a table ``<tr>`` element from an author page. Args: book_tr (bs4.element.Tag): ``<tr>`` book element. Returns: bs4.element.Tag: book title ``<a>`` element. Examples:: >>> for book_tr in scrape_author_books(soup): ... book_title = get_author_book_title(book_tr) ... print(book_title.text.strip(), book_title.get('href')) The Bell Jar /book/show/6514.The_Bell_Jar Ariel /book/show/395090.Ariel The Collected Poems /book/show/31426.The_Collected_Poems The Unabridged Journals of Sylvia Plath /book/show/11623.The_Unabridged_Journals_of_Sylvia_Plath """ return book_tr.find('a', attrs={'class': 'bookTitle'}) def get_author_book_author(book_tr): """Get the author ``<a>`` element from a table ``<tr>`` element. Args: book_tr (bs4.element.Tag): ``<tr>`` book element. Returns: bs4.element.Tag: author name ``<a>`` element. Examples:: >>> for book_tr in scrape_author_books(soup): ... book_author = get_author_book_author(book_tr) ... 
print(book_author.text, book_author.get('href')) Sylvia Plath https://www.goodreads.com/author/show/4379.Sylvia_Plath Sylvia Plath https://www.goodreads.com/author/show/4379.Sylvia_Plath Sylvia Plath https://www.goodreads.com/author/show/4379.Sylvia_Plath Sylvia Plath https://www.goodreads.com/author/show/4379.Sylvia_Plath Sylvia Plath https://www.goodreads.com/author/show/4379.Sylvia_Plath """ return book_tr.find('a', attrs={'class': 'authorName'}) def get_author_book_ratings(book_tr): """Get the ratings ``<span>`` element from a table ``<tr>`` element from an author page. Args: book_tr (bs4.element.Tag): ``<tr>`` book element. Returns: bs4.element.Tag: ratings ``<span>`` element. Examples:: >>> for book_tr in scrape_author_books(soup): ... ratings_span = get_author_book_ratings(book_tr) ... print(ratings_span.contents[-1]) 4.55 avg rating — 2,414 ratings 3.77 avg rating — 1,689 ratings 4.28 avg rating — 892 ratings 4.54 avg rating — 490 ratings ... """ return book_tr.find('span', attrs={'class': 'minirating'}) def get_author_book_edition(book_tr): """Get the edition ``<a>`` element from a table ``<tr>`` element from an author page. Args: book_tr (bs4.element.Tag): ``<tr>`` book element. Returns: bs4.element.Tag: book edition ``<a>`` element. Examples:: >>> for book_tr in scrape_author_books(soup): ... book_edition = get_author_book_edition(book_tr) ... if book_edition: ... print(book_edition.text, book_edition.get('href')) ... print() 493 editions /work/editions/1385044-the-bell-jar 80 editions /work/editions/1185316-ariel 30 editions /work/editions/1003095-the-collected-poems 45 editions /work/editions/3094683-the-unabridged-journals-of-sylvia-plath ... """ book_details = book_tr.find('span', attrs={'class': 'greyText smallText uitext'}) return book_details.find('a', attrs={'class': 'greyText'}) def get_author_book_date(book_tr): """Get the published date from a table ``<tr>`` element from an author page. Args: book_tr (bs4.element.Tag): ``<tr>`` book element. Returns: int: date of publication Examples:: >>> for book_tr in scrape_author_books(soup): ... book_date = get_author_book_date(book_tr) ... print(book_date) None None 1958 2009 ... """ book_details = book_tr.find('span', attrs={'class': 'greyText smallText uitext'}) book_publish = book_details.contents[-1].replace('—', '').replace('\n', '') book_date = book_publish.replace('published', '').strip() book_date = eval(book_date) if book_date != '' else None return book_date def get_book_quote_page(soup): """Find the ``<a>`` element pointing to the quote page of a book. Args: soup (bs4.element.Tag): Returns: """ quote_div = soup.findAll('div', attrs={'class': ' clearFloats bigBox'}) if quote_div: return quote_div[-1].find('a') return None
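A possible usage sketch for the `get_book_quote_page` record above, written in the doctest style the module already uses. The URL is only an example and the output depends on the live page, so treat both as assumptions rather than guaranteed results.

>>> from scrapereads import connect
>>> url = 'https://www.goodreads.com/book/show/6514.The_Bell_Jar'
>>> soup = connect(url)
>>> quote_link = get_book_quote_page(soup)
>>> if quote_link is not None:
...     print(quote_link.get('href'))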
find_generator
The order of any element of the group divides p-1.
Step 1: Calculate all divisors.
Step 2: Test for a random element e of G whether e to the power of a divisor is 1.
    If none of these powers is 1 except e to the power of p-1, a generator is found.
""" This file implements the signature scheme from "Unique Ring Signatures: A Practical Construction" by Matthew Franklin and Haibin Zhang """ import sys import math from random import randint import hashlib from libsig.AbstractRingSignatureScheme import AbstractRingSignatureScheme #from AbstractRingSignatureScheme import AbstractRingSignatureScheme #from libsig import primes # ----------- HELPER FUNCTIONS ----------- # function to find divisors in order to find generators def find_divisors(x): """ This is the "function to find divisors in order to find generators" module. This DocTest verifies that the module is correctly calculating all divisors of a number x. >>> find_divisors(10) [1, 2, 5, 10] >>> find_divisors(112) [1, 2, 4, 7, 8, 14, 16, 28, 56, 112] """ divisors = [ i for i in range(1,x+1) if x % i == 0] return divisors # function to find random generator of G # MASKED: find_generator function (lines 34-69) def list_to_string(input_list): ''' convert a list into a concatenated string of all its elements ''' result = ''.join(map(str,input_list)) return result # ----------- HELPER FUNCTIONS END ----------- class UniqueRingSignature(AbstractRingSignatureScheme): ''' | output: pp = (lamdba, q, G, H, H2) with, | q is prime, | g is generator of G, | G is multiplicative Group with prime order q, | H1 and H2 are two Hash functions H1: {0,1}* -> G, | (as well as H2: {0,1}* -> Zq which is the same). ''' # set prime p (Sophie-Germain and therefore save) #q = 53 q = 59 # find random generator of G g = find_generator(q-1) # hash functions with desired range and the usage of secure hashes h1 = lambda x: int(hashlib.sha256(str(x).encode()).hexdigest(),16)%(UniqueRingSignature.q) # this way to share the information should be improved h2 = lambda x: int(hashlib.sha512(str(x).encode()).hexdigest(),16)%(UniqueRingSignature.q) # list of public keys Rp = list() @staticmethod def keygen(verbose=False): #print("---- KeyGen Started ---- \n") r = randint(1,UniqueRingSignature.q) # x = g**r % q x = pow(UniqueRingSignature.g, r,UniqueRingSignature.q) # y = g**x y = pow(UniqueRingSignature.g, x, UniqueRingSignature.q) if verbose == True: print("KeyGen Config: public key y=" + str(y) + ", private key x=" + str(x) + "\n") print("---- KeyGen Completed ---- \n") # Caution! 
I know, keygen should NOT return the private key, but this is needed to "play" through a whole signature - validation process return x,y @staticmethod def ringsign(x, pubkey, message,verbose=False): ''' input: x is the privkey from user i, | all public keys: pubkeys, | the message output: (R,m, (H(mR)^xi), c1,t1,...,cn,tn), | R: all the pubkeys concatenated, | cj,tj: random number within Zq ''' # calculate R = pk1,pk2,..,pkn R = list_to_string(pubkey) g = UniqueRingSignature.g q = UniqueRingSignature.q h1 = UniqueRingSignature.h1 h2 = UniqueRingSignature.h2 # message + pubkeys concatenated mR = message + str(R) C = list() T = list() A = list() B = list() ri = -1 # simulation step # for i in pubkey: # Step 1: # a = 0 b = 0 c = 0 t = 0 if pow(g,x,q) != i: c, t = randint(1,q), randint(1,q) a = (pow(g, t) * pow(int(i), c)) % q b = (pow(h1(mR), t) * pow(pow(h1(mR),x),c)) % q else: # Step 2: # ri = randint(1, q) a = pow(g, ri, q) b = pow(h1(mR), ri, q) # insert to allocate place c = -1 t = -1 A.append(a) B.append(b) C.append(c) T.append(t) # for end # Step 3: # cj = 0 # list count from 0 ab = ''.join('{}{}'.format(*t) for t in zip(A,B)) usernr = 0 for i in range(len(pubkey)): if pubkey[i] != (pow(g,x,q)): cj = (cj + C[i]) % q else: usernr = i ci = h2(message + R + ab) - (cj % (q-1)) # update ci, this was initialized with -1 C[usernr] = ci ti = ((ri - (C[usernr]*x)) % (q-1)) if ti < 0: ti = (q-1) + ti # update ti, this was initialized with -1 T[usernr] = ti # Step 4: # # concatenate ct: c1,t1,c2,t2,...,cn,tn ct = ','.join('{},{}'.format(*t) for t in zip(C,T)) # returning result result = R + ","+message+","+str(pow(h1(mR),x, q))+"," + ct if verbose == True: print("RingSign Result: "+ result) print("---- RingSign Completed ---- \n") return result @staticmethod def verify(R, message, signature,verbose=False): ''' Input: the public keys R | the message | the signature computed with ringsign Output: whether the message was signed by R or not ''' g = UniqueRingSignature.g q = UniqueRingSignature.q h1 = UniqueRingSignature.h1 h2 = UniqueRingSignature.h2 # parse the signature parsed = signature.split(",") tt = int(parsed[2]) cjs = list() tjs = list() for i in range(0,int(((len(parsed))/2)-1)): cjs.append(int(parsed[3+2*i])) tjs.append(int(parsed[4+2*i])) #print(str(cjs)+" "+str(tjs) + " "+ str(tt)) # check signature # sum of all cjs # =? 
# self.pp['h2'](message + R + gyh1) mR = list_to_string(R) val1 = sum(cjs) % q # for all users in R: # g**tj * yj ** cj , h1(m||R)**tj * tt**cj gyh1 = "" for i in range(len(tjs)): if tjs[i] < 0: tjs[i] = (q-1) + tjs[i] if cjs[i] < 0: cjs[i] = (q-1) + cjs[i] gy = (pow(g,(tjs[i]),q) * (pow((R[i]),(cjs[i]),q))) % q h = (pow(int(h1(message + mR)), int(tjs[i])) * pow(tt,int(cjs[i]))) % q gyh1 = gyh1 + str( gy) + str( h) val2 = str(h2(message + list_to_string(R) + gyh1)) if int(val1) == int(val2): if verbose == True: print("Signature is valid!\n") print("Common Result: " + str(val1)) print("---- Validation Completed ---- \n") return True else: if verbose == True: print("Signature is not valid!\n") print(str(val1) + " != " + str(val2)) print("---- Validation Completed ---- \n") return False def local_test(verbose=True): # verbose output print(verbose) # user 1 will signate and validate later, # therefore his private key is saved for test purposes privKey1,pubkey = UniqueRingSignature.keygen(verbose) UniqueRingSignature.Rp.append(pubkey) a,pubkey = UniqueRingSignature.keygen(verbose) UniqueRingSignature.Rp.append(pubkey) # usernr start from 0 # ringsign(self, privkey, usernr, pubkeys, message) ring = UniqueRingSignature.ringsign(privKey1, UniqueRingSignature.Rp, "asdf", verbose) if verbose: print("Result of Signature Validation:") # verify(pubkeys, message, signature): UniqueRingSignature.verify(UniqueRingSignature.Rp, "asdf", ring, verbose) if __name__ == '__main__': # doctest start import doctest doctest.testmod() if len(sys.argv) > 1: verbose = False if sys.argv[1] == "True": verbose = True # run a local test local_test(verbose)
def find_generator(p):
    '''
    The order of any element of the group divides p-1.
    Step 1: Calculate all divisors.
    Step 2: Test for a random element e of G whether e to the power of a divisor is 1.
        If none of these powers is 1 except e to the power of p-1, a generator is found.
    '''
    # Init
    # Generate element which is tested for generator characteristics.
    # Saved in list to prevent checking the same element twice.
    testGen = randint(1, p)
    listTested = []
    listTested.append(testGen)

    # Step 1.
    divisors = find_divisors(p)

    # try for all random numbers
    # Caution: this leads to a truly random generator but is not very efficient.
    while len(listTested) < p-1:
        # only test each possible generator once
        if testGen in listTested:
            # Step 2.
            for div in divisors:
                testPotency = math.pow(testGen, div) % (p+1)
                if testPotency == 1.0 and div != divisors[-1]:
                    # element does not have the same order as the group,
                    # therefore try next element
                    break
                elif testPotency == 1.0 and div == divisors[-1]:
                    # generator is found
                    return testGen

        # try new element
        testGen = randint(1, p)
        listTested.append(testGen)
34
69
""" This file implements the signature scheme from "Unique Ring Signatures: A Practical Construction" by Matthew Franklin and Haibin Zhang """ import sys import math from random import randint import hashlib from libsig.AbstractRingSignatureScheme import AbstractRingSignatureScheme #from AbstractRingSignatureScheme import AbstractRingSignatureScheme #from libsig import primes # ----------- HELPER FUNCTIONS ----------- # function to find divisors in order to find generators def find_divisors(x): """ This is the "function to find divisors in order to find generators" module. This DocTest verifies that the module is correctly calculating all divisors of a number x. >>> find_divisors(10) [1, 2, 5, 10] >>> find_divisors(112) [1, 2, 4, 7, 8, 14, 16, 28, 56, 112] """ divisors = [ i for i in range(1,x+1) if x % i == 0] return divisors # function to find random generator of G def find_generator(p): ''' The order of any element in a group can be divided by p-1. Step 1: Calculate all Divisors. Step 2: Test for a random element e of G wether e to the power of a Divisor is 1. if neither is one but e to the power of p-1, a generator is found. ''' # Init # Generate element which is tested for generator characteristics. # Saved in list to prevent checking the same element twice. testGen = randint(1,p) listTested = [] listTested.append(testGen) # Step 1. divisors = find_divisors(p) # try for all random numbers # Caution: this leads to a truly random generator but is not very efficient. while len(listTested) < p-1: # only test each possible generator once if testGen in listTested: # Step 2. for div in divisors: testPotency = math.pow(testGen,div) % (p+1) if testPotency == 1.0 and div != divisors[-1]: # element does not have the same order like the group, # therefore try next element break elif testPotency == 1.0 and div == divisors[-1]: # generator is found return testGen # try new element testGen = randint(1,p) listTested.append(testGen) def list_to_string(input_list): ''' convert a list into a concatenated string of all its elements ''' result = ''.join(map(str,input_list)) return result # ----------- HELPER FUNCTIONS END ----------- class UniqueRingSignature(AbstractRingSignatureScheme): ''' | output: pp = (lamdba, q, G, H, H2) with, | q is prime, | g is generator of G, | G is multiplicative Group with prime order q, | H1 and H2 are two Hash functions H1: {0,1}* -> G, | (as well as H2: {0,1}* -> Zq which is the same). ''' # set prime p (Sophie-Germain and therefore save) #q = 53 q = 59 # find random generator of G g = find_generator(q-1) # hash functions with desired range and the usage of secure hashes h1 = lambda x: int(hashlib.sha256(str(x).encode()).hexdigest(),16)%(UniqueRingSignature.q) # this way to share the information should be improved h2 = lambda x: int(hashlib.sha512(str(x).encode()).hexdigest(),16)%(UniqueRingSignature.q) # list of public keys Rp = list() @staticmethod def keygen(verbose=False): #print("---- KeyGen Started ---- \n") r = randint(1,UniqueRingSignature.q) # x = g**r % q x = pow(UniqueRingSignature.g, r,UniqueRingSignature.q) # y = g**x y = pow(UniqueRingSignature.g, x, UniqueRingSignature.q) if verbose == True: print("KeyGen Config: public key y=" + str(y) + ", private key x=" + str(x) + "\n") print("---- KeyGen Completed ---- \n") # Caution! 
I know, keygen should NOT return the private key, but this is needed to "play" through a whole signature - validation process return x,y @staticmethod def ringsign(x, pubkey, message,verbose=False): ''' input: x is the privkey from user i, | all public keys: pubkeys, | the message output: (R,m, (H(mR)^xi), c1,t1,...,cn,tn), | R: all the pubkeys concatenated, | cj,tj: random number within Zq ''' # calculate R = pk1,pk2,..,pkn R = list_to_string(pubkey) g = UniqueRingSignature.g q = UniqueRingSignature.q h1 = UniqueRingSignature.h1 h2 = UniqueRingSignature.h2 # message + pubkeys concatenated mR = message + str(R) C = list() T = list() A = list() B = list() ri = -1 # simulation step # for i in pubkey: # Step 1: # a = 0 b = 0 c = 0 t = 0 if pow(g,x,q) != i: c, t = randint(1,q), randint(1,q) a = (pow(g, t) * pow(int(i), c)) % q b = (pow(h1(mR), t) * pow(pow(h1(mR),x),c)) % q else: # Step 2: # ri = randint(1, q) a = pow(g, ri, q) b = pow(h1(mR), ri, q) # insert to allocate place c = -1 t = -1 A.append(a) B.append(b) C.append(c) T.append(t) # for end # Step 3: # cj = 0 # list count from 0 ab = ''.join('{}{}'.format(*t) for t in zip(A,B)) usernr = 0 for i in range(len(pubkey)): if pubkey[i] != (pow(g,x,q)): cj = (cj + C[i]) % q else: usernr = i ci = h2(message + R + ab) - (cj % (q-1)) # update ci, this was initialized with -1 C[usernr] = ci ti = ((ri - (C[usernr]*x)) % (q-1)) if ti < 0: ti = (q-1) + ti # update ti, this was initialized with -1 T[usernr] = ti # Step 4: # # concatenate ct: c1,t1,c2,t2,...,cn,tn ct = ','.join('{},{}'.format(*t) for t in zip(C,T)) # returning result result = R + ","+message+","+str(pow(h1(mR),x, q))+"," + ct if verbose == True: print("RingSign Result: "+ result) print("---- RingSign Completed ---- \n") return result @staticmethod def verify(R, message, signature,verbose=False): ''' Input: the public keys R | the message | the signature computed with ringsign Output: whether the message was signed by R or not ''' g = UniqueRingSignature.g q = UniqueRingSignature.q h1 = UniqueRingSignature.h1 h2 = UniqueRingSignature.h2 # parse the signature parsed = signature.split(",") tt = int(parsed[2]) cjs = list() tjs = list() for i in range(0,int(((len(parsed))/2)-1)): cjs.append(int(parsed[3+2*i])) tjs.append(int(parsed[4+2*i])) #print(str(cjs)+" "+str(tjs) + " "+ str(tt)) # check signature # sum of all cjs # =? 
# self.pp['h2'](message + R + gyh1) mR = list_to_string(R) val1 = sum(cjs) % q # for all users in R: # g**tj * yj ** cj , h1(m||R)**tj * tt**cj gyh1 = "" for i in range(len(tjs)): if tjs[i] < 0: tjs[i] = (q-1) + tjs[i] if cjs[i] < 0: cjs[i] = (q-1) + cjs[i] gy = (pow(g,(tjs[i]),q) * (pow((R[i]),(cjs[i]),q))) % q h = (pow(int(h1(message + mR)), int(tjs[i])) * pow(tt,int(cjs[i]))) % q gyh1 = gyh1 + str( gy) + str( h) val2 = str(h2(message + list_to_string(R) + gyh1)) if int(val1) == int(val2): if verbose == True: print("Signature is valid!\n") print("Common Result: " + str(val1)) print("---- Validation Completed ---- \n") return True else: if verbose == True: print("Signature is not valid!\n") print(str(val1) + " != " + str(val2)) print("---- Validation Completed ---- \n") return False def local_test(verbose=True): # verbose output print(verbose) # user 1 will signate and validate later, # therefore his private key is saved for test purposes privKey1,pubkey = UniqueRingSignature.keygen(verbose) UniqueRingSignature.Rp.append(pubkey) a,pubkey = UniqueRingSignature.keygen(verbose) UniqueRingSignature.Rp.append(pubkey) # usernr start from 0 # ringsign(self, privkey, usernr, pubkeys, message) ring = UniqueRingSignature.ringsign(privKey1, UniqueRingSignature.Rp, "asdf", verbose) if verbose: print("Result of Signature Validation:") # verify(pubkeys, message, signature): UniqueRingSignature.verify(UniqueRingSignature.Rp, "asdf", ring, verbose) if __name__ == '__main__': # doctest start import doctest doctest.testmod() if len(sys.argv) > 1: verbose = False if sys.argv[1] == "True": verbose = True # run a local test local_test(verbose)
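To sanity-check the `find_generator` record above, one can verify that the returned element really generates the whole multiplicative group. A minimal sketch, assuming `find_generator` is in scope and using the same prime q = 59 as the class body (which calls `find_generator(q-1)`):

q = 59
g = find_generator(q - 1)

# A true generator hits every nonzero residue modulo q exactly once
# among its first q-1 powers.
powers = {pow(g, k, q) for k in range(1, q)}
assert powers == set(range(1, q))
print(g, "generates the multiplicative group modulo", q)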
children
Get children nodes of this node.

Arguments:
    depth: Number of levels of children to traverse. 0 returns only this node.
    include_self: Includes this node in the results.
    include_parents: Includes nodes that match in the results, when they also have
        child nodes that match.
    include_children: If True, as soon as a match is found its children will not be
        included in the search.
    required_offset: Only match nodes with a source offset that contains this offset.
    offset_limits: Only match nodes when their source offset is contained inside this
        source offset.
    filters: Dictionary of {attribute: value} that children must match. Can also be
        given as a list of dicts, children that match one of the dicts will be returned.
    exclude_filter: Dictionary of {attribute: value} that children cannot match.

Returns:
    List of node objects.
#!/usr/bin/python3 import functools from copy import deepcopy from .grammar import BASE_NODE_TYPES class NodeBase: """Represents a node within the solidity AST. Attributes: depth: Number of nodes between this node and the SourceUnit offset: Absolute source offsets as a (start, stop) tuple contract_id: Contract ID as given by the standard compiler JSON fields: List of attributes for this node """ def __init__(self, ast, parent): self.depth = parent.depth + 1 if parent is not None else 0 self._parent = parent self._children = set() src = [int(i) for i in ast["src"].split(":")] self.offset = (src[0], src[0] + src[1]) self.contract_id = src[2] self.fields = sorted(ast.keys()) for key, value in ast.items(): if isinstance(value, dict) and value.get("nodeType") == "Block": value = value["statements"] elif key == "body" and not value: value = [] if isinstance(value, dict): item = node_class_factory(value, self) if isinstance(item, NodeBase): self._children.add(item) setattr(self, key, item) elif isinstance(value, list): items = [node_class_factory(i, self) for i in value] setattr(self, key, items) self._children.update(i for i in items if isinstance(i, NodeBase)) else: setattr(self, key, value) def __hash__(self): return hash(f"{self.nodeType}{self.depth}{self.offset}") def __repr__(self): repr_str = f"<{self.nodeType}" if hasattr(self, "nodes"): repr_str += " iterable" if hasattr(self, "type"): if isinstance(self.type, str): repr_str += f" {self.type}" else: repr_str += f" {self.type._display()}" if self._display(): repr_str += f" '{self._display()}'" else: repr_str += " object" return f"{repr_str}>" def _display(self): if hasattr(self, "name") and hasattr(self, "value"): return f"{self.name} = {self.value}" for attr in ("name", "value", "absolutePath"): if hasattr(self, attr): return f"{getattr(self, attr)}" return "" # MASKED: children function (lines 71-114) def parents(self, depth=-1, filters=None): """Get parent nodes of this node. Arguments: depth: Depth limit. If given as a negative value, it will be subtracted from this object's depth. filters: Dictionary of {attribute: value} that parents must match. Returns: list of nodes""" if filters and not isinstance(filters, dict): raise TypeError("Filters must be a dict") if depth < 0: depth = self.depth + depth if depth >= self.depth or depth < 0: raise IndexError("Given depth exceeds node depth") node_list = [] parent = self while True: parent = parent._parent if not filters or _check_filter(parent, filters, {}): node_list.append(parent) if parent.depth == depth: return node_list def parent(self, depth=-1, filters=None): """Get a parent node of this node. Arguments: depth: Depth limit. If given as a negative value, it will be subtracted from this object's depth. The parent at this exact depth is returned. filters: Dictionary of {attribute: value} that the parent must match. If a filter value is given, will return the first parent that meets the filters up to the given depth. If none is found, returns None. 
If no filter is given, returns the parent at the given depth.""" if filters and not isinstance(filters, dict): raise TypeError("Filters must be a dict") if depth < 0: depth = self.depth + depth if depth >= self.depth or depth < 0: raise IndexError("Given depth exceeds node depth") parent = self while parent.depth > depth: parent = parent._parent if parent.depth == depth and not filters: return parent if filters and _check_filter(parent, filters, {}): return parent return None def is_child_of(self, node): """Checks if this object is a child of the given node object.""" if node.depth >= self.depth: return False return self.parent(node.depth) == node def is_parent_of(self, node): """Checks if this object is a parent of the given node object.""" if node.depth <= self.depth: return False return node.parent(self.depth) == self def get(self, key, default=None): """ Gets an attribute from this node, if that attribute exists. Arguments: key: Field name to return. May contain decimals to return a value from a child node. default: Default value to return. Returns: Field value if it exists. Default value if not. """ if key is None: raise TypeError("Cannot match against None") obj = self for k in key.split("."): if isinstance(obj, dict): obj = obj.get(k) else: obj = getattr(obj, k, None) return obj or default class IterableNodeBase(NodeBase): def __getitem__(self, key): if isinstance(key, str): try: return next(i for i in self.nodes if getattr(i, "name", None) == key) except StopIteration: raise KeyError(key) return self.nodes[key] def __iter__(self): return iter(self.nodes) def __len__(self): return len(self.nodes) def __contains__(self, obj): return obj in self.nodes def node_class_factory(ast, parent): ast = deepcopy(ast) if not isinstance(ast, dict) or "nodeType" not in ast: return ast if "body" in ast: ast["nodes"] = ast.pop("body") base_class = IterableNodeBase if "nodes" in ast else NodeBase base_type = next((k for k, v in BASE_NODE_TYPES.items() if ast["nodeType"] in v), None) if base_type: ast["baseNodeType"] = base_type return type(ast["nodeType"], (base_class,), {})(ast, parent) def _check_filters(required_offset, offset_limits, filters, exclude, node): if required_offset and not is_inside_offset(required_offset, node.offset): return False if offset_limits and not is_inside_offset(node.offset, offset_limits): return False for f in filters: if _check_filter(node, f, exclude): return True return False def _check_filter(node, filters, exclude): for key, value in filters.items(): if node.get(key) != value: return False for key, value in exclude.items(): if node.get(key) == value: return False return True def _find_children(filter_fn, include_parents, include_children, find_fn, depth, node): if depth is not None: depth -= 1 if depth < 0: return [node] if filter_fn(node) else [] if not include_children and filter_fn(node): return [node] node_list = [] for child in node._children: node_list.extend(find_fn(find_fn, depth, child)) if (include_parents or not node_list) and filter_fn(node): node_list.insert(0, node) return node_list def is_inside_offset(inner, outer): """Checks if the first offset is contained in the second offset Args: inner: inner offset tuple outer: outer offset tuple Returns: bool""" return outer[0] <= inner[0] <= inner[1] <= outer[1]
def children( self, depth=None, include_self=False, include_parents=True, include_children=True, required_offset=None, offset_limits=None, filters=None, exclude_filter=None, ): """Get childen nodes of this node. Arguments: depth: Number of levels of children to traverse. 0 returns only this node. include_self: Includes this node in the results. include_parents: Includes nodes that match in the results, when they also have child nodes that match. include_children: If True, as soon as a match is found it's children will not be included in the search. required_offset: Only match nodes with a source offset that contains this offset. offset_limits: Only match nodes when their source offset is contained inside this source offset. filters: Dictionary of {attribute: value} that children must match. Can also be given as a list of dicts, children that match one of the dicts will be returned. exclude_filter: Dictionary of {attribute:value} that children cannot match. Returns: List of node objects.""" if filters is None: filters = {} if exclude_filter is None: exclude_filter = {} if isinstance(filters, dict): filters = [filters] filter_fn = functools.partial( _check_filters, required_offset, offset_limits, filters, exclude_filter ) find_fn = functools.partial(_find_children, filter_fn, include_parents, include_children) result = find_fn(find_fn, depth, self) if include_self or not result or result[0] != self: return result return result[1:]
71
114
#!/usr/bin/python3 import functools from copy import deepcopy from .grammar import BASE_NODE_TYPES class NodeBase: """Represents a node within the solidity AST. Attributes: depth: Number of nodes between this node and the SourceUnit offset: Absolute source offsets as a (start, stop) tuple contract_id: Contract ID as given by the standard compiler JSON fields: List of attributes for this node """ def __init__(self, ast, parent): self.depth = parent.depth + 1 if parent is not None else 0 self._parent = parent self._children = set() src = [int(i) for i in ast["src"].split(":")] self.offset = (src[0], src[0] + src[1]) self.contract_id = src[2] self.fields = sorted(ast.keys()) for key, value in ast.items(): if isinstance(value, dict) and value.get("nodeType") == "Block": value = value["statements"] elif key == "body" and not value: value = [] if isinstance(value, dict): item = node_class_factory(value, self) if isinstance(item, NodeBase): self._children.add(item) setattr(self, key, item) elif isinstance(value, list): items = [node_class_factory(i, self) for i in value] setattr(self, key, items) self._children.update(i for i in items if isinstance(i, NodeBase)) else: setattr(self, key, value) def __hash__(self): return hash(f"{self.nodeType}{self.depth}{self.offset}") def __repr__(self): repr_str = f"<{self.nodeType}" if hasattr(self, "nodes"): repr_str += " iterable" if hasattr(self, "type"): if isinstance(self.type, str): repr_str += f" {self.type}" else: repr_str += f" {self.type._display()}" if self._display(): repr_str += f" '{self._display()}'" else: repr_str += " object" return f"{repr_str}>" def _display(self): if hasattr(self, "name") and hasattr(self, "value"): return f"{self.name} = {self.value}" for attr in ("name", "value", "absolutePath"): if hasattr(self, attr): return f"{getattr(self, attr)}" return "" def children( self, depth=None, include_self=False, include_parents=True, include_children=True, required_offset=None, offset_limits=None, filters=None, exclude_filter=None, ): """Get childen nodes of this node. Arguments: depth: Number of levels of children to traverse. 0 returns only this node. include_self: Includes this node in the results. include_parents: Includes nodes that match in the results, when they also have child nodes that match. include_children: If True, as soon as a match is found it's children will not be included in the search. required_offset: Only match nodes with a source offset that contains this offset. offset_limits: Only match nodes when their source offset is contained inside this source offset. filters: Dictionary of {attribute: value} that children must match. Can also be given as a list of dicts, children that match one of the dicts will be returned. exclude_filter: Dictionary of {attribute:value} that children cannot match. Returns: List of node objects.""" if filters is None: filters = {} if exclude_filter is None: exclude_filter = {} if isinstance(filters, dict): filters = [filters] filter_fn = functools.partial( _check_filters, required_offset, offset_limits, filters, exclude_filter ) find_fn = functools.partial(_find_children, filter_fn, include_parents, include_children) result = find_fn(find_fn, depth, self) if include_self or not result or result[0] != self: return result return result[1:] def parents(self, depth=-1, filters=None): """Get parent nodes of this node. Arguments: depth: Depth limit. If given as a negative value, it will be subtracted from this object's depth. filters: Dictionary of {attribute: value} that parents must match. 
Returns: list of nodes""" if filters and not isinstance(filters, dict): raise TypeError("Filters must be a dict") if depth < 0: depth = self.depth + depth if depth >= self.depth or depth < 0: raise IndexError("Given depth exceeds node depth") node_list = [] parent = self while True: parent = parent._parent if not filters or _check_filter(parent, filters, {}): node_list.append(parent) if parent.depth == depth: return node_list def parent(self, depth=-1, filters=None): """Get a parent node of this node. Arguments: depth: Depth limit. If given as a negative value, it will be subtracted from this object's depth. The parent at this exact depth is returned. filters: Dictionary of {attribute: value} that the parent must match. If a filter value is given, will return the first parent that meets the filters up to the given depth. If none is found, returns None. If no filter is given, returns the parent at the given depth.""" if filters and not isinstance(filters, dict): raise TypeError("Filters must be a dict") if depth < 0: depth = self.depth + depth if depth >= self.depth or depth < 0: raise IndexError("Given depth exceeds node depth") parent = self while parent.depth > depth: parent = parent._parent if parent.depth == depth and not filters: return parent if filters and _check_filter(parent, filters, {}): return parent return None def is_child_of(self, node): """Checks if this object is a child of the given node object.""" if node.depth >= self.depth: return False return self.parent(node.depth) == node def is_parent_of(self, node): """Checks if this object is a parent of the given node object.""" if node.depth <= self.depth: return False return node.parent(self.depth) == self def get(self, key, default=None): """ Gets an attribute from this node, if that attribute exists. Arguments: key: Field name to return. May contain decimals to return a value from a child node. default: Default value to return. Returns: Field value if it exists. Default value if not. 
""" if key is None: raise TypeError("Cannot match against None") obj = self for k in key.split("."): if isinstance(obj, dict): obj = obj.get(k) else: obj = getattr(obj, k, None) return obj or default class IterableNodeBase(NodeBase): def __getitem__(self, key): if isinstance(key, str): try: return next(i for i in self.nodes if getattr(i, "name", None) == key) except StopIteration: raise KeyError(key) return self.nodes[key] def __iter__(self): return iter(self.nodes) def __len__(self): return len(self.nodes) def __contains__(self, obj): return obj in self.nodes def node_class_factory(ast, parent): ast = deepcopy(ast) if not isinstance(ast, dict) or "nodeType" not in ast: return ast if "body" in ast: ast["nodes"] = ast.pop("body") base_class = IterableNodeBase if "nodes" in ast else NodeBase base_type = next((k for k, v in BASE_NODE_TYPES.items() if ast["nodeType"] in v), None) if base_type: ast["baseNodeType"] = base_type return type(ast["nodeType"], (base_class,), {})(ast, parent) def _check_filters(required_offset, offset_limits, filters, exclude, node): if required_offset and not is_inside_offset(required_offset, node.offset): return False if offset_limits and not is_inside_offset(node.offset, offset_limits): return False for f in filters: if _check_filter(node, f, exclude): return True return False def _check_filter(node, filters, exclude): for key, value in filters.items(): if node.get(key) != value: return False for key, value in exclude.items(): if node.get(key) == value: return False return True def _find_children(filter_fn, include_parents, include_children, find_fn, depth, node): if depth is not None: depth -= 1 if depth < 0: return [node] if filter_fn(node) else [] if not include_children and filter_fn(node): return [node] node_list = [] for child in node._children: node_list.extend(find_fn(find_fn, depth, child)) if (include_parents or not node_list) and filter_fn(node): node_list.insert(0, node) return node_list def is_inside_offset(inner, outer): """Checks if the first offset is contained in the second offset Args: inner: inner offset tuple outer: outer offset tuple Returns: bool""" return outer[0] <= inner[0] <= inner[1] <= outer[1]
pqcost
Splits the gencost variable into two pieces if costs are given for Qg. Checks whether C{gencost} has cost information for reactive power generation (rows C{ng+1} to C{2*ng}). If so, it returns the first C{ng} rows in C{pcost} and the last C{ng} rows in C{qcost}. Otherwise, leaves C{qcost} empty. Also does some error checking. If C{on} is specified (list of indices of generators which are on line) it only returns the rows corresponding to these generators. @author: Ray Zimmerman (PSERC Cornell)
# Copyright (c) 1996-2015 PSERC. All rights reserved. # Use of this source code is governed by a BSD-style # license that can be found in the LICENSE file. """Splits the gencost variable into two pieces if costs are given for Qg. """ from sys import stderr from numpy import array, arange # MASKED: pqcost function (lines 13-37)
def pqcost(gencost, ng, on=None): """Splits the gencost variable into two pieces if costs are given for Qg. Checks whether C{gencost} has cost information for reactive power generation (rows C{ng+1} to C{2*ng}). If so, it returns the first C{ng} rows in C{pcost} and the last C{ng} rows in C{qcost}. Otherwise, leaves C{qcost} empty. Also does some error checking. If C{on} is specified (list of indices of generators which are on line) it only returns the rows corresponding to these generators. @author: Ray Zimmerman (PSERC Cornell) """ if on is None: on = arange(ng) if gencost.shape[0] == ng: pcost = gencost[on, :] qcost = array([]) elif gencost.shape[0] == 2 * ng: pcost = gencost[on, :] qcost = gencost[on + ng, :] else: stderr.write('pqcost: gencost has wrong number of rows\n') return pcost, qcost
13
37
# Copyright (c) 1996-2015 PSERC. All rights reserved. # Use of this source code is governed by a BSD-style # license that can be found in the LICENSE file. """Splits the gencost variable into two pieces if costs are given for Qg. """ from sys import stderr from numpy import array, arange def pqcost(gencost, ng, on=None): """Splits the gencost variable into two pieces if costs are given for Qg. Checks whether C{gencost} has cost information for reactive power generation (rows C{ng+1} to C{2*ng}). If so, it returns the first C{ng} rows in C{pcost} and the last C{ng} rows in C{qcost}. Otherwise, leaves C{qcost} empty. Also does some error checking. If C{on} is specified (list of indices of generators which are on line) it only returns the rows corresponding to these generators. @author: Ray Zimmerman (PSERC Cornell) """ if on is None: on = arange(ng) if gencost.shape[0] == ng: pcost = gencost[on, :] qcost = array([]) elif gencost.shape[0] == 2 * ng: pcost = gencost[on, :] qcost = gencost[on + ng, :] else: stderr.write('pqcost: gencost has wrong number of rows\n') return pcost, qcost
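A small worked example of the split may help; the cost rows below are invented values in gencost layout, used only to show the resulting shapes, not data from a real case file.

from numpy import array

# 2 generators; 4 rows of cost data, so rows 0-1 are Pg costs and rows 2-3 are Qg costs.
gencost = array([
    [2., 0., 0., 3., 0.01, 40., 0.],   # active power cost, generator 0
    [2., 0., 0., 3., 0.02, 30., 0.],   # active power cost, generator 1
    [2., 0., 0., 3., 0.00,  4., 0.],   # reactive power cost, generator 0
    [2., 0., 0., 3., 0.00,  3., 0.],   # reactive power cost, generator 1
])
pcost, qcost = pqcost(gencost, 2)                       # pcost: rows 0-1, qcost: rows 2-3
pcost_on, qcost_on = pqcost(gencost, 2, on=array([1]))  # rows for generator 1 only
# With only ng rows of cost data, qcost comes back empty:
pcost_only, qcost_empty = pqcost(gencost[:2, :], 2)     # qcost_empty.size == 0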
thumbIncrementCheck
Checks whether your thumb is up or not, no matter which hand you use. Returns 1 if the thumb is up, else 0.
"""Python 3.9.5""" import cv2 import HandTrackingModule as htm # MASKED: thumbIncrementCheck function (lines 7-20) def textOutput(count, cc) -> str: """Returns an appropriate text output depending on `count` and `cc`.""" text = "NOTHING" if (count, cc) == (2, 2): text = "SCISSOR" elif count == 0: text = "ROCK" elif count == 5: text = "PAPER" else: pass return text def main(): # cap = cv2.VideoCapture(0) # opens the camera detector = htm.HandDetector() while True: success, img = cv2.imread("/home/laughinglouds/Pictures/Webcam/2021-04-13-133250.jpg") img = detector.findHands(img) lmlist = detector.findPosition(img, draw=True) # If a hand is not detected value will be 0 # else non-zero (21) hand_exists = len(lmlist) tipIDs = [4, 8, 12, 16, 20] # Represents fingertips dipIDs = [2, 7, 11, 15, 19] # Represents landmarks below the tips count = 0 # keeps count of how many fingers are up cc = 0 # for later checking if `Scissor` or not if hand_exists: # Looping for the five fingers for i in range(0, 5): if i == 0: count += thumbIncrementCheck(lmlist) else: # 8: Index finger # 12: Middle finger if (lmlist[tipIDs[i]][2] < lmlist[dipIDs[i]][2]) and ( tipIDs[i] in (8, 12) # if either index or middle ): count += 1 cc += 1 elif lmlist[tipIDs[i]][2] < lmlist[dipIDs[i]][2]: count += 1 # print(cc) else: count = -1 txt = textOutput(count, cc) # (10, 140) is coordinate of txt on the screen cv2.putText(img, str(txt), (10, 140), cv2.FONT_HERSHEY_PLAIN, 3, (0, 0, 255), 3) cv2.imshow("Image", img) # close key isn't working for me # os: linux mint 20.1 if cv2.waitKey(1) & 0xFF == ord("q"): break if __name__ == "__main__": main()
def thumbIncrementCheck(lmList: list[list[int]]) -> int: """Checks whether your thumb is up or not. No matter what hand you use. returns 1 if thumb is up else 0""" count = 0 t_x = lmList[4][1] p_x = lmList[17][1] if t_x > p_x: # If true: RIGHT hand if lmList[4][1] >= lmList[2][1]: count += 1 else: # ELse: LEFT hand if lmList[4][1] <= lmList[2][1]: count += 1 return count
7
20
"""Python 3.9.5""" import cv2 import HandTrackingModule as htm def thumbIncrementCheck(lmList: list[list[int]]) -> int: """Checks whether your thumb is up or not. No matter what hand you use. returns 1 if thumb is up else 0""" count = 0 t_x = lmList[4][1] p_x = lmList[17][1] if t_x > p_x: # If true: RIGHT hand if lmList[4][1] >= lmList[2][1]: count += 1 else: # ELse: LEFT hand if lmList[4][1] <= lmList[2][1]: count += 1 return count def textOutput(count, cc) -> str: """Returns an appropriate text output depending on `count` and `cc`.""" text = "NOTHING" if (count, cc) == (2, 2): text = "SCISSOR" elif count == 0: text = "ROCK" elif count == 5: text = "PAPER" else: pass return text def main(): # cap = cv2.VideoCapture(0) # opens the camera detector = htm.HandDetector() while True: success, img = cv2.imread("/home/laughinglouds/Pictures/Webcam/2021-04-13-133250.jpg") img = detector.findHands(img) lmlist = detector.findPosition(img, draw=True) # If a hand is not detected value will be 0 # else non-zero (21) hand_exists = len(lmlist) tipIDs = [4, 8, 12, 16, 20] # Represents fingertips dipIDs = [2, 7, 11, 15, 19] # Represents landmarks below the tips count = 0 # keeps count of how many fingers are up cc = 0 # for later checking if `Scissor` or not if hand_exists: # Looping for the five fingers for i in range(0, 5): if i == 0: count += thumbIncrementCheck(lmlist) else: # 8: Index finger # 12: Middle finger if (lmlist[tipIDs[i]][2] < lmlist[dipIDs[i]][2]) and ( tipIDs[i] in (8, 12) # if either index or middle ): count += 1 cc += 1 elif lmlist[tipIDs[i]][2] < lmlist[dipIDs[i]][2]: count += 1 # print(cc) else: count = -1 txt = textOutput(count, cc) # (10, 140) is coordinate of txt on the screen cv2.putText(img, str(txt), (10, 140), cv2.FONT_HERSHEY_PLAIN, 3, (0, 0, 255), 3) cv2.imshow("Image", img) # close key isn't working for me # os: linux mint 20.1 if cv2.waitKey(1) & 0xFF == ord("q"): break if __name__ == "__main__": main()
_read_kit_data
Read epochs data. Returns ------- data : array, [channels x samples] the data matrix (channels x samples). times : array, [samples] returns the time values corresponding to the samples.
"""Conversion tool from SQD to FIF. RawKIT class is adapted from Denis Engemann et al.'s mne_bti2fiff.py. """ # Authors: Teon Brooks <[email protected]> # Joan Massich <[email protected]> # Christian Brodbeck <[email protected]> # # License: BSD (3-clause) from collections import defaultdict, OrderedDict from math import sin, cos from os import SEEK_CUR, path as op from struct import unpack import numpy as np from scipy import linalg from ..pick import pick_types from ...utils import (verbose, logger, warn, fill_doc, _check_option, _stamp_to_dt) from ...transforms import apply_trans, als_ras_trans from ..base import BaseRaw from ..utils import _mult_cal_one from ...epochs import BaseEpochs from ..constants import FIFF from ..meas_info import _empty_info from .constants import KIT, LEGACY_AMP_PARAMS from .coreg import read_mrk from ...event import read_events from .._digitization import _set_dig_kit def _call_digitization(info, mrk, elp, hsp, kit_info): # Use values from kit_info only if all others are None if mrk is None and elp is None and hsp is None: mrk = kit_info.get('mrk', None) elp = kit_info.get('elp', None) hsp = kit_info.get('hsp', None) # prepare mrk if isinstance(mrk, list): mrk = [read_mrk(marker) if isinstance(marker, str) else marker for marker in mrk] mrk = np.mean(mrk, axis=0) # setup digitization if mrk is not None and elp is not None and hsp is not None: dig_points, dev_head_t = _set_dig_kit( mrk, elp, hsp, kit_info['eeg_dig']) info['dig'] = dig_points info['dev_head_t'] = dev_head_t elif mrk is not None or elp is not None or hsp is not None: raise ValueError("mrk, elp and hsp need to be provided as a group " "(all or none)") return info class UnsupportedKITFormat(ValueError): """Our reader is not guaranteed to work with old files.""" def __init__(self, sqd_version, *args, **kwargs): # noqa: D102 self.sqd_version = sqd_version ValueError.__init__(self, *args, **kwargs) @fill_doc class RawKIT(BaseRaw): """Raw object from KIT SQD file. Parameters ---------- input_fname : str Path to the sqd file. mrk : None | str | array_like, shape (5, 3) | list of str or array_like Marker points representing the location of the marker coils with respect to the MEG Sensors, or path to a marker file. If list, all of the markers will be averaged together. elp : None | str | array_like, shape (8, 3) Digitizer points representing the location of the fiducials and the marker coils with respect to the digitized head shape, or path to a file containing these points. hsp : None | str | array, shape (n_points, 3) Digitizer head shape points, or path to head shape file. If more than 10,000 points are in the head shape, they are automatically decimated. stim : list of int | '<' | '>' | None Channel-value correspondence when converting KIT trigger channels to a Neuromag-style stim channel. For '<', the largest values are assigned to the first channel (default). For '>', the largest values are assigned to the last channel. Can also be specified as a list of trigger channel indexes. If None, no synthesized channel is generated. slope : '+' | '-' How to interpret values on KIT trigger channels when synthesizing a Neuromag-style stim channel. With '+', a positive slope (low-to-high) is interpreted as an event. With '-', a negative slope (high-to-low) is interpreted as an event. stimthresh : float The threshold level for accepting voltage changes in KIT trigger channels as a trigger event. If None, stim must also be set to None. 
%(preload)s stim_code : 'binary' | 'channel' How to decode trigger values from stim channels. 'binary' read stim channel events as binary code, 'channel' encodes channel number. allow_unknown_format : bool Force reading old data that is not officially supported. Alternatively, read and re-save the data with the KIT MEG Laboratory application. %(standardize_names)s %(verbose)s Notes ----- ``elp`` and ``hsp`` are usually the exported text files (*.txt) from the Polhemus FastScan system. hsp refers to the headshape surface points. elp refers to the points in head-space that corresponds to the HPI points. Currently, '*.elp' and '*.hsp' files are NOT supported. See Also -------- mne.io.Raw : Documentation of attribute and methods. """ @verbose def __init__(self, input_fname, mrk=None, elp=None, hsp=None, stim='>', slope='-', stimthresh=1, preload=False, stim_code='binary', allow_unknown_format=False, standardize_names=None, verbose=None): # noqa: D102 logger.info('Extracting SQD Parameters from %s...' % input_fname) input_fname = op.abspath(input_fname) self.preload = False logger.info('Creating Raw.info structure...') info, kit_info = get_kit_info( input_fname, allow_unknown_format, standardize_names) kit_info['slope'] = slope kit_info['stimthresh'] = stimthresh if kit_info['acq_type'] != KIT.CONTINUOUS: raise TypeError('SQD file contains epochs, not raw data. Wrong ' 'reader.') logger.info('Creating Info structure...') last_samps = [kit_info['n_samples'] - 1] self._raw_extras = [kit_info] self._set_stimchannels(info, stim, stim_code) super(RawKIT, self).__init__( info, preload, last_samps=last_samps, filenames=[input_fname], raw_extras=self._raw_extras, verbose=verbose) self.info = _call_digitization( info=self.info, mrk=mrk, elp=elp, hsp=hsp, kit_info=kit_info) logger.info('Ready.') def read_stim_ch(self, buffer_size=1e5): """Read events from data. Parameter --------- buffer_size : int The size of chunk to by which the data are scanned. Returns ------- events : array, [samples] The event vector (1 x samples). """ buffer_size = int(buffer_size) start = int(self.first_samp) stop = int(self.last_samp + 1) pick = pick_types(self.info, meg=False, ref_meg=False, stim=True, exclude=[]) stim_ch = np.empty((1, stop), dtype=np.int64) for b_start in range(start, stop, buffer_size): b_stop = b_start + buffer_size x = self[pick, b_start:b_stop][0] stim_ch[:, b_start:b_start + x.shape[1]] = x return stim_ch def _set_stimchannels(self, info, stim, stim_code): """Specify how the trigger channel is synthesized from analog channels. Has to be done before loading data. For a RawKIT instance that has been created with preload=True, this method will raise a NotImplementedError. Parameters ---------- info : instance of MeasInfo The measurement info. stim : list of int | '<' | '>' Can be submitted as list of trigger channels. If a list is not specified, the default triggers extracted from misc channels will be used with specified directionality. '<' means that largest values assigned to the first channel in sequence. '>' means the largest trigger assigned to the last channel in sequence. stim_code : 'binary' | 'channel' How to decode trigger values from stim channels. 'binary' read stim channel events as binary code, 'channel' encodes channel number. 
""" if self.preload: raise NotImplementedError("Can't change stim channel after " "loading data") _check_option('stim_code', stim_code, ['binary', 'channel']) if stim is not None: if isinstance(stim, str): picks = _default_stim_chs(info) if stim == '<': stim = picks[::-1] elif stim == '>': stim = picks else: raise ValueError("stim needs to be list of int, '>' or " "'<', not %r" % str(stim)) else: stim = np.asarray(stim, int) if stim.max() >= self._raw_extras[0]['nchan']: raise ValueError( 'Got stim=%s, but sqd file only has %i channels' % (stim, self._raw_extras[0]['nchan'])) # modify info nchan = self._raw_extras[0]['nchan'] + 1 info['chs'].append(dict( cal=KIT.CALIB_FACTOR, logno=nchan, scanno=nchan, range=1.0, unit=FIFF.FIFF_UNIT_NONE, unit_mul=FIFF.FIFF_UNITM_NONE, ch_name='STI 014', coil_type=FIFF.FIFFV_COIL_NONE, loc=np.full(12, np.nan), kind=FIFF.FIFFV_STIM_CH, coord_frame=FIFF.FIFFV_COORD_UNKNOWN)) info._update_redundant() self._raw_extras[0]['stim'] = stim self._raw_extras[0]['stim_code'] = stim_code def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): """Read a chunk of raw data.""" sqd = self._raw_extras[fi] nchan = sqd['nchan'] data_left = (stop - start) * nchan conv_factor = sqd['conv_factor'] n_bytes = sqd['dtype'].itemsize assert n_bytes in (2, 4) # Read up to 100 MB of data at a time. blk_size = min(data_left, (100000000 // n_bytes // nchan) * nchan) with open(self._filenames[fi], 'rb', buffering=0) as fid: # extract data pointer = start * nchan * n_bytes fid.seek(sqd['dirs'][KIT.DIR_INDEX_RAW_DATA]['offset'] + pointer) stim = sqd['stim'] for blk_start in np.arange(0, data_left, blk_size) // nchan: blk_size = min(blk_size, data_left - blk_start * nchan) block = np.fromfile(fid, dtype=sqd['dtype'], count=blk_size) block = block.reshape(nchan, -1, order='F').astype(float) blk_stop = blk_start + block.shape[1] data_view = data[:, blk_start:blk_stop] block *= conv_factor # Create a synthetic stim channel if stim is not None: stim_ch = _make_stim_channel( block[stim, :], sqd['slope'], sqd['stimthresh'], sqd['stim_code'], stim) block = np.vstack((block, stim_ch)) _mult_cal_one(data_view, block, idx, cals, mult) # cals are all unity, so can be ignored def _default_stim_chs(info): """Return default stim channels for SQD files.""" return pick_types(info, meg=False, ref_meg=False, misc=True, exclude=[])[:8] def _make_stim_channel(trigger_chs, slope, threshold, stim_code, trigger_values): """Create synthetic stim channel from multiple trigger channels.""" if slope == '+': trig_chs_bin = trigger_chs > threshold elif slope == '-': trig_chs_bin = trigger_chs < threshold else: raise ValueError("slope needs to be '+' or '-'") # trigger value if stim_code == 'binary': trigger_values = 2 ** np.arange(len(trigger_chs)) elif stim_code != 'channel': raise ValueError("stim_code must be 'binary' or 'channel', got %s" % repr(stim_code)) trig_chs = trig_chs_bin * trigger_values[:, np.newaxis] return np.array(trig_chs.sum(axis=0), ndmin=2) class EpochsKIT(BaseEpochs): """Epochs Array object from KIT SQD file. Parameters ---------- input_fname : str Path to the sqd file. events : str | array, shape (n_events, 3) Path to events file. If array, it is the events typically returned by the read_events function. If some events don't match the events of interest as specified by event_id,they will be marked as 'IGNORED' in the drop log. event_id : int | list of int | dict | None The id of the event to consider. If dict, the keys can later be used to access associated events. 
Example: dict(auditory=1, visual=3). If int, a dict will be created with the id as string. If a list, all events with the IDs specified in the list are used. If None, all events will be used with and a dict is created with string integer names corresponding to the event id integers. tmin : float Start time before event. baseline : None or tuple of length 2 (default (None, 0)) The time interval to apply baseline correction. If None do not apply it. If baseline is (a, b) the interval is between "a (s)" and "b (s)". If a is None the beginning of the data is used and if b is None then b is set to the end of the interval. If baseline is equal to (None, None) all the time interval is used. The baseline (a, b) includes both endpoints, i.e. all timepoints t such that a <= t <= b. reject : dict | None Rejection parameters based on peak-to-peak amplitude. Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'. If reject is None then no rejection is done. Example:: reject = dict(grad=4000e-13, # T / m (gradiometers) mag=4e-12, # T (magnetometers) eeg=40e-6, # V (EEG channels) eog=250e-6 # V (EOG channels) ) flat : dict | None Rejection parameters based on flatness of signal. Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values are floats that set the minimum acceptable peak-to-peak amplitude. If flat is None then no rejection is done. reject_tmin : scalar | None Start of the time window used to reject epochs (with the default None, the window will start with tmin). reject_tmax : scalar | None End of the time window used to reject epochs (with the default None, the window will end with tmax). mrk : None | str | array_like, shape = (5, 3) | list of str or array_like Marker points representing the location of the marker coils with respect to the MEG Sensors, or path to a marker file. If list, all of the markers will be averaged together. elp : None | str | array_like, shape = (8, 3) Digitizer points representing the location of the fiducials and the marker coils with respect to the digitized head shape, or path to a file containing these points. hsp : None | str | array, shape = (n_points, 3) Digitizer head shape points, or path to head shape file. If more than 10`000 points are in the head shape, they are automatically decimated. allow_unknown_format : bool Force reading old data that is not officially supported. Alternatively, read and re-save the data with the KIT MEG Laboratory application. %(standardize_names)s %(verbose)s Notes ----- ``elp`` and ``hsp`` are usually the exported text files (*.txt) from the Polhemus FastScan system. hsp refers to the headshape surface points. elp refers to the points in head-space that corresponds to the HPI points. Currently, '*.elp' and '*.hsp' files are NOT supported. See Also -------- mne.Epochs : Documentation of attribute and methods. """ @verbose def __init__(self, input_fname, events, event_id=None, tmin=0, baseline=None, reject=None, flat=None, reject_tmin=None, reject_tmax=None, mrk=None, elp=None, hsp=None, allow_unknown_format=False, standardize_names=None, verbose=None): # noqa: D102 if isinstance(events, str): events = read_events(events) logger.info('Extracting KIT Parameters from %s...' 
% input_fname) input_fname = op.abspath(input_fname) self.info, kit_info = get_kit_info( input_fname, allow_unknown_format, standardize_names) kit_info.update(filename=input_fname) self._raw_extras = [kit_info] self._filenames = [] if len(events) != self._raw_extras[0]['n_epochs']: raise ValueError('Event list does not match number of epochs.') if self._raw_extras[0]['acq_type'] == KIT.EPOCHS: self._raw_extras[0]['data_length'] = KIT.INT else: raise TypeError('SQD file contains raw data, not epochs or ' 'average. Wrong reader.') if event_id is None: # convert to int to make typing-checks happy event_id = {str(e): int(e) for e in np.unique(events[:, 2])} for key, val in event_id.items(): if val not in events[:, 2]: raise ValueError('No matching events found for %s ' '(event id %i)' % (key, val)) data = self._read_kit_data() assert data.shape == (self._raw_extras[0]['n_epochs'], self.info['nchan'], self._raw_extras[0]['frame_length']) tmax = ((data.shape[2] - 1) / self.info['sfreq']) + tmin super(EpochsKIT, self).__init__( self.info, data, events, event_id, tmin, tmax, baseline, reject=reject, flat=flat, reject_tmin=reject_tmin, reject_tmax=reject_tmax, filename=input_fname, verbose=verbose) self.info = _call_digitization( info=self.info, mrk=mrk, elp=elp, hsp=hsp, kit_info=kit_info) logger.info('Ready.') # MASKED: _read_kit_data function (lines 427-454) def _read_dir(fid): return dict(offset=np.fromfile(fid, np.uint32, 1)[0], size=np.fromfile(fid, np.int32, 1)[0], max_count=np.fromfile(fid, np.int32, 1)[0], count=np.fromfile(fid, np.int32, 1)[0]) @verbose def get_kit_info(rawfile, allow_unknown_format, standardize_names=None, verbose=None): """Extract all the information from the sqd/con file. Parameters ---------- rawfile : str KIT file to be read. allow_unknown_format : bool Force reading old data that is not officially supported. Alternatively, read and re-save the data with the KIT MEG Laboratory application. %(standardize_names)s %(verbose)s Returns ------- info : instance of Info An Info for the instance. sqd : dict A dict containing all the sqd parameter settings. """ sqd = dict() sqd['rawfile'] = rawfile unsupported_format = False sqd['dirs'] = dirs = list() with open(rawfile, 'rb', buffering=0) as fid: # buffering=0 for np bug # # directories (0) # dirs.append(_read_dir(fid)) dirs.extend(_read_dir(fid) for _ in range(dirs[0]['count'] - 1)) assert len(dirs) == dirs[KIT.DIR_INDEX_DIR]['count'] # # system (1) # fid.seek(dirs[KIT.DIR_INDEX_SYSTEM]['offset']) # check file format version version, revision = unpack('2i', fid.read(2 * KIT.INT)) if version < 2 or (version == 2 and revision < 3): version_string = "V%iR%03i" % (version, revision) if allow_unknown_format: unsupported_format = True logger.warning("Force loading KIT format %s", version_string) else: raise UnsupportedKITFormat( version_string, "SQD file format %s is not officially supported. " "Set allow_unknown_format=True to load it anyways." 
% (version_string,)) sysid = unpack('i', fid.read(KIT.INT))[0] # basic info system_name = unpack('128s', fid.read(128))[0].decode() # model name model_name = unpack('128s', fid.read(128))[0].decode() # channels sqd['nchan'] = channel_count = unpack('i', fid.read(KIT.INT))[0] comment = unpack('256s', fid.read(256))[0].decode() create_time, last_modified_time = unpack('2i', fid.read(2 * KIT.INT)) fid.seek(KIT.INT * 3, SEEK_CUR) # reserved dewar_style = unpack('i', fid.read(KIT.INT))[0] fid.seek(KIT.INT * 3, SEEK_CUR) # spare fll_type = unpack('i', fid.read(KIT.INT))[0] fid.seek(KIT.INT * 3, SEEK_CUR) # spare trigger_type = unpack('i', fid.read(KIT.INT))[0] fid.seek(KIT.INT * 3, SEEK_CUR) # spare adboard_type = unpack('i', fid.read(KIT.INT))[0] fid.seek(KIT.INT * 29, SEEK_CUR) # reserved if version < 2 or (version == 2 and revision <= 3): adc_range = float(unpack('i', fid.read(KIT.INT))[0]) else: adc_range = unpack('d', fid.read(KIT.DOUBLE))[0] adc_polarity, adc_allocated, adc_stored = unpack('3i', fid.read(3 * KIT.INT)) system_name = system_name.replace('\x00', '') system_name = system_name.strip().replace('\n', '/') model_name = model_name.replace('\x00', '') model_name = model_name.strip().replace('\n', '/') full_version = f'V{version:d}R{revision:03d}' logger.debug("SQD file basic information:") logger.debug("Meg160 version = %s", full_version) logger.debug("System ID = %i", sysid) logger.debug("System name = %s", system_name) logger.debug("Model name = %s", model_name) logger.debug("Channel count = %i", channel_count) logger.debug("Comment = %s", comment) logger.debug("Dewar style = %i", dewar_style) logger.debug("FLL type = %i", fll_type) logger.debug("Trigger type = %i", trigger_type) logger.debug("A/D board type = %i", adboard_type) logger.debug("ADC range = +/-%s[V]", adc_range / 2.) logger.debug("ADC allocate = %i[bit]", adc_allocated) logger.debug("ADC bit = %i[bit]", adc_stored) # MGH description: 'acquisition (megacq) VectorView system at NMR-MGH' description = \ f'{system_name} ({sysid}) {full_version} {model_name}' sqd['dtype'] = np.dtype(getattr(np, f'int{adc_allocated}')) # check that we can read this file if fll_type not in KIT.FLL_SETTINGS: fll_types = sorted(KIT.FLL_SETTINGS.keys()) use_fll_type = fll_types[ np.searchsorted(fll_types, fll_type) - 1] warn('Unknown site filter settings (FLL) for system ' '"%s" model "%s" (ID %s), will assume FLL %d->%d, check ' 'your data for correctness, including channel scales and ' 'filter settings!' % (system_name, model_name, sysid, fll_type, use_fll_type)) fll_type = use_fll_type # # channel information (4) # chan_dir = dirs[KIT.DIR_INDEX_CHANNELS] chan_offset, chan_size = chan_dir['offset'], chan_dir['size'] sqd['channels'] = channels = [] exg_gains = list() for i in range(channel_count): fid.seek(chan_offset + chan_size * i) channel_type, = unpack('i', fid.read(KIT.INT)) # System 52 mislabeled reference channels as NULL. This was fixed # in system 53; not sure about 51... if sysid == 52 and i < 160 and channel_type == KIT.CHANNEL_NULL: channel_type = KIT.CHANNEL_MAGNETOMETER_REFERENCE if channel_type in KIT.CHANNELS_MEG: if channel_type not in KIT.CH_TO_FIFF_COIL: raise NotImplementedError( "KIT channel type %i can not be read. Please contact " "the mne-python developers." % channel_type) channels.append({ 'type': channel_type, # (x, y, z, theta, phi) for all MEG channels. Some channel # types have additional information which we're not using. 
'loc': np.fromfile(fid, dtype='d', count=5), }) if channel_type in KIT.CHANNEL_NAME_NCHAR: fid.seek(16, SEEK_CUR) # misc fields channels[-1]['name'] = _read_name(fid, channel_type) elif channel_type in KIT.CHANNELS_MISC: channel_no, = unpack('i', fid.read(KIT.INT)) fid.seek(4, SEEK_CUR) name = _read_name(fid, channel_type) channels.append({ 'type': channel_type, 'no': channel_no, 'name': name, }) if channel_type in (KIT.CHANNEL_EEG, KIT.CHANNEL_ECG): offset = 6 if channel_type == KIT.CHANNEL_EEG else 8 fid.seek(offset, SEEK_CUR) exg_gains.append(np.fromfile(fid, 'd', 1)[0]) elif channel_type == KIT.CHANNEL_NULL: channels.append({'type': channel_type}) else: raise IOError("Unknown KIT channel type: %i" % channel_type) exg_gains = np.array(exg_gains) # # Channel sensitivity information: (5) # # only sensor channels requires gain. the additional misc channels # (trigger channels, audio and voice channels) are passed # through unaffected fid.seek(dirs[KIT.DIR_INDEX_CALIBRATION]['offset']) # (offset [Volt], gain [Tesla/Volt]) for each channel sensitivity = np.fromfile(fid, dtype='d', count=channel_count * 2) sensitivity.shape = (channel_count, 2) channel_offset, channel_gain = sensitivity.T assert (channel_offset == 0).all() # otherwise we have a problem # # amplifier gain (7) # fid.seek(dirs[KIT.DIR_INDEX_AMP_FILTER]['offset']) amp_data = unpack('i', fid.read(KIT.INT))[0] if fll_type >= 100: # Kapper Type # gain: mask bit gain1 = (amp_data & 0x00007000) >> 12 gain2 = (amp_data & 0x70000000) >> 28 gain3 = (amp_data & 0x07000000) >> 24 amp_gain = (KIT.GAINS[gain1] * KIT.GAINS[gain2] * KIT.GAINS[gain3]) # filter settings hpf = (amp_data & 0x00000700) >> 8 lpf = (amp_data & 0x00070000) >> 16 bef = (amp_data & 0x00000003) >> 0 else: # Hanger Type # gain input_gain = (amp_data & 0x1800) >> 11 output_gain = (amp_data & 0x0007) >> 0 amp_gain = KIT.GAINS[input_gain] * KIT.GAINS[output_gain] # filter settings hpf = (amp_data & 0x007) >> 4 lpf = (amp_data & 0x0700) >> 8 bef = (amp_data & 0xc000) >> 14 hpf_options, lpf_options, bef_options = KIT.FLL_SETTINGS[fll_type] sqd['highpass'] = KIT.HPFS[hpf_options][hpf] sqd['lowpass'] = KIT.LPFS[lpf_options][lpf] sqd['notch'] = KIT.BEFS[bef_options][bef] # # Acquisition Parameters (8) # fid.seek(dirs[KIT.DIR_INDEX_ACQ_COND]['offset']) sqd['acq_type'], = acq_type, = unpack('i', fid.read(KIT.INT)) sqd['sfreq'], = unpack('d', fid.read(KIT.DOUBLE)) if acq_type == KIT.CONTINUOUS: # samples_count, = unpack('i', fid.read(KIT.INT)) fid.seek(KIT.INT, SEEK_CUR) sqd['n_samples'], = unpack('i', fid.read(KIT.INT)) elif acq_type == KIT.EVOKED or acq_type == KIT.EPOCHS: sqd['frame_length'], = unpack('i', fid.read(KIT.INT)) sqd['pretrigger_length'], = unpack('i', fid.read(KIT.INT)) sqd['average_count'], = unpack('i', fid.read(KIT.INT)) sqd['n_epochs'], = unpack('i', fid.read(KIT.INT)) if acq_type == KIT.EVOKED: sqd['n_samples'] = sqd['frame_length'] else: sqd['n_samples'] = sqd['frame_length'] * sqd['n_epochs'] else: raise IOError("Invalid acquisition type: %i. Your file is neither " "continuous nor epoched data." % (acq_type,)) # # digitization information (12 and 26) # dig_dir = dirs[KIT.DIR_INDEX_DIG_POINTS] cor_dir = dirs[KIT.DIR_INDEX_COREG] dig = dict() hsp = list() if dig_dir['count'] > 0 and cor_dir['count'] > 0: # directories (0) fid.seek(dig_dir['offset']) for _ in range(dig_dir['count']): name = _read_name(fid, n=8).strip() # Sometimes there are mismatches (e.g., AFz vs AFZ) between # the channel name and its digitized, name, so let's be case # insensitive. 
It will also prevent collisions with HSP name = name.lower() rr = np.fromfile(fid, 'd', 3) if name: assert name not in dig dig[name] = rr else: hsp.append(rr) # nasion, lpa, rpa, HPI in native space elp = [dig.pop(key) for key in ( 'fidnz', 'fidt9', 'fidt10', 'hpi_1', 'hpi_2', 'hpi_3', 'hpi_4')] if 'hpi_5' in dig and dig['hpi_5'].any(): elp.append(dig.pop('hpi_5')) elp = np.array(elp) hsp = np.array(hsp, float).reshape(-1, 3) assert elp.shape in ((7, 3), (8, 3)) # coregistration fid.seek(cor_dir['offset']) mrk = np.zeros((elp.shape[0] - 3, 3)) for _ in range(cor_dir['count']): done = np.fromfile(fid, np.int32, 1)[0] fid.seek(16 * KIT.DOUBLE + # meg_to_mri 16 * KIT.DOUBLE, # mri_to_meg SEEK_CUR) marker_count = np.fromfile(fid, np.int32, 1)[0] if not done: continue assert marker_count >= len(mrk) for mi in range(len(mrk)): mri_type, meg_type, mri_done, meg_done = \ np.fromfile(fid, np.int32, 4) assert meg_done fid.seek(3 * KIT.DOUBLE, SEEK_CUR) # mri_pos mrk[mi] = np.fromfile(fid, 'd', 3) fid.seek(256, SEEK_CUR) # marker_file (char) sqd.update(hsp=hsp, elp=elp, mrk=mrk) all_names = set(ch.get('name', '') for ch in channels) if standardize_names is None and all_names.difference({'', 'EEG'}): standardize_names = True warn('standardize_names defaults to True in 0.21 but will change ' 'to False in 0.22', DeprecationWarning) # precompute conversion factor for reading data if unsupported_format: if sysid not in LEGACY_AMP_PARAMS: raise IOError("Legacy parameters for system ID %i unavailable" % (sysid,)) adc_range, adc_stored = LEGACY_AMP_PARAMS[sysid] is_meg = np.array([ch['type'] in KIT.CHANNELS_MEG for ch in channels]) ad_to_volt = adc_range / (2 ** adc_stored) ad_to_tesla = ad_to_volt / amp_gain * channel_gain conv_factor = np.where(is_meg, ad_to_tesla, ad_to_volt) # XXX this is a bit of a hack. Should probably do this more cleanly at # some point... the 2 ** (adc_stored - 14) was emperically determined using # the test files with known amplitudes. The conv_factors need to be # replaced by these values otherwise we're off by a factor off 5000.0 # for the EEG data. 
is_exg = [ch['type'] in (KIT.CHANNEL_EEG, KIT.CHANNEL_ECG) for ch in channels] exg_gains /= 2 ** (adc_stored - 14) conv_factor[is_exg] = exg_gains sqd['conv_factor'] = conv_factor[:, np.newaxis] # Create raw.info dict for raw fif object with SQD data info = _empty_info(float(sqd['sfreq'])) info.update(meas_date=_stamp_to_dt((create_time, 0)), lowpass=sqd['lowpass'], highpass=sqd['highpass'], kit_system_id=sysid, description=description) # Creates a list of dicts of meg channels for raw.info logger.info('Setting channel info structure...') info['chs'] = fiff_channels = [] channel_index = defaultdict(lambda: 0) sqd['eeg_dig'] = OrderedDict() for idx, ch in enumerate(channels, 1): if ch['type'] in KIT.CHANNELS_MEG: ch_name = ch.get('name', '') if ch_name == '' or standardize_names: ch_name = 'MEG %03d' % idx # create three orthogonal vector # ch_angles[0]: theta, ch_angles[1]: phi theta, phi = np.radians(ch['loc'][3:]) x = sin(theta) * cos(phi) y = sin(theta) * sin(phi) z = cos(theta) vec_z = np.array([x, y, z]) vec_z /= linalg.norm(vec_z) vec_x = np.zeros(vec_z.size, dtype=np.float64) if vec_z[1] < vec_z[2]: if vec_z[0] < vec_z[1]: vec_x[0] = 1.0 else: vec_x[1] = 1.0 elif vec_z[0] < vec_z[2]: vec_x[0] = 1.0 else: vec_x[2] = 1.0 vec_x -= np.sum(vec_x * vec_z) * vec_z vec_x /= linalg.norm(vec_x) vec_y = np.cross(vec_z, vec_x) # transform to Neuromag like coordinate space vecs = np.vstack((ch['loc'][:3], vec_x, vec_y, vec_z)) vecs = apply_trans(als_ras_trans, vecs) unit = FIFF.FIFF_UNIT_T loc = vecs.ravel() else: ch_type_label = KIT.CH_LABEL[ch['type']] channel_index[ch_type_label] += 1 ch_type_index = channel_index[ch_type_label] ch_name = ch.get('name', '') eeg_name = ch_name.lower() # some files have all EEG labeled as EEG if ch_name in ('', 'EEG') or standardize_names: ch_name = '%s %03i' % (ch_type_label, ch_type_index) unit = FIFF.FIFF_UNIT_V loc = np.zeros(12) if eeg_name and eeg_name in dig: loc[:3] = sqd['eeg_dig'][eeg_name] = dig[eeg_name] fiff_channels.append(dict( cal=KIT.CALIB_FACTOR, logno=idx, scanno=idx, range=KIT.RANGE, unit=unit, unit_mul=KIT.UNIT_MUL, ch_name=ch_name, coord_frame=FIFF.FIFFV_COORD_DEVICE, coil_type=KIT.CH_TO_FIFF_COIL[ch['type']], kind=KIT.CH_TO_FIFF_KIND[ch['type']], loc=loc)) info._update_redundant() return info, sqd def _read_name(fid, ch_type=None, n=None): n = n if ch_type is None else KIT.CHANNEL_NAME_NCHAR[ch_type] return fid.read(n).split(b'\x00')[0].decode('utf-8') @fill_doc def read_raw_kit(input_fname, mrk=None, elp=None, hsp=None, stim='>', slope='-', stimthresh=1, preload=False, stim_code='binary', allow_unknown_format=False, standardize_names=None, verbose=None): """Reader function for Ricoh/KIT conversion to FIF. Parameters ---------- input_fname : str Path to the sqd file. mrk : None | str | array_like, shape (5, 3) | list of str or array_like Marker points representing the location of the marker coils with respect to the MEG Sensors, or path to a marker file. If list, all of the markers will be averaged together. elp : None | str | array_like, shape (8, 3) Digitizer points representing the location of the fiducials and the marker coils with respect to the digitized head shape, or path to a file containing these points. hsp : None | str | array, shape (n_points, 3) Digitizer head shape points, or path to head shape file. If more than 10,000 points are in the head shape, they are automatically decimated. stim : list of int | '<' | '>' Channel-value correspondence when converting KIT trigger channels to a Neuromag-style stim channel. 
For '<', the largest values are assigned to the first channel (default). For '>', the largest values are assigned to the last channel. Can also be specified as a list of trigger channel indexes. slope : '+' | '-' How to interpret values on KIT trigger channels when synthesizing a Neuromag-style stim channel. With '+', a positive slope (low-to-high) is interpreted as an event. With '-', a negative slope (high-to-low) is interpreted as an event. stimthresh : float The threshold level for accepting voltage changes in KIT trigger channels as a trigger event. %(preload)s stim_code : 'binary' | 'channel' How to decode trigger values from stim channels. 'binary' read stim channel events as binary code, 'channel' encodes channel number. allow_unknown_format : bool Force reading old data that is not officially supported. Alternatively, read and re-save the data with the KIT MEG Laboratory application. %(standardize_names)s %(verbose)s Returns ------- raw : instance of RawKIT A Raw object containing KIT data. See Also -------- mne.io.Raw : Documentation of attribute and methods. Notes ----- If mrk, hsp or elp are array_like inputs, then the numbers in xyz coordinates should be in units of meters. """ return RawKIT(input_fname=input_fname, mrk=mrk, elp=elp, hsp=hsp, stim=stim, slope=slope, stimthresh=stimthresh, preload=preload, stim_code=stim_code, allow_unknown_format=allow_unknown_format, standardize_names=standardize_names, verbose=verbose) @fill_doc def read_epochs_kit(input_fname, events, event_id=None, mrk=None, elp=None, hsp=None, allow_unknown_format=False, standardize_names=None, verbose=None): """Reader function for Ricoh/KIT epochs files. Parameters ---------- input_fname : str Path to the sqd file. events : array, shape (n_events, 3) The events typically returned by the read_events function. If some events don't match the events of interest as specified by event_id, they will be marked as 'IGNORED' in the drop log. event_id : int | list of int | dict | None The id of the event to consider. If dict, the keys can later be used to access associated events. Example: dict(auditory=1, visual=3). If int, a dict will be created with the id as string. If a list, all events with the IDs specified in the list are used. If None, all events will be used with and a dict is created with string integer names corresponding to the event id integers. mrk : None | str | array_like, shape (5, 3) | list of str or array_like Marker points representing the location of the marker coils with respect to the MEG Sensors, or path to a marker file. If list, all of the markers will be averaged together. elp : None | str | array_like, shape (8, 3) Digitizer points representing the location of the fiducials and the marker coils with respect to the digitized head shape, or path to a file containing these points. hsp : None | str | array, shape (n_points, 3) Digitizer head shape points, or path to head shape file. If more than 10,000 points are in the head shape, they are automatically decimated. allow_unknown_format : bool Force reading old data that is not officially supported. Alternatively, read and re-save the data with the KIT MEG Laboratory application. %(standardize_names)s %(verbose)s Returns ------- epochs : instance of Epochs The epochs. Notes ----- .. versionadded:: 0.9.0 """ epochs = EpochsKIT(input_fname=input_fname, events=events, event_id=event_id, mrk=mrk, elp=elp, hsp=hsp, allow_unknown_format=allow_unknown_format, standardize_names=standardize_names, verbose=verbose) return epochs
def _read_kit_data(self): """Read epochs data. Returns ------- data : array, [channels x samples] the data matrix (channels x samples). times : array, [samples] returns the time values corresponding to the samples. """ info = self._raw_extras[0] epoch_length = info['frame_length'] n_epochs = info['n_epochs'] n_samples = info['n_samples'] filename = info['filename'] dtype = info['dtype'] nchan = info['nchan'] with open(filename, 'rb', buffering=0) as fid: fid.seek(info['dirs'][KIT.DIR_INDEX_RAW_DATA]['offset']) count = n_samples * nchan data = np.fromfile(fid, dtype=dtype, count=count) data = data.reshape((n_samples, nchan)).T data = data * info['conv_factor'] data = data.reshape((nchan, n_epochs, epoch_length)) data = data.transpose((1, 0, 2)) return data
427
454
"""Conversion tool from SQD to FIF. RawKIT class is adapted from Denis Engemann et al.'s mne_bti2fiff.py. """ # Authors: Teon Brooks <[email protected]> # Joan Massich <[email protected]> # Christian Brodbeck <[email protected]> # # License: BSD (3-clause) from collections import defaultdict, OrderedDict from math import sin, cos from os import SEEK_CUR, path as op from struct import unpack import numpy as np from scipy import linalg from ..pick import pick_types from ...utils import (verbose, logger, warn, fill_doc, _check_option, _stamp_to_dt) from ...transforms import apply_trans, als_ras_trans from ..base import BaseRaw from ..utils import _mult_cal_one from ...epochs import BaseEpochs from ..constants import FIFF from ..meas_info import _empty_info from .constants import KIT, LEGACY_AMP_PARAMS from .coreg import read_mrk from ...event import read_events from .._digitization import _set_dig_kit def _call_digitization(info, mrk, elp, hsp, kit_info): # Use values from kit_info only if all others are None if mrk is None and elp is None and hsp is None: mrk = kit_info.get('mrk', None) elp = kit_info.get('elp', None) hsp = kit_info.get('hsp', None) # prepare mrk if isinstance(mrk, list): mrk = [read_mrk(marker) if isinstance(marker, str) else marker for marker in mrk] mrk = np.mean(mrk, axis=0) # setup digitization if mrk is not None and elp is not None and hsp is not None: dig_points, dev_head_t = _set_dig_kit( mrk, elp, hsp, kit_info['eeg_dig']) info['dig'] = dig_points info['dev_head_t'] = dev_head_t elif mrk is not None or elp is not None or hsp is not None: raise ValueError("mrk, elp and hsp need to be provided as a group " "(all or none)") return info class UnsupportedKITFormat(ValueError): """Our reader is not guaranteed to work with old files.""" def __init__(self, sqd_version, *args, **kwargs): # noqa: D102 self.sqd_version = sqd_version ValueError.__init__(self, *args, **kwargs) @fill_doc class RawKIT(BaseRaw): """Raw object from KIT SQD file. Parameters ---------- input_fname : str Path to the sqd file. mrk : None | str | array_like, shape (5, 3) | list of str or array_like Marker points representing the location of the marker coils with respect to the MEG Sensors, or path to a marker file. If list, all of the markers will be averaged together. elp : None | str | array_like, shape (8, 3) Digitizer points representing the location of the fiducials and the marker coils with respect to the digitized head shape, or path to a file containing these points. hsp : None | str | array, shape (n_points, 3) Digitizer head shape points, or path to head shape file. If more than 10,000 points are in the head shape, they are automatically decimated. stim : list of int | '<' | '>' | None Channel-value correspondence when converting KIT trigger channels to a Neuromag-style stim channel. For '<', the largest values are assigned to the first channel (default). For '>', the largest values are assigned to the last channel. Can also be specified as a list of trigger channel indexes. If None, no synthesized channel is generated. slope : '+' | '-' How to interpret values on KIT trigger channels when synthesizing a Neuromag-style stim channel. With '+', a positive slope (low-to-high) is interpreted as an event. With '-', a negative slope (high-to-low) is interpreted as an event. stimthresh : float The threshold level for accepting voltage changes in KIT trigger channels as a trigger event. If None, stim must also be set to None. 
%(preload)s stim_code : 'binary' | 'channel' How to decode trigger values from stim channels. 'binary' read stim channel events as binary code, 'channel' encodes channel number. allow_unknown_format : bool Force reading old data that is not officially supported. Alternatively, read and re-save the data with the KIT MEG Laboratory application. %(standardize_names)s %(verbose)s Notes ----- ``elp`` and ``hsp`` are usually the exported text files (*.txt) from the Polhemus FastScan system. hsp refers to the headshape surface points. elp refers to the points in head-space that corresponds to the HPI points. Currently, '*.elp' and '*.hsp' files are NOT supported. See Also -------- mne.io.Raw : Documentation of attribute and methods. """ @verbose def __init__(self, input_fname, mrk=None, elp=None, hsp=None, stim='>', slope='-', stimthresh=1, preload=False, stim_code='binary', allow_unknown_format=False, standardize_names=None, verbose=None): # noqa: D102 logger.info('Extracting SQD Parameters from %s...' % input_fname) input_fname = op.abspath(input_fname) self.preload = False logger.info('Creating Raw.info structure...') info, kit_info = get_kit_info( input_fname, allow_unknown_format, standardize_names) kit_info['slope'] = slope kit_info['stimthresh'] = stimthresh if kit_info['acq_type'] != KIT.CONTINUOUS: raise TypeError('SQD file contains epochs, not raw data. Wrong ' 'reader.') logger.info('Creating Info structure...') last_samps = [kit_info['n_samples'] - 1] self._raw_extras = [kit_info] self._set_stimchannels(info, stim, stim_code) super(RawKIT, self).__init__( info, preload, last_samps=last_samps, filenames=[input_fname], raw_extras=self._raw_extras, verbose=verbose) self.info = _call_digitization( info=self.info, mrk=mrk, elp=elp, hsp=hsp, kit_info=kit_info) logger.info('Ready.') def read_stim_ch(self, buffer_size=1e5): """Read events from data. Parameter --------- buffer_size : int The size of chunk to by which the data are scanned. Returns ------- events : array, [samples] The event vector (1 x samples). """ buffer_size = int(buffer_size) start = int(self.first_samp) stop = int(self.last_samp + 1) pick = pick_types(self.info, meg=False, ref_meg=False, stim=True, exclude=[]) stim_ch = np.empty((1, stop), dtype=np.int64) for b_start in range(start, stop, buffer_size): b_stop = b_start + buffer_size x = self[pick, b_start:b_stop][0] stim_ch[:, b_start:b_start + x.shape[1]] = x return stim_ch def _set_stimchannels(self, info, stim, stim_code): """Specify how the trigger channel is synthesized from analog channels. Has to be done before loading data. For a RawKIT instance that has been created with preload=True, this method will raise a NotImplementedError. Parameters ---------- info : instance of MeasInfo The measurement info. stim : list of int | '<' | '>' Can be submitted as list of trigger channels. If a list is not specified, the default triggers extracted from misc channels will be used with specified directionality. '<' means that largest values assigned to the first channel in sequence. '>' means the largest trigger assigned to the last channel in sequence. stim_code : 'binary' | 'channel' How to decode trigger values from stim channels. 'binary' read stim channel events as binary code, 'channel' encodes channel number. 
""" if self.preload: raise NotImplementedError("Can't change stim channel after " "loading data") _check_option('stim_code', stim_code, ['binary', 'channel']) if stim is not None: if isinstance(stim, str): picks = _default_stim_chs(info) if stim == '<': stim = picks[::-1] elif stim == '>': stim = picks else: raise ValueError("stim needs to be list of int, '>' or " "'<', not %r" % str(stim)) else: stim = np.asarray(stim, int) if stim.max() >= self._raw_extras[0]['nchan']: raise ValueError( 'Got stim=%s, but sqd file only has %i channels' % (stim, self._raw_extras[0]['nchan'])) # modify info nchan = self._raw_extras[0]['nchan'] + 1 info['chs'].append(dict( cal=KIT.CALIB_FACTOR, logno=nchan, scanno=nchan, range=1.0, unit=FIFF.FIFF_UNIT_NONE, unit_mul=FIFF.FIFF_UNITM_NONE, ch_name='STI 014', coil_type=FIFF.FIFFV_COIL_NONE, loc=np.full(12, np.nan), kind=FIFF.FIFFV_STIM_CH, coord_frame=FIFF.FIFFV_COORD_UNKNOWN)) info._update_redundant() self._raw_extras[0]['stim'] = stim self._raw_extras[0]['stim_code'] = stim_code def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): """Read a chunk of raw data.""" sqd = self._raw_extras[fi] nchan = sqd['nchan'] data_left = (stop - start) * nchan conv_factor = sqd['conv_factor'] n_bytes = sqd['dtype'].itemsize assert n_bytes in (2, 4) # Read up to 100 MB of data at a time. blk_size = min(data_left, (100000000 // n_bytes // nchan) * nchan) with open(self._filenames[fi], 'rb', buffering=0) as fid: # extract data pointer = start * nchan * n_bytes fid.seek(sqd['dirs'][KIT.DIR_INDEX_RAW_DATA]['offset'] + pointer) stim = sqd['stim'] for blk_start in np.arange(0, data_left, blk_size) // nchan: blk_size = min(blk_size, data_left - blk_start * nchan) block = np.fromfile(fid, dtype=sqd['dtype'], count=blk_size) block = block.reshape(nchan, -1, order='F').astype(float) blk_stop = blk_start + block.shape[1] data_view = data[:, blk_start:blk_stop] block *= conv_factor # Create a synthetic stim channel if stim is not None: stim_ch = _make_stim_channel( block[stim, :], sqd['slope'], sqd['stimthresh'], sqd['stim_code'], stim) block = np.vstack((block, stim_ch)) _mult_cal_one(data_view, block, idx, cals, mult) # cals are all unity, so can be ignored def _default_stim_chs(info): """Return default stim channels for SQD files.""" return pick_types(info, meg=False, ref_meg=False, misc=True, exclude=[])[:8] def _make_stim_channel(trigger_chs, slope, threshold, stim_code, trigger_values): """Create synthetic stim channel from multiple trigger channels.""" if slope == '+': trig_chs_bin = trigger_chs > threshold elif slope == '-': trig_chs_bin = trigger_chs < threshold else: raise ValueError("slope needs to be '+' or '-'") # trigger value if stim_code == 'binary': trigger_values = 2 ** np.arange(len(trigger_chs)) elif stim_code != 'channel': raise ValueError("stim_code must be 'binary' or 'channel', got %s" % repr(stim_code)) trig_chs = trig_chs_bin * trigger_values[:, np.newaxis] return np.array(trig_chs.sum(axis=0), ndmin=2) class EpochsKIT(BaseEpochs): """Epochs Array object from KIT SQD file. Parameters ---------- input_fname : str Path to the sqd file. events : str | array, shape (n_events, 3) Path to events file. If array, it is the events typically returned by the read_events function. If some events don't match the events of interest as specified by event_id,they will be marked as 'IGNORED' in the drop log. event_id : int | list of int | dict | None The id of the event to consider. If dict, the keys can later be used to access associated events. 
Example: dict(auditory=1, visual=3). If int, a dict will be created with the id as string. If a list, all events with the IDs specified in the list are used. If None, all events will be used with and a dict is created with string integer names corresponding to the event id integers. tmin : float Start time before event. baseline : None or tuple of length 2 (default (None, 0)) The time interval to apply baseline correction. If None do not apply it. If baseline is (a, b) the interval is between "a (s)" and "b (s)". If a is None the beginning of the data is used and if b is None then b is set to the end of the interval. If baseline is equal to (None, None) all the time interval is used. The baseline (a, b) includes both endpoints, i.e. all timepoints t such that a <= t <= b. reject : dict | None Rejection parameters based on peak-to-peak amplitude. Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'. If reject is None then no rejection is done. Example:: reject = dict(grad=4000e-13, # T / m (gradiometers) mag=4e-12, # T (magnetometers) eeg=40e-6, # V (EEG channels) eog=250e-6 # V (EOG channels) ) flat : dict | None Rejection parameters based on flatness of signal. Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values are floats that set the minimum acceptable peak-to-peak amplitude. If flat is None then no rejection is done. reject_tmin : scalar | None Start of the time window used to reject epochs (with the default None, the window will start with tmin). reject_tmax : scalar | None End of the time window used to reject epochs (with the default None, the window will end with tmax). mrk : None | str | array_like, shape = (5, 3) | list of str or array_like Marker points representing the location of the marker coils with respect to the MEG Sensors, or path to a marker file. If list, all of the markers will be averaged together. elp : None | str | array_like, shape = (8, 3) Digitizer points representing the location of the fiducials and the marker coils with respect to the digitized head shape, or path to a file containing these points. hsp : None | str | array, shape = (n_points, 3) Digitizer head shape points, or path to head shape file. If more than 10`000 points are in the head shape, they are automatically decimated. allow_unknown_format : bool Force reading old data that is not officially supported. Alternatively, read and re-save the data with the KIT MEG Laboratory application. %(standardize_names)s %(verbose)s Notes ----- ``elp`` and ``hsp`` are usually the exported text files (*.txt) from the Polhemus FastScan system. hsp refers to the headshape surface points. elp refers to the points in head-space that corresponds to the HPI points. Currently, '*.elp' and '*.hsp' files are NOT supported. See Also -------- mne.Epochs : Documentation of attribute and methods. """ @verbose def __init__(self, input_fname, events, event_id=None, tmin=0, baseline=None, reject=None, flat=None, reject_tmin=None, reject_tmax=None, mrk=None, elp=None, hsp=None, allow_unknown_format=False, standardize_names=None, verbose=None): # noqa: D102 if isinstance(events, str): events = read_events(events) logger.info('Extracting KIT Parameters from %s...' 
% input_fname) input_fname = op.abspath(input_fname) self.info, kit_info = get_kit_info( input_fname, allow_unknown_format, standardize_names) kit_info.update(filename=input_fname) self._raw_extras = [kit_info] self._filenames = [] if len(events) != self._raw_extras[0]['n_epochs']: raise ValueError('Event list does not match number of epochs.') if self._raw_extras[0]['acq_type'] == KIT.EPOCHS: self._raw_extras[0]['data_length'] = KIT.INT else: raise TypeError('SQD file contains raw data, not epochs or ' 'average. Wrong reader.') if event_id is None: # convert to int to make typing-checks happy event_id = {str(e): int(e) for e in np.unique(events[:, 2])} for key, val in event_id.items(): if val not in events[:, 2]: raise ValueError('No matching events found for %s ' '(event id %i)' % (key, val)) data = self._read_kit_data() assert data.shape == (self._raw_extras[0]['n_epochs'], self.info['nchan'], self._raw_extras[0]['frame_length']) tmax = ((data.shape[2] - 1) / self.info['sfreq']) + tmin super(EpochsKIT, self).__init__( self.info, data, events, event_id, tmin, tmax, baseline, reject=reject, flat=flat, reject_tmin=reject_tmin, reject_tmax=reject_tmax, filename=input_fname, verbose=verbose) self.info = _call_digitization( info=self.info, mrk=mrk, elp=elp, hsp=hsp, kit_info=kit_info) logger.info('Ready.') def _read_kit_data(self): """Read epochs data. Returns ------- data : array, [channels x samples] the data matrix (channels x samples). times : array, [samples] returns the time values corresponding to the samples. """ info = self._raw_extras[0] epoch_length = info['frame_length'] n_epochs = info['n_epochs'] n_samples = info['n_samples'] filename = info['filename'] dtype = info['dtype'] nchan = info['nchan'] with open(filename, 'rb', buffering=0) as fid: fid.seek(info['dirs'][KIT.DIR_INDEX_RAW_DATA]['offset']) count = n_samples * nchan data = np.fromfile(fid, dtype=dtype, count=count) data = data.reshape((n_samples, nchan)).T data = data * info['conv_factor'] data = data.reshape((nchan, n_epochs, epoch_length)) data = data.transpose((1, 0, 2)) return data def _read_dir(fid): return dict(offset=np.fromfile(fid, np.uint32, 1)[0], size=np.fromfile(fid, np.int32, 1)[0], max_count=np.fromfile(fid, np.int32, 1)[0], count=np.fromfile(fid, np.int32, 1)[0]) @verbose def get_kit_info(rawfile, allow_unknown_format, standardize_names=None, verbose=None): """Extract all the information from the sqd/con file. Parameters ---------- rawfile : str KIT file to be read. allow_unknown_format : bool Force reading old data that is not officially supported. Alternatively, read and re-save the data with the KIT MEG Laboratory application. %(standardize_names)s %(verbose)s Returns ------- info : instance of Info An Info for the instance. sqd : dict A dict containing all the sqd parameter settings. 
""" sqd = dict() sqd['rawfile'] = rawfile unsupported_format = False sqd['dirs'] = dirs = list() with open(rawfile, 'rb', buffering=0) as fid: # buffering=0 for np bug # # directories (0) # dirs.append(_read_dir(fid)) dirs.extend(_read_dir(fid) for _ in range(dirs[0]['count'] - 1)) assert len(dirs) == dirs[KIT.DIR_INDEX_DIR]['count'] # # system (1) # fid.seek(dirs[KIT.DIR_INDEX_SYSTEM]['offset']) # check file format version version, revision = unpack('2i', fid.read(2 * KIT.INT)) if version < 2 or (version == 2 and revision < 3): version_string = "V%iR%03i" % (version, revision) if allow_unknown_format: unsupported_format = True logger.warning("Force loading KIT format %s", version_string) else: raise UnsupportedKITFormat( version_string, "SQD file format %s is not officially supported. " "Set allow_unknown_format=True to load it anyways." % (version_string,)) sysid = unpack('i', fid.read(KIT.INT))[0] # basic info system_name = unpack('128s', fid.read(128))[0].decode() # model name model_name = unpack('128s', fid.read(128))[0].decode() # channels sqd['nchan'] = channel_count = unpack('i', fid.read(KIT.INT))[0] comment = unpack('256s', fid.read(256))[0].decode() create_time, last_modified_time = unpack('2i', fid.read(2 * KIT.INT)) fid.seek(KIT.INT * 3, SEEK_CUR) # reserved dewar_style = unpack('i', fid.read(KIT.INT))[0] fid.seek(KIT.INT * 3, SEEK_CUR) # spare fll_type = unpack('i', fid.read(KIT.INT))[0] fid.seek(KIT.INT * 3, SEEK_CUR) # spare trigger_type = unpack('i', fid.read(KIT.INT))[0] fid.seek(KIT.INT * 3, SEEK_CUR) # spare adboard_type = unpack('i', fid.read(KIT.INT))[0] fid.seek(KIT.INT * 29, SEEK_CUR) # reserved if version < 2 or (version == 2 and revision <= 3): adc_range = float(unpack('i', fid.read(KIT.INT))[0]) else: adc_range = unpack('d', fid.read(KIT.DOUBLE))[0] adc_polarity, adc_allocated, adc_stored = unpack('3i', fid.read(3 * KIT.INT)) system_name = system_name.replace('\x00', '') system_name = system_name.strip().replace('\n', '/') model_name = model_name.replace('\x00', '') model_name = model_name.strip().replace('\n', '/') full_version = f'V{version:d}R{revision:03d}' logger.debug("SQD file basic information:") logger.debug("Meg160 version = %s", full_version) logger.debug("System ID = %i", sysid) logger.debug("System name = %s", system_name) logger.debug("Model name = %s", model_name) logger.debug("Channel count = %i", channel_count) logger.debug("Comment = %s", comment) logger.debug("Dewar style = %i", dewar_style) logger.debug("FLL type = %i", fll_type) logger.debug("Trigger type = %i", trigger_type) logger.debug("A/D board type = %i", adboard_type) logger.debug("ADC range = +/-%s[V]", adc_range / 2.) logger.debug("ADC allocate = %i[bit]", adc_allocated) logger.debug("ADC bit = %i[bit]", adc_stored) # MGH description: 'acquisition (megacq) VectorView system at NMR-MGH' description = \ f'{system_name} ({sysid}) {full_version} {model_name}' sqd['dtype'] = np.dtype(getattr(np, f'int{adc_allocated}')) # check that we can read this file if fll_type not in KIT.FLL_SETTINGS: fll_types = sorted(KIT.FLL_SETTINGS.keys()) use_fll_type = fll_types[ np.searchsorted(fll_types, fll_type) - 1] warn('Unknown site filter settings (FLL) for system ' '"%s" model "%s" (ID %s), will assume FLL %d->%d, check ' 'your data for correctness, including channel scales and ' 'filter settings!' 
% (system_name, model_name, sysid, fll_type, use_fll_type)) fll_type = use_fll_type # # channel information (4) # chan_dir = dirs[KIT.DIR_INDEX_CHANNELS] chan_offset, chan_size = chan_dir['offset'], chan_dir['size'] sqd['channels'] = channels = [] exg_gains = list() for i in range(channel_count): fid.seek(chan_offset + chan_size * i) channel_type, = unpack('i', fid.read(KIT.INT)) # System 52 mislabeled reference channels as NULL. This was fixed # in system 53; not sure about 51... if sysid == 52 and i < 160 and channel_type == KIT.CHANNEL_NULL: channel_type = KIT.CHANNEL_MAGNETOMETER_REFERENCE if channel_type in KIT.CHANNELS_MEG: if channel_type not in KIT.CH_TO_FIFF_COIL: raise NotImplementedError( "KIT channel type %i can not be read. Please contact " "the mne-python developers." % channel_type) channels.append({ 'type': channel_type, # (x, y, z, theta, phi) for all MEG channels. Some channel # types have additional information which we're not using. 'loc': np.fromfile(fid, dtype='d', count=5), }) if channel_type in KIT.CHANNEL_NAME_NCHAR: fid.seek(16, SEEK_CUR) # misc fields channels[-1]['name'] = _read_name(fid, channel_type) elif channel_type in KIT.CHANNELS_MISC: channel_no, = unpack('i', fid.read(KIT.INT)) fid.seek(4, SEEK_CUR) name = _read_name(fid, channel_type) channels.append({ 'type': channel_type, 'no': channel_no, 'name': name, }) if channel_type in (KIT.CHANNEL_EEG, KIT.CHANNEL_ECG): offset = 6 if channel_type == KIT.CHANNEL_EEG else 8 fid.seek(offset, SEEK_CUR) exg_gains.append(np.fromfile(fid, 'd', 1)[0]) elif channel_type == KIT.CHANNEL_NULL: channels.append({'type': channel_type}) else: raise IOError("Unknown KIT channel type: %i" % channel_type) exg_gains = np.array(exg_gains) # # Channel sensitivity information: (5) # # only sensor channels requires gain. 
the additional misc channels # (trigger channels, audio and voice channels) are passed # through unaffected fid.seek(dirs[KIT.DIR_INDEX_CALIBRATION]['offset']) # (offset [Volt], gain [Tesla/Volt]) for each channel sensitivity = np.fromfile(fid, dtype='d', count=channel_count * 2) sensitivity.shape = (channel_count, 2) channel_offset, channel_gain = sensitivity.T assert (channel_offset == 0).all() # otherwise we have a problem # # amplifier gain (7) # fid.seek(dirs[KIT.DIR_INDEX_AMP_FILTER]['offset']) amp_data = unpack('i', fid.read(KIT.INT))[0] if fll_type >= 100: # Kapper Type # gain: mask bit gain1 = (amp_data & 0x00007000) >> 12 gain2 = (amp_data & 0x70000000) >> 28 gain3 = (amp_data & 0x07000000) >> 24 amp_gain = (KIT.GAINS[gain1] * KIT.GAINS[gain2] * KIT.GAINS[gain3]) # filter settings hpf = (amp_data & 0x00000700) >> 8 lpf = (amp_data & 0x00070000) >> 16 bef = (amp_data & 0x00000003) >> 0 else: # Hanger Type # gain input_gain = (amp_data & 0x1800) >> 11 output_gain = (amp_data & 0x0007) >> 0 amp_gain = KIT.GAINS[input_gain] * KIT.GAINS[output_gain] # filter settings hpf = (amp_data & 0x007) >> 4 lpf = (amp_data & 0x0700) >> 8 bef = (amp_data & 0xc000) >> 14 hpf_options, lpf_options, bef_options = KIT.FLL_SETTINGS[fll_type] sqd['highpass'] = KIT.HPFS[hpf_options][hpf] sqd['lowpass'] = KIT.LPFS[lpf_options][lpf] sqd['notch'] = KIT.BEFS[bef_options][bef] # # Acquisition Parameters (8) # fid.seek(dirs[KIT.DIR_INDEX_ACQ_COND]['offset']) sqd['acq_type'], = acq_type, = unpack('i', fid.read(KIT.INT)) sqd['sfreq'], = unpack('d', fid.read(KIT.DOUBLE)) if acq_type == KIT.CONTINUOUS: # samples_count, = unpack('i', fid.read(KIT.INT)) fid.seek(KIT.INT, SEEK_CUR) sqd['n_samples'], = unpack('i', fid.read(KIT.INT)) elif acq_type == KIT.EVOKED or acq_type == KIT.EPOCHS: sqd['frame_length'], = unpack('i', fid.read(KIT.INT)) sqd['pretrigger_length'], = unpack('i', fid.read(KIT.INT)) sqd['average_count'], = unpack('i', fid.read(KIT.INT)) sqd['n_epochs'], = unpack('i', fid.read(KIT.INT)) if acq_type == KIT.EVOKED: sqd['n_samples'] = sqd['frame_length'] else: sqd['n_samples'] = sqd['frame_length'] * sqd['n_epochs'] else: raise IOError("Invalid acquisition type: %i. Your file is neither " "continuous nor epoched data." % (acq_type,)) # # digitization information (12 and 26) # dig_dir = dirs[KIT.DIR_INDEX_DIG_POINTS] cor_dir = dirs[KIT.DIR_INDEX_COREG] dig = dict() hsp = list() if dig_dir['count'] > 0 and cor_dir['count'] > 0: # directories (0) fid.seek(dig_dir['offset']) for _ in range(dig_dir['count']): name = _read_name(fid, n=8).strip() # Sometimes there are mismatches (e.g., AFz vs AFZ) between # the channel name and its digitized, name, so let's be case # insensitive. 
It will also prevent collisions with HSP name = name.lower() rr = np.fromfile(fid, 'd', 3) if name: assert name not in dig dig[name] = rr else: hsp.append(rr) # nasion, lpa, rpa, HPI in native space elp = [dig.pop(key) for key in ( 'fidnz', 'fidt9', 'fidt10', 'hpi_1', 'hpi_2', 'hpi_3', 'hpi_4')] if 'hpi_5' in dig and dig['hpi_5'].any(): elp.append(dig.pop('hpi_5')) elp = np.array(elp) hsp = np.array(hsp, float).reshape(-1, 3) assert elp.shape in ((7, 3), (8, 3)) # coregistration fid.seek(cor_dir['offset']) mrk = np.zeros((elp.shape[0] - 3, 3)) for _ in range(cor_dir['count']): done = np.fromfile(fid, np.int32, 1)[0] fid.seek(16 * KIT.DOUBLE + # meg_to_mri 16 * KIT.DOUBLE, # mri_to_meg SEEK_CUR) marker_count = np.fromfile(fid, np.int32, 1)[0] if not done: continue assert marker_count >= len(mrk) for mi in range(len(mrk)): mri_type, meg_type, mri_done, meg_done = \ np.fromfile(fid, np.int32, 4) assert meg_done fid.seek(3 * KIT.DOUBLE, SEEK_CUR) # mri_pos mrk[mi] = np.fromfile(fid, 'd', 3) fid.seek(256, SEEK_CUR) # marker_file (char) sqd.update(hsp=hsp, elp=elp, mrk=mrk) all_names = set(ch.get('name', '') for ch in channels) if standardize_names is None and all_names.difference({'', 'EEG'}): standardize_names = True warn('standardize_names defaults to True in 0.21 but will change ' 'to False in 0.22', DeprecationWarning) # precompute conversion factor for reading data if unsupported_format: if sysid not in LEGACY_AMP_PARAMS: raise IOError("Legacy parameters for system ID %i unavailable" % (sysid,)) adc_range, adc_stored = LEGACY_AMP_PARAMS[sysid] is_meg = np.array([ch['type'] in KIT.CHANNELS_MEG for ch in channels]) ad_to_volt = adc_range / (2 ** adc_stored) ad_to_tesla = ad_to_volt / amp_gain * channel_gain conv_factor = np.where(is_meg, ad_to_tesla, ad_to_volt) # XXX this is a bit of a hack. Should probably do this more cleanly at # some point... the 2 ** (adc_stored - 14) was emperically determined using # the test files with known amplitudes. The conv_factors need to be # replaced by these values otherwise we're off by a factor off 5000.0 # for the EEG data. 
is_exg = [ch['type'] in (KIT.CHANNEL_EEG, KIT.CHANNEL_ECG) for ch in channels] exg_gains /= 2 ** (adc_stored - 14) conv_factor[is_exg] = exg_gains sqd['conv_factor'] = conv_factor[:, np.newaxis] # Create raw.info dict for raw fif object with SQD data info = _empty_info(float(sqd['sfreq'])) info.update(meas_date=_stamp_to_dt((create_time, 0)), lowpass=sqd['lowpass'], highpass=sqd['highpass'], kit_system_id=sysid, description=description) # Creates a list of dicts of meg channels for raw.info logger.info('Setting channel info structure...') info['chs'] = fiff_channels = [] channel_index = defaultdict(lambda: 0) sqd['eeg_dig'] = OrderedDict() for idx, ch in enumerate(channels, 1): if ch['type'] in KIT.CHANNELS_MEG: ch_name = ch.get('name', '') if ch_name == '' or standardize_names: ch_name = 'MEG %03d' % idx # create three orthogonal vector # ch_angles[0]: theta, ch_angles[1]: phi theta, phi = np.radians(ch['loc'][3:]) x = sin(theta) * cos(phi) y = sin(theta) * sin(phi) z = cos(theta) vec_z = np.array([x, y, z]) vec_z /= linalg.norm(vec_z) vec_x = np.zeros(vec_z.size, dtype=np.float64) if vec_z[1] < vec_z[2]: if vec_z[0] < vec_z[1]: vec_x[0] = 1.0 else: vec_x[1] = 1.0 elif vec_z[0] < vec_z[2]: vec_x[0] = 1.0 else: vec_x[2] = 1.0 vec_x -= np.sum(vec_x * vec_z) * vec_z vec_x /= linalg.norm(vec_x) vec_y = np.cross(vec_z, vec_x) # transform to Neuromag like coordinate space vecs = np.vstack((ch['loc'][:3], vec_x, vec_y, vec_z)) vecs = apply_trans(als_ras_trans, vecs) unit = FIFF.FIFF_UNIT_T loc = vecs.ravel() else: ch_type_label = KIT.CH_LABEL[ch['type']] channel_index[ch_type_label] += 1 ch_type_index = channel_index[ch_type_label] ch_name = ch.get('name', '') eeg_name = ch_name.lower() # some files have all EEG labeled as EEG if ch_name in ('', 'EEG') or standardize_names: ch_name = '%s %03i' % (ch_type_label, ch_type_index) unit = FIFF.FIFF_UNIT_V loc = np.zeros(12) if eeg_name and eeg_name in dig: loc[:3] = sqd['eeg_dig'][eeg_name] = dig[eeg_name] fiff_channels.append(dict( cal=KIT.CALIB_FACTOR, logno=idx, scanno=idx, range=KIT.RANGE, unit=unit, unit_mul=KIT.UNIT_MUL, ch_name=ch_name, coord_frame=FIFF.FIFFV_COORD_DEVICE, coil_type=KIT.CH_TO_FIFF_COIL[ch['type']], kind=KIT.CH_TO_FIFF_KIND[ch['type']], loc=loc)) info._update_redundant() return info, sqd def _read_name(fid, ch_type=None, n=None): n = n if ch_type is None else KIT.CHANNEL_NAME_NCHAR[ch_type] return fid.read(n).split(b'\x00')[0].decode('utf-8') @fill_doc def read_raw_kit(input_fname, mrk=None, elp=None, hsp=None, stim='>', slope='-', stimthresh=1, preload=False, stim_code='binary', allow_unknown_format=False, standardize_names=None, verbose=None): """Reader function for Ricoh/KIT conversion to FIF. Parameters ---------- input_fname : str Path to the sqd file. mrk : None | str | array_like, shape (5, 3) | list of str or array_like Marker points representing the location of the marker coils with respect to the MEG Sensors, or path to a marker file. If list, all of the markers will be averaged together. elp : None | str | array_like, shape (8, 3) Digitizer points representing the location of the fiducials and the marker coils with respect to the digitized head shape, or path to a file containing these points. hsp : None | str | array, shape (n_points, 3) Digitizer head shape points, or path to head shape file. If more than 10,000 points are in the head shape, they are automatically decimated. stim : list of int | '<' | '>' Channel-value correspondence when converting KIT trigger channels to a Neuromag-style stim channel. 
For '<', the largest values are assigned to the first channel (default). For '>', the largest values are assigned to the last channel. Can also be specified as a list of trigger channel indexes. slope : '+' | '-' How to interpret values on KIT trigger channels when synthesizing a Neuromag-style stim channel. With '+', a positive slope (low-to-high) is interpreted as an event. With '-', a negative slope (high-to-low) is interpreted as an event. stimthresh : float The threshold level for accepting voltage changes in KIT trigger channels as a trigger event. %(preload)s stim_code : 'binary' | 'channel' How to decode trigger values from stim channels. 'binary' read stim channel events as binary code, 'channel' encodes channel number. allow_unknown_format : bool Force reading old data that is not officially supported. Alternatively, read and re-save the data with the KIT MEG Laboratory application. %(standardize_names)s %(verbose)s Returns ------- raw : instance of RawKIT A Raw object containing KIT data. See Also -------- mne.io.Raw : Documentation of attribute and methods. Notes ----- If mrk, hsp or elp are array_like inputs, then the numbers in xyz coordinates should be in units of meters. """ return RawKIT(input_fname=input_fname, mrk=mrk, elp=elp, hsp=hsp, stim=stim, slope=slope, stimthresh=stimthresh, preload=preload, stim_code=stim_code, allow_unknown_format=allow_unknown_format, standardize_names=standardize_names, verbose=verbose) @fill_doc def read_epochs_kit(input_fname, events, event_id=None, mrk=None, elp=None, hsp=None, allow_unknown_format=False, standardize_names=None, verbose=None): """Reader function for Ricoh/KIT epochs files. Parameters ---------- input_fname : str Path to the sqd file. events : array, shape (n_events, 3) The events typically returned by the read_events function. If some events don't match the events of interest as specified by event_id, they will be marked as 'IGNORED' in the drop log. event_id : int | list of int | dict | None The id of the event to consider. If dict, the keys can later be used to access associated events. Example: dict(auditory=1, visual=3). If int, a dict will be created with the id as string. If a list, all events with the IDs specified in the list are used. If None, all events will be used with and a dict is created with string integer names corresponding to the event id integers. mrk : None | str | array_like, shape (5, 3) | list of str or array_like Marker points representing the location of the marker coils with respect to the MEG Sensors, or path to a marker file. If list, all of the markers will be averaged together. elp : None | str | array_like, shape (8, 3) Digitizer points representing the location of the fiducials and the marker coils with respect to the digitized head shape, or path to a file containing these points. hsp : None | str | array, shape (n_points, 3) Digitizer head shape points, or path to head shape file. If more than 10,000 points are in the head shape, they are automatically decimated. allow_unknown_format : bool Force reading old data that is not officially supported. Alternatively, read and re-save the data with the KIT MEG Laboratory application. %(standardize_names)s %(verbose)s Returns ------- epochs : instance of Epochs The epochs. Notes ----- .. versionadded:: 0.9.0 """ epochs = EpochsKIT(input_fname=input_fname, events=events, event_id=event_id, mrk=mrk, elp=elp, hsp=hsp, allow_unknown_format=allow_unknown_format, standardize_names=standardize_names, verbose=verbose) return epochs
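For reference, a minimal usage sketch of the reader defined above. The file names ('data.sqd', 'marker.sqd', 'fiducials.txt', 'headshape.txt') are hypothetical placeholders, and the keyword values simply restate the documented defaults; this is an illustration, not code taken from the source file itself.

import mne

# Minimal usage sketch (hypothetical file paths): read a KIT/Ricoh recording
# into a Raw object and synthesize the 'STI 014' stim channel from the
# default trigger channels.
raw = mne.io.read_raw_kit(
    'data.sqd',            # hypothetical path to the KIT .sqd/.con recording
    mrk='marker.sqd',      # optional marker coil measurement
    elp='fiducials.txt',   # optional digitized fiducials + HPI points
    hsp='headshape.txt',   # optional digitized head shape points
    stim='>',              # '>' assigns the largest values to the last channel
    slope='-',             # a negative slope (high-to-low) marks an event
    stimthresh=1,          # voltage-change threshold for trigger detection
    preload=True,
)
print(raw.info)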
get_home_envvars
Return dict with env variables to be adjusted for a new HOME

Only variables found in current os.environ are adjusted.

Parameters
----------
new_home: str or Path
  New home path, in the native OS "schema"
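Before the masked source file, here is a minimal sketch of what this helper might look like, written only from the docstring above; it is not a transcription of the masked implementation. The Windows-specific variable names (USERPROFILE, HOMEDRIVE, HOMEPATH) and the locally recomputed on_windows flag are assumptions for illustration; the module below defines its own on_windows flag and imports splitdrive.

import os
from os.path import splitdrive

# Sketch only -- mirrors the documented behavior, not the masked implementation.
on_windows = os.name == 'nt'  # the module below defines an equivalent flag


def get_home_envvars(new_home):
    new_home = str(new_home)  # accept Path as well as str
    candidates = {'HOME': new_home}
    if on_windows:
        # Windows mirrors the home location in several variables (assumption).
        candidates['USERPROFILE'] = new_home
        candidates['HOMEDRIVE'], candidates['HOMEPATH'] = splitdrive(new_home)
    # adjust only variables that are already present in the current environment
    return {name: value for name, value in candidates.items()
            if name in os.environ}

A caller would typically apply the result with something like os.environ.update(get_home_envvars(tmp_home)) before spawning a process that should see the substitute HOME.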
# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*- # ex: set sts=4 ts=4 sw=4 et: # ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## # # See COPYING file distributed along with the datalad package for the # copyright and license terms. # # ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## import collections from collections.abc import Callable import re import builtins import time import logging import shutil import os import sys import tempfile from tempfile import NamedTemporaryFile import platform import gc import glob import gzip import stat import string import warnings import os.path as op from copy import copy as shallow_copy from contextlib import contextmanager from functools import ( lru_cache, wraps, ) from time import sleep import inspect from itertools import tee # this import is required because other modules import opj from here. from os.path import join as opj from os.path import ( abspath, basename, commonprefix, curdir, dirname, exists, expanduser, expandvars, isabs, isdir, islink, lexists, normpath, pardir, relpath, sep, split, splitdrive ) import posixpath from shlex import ( quote as shlex_quote, split as shlex_split, ) # from datalad.dochelpers import get_docstring_split from datalad.consts import TIMESTAMP_FMT from datalad.support.exceptions import CapturedException unicode_srctypes = str, bytes lgr = logging.getLogger("datalad.utils") lgr.log(5, "Importing datalad.utils") # # Some useful variables # platform_system = platform.system().lower() on_windows = platform_system == 'windows' on_osx = platform_system == 'darwin' on_linux = platform_system == 'linux' on_msys_tainted_paths = on_windows \ and 'MSYS_NO_PATHCONV' not in os.environ \ and os.environ.get('MSYSTEM', '')[:4] in ('MSYS', 'MING') # Takes ~200msec, so should not be called at import time @lru_cache() # output should not change through life time of datalad process def get_linux_distribution(): """Compatibility wrapper for {platform,distro}.linux_distribution(). """ if hasattr(platform, "linux_distribution"): # Use deprecated (but faster) method if it's available. with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=DeprecationWarning) result = platform.linux_distribution() else: import distro # We require this for Python 3.8 and above. result = distro.linux_distribution(full_distribution_name=False) return result # Those weren't used for any critical decision making, thus we just set them to None # Use get_linux_distribution() directly where needed linux_distribution_name = linux_distribution_release = None # Maximal length of cmdline string # Query the system and use hardcoded "knowledge" if None # probably getconf ARG_MAX might not be available # The last one would be the most conservative/Windows CMD_MAX_ARG_HARDCODED = 2097152 if on_linux else 262144 if on_osx else 32767 try: CMD_MAX_ARG = os.sysconf('SC_ARG_MAX') assert CMD_MAX_ARG > 0 if CMD_MAX_ARG > CMD_MAX_ARG_HARDCODED * 1e6: # workaround for some kind of a bug which comes up with python 3.4 # see https://github.com/datalad/datalad/issues/3150 # or on older CentOS with conda and python as new as 3.9 # see https://github.com/datalad/datalad/issues/5943 # TODO: let Yarik know that the world is a paradise now whenever 1e6 # is not large enough CMD_MAX_ARG = min(CMD_MAX_ARG, CMD_MAX_ARG_HARDCODED) except Exception as exc: # ATM (20181005) SC_ARG_MAX available only on POSIX systems # so exception would be thrown e.g. 
on Windows, or # somehow during Debian build for nd14.04 it is coming up with -1: # https://github.com/datalad/datalad/issues/3015 CMD_MAX_ARG = CMD_MAX_ARG_HARDCODED lgr.debug( "Failed to query or got useless SC_ARG_MAX sysconf, " "will use hardcoded value: %s", exc) # Even with all careful computations we do, due to necessity to account for # environment and what not, we still could not figure out "exact" way to # estimate it, but it was shown that 300k safety margin on linux was sufficient. # https://github.com/datalad/datalad/pull/2977#issuecomment-436264710 # 300k is ~15%, so to be safe, and for paranoid us we will just use up to 50% # of the length for "safety margin". We might probably still blow due to # env vars, unicode, etc... so any hard limit imho is not a proper solution CMD_MAX_ARG = int(0.5 * CMD_MAX_ARG) lgr.debug( "Maximal length of cmdline string (adjusted for safety margin): %d", CMD_MAX_ARG) # # Little helpers # # `getargspec` has been deprecated in Python 3. ArgSpecFake = collections.namedtuple( "ArgSpecFake", ["args", "varargs", "keywords", "defaults"]) def getargspec(func, *, include_kwonlyargs=False): """Compat shim for getargspec deprecated in python 3. The main difference from inspect.getargspec (and inspect.getfullargspec for that matter) is that by using inspect.signature we are providing correct args/defaults for functools.wraps'ed functions. `include_kwonlyargs` option was added to centralize getting all args, even the ones which are kwonly (follow the ``*,``). For internal use and not advised for use in 3rd party code. Please use inspect.signature directly. """ # We use signature, and not getfullargspec, because only signature properly # "passes" args from a functools.wraps decorated function. # Note: getfullargspec works Ok on wrapt-decorated functions f_sign = inspect.signature(func) # Loop through parameters and compose argspec args4 = [[], None, None, {}] # Collect all kwonlyargs into a dedicated dict - name: default kwonlyargs = {} # shortcuts args, defaults = args4[0], args4[3] P = inspect.Parameter for p_name, p in f_sign.parameters.items(): if p.kind in (P.POSITIONAL_ONLY, P.POSITIONAL_OR_KEYWORD): assert not kwonlyargs # yoh: must not come after kwonlyarg args.append(p_name) if p.default is not P.empty: defaults[p_name] = p.default elif p.kind == P.VAR_POSITIONAL: args4[1] = p_name elif p.kind == P.VAR_KEYWORD: args4[2] = p_name elif p.kind == P.KEYWORD_ONLY: assert p.default is not P.empty kwonlyargs[p_name] = p.default if kwonlyargs: if not include_kwonlyargs: raise ValueError( 'Function has keyword-only parameters or annotations, either use ' 'inspect.signature() API which can support them, or provide include_kwonlyargs=True ' 'to this function' ) else: args.extend(list(kwonlyargs)) defaults.update(kwonlyargs) # harmonize defaults to how original getargspec returned them -- just a tuple args4[3] = None if not defaults else tuple(defaults.values()) return ArgSpecFake(*args4) def any_re_search(regexes, value): """Return if any of regexes (list or str) searches successfully for value""" for regex in ensure_tuple_or_list(regexes): if re.search(regex, value): return True return False def not_supported_on_windows(msg=None): """A little helper to be invoked to consistently fail whenever functionality is not supported (yet) on Windows """ if on_windows: raise NotImplementedError("This functionality is not yet implemented for Windows OS" + (": %s" % msg if msg else "")) # MASKED: get_home_envvars function (lines 232-251) def shortened_repr(value, l=30): 
try: if hasattr(value, '__repr__') and (value.__repr__ is not object.__repr__): value_repr = repr(value) if not value_repr.startswith('<') and len(value_repr) > l: value_repr = "<<%s++%d chars++%s>>" % ( value_repr[:l - 16], len(value_repr) - (l - 16 + 4), value_repr[-4:] ) elif value_repr.startswith('<') and value_repr.endswith('>') and ' object at 0x': raise ValueError("I hate those useless long reprs") else: raise ValueError("gimme class") except Exception as e: value_repr = "<%s>" % value.__class__.__name__.split('.')[-1] return value_repr def __auto_repr__(obj): attr_names = tuple() if hasattr(obj, '__dict__'): attr_names += tuple(obj.__dict__.keys()) if hasattr(obj, '__slots__'): attr_names += tuple(obj.__slots__) items = [] for attr in sorted(set(attr_names)): if attr.startswith('_'): continue value = getattr(obj, attr) # TODO: should we add this feature to minimize some talktative reprs # such as of URL? #if value is None: # continue items.append("%s=%s" % (attr, shortened_repr(value))) return "%s(%s)" % (obj.__class__.__name__, ', '.join(items)) def auto_repr(cls): """Decorator for a class to assign it an automagic quick and dirty __repr__ It uses public class attributes to prepare repr of a class Original idea: http://stackoverflow.com/a/27799004/1265472 """ cls.__repr__ = __auto_repr__ return cls def _is_stream_tty(stream): try: # TODO: check on windows if hasattr check would work correctly and # add value: return stream.isatty() except ValueError as exc: # Who knows why it is a ValueError, but let's try to be specific # If there is a problem with I/O - non-interactive, otherwise reraise if "I/O" in str(exc): return False raise def is_interactive(): """Return True if all in/outs are open and tty. Note that in a somewhat abnormal case where e.g. stdin is explicitly closed, and any operation on it would raise a `ValueError("I/O operation on closed file")` exception, this function would just return False, since the session cannot be used interactively. """ return all(_is_stream_tty(s) for s in (sys.stdin, sys.stdout, sys.stderr)) def get_ipython_shell(): """Detect if running within IPython and returns its `ip` (shell) object Returns None if not under ipython (no `get_ipython` function) """ try: return get_ipython() except NameError: return None def md5sum(filename): """Compute an MD5 sum for the given file """ from datalad.support.digests import Digester return Digester(digests=['md5'])(filename)['md5'] # unused in -core def sorted_files(path): """Return a (sorted) list of files under path """ return sorted(sum([[op.join(r, f)[len(path) + 1:] for f in files] for r, d, files in os.walk(path) if not '.git' in r], [])) _encoded_dirsep = r'\\' if on_windows else r'/' _VCS_REGEX = r'%s\.(?:git|gitattributes|svn|bzr|hg)(?:%s|$)' % ( _encoded_dirsep, _encoded_dirsep) _DATALAD_REGEX = r'%s\.(?:datalad)(?:%s|$)' % ( _encoded_dirsep, _encoded_dirsep) def find_files(regex, topdir=curdir, exclude=None, exclude_vcs=True, exclude_datalad=False, dirs=False): """Generator to find files matching regex Parameters ---------- regex: basestring exclude: basestring, optional Matches to exclude exclude_vcs: If True, excludes commonly known VCS subdirectories. If string, used as regex to exclude those files (regex: `%r`) exclude_datalad: If True, excludes files known to be datalad meta-data files (e.g. 
under .datalad/ subdirectory) (regex: `%r`) topdir: basestring, optional Directory where to search dirs: bool, optional Whether to match directories as well as files """ for dirpath, dirnames, filenames in os.walk(topdir): names = (dirnames + filenames) if dirs else filenames # TODO: might want to uniformize on windows to use '/' paths = (op.join(dirpath, name) for name in names) for path in filter(re.compile(regex).search, paths): path = path.rstrip(sep) if exclude and re.search(exclude, path): continue if exclude_vcs and re.search(_VCS_REGEX, path): continue if exclude_datalad and re.search(_DATALAD_REGEX, path): continue yield path find_files.__doc__ %= (_VCS_REGEX, _DATALAD_REGEX) def expandpath(path, force_absolute=True): """Expand all variables and user handles in a path. By default return an absolute path """ path = expandvars(expanduser(path)) if force_absolute: path = abspath(path) return path def posix_relpath(path, start=None): """Behave like os.path.relpath, but always return POSIX paths... on any platform.""" # join POSIX style return posixpath.join( # split and relpath native style # python2.7 ntpath implementation of relpath cannot handle start=None *split( relpath(path, start=start if start is not None else ''))) def is_explicit_path(path): """Return whether a path explicitly points to a location Any absolute path, or relative path starting with either '../' or './' is assumed to indicate a location on the filesystem. Any other path format is not considered explicit.""" path = expandpath(path, force_absolute=False) return isabs(path) \ or path.startswith(os.curdir + os.sep) \ or path.startswith(os.pardir + os.sep) # handle this dance once, and import pathlib from here # in all other places from pathlib import ( Path, PurePath, PurePosixPath, ) def rotree(path, ro=True, chmod_files=True): """To make tree read-only or writable Parameters ---------- path : string Path to the tree/directory to chmod ro : bool, optional Whether to make it R/O (default) or RW chmod_files : bool, optional Whether to operate also on files (not just directories) """ if ro: chmod = lambda f: os.chmod(f, os.stat(f).st_mode & ~stat.S_IWRITE) else: chmod = lambda f: os.chmod(f, os.stat(f).st_mode | stat.S_IWRITE | stat.S_IREAD) for root, dirs, files in os.walk(path, followlinks=False): if chmod_files: for f in files: fullf = op.join(root, f) # might be the "broken" symlink which would fail to stat etc if exists(fullf): chmod(fullf) chmod(root) def rmtree(path, chmod_files='auto', children_only=False, *args, **kwargs): """To remove git-annex .git it is needed to make all files and directories writable again first Parameters ---------- path: Path or str Path to remove chmod_files : string or bool, optional Whether to make files writable also before removal. Usually it is just a matter of directories to have write permissions. 
If 'auto' it would chmod files on windows by default children_only : bool, optional If set, all files and subdirectories would be removed while the path itself (must be a directory) would be preserved `*args` : `**kwargs` : Passed into shutil.rmtree call """ # Give W permissions back only to directories, no need to bother with files if chmod_files == 'auto': chmod_files = on_windows # TODO: yoh thinks that if we could quickly check our Flyweight for # repos if any of them is under the path, and could call .precommit # on those to possibly stop batched processes etc, we did not have # to do it on case by case # Check for open files assert_no_open_files(path) # TODO the whole thing should be reimplemented with pathlib, but for now # at least accept Path path = str(path) if children_only: if not isdir(path): raise ValueError("Can remove children only of directories") for p in os.listdir(path): rmtree(op.join(path, p)) return if not (islink(path) or not isdir(path)): rotree(path, ro=False, chmod_files=chmod_files) if on_windows: # shutil fails to remove paths that exceed 260 characters on Windows machines # that did not enable long path support. A workaround to remove long paths # anyway is to preprend \\?\ to the path. # https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file?redirectedfrom=MSDN#win32-file-namespaces path = r'\\?\ '.strip() + path _rmtree(path, *args, **kwargs) else: # just remove the symlink unlink(path) def rmdir(path, *args, **kwargs): """os.rmdir with our optional checking for open files""" assert_no_open_files(path) os.rmdir(path) def get_open_files(path, log_open=False): """Get open files under a path Note: This function is very slow on Windows. Parameters ---------- path : str File or directory to check for open files under log_open : bool or int If set - logger level to use Returns ------- dict path : pid """ # Original idea: https://stackoverflow.com/a/11115521/1265472 import psutil files = {} # since the ones returned by psutil would not be aware of symlinks in the # path we should also get realpath for path # do absolute() in addition to always get an absolute path # even with non-existing paths on windows path = str(Path(path).resolve().absolute()) for proc in psutil.process_iter(): try: open_paths = [p.path for p in proc.open_files()] + [proc.cwd()] for p in open_paths: # note: could be done more efficiently so we do not # renormalize path over and over again etc if path_startswith(p, path): files[p] = proc # Catch a race condition where a process ends # before we can examine its files except psutil.NoSuchProcess: pass except psutil.AccessDenied: pass if files and log_open: lgr.log(log_open, "Open files under %s: %s", path, files) return files _assert_no_open_files_cfg = os.environ.get('DATALAD_ASSERT_NO_OPEN_FILES') if _assert_no_open_files_cfg: def assert_no_open_files(path): files = get_open_files(path, log_open=40) if _assert_no_open_files_cfg == 'assert': assert not files, "Got following files still open: %s" % ','.join(files) elif files: if _assert_no_open_files_cfg == 'pdb': import pdb pdb.set_trace() elif _assert_no_open_files_cfg == 'epdb': import epdb epdb.serve() pass # otherwise we would just issue that error message in the log else: def assert_no_open_files(*args, **kwargs): pass def rmtemp(f, *args, **kwargs): """Wrapper to centralize removing of temp files so we could keep them around It will not remove the temporary file/directory if DATALAD_TESTS_TEMP_KEEP environment variable is defined """ if not 
os.environ.get('DATALAD_TESTS_TEMP_KEEP'): if not os.path.lexists(f): lgr.debug("Path %s does not exist, so can't be removed", f) return lgr.log(5, "Removing temp file: %s", f) # Can also be a directory if isdir(f): rmtree(f, *args, **kwargs) else: unlink(f) else: lgr.info("Keeping temp file: %s", f) def file_basename(name, return_ext=False): """ Strips up to 2 extensions of length up to 4 characters and starting with alpha not a digit, so we could get rid of .tar.gz etc """ bname = basename(name) fbname = re.sub(r'(\.[a-zA-Z_]\S{1,4}){0,2}$', '', bname) if return_ext: return fbname, bname[len(fbname) + 1:] else: return fbname # unused in -core def escape_filename(filename): """Surround filename in "" and escape " in the filename """ filename = filename.replace('"', r'\"').replace('`', r'\`') filename = '"%s"' % filename return filename # unused in -core def encode_filename(filename): """Encode unicode filename """ if isinstance(filename, str): return filename.encode(sys.getfilesystemencoding()) else: return filename # unused in -core def decode_input(s): """Given input string/bytes, decode according to stdin codepage (or UTF-8) if not defined If fails -- issue warning and decode allowing for errors being replaced """ if isinstance(s, str): return s else: encoding = sys.stdin.encoding or 'UTF-8' try: return s.decode(encoding) except UnicodeDecodeError as exc: lgr.warning( "Failed to decode input string using %s encoding. " "Decoding allowing for errors", encoding) return s.decode(encoding, errors='replace') # unused in -core if on_windows: def lmtime(filepath, mtime): """Set mtime for files. On Windows a merely adapter to os.utime """ os.utime(filepath, (time.time(), mtime)) else: def lmtime(filepath, mtime): """Set mtime for files, while not de-referencing symlinks. To overcome absence of os.lutime Works only on linux and OSX ATM """ from .cmd import WitlessRunner # convert mtime to format touch understands [[CC]YY]MMDDhhmm[.SS] smtime = time.strftime("%Y%m%d%H%M.%S", time.localtime(mtime)) lgr.log(3, "Setting mtime for %s to %s == %s", filepath, mtime, smtime) WitlessRunner().run(['touch', '-h', '-t', '%s' % smtime, filepath]) filepath = Path(filepath) rfilepath = filepath.resolve() if filepath.is_symlink() and rfilepath.exists(): # trust no one - adjust also of the target file # since it seemed like downloading under OSX (was it using curl?) # didn't bother with timestamps lgr.log(3, "File is a symlink to %s Setting mtime for it to %s", rfilepath, mtime) os.utime(str(rfilepath), (time.time(), mtime)) # doesn't work on OSX # Runner().run(['touch', '-h', '-d', '@%s' % mtime, filepath]) def ensure_tuple_or_list(obj): """Given an object, wrap into a tuple if not list or tuple """ if isinstance(obj, (list, tuple)): return obj return (obj,) def ensure_iter(s, cls, copy=False, iterate=True): """Given not a list, would place it into a list. If None - empty list is returned Parameters ---------- s: list or anything cls: class Which iterable class to ensure copy: bool, optional If correct iterable is passed, it would generate its shallow copy iterate: bool, optional If it is not a list, but something iterable (but not a str) iterate over it. """ if isinstance(s, cls): return s if not copy else shallow_copy(s) elif isinstance(s, str): return cls((s,)) elif iterate and hasattr(s, '__iter__'): return cls(s) elif s is None: return cls() else: return cls((s,)) def ensure_list(s, copy=False, iterate=True): """Given not a list, would place it into a list. 
If None - empty list is returned Parameters ---------- s: list or anything copy: bool, optional If list is passed, it would generate a shallow copy of the list iterate: bool, optional If it is not a list, but something iterable (but not a str) iterate over it. """ return ensure_iter(s, list, copy=copy, iterate=iterate) def ensure_list_from_str(s, sep='\n'): """Given a multiline string convert it to a list of return None if empty Parameters ---------- s: str or list """ if not s: return None if isinstance(s, list): return s return s.split(sep) def ensure_dict_from_str(s, **kwargs): """Given a multiline string with key=value items convert it to a dictionary Parameters ---------- s: str or dict Returns None if input s is empty """ if not s: return None if isinstance(s, dict): return s out = {} for value_str in ensure_list_from_str(s, **kwargs): if '=' not in value_str: raise ValueError("{} is not in key=value format".format(repr(value_str))) k, v = value_str.split('=', 1) if k in out: err = "key {} was already defined in {}, but new value {} was provided".format(k, out, v) raise ValueError(err) out[k] = v return out def ensure_bytes(s, encoding='utf-8'): """Convert/encode unicode string to bytes. If `s` isn't a string, return it as is. Parameters ---------- encoding: str, optional Encoding to use. "utf-8" is the default """ if not isinstance(s, str): return s return s.encode(encoding) def ensure_unicode(s, encoding=None, confidence=None): """Convert/decode bytestring to unicode. If `s` isn't a bytestring, return it as is. Parameters ---------- encoding: str, optional Encoding to use. If None, "utf-8" is tried, and then if not a valid UTF-8, encoding will be guessed confidence: float, optional A value between 0 and 1, so if guessing of encoding is of lower than specified confidence, ValueError is raised """ if not isinstance(s, bytes): return s if encoding is None: # Figure out encoding, defaulting to 'utf-8' which is our common # target in contemporary digital society try: return s.decode('utf-8') except UnicodeDecodeError as exc: lgr.debug("Failed to decode a string as utf-8: %s", CapturedException(exc)) # And now we could try to guess from chardet import detect enc = detect(s) denc = enc.get('encoding', None) if denc: denc_confidence = enc.get('confidence', 0) if confidence is not None and denc_confidence < confidence: raise ValueError( "Failed to auto-detect encoding with high enough " "confidence. Highest confidence was %s for %s" % (denc_confidence, denc) ) lgr.log(5, "Auto-detected encoding to be %s", denc) return s.decode(denc) else: raise ValueError( "Could not decode value as utf-8, or to guess its encoding: %s" % repr(s) ) else: return s.decode(encoding) def ensure_bool(s): """Convert value into boolean following convention for strings to recognize on,True,yes as True, off,False,no as False """ if isinstance(s, str): if s.isdigit(): return bool(int(s)) sl = s.lower() if sl in {'y', 'yes', 'true', 'on'}: return True elif sl in {'n', 'no', 'false', 'off'}: return False else: raise ValueError("Do not know how to treat %r as a boolean" % s) return bool(s) def as_unicode(val, cast_types=object): """Given an arbitrary value, would try to obtain unicode value of it For unicode it would return original value, for python2 str or python3 bytes it would use ensure_unicode, for None - an empty (unicode) string, and for any other type (see `cast_types`) - would apply the unicode constructor. 
If value is not an instance of `cast_types`, TypeError is thrown Parameters ---------- cast_types: type Which types to cast to unicode by providing to constructor """ if val is None: return u'' elif isinstance(val, str): return val elif isinstance(val, unicode_srctypes): return ensure_unicode(val) elif isinstance(val, cast_types): return str(val) else: raise TypeError( "Value %r is not of any of known or provided %s types" % (val, cast_types)) def unique(seq, key=None, reverse=False): """Given a sequence return a list only with unique elements while maintaining order This is the fastest solution. See https://www.peterbe.com/plog/uniqifiers-benchmark and http://stackoverflow.com/a/480227/1265472 for more information. Enhancement -- added ability to compare for uniqueness using a key function Parameters ---------- seq: Sequence to analyze key: callable, optional Function to call on each element so we could decide not on a full element, but on its member etc reverse: bool, optional If True, uniqueness checked in the reverse order, so that the later ones will take the order """ seen = set() seen_add = seen.add trans = reversed if reverse else lambda x: x if not key: out = [x for x in trans(seq) if not (x in seen or seen_add(x))] else: # OPT: could be optimized, since key is called twice, but for our cases # should be just as fine out = [x for x in trans(seq) if not (key(x) in seen or seen_add(key(x)))] return out[::-1] if reverse else out def all_same(items): """Quick check if all items are the same. Identical to a check like len(set(items)) == 1 but should be more efficient while working on generators, since would return False as soon as any difference detected thus possibly avoiding unnecessary evaluations """ first = True first_item = None for item in items: if first: first = False first_item = item else: if item != first_item: return False # So we return False if was empty return not first def map_items(func, v): """A helper to apply `func` to all elements (keys and values) within dict No type checking of values passed to func is done, so `func` should be resilient to values which it should not handle Initial usecase - apply_recursive(url_fragment, ensure_unicode) """ # map all elements within item return v.__class__( item.__class__(map(func, item)) for item in v.items() ) def partition(items, predicate=bool): """Partition `items` by `predicate`. Parameters ---------- items : iterable predicate : callable A function that will be mapped over each element in `items`. The elements will partitioned based on whether the return value is false or true. Returns ------- A tuple with two generators, the first for 'false' items and the second for 'true' ones. 
Notes ----- Taken from Peter Otten's snippet posted at https://nedbatchelder.com/blog/201306/filter_a_list_into_two_parts.html """ a, b = tee((predicate(item), item) for item in items) return ((item for pred, item in a if not pred), (item for pred, item in b if pred)) def generate_chunks(container, size): """Given a container, generate chunks from it with size up to `size` """ # There could be a "smarter" solution but I think this would suffice assert size > 0, "Size should be non-0 positive" while container: yield container[:size] container = container[size:] def generate_file_chunks(files, cmd=None): """Given a list of files, generate chunks of them to avoid exceeding cmdline length Parameters ---------- files: list of str cmd: str or list of str, optional Command to account for as well """ files = ensure_list(files) cmd = ensure_list(cmd) maxl = max(map(len, files)) if files else 0 chunk_size = max( 1, # should at least be 1. If blows then - not our fault (CMD_MAX_ARG - sum((len(x) + 3) for x in cmd) - 4 # for '--' below ) // (maxl + 3) # +3 for possible quotes and a space ) # TODO: additional treatment for "too many arguments"? although # as https://github.com/datalad/datalad/issues/1883#issuecomment # -436272758 # shows there seems to be no hardcoded limit on # of arguments, # but may be we decide to go for smth like follow to be on safe side # chunk_size = min(10240 - len(cmd), chunk_size) file_chunks = generate_chunks(files, chunk_size) return file_chunks # # Generators helpers # def saved_generator(gen): """Given a generator returns two generators, where 2nd one just replays So the first one would be going through the generated items and 2nd one would be yielding saved items """ saved = [] def gen1(): for x in gen: # iterating over original generator saved.append(x) yield x def gen2(): for x in saved: # yielding saved entries yield x return gen1(), gen2() # # Decorators # # Originally better_wraps was created to provide `wrapt`-based, instead of # `functools.wraps` implementation to preserve the correct signature of the # decorated function. By using inspect.signature in our getargspec, which # works fine on `functools.wraps`ed functions, we mediated this necessity. better_wraps = wraps # Borrowed from pandas # Copyright: 2011-2014, Lambda Foundry, Inc. and PyData Development Team # License: BSD-3 def optional_args(decorator): """allows a decorator to take optional positional and keyword arguments. Assumes that taking a single, callable, positional argument means that it is decorating a function, i.e. something like this:: @my_decorator def function(): pass Calls decorator with decorator(f, `*args`, `**kwargs`)""" @better_wraps(decorator) def wrapper(*args, **kwargs): def dec(f): return decorator(f, *args, **kwargs) is_decorating = not kwargs and len(args) == 1 and isinstance(args[0], Callable) if is_decorating: f = args[0] args = [] return dec(f) else: return dec return wrapper # TODO: just provide decorators for tempfile.mk* functions. This is ugly! def get_tempfile_kwargs(tkwargs=None, prefix="", wrapped=None): """Updates kwargs to be passed to tempfile. 
calls depending on env vars """ if tkwargs is None: tkwargs_ = {} else: # operate on a copy of tkwargs to avoid any side-effects tkwargs_ = tkwargs.copy() # TODO: don't remember why I had this one originally # if len(targs)<2 and \ if 'prefix' not in tkwargs_: tkwargs_['prefix'] = '_'.join( ['datalad_temp'] + ([prefix] if prefix else []) + ([''] if (on_windows or not wrapped) else [wrapped.__name__])) directory = os.environ.get('TMPDIR') if directory and 'dir' not in tkwargs_: tkwargs_['dir'] = directory return tkwargs_ @optional_args def line_profile(func): """Q&D helper to line profile the function and spit out stats """ import line_profiler prof = line_profiler.LineProfiler() @wraps(func) def _wrap_line_profile(*args, **kwargs): try: pfunc = prof(func) return pfunc(*args, **kwargs) finally: prof.print_stats() return _wrap_line_profile # unused in -core @optional_args def collect_method_callstats(func): """Figure out methods which call the method repeatedly on the same instance Use case(s): - .repo is expensive since does all kinds of checks. - .config is expensive transitively since it calls .repo each time TODO: - fancy one could look through the stack for the same id(self) to see if that location is already in memo. That would hint to the cases where object is not passed into underlying functions, causing them to redo the same work over and over again - ATM might flood with all "1 lines" calls which are not that informative. The underlying possibly suboptimal use might be coming from their callers. It might or not relate to the previous TODO """ from collections import defaultdict import traceback from time import time memo = defaultdict(lambda: defaultdict(int)) # it will be a dict of lineno: count # gross timing times = [] toppath = dirname(__file__) + sep @wraps(func) def _wrap_collect_method_callstats(*args, **kwargs): try: self = args[0] stack = traceback.extract_stack() caller = stack[-2] stack_sig = \ "{relpath}:{s.name}".format( s=caller, relpath=relpath(caller.filename, toppath)) sig = (id(self), stack_sig) # we will count based on id(self) + wherefrom memo[sig][caller.lineno] += 1 t0 = time() return func(*args, **kwargs) finally: times.append(time() - t0) pass def print_stats(): print("The cost of property {}:".format(func.__name__)) if not memo: print("None since no calls") return # total count counts = {k: sum(v.values()) for k,v in memo.items()} total = sum(counts.values()) ids = {self_id for (self_id, _) in memo} print(" Total: {} calls from {} objects with {} contexts taking {:.2f} sec" .format(total, len(ids), len(memo), sum(times))) # now we need to sort by value for (self_id, caller), count in sorted(counts.items(), key=lambda x: x[1], reverse=True): print(" {} {}: {} from {} lines" .format(self_id, caller, count, len(memo[(self_id, caller)]))) # Upon total exit we print the stats import atexit atexit.register(print_stats) return _wrap_collect_method_callstats # Borrowed from duecredit to wrap duecredit-handling to guarantee failsafe def never_fail(f): """Assure that function never fails -- all exceptions are caught Returns `None` if function fails internally. """ @wraps(f) def wrapped_func(*args, **kwargs): try: return f(*args, **kwargs) except Exception as e: lgr.warning( "DataLad internal failure while running %s: %r. 
" "Please report at https://github.com/datalad/datalad/issues" % (f, e) ) if os.environ.get('DATALAD_ALLOW_FAIL', False): return f else: return wrapped_func # # Context Managers # # unused in -core @contextmanager def nothing_cm(): """Just a dummy cm to programmically switch context managers""" yield @contextmanager def swallow_outputs(): """Context manager to help consuming both stdout and stderr, and print() stdout is available as cm.out and stderr as cm.err whenever cm is the yielded context manager. Internally uses temporary files to guarantee absent side-effects of swallowing into StringIO which lacks .fileno. print mocking is necessary for some uses where sys.stdout was already bound to original sys.stdout, thus mocking it later had no effect. Overriding print function had desired effect """ class StringIOAdapter(object): """Little adapter to help getting out/err values """ def __init__(self): kw = get_tempfile_kwargs({}, prefix="outputs") self._out = NamedTemporaryFile(delete=False, mode='w', **kw) self._err = NamedTemporaryFile(delete=False, mode='w', **kw) def _read(self, h): with open(h.name) as f: return f.read() @property def out(self): if not self._out.closed: self._out.flush() return self._read(self._out) @property def err(self): if not self._err.closed: self._err.flush() return self._read(self._err) @property def handles(self): return self._out, self._err def cleanup(self): self._out.close() self._err.close() out_name = self._out.name err_name = self._err.name from datalad import cfg if cfg.getbool('datalad.log', 'outputs', default=False) \ and lgr.getEffectiveLevel() <= logging.DEBUG: for s, sname in ((self.out, 'stdout'), (self.err, 'stderr')): if s: pref = os.linesep + "| " lgr.debug("Swallowed %s:%s%s", sname, pref, s.replace(os.linesep, pref)) else: lgr.debug("Nothing was swallowed for %s", sname) del self._out del self._err gc.collect() rmtemp(out_name) rmtemp(err_name) def fake_print(*args, **kwargs): sep = kwargs.pop('sep', ' ') end = kwargs.pop('end', '\n') file = kwargs.pop('file', sys.stdout) if file in (oldout, olderr, sys.stdout, sys.stderr): # we mock try: sys.stdout.write(sep.join(args) + end) except UnicodeEncodeError as exc: lgr.error( "Failed to write to mocked stdout, got %s, continue as it " "didn't happen", exc) else: # must be some other file one -- leave it alone oldprint(*args, sep=sep, end=end, file=file) from .ui import ui # preserve -- they could have been mocked already oldprint = getattr(builtins, 'print') oldout, olderr = sys.stdout, sys.stderr olduiout = ui.out adapter = StringIOAdapter() try: sys.stdout, sys.stderr = adapter.handles ui.out = adapter.handles[0] setattr(builtins, 'print', fake_print) yield adapter finally: sys.stdout, sys.stderr, ui.out = oldout, olderr, olduiout setattr(builtins, 'print', oldprint) adapter.cleanup() @contextmanager def swallow_logs(new_level=None, file_=None, name='datalad'): """Context manager to consume all logs. """ lgr = logging.getLogger(name) # Keep old settings old_level = lgr.level old_handlers = lgr.handlers # Let's log everything into a string # TODO: generalize with the one for swallow_outputs class StringIOAdapter(object): """Little adapter to help getting out values And to stay consistent with how swallow_outputs behaves """ def __init__(self): if file_ is None: kw = get_tempfile_kwargs({}, prefix="logs") self._out = NamedTemporaryFile(mode='a', delete=False, **kw) else: out_file = file_ # PY3 requires clearly one or another. 
race condition possible self._out = open(out_file, 'a') self._final_out = None def _read(self, h): with open(h.name) as f: return f.read() @property def out(self): if self._final_out is not None: # we closed and cleaned up already return self._final_out else: self._out.flush() return self._read(self._out) @property def lines(self): return self.out.split('\n') @property def handle(self): return self._out def cleanup(self): # store for access while object exists self._final_out = self.out self._out.close() out_name = self._out.name del self._out gc.collect() if not file_: rmtemp(out_name) def assert_logged(self, msg=None, level=None, regex=True, **kwargs): """Provide assertion on whether a msg was logged at a given level If neither `msg` nor `level` provided, checks if anything was logged at all. Parameters ---------- msg: str, optional Message (as a regular expression, if `regex`) to be searched. If no msg provided, checks if anything was logged at a given level. level: str, optional String representing the level to be logged regex: bool, optional If False, regular `assert_in` is used **kwargs: str, optional Passed to `assert_re_in` or `assert_in` """ from datalad.tests.utils import assert_re_in from datalad.tests.utils import assert_in if regex: match = r'\[%s\] ' % level if level else r"\[\S+\] " else: match = '[%s] ' % level if level else '' if msg: match += msg if match: (assert_re_in if regex else assert_in)(match, self.out, **kwargs) else: assert not kwargs, "no kwargs to be passed anywhere" assert self.out, "Nothing was logged!?" adapter = StringIOAdapter() # TODO: it does store messages but without any formatting, i.e. even without # date/time prefix etc. IMHO it should preserve formatting in case if file_ is # set swallow_handler = logging.StreamHandler(adapter.handle) # we want to log levelname so we could test against it swallow_handler.setFormatter( logging.Formatter('[%(levelname)s] %(message)s')) swallow_handler.filters = sum([h.filters for h in old_handlers], []) lgr.handlers = [swallow_handler] if old_level < logging.DEBUG: # so if HEAVYDEBUG etc -- show them! lgr.handlers += old_handlers if isinstance(new_level, str): new_level = getattr(logging, new_level) if new_level is not None: lgr.setLevel(new_level) try: yield adapter # TODO: if file_ and there was an exception -- most probably worth logging it? # although ideally it should be the next log outside added to that file_ ... oh well finally: lgr.handlers = old_handlers lgr.setLevel(old_level) adapter.cleanup() # TODO: May be melt in with swallow_logs at some point: @contextmanager def disable_logger(logger=None): """context manager to temporarily disable logging This is to provide one of swallow_logs' purposes without unnecessarily creating temp files (see gh-1865) Parameters ---------- logger: Logger Logger whose handlers will be ordered to not log anything. Default: datalad's topmost Logger ('datalad') """ class NullFilter(logging.Filter): """Filter class to reject all records """ def filter(self, record): return 0 if logger is None: # default: all of datalad's logging: logger = logging.getLogger('datalad') filter_ = NullFilter(logger.name) [h.addFilter(filter_) for h in logger.handlers] try: yield logger finally: [h.removeFilter(filter_) for h in logger.handlers] # # Additional handlers # _sys_excepthook = sys.excepthook # Just in case we ever need original one def setup_exceptionhook(ipython=False): """Overloads default sys.excepthook with our exceptionhook handler. 
If interactive, our exceptionhook handler will invoke pdb.post_mortem; if not interactive, then invokes default handler. """ def _datalad_pdb_excepthook(type, value, tb): import traceback traceback.print_exception(type, value, tb) print() if is_interactive(): import pdb pdb.post_mortem(tb) if ipython: from IPython.core import ultratb sys.excepthook = ultratb.FormattedTB(mode='Verbose', # color_scheme='Linux', call_pdb=is_interactive()) else: sys.excepthook = _datalad_pdb_excepthook def ensure_dir(*args): """Make sure directory exists. Joins the list of arguments to an os-specific path to the desired directory and creates it, if it not exists yet. """ dirname = op.join(*args) if not exists(dirname): os.makedirs(dirname) return dirname def updated(d, update): """Return a copy of the input with the 'update' Primarily for updating dictionaries """ d = d.copy() d.update(update) return d _pwd_mode = None def _switch_to_getcwd(msg, *args): global _pwd_mode _pwd_mode = 'cwd' lgr.debug( msg + ". From now on will be returning os.getcwd(). Directory" " symlinks in the paths will be resolved", *args ) # TODO: we might want to mitigate by going through all flywheighted # repos and tuning up their .paths to be resolved? def getpwd(): """Try to return a CWD without dereferencing possible symlinks This function will try to use PWD environment variable to provide a current working directory, possibly with some directories along the path being symlinks to other directories. Unfortunately, PWD is used/set only by the shell and such functions as `os.chdir` and `os.getcwd` nohow use or modify it, thus `os.getcwd()` returns path with links dereferenced. While returning current working directory based on PWD env variable we verify that the directory is the same as `os.getcwd()` after resolving all symlinks. If that verification fails, we fall back to always use `os.getcwd()`. Initial decision to either use PWD env variable or os.getcwd() is done upon the first call of this function. """ global _pwd_mode if _pwd_mode is None: # we need to decide! try: pwd = os.environ['PWD'] if on_windows and pwd and pwd.startswith('/'): # It should be a path from MSYS. # - it might start with a drive letter or not # - it seems to be "illegal" to have a single letter directories # under / path, i.e. if created - they aren't found # - 'ln -s' does not fail to create a "symlink" but it just # copies! # so we are not likely to need original PWD purpose on # those systems # Verdict: _pwd_mode = 'cwd' else: _pwd_mode = 'PWD' except KeyError: _pwd_mode = 'cwd' if _pwd_mode == 'cwd': return os.getcwd() elif _pwd_mode == 'PWD': try: cwd = os.getcwd() except OSError as exc: if "o such file" in str(exc): # directory was removed but we promised to be robust and # still report the path we might know since we are still in PWD # mode cwd = None else: raise try: pwd = os.environ['PWD'] # do absolute() in addition to always get an absolute path # even with non-existing paths on windows pwd_real = str(Path(pwd).resolve().absolute()) # This logic would fail to catch the case where chdir did happen # to the directory where current PWD is pointing to, e.g. 
# $> ls -ld $PWD # lrwxrwxrwx 1 yoh yoh 5 Oct 11 13:27 /home/yoh/.tmp/tmp -> /tmp// # hopa:~/.tmp/tmp # $> python -c 'import os; os.chdir("/tmp"); from datalad.utils import getpwd; print(getpwd(), os.getcwd())' # ('/home/yoh/.tmp/tmp', '/tmp') # but I guess that should not be too harmful if cwd is not None and pwd_real != cwd: _switch_to_getcwd( "realpath of PWD=%s is %s whenever os.getcwd()=%s", pwd, pwd_real, cwd ) return cwd return pwd except KeyError: _switch_to_getcwd("PWD env variable is no longer available") return cwd # Must not happen, but may be someone # evil purges PWD from environ? else: raise RuntimeError( "Must have not got here. " "pwd_mode must be either cwd or PWD. And it is now %r" % (_pwd_mode,) ) class chpwd(object): """Wrapper around os.chdir which also adjusts environ['PWD'] The reason is that otherwise PWD is simply inherited from the shell and we have no ability to assess directory path without dereferencing symlinks. If used as a context manager it allows to temporarily change directory to the given path """ def __init__(self, path, mkdir=False, logsuffix=''): if path: pwd = getpwd() self._prev_pwd = pwd else: self._prev_pwd = None return if not isabs(path): path = normpath(op.join(pwd, path)) if not os.path.exists(path) and mkdir: self._mkdir = True os.mkdir(path) else: self._mkdir = False lgr.debug("chdir %r -> %r %s", self._prev_pwd, path, logsuffix) os.chdir(path) # for grep people -- ok, to chdir here! os.environ['PWD'] = str(path) def __enter__(self): # nothing more to do really, chdir was in the constructor pass def __exit__(self, exc_type, exc_val, exc_tb): if self._prev_pwd: # Need to use self.__class__ so this instance, if the entire # thing mocked during the test, still would use correct chpwd self.__class__(self._prev_pwd, logsuffix="(coming back)") def dlabspath(path, norm=False): """Symlinks-in-the-cwd aware abspath os.path.abspath relies on os.getcwd() which would not know about symlinks in the path TODO: we might want to norm=True by default to match behavior of os .path.abspath? """ if not isabs(path): # if not absolute -- relative to pwd path = op.join(getpwd(), path) return normpath(path) if norm else path def with_pathsep(path): """Little helper to guarantee that path ends with /""" return path + sep if not path.endswith(sep) else path def get_path_prefix(path, pwd=None): """Get path prefix (for current directory) Returns relative path to the topdir, if we are under topdir, and if not absolute path to topdir. If `pwd` is not specified - current directory assumed """ pwd = pwd or getpwd() path = dlabspath(path) path_ = with_pathsep(path) pwd_ = with_pathsep(pwd) common = commonprefix((path_, pwd_)) if common.endswith(sep) and common in {path_, pwd_}: # we are in subdir or above the path = use relative path location_prefix = relpath(path, pwd) # if benign "here" - cut off if location_prefix in (curdir, curdir + sep): location_prefix = '' return location_prefix else: # just return absolute path return path def _get_normalized_paths(path, prefix): if isabs(path) != isabs(prefix): raise ValueError("Both paths must either be absolute or relative. 
" "Got %r and %r" % (path, prefix)) path = with_pathsep(path) prefix = with_pathsep(prefix) return path, prefix def path_startswith(path, prefix): """Return True if path starts with prefix path Parameters ---------- path: str prefix: str """ path, prefix = _get_normalized_paths(path, prefix) return path.startswith(prefix) def path_is_subpath(path, prefix): """Return True if path is a subpath of prefix It will return False if path == prefix. Parameters ---------- path: str prefix: str """ path, prefix = _get_normalized_paths(path, prefix) return (len(prefix) < len(path)) and path.startswith(prefix) def knows_annex(path): """Returns whether at a given path there is information about an annex It is just a thin wrapper around GitRepo.is_with_annex() classmethod which also checks for `path` to exist first. This includes actually present annexes, but also uninitialized ones, or even the presence of a remote annex branch. """ from os.path import exists if not exists(path): lgr.debug("No annex: test path {0} doesn't exist".format(path)) return False from datalad.support.gitrepo import GitRepo return GitRepo(path, init=False, create=False).is_with_annex() @contextmanager def make_tempfile(content=None, wrapped=None, **tkwargs): """Helper class to provide a temporary file name and remove it at the end (context manager) Parameters ---------- mkdir : bool, optional (default: False) If True, temporary directory created using tempfile.mkdtemp() content : str or bytes, optional Content to be stored in the file created wrapped : function, optional If set, function name used to prefix temporary file name `**tkwargs`: All other arguments are passed into the call to tempfile.mk{,d}temp(), and resultant temporary filename is passed as the first argument into the function t. If no 'prefix' argument is provided, it will be constructed using module and function names ('.' replaced with '_'). To change the used directory without providing keyword argument 'dir' set DATALAD_TESTS_TEMP_DIR. Examples -------- >>> from os.path import exists >>> from datalad.utils import make_tempfile >>> with make_tempfile() as fname: ... k = open(fname, 'w').write('silly test') >>> assert not exists(fname) # was removed >>> with make_tempfile(content="blah") as fname: ... assert open(fname).read() == "blah" """ if tkwargs.get('mkdir', None) and content is not None: raise ValueError("mkdir=True while providing content makes no sense") tkwargs_ = get_tempfile_kwargs(tkwargs, wrapped=wrapped) # if DATALAD_TESTS_TEMP_DIR is set, use that as directory, # let mktemp handle it otherwise. However, an explicitly provided # dir=... will override this. mkdir = tkwargs_.pop('mkdir', False) filename = {False: tempfile.mktemp, True: tempfile.mkdtemp}[mkdir](**tkwargs_) # MIH: not clear to me why we need to perform this (possibly expensive) # resolve. It was already part of the original implementation # 008d9ab8cc3e0170c0a9b8479e80dee9ffe6eb7f filename = Path(filename).resolve() if content: (filename.write_bytes if isinstance(content, bytes) else filename.write_text)(content) # TODO globbing below can also be done with pathlib filename = str(filename) if __debug__: lgr.debug( 'Created temporary %s named %s', 'directory' if mkdir else 'file', filename) try: yield filename finally: # glob here for all files with the same name (-suffix) # would be useful whenever we requested .img filename, # and function creates .hdr as well # MIH: this is undocumented behavior, and undesired in the general # case. 
it should be made conditional and explicit lsuffix = len(tkwargs_.get('suffix', '')) filename_ = lsuffix and filename[:-lsuffix] or filename filenames = glob.glob(filename_ + '*') if len(filename_) < 3 or len(filenames) > 5: # For paranoid yoh who stepped into this already ones ;-) lgr.warning("It is unlikely that it was intended to remove all" " files matching %r. Skipping" % filename_) return for f in filenames: try: rmtemp(f) except OSError: # pragma: no cover pass def _path_(*p): """Given a path in POSIX" notation, regenerate one in native to the env one""" if on_windows: return op.join(*map(lambda x: op.join(*x.split('/')), p)) else: # Assume that all others as POSIX compliant so nothing to be done return op.join(*p) def get_timestamp_suffix(time_=None, prefix='-'): """Return a time stamp (full date and time up to second) primarily to be used for generation of log files names """ args = [] if time_ is not None: if isinstance(time_, int): time_ = time.gmtime(time_) args.append(time_) return time.strftime(prefix + TIMESTAMP_FMT, *args) # unused in -core def get_logfilename(dspath, cmd='datalad'): """Return a filename to use for logging under a dataset/repository directory would be created if doesn't exist, but dspath must exist and be a directory """ assert(exists(dspath)) assert(isdir(dspath)) ds_logdir = ensure_dir(dspath, '.git', 'datalad', 'logs') # TODO: use WEB_META_LOG whenever #789 merged return op.join(ds_logdir, 'crawl-%s.log' % get_timestamp_suffix()) def get_trace(edges, start, end, trace=None): """Return the trace/path to reach a node in a tree. Parameters ---------- edges : sequence(2-tuple) The tree given by a sequence of edges (parent, child) tuples. The nodes can be identified by any value and data type that supports the '==' operation. start : Identifier of the start node. Must be present as a value in the parent location of an edge tuple in order to be found. end : Identifier of the target/end node. Must be present as a value in the child location of an edge tuple in order to be found. trace : list Mostly useful for recursive calls, and used internally. Returns ------- None or list Returns a list with the trace to the target (the starts and the target are not included in the trace, hence if start and end are directly connected an empty list is returned), or None when no trace to the target can be found, or start and end are identical. """ # the term trace is used to avoid confusion with a path in the sense # of a filesystem path, but the analogy fits and nodes can be paths if trace is None: trace = [] if not edges: raise ValueError("no edges given") for cand in edges: cand_super, cand_sub = cand if cand_sub in trace: # only DAGs, skip any cyclic traces continue if trace and cand_super != trace[-1]: # only consider edges that lead off the end of the trace continue if not trace and cand_super != start: # we got nothing yet, and this edges is not matching the start continue if cand_sub == end: return trace # dive into potential subnodes cand_trace = get_trace( edges, start, end, trace + [cand_sub]) if cand_trace: return cand_trace return None def get_dataset_root(path): """Return the root of an existent dataset containing a given path The root path is returned in the same absolute or relative form as the input argument. If no associated dataset exists, or the input path doesn't exist, None is returned. If `path` is a symlink or something other than a directory, its the root dataset containing its parent directory will be reported. 
If none can be found, at a symlink at `path` is pointing to a dataset, `path` itself will be reported as the root. Parameters ---------- path : Path-like Returns ------- str or None """ path = str(path) suffix = '.git' altered = None if islink(path) or not isdir(path): altered = path path = dirname(path) apath = abspath(path) # while we can still go up while split(apath)[1]: if exists(op.join(path, suffix)): return path # new test path in the format we got it path = normpath(op.join(path, os.pardir)) # no luck, next round apath = abspath(path) # if we applied dirname() at the top, we give it another go with # the actual path, if it was itself a symlink, it could be the # top-level dataset itself if altered and exists(op.join(altered, suffix)): return altered return None # ATM used in datalad_crawler extension, so do not remove yet def try_multiple(ntrials, exception, base, f, *args, **kwargs): """Call f multiple times making exponentially growing delay between the calls""" for trial in range(1, ntrials+1): try: return f(*args, **kwargs) except exception as exc: if trial == ntrials: raise # just reraise on the last trial t = base ** trial lgr.warning("Caught %s on trial #%d. Sleeping %f and retrying", CapturedException(exc), trial, t) sleep(t) @optional_args def try_multiple_dec( f, ntrials=None, duration=0.1, exceptions=None, increment_type=None, exceptions_filter=None, logger=None, ): """Decorator to try function multiple times. Main purpose is to decorate functions dealing with removal of files/directories and which might need a few seconds to work correctly on Windows which takes its time to release files/directories. Parameters ---------- ntrials: int, optional duration: float, optional Seconds to sleep before retrying. increment_type: {None, 'exponential'} Note that if it is exponential, duration should typically be > 1.0 so it grows with higher power exceptions: Exception or tuple of Exceptions, optional Exception or a tuple of multiple exceptions, on which to retry exceptions_filter: callable, optional If provided, this function will be called with a caught exception instance. If function returns True - we will re-try, if False - exception will be re-raised without retrying. logger: callable, optional Logger to log upon failure. If not provided, will use stock logger at the level of 5 (heavy debug). """ if not exceptions: exceptions = (OSError, WindowsError, PermissionError) \ if on_windows else OSError if not ntrials: # Life goes fast on proper systems, no need to delay it much ntrials = 100 if on_windows else 10 if logger is None: def logger(*args, **kwargs): return lgr.log(5, *args, **kwargs) assert increment_type in {None, 'exponential'} @wraps(f) def _wrap_try_multiple_dec(*args, **kwargs): t = duration for trial in range(ntrials): try: return f(*args, **kwargs) except exceptions as exc: if exceptions_filter and not exceptions_filter(exc): raise if trial < ntrials - 1: if increment_type == 'exponential': t = duration ** (trial + 1) logger( "Caught %s on trial #%d. Sleeping %f and retrying", CapturedException(exc), trial, t) sleep(t) else: raise return _wrap_try_multiple_dec @try_multiple_dec def unlink(f): """'Robust' unlink. Would try multiple times On windows boxes there is evidence for a latency of more than a second until a file is considered no longer "in-use". 
WindowsError is not known on Linux, and if IOError or any other exception is thrown then if except statement has WindowsError in it -- NameError also see gh-2533 """ # Check for open files assert_no_open_files(f) return os.unlink(f) @try_multiple_dec def _rmtree(*args, **kwargs): """Just a helper to decorate shutil.rmtree. rmtree defined above does more and ideally should not itself be decorated since a recursive definition and does checks for open files inside etc - might be too runtime expensive """ return shutil.rmtree(*args, **kwargs) def slash_join(base, extension): """Join two strings with a '/', avoiding duplicate slashes If any of the strings is None the other is returned as is. """ if extension is None: return base if base is None: return extension return '/'.join( (base.rstrip('/'), extension.lstrip('/'))) # # IO Helpers # # unused in -core def open_r_encdetect(fname, readahead=1000): """Return a file object in read mode with auto-detected encoding This is helpful when dealing with files of unknown encoding. Parameters ---------- readahead: int, optional How many bytes to read for guessing the encoding type. If negative - full file will be read """ from chardet import detect import io # read some bytes from the file with open(fname, 'rb') as f: head = f.read(readahead) enc = detect(head) denc = enc.get('encoding', None) lgr.debug("Auto-detected encoding %s for file %s (confidence: %s)", denc, fname, enc.get('confidence', 'unknown')) return io.open(fname, encoding=denc) def read_file(fname, decode=True): """A helper to read file passing content via ensure_unicode Parameters ---------- decode: bool, optional if False, no ensure_unicode and file content returned as bytes """ with open(fname, 'rb') as f: content = f.read() return ensure_unicode(content) if decode else content def read_csv_lines(fname, dialect=None, readahead=16384, **kwargs): """A generator of dict records from a CSV/TSV Automatically guesses the encoding for each record to convert to UTF-8 Parameters ---------- fname: str Filename dialect: str, optional Dialect to specify to csv.reader. If not specified -- guessed from the file, if fails to guess, "excel-tab" is assumed readahead: int, optional How many bytes to read from the file to guess the type **kwargs Passed to `csv.reader` """ import csv if dialect is None: with open(fname) as tsvfile: # add robustness, use a sniffer try: dialect = csv.Sniffer().sniff(tsvfile.read(readahead)) except Exception as exc: lgr.warning( 'Could not determine file-format, assuming TSV: %s', CapturedException(exc) ) dialect = 'excel-tab' kw = dict(encoding='utf-8') with open(fname, 'r', **kw) as tsvfile: # csv.py doesn't do Unicode; encode temporarily as UTF-8: csv_reader = csv.reader( tsvfile, dialect=dialect, **kwargs ) header = None for row in csv_reader: # decode UTF-8 back to Unicode, cell by cell: row_unicode = map(ensure_unicode, row) if header is None: header = list(row_unicode) else: yield dict(zip(header, row_unicode)) def import_modules(modnames, pkg, msg="Failed to import {module}", log=lgr.debug): """Helper to import a list of modules without failing if N/A Parameters ---------- modnames: list of str List of module names to import pkg: str Package under which to import msg: str, optional Message template for .format() to log at DEBUG level if import fails. 
Keys {module} and {package} will be provided and ': {exception}' appended log: callable, optional Logger call to use for logging messages """ from importlib import import_module _globals = globals() mods_loaded = [] if pkg and not pkg in sys.modules: # with python 3.5.1 (ok with 3.5.5) somehow kept running into # Failed to import dlsub1: Parent module 'dltestm1' not loaded # while running the test. Preloading pkg resolved the issue import_module(pkg) for modname in modnames: try: _globals[modname] = mod = import_module( '.{}'.format(modname), pkg) mods_loaded.append(mod) except Exception as exc: from datalad.support.exceptions import CapturedException ce = CapturedException(exc) log((msg + ': {exception}').format( module=modname, package=pkg, exception=ce.message)) return mods_loaded def import_module_from_file(modpath, pkg=None, log=lgr.debug): """Import provided module given a path TODO: - RF/make use of it in pipeline.py which has similar logic - join with import_modules above? Parameters ---------- pkg: module, optional If provided, and modpath is under pkg.__path__, relative import will be used """ assert(modpath.endswith('.py')) # for now just for .py files log("Importing %s" % modpath) modname = basename(modpath)[:-3] relmodpath = None if pkg: for pkgpath in pkg.__path__: if path_is_subpath(modpath, pkgpath): # for now relying on having .py extension -- assertion above relmodpath = '.' + relpath(modpath[:-3], pkgpath).replace(sep, '.') break try: if relmodpath: from importlib import import_module mod = import_module(relmodpath, pkg.__name__) else: dirname_ = dirname(modpath) try: sys.path.insert(0, dirname_) mod = __import__(modname, level=0) finally: if dirname_ in sys.path: sys.path.pop(sys.path.index(dirname_)) else: log("Expected path %s to be within sys.path, but it was gone!" % dirname_) except Exception as e: raise RuntimeError( "Failed to import module from %s" % modpath) from e return mod def get_encoding_info(): """Return a dictionary with various encoding/locale information""" import sys, locale from collections import OrderedDict return OrderedDict([ ('default', sys.getdefaultencoding()), ('filesystem', sys.getfilesystemencoding()), ('locale.prefered', locale.getpreferredencoding()), ]) def get_envvars_info(): from collections import OrderedDict envs = [] for var, val in os.environ.items(): if ( var.startswith('PYTHON') or var.startswith('LC_') or var.startswith('GIT_') or var in ('LANG', 'LANGUAGE', 'PATH') ): envs.append((var, val)) return OrderedDict(envs) # This class is modified from Snakemake (v5.1.4) class SequenceFormatter(string.Formatter): """string.Formatter subclass with special behavior for sequences. This class delegates formatting of individual elements to another formatter object. Non-list objects are formatted by calling the delegate formatter's "format_field" method. List-like objects (list, tuple, set, frozenset) are formatted by formatting each element of the list according to the specified format spec using the delegate formatter and then joining the resulting strings with a separator (space by default). """ def __init__(self, separator=" ", element_formatter=string.Formatter(), *args, **kwargs): self.separator = separator self.element_formatter = element_formatter def format_element(self, elem, format_spec): """Format a single element For sequences, this is called once for each element in a sequence. For anything else, it is called on the entire object. It is intended to be overridden in subclases. 
""" return self.element_formatter.format_field(elem, format_spec) def format_field(self, value, format_spec): if isinstance(value, (list, tuple, set, frozenset)): return self.separator.join(self.format_element(v, format_spec) for v in value) else: return self.format_element(value, format_spec) # TODO: eventually we might want to make use of attr module class File(object): """Helper for a file entry in the create_tree/@with_tree It allows to define additional settings for entries """ def __init__(self, name, executable=False): """ Parameters ---------- name : str Name of the file executable: bool, optional Make it executable """ self.name = name self.executable = executable def __str__(self): return self.name def create_tree_archive(path, name, load, overwrite=False, archives_leading_dir=True): """Given an archive `name`, create under `path` with specified `load` tree """ from datalad.support.archives import compress_files dirname = file_basename(name) full_dirname = op.join(path, dirname) os.makedirs(full_dirname) create_tree(full_dirname, load, archives_leading_dir=archives_leading_dir) # create archive if archives_leading_dir: compress_files([dirname], name, path=path, overwrite=overwrite) else: compress_files(list(map(basename, glob.glob(op.join(full_dirname, '*')))), op.join(pardir, name), path=op.join(path, dirname), overwrite=overwrite) # remove original tree rmtree(full_dirname) def create_tree(path, tree, archives_leading_dir=True, remove_existing=False): """Given a list of tuples (name, load) create such a tree if load is a tuple itself -- that would create either a subtree or an archive with that content and place it into the tree if name ends with .tar.gz """ lgr.log(5, "Creating a tree under %s", path) if not exists(path): os.makedirs(path) if isinstance(tree, dict): tree = tree.items() for file_, load in tree: if isinstance(file_, File): executable = file_.executable name = file_.name else: executable = False name = file_ full_name = op.join(path, name) if remove_existing and lexists(full_name): rmtree(full_name, chmod_files=True) if isinstance(load, (tuple, list, dict)): if name.endswith('.tar.gz') or name.endswith('.tar') or name.endswith('.zip'): create_tree_archive( path, name, load, archives_leading_dir=archives_leading_dir) else: create_tree( full_name, load, archives_leading_dir=archives_leading_dir, remove_existing=remove_existing) else: open_func = open if full_name.endswith('.gz'): open_func = gzip.open elif full_name.split('.')[-1] in ('xz', 'lzma'): import lzma open_func = lzma.open with open_func(full_name, "wb") as f: f.write(ensure_bytes(load, 'utf-8')) if executable: os.chmod(full_name, os.stat(full_name).st_mode | stat.S_IEXEC) def get_suggestions_msg(values, known, sep="\n "): """Return a formatted string with suggestions for values given the known ones """ import difflib suggestions = [] for value in ensure_list(values): # might not want to do it if we change presentation below suggestions += difflib.get_close_matches(value, known) suggestions = unique(suggestions) msg = "Did you mean any of these?" if suggestions: if '\n' in sep: # if separator includes new line - we add entire separator right away msg += sep else: msg += ' ' return msg + "%s\n" % sep.join(suggestions) return '' def bytes2human(n, format='%(value).1f %(symbol)sB'): """ Convert n bytes into a human readable string based on format. 
symbols can be either "customary", "customary_ext", "iec" or "iec_ext", see: http://goo.gl/kTQMs >>> from datalad.utils import bytes2human >>> bytes2human(1) '1.0 B' >>> bytes2human(1024) '1.0 KB' >>> bytes2human(1048576) '1.0 MB' >>> bytes2human(1099511627776127398123789121) '909.5 YB' >>> bytes2human(10000, "%(value).1f %(symbol)s/sec") '9.8 K/sec' >>> # precision can be adjusted by playing with %f operator >>> bytes2human(10000, format="%(value).5f %(symbol)s") '9.76562 K' Taken from: http://goo.gl/kTQMs and subsequently simplified Original Author: Giampaolo Rodola' <g.rodola [AT] gmail [DOT] com> License: MIT """ n = int(n) if n < 0: raise ValueError("n < 0") symbols = ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y') prefix = {} for i, s in enumerate(symbols[1:]): prefix[s] = 1 << (i + 1) * 10 for symbol in reversed(symbols[1:]): if n >= prefix[symbol]: value = float(n) / prefix[symbol] return format % locals() return format % dict(symbol=symbols[0], value=n) def quote_cmdlinearg(arg): """Perform platform-appropriate argument quoting""" # https://stackoverflow.com/a/15262019 return '"{}"'.format( arg.replace('"', '""') ) if on_windows else shlex_quote(arg) def guard_for_format(arg): """Replace { and } with {{ and }} To be used in cases if arg is not expected to have provided by user .format() placeholders, but 'arg' might become a part of a composite passed to .format(), e.g. via 'Run' """ return arg.replace('{', '{{').replace('}', '}}') def join_cmdline(args): """Join command line args into a string using quote_cmdlinearg """ return ' '.join(map(quote_cmdlinearg, args)) def split_cmdline(s): """Perform platform-appropriate command line splitting. Identical to `shlex.split()` on non-windows platforms. Modified from https://stackoverflow.com/a/35900070 """ if not on_windows: return shlex_split(s) # the rest is for windows RE_CMD_LEX = r'''"((?:""|\\["\\]|[^"])*)"?()|(\\\\(?=\\*")|\\")|(&&?|\|\|?|\d?>|[<])|([^\s"&|<>]+)|(\s+)|(.)''' args = [] accu = None # collects pieces of one arg for qs, qss, esc, pipe, word, white, fail in re.findall(RE_CMD_LEX, s): if word: pass # most frequent elif esc: word = esc[1] elif white or pipe: if accu is not None: args.append(accu) if pipe: args.append(pipe) accu = None continue elif fail: raise ValueError("invalid or incomplete shell string") elif qs: word = qs.replace('\\"', '"').replace('\\\\', '\\') if platform == 0: word = word.replace('""', '"') else: word = qss # may be even empty; must be last accu = (accu or '') + word if accu is not None: args.append(accu) return args def get_wrapped_class(wrapped): """Determine the command class a wrapped __call__ belongs to""" mod = sys.modules[wrapped.__module__] command_class_name = wrapped.__qualname__.split('.')[-2] _func_class = mod.__dict__[command_class_name] lgr.debug("Determined class of decorated function: %s", _func_class) return _func_class def _make_assure_kludge(fn): old_name = fn.__name__.replace("ensure", "assure") @wraps(fn) def compat_fn(*args, **kwargs): warnings.warn( "{} is deprecated and will be removed in a future release. " "Use {} instead." .format(old_name, fn.__name__), DeprecationWarning) return fn(*args, **kwargs) compat_fn.__doc__ = ("Note: This function is deprecated. Use {} instead." 
.format(fn.__name__)) return compat_fn assure_tuple_or_list = _make_assure_kludge(ensure_tuple_or_list) assure_iter = _make_assure_kludge(ensure_iter) assure_list = _make_assure_kludge(ensure_list) assure_list_from_str = _make_assure_kludge(ensure_list_from_str) assure_dict_from_str = _make_assure_kludge(ensure_dict_from_str) assure_bytes = _make_assure_kludge(ensure_bytes) assure_unicode = _make_assure_kludge(ensure_unicode) assure_bool = _make_assure_kludge(ensure_bool) assure_dir = _make_assure_kludge(ensure_dir) lgr.log(5, "Done importing datalad.utils") def check_symlink_capability(path, target): """helper similar to datalad.tests.utils.has_symlink_capability However, for use in a datalad command context, we shouldn't assume to be able to write to tmpfile and also not import a whole lot from datalad's test machinery. Finally, we want to know, whether we can create a symlink at a specific location, not just somewhere. Therefore use arbitrary path to test-build a symlink and delete afterwards. Suitable location can therefore be determined by high lever code. Parameters ---------- path: Path target: Path Returns ------- bool """ try: target.touch() path.symlink_to(target) return True except Exception: return False finally: if path.exists(): path.unlink() if target.exists(): target.unlink()
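

# --- Illustrative sketch (not part of the original module) ------------------
# A minimal example of how check_symlink_capability() above could be used by
# higher-level code that has already chosen a writable scratch directory.
# The probe file names below are made-up assumptions for illustration only.
def _example_probe_symlink_support(scratch_dir):
    """Return True if symlinks can be created under `scratch_dir` (sketch)."""
    from pathlib import Path
    scratch = Path(scratch_dir)
    # both paths are throw-away probe files; check_symlink_capability()
    # creates them for the test and removes them again afterwards
    return check_symlink_capability(
        scratch / '.datalad_symlink_probe',
        scratch / '.datalad_symlink_target')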
def get_home_envvars(new_home): """Return dict with env variables to be adjusted for a new HOME Only variables found in current os.environ are adjusted. Parameters ---------- new_home: str or Path New home path, in native to OS "schema" """ new_home = str(new_home) out = {'HOME': new_home} if on_windows: # requires special handling, since it has a number of relevant variables # and also Python changed its behavior and started to respect USERPROFILE only # since python 3.8: https://bugs.python.org/issue36264 out['USERPROFILE'] = new_home out['HOMEDRIVE'], out['HOMEPATH'] = splitdrive(new_home) return {v: val for v, val in out.items() if v in os.environ}
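

# --- Illustrative sketch (not part of the original module) ------------------
# get_home_envvars() is typically combined with a copy of os.environ when
# spawning a subprocess that should see a different HOME.  The fake home
# path and the command passed in are caller-provided examples, not anything
# the library itself mandates.
def _example_run_with_home(new_home, cmd):
    """Run `cmd` with HOME-related env vars pointing at `new_home` (sketch)."""
    import subprocess
    env = dict(os.environ)
    # overlay only the HOME-related variables that are actually set
    env.update(get_home_envvars(new_home))
    return subprocess.run(cmd, env=env)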
232
251
# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*- # ex: set sts=4 ts=4 sw=4 et: # ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## # # See COPYING file distributed along with the datalad package for the # copyright and license terms. # # ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## import collections from collections.abc import Callable import re import builtins import time import logging import shutil import os import sys import tempfile from tempfile import NamedTemporaryFile import platform import gc import glob import gzip import stat import string import warnings import os.path as op from copy import copy as shallow_copy from contextlib import contextmanager from functools import ( lru_cache, wraps, ) from time import sleep import inspect from itertools import tee # this import is required because other modules import opj from here. from os.path import join as opj from os.path import ( abspath, basename, commonprefix, curdir, dirname, exists, expanduser, expandvars, isabs, isdir, islink, lexists, normpath, pardir, relpath, sep, split, splitdrive ) import posixpath from shlex import ( quote as shlex_quote, split as shlex_split, ) # from datalad.dochelpers import get_docstring_split from datalad.consts import TIMESTAMP_FMT from datalad.support.exceptions import CapturedException unicode_srctypes = str, bytes lgr = logging.getLogger("datalad.utils") lgr.log(5, "Importing datalad.utils") # # Some useful variables # platform_system = platform.system().lower() on_windows = platform_system == 'windows' on_osx = platform_system == 'darwin' on_linux = platform_system == 'linux' on_msys_tainted_paths = on_windows \ and 'MSYS_NO_PATHCONV' not in os.environ \ and os.environ.get('MSYSTEM', '')[:4] in ('MSYS', 'MING') # Takes ~200msec, so should not be called at import time @lru_cache() # output should not change through life time of datalad process def get_linux_distribution(): """Compatibility wrapper for {platform,distro}.linux_distribution(). """ if hasattr(platform, "linux_distribution"): # Use deprecated (but faster) method if it's available. with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=DeprecationWarning) result = platform.linux_distribution() else: import distro # We require this for Python 3.8 and above. result = distro.linux_distribution(full_distribution_name=False) return result # Those weren't used for any critical decision making, thus we just set them to None # Use get_linux_distribution() directly where needed linux_distribution_name = linux_distribution_release = None # Maximal length of cmdline string # Query the system and use hardcoded "knowledge" if None # probably getconf ARG_MAX might not be available # The last one would be the most conservative/Windows CMD_MAX_ARG_HARDCODED = 2097152 if on_linux else 262144 if on_osx else 32767 try: CMD_MAX_ARG = os.sysconf('SC_ARG_MAX') assert CMD_MAX_ARG > 0 if CMD_MAX_ARG > CMD_MAX_ARG_HARDCODED * 1e6: # workaround for some kind of a bug which comes up with python 3.4 # see https://github.com/datalad/datalad/issues/3150 # or on older CentOS with conda and python as new as 3.9 # see https://github.com/datalad/datalad/issues/5943 # TODO: let Yarik know that the world is a paradise now whenever 1e6 # is not large enough CMD_MAX_ARG = min(CMD_MAX_ARG, CMD_MAX_ARG_HARDCODED) except Exception as exc: # ATM (20181005) SC_ARG_MAX available only on POSIX systems # so exception would be thrown e.g. 
on Windows, or # somehow during Debian build for nd14.04 it is coming up with -1: # https://github.com/datalad/datalad/issues/3015 CMD_MAX_ARG = CMD_MAX_ARG_HARDCODED lgr.debug( "Failed to query or got useless SC_ARG_MAX sysconf, " "will use hardcoded value: %s", exc) # Even with all careful computations we do, due to necessity to account for # environment and what not, we still could not figure out "exact" way to # estimate it, but it was shown that 300k safety margin on linux was sufficient. # https://github.com/datalad/datalad/pull/2977#issuecomment-436264710 # 300k is ~15%, so to be safe, and for paranoid us we will just use up to 50% # of the length for "safety margin". We might probably still blow due to # env vars, unicode, etc... so any hard limit imho is not a proper solution CMD_MAX_ARG = int(0.5 * CMD_MAX_ARG) lgr.debug( "Maximal length of cmdline string (adjusted for safety margin): %d", CMD_MAX_ARG) # # Little helpers # # `getargspec` has been deprecated in Python 3. ArgSpecFake = collections.namedtuple( "ArgSpecFake", ["args", "varargs", "keywords", "defaults"]) def getargspec(func, *, include_kwonlyargs=False): """Compat shim for getargspec deprecated in python 3. The main difference from inspect.getargspec (and inspect.getfullargspec for that matter) is that by using inspect.signature we are providing correct args/defaults for functools.wraps'ed functions. `include_kwonlyargs` option was added to centralize getting all args, even the ones which are kwonly (follow the ``*,``). For internal use and not advised for use in 3rd party code. Please use inspect.signature directly. """ # We use signature, and not getfullargspec, because only signature properly # "passes" args from a functools.wraps decorated function. # Note: getfullargspec works Ok on wrapt-decorated functions f_sign = inspect.signature(func) # Loop through parameters and compose argspec args4 = [[], None, None, {}] # Collect all kwonlyargs into a dedicated dict - name: default kwonlyargs = {} # shortcuts args, defaults = args4[0], args4[3] P = inspect.Parameter for p_name, p in f_sign.parameters.items(): if p.kind in (P.POSITIONAL_ONLY, P.POSITIONAL_OR_KEYWORD): assert not kwonlyargs # yoh: must not come after kwonlyarg args.append(p_name) if p.default is not P.empty: defaults[p_name] = p.default elif p.kind == P.VAR_POSITIONAL: args4[1] = p_name elif p.kind == P.VAR_KEYWORD: args4[2] = p_name elif p.kind == P.KEYWORD_ONLY: assert p.default is not P.empty kwonlyargs[p_name] = p.default if kwonlyargs: if not include_kwonlyargs: raise ValueError( 'Function has keyword-only parameters or annotations, either use ' 'inspect.signature() API which can support them, or provide include_kwonlyargs=True ' 'to this function' ) else: args.extend(list(kwonlyargs)) defaults.update(kwonlyargs) # harmonize defaults to how original getargspec returned them -- just a tuple args4[3] = None if not defaults else tuple(defaults.values()) return ArgSpecFake(*args4) def any_re_search(regexes, value): """Return if any of regexes (list or str) searches successfully for value""" for regex in ensure_tuple_or_list(regexes): if re.search(regex, value): return True return False def not_supported_on_windows(msg=None): """A little helper to be invoked to consistently fail whenever functionality is not supported (yet) on Windows """ if on_windows: raise NotImplementedError("This functionality is not yet implemented for Windows OS" + (": %s" % msg if msg else "")) def get_home_envvars(new_home): """Return dict with env variables to be adjusted for a 
new HOME Only variables found in current os.environ are adjusted. Parameters ---------- new_home: str or Path New home path, in native to OS "schema" """ new_home = str(new_home) out = {'HOME': new_home} if on_windows: # requires special handling, since it has a number of relevant variables # and also Python changed its behavior and started to respect USERPROFILE only # since python 3.8: https://bugs.python.org/issue36264 out['USERPROFILE'] = new_home out['HOMEDRIVE'], out['HOMEPATH'] = splitdrive(new_home) return {v: val for v, val in out.items() if v in os.environ} def shortened_repr(value, l=30): try: if hasattr(value, '__repr__') and (value.__repr__ is not object.__repr__): value_repr = repr(value) if not value_repr.startswith('<') and len(value_repr) > l: value_repr = "<<%s++%d chars++%s>>" % ( value_repr[:l - 16], len(value_repr) - (l - 16 + 4), value_repr[-4:] ) elif value_repr.startswith('<') and value_repr.endswith('>') and ' object at 0x': raise ValueError("I hate those useless long reprs") else: raise ValueError("gimme class") except Exception as e: value_repr = "<%s>" % value.__class__.__name__.split('.')[-1] return value_repr def __auto_repr__(obj): attr_names = tuple() if hasattr(obj, '__dict__'): attr_names += tuple(obj.__dict__.keys()) if hasattr(obj, '__slots__'): attr_names += tuple(obj.__slots__) items = [] for attr in sorted(set(attr_names)): if attr.startswith('_'): continue value = getattr(obj, attr) # TODO: should we add this feature to minimize some talktative reprs # such as of URL? #if value is None: # continue items.append("%s=%s" % (attr, shortened_repr(value))) return "%s(%s)" % (obj.__class__.__name__, ', '.join(items)) def auto_repr(cls): """Decorator for a class to assign it an automagic quick and dirty __repr__ It uses public class attributes to prepare repr of a class Original idea: http://stackoverflow.com/a/27799004/1265472 """ cls.__repr__ = __auto_repr__ return cls def _is_stream_tty(stream): try: # TODO: check on windows if hasattr check would work correctly and # add value: return stream.isatty() except ValueError as exc: # Who knows why it is a ValueError, but let's try to be specific # If there is a problem with I/O - non-interactive, otherwise reraise if "I/O" in str(exc): return False raise def is_interactive(): """Return True if all in/outs are open and tty. Note that in a somewhat abnormal case where e.g. stdin is explicitly closed, and any operation on it would raise a `ValueError("I/O operation on closed file")` exception, this function would just return False, since the session cannot be used interactively. 
""" return all(_is_stream_tty(s) for s in (sys.stdin, sys.stdout, sys.stderr)) def get_ipython_shell(): """Detect if running within IPython and returns its `ip` (shell) object Returns None if not under ipython (no `get_ipython` function) """ try: return get_ipython() except NameError: return None def md5sum(filename): """Compute an MD5 sum for the given file """ from datalad.support.digests import Digester return Digester(digests=['md5'])(filename)['md5'] # unused in -core def sorted_files(path): """Return a (sorted) list of files under path """ return sorted(sum([[op.join(r, f)[len(path) + 1:] for f in files] for r, d, files in os.walk(path) if not '.git' in r], [])) _encoded_dirsep = r'\\' if on_windows else r'/' _VCS_REGEX = r'%s\.(?:git|gitattributes|svn|bzr|hg)(?:%s|$)' % ( _encoded_dirsep, _encoded_dirsep) _DATALAD_REGEX = r'%s\.(?:datalad)(?:%s|$)' % ( _encoded_dirsep, _encoded_dirsep) def find_files(regex, topdir=curdir, exclude=None, exclude_vcs=True, exclude_datalad=False, dirs=False): """Generator to find files matching regex Parameters ---------- regex: basestring exclude: basestring, optional Matches to exclude exclude_vcs: If True, excludes commonly known VCS subdirectories. If string, used as regex to exclude those files (regex: `%r`) exclude_datalad: If True, excludes files known to be datalad meta-data files (e.g. under .datalad/ subdirectory) (regex: `%r`) topdir: basestring, optional Directory where to search dirs: bool, optional Whether to match directories as well as files """ for dirpath, dirnames, filenames in os.walk(topdir): names = (dirnames + filenames) if dirs else filenames # TODO: might want to uniformize on windows to use '/' paths = (op.join(dirpath, name) for name in names) for path in filter(re.compile(regex).search, paths): path = path.rstrip(sep) if exclude and re.search(exclude, path): continue if exclude_vcs and re.search(_VCS_REGEX, path): continue if exclude_datalad and re.search(_DATALAD_REGEX, path): continue yield path find_files.__doc__ %= (_VCS_REGEX, _DATALAD_REGEX) def expandpath(path, force_absolute=True): """Expand all variables and user handles in a path. By default return an absolute path """ path = expandvars(expanduser(path)) if force_absolute: path = abspath(path) return path def posix_relpath(path, start=None): """Behave like os.path.relpath, but always return POSIX paths... on any platform.""" # join POSIX style return posixpath.join( # split and relpath native style # python2.7 ntpath implementation of relpath cannot handle start=None *split( relpath(path, start=start if start is not None else ''))) def is_explicit_path(path): """Return whether a path explicitly points to a location Any absolute path, or relative path starting with either '../' or './' is assumed to indicate a location on the filesystem. 
Any other path format is not considered explicit.""" path = expandpath(path, force_absolute=False) return isabs(path) \ or path.startswith(os.curdir + os.sep) \ or path.startswith(os.pardir + os.sep) # handle this dance once, and import pathlib from here # in all other places from pathlib import ( Path, PurePath, PurePosixPath, ) def rotree(path, ro=True, chmod_files=True): """To make tree read-only or writable Parameters ---------- path : string Path to the tree/directory to chmod ro : bool, optional Whether to make it R/O (default) or RW chmod_files : bool, optional Whether to operate also on files (not just directories) """ if ro: chmod = lambda f: os.chmod(f, os.stat(f).st_mode & ~stat.S_IWRITE) else: chmod = lambda f: os.chmod(f, os.stat(f).st_mode | stat.S_IWRITE | stat.S_IREAD) for root, dirs, files in os.walk(path, followlinks=False): if chmod_files: for f in files: fullf = op.join(root, f) # might be the "broken" symlink which would fail to stat etc if exists(fullf): chmod(fullf) chmod(root) def rmtree(path, chmod_files='auto', children_only=False, *args, **kwargs): """To remove git-annex .git it is needed to make all files and directories writable again first Parameters ---------- path: Path or str Path to remove chmod_files : string or bool, optional Whether to make files writable also before removal. Usually it is just a matter of directories to have write permissions. If 'auto' it would chmod files on windows by default children_only : bool, optional If set, all files and subdirectories would be removed while the path itself (must be a directory) would be preserved `*args` : `**kwargs` : Passed into shutil.rmtree call """ # Give W permissions back only to directories, no need to bother with files if chmod_files == 'auto': chmod_files = on_windows # TODO: yoh thinks that if we could quickly check our Flyweight for # repos if any of them is under the path, and could call .precommit # on those to possibly stop batched processes etc, we did not have # to do it on case by case # Check for open files assert_no_open_files(path) # TODO the whole thing should be reimplemented with pathlib, but for now # at least accept Path path = str(path) if children_only: if not isdir(path): raise ValueError("Can remove children only of directories") for p in os.listdir(path): rmtree(op.join(path, p)) return if not (islink(path) or not isdir(path)): rotree(path, ro=False, chmod_files=chmod_files) if on_windows: # shutil fails to remove paths that exceed 260 characters on Windows machines # that did not enable long path support. A workaround to remove long paths # anyway is to preprend \\?\ to the path. # https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file?redirectedfrom=MSDN#win32-file-namespaces path = r'\\?\ '.strip() + path _rmtree(path, *args, **kwargs) else: # just remove the symlink unlink(path) def rmdir(path, *args, **kwargs): """os.rmdir with our optional checking for open files""" assert_no_open_files(path) os.rmdir(path) def get_open_files(path, log_open=False): """Get open files under a path Note: This function is very slow on Windows. 
Parameters ---------- path : str File or directory to check for open files under log_open : bool or int If set - logger level to use Returns ------- dict path : pid """ # Original idea: https://stackoverflow.com/a/11115521/1265472 import psutil files = {} # since the ones returned by psutil would not be aware of symlinks in the # path we should also get realpath for path # do absolute() in addition to always get an absolute path # even with non-existing paths on windows path = str(Path(path).resolve().absolute()) for proc in psutil.process_iter(): try: open_paths = [p.path for p in proc.open_files()] + [proc.cwd()] for p in open_paths: # note: could be done more efficiently so we do not # renormalize path over and over again etc if path_startswith(p, path): files[p] = proc # Catch a race condition where a process ends # before we can examine its files except psutil.NoSuchProcess: pass except psutil.AccessDenied: pass if files and log_open: lgr.log(log_open, "Open files under %s: %s", path, files) return files _assert_no_open_files_cfg = os.environ.get('DATALAD_ASSERT_NO_OPEN_FILES') if _assert_no_open_files_cfg: def assert_no_open_files(path): files = get_open_files(path, log_open=40) if _assert_no_open_files_cfg == 'assert': assert not files, "Got following files still open: %s" % ','.join(files) elif files: if _assert_no_open_files_cfg == 'pdb': import pdb pdb.set_trace() elif _assert_no_open_files_cfg == 'epdb': import epdb epdb.serve() pass # otherwise we would just issue that error message in the log else: def assert_no_open_files(*args, **kwargs): pass def rmtemp(f, *args, **kwargs): """Wrapper to centralize removing of temp files so we could keep them around It will not remove the temporary file/directory if DATALAD_TESTS_TEMP_KEEP environment variable is defined """ if not os.environ.get('DATALAD_TESTS_TEMP_KEEP'): if not os.path.lexists(f): lgr.debug("Path %s does not exist, so can't be removed", f) return lgr.log(5, "Removing temp file: %s", f) # Can also be a directory if isdir(f): rmtree(f, *args, **kwargs) else: unlink(f) else: lgr.info("Keeping temp file: %s", f) def file_basename(name, return_ext=False): """ Strips up to 2 extensions of length up to 4 characters and starting with alpha not a digit, so we could get rid of .tar.gz etc """ bname = basename(name) fbname = re.sub(r'(\.[a-zA-Z_]\S{1,4}){0,2}$', '', bname) if return_ext: return fbname, bname[len(fbname) + 1:] else: return fbname # unused in -core def escape_filename(filename): """Surround filename in "" and escape " in the filename """ filename = filename.replace('"', r'\"').replace('`', r'\`') filename = '"%s"' % filename return filename # unused in -core def encode_filename(filename): """Encode unicode filename """ if isinstance(filename, str): return filename.encode(sys.getfilesystemencoding()) else: return filename # unused in -core def decode_input(s): """Given input string/bytes, decode according to stdin codepage (or UTF-8) if not defined If fails -- issue warning and decode allowing for errors being replaced """ if isinstance(s, str): return s else: encoding = sys.stdin.encoding or 'UTF-8' try: return s.decode(encoding) except UnicodeDecodeError as exc: lgr.warning( "Failed to decode input string using %s encoding. " "Decoding allowing for errors", encoding) return s.decode(encoding, errors='replace') # unused in -core if on_windows: def lmtime(filepath, mtime): """Set mtime for files. 
On Windows a merely adapter to os.utime """ os.utime(filepath, (time.time(), mtime)) else: def lmtime(filepath, mtime): """Set mtime for files, while not de-referencing symlinks. To overcome absence of os.lutime Works only on linux and OSX ATM """ from .cmd import WitlessRunner # convert mtime to format touch understands [[CC]YY]MMDDhhmm[.SS] smtime = time.strftime("%Y%m%d%H%M.%S", time.localtime(mtime)) lgr.log(3, "Setting mtime for %s to %s == %s", filepath, mtime, smtime) WitlessRunner().run(['touch', '-h', '-t', '%s' % smtime, filepath]) filepath = Path(filepath) rfilepath = filepath.resolve() if filepath.is_symlink() and rfilepath.exists(): # trust no one - adjust also of the target file # since it seemed like downloading under OSX (was it using curl?) # didn't bother with timestamps lgr.log(3, "File is a symlink to %s Setting mtime for it to %s", rfilepath, mtime) os.utime(str(rfilepath), (time.time(), mtime)) # doesn't work on OSX # Runner().run(['touch', '-h', '-d', '@%s' % mtime, filepath]) def ensure_tuple_or_list(obj): """Given an object, wrap into a tuple if not list or tuple """ if isinstance(obj, (list, tuple)): return obj return (obj,) def ensure_iter(s, cls, copy=False, iterate=True): """Given not a list, would place it into a list. If None - empty list is returned Parameters ---------- s: list or anything cls: class Which iterable class to ensure copy: bool, optional If correct iterable is passed, it would generate its shallow copy iterate: bool, optional If it is not a list, but something iterable (but not a str) iterate over it. """ if isinstance(s, cls): return s if not copy else shallow_copy(s) elif isinstance(s, str): return cls((s,)) elif iterate and hasattr(s, '__iter__'): return cls(s) elif s is None: return cls() else: return cls((s,)) def ensure_list(s, copy=False, iterate=True): """Given not a list, would place it into a list. If None - empty list is returned Parameters ---------- s: list or anything copy: bool, optional If list is passed, it would generate a shallow copy of the list iterate: bool, optional If it is not a list, but something iterable (but not a str) iterate over it. """ return ensure_iter(s, list, copy=copy, iterate=iterate) def ensure_list_from_str(s, sep='\n'): """Given a multiline string convert it to a list of return None if empty Parameters ---------- s: str or list """ if not s: return None if isinstance(s, list): return s return s.split(sep) def ensure_dict_from_str(s, **kwargs): """Given a multiline string with key=value items convert it to a dictionary Parameters ---------- s: str or dict Returns None if input s is empty """ if not s: return None if isinstance(s, dict): return s out = {} for value_str in ensure_list_from_str(s, **kwargs): if '=' not in value_str: raise ValueError("{} is not in key=value format".format(repr(value_str))) k, v = value_str.split('=', 1) if k in out: err = "key {} was already defined in {}, but new value {} was provided".format(k, out, v) raise ValueError(err) out[k] = v return out def ensure_bytes(s, encoding='utf-8'): """Convert/encode unicode string to bytes. If `s` isn't a string, return it as is. Parameters ---------- encoding: str, optional Encoding to use. "utf-8" is the default """ if not isinstance(s, str): return s return s.encode(encoding) def ensure_unicode(s, encoding=None, confidence=None): """Convert/decode bytestring to unicode. If `s` isn't a bytestring, return it as is. Parameters ---------- encoding: str, optional Encoding to use. 
If None, "utf-8" is tried, and then if not a valid UTF-8, encoding will be guessed confidence: float, optional A value between 0 and 1, so if guessing of encoding is of lower than specified confidence, ValueError is raised """ if not isinstance(s, bytes): return s if encoding is None: # Figure out encoding, defaulting to 'utf-8' which is our common # target in contemporary digital society try: return s.decode('utf-8') except UnicodeDecodeError as exc: lgr.debug("Failed to decode a string as utf-8: %s", CapturedException(exc)) # And now we could try to guess from chardet import detect enc = detect(s) denc = enc.get('encoding', None) if denc: denc_confidence = enc.get('confidence', 0) if confidence is not None and denc_confidence < confidence: raise ValueError( "Failed to auto-detect encoding with high enough " "confidence. Highest confidence was %s for %s" % (denc_confidence, denc) ) lgr.log(5, "Auto-detected encoding to be %s", denc) return s.decode(denc) else: raise ValueError( "Could not decode value as utf-8, or to guess its encoding: %s" % repr(s) ) else: return s.decode(encoding) def ensure_bool(s): """Convert value into boolean following convention for strings to recognize on,True,yes as True, off,False,no as False """ if isinstance(s, str): if s.isdigit(): return bool(int(s)) sl = s.lower() if sl in {'y', 'yes', 'true', 'on'}: return True elif sl in {'n', 'no', 'false', 'off'}: return False else: raise ValueError("Do not know how to treat %r as a boolean" % s) return bool(s) def as_unicode(val, cast_types=object): """Given an arbitrary value, would try to obtain unicode value of it For unicode it would return original value, for python2 str or python3 bytes it would use ensure_unicode, for None - an empty (unicode) string, and for any other type (see `cast_types`) - would apply the unicode constructor. If value is not an instance of `cast_types`, TypeError is thrown Parameters ---------- cast_types: type Which types to cast to unicode by providing to constructor """ if val is None: return u'' elif isinstance(val, str): return val elif isinstance(val, unicode_srctypes): return ensure_unicode(val) elif isinstance(val, cast_types): return str(val) else: raise TypeError( "Value %r is not of any of known or provided %s types" % (val, cast_types)) def unique(seq, key=None, reverse=False): """Given a sequence return a list only with unique elements while maintaining order This is the fastest solution. See https://www.peterbe.com/plog/uniqifiers-benchmark and http://stackoverflow.com/a/480227/1265472 for more information. Enhancement -- added ability to compare for uniqueness using a key function Parameters ---------- seq: Sequence to analyze key: callable, optional Function to call on each element so we could decide not on a full element, but on its member etc reverse: bool, optional If True, uniqueness checked in the reverse order, so that the later ones will take the order """ seen = set() seen_add = seen.add trans = reversed if reverse else lambda x: x if not key: out = [x for x in trans(seq) if not (x in seen or seen_add(x))] else: # OPT: could be optimized, since key is called twice, but for our cases # should be just as fine out = [x for x in trans(seq) if not (key(x) in seen or seen_add(key(x)))] return out[::-1] if reverse else out def all_same(items): """Quick check if all items are the same. 
Identical to a check like len(set(items)) == 1 but should be more efficient while working on generators, since would return False as soon as any difference detected thus possibly avoiding unnecessary evaluations """ first = True first_item = None for item in items: if first: first = False first_item = item else: if item != first_item: return False # So we return False if was empty return not first def map_items(func, v): """A helper to apply `func` to all elements (keys and values) within dict No type checking of values passed to func is done, so `func` should be resilient to values which it should not handle Initial usecase - apply_recursive(url_fragment, ensure_unicode) """ # map all elements within item return v.__class__( item.__class__(map(func, item)) for item in v.items() ) def partition(items, predicate=bool): """Partition `items` by `predicate`. Parameters ---------- items : iterable predicate : callable A function that will be mapped over each element in `items`. The elements will partitioned based on whether the return value is false or true. Returns ------- A tuple with two generators, the first for 'false' items and the second for 'true' ones. Notes ----- Taken from Peter Otten's snippet posted at https://nedbatchelder.com/blog/201306/filter_a_list_into_two_parts.html """ a, b = tee((predicate(item), item) for item in items) return ((item for pred, item in a if not pred), (item for pred, item in b if pred)) def generate_chunks(container, size): """Given a container, generate chunks from it with size up to `size` """ # There could be a "smarter" solution but I think this would suffice assert size > 0, "Size should be non-0 positive" while container: yield container[:size] container = container[size:] def generate_file_chunks(files, cmd=None): """Given a list of files, generate chunks of them to avoid exceeding cmdline length Parameters ---------- files: list of str cmd: str or list of str, optional Command to account for as well """ files = ensure_list(files) cmd = ensure_list(cmd) maxl = max(map(len, files)) if files else 0 chunk_size = max( 1, # should at least be 1. If blows then - not our fault (CMD_MAX_ARG - sum((len(x) + 3) for x in cmd) - 4 # for '--' below ) // (maxl + 3) # +3 for possible quotes and a space ) # TODO: additional treatment for "too many arguments"? although # as https://github.com/datalad/datalad/issues/1883#issuecomment # -436272758 # shows there seems to be no hardcoded limit on # of arguments, # but may be we decide to go for smth like follow to be on safe side # chunk_size = min(10240 - len(cmd), chunk_size) file_chunks = generate_chunks(files, chunk_size) return file_chunks # # Generators helpers # def saved_generator(gen): """Given a generator returns two generators, where 2nd one just replays So the first one would be going through the generated items and 2nd one would be yielding saved items """ saved = [] def gen1(): for x in gen: # iterating over original generator saved.append(x) yield x def gen2(): for x in saved: # yielding saved entries yield x return gen1(), gen2() # # Decorators # # Originally better_wraps was created to provide `wrapt`-based, instead of # `functools.wraps` implementation to preserve the correct signature of the # decorated function. By using inspect.signature in our getargspec, which # works fine on `functools.wraps`ed functions, we mediated this necessity. better_wraps = wraps # Borrowed from pandas # Copyright: 2011-2014, Lambda Foundry, Inc. 
and PyData Development Team # License: BSD-3 def optional_args(decorator): """allows a decorator to take optional positional and keyword arguments. Assumes that taking a single, callable, positional argument means that it is decorating a function, i.e. something like this:: @my_decorator def function(): pass Calls decorator with decorator(f, `*args`, `**kwargs`)""" @better_wraps(decorator) def wrapper(*args, **kwargs): def dec(f): return decorator(f, *args, **kwargs) is_decorating = not kwargs and len(args) == 1 and isinstance(args[0], Callable) if is_decorating: f = args[0] args = [] return dec(f) else: return dec return wrapper # TODO: just provide decorators for tempfile.mk* functions. This is ugly! def get_tempfile_kwargs(tkwargs=None, prefix="", wrapped=None): """Updates kwargs to be passed to tempfile. calls depending on env vars """ if tkwargs is None: tkwargs_ = {} else: # operate on a copy of tkwargs to avoid any side-effects tkwargs_ = tkwargs.copy() # TODO: don't remember why I had this one originally # if len(targs)<2 and \ if 'prefix' not in tkwargs_: tkwargs_['prefix'] = '_'.join( ['datalad_temp'] + ([prefix] if prefix else []) + ([''] if (on_windows or not wrapped) else [wrapped.__name__])) directory = os.environ.get('TMPDIR') if directory and 'dir' not in tkwargs_: tkwargs_['dir'] = directory return tkwargs_ @optional_args def line_profile(func): """Q&D helper to line profile the function and spit out stats """ import line_profiler prof = line_profiler.LineProfiler() @wraps(func) def _wrap_line_profile(*args, **kwargs): try: pfunc = prof(func) return pfunc(*args, **kwargs) finally: prof.print_stats() return _wrap_line_profile # unused in -core @optional_args def collect_method_callstats(func): """Figure out methods which call the method repeatedly on the same instance Use case(s): - .repo is expensive since does all kinds of checks. - .config is expensive transitively since it calls .repo each time TODO: - fancy one could look through the stack for the same id(self) to see if that location is already in memo. That would hint to the cases where object is not passed into underlying functions, causing them to redo the same work over and over again - ATM might flood with all "1 lines" calls which are not that informative. The underlying possibly suboptimal use might be coming from their callers. 
It might or not relate to the previous TODO """ from collections import defaultdict import traceback from time import time memo = defaultdict(lambda: defaultdict(int)) # it will be a dict of lineno: count # gross timing times = [] toppath = dirname(__file__) + sep @wraps(func) def _wrap_collect_method_callstats(*args, **kwargs): try: self = args[0] stack = traceback.extract_stack() caller = stack[-2] stack_sig = \ "{relpath}:{s.name}".format( s=caller, relpath=relpath(caller.filename, toppath)) sig = (id(self), stack_sig) # we will count based on id(self) + wherefrom memo[sig][caller.lineno] += 1 t0 = time() return func(*args, **kwargs) finally: times.append(time() - t0) pass def print_stats(): print("The cost of property {}:".format(func.__name__)) if not memo: print("None since no calls") return # total count counts = {k: sum(v.values()) for k,v in memo.items()} total = sum(counts.values()) ids = {self_id for (self_id, _) in memo} print(" Total: {} calls from {} objects with {} contexts taking {:.2f} sec" .format(total, len(ids), len(memo), sum(times))) # now we need to sort by value for (self_id, caller), count in sorted(counts.items(), key=lambda x: x[1], reverse=True): print(" {} {}: {} from {} lines" .format(self_id, caller, count, len(memo[(self_id, caller)]))) # Upon total exit we print the stats import atexit atexit.register(print_stats) return _wrap_collect_method_callstats # Borrowed from duecredit to wrap duecredit-handling to guarantee failsafe def never_fail(f): """Assure that function never fails -- all exceptions are caught Returns `None` if function fails internally. """ @wraps(f) def wrapped_func(*args, **kwargs): try: return f(*args, **kwargs) except Exception as e: lgr.warning( "DataLad internal failure while running %s: %r. " "Please report at https://github.com/datalad/datalad/issues" % (f, e) ) if os.environ.get('DATALAD_ALLOW_FAIL', False): return f else: return wrapped_func # # Context Managers # # unused in -core @contextmanager def nothing_cm(): """Just a dummy cm to programmically switch context managers""" yield @contextmanager def swallow_outputs(): """Context manager to help consuming both stdout and stderr, and print() stdout is available as cm.out and stderr as cm.err whenever cm is the yielded context manager. Internally uses temporary files to guarantee absent side-effects of swallowing into StringIO which lacks .fileno. print mocking is necessary for some uses where sys.stdout was already bound to original sys.stdout, thus mocking it later had no effect. 
Overriding print function had desired effect """ class StringIOAdapter(object): """Little adapter to help getting out/err values """ def __init__(self): kw = get_tempfile_kwargs({}, prefix="outputs") self._out = NamedTemporaryFile(delete=False, mode='w', **kw) self._err = NamedTemporaryFile(delete=False, mode='w', **kw) def _read(self, h): with open(h.name) as f: return f.read() @property def out(self): if not self._out.closed: self._out.flush() return self._read(self._out) @property def err(self): if not self._err.closed: self._err.flush() return self._read(self._err) @property def handles(self): return self._out, self._err def cleanup(self): self._out.close() self._err.close() out_name = self._out.name err_name = self._err.name from datalad import cfg if cfg.getbool('datalad.log', 'outputs', default=False) \ and lgr.getEffectiveLevel() <= logging.DEBUG: for s, sname in ((self.out, 'stdout'), (self.err, 'stderr')): if s: pref = os.linesep + "| " lgr.debug("Swallowed %s:%s%s", sname, pref, s.replace(os.linesep, pref)) else: lgr.debug("Nothing was swallowed for %s", sname) del self._out del self._err gc.collect() rmtemp(out_name) rmtemp(err_name) def fake_print(*args, **kwargs): sep = kwargs.pop('sep', ' ') end = kwargs.pop('end', '\n') file = kwargs.pop('file', sys.stdout) if file in (oldout, olderr, sys.stdout, sys.stderr): # we mock try: sys.stdout.write(sep.join(args) + end) except UnicodeEncodeError as exc: lgr.error( "Failed to write to mocked stdout, got %s, continue as it " "didn't happen", exc) else: # must be some other file one -- leave it alone oldprint(*args, sep=sep, end=end, file=file) from .ui import ui # preserve -- they could have been mocked already oldprint = getattr(builtins, 'print') oldout, olderr = sys.stdout, sys.stderr olduiout = ui.out adapter = StringIOAdapter() try: sys.stdout, sys.stderr = adapter.handles ui.out = adapter.handles[0] setattr(builtins, 'print', fake_print) yield adapter finally: sys.stdout, sys.stderr, ui.out = oldout, olderr, olduiout setattr(builtins, 'print', oldprint) adapter.cleanup() @contextmanager def swallow_logs(new_level=None, file_=None, name='datalad'): """Context manager to consume all logs. """ lgr = logging.getLogger(name) # Keep old settings old_level = lgr.level old_handlers = lgr.handlers # Let's log everything into a string # TODO: generalize with the one for swallow_outputs class StringIOAdapter(object): """Little adapter to help getting out values And to stay consistent with how swallow_outputs behaves """ def __init__(self): if file_ is None: kw = get_tempfile_kwargs({}, prefix="logs") self._out = NamedTemporaryFile(mode='a', delete=False, **kw) else: out_file = file_ # PY3 requires clearly one or another. race condition possible self._out = open(out_file, 'a') self._final_out = None def _read(self, h): with open(h.name) as f: return f.read() @property def out(self): if self._final_out is not None: # we closed and cleaned up already return self._final_out else: self._out.flush() return self._read(self._out) @property def lines(self): return self.out.split('\n') @property def handle(self): return self._out def cleanup(self): # store for access while object exists self._final_out = self.out self._out.close() out_name = self._out.name del self._out gc.collect() if not file_: rmtemp(out_name) def assert_logged(self, msg=None, level=None, regex=True, **kwargs): """Provide assertion on whether a msg was logged at a given level If neither `msg` nor `level` provided, checks if anything was logged at all. 
Parameters ---------- msg: str, optional Message (as a regular expression, if `regex`) to be searched. If no msg provided, checks if anything was logged at a given level. level: str, optional String representing the level to be logged regex: bool, optional If False, regular `assert_in` is used **kwargs: str, optional Passed to `assert_re_in` or `assert_in` """ from datalad.tests.utils import assert_re_in from datalad.tests.utils import assert_in if regex: match = r'\[%s\] ' % level if level else r"\[\S+\] " else: match = '[%s] ' % level if level else '' if msg: match += msg if match: (assert_re_in if regex else assert_in)(match, self.out, **kwargs) else: assert not kwargs, "no kwargs to be passed anywhere" assert self.out, "Nothing was logged!?" adapter = StringIOAdapter() # TODO: it does store messages but without any formatting, i.e. even without # date/time prefix etc. IMHO it should preserve formatting in case if file_ is # set swallow_handler = logging.StreamHandler(adapter.handle) # we want to log levelname so we could test against it swallow_handler.setFormatter( logging.Formatter('[%(levelname)s] %(message)s')) swallow_handler.filters = sum([h.filters for h in old_handlers], []) lgr.handlers = [swallow_handler] if old_level < logging.DEBUG: # so if HEAVYDEBUG etc -- show them! lgr.handlers += old_handlers if isinstance(new_level, str): new_level = getattr(logging, new_level) if new_level is not None: lgr.setLevel(new_level) try: yield adapter # TODO: if file_ and there was an exception -- most probably worth logging it? # although ideally it should be the next log outside added to that file_ ... oh well finally: lgr.handlers = old_handlers lgr.setLevel(old_level) adapter.cleanup() # TODO: May be melt in with swallow_logs at some point: @contextmanager def disable_logger(logger=None): """context manager to temporarily disable logging This is to provide one of swallow_logs' purposes without unnecessarily creating temp files (see gh-1865) Parameters ---------- logger: Logger Logger whose handlers will be ordered to not log anything. Default: datalad's topmost Logger ('datalad') """ class NullFilter(logging.Filter): """Filter class to reject all records """ def filter(self, record): return 0 if logger is None: # default: all of datalad's logging: logger = logging.getLogger('datalad') filter_ = NullFilter(logger.name) [h.addFilter(filter_) for h in logger.handlers] try: yield logger finally: [h.removeFilter(filter_) for h in logger.handlers] # # Additional handlers # _sys_excepthook = sys.excepthook # Just in case we ever need original one def setup_exceptionhook(ipython=False): """Overloads default sys.excepthook with our exceptionhook handler. If interactive, our exceptionhook handler will invoke pdb.post_mortem; if not interactive, then invokes default handler. """ def _datalad_pdb_excepthook(type, value, tb): import traceback traceback.print_exception(type, value, tb) print() if is_interactive(): import pdb pdb.post_mortem(tb) if ipython: from IPython.core import ultratb sys.excepthook = ultratb.FormattedTB(mode='Verbose', # color_scheme='Linux', call_pdb=is_interactive()) else: sys.excepthook = _datalad_pdb_excepthook def ensure_dir(*args): """Make sure directory exists. Joins the list of arguments to an os-specific path to the desired directory and creates it, if it not exists yet. 
""" dirname = op.join(*args) if not exists(dirname): os.makedirs(dirname) return dirname def updated(d, update): """Return a copy of the input with the 'update' Primarily for updating dictionaries """ d = d.copy() d.update(update) return d _pwd_mode = None def _switch_to_getcwd(msg, *args): global _pwd_mode _pwd_mode = 'cwd' lgr.debug( msg + ". From now on will be returning os.getcwd(). Directory" " symlinks in the paths will be resolved", *args ) # TODO: we might want to mitigate by going through all flywheighted # repos and tuning up their .paths to be resolved? def getpwd(): """Try to return a CWD without dereferencing possible symlinks This function will try to use PWD environment variable to provide a current working directory, possibly with some directories along the path being symlinks to other directories. Unfortunately, PWD is used/set only by the shell and such functions as `os.chdir` and `os.getcwd` nohow use or modify it, thus `os.getcwd()` returns path with links dereferenced. While returning current working directory based on PWD env variable we verify that the directory is the same as `os.getcwd()` after resolving all symlinks. If that verification fails, we fall back to always use `os.getcwd()`. Initial decision to either use PWD env variable or os.getcwd() is done upon the first call of this function. """ global _pwd_mode if _pwd_mode is None: # we need to decide! try: pwd = os.environ['PWD'] if on_windows and pwd and pwd.startswith('/'): # It should be a path from MSYS. # - it might start with a drive letter or not # - it seems to be "illegal" to have a single letter directories # under / path, i.e. if created - they aren't found # - 'ln -s' does not fail to create a "symlink" but it just # copies! # so we are not likely to need original PWD purpose on # those systems # Verdict: _pwd_mode = 'cwd' else: _pwd_mode = 'PWD' except KeyError: _pwd_mode = 'cwd' if _pwd_mode == 'cwd': return os.getcwd() elif _pwd_mode == 'PWD': try: cwd = os.getcwd() except OSError as exc: if "o such file" in str(exc): # directory was removed but we promised to be robust and # still report the path we might know since we are still in PWD # mode cwd = None else: raise try: pwd = os.environ['PWD'] # do absolute() in addition to always get an absolute path # even with non-existing paths on windows pwd_real = str(Path(pwd).resolve().absolute()) # This logic would fail to catch the case where chdir did happen # to the directory where current PWD is pointing to, e.g. # $> ls -ld $PWD # lrwxrwxrwx 1 yoh yoh 5 Oct 11 13:27 /home/yoh/.tmp/tmp -> /tmp// # hopa:~/.tmp/tmp # $> python -c 'import os; os.chdir("/tmp"); from datalad.utils import getpwd; print(getpwd(), os.getcwd())' # ('/home/yoh/.tmp/tmp', '/tmp') # but I guess that should not be too harmful if cwd is not None and pwd_real != cwd: _switch_to_getcwd( "realpath of PWD=%s is %s whenever os.getcwd()=%s", pwd, pwd_real, cwd ) return cwd return pwd except KeyError: _switch_to_getcwd("PWD env variable is no longer available") return cwd # Must not happen, but may be someone # evil purges PWD from environ? else: raise RuntimeError( "Must have not got here. " "pwd_mode must be either cwd or PWD. And it is now %r" % (_pwd_mode,) ) class chpwd(object): """Wrapper around os.chdir which also adjusts environ['PWD'] The reason is that otherwise PWD is simply inherited from the shell and we have no ability to assess directory path without dereferencing symlinks. 
If used as a context manager it allows to temporarily change directory to the given path """ def __init__(self, path, mkdir=False, logsuffix=''): if path: pwd = getpwd() self._prev_pwd = pwd else: self._prev_pwd = None return if not isabs(path): path = normpath(op.join(pwd, path)) if not os.path.exists(path) and mkdir: self._mkdir = True os.mkdir(path) else: self._mkdir = False lgr.debug("chdir %r -> %r %s", self._prev_pwd, path, logsuffix) os.chdir(path) # for grep people -- ok, to chdir here! os.environ['PWD'] = str(path) def __enter__(self): # nothing more to do really, chdir was in the constructor pass def __exit__(self, exc_type, exc_val, exc_tb): if self._prev_pwd: # Need to use self.__class__ so this instance, if the entire # thing mocked during the test, still would use correct chpwd self.__class__(self._prev_pwd, logsuffix="(coming back)") def dlabspath(path, norm=False): """Symlinks-in-the-cwd aware abspath os.path.abspath relies on os.getcwd() which would not know about symlinks in the path TODO: we might want to norm=True by default to match behavior of os .path.abspath? """ if not isabs(path): # if not absolute -- relative to pwd path = op.join(getpwd(), path) return normpath(path) if norm else path def with_pathsep(path): """Little helper to guarantee that path ends with /""" return path + sep if not path.endswith(sep) else path def get_path_prefix(path, pwd=None): """Get path prefix (for current directory) Returns relative path to the topdir, if we are under topdir, and if not absolute path to topdir. If `pwd` is not specified - current directory assumed """ pwd = pwd or getpwd() path = dlabspath(path) path_ = with_pathsep(path) pwd_ = with_pathsep(pwd) common = commonprefix((path_, pwd_)) if common.endswith(sep) and common in {path_, pwd_}: # we are in subdir or above the path = use relative path location_prefix = relpath(path, pwd) # if benign "here" - cut off if location_prefix in (curdir, curdir + sep): location_prefix = '' return location_prefix else: # just return absolute path return path def _get_normalized_paths(path, prefix): if isabs(path) != isabs(prefix): raise ValueError("Both paths must either be absolute or relative. " "Got %r and %r" % (path, prefix)) path = with_pathsep(path) prefix = with_pathsep(prefix) return path, prefix def path_startswith(path, prefix): """Return True if path starts with prefix path Parameters ---------- path: str prefix: str """ path, prefix = _get_normalized_paths(path, prefix) return path.startswith(prefix) def path_is_subpath(path, prefix): """Return True if path is a subpath of prefix It will return False if path == prefix. Parameters ---------- path: str prefix: str """ path, prefix = _get_normalized_paths(path, prefix) return (len(prefix) < len(path)) and path.startswith(prefix) def knows_annex(path): """Returns whether at a given path there is information about an annex It is just a thin wrapper around GitRepo.is_with_annex() classmethod which also checks for `path` to exist first. This includes actually present annexes, but also uninitialized ones, or even the presence of a remote annex branch. 
""" from os.path import exists if not exists(path): lgr.debug("No annex: test path {0} doesn't exist".format(path)) return False from datalad.support.gitrepo import GitRepo return GitRepo(path, init=False, create=False).is_with_annex() @contextmanager def make_tempfile(content=None, wrapped=None, **tkwargs): """Helper class to provide a temporary file name and remove it at the end (context manager) Parameters ---------- mkdir : bool, optional (default: False) If True, temporary directory created using tempfile.mkdtemp() content : str or bytes, optional Content to be stored in the file created wrapped : function, optional If set, function name used to prefix temporary file name `**tkwargs`: All other arguments are passed into the call to tempfile.mk{,d}temp(), and resultant temporary filename is passed as the first argument into the function t. If no 'prefix' argument is provided, it will be constructed using module and function names ('.' replaced with '_'). To change the used directory without providing keyword argument 'dir' set DATALAD_TESTS_TEMP_DIR. Examples -------- >>> from os.path import exists >>> from datalad.utils import make_tempfile >>> with make_tempfile() as fname: ... k = open(fname, 'w').write('silly test') >>> assert not exists(fname) # was removed >>> with make_tempfile(content="blah") as fname: ... assert open(fname).read() == "blah" """ if tkwargs.get('mkdir', None) and content is not None: raise ValueError("mkdir=True while providing content makes no sense") tkwargs_ = get_tempfile_kwargs(tkwargs, wrapped=wrapped) # if DATALAD_TESTS_TEMP_DIR is set, use that as directory, # let mktemp handle it otherwise. However, an explicitly provided # dir=... will override this. mkdir = tkwargs_.pop('mkdir', False) filename = {False: tempfile.mktemp, True: tempfile.mkdtemp}[mkdir](**tkwargs_) # MIH: not clear to me why we need to perform this (possibly expensive) # resolve. It was already part of the original implementation # 008d9ab8cc3e0170c0a9b8479e80dee9ffe6eb7f filename = Path(filename).resolve() if content: (filename.write_bytes if isinstance(content, bytes) else filename.write_text)(content) # TODO globbing below can also be done with pathlib filename = str(filename) if __debug__: lgr.debug( 'Created temporary %s named %s', 'directory' if mkdir else 'file', filename) try: yield filename finally: # glob here for all files with the same name (-suffix) # would be useful whenever we requested .img filename, # and function creates .hdr as well # MIH: this is undocumented behavior, and undesired in the general # case. it should be made conditional and explicit lsuffix = len(tkwargs_.get('suffix', '')) filename_ = lsuffix and filename[:-lsuffix] or filename filenames = glob.glob(filename_ + '*') if len(filename_) < 3 or len(filenames) > 5: # For paranoid yoh who stepped into this already ones ;-) lgr.warning("It is unlikely that it was intended to remove all" " files matching %r. 
Skipping" % filename_) return for f in filenames: try: rmtemp(f) except OSError: # pragma: no cover pass def _path_(*p): """Given a path in POSIX" notation, regenerate one in native to the env one""" if on_windows: return op.join(*map(lambda x: op.join(*x.split('/')), p)) else: # Assume that all others as POSIX compliant so nothing to be done return op.join(*p) def get_timestamp_suffix(time_=None, prefix='-'): """Return a time stamp (full date and time up to second) primarily to be used for generation of log files names """ args = [] if time_ is not None: if isinstance(time_, int): time_ = time.gmtime(time_) args.append(time_) return time.strftime(prefix + TIMESTAMP_FMT, *args) # unused in -core def get_logfilename(dspath, cmd='datalad'): """Return a filename to use for logging under a dataset/repository directory would be created if doesn't exist, but dspath must exist and be a directory """ assert(exists(dspath)) assert(isdir(dspath)) ds_logdir = ensure_dir(dspath, '.git', 'datalad', 'logs') # TODO: use WEB_META_LOG whenever #789 merged return op.join(ds_logdir, 'crawl-%s.log' % get_timestamp_suffix()) def get_trace(edges, start, end, trace=None): """Return the trace/path to reach a node in a tree. Parameters ---------- edges : sequence(2-tuple) The tree given by a sequence of edges (parent, child) tuples. The nodes can be identified by any value and data type that supports the '==' operation. start : Identifier of the start node. Must be present as a value in the parent location of an edge tuple in order to be found. end : Identifier of the target/end node. Must be present as a value in the child location of an edge tuple in order to be found. trace : list Mostly useful for recursive calls, and used internally. Returns ------- None or list Returns a list with the trace to the target (the starts and the target are not included in the trace, hence if start and end are directly connected an empty list is returned), or None when no trace to the target can be found, or start and end are identical. """ # the term trace is used to avoid confusion with a path in the sense # of a filesystem path, but the analogy fits and nodes can be paths if trace is None: trace = [] if not edges: raise ValueError("no edges given") for cand in edges: cand_super, cand_sub = cand if cand_sub in trace: # only DAGs, skip any cyclic traces continue if trace and cand_super != trace[-1]: # only consider edges that lead off the end of the trace continue if not trace and cand_super != start: # we got nothing yet, and this edges is not matching the start continue if cand_sub == end: return trace # dive into potential subnodes cand_trace = get_trace( edges, start, end, trace + [cand_sub]) if cand_trace: return cand_trace return None def get_dataset_root(path): """Return the root of an existent dataset containing a given path The root path is returned in the same absolute or relative form as the input argument. If no associated dataset exists, or the input path doesn't exist, None is returned. If `path` is a symlink or something other than a directory, its the root dataset containing its parent directory will be reported. If none can be found, at a symlink at `path` is pointing to a dataset, `path` itself will be reported as the root. 
Parameters ---------- path : Path-like Returns ------- str or None """ path = str(path) suffix = '.git' altered = None if islink(path) or not isdir(path): altered = path path = dirname(path) apath = abspath(path) # while we can still go up while split(apath)[1]: if exists(op.join(path, suffix)): return path # new test path in the format we got it path = normpath(op.join(path, os.pardir)) # no luck, next round apath = abspath(path) # if we applied dirname() at the top, we give it another go with # the actual path, if it was itself a symlink, it could be the # top-level dataset itself if altered and exists(op.join(altered, suffix)): return altered return None # ATM used in datalad_crawler extension, so do not remove yet def try_multiple(ntrials, exception, base, f, *args, **kwargs): """Call f multiple times making exponentially growing delay between the calls""" for trial in range(1, ntrials+1): try: return f(*args, **kwargs) except exception as exc: if trial == ntrials: raise # just reraise on the last trial t = base ** trial lgr.warning("Caught %s on trial #%d. Sleeping %f and retrying", CapturedException(exc), trial, t) sleep(t) @optional_args def try_multiple_dec( f, ntrials=None, duration=0.1, exceptions=None, increment_type=None, exceptions_filter=None, logger=None, ): """Decorator to try function multiple times. Main purpose is to decorate functions dealing with removal of files/directories and which might need a few seconds to work correctly on Windows which takes its time to release files/directories. Parameters ---------- ntrials: int, optional duration: float, optional Seconds to sleep before retrying. increment_type: {None, 'exponential'} Note that if it is exponential, duration should typically be > 1.0 so it grows with higher power exceptions: Exception or tuple of Exceptions, optional Exception or a tuple of multiple exceptions, on which to retry exceptions_filter: callable, optional If provided, this function will be called with a caught exception instance. If function returns True - we will re-try, if False - exception will be re-raised without retrying. logger: callable, optional Logger to log upon failure. If not provided, will use stock logger at the level of 5 (heavy debug). """ if not exceptions: exceptions = (OSError, WindowsError, PermissionError) \ if on_windows else OSError if not ntrials: # Life goes fast on proper systems, no need to delay it much ntrials = 100 if on_windows else 10 if logger is None: def logger(*args, **kwargs): return lgr.log(5, *args, **kwargs) assert increment_type in {None, 'exponential'} @wraps(f) def _wrap_try_multiple_dec(*args, **kwargs): t = duration for trial in range(ntrials): try: return f(*args, **kwargs) except exceptions as exc: if exceptions_filter and not exceptions_filter(exc): raise if trial < ntrials - 1: if increment_type == 'exponential': t = duration ** (trial + 1) logger( "Caught %s on trial #%d. Sleeping %f and retrying", CapturedException(exc), trial, t) sleep(t) else: raise return _wrap_try_multiple_dec @try_multiple_dec def unlink(f): """'Robust' unlink. Would try multiple times On windows boxes there is evidence for a latency of more than a second until a file is considered no longer "in-use". WindowsError is not known on Linux, and if IOError or any other exception is thrown then if except statement has WindowsError in it -- NameError also see gh-2533 """ # Check for open files assert_no_open_files(f) return os.unlink(f) @try_multiple_dec def _rmtree(*args, **kwargs): """Just a helper to decorate shutil.rmtree. 
rmtree defined above does more and ideally should not itself be decorated since a recursive definition and does checks for open files inside etc - might be too runtime expensive """ return shutil.rmtree(*args, **kwargs) def slash_join(base, extension): """Join two strings with a '/', avoiding duplicate slashes If any of the strings is None the other is returned as is. """ if extension is None: return base if base is None: return extension return '/'.join( (base.rstrip('/'), extension.lstrip('/'))) # # IO Helpers # # unused in -core def open_r_encdetect(fname, readahead=1000): """Return a file object in read mode with auto-detected encoding This is helpful when dealing with files of unknown encoding. Parameters ---------- readahead: int, optional How many bytes to read for guessing the encoding type. If negative - full file will be read """ from chardet import detect import io # read some bytes from the file with open(fname, 'rb') as f: head = f.read(readahead) enc = detect(head) denc = enc.get('encoding', None) lgr.debug("Auto-detected encoding %s for file %s (confidence: %s)", denc, fname, enc.get('confidence', 'unknown')) return io.open(fname, encoding=denc) def read_file(fname, decode=True): """A helper to read file passing content via ensure_unicode Parameters ---------- decode: bool, optional if False, no ensure_unicode and file content returned as bytes """ with open(fname, 'rb') as f: content = f.read() return ensure_unicode(content) if decode else content def read_csv_lines(fname, dialect=None, readahead=16384, **kwargs): """A generator of dict records from a CSV/TSV Automatically guesses the encoding for each record to convert to UTF-8 Parameters ---------- fname: str Filename dialect: str, optional Dialect to specify to csv.reader. If not specified -- guessed from the file, if fails to guess, "excel-tab" is assumed readahead: int, optional How many bytes to read from the file to guess the type **kwargs Passed to `csv.reader` """ import csv if dialect is None: with open(fname) as tsvfile: # add robustness, use a sniffer try: dialect = csv.Sniffer().sniff(tsvfile.read(readahead)) except Exception as exc: lgr.warning( 'Could not determine file-format, assuming TSV: %s', CapturedException(exc) ) dialect = 'excel-tab' kw = dict(encoding='utf-8') with open(fname, 'r', **kw) as tsvfile: # csv.py doesn't do Unicode; encode temporarily as UTF-8: csv_reader = csv.reader( tsvfile, dialect=dialect, **kwargs ) header = None for row in csv_reader: # decode UTF-8 back to Unicode, cell by cell: row_unicode = map(ensure_unicode, row) if header is None: header = list(row_unicode) else: yield dict(zip(header, row_unicode)) def import_modules(modnames, pkg, msg="Failed to import {module}", log=lgr.debug): """Helper to import a list of modules without failing if N/A Parameters ---------- modnames: list of str List of module names to import pkg: str Package under which to import msg: str, optional Message template for .format() to log at DEBUG level if import fails. Keys {module} and {package} will be provided and ': {exception}' appended log: callable, optional Logger call to use for logging messages """ from importlib import import_module _globals = globals() mods_loaded = [] if pkg and not pkg in sys.modules: # with python 3.5.1 (ok with 3.5.5) somehow kept running into # Failed to import dlsub1: Parent module 'dltestm1' not loaded # while running the test. 
Preloading pkg resolved the issue import_module(pkg) for modname in modnames: try: _globals[modname] = mod = import_module( '.{}'.format(modname), pkg) mods_loaded.append(mod) except Exception as exc: from datalad.support.exceptions import CapturedException ce = CapturedException(exc) log((msg + ': {exception}').format( module=modname, package=pkg, exception=ce.message)) return mods_loaded def import_module_from_file(modpath, pkg=None, log=lgr.debug): """Import provided module given a path TODO: - RF/make use of it in pipeline.py which has similar logic - join with import_modules above? Parameters ---------- pkg: module, optional If provided, and modpath is under pkg.__path__, relative import will be used """ assert(modpath.endswith('.py')) # for now just for .py files log("Importing %s" % modpath) modname = basename(modpath)[:-3] relmodpath = None if pkg: for pkgpath in pkg.__path__: if path_is_subpath(modpath, pkgpath): # for now relying on having .py extension -- assertion above relmodpath = '.' + relpath(modpath[:-3], pkgpath).replace(sep, '.') break try: if relmodpath: from importlib import import_module mod = import_module(relmodpath, pkg.__name__) else: dirname_ = dirname(modpath) try: sys.path.insert(0, dirname_) mod = __import__(modname, level=0) finally: if dirname_ in sys.path: sys.path.pop(sys.path.index(dirname_)) else: log("Expected path %s to be within sys.path, but it was gone!" % dirname_) except Exception as e: raise RuntimeError( "Failed to import module from %s" % modpath) from e return mod def get_encoding_info(): """Return a dictionary with various encoding/locale information""" import sys, locale from collections import OrderedDict return OrderedDict([ ('default', sys.getdefaultencoding()), ('filesystem', sys.getfilesystemencoding()), ('locale.prefered', locale.getpreferredencoding()), ]) def get_envvars_info(): from collections import OrderedDict envs = [] for var, val in os.environ.items(): if ( var.startswith('PYTHON') or var.startswith('LC_') or var.startswith('GIT_') or var in ('LANG', 'LANGUAGE', 'PATH') ): envs.append((var, val)) return OrderedDict(envs) # This class is modified from Snakemake (v5.1.4) class SequenceFormatter(string.Formatter): """string.Formatter subclass with special behavior for sequences. This class delegates formatting of individual elements to another formatter object. Non-list objects are formatted by calling the delegate formatter's "format_field" method. List-like objects (list, tuple, set, frozenset) are formatted by formatting each element of the list according to the specified format spec using the delegate formatter and then joining the resulting strings with a separator (space by default). """ def __init__(self, separator=" ", element_formatter=string.Formatter(), *args, **kwargs): self.separator = separator self.element_formatter = element_formatter def format_element(self, elem, format_spec): """Format a single element For sequences, this is called once for each element in a sequence. For anything else, it is called on the entire object. It is intended to be overridden in subclases. 
""" return self.element_formatter.format_field(elem, format_spec) def format_field(self, value, format_spec): if isinstance(value, (list, tuple, set, frozenset)): return self.separator.join(self.format_element(v, format_spec) for v in value) else: return self.format_element(value, format_spec) # TODO: eventually we might want to make use of attr module class File(object): """Helper for a file entry in the create_tree/@with_tree It allows to define additional settings for entries """ def __init__(self, name, executable=False): """ Parameters ---------- name : str Name of the file executable: bool, optional Make it executable """ self.name = name self.executable = executable def __str__(self): return self.name def create_tree_archive(path, name, load, overwrite=False, archives_leading_dir=True): """Given an archive `name`, create under `path` with specified `load` tree """ from datalad.support.archives import compress_files dirname = file_basename(name) full_dirname = op.join(path, dirname) os.makedirs(full_dirname) create_tree(full_dirname, load, archives_leading_dir=archives_leading_dir) # create archive if archives_leading_dir: compress_files([dirname], name, path=path, overwrite=overwrite) else: compress_files(list(map(basename, glob.glob(op.join(full_dirname, '*')))), op.join(pardir, name), path=op.join(path, dirname), overwrite=overwrite) # remove original tree rmtree(full_dirname) def create_tree(path, tree, archives_leading_dir=True, remove_existing=False): """Given a list of tuples (name, load) create such a tree if load is a tuple itself -- that would create either a subtree or an archive with that content and place it into the tree if name ends with .tar.gz """ lgr.log(5, "Creating a tree under %s", path) if not exists(path): os.makedirs(path) if isinstance(tree, dict): tree = tree.items() for file_, load in tree: if isinstance(file_, File): executable = file_.executable name = file_.name else: executable = False name = file_ full_name = op.join(path, name) if remove_existing and lexists(full_name): rmtree(full_name, chmod_files=True) if isinstance(load, (tuple, list, dict)): if name.endswith('.tar.gz') or name.endswith('.tar') or name.endswith('.zip'): create_tree_archive( path, name, load, archives_leading_dir=archives_leading_dir) else: create_tree( full_name, load, archives_leading_dir=archives_leading_dir, remove_existing=remove_existing) else: open_func = open if full_name.endswith('.gz'): open_func = gzip.open elif full_name.split('.')[-1] in ('xz', 'lzma'): import lzma open_func = lzma.open with open_func(full_name, "wb") as f: f.write(ensure_bytes(load, 'utf-8')) if executable: os.chmod(full_name, os.stat(full_name).st_mode | stat.S_IEXEC) def get_suggestions_msg(values, known, sep="\n "): """Return a formatted string with suggestions for values given the known ones """ import difflib suggestions = [] for value in ensure_list(values): # might not want to do it if we change presentation below suggestions += difflib.get_close_matches(value, known) suggestions = unique(suggestions) msg = "Did you mean any of these?" if suggestions: if '\n' in sep: # if separator includes new line - we add entire separator right away msg += sep else: msg += ' ' return msg + "%s\n" % sep.join(suggestions) return '' def bytes2human(n, format='%(value).1f %(symbol)sB'): """ Convert n bytes into a human readable string based on format. 
symbols can be either "customary", "customary_ext", "iec" or "iec_ext", see: http://goo.gl/kTQMs >>> from datalad.utils import bytes2human >>> bytes2human(1) '1.0 B' >>> bytes2human(1024) '1.0 KB' >>> bytes2human(1048576) '1.0 MB' >>> bytes2human(1099511627776127398123789121) '909.5 YB' >>> bytes2human(10000, "%(value).1f %(symbol)s/sec") '9.8 K/sec' >>> # precision can be adjusted by playing with %f operator >>> bytes2human(10000, format="%(value).5f %(symbol)s") '9.76562 K' Taken from: http://goo.gl/kTQMs and subsequently simplified Original Author: Giampaolo Rodola' <g.rodola [AT] gmail [DOT] com> License: MIT """ n = int(n) if n < 0: raise ValueError("n < 0") symbols = ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y') prefix = {} for i, s in enumerate(symbols[1:]): prefix[s] = 1 << (i + 1) * 10 for symbol in reversed(symbols[1:]): if n >= prefix[symbol]: value = float(n) / prefix[symbol] return format % locals() return format % dict(symbol=symbols[0], value=n) def quote_cmdlinearg(arg): """Perform platform-appropriate argument quoting""" # https://stackoverflow.com/a/15262019 return '"{}"'.format( arg.replace('"', '""') ) if on_windows else shlex_quote(arg) def guard_for_format(arg): """Replace { and } with {{ and }} To be used in cases if arg is not expected to have provided by user .format() placeholders, but 'arg' might become a part of a composite passed to .format(), e.g. via 'Run' """ return arg.replace('{', '{{').replace('}', '}}') def join_cmdline(args): """Join command line args into a string using quote_cmdlinearg """ return ' '.join(map(quote_cmdlinearg, args)) def split_cmdline(s): """Perform platform-appropriate command line splitting. Identical to `shlex.split()` on non-windows platforms. Modified from https://stackoverflow.com/a/35900070 """ if not on_windows: return shlex_split(s) # the rest is for windows RE_CMD_LEX = r'''"((?:""|\\["\\]|[^"])*)"?()|(\\\\(?=\\*")|\\")|(&&?|\|\|?|\d?>|[<])|([^\s"&|<>]+)|(\s+)|(.)''' args = [] accu = None # collects pieces of one arg for qs, qss, esc, pipe, word, white, fail in re.findall(RE_CMD_LEX, s): if word: pass # most frequent elif esc: word = esc[1] elif white or pipe: if accu is not None: args.append(accu) if pipe: args.append(pipe) accu = None continue elif fail: raise ValueError("invalid or incomplete shell string") elif qs: word = qs.replace('\\"', '"').replace('\\\\', '\\') if platform == 0: word = word.replace('""', '"') else: word = qss # may be even empty; must be last accu = (accu or '') + word if accu is not None: args.append(accu) return args def get_wrapped_class(wrapped): """Determine the command class a wrapped __call__ belongs to""" mod = sys.modules[wrapped.__module__] command_class_name = wrapped.__qualname__.split('.')[-2] _func_class = mod.__dict__[command_class_name] lgr.debug("Determined class of decorated function: %s", _func_class) return _func_class def _make_assure_kludge(fn): old_name = fn.__name__.replace("ensure", "assure") @wraps(fn) def compat_fn(*args, **kwargs): warnings.warn( "{} is deprecated and will be removed in a future release. " "Use {} instead." .format(old_name, fn.__name__), DeprecationWarning) return fn(*args, **kwargs) compat_fn.__doc__ = ("Note: This function is deprecated. Use {} instead." 
.format(fn.__name__)) return compat_fn assure_tuple_or_list = _make_assure_kludge(ensure_tuple_or_list) assure_iter = _make_assure_kludge(ensure_iter) assure_list = _make_assure_kludge(ensure_list) assure_list_from_str = _make_assure_kludge(ensure_list_from_str) assure_dict_from_str = _make_assure_kludge(ensure_dict_from_str) assure_bytes = _make_assure_kludge(ensure_bytes) assure_unicode = _make_assure_kludge(ensure_unicode) assure_bool = _make_assure_kludge(ensure_bool) assure_dir = _make_assure_kludge(ensure_dir) lgr.log(5, "Done importing datalad.utils") def check_symlink_capability(path, target): """helper similar to datalad.tests.utils.has_symlink_capability However, for use in a datalad command context, we shouldn't assume to be able to write to tmpfile and also not import a whole lot from datalad's test machinery. Finally, we want to know, whether we can create a symlink at a specific location, not just somewhere. Therefore use arbitrary path to test-build a symlink and delete afterwards. Suitable location can therefore be determined by high lever code. Parameters ---------- path: Path target: Path Returns ------- bool """ try: target.touch() path.symlink_to(target) return True except Exception: return False finally: if path.exists(): path.unlink() if target.exists(): target.unlink()
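The check_symlink_capability helper defined just above is easiest to read from a call site. A minimal sketch, assuming the module is importable as datalad.utils; the two probe paths below are made up purely for illustration and are created and removed by the probe itself:

from pathlib import Path
from datalad.utils import check_symlink_capability

# Probe whether a symlink can be created at a candidate location.
# Both files are created by the probe and removed again afterwards.
probe_link = Path('/tmp/datalad-symlink-probe')      # hypothetical path
probe_target = Path('/tmp/datalad-symlink-target')   # hypothetical path
if check_symlink_capability(probe_link, probe_target):
    print("symlinks are available here")
else:
    print("no symlink support; fall back to copies")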
rmtree
To remove git-annex .git it is needed to make all files and directories
    writable again first

    Parameters
    ----------
    path: Path or str
      Path to remove
    chmod_files : string or bool, optional
       Whether to make files writable also before removal. Usually it is
       just a matter of directories to have write permissions. If 'auto'
       it would chmod files on windows by default
    children_only : bool, optional
      If set, all files and subdirectories would be removed while the path
      itself (must be a directory) would be preserved
    `*args` :
    `**kwargs` :
       Passed into shutil.rmtree call
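A minimal usage sketch of the call signature documented above, assuming the module is importable as datalad.utils; the repository and working-directory paths are hypothetical:

from datalad.utils import rmtree

# Restore write permissions as needed and remove the whole tree:
rmtree('/tmp/throwaway-annex-repo')                      # hypothetical path
# Alternatively, keep the directory itself but remove everything inside it:
rmtree('/tmp/throwaway-workdir', children_only=True)     # hypothetical path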
# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*- # ex: set sts=4 ts=4 sw=4 et: # ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## # # See COPYING file distributed along with the datalad package for the # copyright and license terms. # # ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## import collections from collections.abc import Callable import re import builtins import time import logging import shutil import os import sys import tempfile from tempfile import NamedTemporaryFile import platform import gc import glob import gzip import stat import string import warnings import os.path as op from copy import copy as shallow_copy from contextlib import contextmanager from functools import ( lru_cache, wraps, ) from time import sleep import inspect from itertools import tee # this import is required because other modules import opj from here. from os.path import join as opj from os.path import ( abspath, basename, commonprefix, curdir, dirname, exists, expanduser, expandvars, isabs, isdir, islink, lexists, normpath, pardir, relpath, sep, split, splitdrive ) import posixpath from shlex import ( quote as shlex_quote, split as shlex_split, ) # from datalad.dochelpers import get_docstring_split from datalad.consts import TIMESTAMP_FMT from datalad.support.exceptions import CapturedException unicode_srctypes = str, bytes lgr = logging.getLogger("datalad.utils") lgr.log(5, "Importing datalad.utils") # # Some useful variables # platform_system = platform.system().lower() on_windows = platform_system == 'windows' on_osx = platform_system == 'darwin' on_linux = platform_system == 'linux' on_msys_tainted_paths = on_windows \ and 'MSYS_NO_PATHCONV' not in os.environ \ and os.environ.get('MSYSTEM', '')[:4] in ('MSYS', 'MING') # Takes ~200msec, so should not be called at import time @lru_cache() # output should not change through life time of datalad process def get_linux_distribution(): """Compatibility wrapper for {platform,distro}.linux_distribution(). """ if hasattr(platform, "linux_distribution"): # Use deprecated (but faster) method if it's available. with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=DeprecationWarning) result = platform.linux_distribution() else: import distro # We require this for Python 3.8 and above. result = distro.linux_distribution(full_distribution_name=False) return result # Those weren't used for any critical decision making, thus we just set them to None # Use get_linux_distribution() directly where needed linux_distribution_name = linux_distribution_release = None # Maximal length of cmdline string # Query the system and use hardcoded "knowledge" if None # probably getconf ARG_MAX might not be available # The last one would be the most conservative/Windows CMD_MAX_ARG_HARDCODED = 2097152 if on_linux else 262144 if on_osx else 32767 try: CMD_MAX_ARG = os.sysconf('SC_ARG_MAX') assert CMD_MAX_ARG > 0 if CMD_MAX_ARG > CMD_MAX_ARG_HARDCODED * 1e6: # workaround for some kind of a bug which comes up with python 3.4 # see https://github.com/datalad/datalad/issues/3150 # or on older CentOS with conda and python as new as 3.9 # see https://github.com/datalad/datalad/issues/5943 # TODO: let Yarik know that the world is a paradise now whenever 1e6 # is not large enough CMD_MAX_ARG = min(CMD_MAX_ARG, CMD_MAX_ARG_HARDCODED) except Exception as exc: # ATM (20181005) SC_ARG_MAX available only on POSIX systems # so exception would be thrown e.g. 
on Windows, or # somehow during Debian build for nd14.04 it is coming up with -1: # https://github.com/datalad/datalad/issues/3015 CMD_MAX_ARG = CMD_MAX_ARG_HARDCODED lgr.debug( "Failed to query or got useless SC_ARG_MAX sysconf, " "will use hardcoded value: %s", exc) # Even with all careful computations we do, due to necessity to account for # environment and what not, we still could not figure out "exact" way to # estimate it, but it was shown that 300k safety margin on linux was sufficient. # https://github.com/datalad/datalad/pull/2977#issuecomment-436264710 # 300k is ~15%, so to be safe, and for paranoid us we will just use up to 50% # of the length for "safety margin". We might probably still blow due to # env vars, unicode, etc... so any hard limit imho is not a proper solution CMD_MAX_ARG = int(0.5 * CMD_MAX_ARG) lgr.debug( "Maximal length of cmdline string (adjusted for safety margin): %d", CMD_MAX_ARG) # # Little helpers # # `getargspec` has been deprecated in Python 3. ArgSpecFake = collections.namedtuple( "ArgSpecFake", ["args", "varargs", "keywords", "defaults"]) def getargspec(func, *, include_kwonlyargs=False): """Compat shim for getargspec deprecated in python 3. The main difference from inspect.getargspec (and inspect.getfullargspec for that matter) is that by using inspect.signature we are providing correct args/defaults for functools.wraps'ed functions. `include_kwonlyargs` option was added to centralize getting all args, even the ones which are kwonly (follow the ``*,``). For internal use and not advised for use in 3rd party code. Please use inspect.signature directly. """ # We use signature, and not getfullargspec, because only signature properly # "passes" args from a functools.wraps decorated function. # Note: getfullargspec works Ok on wrapt-decorated functions f_sign = inspect.signature(func) # Loop through parameters and compose argspec args4 = [[], None, None, {}] # Collect all kwonlyargs into a dedicated dict - name: default kwonlyargs = {} # shortcuts args, defaults = args4[0], args4[3] P = inspect.Parameter for p_name, p in f_sign.parameters.items(): if p.kind in (P.POSITIONAL_ONLY, P.POSITIONAL_OR_KEYWORD): assert not kwonlyargs # yoh: must not come after kwonlyarg args.append(p_name) if p.default is not P.empty: defaults[p_name] = p.default elif p.kind == P.VAR_POSITIONAL: args4[1] = p_name elif p.kind == P.VAR_KEYWORD: args4[2] = p_name elif p.kind == P.KEYWORD_ONLY: assert p.default is not P.empty kwonlyargs[p_name] = p.default if kwonlyargs: if not include_kwonlyargs: raise ValueError( 'Function has keyword-only parameters or annotations, either use ' 'inspect.signature() API which can support them, or provide include_kwonlyargs=True ' 'to this function' ) else: args.extend(list(kwonlyargs)) defaults.update(kwonlyargs) # harmonize defaults to how original getargspec returned them -- just a tuple args4[3] = None if not defaults else tuple(defaults.values()) return ArgSpecFake(*args4) def any_re_search(regexes, value): """Return if any of regexes (list or str) searches successfully for value""" for regex in ensure_tuple_or_list(regexes): if re.search(regex, value): return True return False def not_supported_on_windows(msg=None): """A little helper to be invoked to consistently fail whenever functionality is not supported (yet) on Windows """ if on_windows: raise NotImplementedError("This functionality is not yet implemented for Windows OS" + (": %s" % msg if msg else "")) def get_home_envvars(new_home): """Return dict with env variables to be adjusted for a 
new HOME Only variables found in current os.environ are adjusted. Parameters ---------- new_home: str or Path New home path, in native to OS "schema" """ new_home = str(new_home) out = {'HOME': new_home} if on_windows: # requires special handling, since it has a number of relevant variables # and also Python changed its behavior and started to respect USERPROFILE only # since python 3.8: https://bugs.python.org/issue36264 out['USERPROFILE'] = new_home out['HOMEDRIVE'], out['HOMEPATH'] = splitdrive(new_home) return {v: val for v, val in out.items() if v in os.environ} def shortened_repr(value, l=30): try: if hasattr(value, '__repr__') and (value.__repr__ is not object.__repr__): value_repr = repr(value) if not value_repr.startswith('<') and len(value_repr) > l: value_repr = "<<%s++%d chars++%s>>" % ( value_repr[:l - 16], len(value_repr) - (l - 16 + 4), value_repr[-4:] ) elif value_repr.startswith('<') and value_repr.endswith('>') and ' object at 0x': raise ValueError("I hate those useless long reprs") else: raise ValueError("gimme class") except Exception as e: value_repr = "<%s>" % value.__class__.__name__.split('.')[-1] return value_repr def __auto_repr__(obj): attr_names = tuple() if hasattr(obj, '__dict__'): attr_names += tuple(obj.__dict__.keys()) if hasattr(obj, '__slots__'): attr_names += tuple(obj.__slots__) items = [] for attr in sorted(set(attr_names)): if attr.startswith('_'): continue value = getattr(obj, attr) # TODO: should we add this feature to minimize some talktative reprs # such as of URL? #if value is None: # continue items.append("%s=%s" % (attr, shortened_repr(value))) return "%s(%s)" % (obj.__class__.__name__, ', '.join(items)) def auto_repr(cls): """Decorator for a class to assign it an automagic quick and dirty __repr__ It uses public class attributes to prepare repr of a class Original idea: http://stackoverflow.com/a/27799004/1265472 """ cls.__repr__ = __auto_repr__ return cls def _is_stream_tty(stream): try: # TODO: check on windows if hasattr check would work correctly and # add value: return stream.isatty() except ValueError as exc: # Who knows why it is a ValueError, but let's try to be specific # If there is a problem with I/O - non-interactive, otherwise reraise if "I/O" in str(exc): return False raise def is_interactive(): """Return True if all in/outs are open and tty. Note that in a somewhat abnormal case where e.g. stdin is explicitly closed, and any operation on it would raise a `ValueError("I/O operation on closed file")` exception, this function would just return False, since the session cannot be used interactively. 
""" return all(_is_stream_tty(s) for s in (sys.stdin, sys.stdout, sys.stderr)) def get_ipython_shell(): """Detect if running within IPython and returns its `ip` (shell) object Returns None if not under ipython (no `get_ipython` function) """ try: return get_ipython() except NameError: return None def md5sum(filename): """Compute an MD5 sum for the given file """ from datalad.support.digests import Digester return Digester(digests=['md5'])(filename)['md5'] # unused in -core def sorted_files(path): """Return a (sorted) list of files under path """ return sorted(sum([[op.join(r, f)[len(path) + 1:] for f in files] for r, d, files in os.walk(path) if not '.git' in r], [])) _encoded_dirsep = r'\\' if on_windows else r'/' _VCS_REGEX = r'%s\.(?:git|gitattributes|svn|bzr|hg)(?:%s|$)' % ( _encoded_dirsep, _encoded_dirsep) _DATALAD_REGEX = r'%s\.(?:datalad)(?:%s|$)' % ( _encoded_dirsep, _encoded_dirsep) def find_files(regex, topdir=curdir, exclude=None, exclude_vcs=True, exclude_datalad=False, dirs=False): """Generator to find files matching regex Parameters ---------- regex: basestring exclude: basestring, optional Matches to exclude exclude_vcs: If True, excludes commonly known VCS subdirectories. If string, used as regex to exclude those files (regex: `%r`) exclude_datalad: If True, excludes files known to be datalad meta-data files (e.g. under .datalad/ subdirectory) (regex: `%r`) topdir: basestring, optional Directory where to search dirs: bool, optional Whether to match directories as well as files """ for dirpath, dirnames, filenames in os.walk(topdir): names = (dirnames + filenames) if dirs else filenames # TODO: might want to uniformize on windows to use '/' paths = (op.join(dirpath, name) for name in names) for path in filter(re.compile(regex).search, paths): path = path.rstrip(sep) if exclude and re.search(exclude, path): continue if exclude_vcs and re.search(_VCS_REGEX, path): continue if exclude_datalad and re.search(_DATALAD_REGEX, path): continue yield path find_files.__doc__ %= (_VCS_REGEX, _DATALAD_REGEX) def expandpath(path, force_absolute=True): """Expand all variables and user handles in a path. By default return an absolute path """ path = expandvars(expanduser(path)) if force_absolute: path = abspath(path) return path def posix_relpath(path, start=None): """Behave like os.path.relpath, but always return POSIX paths... on any platform.""" # join POSIX style return posixpath.join( # split and relpath native style # python2.7 ntpath implementation of relpath cannot handle start=None *split( relpath(path, start=start if start is not None else ''))) def is_explicit_path(path): """Return whether a path explicitly points to a location Any absolute path, or relative path starting with either '../' or './' is assumed to indicate a location on the filesystem. 
Any other path format is not considered explicit.""" path = expandpath(path, force_absolute=False) return isabs(path) \ or path.startswith(os.curdir + os.sep) \ or path.startswith(os.pardir + os.sep) # handle this dance once, and import pathlib from here # in all other places from pathlib import ( Path, PurePath, PurePosixPath, ) def rotree(path, ro=True, chmod_files=True): """To make tree read-only or writable Parameters ---------- path : string Path to the tree/directory to chmod ro : bool, optional Whether to make it R/O (default) or RW chmod_files : bool, optional Whether to operate also on files (not just directories) """ if ro: chmod = lambda f: os.chmod(f, os.stat(f).st_mode & ~stat.S_IWRITE) else: chmod = lambda f: os.chmod(f, os.stat(f).st_mode | stat.S_IWRITE | stat.S_IREAD) for root, dirs, files in os.walk(path, followlinks=False): if chmod_files: for f in files: fullf = op.join(root, f) # might be the "broken" symlink which would fail to stat etc if exists(fullf): chmod(fullf) chmod(root) # MASKED: rmtree function (lines 469-518) def rmdir(path, *args, **kwargs): """os.rmdir with our optional checking for open files""" assert_no_open_files(path) os.rmdir(path) def get_open_files(path, log_open=False): """Get open files under a path Note: This function is very slow on Windows. Parameters ---------- path : str File or directory to check for open files under log_open : bool or int If set - logger level to use Returns ------- dict path : pid """ # Original idea: https://stackoverflow.com/a/11115521/1265472 import psutil files = {} # since the ones returned by psutil would not be aware of symlinks in the # path we should also get realpath for path # do absolute() in addition to always get an absolute path # even with non-existing paths on windows path = str(Path(path).resolve().absolute()) for proc in psutil.process_iter(): try: open_paths = [p.path for p in proc.open_files()] + [proc.cwd()] for p in open_paths: # note: could be done more efficiently so we do not # renormalize path over and over again etc if path_startswith(p, path): files[p] = proc # Catch a race condition where a process ends # before we can examine its files except psutil.NoSuchProcess: pass except psutil.AccessDenied: pass if files and log_open: lgr.log(log_open, "Open files under %s: %s", path, files) return files _assert_no_open_files_cfg = os.environ.get('DATALAD_ASSERT_NO_OPEN_FILES') if _assert_no_open_files_cfg: def assert_no_open_files(path): files = get_open_files(path, log_open=40) if _assert_no_open_files_cfg == 'assert': assert not files, "Got following files still open: %s" % ','.join(files) elif files: if _assert_no_open_files_cfg == 'pdb': import pdb pdb.set_trace() elif _assert_no_open_files_cfg == 'epdb': import epdb epdb.serve() pass # otherwise we would just issue that error message in the log else: def assert_no_open_files(*args, **kwargs): pass def rmtemp(f, *args, **kwargs): """Wrapper to centralize removing of temp files so we could keep them around It will not remove the temporary file/directory if DATALAD_TESTS_TEMP_KEEP environment variable is defined """ if not os.environ.get('DATALAD_TESTS_TEMP_KEEP'): if not os.path.lexists(f): lgr.debug("Path %s does not exist, so can't be removed", f) return lgr.log(5, "Removing temp file: %s", f) # Can also be a directory if isdir(f): rmtree(f, *args, **kwargs) else: unlink(f) else: lgr.info("Keeping temp file: %s", f) def file_basename(name, return_ext=False): """ Strips up to 2 extensions of length up to 4 characters and starting with alpha not a 
digit, so we could get rid of .tar.gz etc """ bname = basename(name) fbname = re.sub(r'(\.[a-zA-Z_]\S{1,4}){0,2}$', '', bname) if return_ext: return fbname, bname[len(fbname) + 1:] else: return fbname # unused in -core def escape_filename(filename): """Surround filename in "" and escape " in the filename """ filename = filename.replace('"', r'\"').replace('`', r'\`') filename = '"%s"' % filename return filename # unused in -core def encode_filename(filename): """Encode unicode filename """ if isinstance(filename, str): return filename.encode(sys.getfilesystemencoding()) else: return filename # unused in -core def decode_input(s): """Given input string/bytes, decode according to stdin codepage (or UTF-8) if not defined If fails -- issue warning and decode allowing for errors being replaced """ if isinstance(s, str): return s else: encoding = sys.stdin.encoding or 'UTF-8' try: return s.decode(encoding) except UnicodeDecodeError as exc: lgr.warning( "Failed to decode input string using %s encoding. " "Decoding allowing for errors", encoding) return s.decode(encoding, errors='replace') # unused in -core if on_windows: def lmtime(filepath, mtime): """Set mtime for files. On Windows a merely adapter to os.utime """ os.utime(filepath, (time.time(), mtime)) else: def lmtime(filepath, mtime): """Set mtime for files, while not de-referencing symlinks. To overcome absence of os.lutime Works only on linux and OSX ATM """ from .cmd import WitlessRunner # convert mtime to format touch understands [[CC]YY]MMDDhhmm[.SS] smtime = time.strftime("%Y%m%d%H%M.%S", time.localtime(mtime)) lgr.log(3, "Setting mtime for %s to %s == %s", filepath, mtime, smtime) WitlessRunner().run(['touch', '-h', '-t', '%s' % smtime, filepath]) filepath = Path(filepath) rfilepath = filepath.resolve() if filepath.is_symlink() and rfilepath.exists(): # trust no one - adjust also of the target file # since it seemed like downloading under OSX (was it using curl?) # didn't bother with timestamps lgr.log(3, "File is a symlink to %s Setting mtime for it to %s", rfilepath, mtime) os.utime(str(rfilepath), (time.time(), mtime)) # doesn't work on OSX # Runner().run(['touch', '-h', '-d', '@%s' % mtime, filepath]) def ensure_tuple_or_list(obj): """Given an object, wrap into a tuple if not list or tuple """ if isinstance(obj, (list, tuple)): return obj return (obj,) def ensure_iter(s, cls, copy=False, iterate=True): """Given not a list, would place it into a list. If None - empty list is returned Parameters ---------- s: list or anything cls: class Which iterable class to ensure copy: bool, optional If correct iterable is passed, it would generate its shallow copy iterate: bool, optional If it is not a list, but something iterable (but not a str) iterate over it. """ if isinstance(s, cls): return s if not copy else shallow_copy(s) elif isinstance(s, str): return cls((s,)) elif iterate and hasattr(s, '__iter__'): return cls(s) elif s is None: return cls() else: return cls((s,)) def ensure_list(s, copy=False, iterate=True): """Given not a list, would place it into a list. If None - empty list is returned Parameters ---------- s: list or anything copy: bool, optional If list is passed, it would generate a shallow copy of the list iterate: bool, optional If it is not a list, but something iterable (but not a str) iterate over it. 
""" return ensure_iter(s, list, copy=copy, iterate=iterate) def ensure_list_from_str(s, sep='\n'): """Given a multiline string convert it to a list of return None if empty Parameters ---------- s: str or list """ if not s: return None if isinstance(s, list): return s return s.split(sep) def ensure_dict_from_str(s, **kwargs): """Given a multiline string with key=value items convert it to a dictionary Parameters ---------- s: str or dict Returns None if input s is empty """ if not s: return None if isinstance(s, dict): return s out = {} for value_str in ensure_list_from_str(s, **kwargs): if '=' not in value_str: raise ValueError("{} is not in key=value format".format(repr(value_str))) k, v = value_str.split('=', 1) if k in out: err = "key {} was already defined in {}, but new value {} was provided".format(k, out, v) raise ValueError(err) out[k] = v return out def ensure_bytes(s, encoding='utf-8'): """Convert/encode unicode string to bytes. If `s` isn't a string, return it as is. Parameters ---------- encoding: str, optional Encoding to use. "utf-8" is the default """ if not isinstance(s, str): return s return s.encode(encoding) def ensure_unicode(s, encoding=None, confidence=None): """Convert/decode bytestring to unicode. If `s` isn't a bytestring, return it as is. Parameters ---------- encoding: str, optional Encoding to use. If None, "utf-8" is tried, and then if not a valid UTF-8, encoding will be guessed confidence: float, optional A value between 0 and 1, so if guessing of encoding is of lower than specified confidence, ValueError is raised """ if not isinstance(s, bytes): return s if encoding is None: # Figure out encoding, defaulting to 'utf-8' which is our common # target in contemporary digital society try: return s.decode('utf-8') except UnicodeDecodeError as exc: lgr.debug("Failed to decode a string as utf-8: %s", CapturedException(exc)) # And now we could try to guess from chardet import detect enc = detect(s) denc = enc.get('encoding', None) if denc: denc_confidence = enc.get('confidence', 0) if confidence is not None and denc_confidence < confidence: raise ValueError( "Failed to auto-detect encoding with high enough " "confidence. Highest confidence was %s for %s" % (denc_confidence, denc) ) lgr.log(5, "Auto-detected encoding to be %s", denc) return s.decode(denc) else: raise ValueError( "Could not decode value as utf-8, or to guess its encoding: %s" % repr(s) ) else: return s.decode(encoding) def ensure_bool(s): """Convert value into boolean following convention for strings to recognize on,True,yes as True, off,False,no as False """ if isinstance(s, str): if s.isdigit(): return bool(int(s)) sl = s.lower() if sl in {'y', 'yes', 'true', 'on'}: return True elif sl in {'n', 'no', 'false', 'off'}: return False else: raise ValueError("Do not know how to treat %r as a boolean" % s) return bool(s) def as_unicode(val, cast_types=object): """Given an arbitrary value, would try to obtain unicode value of it For unicode it would return original value, for python2 str or python3 bytes it would use ensure_unicode, for None - an empty (unicode) string, and for any other type (see `cast_types`) - would apply the unicode constructor. 
If value is not an instance of `cast_types`, TypeError is thrown Parameters ---------- cast_types: type Which types to cast to unicode by providing to constructor """ if val is None: return u'' elif isinstance(val, str): return val elif isinstance(val, unicode_srctypes): return ensure_unicode(val) elif isinstance(val, cast_types): return str(val) else: raise TypeError( "Value %r is not of any of known or provided %s types" % (val, cast_types)) def unique(seq, key=None, reverse=False): """Given a sequence return a list only with unique elements while maintaining order This is the fastest solution. See https://www.peterbe.com/plog/uniqifiers-benchmark and http://stackoverflow.com/a/480227/1265472 for more information. Enhancement -- added ability to compare for uniqueness using a key function Parameters ---------- seq: Sequence to analyze key: callable, optional Function to call on each element so we could decide not on a full element, but on its member etc reverse: bool, optional If True, uniqueness checked in the reverse order, so that the later ones will take the order """ seen = set() seen_add = seen.add trans = reversed if reverse else lambda x: x if not key: out = [x for x in trans(seq) if not (x in seen or seen_add(x))] else: # OPT: could be optimized, since key is called twice, but for our cases # should be just as fine out = [x for x in trans(seq) if not (key(x) in seen or seen_add(key(x)))] return out[::-1] if reverse else out def all_same(items): """Quick check if all items are the same. Identical to a check like len(set(items)) == 1 but should be more efficient while working on generators, since would return False as soon as any difference detected thus possibly avoiding unnecessary evaluations """ first = True first_item = None for item in items: if first: first = False first_item = item else: if item != first_item: return False # So we return False if was empty return not first def map_items(func, v): """A helper to apply `func` to all elements (keys and values) within dict No type checking of values passed to func is done, so `func` should be resilient to values which it should not handle Initial usecase - apply_recursive(url_fragment, ensure_unicode) """ # map all elements within item return v.__class__( item.__class__(map(func, item)) for item in v.items() ) def partition(items, predicate=bool): """Partition `items` by `predicate`. Parameters ---------- items : iterable predicate : callable A function that will be mapped over each element in `items`. The elements will partitioned based on whether the return value is false or true. Returns ------- A tuple with two generators, the first for 'false' items and the second for 'true' ones. 
Notes ----- Taken from Peter Otten's snippet posted at https://nedbatchelder.com/blog/201306/filter_a_list_into_two_parts.html """ a, b = tee((predicate(item), item) for item in items) return ((item for pred, item in a if not pred), (item for pred, item in b if pred)) def generate_chunks(container, size): """Given a container, generate chunks from it with size up to `size` """ # There could be a "smarter" solution but I think this would suffice assert size > 0, "Size should be non-0 positive" while container: yield container[:size] container = container[size:] def generate_file_chunks(files, cmd=None): """Given a list of files, generate chunks of them to avoid exceeding cmdline length Parameters ---------- files: list of str cmd: str or list of str, optional Command to account for as well """ files = ensure_list(files) cmd = ensure_list(cmd) maxl = max(map(len, files)) if files else 0 chunk_size = max( 1, # should at least be 1. If blows then - not our fault (CMD_MAX_ARG - sum((len(x) + 3) for x in cmd) - 4 # for '--' below ) // (maxl + 3) # +3 for possible quotes and a space ) # TODO: additional treatment for "too many arguments"? although # as https://github.com/datalad/datalad/issues/1883#issuecomment # -436272758 # shows there seems to be no hardcoded limit on # of arguments, # but may be we decide to go for smth like follow to be on safe side # chunk_size = min(10240 - len(cmd), chunk_size) file_chunks = generate_chunks(files, chunk_size) return file_chunks # # Generators helpers # def saved_generator(gen): """Given a generator returns two generators, where 2nd one just replays So the first one would be going through the generated items and 2nd one would be yielding saved items """ saved = [] def gen1(): for x in gen: # iterating over original generator saved.append(x) yield x def gen2(): for x in saved: # yielding saved entries yield x return gen1(), gen2() # # Decorators # # Originally better_wraps was created to provide `wrapt`-based, instead of # `functools.wraps` implementation to preserve the correct signature of the # decorated function. By using inspect.signature in our getargspec, which # works fine on `functools.wraps`ed functions, we mediated this necessity. better_wraps = wraps # Borrowed from pandas # Copyright: 2011-2014, Lambda Foundry, Inc. and PyData Development Team # License: BSD-3 def optional_args(decorator): """allows a decorator to take optional positional and keyword arguments. Assumes that taking a single, callable, positional argument means that it is decorating a function, i.e. something like this:: @my_decorator def function(): pass Calls decorator with decorator(f, `*args`, `**kwargs`)""" @better_wraps(decorator) def wrapper(*args, **kwargs): def dec(f): return decorator(f, *args, **kwargs) is_decorating = not kwargs and len(args) == 1 and isinstance(args[0], Callable) if is_decorating: f = args[0] args = [] return dec(f) else: return dec return wrapper # TODO: just provide decorators for tempfile.mk* functions. This is ugly! def get_tempfile_kwargs(tkwargs=None, prefix="", wrapped=None): """Updates kwargs to be passed to tempfile. 
calls depending on env vars """ if tkwargs is None: tkwargs_ = {} else: # operate on a copy of tkwargs to avoid any side-effects tkwargs_ = tkwargs.copy() # TODO: don't remember why I had this one originally # if len(targs)<2 and \ if 'prefix' not in tkwargs_: tkwargs_['prefix'] = '_'.join( ['datalad_temp'] + ([prefix] if prefix else []) + ([''] if (on_windows or not wrapped) else [wrapped.__name__])) directory = os.environ.get('TMPDIR') if directory and 'dir' not in tkwargs_: tkwargs_['dir'] = directory return tkwargs_ @optional_args def line_profile(func): """Q&D helper to line profile the function and spit out stats """ import line_profiler prof = line_profiler.LineProfiler() @wraps(func) def _wrap_line_profile(*args, **kwargs): try: pfunc = prof(func) return pfunc(*args, **kwargs) finally: prof.print_stats() return _wrap_line_profile # unused in -core @optional_args def collect_method_callstats(func): """Figure out methods which call the method repeatedly on the same instance Use case(s): - .repo is expensive since does all kinds of checks. - .config is expensive transitively since it calls .repo each time TODO: - fancy one could look through the stack for the same id(self) to see if that location is already in memo. That would hint to the cases where object is not passed into underlying functions, causing them to redo the same work over and over again - ATM might flood with all "1 lines" calls which are not that informative. The underlying possibly suboptimal use might be coming from their callers. It might or not relate to the previous TODO """ from collections import defaultdict import traceback from time import time memo = defaultdict(lambda: defaultdict(int)) # it will be a dict of lineno: count # gross timing times = [] toppath = dirname(__file__) + sep @wraps(func) def _wrap_collect_method_callstats(*args, **kwargs): try: self = args[0] stack = traceback.extract_stack() caller = stack[-2] stack_sig = \ "{relpath}:{s.name}".format( s=caller, relpath=relpath(caller.filename, toppath)) sig = (id(self), stack_sig) # we will count based on id(self) + wherefrom memo[sig][caller.lineno] += 1 t0 = time() return func(*args, **kwargs) finally: times.append(time() - t0) pass def print_stats(): print("The cost of property {}:".format(func.__name__)) if not memo: print("None since no calls") return # total count counts = {k: sum(v.values()) for k,v in memo.items()} total = sum(counts.values()) ids = {self_id for (self_id, _) in memo} print(" Total: {} calls from {} objects with {} contexts taking {:.2f} sec" .format(total, len(ids), len(memo), sum(times))) # now we need to sort by value for (self_id, caller), count in sorted(counts.items(), key=lambda x: x[1], reverse=True): print(" {} {}: {} from {} lines" .format(self_id, caller, count, len(memo[(self_id, caller)]))) # Upon total exit we print the stats import atexit atexit.register(print_stats) return _wrap_collect_method_callstats # Borrowed from duecredit to wrap duecredit-handling to guarantee failsafe def never_fail(f): """Assure that function never fails -- all exceptions are caught Returns `None` if function fails internally. """ @wraps(f) def wrapped_func(*args, **kwargs): try: return f(*args, **kwargs) except Exception as e: lgr.warning( "DataLad internal failure while running %s: %r. 
" "Please report at https://github.com/datalad/datalad/issues" % (f, e) ) if os.environ.get('DATALAD_ALLOW_FAIL', False): return f else: return wrapped_func # # Context Managers # # unused in -core @contextmanager def nothing_cm(): """Just a dummy cm to programmically switch context managers""" yield @contextmanager def swallow_outputs(): """Context manager to help consuming both stdout and stderr, and print() stdout is available as cm.out and stderr as cm.err whenever cm is the yielded context manager. Internally uses temporary files to guarantee absent side-effects of swallowing into StringIO which lacks .fileno. print mocking is necessary for some uses where sys.stdout was already bound to original sys.stdout, thus mocking it later had no effect. Overriding print function had desired effect """ class StringIOAdapter(object): """Little adapter to help getting out/err values """ def __init__(self): kw = get_tempfile_kwargs({}, prefix="outputs") self._out = NamedTemporaryFile(delete=False, mode='w', **kw) self._err = NamedTemporaryFile(delete=False, mode='w', **kw) def _read(self, h): with open(h.name) as f: return f.read() @property def out(self): if not self._out.closed: self._out.flush() return self._read(self._out) @property def err(self): if not self._err.closed: self._err.flush() return self._read(self._err) @property def handles(self): return self._out, self._err def cleanup(self): self._out.close() self._err.close() out_name = self._out.name err_name = self._err.name from datalad import cfg if cfg.getbool('datalad.log', 'outputs', default=False) \ and lgr.getEffectiveLevel() <= logging.DEBUG: for s, sname in ((self.out, 'stdout'), (self.err, 'stderr')): if s: pref = os.linesep + "| " lgr.debug("Swallowed %s:%s%s", sname, pref, s.replace(os.linesep, pref)) else: lgr.debug("Nothing was swallowed for %s", sname) del self._out del self._err gc.collect() rmtemp(out_name) rmtemp(err_name) def fake_print(*args, **kwargs): sep = kwargs.pop('sep', ' ') end = kwargs.pop('end', '\n') file = kwargs.pop('file', sys.stdout) if file in (oldout, olderr, sys.stdout, sys.stderr): # we mock try: sys.stdout.write(sep.join(args) + end) except UnicodeEncodeError as exc: lgr.error( "Failed to write to mocked stdout, got %s, continue as it " "didn't happen", exc) else: # must be some other file one -- leave it alone oldprint(*args, sep=sep, end=end, file=file) from .ui import ui # preserve -- they could have been mocked already oldprint = getattr(builtins, 'print') oldout, olderr = sys.stdout, sys.stderr olduiout = ui.out adapter = StringIOAdapter() try: sys.stdout, sys.stderr = adapter.handles ui.out = adapter.handles[0] setattr(builtins, 'print', fake_print) yield adapter finally: sys.stdout, sys.stderr, ui.out = oldout, olderr, olduiout setattr(builtins, 'print', oldprint) adapter.cleanup() @contextmanager def swallow_logs(new_level=None, file_=None, name='datalad'): """Context manager to consume all logs. """ lgr = logging.getLogger(name) # Keep old settings old_level = lgr.level old_handlers = lgr.handlers # Let's log everything into a string # TODO: generalize with the one for swallow_outputs class StringIOAdapter(object): """Little adapter to help getting out values And to stay consistent with how swallow_outputs behaves """ def __init__(self): if file_ is None: kw = get_tempfile_kwargs({}, prefix="logs") self._out = NamedTemporaryFile(mode='a', delete=False, **kw) else: out_file = file_ # PY3 requires clearly one or another. 
race condition possible self._out = open(out_file, 'a') self._final_out = None def _read(self, h): with open(h.name) as f: return f.read() @property def out(self): if self._final_out is not None: # we closed and cleaned up already return self._final_out else: self._out.flush() return self._read(self._out) @property def lines(self): return self.out.split('\n') @property def handle(self): return self._out def cleanup(self): # store for access while object exists self._final_out = self.out self._out.close() out_name = self._out.name del self._out gc.collect() if not file_: rmtemp(out_name) def assert_logged(self, msg=None, level=None, regex=True, **kwargs): """Provide assertion on whether a msg was logged at a given level If neither `msg` nor `level` provided, checks if anything was logged at all. Parameters ---------- msg: str, optional Message (as a regular expression, if `regex`) to be searched. If no msg provided, checks if anything was logged at a given level. level: str, optional String representing the level to be logged regex: bool, optional If False, regular `assert_in` is used **kwargs: str, optional Passed to `assert_re_in` or `assert_in` """ from datalad.tests.utils import assert_re_in from datalad.tests.utils import assert_in if regex: match = r'\[%s\] ' % level if level else r"\[\S+\] " else: match = '[%s] ' % level if level else '' if msg: match += msg if match: (assert_re_in if regex else assert_in)(match, self.out, **kwargs) else: assert not kwargs, "no kwargs to be passed anywhere" assert self.out, "Nothing was logged!?" adapter = StringIOAdapter() # TODO: it does store messages but without any formatting, i.e. even without # date/time prefix etc. IMHO it should preserve formatting in case if file_ is # set swallow_handler = logging.StreamHandler(adapter.handle) # we want to log levelname so we could test against it swallow_handler.setFormatter( logging.Formatter('[%(levelname)s] %(message)s')) swallow_handler.filters = sum([h.filters for h in old_handlers], []) lgr.handlers = [swallow_handler] if old_level < logging.DEBUG: # so if HEAVYDEBUG etc -- show them! lgr.handlers += old_handlers if isinstance(new_level, str): new_level = getattr(logging, new_level) if new_level is not None: lgr.setLevel(new_level) try: yield adapter # TODO: if file_ and there was an exception -- most probably worth logging it? # although ideally it should be the next log outside added to that file_ ... oh well finally: lgr.handlers = old_handlers lgr.setLevel(old_level) adapter.cleanup() # TODO: May be melt in with swallow_logs at some point: @contextmanager def disable_logger(logger=None): """context manager to temporarily disable logging This is to provide one of swallow_logs' purposes without unnecessarily creating temp files (see gh-1865) Parameters ---------- logger: Logger Logger whose handlers will be ordered to not log anything. Default: datalad's topmost Logger ('datalad') """ class NullFilter(logging.Filter): """Filter class to reject all records """ def filter(self, record): return 0 if logger is None: # default: all of datalad's logging: logger = logging.getLogger('datalad') filter_ = NullFilter(logger.name) [h.addFilter(filter_) for h in logger.handlers] try: yield logger finally: [h.removeFilter(filter_) for h in logger.handlers] # # Additional handlers # _sys_excepthook = sys.excepthook # Just in case we ever need original one def setup_exceptionhook(ipython=False): """Overloads default sys.excepthook with our exceptionhook handler. 
If interactive, our exceptionhook handler will invoke pdb.post_mortem; if not interactive, then invokes default handler. """ def _datalad_pdb_excepthook(type, value, tb): import traceback traceback.print_exception(type, value, tb) print() if is_interactive(): import pdb pdb.post_mortem(tb) if ipython: from IPython.core import ultratb sys.excepthook = ultratb.FormattedTB(mode='Verbose', # color_scheme='Linux', call_pdb=is_interactive()) else: sys.excepthook = _datalad_pdb_excepthook def ensure_dir(*args): """Make sure directory exists. Joins the list of arguments to an os-specific path to the desired directory and creates it, if it not exists yet. """ dirname = op.join(*args) if not exists(dirname): os.makedirs(dirname) return dirname def updated(d, update): """Return a copy of the input with the 'update' Primarily for updating dictionaries """ d = d.copy() d.update(update) return d _pwd_mode = None def _switch_to_getcwd(msg, *args): global _pwd_mode _pwd_mode = 'cwd' lgr.debug( msg + ". From now on will be returning os.getcwd(). Directory" " symlinks in the paths will be resolved", *args ) # TODO: we might want to mitigate by going through all flywheighted # repos and tuning up their .paths to be resolved? def getpwd(): """Try to return a CWD without dereferencing possible symlinks This function will try to use PWD environment variable to provide a current working directory, possibly with some directories along the path being symlinks to other directories. Unfortunately, PWD is used/set only by the shell and such functions as `os.chdir` and `os.getcwd` nohow use or modify it, thus `os.getcwd()` returns path with links dereferenced. While returning current working directory based on PWD env variable we verify that the directory is the same as `os.getcwd()` after resolving all symlinks. If that verification fails, we fall back to always use `os.getcwd()`. Initial decision to either use PWD env variable or os.getcwd() is done upon the first call of this function. """ global _pwd_mode if _pwd_mode is None: # we need to decide! try: pwd = os.environ['PWD'] if on_windows and pwd and pwd.startswith('/'): # It should be a path from MSYS. # - it might start with a drive letter or not # - it seems to be "illegal" to have a single letter directories # under / path, i.e. if created - they aren't found # - 'ln -s' does not fail to create a "symlink" but it just # copies! # so we are not likely to need original PWD purpose on # those systems # Verdict: _pwd_mode = 'cwd' else: _pwd_mode = 'PWD' except KeyError: _pwd_mode = 'cwd' if _pwd_mode == 'cwd': return os.getcwd() elif _pwd_mode == 'PWD': try: cwd = os.getcwd() except OSError as exc: if "o such file" in str(exc): # directory was removed but we promised to be robust and # still report the path we might know since we are still in PWD # mode cwd = None else: raise try: pwd = os.environ['PWD'] # do absolute() in addition to always get an absolute path # even with non-existing paths on windows pwd_real = str(Path(pwd).resolve().absolute()) # This logic would fail to catch the case where chdir did happen # to the directory where current PWD is pointing to, e.g. 
# $> ls -ld $PWD # lrwxrwxrwx 1 yoh yoh 5 Oct 11 13:27 /home/yoh/.tmp/tmp -> /tmp// # hopa:~/.tmp/tmp # $> python -c 'import os; os.chdir("/tmp"); from datalad.utils import getpwd; print(getpwd(), os.getcwd())' # ('/home/yoh/.tmp/tmp', '/tmp') # but I guess that should not be too harmful if cwd is not None and pwd_real != cwd: _switch_to_getcwd( "realpath of PWD=%s is %s whenever os.getcwd()=%s", pwd, pwd_real, cwd ) return cwd return pwd except KeyError: _switch_to_getcwd("PWD env variable is no longer available") return cwd # Must not happen, but may be someone # evil purges PWD from environ? else: raise RuntimeError( "Must have not got here. " "pwd_mode must be either cwd or PWD. And it is now %r" % (_pwd_mode,) ) class chpwd(object): """Wrapper around os.chdir which also adjusts environ['PWD'] The reason is that otherwise PWD is simply inherited from the shell and we have no ability to assess directory path without dereferencing symlinks. If used as a context manager it allows to temporarily change directory to the given path """ def __init__(self, path, mkdir=False, logsuffix=''): if path: pwd = getpwd() self._prev_pwd = pwd else: self._prev_pwd = None return if not isabs(path): path = normpath(op.join(pwd, path)) if not os.path.exists(path) and mkdir: self._mkdir = True os.mkdir(path) else: self._mkdir = False lgr.debug("chdir %r -> %r %s", self._prev_pwd, path, logsuffix) os.chdir(path) # for grep people -- ok, to chdir here! os.environ['PWD'] = str(path) def __enter__(self): # nothing more to do really, chdir was in the constructor pass def __exit__(self, exc_type, exc_val, exc_tb): if self._prev_pwd: # Need to use self.__class__ so this instance, if the entire # thing mocked during the test, still would use correct chpwd self.__class__(self._prev_pwd, logsuffix="(coming back)") def dlabspath(path, norm=False): """Symlinks-in-the-cwd aware abspath os.path.abspath relies on os.getcwd() which would not know about symlinks in the path TODO: we might want to norm=True by default to match behavior of os .path.abspath? """ if not isabs(path): # if not absolute -- relative to pwd path = op.join(getpwd(), path) return normpath(path) if norm else path def with_pathsep(path): """Little helper to guarantee that path ends with /""" return path + sep if not path.endswith(sep) else path def get_path_prefix(path, pwd=None): """Get path prefix (for current directory) Returns relative path to the topdir, if we are under topdir, and if not absolute path to topdir. If `pwd` is not specified - current directory assumed """ pwd = pwd or getpwd() path = dlabspath(path) path_ = with_pathsep(path) pwd_ = with_pathsep(pwd) common = commonprefix((path_, pwd_)) if common.endswith(sep) and common in {path_, pwd_}: # we are in subdir or above the path = use relative path location_prefix = relpath(path, pwd) # if benign "here" - cut off if location_prefix in (curdir, curdir + sep): location_prefix = '' return location_prefix else: # just return absolute path return path def _get_normalized_paths(path, prefix): if isabs(path) != isabs(prefix): raise ValueError("Both paths must either be absolute or relative. 
" "Got %r and %r" % (path, prefix)) path = with_pathsep(path) prefix = with_pathsep(prefix) return path, prefix def path_startswith(path, prefix): """Return True if path starts with prefix path Parameters ---------- path: str prefix: str """ path, prefix = _get_normalized_paths(path, prefix) return path.startswith(prefix) def path_is_subpath(path, prefix): """Return True if path is a subpath of prefix It will return False if path == prefix. Parameters ---------- path: str prefix: str """ path, prefix = _get_normalized_paths(path, prefix) return (len(prefix) < len(path)) and path.startswith(prefix) def knows_annex(path): """Returns whether at a given path there is information about an annex It is just a thin wrapper around GitRepo.is_with_annex() classmethod which also checks for `path` to exist first. This includes actually present annexes, but also uninitialized ones, or even the presence of a remote annex branch. """ from os.path import exists if not exists(path): lgr.debug("No annex: test path {0} doesn't exist".format(path)) return False from datalad.support.gitrepo import GitRepo return GitRepo(path, init=False, create=False).is_with_annex() @contextmanager def make_tempfile(content=None, wrapped=None, **tkwargs): """Helper class to provide a temporary file name and remove it at the end (context manager) Parameters ---------- mkdir : bool, optional (default: False) If True, temporary directory created using tempfile.mkdtemp() content : str or bytes, optional Content to be stored in the file created wrapped : function, optional If set, function name used to prefix temporary file name `**tkwargs`: All other arguments are passed into the call to tempfile.mk{,d}temp(), and resultant temporary filename is passed as the first argument into the function t. If no 'prefix' argument is provided, it will be constructed using module and function names ('.' replaced with '_'). To change the used directory without providing keyword argument 'dir' set DATALAD_TESTS_TEMP_DIR. Examples -------- >>> from os.path import exists >>> from datalad.utils import make_tempfile >>> with make_tempfile() as fname: ... k = open(fname, 'w').write('silly test') >>> assert not exists(fname) # was removed >>> with make_tempfile(content="blah") as fname: ... assert open(fname).read() == "blah" """ if tkwargs.get('mkdir', None) and content is not None: raise ValueError("mkdir=True while providing content makes no sense") tkwargs_ = get_tempfile_kwargs(tkwargs, wrapped=wrapped) # if DATALAD_TESTS_TEMP_DIR is set, use that as directory, # let mktemp handle it otherwise. However, an explicitly provided # dir=... will override this. mkdir = tkwargs_.pop('mkdir', False) filename = {False: tempfile.mktemp, True: tempfile.mkdtemp}[mkdir](**tkwargs_) # MIH: not clear to me why we need to perform this (possibly expensive) # resolve. It was already part of the original implementation # 008d9ab8cc3e0170c0a9b8479e80dee9ffe6eb7f filename = Path(filename).resolve() if content: (filename.write_bytes if isinstance(content, bytes) else filename.write_text)(content) # TODO globbing below can also be done with pathlib filename = str(filename) if __debug__: lgr.debug( 'Created temporary %s named %s', 'directory' if mkdir else 'file', filename) try: yield filename finally: # glob here for all files with the same name (-suffix) # would be useful whenever we requested .img filename, # and function creates .hdr as well # MIH: this is undocumented behavior, and undesired in the general # case. 
it should be made conditional and explicit lsuffix = len(tkwargs_.get('suffix', '')) filename_ = lsuffix and filename[:-lsuffix] or filename filenames = glob.glob(filename_ + '*') if len(filename_) < 3 or len(filenames) > 5: # For paranoid yoh who stepped into this already ones ;-) lgr.warning("It is unlikely that it was intended to remove all" " files matching %r. Skipping" % filename_) return for f in filenames: try: rmtemp(f) except OSError: # pragma: no cover pass def _path_(*p): """Given a path in POSIX" notation, regenerate one in native to the env one""" if on_windows: return op.join(*map(lambda x: op.join(*x.split('/')), p)) else: # Assume that all others as POSIX compliant so nothing to be done return op.join(*p) def get_timestamp_suffix(time_=None, prefix='-'): """Return a time stamp (full date and time up to second) primarily to be used for generation of log files names """ args = [] if time_ is not None: if isinstance(time_, int): time_ = time.gmtime(time_) args.append(time_) return time.strftime(prefix + TIMESTAMP_FMT, *args) # unused in -core def get_logfilename(dspath, cmd='datalad'): """Return a filename to use for logging under a dataset/repository directory would be created if doesn't exist, but dspath must exist and be a directory """ assert(exists(dspath)) assert(isdir(dspath)) ds_logdir = ensure_dir(dspath, '.git', 'datalad', 'logs') # TODO: use WEB_META_LOG whenever #789 merged return op.join(ds_logdir, 'crawl-%s.log' % get_timestamp_suffix()) def get_trace(edges, start, end, trace=None): """Return the trace/path to reach a node in a tree. Parameters ---------- edges : sequence(2-tuple) The tree given by a sequence of edges (parent, child) tuples. The nodes can be identified by any value and data type that supports the '==' operation. start : Identifier of the start node. Must be present as a value in the parent location of an edge tuple in order to be found. end : Identifier of the target/end node. Must be present as a value in the child location of an edge tuple in order to be found. trace : list Mostly useful for recursive calls, and used internally. Returns ------- None or list Returns a list with the trace to the target (the starts and the target are not included in the trace, hence if start and end are directly connected an empty list is returned), or None when no trace to the target can be found, or start and end are identical. """ # the term trace is used to avoid confusion with a path in the sense # of a filesystem path, but the analogy fits and nodes can be paths if trace is None: trace = [] if not edges: raise ValueError("no edges given") for cand in edges: cand_super, cand_sub = cand if cand_sub in trace: # only DAGs, skip any cyclic traces continue if trace and cand_super != trace[-1]: # only consider edges that lead off the end of the trace continue if not trace and cand_super != start: # we got nothing yet, and this edges is not matching the start continue if cand_sub == end: return trace # dive into potential subnodes cand_trace = get_trace( edges, start, end, trace + [cand_sub]) if cand_trace: return cand_trace return None def get_dataset_root(path): """Return the root of an existent dataset containing a given path The root path is returned in the same absolute or relative form as the input argument. If no associated dataset exists, or the input path doesn't exist, None is returned. If `path` is a symlink or something other than a directory, its the root dataset containing its parent directory will be reported. 
If none can be found, at a symlink at `path` is pointing to a dataset, `path` itself will be reported as the root. Parameters ---------- path : Path-like Returns ------- str or None """ path = str(path) suffix = '.git' altered = None if islink(path) or not isdir(path): altered = path path = dirname(path) apath = abspath(path) # while we can still go up while split(apath)[1]: if exists(op.join(path, suffix)): return path # new test path in the format we got it path = normpath(op.join(path, os.pardir)) # no luck, next round apath = abspath(path) # if we applied dirname() at the top, we give it another go with # the actual path, if it was itself a symlink, it could be the # top-level dataset itself if altered and exists(op.join(altered, suffix)): return altered return None # ATM used in datalad_crawler extension, so do not remove yet def try_multiple(ntrials, exception, base, f, *args, **kwargs): """Call f multiple times making exponentially growing delay between the calls""" for trial in range(1, ntrials+1): try: return f(*args, **kwargs) except exception as exc: if trial == ntrials: raise # just reraise on the last trial t = base ** trial lgr.warning("Caught %s on trial #%d. Sleeping %f and retrying", CapturedException(exc), trial, t) sleep(t) @optional_args def try_multiple_dec( f, ntrials=None, duration=0.1, exceptions=None, increment_type=None, exceptions_filter=None, logger=None, ): """Decorator to try function multiple times. Main purpose is to decorate functions dealing with removal of files/directories and which might need a few seconds to work correctly on Windows which takes its time to release files/directories. Parameters ---------- ntrials: int, optional duration: float, optional Seconds to sleep before retrying. increment_type: {None, 'exponential'} Note that if it is exponential, duration should typically be > 1.0 so it grows with higher power exceptions: Exception or tuple of Exceptions, optional Exception or a tuple of multiple exceptions, on which to retry exceptions_filter: callable, optional If provided, this function will be called with a caught exception instance. If function returns True - we will re-try, if False - exception will be re-raised without retrying. logger: callable, optional Logger to log upon failure. If not provided, will use stock logger at the level of 5 (heavy debug). """ if not exceptions: exceptions = (OSError, WindowsError, PermissionError) \ if on_windows else OSError if not ntrials: # Life goes fast on proper systems, no need to delay it much ntrials = 100 if on_windows else 10 if logger is None: def logger(*args, **kwargs): return lgr.log(5, *args, **kwargs) assert increment_type in {None, 'exponential'} @wraps(f) def _wrap_try_multiple_dec(*args, **kwargs): t = duration for trial in range(ntrials): try: return f(*args, **kwargs) except exceptions as exc: if exceptions_filter and not exceptions_filter(exc): raise if trial < ntrials - 1: if increment_type == 'exponential': t = duration ** (trial + 1) logger( "Caught %s on trial #%d. Sleeping %f and retrying", CapturedException(exc), trial, t) sleep(t) else: raise return _wrap_try_multiple_dec @try_multiple_dec def unlink(f): """'Robust' unlink. Would try multiple times On windows boxes there is evidence for a latency of more than a second until a file is considered no longer "in-use". 
WindowsError is not known on Linux, and if IOError or any other exception is thrown then if except statement has WindowsError in it -- NameError also see gh-2533 """ # Check for open files assert_no_open_files(f) return os.unlink(f) @try_multiple_dec def _rmtree(*args, **kwargs): """Just a helper to decorate shutil.rmtree. rmtree defined above does more and ideally should not itself be decorated since a recursive definition and does checks for open files inside etc - might be too runtime expensive """ return shutil.rmtree(*args, **kwargs) def slash_join(base, extension): """Join two strings with a '/', avoiding duplicate slashes If any of the strings is None the other is returned as is. """ if extension is None: return base if base is None: return extension return '/'.join( (base.rstrip('/'), extension.lstrip('/'))) # # IO Helpers # # unused in -core def open_r_encdetect(fname, readahead=1000): """Return a file object in read mode with auto-detected encoding This is helpful when dealing with files of unknown encoding. Parameters ---------- readahead: int, optional How many bytes to read for guessing the encoding type. If negative - full file will be read """ from chardet import detect import io # read some bytes from the file with open(fname, 'rb') as f: head = f.read(readahead) enc = detect(head) denc = enc.get('encoding', None) lgr.debug("Auto-detected encoding %s for file %s (confidence: %s)", denc, fname, enc.get('confidence', 'unknown')) return io.open(fname, encoding=denc) def read_file(fname, decode=True): """A helper to read file passing content via ensure_unicode Parameters ---------- decode: bool, optional if False, no ensure_unicode and file content returned as bytes """ with open(fname, 'rb') as f: content = f.read() return ensure_unicode(content) if decode else content def read_csv_lines(fname, dialect=None, readahead=16384, **kwargs): """A generator of dict records from a CSV/TSV Automatically guesses the encoding for each record to convert to UTF-8 Parameters ---------- fname: str Filename dialect: str, optional Dialect to specify to csv.reader. If not specified -- guessed from the file, if fails to guess, "excel-tab" is assumed readahead: int, optional How many bytes to read from the file to guess the type **kwargs Passed to `csv.reader` """ import csv if dialect is None: with open(fname) as tsvfile: # add robustness, use a sniffer try: dialect = csv.Sniffer().sniff(tsvfile.read(readahead)) except Exception as exc: lgr.warning( 'Could not determine file-format, assuming TSV: %s', CapturedException(exc) ) dialect = 'excel-tab' kw = dict(encoding='utf-8') with open(fname, 'r', **kw) as tsvfile: # csv.py doesn't do Unicode; encode temporarily as UTF-8: csv_reader = csv.reader( tsvfile, dialect=dialect, **kwargs ) header = None for row in csv_reader: # decode UTF-8 back to Unicode, cell by cell: row_unicode = map(ensure_unicode, row) if header is None: header = list(row_unicode) else: yield dict(zip(header, row_unicode)) def import_modules(modnames, pkg, msg="Failed to import {module}", log=lgr.debug): """Helper to import a list of modules without failing if N/A Parameters ---------- modnames: list of str List of module names to import pkg: str Package under which to import msg: str, optional Message template for .format() to log at DEBUG level if import fails. 
Keys {module} and {package} will be provided and ': {exception}' appended log: callable, optional Logger call to use for logging messages """ from importlib import import_module _globals = globals() mods_loaded = [] if pkg and not pkg in sys.modules: # with python 3.5.1 (ok with 3.5.5) somehow kept running into # Failed to import dlsub1: Parent module 'dltestm1' not loaded # while running the test. Preloading pkg resolved the issue import_module(pkg) for modname in modnames: try: _globals[modname] = mod = import_module( '.{}'.format(modname), pkg) mods_loaded.append(mod) except Exception as exc: from datalad.support.exceptions import CapturedException ce = CapturedException(exc) log((msg + ': {exception}').format( module=modname, package=pkg, exception=ce.message)) return mods_loaded def import_module_from_file(modpath, pkg=None, log=lgr.debug): """Import provided module given a path TODO: - RF/make use of it in pipeline.py which has similar logic - join with import_modules above? Parameters ---------- pkg: module, optional If provided, and modpath is under pkg.__path__, relative import will be used """ assert(modpath.endswith('.py')) # for now just for .py files log("Importing %s" % modpath) modname = basename(modpath)[:-3] relmodpath = None if pkg: for pkgpath in pkg.__path__: if path_is_subpath(modpath, pkgpath): # for now relying on having .py extension -- assertion above relmodpath = '.' + relpath(modpath[:-3], pkgpath).replace(sep, '.') break try: if relmodpath: from importlib import import_module mod = import_module(relmodpath, pkg.__name__) else: dirname_ = dirname(modpath) try: sys.path.insert(0, dirname_) mod = __import__(modname, level=0) finally: if dirname_ in sys.path: sys.path.pop(sys.path.index(dirname_)) else: log("Expected path %s to be within sys.path, but it was gone!" % dirname_) except Exception as e: raise RuntimeError( "Failed to import module from %s" % modpath) from e return mod def get_encoding_info(): """Return a dictionary with various encoding/locale information""" import sys, locale from collections import OrderedDict return OrderedDict([ ('default', sys.getdefaultencoding()), ('filesystem', sys.getfilesystemencoding()), ('locale.prefered', locale.getpreferredencoding()), ]) def get_envvars_info(): from collections import OrderedDict envs = [] for var, val in os.environ.items(): if ( var.startswith('PYTHON') or var.startswith('LC_') or var.startswith('GIT_') or var in ('LANG', 'LANGUAGE', 'PATH') ): envs.append((var, val)) return OrderedDict(envs) # This class is modified from Snakemake (v5.1.4) class SequenceFormatter(string.Formatter): """string.Formatter subclass with special behavior for sequences. This class delegates formatting of individual elements to another formatter object. Non-list objects are formatted by calling the delegate formatter's "format_field" method. List-like objects (list, tuple, set, frozenset) are formatted by formatting each element of the list according to the specified format spec using the delegate formatter and then joining the resulting strings with a separator (space by default). """ def __init__(self, separator=" ", element_formatter=string.Formatter(), *args, **kwargs): self.separator = separator self.element_formatter = element_formatter def format_element(self, elem, format_spec): """Format a single element For sequences, this is called once for each element in a sequence. For anything else, it is called on the entire object. It is intended to be overridden in subclases. 
""" return self.element_formatter.format_field(elem, format_spec) def format_field(self, value, format_spec): if isinstance(value, (list, tuple, set, frozenset)): return self.separator.join(self.format_element(v, format_spec) for v in value) else: return self.format_element(value, format_spec) # TODO: eventually we might want to make use of attr module class File(object): """Helper for a file entry in the create_tree/@with_tree It allows to define additional settings for entries """ def __init__(self, name, executable=False): """ Parameters ---------- name : str Name of the file executable: bool, optional Make it executable """ self.name = name self.executable = executable def __str__(self): return self.name def create_tree_archive(path, name, load, overwrite=False, archives_leading_dir=True): """Given an archive `name`, create under `path` with specified `load` tree """ from datalad.support.archives import compress_files dirname = file_basename(name) full_dirname = op.join(path, dirname) os.makedirs(full_dirname) create_tree(full_dirname, load, archives_leading_dir=archives_leading_dir) # create archive if archives_leading_dir: compress_files([dirname], name, path=path, overwrite=overwrite) else: compress_files(list(map(basename, glob.glob(op.join(full_dirname, '*')))), op.join(pardir, name), path=op.join(path, dirname), overwrite=overwrite) # remove original tree rmtree(full_dirname) def create_tree(path, tree, archives_leading_dir=True, remove_existing=False): """Given a list of tuples (name, load) create such a tree if load is a tuple itself -- that would create either a subtree or an archive with that content and place it into the tree if name ends with .tar.gz """ lgr.log(5, "Creating a tree under %s", path) if not exists(path): os.makedirs(path) if isinstance(tree, dict): tree = tree.items() for file_, load in tree: if isinstance(file_, File): executable = file_.executable name = file_.name else: executable = False name = file_ full_name = op.join(path, name) if remove_existing and lexists(full_name): rmtree(full_name, chmod_files=True) if isinstance(load, (tuple, list, dict)): if name.endswith('.tar.gz') or name.endswith('.tar') or name.endswith('.zip'): create_tree_archive( path, name, load, archives_leading_dir=archives_leading_dir) else: create_tree( full_name, load, archives_leading_dir=archives_leading_dir, remove_existing=remove_existing) else: open_func = open if full_name.endswith('.gz'): open_func = gzip.open elif full_name.split('.')[-1] in ('xz', 'lzma'): import lzma open_func = lzma.open with open_func(full_name, "wb") as f: f.write(ensure_bytes(load, 'utf-8')) if executable: os.chmod(full_name, os.stat(full_name).st_mode | stat.S_IEXEC) def get_suggestions_msg(values, known, sep="\n "): """Return a formatted string with suggestions for values given the known ones """ import difflib suggestions = [] for value in ensure_list(values): # might not want to do it if we change presentation below suggestions += difflib.get_close_matches(value, known) suggestions = unique(suggestions) msg = "Did you mean any of these?" if suggestions: if '\n' in sep: # if separator includes new line - we add entire separator right away msg += sep else: msg += ' ' return msg + "%s\n" % sep.join(suggestions) return '' def bytes2human(n, format='%(value).1f %(symbol)sB'): """ Convert n bytes into a human readable string based on format. 
symbols can be either "customary", "customary_ext", "iec" or "iec_ext", see: http://goo.gl/kTQMs >>> from datalad.utils import bytes2human >>> bytes2human(1) '1.0 B' >>> bytes2human(1024) '1.0 KB' >>> bytes2human(1048576) '1.0 MB' >>> bytes2human(1099511627776127398123789121) '909.5 YB' >>> bytes2human(10000, "%(value).1f %(symbol)s/sec") '9.8 K/sec' >>> # precision can be adjusted by playing with %f operator >>> bytes2human(10000, format="%(value).5f %(symbol)s") '9.76562 K' Taken from: http://goo.gl/kTQMs and subsequently simplified Original Author: Giampaolo Rodola' <g.rodola [AT] gmail [DOT] com> License: MIT """ n = int(n) if n < 0: raise ValueError("n < 0") symbols = ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y') prefix = {} for i, s in enumerate(symbols[1:]): prefix[s] = 1 << (i + 1) * 10 for symbol in reversed(symbols[1:]): if n >= prefix[symbol]: value = float(n) / prefix[symbol] return format % locals() return format % dict(symbol=symbols[0], value=n) def quote_cmdlinearg(arg): """Perform platform-appropriate argument quoting""" # https://stackoverflow.com/a/15262019 return '"{}"'.format( arg.replace('"', '""') ) if on_windows else shlex_quote(arg) def guard_for_format(arg): """Replace { and } with {{ and }} To be used in cases if arg is not expected to have provided by user .format() placeholders, but 'arg' might become a part of a composite passed to .format(), e.g. via 'Run' """ return arg.replace('{', '{{').replace('}', '}}') def join_cmdline(args): """Join command line args into a string using quote_cmdlinearg """ return ' '.join(map(quote_cmdlinearg, args)) def split_cmdline(s): """Perform platform-appropriate command line splitting. Identical to `shlex.split()` on non-windows platforms. Modified from https://stackoverflow.com/a/35900070 """ if not on_windows: return shlex_split(s) # the rest is for windows RE_CMD_LEX = r'''"((?:""|\\["\\]|[^"])*)"?()|(\\\\(?=\\*")|\\")|(&&?|\|\|?|\d?>|[<])|([^\s"&|<>]+)|(\s+)|(.)''' args = [] accu = None # collects pieces of one arg for qs, qss, esc, pipe, word, white, fail in re.findall(RE_CMD_LEX, s): if word: pass # most frequent elif esc: word = esc[1] elif white or pipe: if accu is not None: args.append(accu) if pipe: args.append(pipe) accu = None continue elif fail: raise ValueError("invalid or incomplete shell string") elif qs: word = qs.replace('\\"', '"').replace('\\\\', '\\') if platform == 0: word = word.replace('""', '"') else: word = qss # may be even empty; must be last accu = (accu or '') + word if accu is not None: args.append(accu) return args def get_wrapped_class(wrapped): """Determine the command class a wrapped __call__ belongs to""" mod = sys.modules[wrapped.__module__] command_class_name = wrapped.__qualname__.split('.')[-2] _func_class = mod.__dict__[command_class_name] lgr.debug("Determined class of decorated function: %s", _func_class) return _func_class def _make_assure_kludge(fn): old_name = fn.__name__.replace("ensure", "assure") @wraps(fn) def compat_fn(*args, **kwargs): warnings.warn( "{} is deprecated and will be removed in a future release. " "Use {} instead." .format(old_name, fn.__name__), DeprecationWarning) return fn(*args, **kwargs) compat_fn.__doc__ = ("Note: This function is deprecated. Use {} instead." 
.format(fn.__name__)) return compat_fn assure_tuple_or_list = _make_assure_kludge(ensure_tuple_or_list) assure_iter = _make_assure_kludge(ensure_iter) assure_list = _make_assure_kludge(ensure_list) assure_list_from_str = _make_assure_kludge(ensure_list_from_str) assure_dict_from_str = _make_assure_kludge(ensure_dict_from_str) assure_bytes = _make_assure_kludge(ensure_bytes) assure_unicode = _make_assure_kludge(ensure_unicode) assure_bool = _make_assure_kludge(ensure_bool) assure_dir = _make_assure_kludge(ensure_dir) lgr.log(5, "Done importing datalad.utils") def check_symlink_capability(path, target): """helper similar to datalad.tests.utils.has_symlink_capability However, for use in a datalad command context, we shouldn't assume to be able to write to tmpfile and also not import a whole lot from datalad's test machinery. Finally, we want to know, whether we can create a symlink at a specific location, not just somewhere. Therefore use arbitrary path to test-build a symlink and delete afterwards. Suitable location can therefore be determined by high lever code. Parameters ---------- path: Path target: Path Returns ------- bool """ try: target.touch() path.symlink_to(target) return True except Exception: return False finally: if path.exists(): path.unlink() if target.exists(): target.unlink()
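# Illustrative usage sketch (editor addition, not part of the original module):
# check_symlink_capability() above probes whether a symlink can be created at a
# caller-chosen location and cleans up both probe paths itself. Assuming the
# module-level pathlib.Path import, a hypothetical wrapper for a single
# directory could look like this; both probe names below are made up.
def _can_symlink_under(directory):
    """Return True if a symlink can be created inside `directory` (sketch)."""
    probe = Path(directory)
    return check_symlink_capability(
        probe / '.symlink_probe_link',    # the symlink to attempt to create
        probe / '.symlink_probe_target',  # the temporary file it points to
    )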
def rmtree(path, chmod_files='auto', children_only=False, *args, **kwargs): """To remove git-annex .git it is needed to make all files and directories writable again first Parameters ---------- path: Path or str Path to remove chmod_files : string or bool, optional Whether to make files writable also before removal. Usually it is just a matter of directories to have write permissions. If 'auto' it would chmod files on windows by default children_only : bool, optional If set, all files and subdirectories would be removed while the path itself (must be a directory) would be preserved `*args` : `**kwargs` : Passed into shutil.rmtree call """ # Give W permissions back only to directories, no need to bother with files if chmod_files == 'auto': chmod_files = on_windows # TODO: yoh thinks that if we could quickly check our Flyweight for # repos if any of them is under the path, and could call .precommit # on those to possibly stop batched processes etc, we did not have # to do it on case by case # Check for open files assert_no_open_files(path) # TODO the whole thing should be reimplemented with pathlib, but for now # at least accept Path path = str(path) if children_only: if not isdir(path): raise ValueError("Can remove children only of directories") for p in os.listdir(path): rmtree(op.join(path, p)) return if not (islink(path) or not isdir(path)): rotree(path, ro=False, chmod_files=chmod_files) if on_windows: # shutil fails to remove paths that exceed 260 characters on Windows machines # that did not enable long path support. A workaround to remove long paths # anyway is to preprend \\?\ to the path. # https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file?redirectedfrom=MSDN#win32-file-namespaces path = r'\\?\ '.strip() + path _rmtree(path, *args, **kwargs) else: # just remove the symlink unlink(path)
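# Illustrative usage sketch (editor addition, hypothetical helper and prefix):
# for a real directory rmtree() above first checks for open files, restores
# write permissions via rotree(..., ro=False), and then delegates to the
# try_multiple_dec-decorated _rmtree() wrapper around shutil.rmtree; symlinks
# and plain files are removed with unlink() instead.
def _rmtree_usage_sketch():
    """Remove a scratch tree that may contain read-only content (sketch)."""
    from tempfile import mkdtemp
    scratch = mkdtemp(prefix='datalad_rmtree_demo_')
    # ... populate `scratch`, possibly with read-only .git/annex objects ...
    rmtree(scratch)  # succeeds despite read-only bits
    # rmtree(some_dir, children_only=True) would empty a directory but keep it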
469
518
# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*- # ex: set sts=4 ts=4 sw=4 et: # ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## # # See COPYING file distributed along with the datalad package for the # copyright and license terms. # # ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## import collections from collections.abc import Callable import re import builtins import time import logging import shutil import os import sys import tempfile from tempfile import NamedTemporaryFile import platform import gc import glob import gzip import stat import string import warnings import os.path as op from copy import copy as shallow_copy from contextlib import contextmanager from functools import ( lru_cache, wraps, ) from time import sleep import inspect from itertools import tee # this import is required because other modules import opj from here. from os.path import join as opj from os.path import ( abspath, basename, commonprefix, curdir, dirname, exists, expanduser, expandvars, isabs, isdir, islink, lexists, normpath, pardir, relpath, sep, split, splitdrive ) import posixpath from shlex import ( quote as shlex_quote, split as shlex_split, ) # from datalad.dochelpers import get_docstring_split from datalad.consts import TIMESTAMP_FMT from datalad.support.exceptions import CapturedException unicode_srctypes = str, bytes lgr = logging.getLogger("datalad.utils") lgr.log(5, "Importing datalad.utils") # # Some useful variables # platform_system = platform.system().lower() on_windows = platform_system == 'windows' on_osx = platform_system == 'darwin' on_linux = platform_system == 'linux' on_msys_tainted_paths = on_windows \ and 'MSYS_NO_PATHCONV' not in os.environ \ and os.environ.get('MSYSTEM', '')[:4] in ('MSYS', 'MING') # Takes ~200msec, so should not be called at import time @lru_cache() # output should not change through life time of datalad process def get_linux_distribution(): """Compatibility wrapper for {platform,distro}.linux_distribution(). """ if hasattr(platform, "linux_distribution"): # Use deprecated (but faster) method if it's available. with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=DeprecationWarning) result = platform.linux_distribution() else: import distro # We require this for Python 3.8 and above. result = distro.linux_distribution(full_distribution_name=False) return result # Those weren't used for any critical decision making, thus we just set them to None # Use get_linux_distribution() directly where needed linux_distribution_name = linux_distribution_release = None # Maximal length of cmdline string # Query the system and use hardcoded "knowledge" if None # probably getconf ARG_MAX might not be available # The last one would be the most conservative/Windows CMD_MAX_ARG_HARDCODED = 2097152 if on_linux else 262144 if on_osx else 32767 try: CMD_MAX_ARG = os.sysconf('SC_ARG_MAX') assert CMD_MAX_ARG > 0 if CMD_MAX_ARG > CMD_MAX_ARG_HARDCODED * 1e6: # workaround for some kind of a bug which comes up with python 3.4 # see https://github.com/datalad/datalad/issues/3150 # or on older CentOS with conda and python as new as 3.9 # see https://github.com/datalad/datalad/issues/5943 # TODO: let Yarik know that the world is a paradise now whenever 1e6 # is not large enough CMD_MAX_ARG = min(CMD_MAX_ARG, CMD_MAX_ARG_HARDCODED) except Exception as exc: # ATM (20181005) SC_ARG_MAX available only on POSIX systems # so exception would be thrown e.g. 
on Windows, or # somehow during Debian build for nd14.04 it is coming up with -1: # https://github.com/datalad/datalad/issues/3015 CMD_MAX_ARG = CMD_MAX_ARG_HARDCODED lgr.debug( "Failed to query or got useless SC_ARG_MAX sysconf, " "will use hardcoded value: %s", exc) # Even with all careful computations we do, due to necessity to account for # environment and what not, we still could not figure out "exact" way to # estimate it, but it was shown that 300k safety margin on linux was sufficient. # https://github.com/datalad/datalad/pull/2977#issuecomment-436264710 # 300k is ~15%, so to be safe, and for paranoid us we will just use up to 50% # of the length for "safety margin". We might probably still blow due to # env vars, unicode, etc... so any hard limit imho is not a proper solution CMD_MAX_ARG = int(0.5 * CMD_MAX_ARG) lgr.debug( "Maximal length of cmdline string (adjusted for safety margin): %d", CMD_MAX_ARG) # # Little helpers # # `getargspec` has been deprecated in Python 3. ArgSpecFake = collections.namedtuple( "ArgSpecFake", ["args", "varargs", "keywords", "defaults"]) def getargspec(func, *, include_kwonlyargs=False): """Compat shim for getargspec deprecated in python 3. The main difference from inspect.getargspec (and inspect.getfullargspec for that matter) is that by using inspect.signature we are providing correct args/defaults for functools.wraps'ed functions. `include_kwonlyargs` option was added to centralize getting all args, even the ones which are kwonly (follow the ``*,``). For internal use and not advised for use in 3rd party code. Please use inspect.signature directly. """ # We use signature, and not getfullargspec, because only signature properly # "passes" args from a functools.wraps decorated function. # Note: getfullargspec works Ok on wrapt-decorated functions f_sign = inspect.signature(func) # Loop through parameters and compose argspec args4 = [[], None, None, {}] # Collect all kwonlyargs into a dedicated dict - name: default kwonlyargs = {} # shortcuts args, defaults = args4[0], args4[3] P = inspect.Parameter for p_name, p in f_sign.parameters.items(): if p.kind in (P.POSITIONAL_ONLY, P.POSITIONAL_OR_KEYWORD): assert not kwonlyargs # yoh: must not come after kwonlyarg args.append(p_name) if p.default is not P.empty: defaults[p_name] = p.default elif p.kind == P.VAR_POSITIONAL: args4[1] = p_name elif p.kind == P.VAR_KEYWORD: args4[2] = p_name elif p.kind == P.KEYWORD_ONLY: assert p.default is not P.empty kwonlyargs[p_name] = p.default if kwonlyargs: if not include_kwonlyargs: raise ValueError( 'Function has keyword-only parameters or annotations, either use ' 'inspect.signature() API which can support them, or provide include_kwonlyargs=True ' 'to this function' ) else: args.extend(list(kwonlyargs)) defaults.update(kwonlyargs) # harmonize defaults to how original getargspec returned them -- just a tuple args4[3] = None if not defaults else tuple(defaults.values()) return ArgSpecFake(*args4) def any_re_search(regexes, value): """Return if any of regexes (list or str) searches successfully for value""" for regex in ensure_tuple_or_list(regexes): if re.search(regex, value): return True return False def not_supported_on_windows(msg=None): """A little helper to be invoked to consistently fail whenever functionality is not supported (yet) on Windows """ if on_windows: raise NotImplementedError("This functionality is not yet implemented for Windows OS" + (": %s" % msg if msg else "")) def get_home_envvars(new_home): """Return dict with env variables to be adjusted for a 
new HOME Only variables found in current os.environ are adjusted. Parameters ---------- new_home: str or Path New home path, in native to OS "schema" """ new_home = str(new_home) out = {'HOME': new_home} if on_windows: # requires special handling, since it has a number of relevant variables # and also Python changed its behavior and started to respect USERPROFILE only # since python 3.8: https://bugs.python.org/issue36264 out['USERPROFILE'] = new_home out['HOMEDRIVE'], out['HOMEPATH'] = splitdrive(new_home) return {v: val for v, val in out.items() if v in os.environ} def shortened_repr(value, l=30): try: if hasattr(value, '__repr__') and (value.__repr__ is not object.__repr__): value_repr = repr(value) if not value_repr.startswith('<') and len(value_repr) > l: value_repr = "<<%s++%d chars++%s>>" % ( value_repr[:l - 16], len(value_repr) - (l - 16 + 4), value_repr[-4:] ) elif value_repr.startswith('<') and value_repr.endswith('>') and ' object at 0x': raise ValueError("I hate those useless long reprs") else: raise ValueError("gimme class") except Exception as e: value_repr = "<%s>" % value.__class__.__name__.split('.')[-1] return value_repr def __auto_repr__(obj): attr_names = tuple() if hasattr(obj, '__dict__'): attr_names += tuple(obj.__dict__.keys()) if hasattr(obj, '__slots__'): attr_names += tuple(obj.__slots__) items = [] for attr in sorted(set(attr_names)): if attr.startswith('_'): continue value = getattr(obj, attr) # TODO: should we add this feature to minimize some talktative reprs # such as of URL? #if value is None: # continue items.append("%s=%s" % (attr, shortened_repr(value))) return "%s(%s)" % (obj.__class__.__name__, ', '.join(items)) def auto_repr(cls): """Decorator for a class to assign it an automagic quick and dirty __repr__ It uses public class attributes to prepare repr of a class Original idea: http://stackoverflow.com/a/27799004/1265472 """ cls.__repr__ = __auto_repr__ return cls def _is_stream_tty(stream): try: # TODO: check on windows if hasattr check would work correctly and # add value: return stream.isatty() except ValueError as exc: # Who knows why it is a ValueError, but let's try to be specific # If there is a problem with I/O - non-interactive, otherwise reraise if "I/O" in str(exc): return False raise def is_interactive(): """Return True if all in/outs are open and tty. Note that in a somewhat abnormal case where e.g. stdin is explicitly closed, and any operation on it would raise a `ValueError("I/O operation on closed file")` exception, this function would just return False, since the session cannot be used interactively. 
""" return all(_is_stream_tty(s) for s in (sys.stdin, sys.stdout, sys.stderr)) def get_ipython_shell(): """Detect if running within IPython and returns its `ip` (shell) object Returns None if not under ipython (no `get_ipython` function) """ try: return get_ipython() except NameError: return None def md5sum(filename): """Compute an MD5 sum for the given file """ from datalad.support.digests import Digester return Digester(digests=['md5'])(filename)['md5'] # unused in -core def sorted_files(path): """Return a (sorted) list of files under path """ return sorted(sum([[op.join(r, f)[len(path) + 1:] for f in files] for r, d, files in os.walk(path) if not '.git' in r], [])) _encoded_dirsep = r'\\' if on_windows else r'/' _VCS_REGEX = r'%s\.(?:git|gitattributes|svn|bzr|hg)(?:%s|$)' % ( _encoded_dirsep, _encoded_dirsep) _DATALAD_REGEX = r'%s\.(?:datalad)(?:%s|$)' % ( _encoded_dirsep, _encoded_dirsep) def find_files(regex, topdir=curdir, exclude=None, exclude_vcs=True, exclude_datalad=False, dirs=False): """Generator to find files matching regex Parameters ---------- regex: basestring exclude: basestring, optional Matches to exclude exclude_vcs: If True, excludes commonly known VCS subdirectories. If string, used as regex to exclude those files (regex: `%r`) exclude_datalad: If True, excludes files known to be datalad meta-data files (e.g. under .datalad/ subdirectory) (regex: `%r`) topdir: basestring, optional Directory where to search dirs: bool, optional Whether to match directories as well as files """ for dirpath, dirnames, filenames in os.walk(topdir): names = (dirnames + filenames) if dirs else filenames # TODO: might want to uniformize on windows to use '/' paths = (op.join(dirpath, name) for name in names) for path in filter(re.compile(regex).search, paths): path = path.rstrip(sep) if exclude and re.search(exclude, path): continue if exclude_vcs and re.search(_VCS_REGEX, path): continue if exclude_datalad and re.search(_DATALAD_REGEX, path): continue yield path find_files.__doc__ %= (_VCS_REGEX, _DATALAD_REGEX) def expandpath(path, force_absolute=True): """Expand all variables and user handles in a path. By default return an absolute path """ path = expandvars(expanduser(path)) if force_absolute: path = abspath(path) return path def posix_relpath(path, start=None): """Behave like os.path.relpath, but always return POSIX paths... on any platform.""" # join POSIX style return posixpath.join( # split and relpath native style # python2.7 ntpath implementation of relpath cannot handle start=None *split( relpath(path, start=start if start is not None else ''))) def is_explicit_path(path): """Return whether a path explicitly points to a location Any absolute path, or relative path starting with either '../' or './' is assumed to indicate a location on the filesystem. 
Any other path format is not considered explicit.""" path = expandpath(path, force_absolute=False) return isabs(path) \ or path.startswith(os.curdir + os.sep) \ or path.startswith(os.pardir + os.sep) # handle this dance once, and import pathlib from here # in all other places from pathlib import ( Path, PurePath, PurePosixPath, ) def rotree(path, ro=True, chmod_files=True): """To make tree read-only or writable Parameters ---------- path : string Path to the tree/directory to chmod ro : bool, optional Whether to make it R/O (default) or RW chmod_files : bool, optional Whether to operate also on files (not just directories) """ if ro: chmod = lambda f: os.chmod(f, os.stat(f).st_mode & ~stat.S_IWRITE) else: chmod = lambda f: os.chmod(f, os.stat(f).st_mode | stat.S_IWRITE | stat.S_IREAD) for root, dirs, files in os.walk(path, followlinks=False): if chmod_files: for f in files: fullf = op.join(root, f) # might be the "broken" symlink which would fail to stat etc if exists(fullf): chmod(fullf) chmod(root) def rmtree(path, chmod_files='auto', children_only=False, *args, **kwargs): """To remove git-annex .git it is needed to make all files and directories writable again first Parameters ---------- path: Path or str Path to remove chmod_files : string or bool, optional Whether to make files writable also before removal. Usually it is just a matter of directories to have write permissions. If 'auto' it would chmod files on windows by default children_only : bool, optional If set, all files and subdirectories would be removed while the path itself (must be a directory) would be preserved `*args` : `**kwargs` : Passed into shutil.rmtree call """ # Give W permissions back only to directories, no need to bother with files if chmod_files == 'auto': chmod_files = on_windows # TODO: yoh thinks that if we could quickly check our Flyweight for # repos if any of them is under the path, and could call .precommit # on those to possibly stop batched processes etc, we did not have # to do it on case by case # Check for open files assert_no_open_files(path) # TODO the whole thing should be reimplemented with pathlib, but for now # at least accept Path path = str(path) if children_only: if not isdir(path): raise ValueError("Can remove children only of directories") for p in os.listdir(path): rmtree(op.join(path, p)) return if not (islink(path) or not isdir(path)): rotree(path, ro=False, chmod_files=chmod_files) if on_windows: # shutil fails to remove paths that exceed 260 characters on Windows machines # that did not enable long path support. A workaround to remove long paths # anyway is to preprend \\?\ to the path. # https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file?redirectedfrom=MSDN#win32-file-namespaces path = r'\\?\ '.strip() + path _rmtree(path, *args, **kwargs) else: # just remove the symlink unlink(path) def rmdir(path, *args, **kwargs): """os.rmdir with our optional checking for open files""" assert_no_open_files(path) os.rmdir(path) def get_open_files(path, log_open=False): """Get open files under a path Note: This function is very slow on Windows. 
Parameters ---------- path : str File or directory to check for open files under log_open : bool or int If set - logger level to use Returns ------- dict path : pid """ # Original idea: https://stackoverflow.com/a/11115521/1265472 import psutil files = {} # since the ones returned by psutil would not be aware of symlinks in the # path we should also get realpath for path # do absolute() in addition to always get an absolute path # even with non-existing paths on windows path = str(Path(path).resolve().absolute()) for proc in psutil.process_iter(): try: open_paths = [p.path for p in proc.open_files()] + [proc.cwd()] for p in open_paths: # note: could be done more efficiently so we do not # renormalize path over and over again etc if path_startswith(p, path): files[p] = proc # Catch a race condition where a process ends # before we can examine its files except psutil.NoSuchProcess: pass except psutil.AccessDenied: pass if files and log_open: lgr.log(log_open, "Open files under %s: %s", path, files) return files _assert_no_open_files_cfg = os.environ.get('DATALAD_ASSERT_NO_OPEN_FILES') if _assert_no_open_files_cfg: def assert_no_open_files(path): files = get_open_files(path, log_open=40) if _assert_no_open_files_cfg == 'assert': assert not files, "Got following files still open: %s" % ','.join(files) elif files: if _assert_no_open_files_cfg == 'pdb': import pdb pdb.set_trace() elif _assert_no_open_files_cfg == 'epdb': import epdb epdb.serve() pass # otherwise we would just issue that error message in the log else: def assert_no_open_files(*args, **kwargs): pass def rmtemp(f, *args, **kwargs): """Wrapper to centralize removing of temp files so we could keep them around It will not remove the temporary file/directory if DATALAD_TESTS_TEMP_KEEP environment variable is defined """ if not os.environ.get('DATALAD_TESTS_TEMP_KEEP'): if not os.path.lexists(f): lgr.debug("Path %s does not exist, so can't be removed", f) return lgr.log(5, "Removing temp file: %s", f) # Can also be a directory if isdir(f): rmtree(f, *args, **kwargs) else: unlink(f) else: lgr.info("Keeping temp file: %s", f) def file_basename(name, return_ext=False): """ Strips up to 2 extensions of length up to 4 characters and starting with alpha not a digit, so we could get rid of .tar.gz etc """ bname = basename(name) fbname = re.sub(r'(\.[a-zA-Z_]\S{1,4}){0,2}$', '', bname) if return_ext: return fbname, bname[len(fbname) + 1:] else: return fbname # unused in -core def escape_filename(filename): """Surround filename in "" and escape " in the filename """ filename = filename.replace('"', r'\"').replace('`', r'\`') filename = '"%s"' % filename return filename # unused in -core def encode_filename(filename): """Encode unicode filename """ if isinstance(filename, str): return filename.encode(sys.getfilesystemencoding()) else: return filename # unused in -core def decode_input(s): """Given input string/bytes, decode according to stdin codepage (or UTF-8) if not defined If fails -- issue warning and decode allowing for errors being replaced """ if isinstance(s, str): return s else: encoding = sys.stdin.encoding or 'UTF-8' try: return s.decode(encoding) except UnicodeDecodeError as exc: lgr.warning( "Failed to decode input string using %s encoding. " "Decoding allowing for errors", encoding) return s.decode(encoding, errors='replace') # unused in -core if on_windows: def lmtime(filepath, mtime): """Set mtime for files. 
On Windows a merely adapter to os.utime """ os.utime(filepath, (time.time(), mtime)) else: def lmtime(filepath, mtime): """Set mtime for files, while not de-referencing symlinks. To overcome absence of os.lutime Works only on linux and OSX ATM """ from .cmd import WitlessRunner # convert mtime to format touch understands [[CC]YY]MMDDhhmm[.SS] smtime = time.strftime("%Y%m%d%H%M.%S", time.localtime(mtime)) lgr.log(3, "Setting mtime for %s to %s == %s", filepath, mtime, smtime) WitlessRunner().run(['touch', '-h', '-t', '%s' % smtime, filepath]) filepath = Path(filepath) rfilepath = filepath.resolve() if filepath.is_symlink() and rfilepath.exists(): # trust no one - adjust also of the target file # since it seemed like downloading under OSX (was it using curl?) # didn't bother with timestamps lgr.log(3, "File is a symlink to %s Setting mtime for it to %s", rfilepath, mtime) os.utime(str(rfilepath), (time.time(), mtime)) # doesn't work on OSX # Runner().run(['touch', '-h', '-d', '@%s' % mtime, filepath]) def ensure_tuple_or_list(obj): """Given an object, wrap into a tuple if not list or tuple """ if isinstance(obj, (list, tuple)): return obj return (obj,) def ensure_iter(s, cls, copy=False, iterate=True): """Given not a list, would place it into a list. If None - empty list is returned Parameters ---------- s: list or anything cls: class Which iterable class to ensure copy: bool, optional If correct iterable is passed, it would generate its shallow copy iterate: bool, optional If it is not a list, but something iterable (but not a str) iterate over it. """ if isinstance(s, cls): return s if not copy else shallow_copy(s) elif isinstance(s, str): return cls((s,)) elif iterate and hasattr(s, '__iter__'): return cls(s) elif s is None: return cls() else: return cls((s,)) def ensure_list(s, copy=False, iterate=True): """Given not a list, would place it into a list. If None - empty list is returned Parameters ---------- s: list or anything copy: bool, optional If list is passed, it would generate a shallow copy of the list iterate: bool, optional If it is not a list, but something iterable (but not a str) iterate over it. """ return ensure_iter(s, list, copy=copy, iterate=iterate) def ensure_list_from_str(s, sep='\n'): """Given a multiline string convert it to a list of return None if empty Parameters ---------- s: str or list """ if not s: return None if isinstance(s, list): return s return s.split(sep) def ensure_dict_from_str(s, **kwargs): """Given a multiline string with key=value items convert it to a dictionary Parameters ---------- s: str or dict Returns None if input s is empty """ if not s: return None if isinstance(s, dict): return s out = {} for value_str in ensure_list_from_str(s, **kwargs): if '=' not in value_str: raise ValueError("{} is not in key=value format".format(repr(value_str))) k, v = value_str.split('=', 1) if k in out: err = "key {} was already defined in {}, but new value {} was provided".format(k, out, v) raise ValueError(err) out[k] = v return out def ensure_bytes(s, encoding='utf-8'): """Convert/encode unicode string to bytes. If `s` isn't a string, return it as is. Parameters ---------- encoding: str, optional Encoding to use. "utf-8" is the default """ if not isinstance(s, str): return s return s.encode(encoding) def ensure_unicode(s, encoding=None, confidence=None): """Convert/decode bytestring to unicode. If `s` isn't a bytestring, return it as is. Parameters ---------- encoding: str, optional Encoding to use. 
If None, "utf-8" is tried, and then if not a valid UTF-8, encoding will be guessed confidence: float, optional A value between 0 and 1, so if guessing of encoding is of lower than specified confidence, ValueError is raised """ if not isinstance(s, bytes): return s if encoding is None: # Figure out encoding, defaulting to 'utf-8' which is our common # target in contemporary digital society try: return s.decode('utf-8') except UnicodeDecodeError as exc: lgr.debug("Failed to decode a string as utf-8: %s", CapturedException(exc)) # And now we could try to guess from chardet import detect enc = detect(s) denc = enc.get('encoding', None) if denc: denc_confidence = enc.get('confidence', 0) if confidence is not None and denc_confidence < confidence: raise ValueError( "Failed to auto-detect encoding with high enough " "confidence. Highest confidence was %s for %s" % (denc_confidence, denc) ) lgr.log(5, "Auto-detected encoding to be %s", denc) return s.decode(denc) else: raise ValueError( "Could not decode value as utf-8, or to guess its encoding: %s" % repr(s) ) else: return s.decode(encoding) def ensure_bool(s): """Convert value into boolean following convention for strings to recognize on,True,yes as True, off,False,no as False """ if isinstance(s, str): if s.isdigit(): return bool(int(s)) sl = s.lower() if sl in {'y', 'yes', 'true', 'on'}: return True elif sl in {'n', 'no', 'false', 'off'}: return False else: raise ValueError("Do not know how to treat %r as a boolean" % s) return bool(s) def as_unicode(val, cast_types=object): """Given an arbitrary value, would try to obtain unicode value of it For unicode it would return original value, for python2 str or python3 bytes it would use ensure_unicode, for None - an empty (unicode) string, and for any other type (see `cast_types`) - would apply the unicode constructor. If value is not an instance of `cast_types`, TypeError is thrown Parameters ---------- cast_types: type Which types to cast to unicode by providing to constructor """ if val is None: return u'' elif isinstance(val, str): return val elif isinstance(val, unicode_srctypes): return ensure_unicode(val) elif isinstance(val, cast_types): return str(val) else: raise TypeError( "Value %r is not of any of known or provided %s types" % (val, cast_types)) def unique(seq, key=None, reverse=False): """Given a sequence return a list only with unique elements while maintaining order This is the fastest solution. See https://www.peterbe.com/plog/uniqifiers-benchmark and http://stackoverflow.com/a/480227/1265472 for more information. Enhancement -- added ability to compare for uniqueness using a key function Parameters ---------- seq: Sequence to analyze key: callable, optional Function to call on each element so we could decide not on a full element, but on its member etc reverse: bool, optional If True, uniqueness checked in the reverse order, so that the later ones will take the order """ seen = set() seen_add = seen.add trans = reversed if reverse else lambda x: x if not key: out = [x for x in trans(seq) if not (x in seen or seen_add(x))] else: # OPT: could be optimized, since key is called twice, but for our cases # should be just as fine out = [x for x in trans(seq) if not (key(x) in seen or seen_add(key(x)))] return out[::-1] if reverse else out def all_same(items): """Quick check if all items are the same. 
Identical to a check like len(set(items)) == 1 but should be more efficient while working on generators, since would return False as soon as any difference detected thus possibly avoiding unnecessary evaluations """ first = True first_item = None for item in items: if first: first = False first_item = item else: if item != first_item: return False # So we return False if was empty return not first def map_items(func, v): """A helper to apply `func` to all elements (keys and values) within dict No type checking of values passed to func is done, so `func` should be resilient to values which it should not handle Initial usecase - apply_recursive(url_fragment, ensure_unicode) """ # map all elements within item return v.__class__( item.__class__(map(func, item)) for item in v.items() ) def partition(items, predicate=bool): """Partition `items` by `predicate`. Parameters ---------- items : iterable predicate : callable A function that will be mapped over each element in `items`. The elements will partitioned based on whether the return value is false or true. Returns ------- A tuple with two generators, the first for 'false' items and the second for 'true' ones. Notes ----- Taken from Peter Otten's snippet posted at https://nedbatchelder.com/blog/201306/filter_a_list_into_two_parts.html """ a, b = tee((predicate(item), item) for item in items) return ((item for pred, item in a if not pred), (item for pred, item in b if pred)) def generate_chunks(container, size): """Given a container, generate chunks from it with size up to `size` """ # There could be a "smarter" solution but I think this would suffice assert size > 0, "Size should be non-0 positive" while container: yield container[:size] container = container[size:] def generate_file_chunks(files, cmd=None): """Given a list of files, generate chunks of them to avoid exceeding cmdline length Parameters ---------- files: list of str cmd: str or list of str, optional Command to account for as well """ files = ensure_list(files) cmd = ensure_list(cmd) maxl = max(map(len, files)) if files else 0 chunk_size = max( 1, # should at least be 1. If blows then - not our fault (CMD_MAX_ARG - sum((len(x) + 3) for x in cmd) - 4 # for '--' below ) // (maxl + 3) # +3 for possible quotes and a space ) # TODO: additional treatment for "too many arguments"? although # as https://github.com/datalad/datalad/issues/1883#issuecomment # -436272758 # shows there seems to be no hardcoded limit on # of arguments, # but may be we decide to go for smth like follow to be on safe side # chunk_size = min(10240 - len(cmd), chunk_size) file_chunks = generate_chunks(files, chunk_size) return file_chunks # # Generators helpers # def saved_generator(gen): """Given a generator returns two generators, where 2nd one just replays So the first one would be going through the generated items and 2nd one would be yielding saved items """ saved = [] def gen1(): for x in gen: # iterating over original generator saved.append(x) yield x def gen2(): for x in saved: # yielding saved entries yield x return gen1(), gen2() # # Decorators # # Originally better_wraps was created to provide `wrapt`-based, instead of # `functools.wraps` implementation to preserve the correct signature of the # decorated function. By using inspect.signature in our getargspec, which # works fine on `functools.wraps`ed functions, we mediated this necessity. better_wraps = wraps # Borrowed from pandas # Copyright: 2011-2014, Lambda Foundry, Inc. 
and PyData Development Team # License: BSD-3 def optional_args(decorator): """allows a decorator to take optional positional and keyword arguments. Assumes that taking a single, callable, positional argument means that it is decorating a function, i.e. something like this:: @my_decorator def function(): pass Calls decorator with decorator(f, `*args`, `**kwargs`)""" @better_wraps(decorator) def wrapper(*args, **kwargs): def dec(f): return decorator(f, *args, **kwargs) is_decorating = not kwargs and len(args) == 1 and isinstance(args[0], Callable) if is_decorating: f = args[0] args = [] return dec(f) else: return dec return wrapper # TODO: just provide decorators for tempfile.mk* functions. This is ugly! def get_tempfile_kwargs(tkwargs=None, prefix="", wrapped=None): """Updates kwargs to be passed to tempfile. calls depending on env vars """ if tkwargs is None: tkwargs_ = {} else: # operate on a copy of tkwargs to avoid any side-effects tkwargs_ = tkwargs.copy() # TODO: don't remember why I had this one originally # if len(targs)<2 and \ if 'prefix' not in tkwargs_: tkwargs_['prefix'] = '_'.join( ['datalad_temp'] + ([prefix] if prefix else []) + ([''] if (on_windows or not wrapped) else [wrapped.__name__])) directory = os.environ.get('TMPDIR') if directory and 'dir' not in tkwargs_: tkwargs_['dir'] = directory return tkwargs_ @optional_args def line_profile(func): """Q&D helper to line profile the function and spit out stats """ import line_profiler prof = line_profiler.LineProfiler() @wraps(func) def _wrap_line_profile(*args, **kwargs): try: pfunc = prof(func) return pfunc(*args, **kwargs) finally: prof.print_stats() return _wrap_line_profile # unused in -core @optional_args def collect_method_callstats(func): """Figure out methods which call the method repeatedly on the same instance Use case(s): - .repo is expensive since does all kinds of checks. - .config is expensive transitively since it calls .repo each time TODO: - fancy one could look through the stack for the same id(self) to see if that location is already in memo. That would hint to the cases where object is not passed into underlying functions, causing them to redo the same work over and over again - ATM might flood with all "1 lines" calls which are not that informative. The underlying possibly suboptimal use might be coming from their callers. 
It might or not relate to the previous TODO """ from collections import defaultdict import traceback from time import time memo = defaultdict(lambda: defaultdict(int)) # it will be a dict of lineno: count # gross timing times = [] toppath = dirname(__file__) + sep @wraps(func) def _wrap_collect_method_callstats(*args, **kwargs): try: self = args[0] stack = traceback.extract_stack() caller = stack[-2] stack_sig = \ "{relpath}:{s.name}".format( s=caller, relpath=relpath(caller.filename, toppath)) sig = (id(self), stack_sig) # we will count based on id(self) + wherefrom memo[sig][caller.lineno] += 1 t0 = time() return func(*args, **kwargs) finally: times.append(time() - t0) pass def print_stats(): print("The cost of property {}:".format(func.__name__)) if not memo: print("None since no calls") return # total count counts = {k: sum(v.values()) for k,v in memo.items()} total = sum(counts.values()) ids = {self_id for (self_id, _) in memo} print(" Total: {} calls from {} objects with {} contexts taking {:.2f} sec" .format(total, len(ids), len(memo), sum(times))) # now we need to sort by value for (self_id, caller), count in sorted(counts.items(), key=lambda x: x[1], reverse=True): print(" {} {}: {} from {} lines" .format(self_id, caller, count, len(memo[(self_id, caller)]))) # Upon total exit we print the stats import atexit atexit.register(print_stats) return _wrap_collect_method_callstats # Borrowed from duecredit to wrap duecredit-handling to guarantee failsafe def never_fail(f): """Assure that function never fails -- all exceptions are caught Returns `None` if function fails internally. """ @wraps(f) def wrapped_func(*args, **kwargs): try: return f(*args, **kwargs) except Exception as e: lgr.warning( "DataLad internal failure while running %s: %r. " "Please report at https://github.com/datalad/datalad/issues" % (f, e) ) if os.environ.get('DATALAD_ALLOW_FAIL', False): return f else: return wrapped_func # # Context Managers # # unused in -core @contextmanager def nothing_cm(): """Just a dummy cm to programmically switch context managers""" yield @contextmanager def swallow_outputs(): """Context manager to help consuming both stdout and stderr, and print() stdout is available as cm.out and stderr as cm.err whenever cm is the yielded context manager. Internally uses temporary files to guarantee absent side-effects of swallowing into StringIO which lacks .fileno. print mocking is necessary for some uses where sys.stdout was already bound to original sys.stdout, thus mocking it later had no effect. 
Overriding print function had desired effect """ class StringIOAdapter(object): """Little adapter to help getting out/err values """ def __init__(self): kw = get_tempfile_kwargs({}, prefix="outputs") self._out = NamedTemporaryFile(delete=False, mode='w', **kw) self._err = NamedTemporaryFile(delete=False, mode='w', **kw) def _read(self, h): with open(h.name) as f: return f.read() @property def out(self): if not self._out.closed: self._out.flush() return self._read(self._out) @property def err(self): if not self._err.closed: self._err.flush() return self._read(self._err) @property def handles(self): return self._out, self._err def cleanup(self): self._out.close() self._err.close() out_name = self._out.name err_name = self._err.name from datalad import cfg if cfg.getbool('datalad.log', 'outputs', default=False) \ and lgr.getEffectiveLevel() <= logging.DEBUG: for s, sname in ((self.out, 'stdout'), (self.err, 'stderr')): if s: pref = os.linesep + "| " lgr.debug("Swallowed %s:%s%s", sname, pref, s.replace(os.linesep, pref)) else: lgr.debug("Nothing was swallowed for %s", sname) del self._out del self._err gc.collect() rmtemp(out_name) rmtemp(err_name) def fake_print(*args, **kwargs): sep = kwargs.pop('sep', ' ') end = kwargs.pop('end', '\n') file = kwargs.pop('file', sys.stdout) if file in (oldout, olderr, sys.stdout, sys.stderr): # we mock try: sys.stdout.write(sep.join(args) + end) except UnicodeEncodeError as exc: lgr.error( "Failed to write to mocked stdout, got %s, continue as it " "didn't happen", exc) else: # must be some other file one -- leave it alone oldprint(*args, sep=sep, end=end, file=file) from .ui import ui # preserve -- they could have been mocked already oldprint = getattr(builtins, 'print') oldout, olderr = sys.stdout, sys.stderr olduiout = ui.out adapter = StringIOAdapter() try: sys.stdout, sys.stderr = adapter.handles ui.out = adapter.handles[0] setattr(builtins, 'print', fake_print) yield adapter finally: sys.stdout, sys.stderr, ui.out = oldout, olderr, olduiout setattr(builtins, 'print', oldprint) adapter.cleanup() @contextmanager def swallow_logs(new_level=None, file_=None, name='datalad'): """Context manager to consume all logs. """ lgr = logging.getLogger(name) # Keep old settings old_level = lgr.level old_handlers = lgr.handlers # Let's log everything into a string # TODO: generalize with the one for swallow_outputs class StringIOAdapter(object): """Little adapter to help getting out values And to stay consistent with how swallow_outputs behaves """ def __init__(self): if file_ is None: kw = get_tempfile_kwargs({}, prefix="logs") self._out = NamedTemporaryFile(mode='a', delete=False, **kw) else: out_file = file_ # PY3 requires clearly one or another. race condition possible self._out = open(out_file, 'a') self._final_out = None def _read(self, h): with open(h.name) as f: return f.read() @property def out(self): if self._final_out is not None: # we closed and cleaned up already return self._final_out else: self._out.flush() return self._read(self._out) @property def lines(self): return self.out.split('\n') @property def handle(self): return self._out def cleanup(self): # store for access while object exists self._final_out = self.out self._out.close() out_name = self._out.name del self._out gc.collect() if not file_: rmtemp(out_name) def assert_logged(self, msg=None, level=None, regex=True, **kwargs): """Provide assertion on whether a msg was logged at a given level If neither `msg` nor `level` provided, checks if anything was logged at all. 
Parameters ---------- msg: str, optional Message (as a regular expression, if `regex`) to be searched. If no msg provided, checks if anything was logged at a given level. level: str, optional String representing the level to be logged regex: bool, optional If False, regular `assert_in` is used **kwargs: str, optional Passed to `assert_re_in` or `assert_in` """ from datalad.tests.utils import assert_re_in from datalad.tests.utils import assert_in if regex: match = r'\[%s\] ' % level if level else r"\[\S+\] " else: match = '[%s] ' % level if level else '' if msg: match += msg if match: (assert_re_in if regex else assert_in)(match, self.out, **kwargs) else: assert not kwargs, "no kwargs to be passed anywhere" assert self.out, "Nothing was logged!?" adapter = StringIOAdapter() # TODO: it does store messages but without any formatting, i.e. even without # date/time prefix etc. IMHO it should preserve formatting in case if file_ is # set swallow_handler = logging.StreamHandler(adapter.handle) # we want to log levelname so we could test against it swallow_handler.setFormatter( logging.Formatter('[%(levelname)s] %(message)s')) swallow_handler.filters = sum([h.filters for h in old_handlers], []) lgr.handlers = [swallow_handler] if old_level < logging.DEBUG: # so if HEAVYDEBUG etc -- show them! lgr.handlers += old_handlers if isinstance(new_level, str): new_level = getattr(logging, new_level) if new_level is not None: lgr.setLevel(new_level) try: yield adapter # TODO: if file_ and there was an exception -- most probably worth logging it? # although ideally it should be the next log outside added to that file_ ... oh well finally: lgr.handlers = old_handlers lgr.setLevel(old_level) adapter.cleanup() # TODO: May be melt in with swallow_logs at some point: @contextmanager def disable_logger(logger=None): """context manager to temporarily disable logging This is to provide one of swallow_logs' purposes without unnecessarily creating temp files (see gh-1865) Parameters ---------- logger: Logger Logger whose handlers will be ordered to not log anything. Default: datalad's topmost Logger ('datalad') """ class NullFilter(logging.Filter): """Filter class to reject all records """ def filter(self, record): return 0 if logger is None: # default: all of datalad's logging: logger = logging.getLogger('datalad') filter_ = NullFilter(logger.name) [h.addFilter(filter_) for h in logger.handlers] try: yield logger finally: [h.removeFilter(filter_) for h in logger.handlers] # # Additional handlers # _sys_excepthook = sys.excepthook # Just in case we ever need original one def setup_exceptionhook(ipython=False): """Overloads default sys.excepthook with our exceptionhook handler. If interactive, our exceptionhook handler will invoke pdb.post_mortem; if not interactive, then invokes default handler. """ def _datalad_pdb_excepthook(type, value, tb): import traceback traceback.print_exception(type, value, tb) print() if is_interactive(): import pdb pdb.post_mortem(tb) if ipython: from IPython.core import ultratb sys.excepthook = ultratb.FormattedTB(mode='Verbose', # color_scheme='Linux', call_pdb=is_interactive()) else: sys.excepthook = _datalad_pdb_excepthook def ensure_dir(*args): """Make sure directory exists. Joins the list of arguments to an os-specific path to the desired directory and creates it, if it not exists yet. 
""" dirname = op.join(*args) if not exists(dirname): os.makedirs(dirname) return dirname def updated(d, update): """Return a copy of the input with the 'update' Primarily for updating dictionaries """ d = d.copy() d.update(update) return d _pwd_mode = None def _switch_to_getcwd(msg, *args): global _pwd_mode _pwd_mode = 'cwd' lgr.debug( msg + ". From now on will be returning os.getcwd(). Directory" " symlinks in the paths will be resolved", *args ) # TODO: we might want to mitigate by going through all flywheighted # repos and tuning up their .paths to be resolved? def getpwd(): """Try to return a CWD without dereferencing possible symlinks This function will try to use PWD environment variable to provide a current working directory, possibly with some directories along the path being symlinks to other directories. Unfortunately, PWD is used/set only by the shell and such functions as `os.chdir` and `os.getcwd` nohow use or modify it, thus `os.getcwd()` returns path with links dereferenced. While returning current working directory based on PWD env variable we verify that the directory is the same as `os.getcwd()` after resolving all symlinks. If that verification fails, we fall back to always use `os.getcwd()`. Initial decision to either use PWD env variable or os.getcwd() is done upon the first call of this function. """ global _pwd_mode if _pwd_mode is None: # we need to decide! try: pwd = os.environ['PWD'] if on_windows and pwd and pwd.startswith('/'): # It should be a path from MSYS. # - it might start with a drive letter or not # - it seems to be "illegal" to have a single letter directories # under / path, i.e. if created - they aren't found # - 'ln -s' does not fail to create a "symlink" but it just # copies! # so we are not likely to need original PWD purpose on # those systems # Verdict: _pwd_mode = 'cwd' else: _pwd_mode = 'PWD' except KeyError: _pwd_mode = 'cwd' if _pwd_mode == 'cwd': return os.getcwd() elif _pwd_mode == 'PWD': try: cwd = os.getcwd() except OSError as exc: if "o such file" in str(exc): # directory was removed but we promised to be robust and # still report the path we might know since we are still in PWD # mode cwd = None else: raise try: pwd = os.environ['PWD'] # do absolute() in addition to always get an absolute path # even with non-existing paths on windows pwd_real = str(Path(pwd).resolve().absolute()) # This logic would fail to catch the case where chdir did happen # to the directory where current PWD is pointing to, e.g. # $> ls -ld $PWD # lrwxrwxrwx 1 yoh yoh 5 Oct 11 13:27 /home/yoh/.tmp/tmp -> /tmp// # hopa:~/.tmp/tmp # $> python -c 'import os; os.chdir("/tmp"); from datalad.utils import getpwd; print(getpwd(), os.getcwd())' # ('/home/yoh/.tmp/tmp', '/tmp') # but I guess that should not be too harmful if cwd is not None and pwd_real != cwd: _switch_to_getcwd( "realpath of PWD=%s is %s whenever os.getcwd()=%s", pwd, pwd_real, cwd ) return cwd return pwd except KeyError: _switch_to_getcwd("PWD env variable is no longer available") return cwd # Must not happen, but may be someone # evil purges PWD from environ? else: raise RuntimeError( "Must have not got here. " "pwd_mode must be either cwd or PWD. And it is now %r" % (_pwd_mode,) ) class chpwd(object): """Wrapper around os.chdir which also adjusts environ['PWD'] The reason is that otherwise PWD is simply inherited from the shell and we have no ability to assess directory path without dereferencing symlinks. 
If used as a context manager it allows to temporarily change directory to the given path """ def __init__(self, path, mkdir=False, logsuffix=''): if path: pwd = getpwd() self._prev_pwd = pwd else: self._prev_pwd = None return if not isabs(path): path = normpath(op.join(pwd, path)) if not os.path.exists(path) and mkdir: self._mkdir = True os.mkdir(path) else: self._mkdir = False lgr.debug("chdir %r -> %r %s", self._prev_pwd, path, logsuffix) os.chdir(path) # for grep people -- ok, to chdir here! os.environ['PWD'] = str(path) def __enter__(self): # nothing more to do really, chdir was in the constructor pass def __exit__(self, exc_type, exc_val, exc_tb): if self._prev_pwd: # Need to use self.__class__ so this instance, if the entire # thing mocked during the test, still would use correct chpwd self.__class__(self._prev_pwd, logsuffix="(coming back)") def dlabspath(path, norm=False): """Symlinks-in-the-cwd aware abspath os.path.abspath relies on os.getcwd() which would not know about symlinks in the path TODO: we might want to norm=True by default to match behavior of os .path.abspath? """ if not isabs(path): # if not absolute -- relative to pwd path = op.join(getpwd(), path) return normpath(path) if norm else path def with_pathsep(path): """Little helper to guarantee that path ends with /""" return path + sep if not path.endswith(sep) else path def get_path_prefix(path, pwd=None): """Get path prefix (for current directory) Returns relative path to the topdir, if we are under topdir, and if not absolute path to topdir. If `pwd` is not specified - current directory assumed """ pwd = pwd or getpwd() path = dlabspath(path) path_ = with_pathsep(path) pwd_ = with_pathsep(pwd) common = commonprefix((path_, pwd_)) if common.endswith(sep) and common in {path_, pwd_}: # we are in subdir or above the path = use relative path location_prefix = relpath(path, pwd) # if benign "here" - cut off if location_prefix in (curdir, curdir + sep): location_prefix = '' return location_prefix else: # just return absolute path return path def _get_normalized_paths(path, prefix): if isabs(path) != isabs(prefix): raise ValueError("Both paths must either be absolute or relative. " "Got %r and %r" % (path, prefix)) path = with_pathsep(path) prefix = with_pathsep(prefix) return path, prefix def path_startswith(path, prefix): """Return True if path starts with prefix path Parameters ---------- path: str prefix: str """ path, prefix = _get_normalized_paths(path, prefix) return path.startswith(prefix) def path_is_subpath(path, prefix): """Return True if path is a subpath of prefix It will return False if path == prefix. Parameters ---------- path: str prefix: str """ path, prefix = _get_normalized_paths(path, prefix) return (len(prefix) < len(path)) and path.startswith(prefix) def knows_annex(path): """Returns whether at a given path there is information about an annex It is just a thin wrapper around GitRepo.is_with_annex() classmethod which also checks for `path` to exist first. This includes actually present annexes, but also uninitialized ones, or even the presence of a remote annex branch. 
""" from os.path import exists if not exists(path): lgr.debug("No annex: test path {0} doesn't exist".format(path)) return False from datalad.support.gitrepo import GitRepo return GitRepo(path, init=False, create=False).is_with_annex() @contextmanager def make_tempfile(content=None, wrapped=None, **tkwargs): """Helper class to provide a temporary file name and remove it at the end (context manager) Parameters ---------- mkdir : bool, optional (default: False) If True, temporary directory created using tempfile.mkdtemp() content : str or bytes, optional Content to be stored in the file created wrapped : function, optional If set, function name used to prefix temporary file name `**tkwargs`: All other arguments are passed into the call to tempfile.mk{,d}temp(), and resultant temporary filename is passed as the first argument into the function t. If no 'prefix' argument is provided, it will be constructed using module and function names ('.' replaced with '_'). To change the used directory without providing keyword argument 'dir' set DATALAD_TESTS_TEMP_DIR. Examples -------- >>> from os.path import exists >>> from datalad.utils import make_tempfile >>> with make_tempfile() as fname: ... k = open(fname, 'w').write('silly test') >>> assert not exists(fname) # was removed >>> with make_tempfile(content="blah") as fname: ... assert open(fname).read() == "blah" """ if tkwargs.get('mkdir', None) and content is not None: raise ValueError("mkdir=True while providing content makes no sense") tkwargs_ = get_tempfile_kwargs(tkwargs, wrapped=wrapped) # if DATALAD_TESTS_TEMP_DIR is set, use that as directory, # let mktemp handle it otherwise. However, an explicitly provided # dir=... will override this. mkdir = tkwargs_.pop('mkdir', False) filename = {False: tempfile.mktemp, True: tempfile.mkdtemp}[mkdir](**tkwargs_) # MIH: not clear to me why we need to perform this (possibly expensive) # resolve. It was already part of the original implementation # 008d9ab8cc3e0170c0a9b8479e80dee9ffe6eb7f filename = Path(filename).resolve() if content: (filename.write_bytes if isinstance(content, bytes) else filename.write_text)(content) # TODO globbing below can also be done with pathlib filename = str(filename) if __debug__: lgr.debug( 'Created temporary %s named %s', 'directory' if mkdir else 'file', filename) try: yield filename finally: # glob here for all files with the same name (-suffix) # would be useful whenever we requested .img filename, # and function creates .hdr as well # MIH: this is undocumented behavior, and undesired in the general # case. it should be made conditional and explicit lsuffix = len(tkwargs_.get('suffix', '')) filename_ = lsuffix and filename[:-lsuffix] or filename filenames = glob.glob(filename_ + '*') if len(filename_) < 3 or len(filenames) > 5: # For paranoid yoh who stepped into this already ones ;-) lgr.warning("It is unlikely that it was intended to remove all" " files matching %r. 
Skipping" % filename_) return for f in filenames: try: rmtemp(f) except OSError: # pragma: no cover pass def _path_(*p): """Given a path in POSIX" notation, regenerate one in native to the env one""" if on_windows: return op.join(*map(lambda x: op.join(*x.split('/')), p)) else: # Assume that all others as POSIX compliant so nothing to be done return op.join(*p) def get_timestamp_suffix(time_=None, prefix='-'): """Return a time stamp (full date and time up to second) primarily to be used for generation of log files names """ args = [] if time_ is not None: if isinstance(time_, int): time_ = time.gmtime(time_) args.append(time_) return time.strftime(prefix + TIMESTAMP_FMT, *args) # unused in -core def get_logfilename(dspath, cmd='datalad'): """Return a filename to use for logging under a dataset/repository directory would be created if doesn't exist, but dspath must exist and be a directory """ assert(exists(dspath)) assert(isdir(dspath)) ds_logdir = ensure_dir(dspath, '.git', 'datalad', 'logs') # TODO: use WEB_META_LOG whenever #789 merged return op.join(ds_logdir, 'crawl-%s.log' % get_timestamp_suffix()) def get_trace(edges, start, end, trace=None): """Return the trace/path to reach a node in a tree. Parameters ---------- edges : sequence(2-tuple) The tree given by a sequence of edges (parent, child) tuples. The nodes can be identified by any value and data type that supports the '==' operation. start : Identifier of the start node. Must be present as a value in the parent location of an edge tuple in order to be found. end : Identifier of the target/end node. Must be present as a value in the child location of an edge tuple in order to be found. trace : list Mostly useful for recursive calls, and used internally. Returns ------- None or list Returns a list with the trace to the target (the starts and the target are not included in the trace, hence if start and end are directly connected an empty list is returned), or None when no trace to the target can be found, or start and end are identical. """ # the term trace is used to avoid confusion with a path in the sense # of a filesystem path, but the analogy fits and nodes can be paths if trace is None: trace = [] if not edges: raise ValueError("no edges given") for cand in edges: cand_super, cand_sub = cand if cand_sub in trace: # only DAGs, skip any cyclic traces continue if trace and cand_super != trace[-1]: # only consider edges that lead off the end of the trace continue if not trace and cand_super != start: # we got nothing yet, and this edges is not matching the start continue if cand_sub == end: return trace # dive into potential subnodes cand_trace = get_trace( edges, start, end, trace + [cand_sub]) if cand_trace: return cand_trace return None def get_dataset_root(path): """Return the root of an existent dataset containing a given path The root path is returned in the same absolute or relative form as the input argument. If no associated dataset exists, or the input path doesn't exist, None is returned. If `path` is a symlink or something other than a directory, its the root dataset containing its parent directory will be reported. If none can be found, at a symlink at `path` is pointing to a dataset, `path` itself will be reported as the root. 
Parameters ---------- path : Path-like Returns ------- str or None """ path = str(path) suffix = '.git' altered = None if islink(path) or not isdir(path): altered = path path = dirname(path) apath = abspath(path) # while we can still go up while split(apath)[1]: if exists(op.join(path, suffix)): return path # new test path in the format we got it path = normpath(op.join(path, os.pardir)) # no luck, next round apath = abspath(path) # if we applied dirname() at the top, we give it another go with # the actual path, if it was itself a symlink, it could be the # top-level dataset itself if altered and exists(op.join(altered, suffix)): return altered return None # ATM used in datalad_crawler extension, so do not remove yet def try_multiple(ntrials, exception, base, f, *args, **kwargs): """Call f multiple times making exponentially growing delay between the calls""" for trial in range(1, ntrials+1): try: return f(*args, **kwargs) except exception as exc: if trial == ntrials: raise # just reraise on the last trial t = base ** trial lgr.warning("Caught %s on trial #%d. Sleeping %f and retrying", CapturedException(exc), trial, t) sleep(t) @optional_args def try_multiple_dec( f, ntrials=None, duration=0.1, exceptions=None, increment_type=None, exceptions_filter=None, logger=None, ): """Decorator to try function multiple times. Main purpose is to decorate functions dealing with removal of files/directories and which might need a few seconds to work correctly on Windows which takes its time to release files/directories. Parameters ---------- ntrials: int, optional duration: float, optional Seconds to sleep before retrying. increment_type: {None, 'exponential'} Note that if it is exponential, duration should typically be > 1.0 so it grows with higher power exceptions: Exception or tuple of Exceptions, optional Exception or a tuple of multiple exceptions, on which to retry exceptions_filter: callable, optional If provided, this function will be called with a caught exception instance. If function returns True - we will re-try, if False - exception will be re-raised without retrying. logger: callable, optional Logger to log upon failure. If not provided, will use stock logger at the level of 5 (heavy debug). """ if not exceptions: exceptions = (OSError, WindowsError, PermissionError) \ if on_windows else OSError if not ntrials: # Life goes fast on proper systems, no need to delay it much ntrials = 100 if on_windows else 10 if logger is None: def logger(*args, **kwargs): return lgr.log(5, *args, **kwargs) assert increment_type in {None, 'exponential'} @wraps(f) def _wrap_try_multiple_dec(*args, **kwargs): t = duration for trial in range(ntrials): try: return f(*args, **kwargs) except exceptions as exc: if exceptions_filter and not exceptions_filter(exc): raise if trial < ntrials - 1: if increment_type == 'exponential': t = duration ** (trial + 1) logger( "Caught %s on trial #%d. Sleeping %f and retrying", CapturedException(exc), trial, t) sleep(t) else: raise return _wrap_try_multiple_dec @try_multiple_dec def unlink(f): """'Robust' unlink. Would try multiple times On windows boxes there is evidence for a latency of more than a second until a file is considered no longer "in-use". WindowsError is not known on Linux, and if IOError or any other exception is thrown then if except statement has WindowsError in it -- NameError also see gh-2533 """ # Check for open files assert_no_open_files(f) return os.unlink(f) @try_multiple_dec def _rmtree(*args, **kwargs): """Just a helper to decorate shutil.rmtree. 
rmtree defined above does more and ideally should not itself be decorated since a recursive definition and does checks for open files inside etc - might be too runtime expensive """ return shutil.rmtree(*args, **kwargs) def slash_join(base, extension): """Join two strings with a '/', avoiding duplicate slashes If any of the strings is None the other is returned as is. """ if extension is None: return base if base is None: return extension return '/'.join( (base.rstrip('/'), extension.lstrip('/'))) # # IO Helpers # # unused in -core def open_r_encdetect(fname, readahead=1000): """Return a file object in read mode with auto-detected encoding This is helpful when dealing with files of unknown encoding. Parameters ---------- readahead: int, optional How many bytes to read for guessing the encoding type. If negative - full file will be read """ from chardet import detect import io # read some bytes from the file with open(fname, 'rb') as f: head = f.read(readahead) enc = detect(head) denc = enc.get('encoding', None) lgr.debug("Auto-detected encoding %s for file %s (confidence: %s)", denc, fname, enc.get('confidence', 'unknown')) return io.open(fname, encoding=denc) def read_file(fname, decode=True): """A helper to read file passing content via ensure_unicode Parameters ---------- decode: bool, optional if False, no ensure_unicode and file content returned as bytes """ with open(fname, 'rb') as f: content = f.read() return ensure_unicode(content) if decode else content def read_csv_lines(fname, dialect=None, readahead=16384, **kwargs): """A generator of dict records from a CSV/TSV Automatically guesses the encoding for each record to convert to UTF-8 Parameters ---------- fname: str Filename dialect: str, optional Dialect to specify to csv.reader. If not specified -- guessed from the file, if fails to guess, "excel-tab" is assumed readahead: int, optional How many bytes to read from the file to guess the type **kwargs Passed to `csv.reader` """ import csv if dialect is None: with open(fname) as tsvfile: # add robustness, use a sniffer try: dialect = csv.Sniffer().sniff(tsvfile.read(readahead)) except Exception as exc: lgr.warning( 'Could not determine file-format, assuming TSV: %s', CapturedException(exc) ) dialect = 'excel-tab' kw = dict(encoding='utf-8') with open(fname, 'r', **kw) as tsvfile: # csv.py doesn't do Unicode; encode temporarily as UTF-8: csv_reader = csv.reader( tsvfile, dialect=dialect, **kwargs ) header = None for row in csv_reader: # decode UTF-8 back to Unicode, cell by cell: row_unicode = map(ensure_unicode, row) if header is None: header = list(row_unicode) else: yield dict(zip(header, row_unicode)) def import_modules(modnames, pkg, msg="Failed to import {module}", log=lgr.debug): """Helper to import a list of modules without failing if N/A Parameters ---------- modnames: list of str List of module names to import pkg: str Package under which to import msg: str, optional Message template for .format() to log at DEBUG level if import fails. Keys {module} and {package} will be provided and ': {exception}' appended log: callable, optional Logger call to use for logging messages """ from importlib import import_module _globals = globals() mods_loaded = [] if pkg and not pkg in sys.modules: # with python 3.5.1 (ok with 3.5.5) somehow kept running into # Failed to import dlsub1: Parent module 'dltestm1' not loaded # while running the test. 
Preloading pkg resolved the issue import_module(pkg) for modname in modnames: try: _globals[modname] = mod = import_module( '.{}'.format(modname), pkg) mods_loaded.append(mod) except Exception as exc: from datalad.support.exceptions import CapturedException ce = CapturedException(exc) log((msg + ': {exception}').format( module=modname, package=pkg, exception=ce.message)) return mods_loaded def import_module_from_file(modpath, pkg=None, log=lgr.debug): """Import provided module given a path TODO: - RF/make use of it in pipeline.py which has similar logic - join with import_modules above? Parameters ---------- pkg: module, optional If provided, and modpath is under pkg.__path__, relative import will be used """ assert(modpath.endswith('.py')) # for now just for .py files log("Importing %s" % modpath) modname = basename(modpath)[:-3] relmodpath = None if pkg: for pkgpath in pkg.__path__: if path_is_subpath(modpath, pkgpath): # for now relying on having .py extension -- assertion above relmodpath = '.' + relpath(modpath[:-3], pkgpath).replace(sep, '.') break try: if relmodpath: from importlib import import_module mod = import_module(relmodpath, pkg.__name__) else: dirname_ = dirname(modpath) try: sys.path.insert(0, dirname_) mod = __import__(modname, level=0) finally: if dirname_ in sys.path: sys.path.pop(sys.path.index(dirname_)) else: log("Expected path %s to be within sys.path, but it was gone!" % dirname_) except Exception as e: raise RuntimeError( "Failed to import module from %s" % modpath) from e return mod def get_encoding_info(): """Return a dictionary with various encoding/locale information""" import sys, locale from collections import OrderedDict return OrderedDict([ ('default', sys.getdefaultencoding()), ('filesystem', sys.getfilesystemencoding()), ('locale.prefered', locale.getpreferredencoding()), ]) def get_envvars_info(): from collections import OrderedDict envs = [] for var, val in os.environ.items(): if ( var.startswith('PYTHON') or var.startswith('LC_') or var.startswith('GIT_') or var in ('LANG', 'LANGUAGE', 'PATH') ): envs.append((var, val)) return OrderedDict(envs) # This class is modified from Snakemake (v5.1.4) class SequenceFormatter(string.Formatter): """string.Formatter subclass with special behavior for sequences. This class delegates formatting of individual elements to another formatter object. Non-list objects are formatted by calling the delegate formatter's "format_field" method. List-like objects (list, tuple, set, frozenset) are formatted by formatting each element of the list according to the specified format spec using the delegate formatter and then joining the resulting strings with a separator (space by default). """ def __init__(self, separator=" ", element_formatter=string.Formatter(), *args, **kwargs): self.separator = separator self.element_formatter = element_formatter def format_element(self, elem, format_spec): """Format a single element For sequences, this is called once for each element in a sequence. For anything else, it is called on the entire object. It is intended to be overridden in subclases. 
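    Example
    -------
    A small, self-contained illustration of the sequence handling described
    in the class docstring above (non-sequence values pass through the
    delegate formatter unchanged):

    >>> SequenceFormatter(separator=", ").format("{}", ["a", "b", "c"])
    'a, b, c'
    >>> SequenceFormatter().format("{}", "not-a-sequence")
    'not-a-sequence'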
""" return self.element_formatter.format_field(elem, format_spec) def format_field(self, value, format_spec): if isinstance(value, (list, tuple, set, frozenset)): return self.separator.join(self.format_element(v, format_spec) for v in value) else: return self.format_element(value, format_spec) # TODO: eventually we might want to make use of attr module class File(object): """Helper for a file entry in the create_tree/@with_tree It allows to define additional settings for entries """ def __init__(self, name, executable=False): """ Parameters ---------- name : str Name of the file executable: bool, optional Make it executable """ self.name = name self.executable = executable def __str__(self): return self.name def create_tree_archive(path, name, load, overwrite=False, archives_leading_dir=True): """Given an archive `name`, create under `path` with specified `load` tree """ from datalad.support.archives import compress_files dirname = file_basename(name) full_dirname = op.join(path, dirname) os.makedirs(full_dirname) create_tree(full_dirname, load, archives_leading_dir=archives_leading_dir) # create archive if archives_leading_dir: compress_files([dirname], name, path=path, overwrite=overwrite) else: compress_files(list(map(basename, glob.glob(op.join(full_dirname, '*')))), op.join(pardir, name), path=op.join(path, dirname), overwrite=overwrite) # remove original tree rmtree(full_dirname) def create_tree(path, tree, archives_leading_dir=True, remove_existing=False): """Given a list of tuples (name, load) create such a tree if load is a tuple itself -- that would create either a subtree or an archive with that content and place it into the tree if name ends with .tar.gz """ lgr.log(5, "Creating a tree under %s", path) if not exists(path): os.makedirs(path) if isinstance(tree, dict): tree = tree.items() for file_, load in tree: if isinstance(file_, File): executable = file_.executable name = file_.name else: executable = False name = file_ full_name = op.join(path, name) if remove_existing and lexists(full_name): rmtree(full_name, chmod_files=True) if isinstance(load, (tuple, list, dict)): if name.endswith('.tar.gz') or name.endswith('.tar') or name.endswith('.zip'): create_tree_archive( path, name, load, archives_leading_dir=archives_leading_dir) else: create_tree( full_name, load, archives_leading_dir=archives_leading_dir, remove_existing=remove_existing) else: open_func = open if full_name.endswith('.gz'): open_func = gzip.open elif full_name.split('.')[-1] in ('xz', 'lzma'): import lzma open_func = lzma.open with open_func(full_name, "wb") as f: f.write(ensure_bytes(load, 'utf-8')) if executable: os.chmod(full_name, os.stat(full_name).st_mode | stat.S_IEXEC) def get_suggestions_msg(values, known, sep="\n "): """Return a formatted string with suggestions for values given the known ones """ import difflib suggestions = [] for value in ensure_list(values): # might not want to do it if we change presentation below suggestions += difflib.get_close_matches(value, known) suggestions = unique(suggestions) msg = "Did you mean any of these?" if suggestions: if '\n' in sep: # if separator includes new line - we add entire separator right away msg += sep else: msg += ' ' return msg + "%s\n" % sep.join(suggestions) return '' def bytes2human(n, format='%(value).1f %(symbol)sB'): """ Convert n bytes into a human readable string based on format. 
symbols can be either "customary", "customary_ext", "iec" or "iec_ext", see: http://goo.gl/kTQMs >>> from datalad.utils import bytes2human >>> bytes2human(1) '1.0 B' >>> bytes2human(1024) '1.0 KB' >>> bytes2human(1048576) '1.0 MB' >>> bytes2human(1099511627776127398123789121) '909.5 YB' >>> bytes2human(10000, "%(value).1f %(symbol)s/sec") '9.8 K/sec' >>> # precision can be adjusted by playing with %f operator >>> bytes2human(10000, format="%(value).5f %(symbol)s") '9.76562 K' Taken from: http://goo.gl/kTQMs and subsequently simplified Original Author: Giampaolo Rodola' <g.rodola [AT] gmail [DOT] com> License: MIT """ n = int(n) if n < 0: raise ValueError("n < 0") symbols = ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y') prefix = {} for i, s in enumerate(symbols[1:]): prefix[s] = 1 << (i + 1) * 10 for symbol in reversed(symbols[1:]): if n >= prefix[symbol]: value = float(n) / prefix[symbol] return format % locals() return format % dict(symbol=symbols[0], value=n) def quote_cmdlinearg(arg): """Perform platform-appropriate argument quoting""" # https://stackoverflow.com/a/15262019 return '"{}"'.format( arg.replace('"', '""') ) if on_windows else shlex_quote(arg) def guard_for_format(arg): """Replace { and } with {{ and }} To be used in cases if arg is not expected to have provided by user .format() placeholders, but 'arg' might become a part of a composite passed to .format(), e.g. via 'Run' """ return arg.replace('{', '{{').replace('}', '}}') def join_cmdline(args): """Join command line args into a string using quote_cmdlinearg """ return ' '.join(map(quote_cmdlinearg, args)) def split_cmdline(s): """Perform platform-appropriate command line splitting. Identical to `shlex.split()` on non-windows platforms. Modified from https://stackoverflow.com/a/35900070 """ if not on_windows: return shlex_split(s) # the rest is for windows RE_CMD_LEX = r'''"((?:""|\\["\\]|[^"])*)"?()|(\\\\(?=\\*")|\\")|(&&?|\|\|?|\d?>|[<])|([^\s"&|<>]+)|(\s+)|(.)''' args = [] accu = None # collects pieces of one arg for qs, qss, esc, pipe, word, white, fail in re.findall(RE_CMD_LEX, s): if word: pass # most frequent elif esc: word = esc[1] elif white or pipe: if accu is not None: args.append(accu) if pipe: args.append(pipe) accu = None continue elif fail: raise ValueError("invalid or incomplete shell string") elif qs: word = qs.replace('\\"', '"').replace('\\\\', '\\') if platform == 0: word = word.replace('""', '"') else: word = qss # may be even empty; must be last accu = (accu or '') + word if accu is not None: args.append(accu) return args def get_wrapped_class(wrapped): """Determine the command class a wrapped __call__ belongs to""" mod = sys.modules[wrapped.__module__] command_class_name = wrapped.__qualname__.split('.')[-2] _func_class = mod.__dict__[command_class_name] lgr.debug("Determined class of decorated function: %s", _func_class) return _func_class def _make_assure_kludge(fn): old_name = fn.__name__.replace("ensure", "assure") @wraps(fn) def compat_fn(*args, **kwargs): warnings.warn( "{} is deprecated and will be removed in a future release. " "Use {} instead." .format(old_name, fn.__name__), DeprecationWarning) return fn(*args, **kwargs) compat_fn.__doc__ = ("Note: This function is deprecated. Use {} instead." 
                         .format(fn.__name__))
    return compat_fn


assure_tuple_or_list = _make_assure_kludge(ensure_tuple_or_list)
assure_iter = _make_assure_kludge(ensure_iter)
assure_list = _make_assure_kludge(ensure_list)
assure_list_from_str = _make_assure_kludge(ensure_list_from_str)
assure_dict_from_str = _make_assure_kludge(ensure_dict_from_str)
assure_bytes = _make_assure_kludge(ensure_bytes)
assure_unicode = _make_assure_kludge(ensure_unicode)
assure_bool = _make_assure_kludge(ensure_bool)
assure_dir = _make_assure_kludge(ensure_dir)

lgr.log(5, "Done importing datalad.utils")


def check_symlink_capability(path, target):
    """helper similar to datalad.tests.utils.has_symlink_capability

    However, for use in a datalad command context, we shouldn't assume to be
    able to write to tmpfile, and we also should not import a whole lot from
    datalad's test machinery. Finally, we want to know whether we can create a
    symlink at a specific location, not just somewhere. Therefore use an
    arbitrary path to test-build a symlink and delete it afterwards. A
    suitable location can therefore be determined by higher-level code.

    Parameters
    ----------
    path: Path
    target: Path

    Returns
    -------
    bool
    """
    try:
        target.touch()
        path.symlink_to(target)
        return True
    except Exception:
        return False
    finally:
        if path.exists():
            path.unlink()
        if target.exists():
            target.unlink()
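A brief usage sketch for the helper above. The probe paths and the fallback
behavior are illustrative assumptions rather than part of the original module;
a real caller would pick probe locations inside the directory it actually
cares about.

    from pathlib import Path

    probe_dir = Path('/tmp/some-dataset')          # assumed to exist already
    link_probe = probe_dir / '.symlink_probe'      # link to be test-created
    target_probe = probe_dir / '.symlink_target'   # file the link would point to

    if check_symlink_capability(link_probe, target_probe):
        print("symlinks are supported here")
    else:
        print("falling back to copying files")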
ensure_bytes
Convert/encode unicode string to bytes.

    If `s` isn't a string, return it as is.

    Parameters
    ----------
    encoding: str, optional
        Encoding to use. "utf-8" is the default
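The function body itself is masked in the code listing that follows; a minimal
sketch consistent with this docstring (and symmetric to `ensure_unicode`
defined in the same module) could look like the code below. It is an
assumption for illustration, not necessarily the original implementation.

    def ensure_bytes(s, encoding='utf-8'):
        # Anything that is not a text string (bytes, None, numbers, ...)
        # is passed through untouched.
        if not isinstance(s, str):
            return s
        # Encode text with the requested encoding, defaulting to utf-8.
        return s.encode(encoding)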
# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*- # ex: set sts=4 ts=4 sw=4 et: # ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## # # See COPYING file distributed along with the datalad package for the # copyright and license terms. # # ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## import collections from collections.abc import Callable import re import builtins import time import logging import shutil import os import sys import tempfile from tempfile import NamedTemporaryFile import platform import gc import glob import gzip import stat import string import warnings import os.path as op from copy import copy as shallow_copy from contextlib import contextmanager from functools import ( lru_cache, wraps, ) from time import sleep import inspect from itertools import tee # this import is required because other modules import opj from here. from os.path import join as opj from os.path import ( abspath, basename, commonprefix, curdir, dirname, exists, expanduser, expandvars, isabs, isdir, islink, lexists, normpath, pardir, relpath, sep, split, splitdrive ) import posixpath from shlex import ( quote as shlex_quote, split as shlex_split, ) # from datalad.dochelpers import get_docstring_split from datalad.consts import TIMESTAMP_FMT from datalad.support.exceptions import CapturedException unicode_srctypes = str, bytes lgr = logging.getLogger("datalad.utils") lgr.log(5, "Importing datalad.utils") # # Some useful variables # platform_system = platform.system().lower() on_windows = platform_system == 'windows' on_osx = platform_system == 'darwin' on_linux = platform_system == 'linux' on_msys_tainted_paths = on_windows \ and 'MSYS_NO_PATHCONV' not in os.environ \ and os.environ.get('MSYSTEM', '')[:4] in ('MSYS', 'MING') # Takes ~200msec, so should not be called at import time @lru_cache() # output should not change through life time of datalad process def get_linux_distribution(): """Compatibility wrapper for {platform,distro}.linux_distribution(). """ if hasattr(platform, "linux_distribution"): # Use deprecated (but faster) method if it's available. with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=DeprecationWarning) result = platform.linux_distribution() else: import distro # We require this for Python 3.8 and above. result = distro.linux_distribution(full_distribution_name=False) return result # Those weren't used for any critical decision making, thus we just set them to None # Use get_linux_distribution() directly where needed linux_distribution_name = linux_distribution_release = None # Maximal length of cmdline string # Query the system and use hardcoded "knowledge" if None # probably getconf ARG_MAX might not be available # The last one would be the most conservative/Windows CMD_MAX_ARG_HARDCODED = 2097152 if on_linux else 262144 if on_osx else 32767 try: CMD_MAX_ARG = os.sysconf('SC_ARG_MAX') assert CMD_MAX_ARG > 0 if CMD_MAX_ARG > CMD_MAX_ARG_HARDCODED * 1e6: # workaround for some kind of a bug which comes up with python 3.4 # see https://github.com/datalad/datalad/issues/3150 # or on older CentOS with conda and python as new as 3.9 # see https://github.com/datalad/datalad/issues/5943 # TODO: let Yarik know that the world is a paradise now whenever 1e6 # is not large enough CMD_MAX_ARG = min(CMD_MAX_ARG, CMD_MAX_ARG_HARDCODED) except Exception as exc: # ATM (20181005) SC_ARG_MAX available only on POSIX systems # so exception would be thrown e.g. 
on Windows, or # somehow during Debian build for nd14.04 it is coming up with -1: # https://github.com/datalad/datalad/issues/3015 CMD_MAX_ARG = CMD_MAX_ARG_HARDCODED lgr.debug( "Failed to query or got useless SC_ARG_MAX sysconf, " "will use hardcoded value: %s", exc) # Even with all careful computations we do, due to necessity to account for # environment and what not, we still could not figure out "exact" way to # estimate it, but it was shown that 300k safety margin on linux was sufficient. # https://github.com/datalad/datalad/pull/2977#issuecomment-436264710 # 300k is ~15%, so to be safe, and for paranoid us we will just use up to 50% # of the length for "safety margin". We might probably still blow due to # env vars, unicode, etc... so any hard limit imho is not a proper solution CMD_MAX_ARG = int(0.5 * CMD_MAX_ARG) lgr.debug( "Maximal length of cmdline string (adjusted for safety margin): %d", CMD_MAX_ARG) # # Little helpers # # `getargspec` has been deprecated in Python 3. ArgSpecFake = collections.namedtuple( "ArgSpecFake", ["args", "varargs", "keywords", "defaults"]) def getargspec(func, *, include_kwonlyargs=False): """Compat shim for getargspec deprecated in python 3. The main difference from inspect.getargspec (and inspect.getfullargspec for that matter) is that by using inspect.signature we are providing correct args/defaults for functools.wraps'ed functions. `include_kwonlyargs` option was added to centralize getting all args, even the ones which are kwonly (follow the ``*,``). For internal use and not advised for use in 3rd party code. Please use inspect.signature directly. """ # We use signature, and not getfullargspec, because only signature properly # "passes" args from a functools.wraps decorated function. # Note: getfullargspec works Ok on wrapt-decorated functions f_sign = inspect.signature(func) # Loop through parameters and compose argspec args4 = [[], None, None, {}] # Collect all kwonlyargs into a dedicated dict - name: default kwonlyargs = {} # shortcuts args, defaults = args4[0], args4[3] P = inspect.Parameter for p_name, p in f_sign.parameters.items(): if p.kind in (P.POSITIONAL_ONLY, P.POSITIONAL_OR_KEYWORD): assert not kwonlyargs # yoh: must not come after kwonlyarg args.append(p_name) if p.default is not P.empty: defaults[p_name] = p.default elif p.kind == P.VAR_POSITIONAL: args4[1] = p_name elif p.kind == P.VAR_KEYWORD: args4[2] = p_name elif p.kind == P.KEYWORD_ONLY: assert p.default is not P.empty kwonlyargs[p_name] = p.default if kwonlyargs: if not include_kwonlyargs: raise ValueError( 'Function has keyword-only parameters or annotations, either use ' 'inspect.signature() API which can support them, or provide include_kwonlyargs=True ' 'to this function' ) else: args.extend(list(kwonlyargs)) defaults.update(kwonlyargs) # harmonize defaults to how original getargspec returned them -- just a tuple args4[3] = None if not defaults else tuple(defaults.values()) return ArgSpecFake(*args4) def any_re_search(regexes, value): """Return if any of regexes (list or str) searches successfully for value""" for regex in ensure_tuple_or_list(regexes): if re.search(regex, value): return True return False def not_supported_on_windows(msg=None): """A little helper to be invoked to consistently fail whenever functionality is not supported (yet) on Windows """ if on_windows: raise NotImplementedError("This functionality is not yet implemented for Windows OS" + (": %s" % msg if msg else "")) def get_home_envvars(new_home): """Return dict with env variables to be adjusted for a 
new HOME Only variables found in current os.environ are adjusted. Parameters ---------- new_home: str or Path New home path, in native to OS "schema" """ new_home = str(new_home) out = {'HOME': new_home} if on_windows: # requires special handling, since it has a number of relevant variables # and also Python changed its behavior and started to respect USERPROFILE only # since python 3.8: https://bugs.python.org/issue36264 out['USERPROFILE'] = new_home out['HOMEDRIVE'], out['HOMEPATH'] = splitdrive(new_home) return {v: val for v, val in out.items() if v in os.environ} def shortened_repr(value, l=30): try: if hasattr(value, '__repr__') and (value.__repr__ is not object.__repr__): value_repr = repr(value) if not value_repr.startswith('<') and len(value_repr) > l: value_repr = "<<%s++%d chars++%s>>" % ( value_repr[:l - 16], len(value_repr) - (l - 16 + 4), value_repr[-4:] ) elif value_repr.startswith('<') and value_repr.endswith('>') and ' object at 0x': raise ValueError("I hate those useless long reprs") else: raise ValueError("gimme class") except Exception as e: value_repr = "<%s>" % value.__class__.__name__.split('.')[-1] return value_repr def __auto_repr__(obj): attr_names = tuple() if hasattr(obj, '__dict__'): attr_names += tuple(obj.__dict__.keys()) if hasattr(obj, '__slots__'): attr_names += tuple(obj.__slots__) items = [] for attr in sorted(set(attr_names)): if attr.startswith('_'): continue value = getattr(obj, attr) # TODO: should we add this feature to minimize some talktative reprs # such as of URL? #if value is None: # continue items.append("%s=%s" % (attr, shortened_repr(value))) return "%s(%s)" % (obj.__class__.__name__, ', '.join(items)) def auto_repr(cls): """Decorator for a class to assign it an automagic quick and dirty __repr__ It uses public class attributes to prepare repr of a class Original idea: http://stackoverflow.com/a/27799004/1265472 """ cls.__repr__ = __auto_repr__ return cls def _is_stream_tty(stream): try: # TODO: check on windows if hasattr check would work correctly and # add value: return stream.isatty() except ValueError as exc: # Who knows why it is a ValueError, but let's try to be specific # If there is a problem with I/O - non-interactive, otherwise reraise if "I/O" in str(exc): return False raise def is_interactive(): """Return True if all in/outs are open and tty. Note that in a somewhat abnormal case where e.g. stdin is explicitly closed, and any operation on it would raise a `ValueError("I/O operation on closed file")` exception, this function would just return False, since the session cannot be used interactively. 
""" return all(_is_stream_tty(s) for s in (sys.stdin, sys.stdout, sys.stderr)) def get_ipython_shell(): """Detect if running within IPython and returns its `ip` (shell) object Returns None if not under ipython (no `get_ipython` function) """ try: return get_ipython() except NameError: return None def md5sum(filename): """Compute an MD5 sum for the given file """ from datalad.support.digests import Digester return Digester(digests=['md5'])(filename)['md5'] # unused in -core def sorted_files(path): """Return a (sorted) list of files under path """ return sorted(sum([[op.join(r, f)[len(path) + 1:] for f in files] for r, d, files in os.walk(path) if not '.git' in r], [])) _encoded_dirsep = r'\\' if on_windows else r'/' _VCS_REGEX = r'%s\.(?:git|gitattributes|svn|bzr|hg)(?:%s|$)' % ( _encoded_dirsep, _encoded_dirsep) _DATALAD_REGEX = r'%s\.(?:datalad)(?:%s|$)' % ( _encoded_dirsep, _encoded_dirsep) def find_files(regex, topdir=curdir, exclude=None, exclude_vcs=True, exclude_datalad=False, dirs=False): """Generator to find files matching regex Parameters ---------- regex: basestring exclude: basestring, optional Matches to exclude exclude_vcs: If True, excludes commonly known VCS subdirectories. If string, used as regex to exclude those files (regex: `%r`) exclude_datalad: If True, excludes files known to be datalad meta-data files (e.g. under .datalad/ subdirectory) (regex: `%r`) topdir: basestring, optional Directory where to search dirs: bool, optional Whether to match directories as well as files """ for dirpath, dirnames, filenames in os.walk(topdir): names = (dirnames + filenames) if dirs else filenames # TODO: might want to uniformize on windows to use '/' paths = (op.join(dirpath, name) for name in names) for path in filter(re.compile(regex).search, paths): path = path.rstrip(sep) if exclude and re.search(exclude, path): continue if exclude_vcs and re.search(_VCS_REGEX, path): continue if exclude_datalad and re.search(_DATALAD_REGEX, path): continue yield path find_files.__doc__ %= (_VCS_REGEX, _DATALAD_REGEX) def expandpath(path, force_absolute=True): """Expand all variables and user handles in a path. By default return an absolute path """ path = expandvars(expanduser(path)) if force_absolute: path = abspath(path) return path def posix_relpath(path, start=None): """Behave like os.path.relpath, but always return POSIX paths... on any platform.""" # join POSIX style return posixpath.join( # split and relpath native style # python2.7 ntpath implementation of relpath cannot handle start=None *split( relpath(path, start=start if start is not None else ''))) def is_explicit_path(path): """Return whether a path explicitly points to a location Any absolute path, or relative path starting with either '../' or './' is assumed to indicate a location on the filesystem. 
Any other path format is not considered explicit.""" path = expandpath(path, force_absolute=False) return isabs(path) \ or path.startswith(os.curdir + os.sep) \ or path.startswith(os.pardir + os.sep) # handle this dance once, and import pathlib from here # in all other places from pathlib import ( Path, PurePath, PurePosixPath, ) def rotree(path, ro=True, chmod_files=True): """To make tree read-only or writable Parameters ---------- path : string Path to the tree/directory to chmod ro : bool, optional Whether to make it R/O (default) or RW chmod_files : bool, optional Whether to operate also on files (not just directories) """ if ro: chmod = lambda f: os.chmod(f, os.stat(f).st_mode & ~stat.S_IWRITE) else: chmod = lambda f: os.chmod(f, os.stat(f).st_mode | stat.S_IWRITE | stat.S_IREAD) for root, dirs, files in os.walk(path, followlinks=False): if chmod_files: for f in files: fullf = op.join(root, f) # might be the "broken" symlink which would fail to stat etc if exists(fullf): chmod(fullf) chmod(root) def rmtree(path, chmod_files='auto', children_only=False, *args, **kwargs): """To remove git-annex .git it is needed to make all files and directories writable again first Parameters ---------- path: Path or str Path to remove chmod_files : string or bool, optional Whether to make files writable also before removal. Usually it is just a matter of directories to have write permissions. If 'auto' it would chmod files on windows by default children_only : bool, optional If set, all files and subdirectories would be removed while the path itself (must be a directory) would be preserved `*args` : `**kwargs` : Passed into shutil.rmtree call """ # Give W permissions back only to directories, no need to bother with files if chmod_files == 'auto': chmod_files = on_windows # TODO: yoh thinks that if we could quickly check our Flyweight for # repos if any of them is under the path, and could call .precommit # on those to possibly stop batched processes etc, we did not have # to do it on case by case # Check for open files assert_no_open_files(path) # TODO the whole thing should be reimplemented with pathlib, but for now # at least accept Path path = str(path) if children_only: if not isdir(path): raise ValueError("Can remove children only of directories") for p in os.listdir(path): rmtree(op.join(path, p)) return if not (islink(path) or not isdir(path)): rotree(path, ro=False, chmod_files=chmod_files) if on_windows: # shutil fails to remove paths that exceed 260 characters on Windows machines # that did not enable long path support. A workaround to remove long paths # anyway is to preprend \\?\ to the path. # https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file?redirectedfrom=MSDN#win32-file-namespaces path = r'\\?\ '.strip() + path _rmtree(path, *args, **kwargs) else: # just remove the symlink unlink(path) def rmdir(path, *args, **kwargs): """os.rmdir with our optional checking for open files""" assert_no_open_files(path) os.rmdir(path) def get_open_files(path, log_open=False): """Get open files under a path Note: This function is very slow on Windows. 
Parameters ---------- path : str File or directory to check for open files under log_open : bool or int If set - logger level to use Returns ------- dict path : pid """ # Original idea: https://stackoverflow.com/a/11115521/1265472 import psutil files = {} # since the ones returned by psutil would not be aware of symlinks in the # path we should also get realpath for path # do absolute() in addition to always get an absolute path # even with non-existing paths on windows path = str(Path(path).resolve().absolute()) for proc in psutil.process_iter(): try: open_paths = [p.path for p in proc.open_files()] + [proc.cwd()] for p in open_paths: # note: could be done more efficiently so we do not # renormalize path over and over again etc if path_startswith(p, path): files[p] = proc # Catch a race condition where a process ends # before we can examine its files except psutil.NoSuchProcess: pass except psutil.AccessDenied: pass if files and log_open: lgr.log(log_open, "Open files under %s: %s", path, files) return files _assert_no_open_files_cfg = os.environ.get('DATALAD_ASSERT_NO_OPEN_FILES') if _assert_no_open_files_cfg: def assert_no_open_files(path): files = get_open_files(path, log_open=40) if _assert_no_open_files_cfg == 'assert': assert not files, "Got following files still open: %s" % ','.join(files) elif files: if _assert_no_open_files_cfg == 'pdb': import pdb pdb.set_trace() elif _assert_no_open_files_cfg == 'epdb': import epdb epdb.serve() pass # otherwise we would just issue that error message in the log else: def assert_no_open_files(*args, **kwargs): pass def rmtemp(f, *args, **kwargs): """Wrapper to centralize removing of temp files so we could keep them around It will not remove the temporary file/directory if DATALAD_TESTS_TEMP_KEEP environment variable is defined """ if not os.environ.get('DATALAD_TESTS_TEMP_KEEP'): if not os.path.lexists(f): lgr.debug("Path %s does not exist, so can't be removed", f) return lgr.log(5, "Removing temp file: %s", f) # Can also be a directory if isdir(f): rmtree(f, *args, **kwargs) else: unlink(f) else: lgr.info("Keeping temp file: %s", f) def file_basename(name, return_ext=False): """ Strips up to 2 extensions of length up to 4 characters and starting with alpha not a digit, so we could get rid of .tar.gz etc """ bname = basename(name) fbname = re.sub(r'(\.[a-zA-Z_]\S{1,4}){0,2}$', '', bname) if return_ext: return fbname, bname[len(fbname) + 1:] else: return fbname # unused in -core def escape_filename(filename): """Surround filename in "" and escape " in the filename """ filename = filename.replace('"', r'\"').replace('`', r'\`') filename = '"%s"' % filename return filename # unused in -core def encode_filename(filename): """Encode unicode filename """ if isinstance(filename, str): return filename.encode(sys.getfilesystemencoding()) else: return filename # unused in -core def decode_input(s): """Given input string/bytes, decode according to stdin codepage (or UTF-8) if not defined If fails -- issue warning and decode allowing for errors being replaced """ if isinstance(s, str): return s else: encoding = sys.stdin.encoding or 'UTF-8' try: return s.decode(encoding) except UnicodeDecodeError as exc: lgr.warning( "Failed to decode input string using %s encoding. " "Decoding allowing for errors", encoding) return s.decode(encoding, errors='replace') # unused in -core if on_windows: def lmtime(filepath, mtime): """Set mtime for files. 
On Windows a merely adapter to os.utime """ os.utime(filepath, (time.time(), mtime)) else: def lmtime(filepath, mtime): """Set mtime for files, while not de-referencing symlinks. To overcome absence of os.lutime Works only on linux and OSX ATM """ from .cmd import WitlessRunner # convert mtime to format touch understands [[CC]YY]MMDDhhmm[.SS] smtime = time.strftime("%Y%m%d%H%M.%S", time.localtime(mtime)) lgr.log(3, "Setting mtime for %s to %s == %s", filepath, mtime, smtime) WitlessRunner().run(['touch', '-h', '-t', '%s' % smtime, filepath]) filepath = Path(filepath) rfilepath = filepath.resolve() if filepath.is_symlink() and rfilepath.exists(): # trust no one - adjust also of the target file # since it seemed like downloading under OSX (was it using curl?) # didn't bother with timestamps lgr.log(3, "File is a symlink to %s Setting mtime for it to %s", rfilepath, mtime) os.utime(str(rfilepath), (time.time(), mtime)) # doesn't work on OSX # Runner().run(['touch', '-h', '-d', '@%s' % mtime, filepath]) def ensure_tuple_or_list(obj): """Given an object, wrap into a tuple if not list or tuple """ if isinstance(obj, (list, tuple)): return obj return (obj,) def ensure_iter(s, cls, copy=False, iterate=True): """Given not a list, would place it into a list. If None - empty list is returned Parameters ---------- s: list or anything cls: class Which iterable class to ensure copy: bool, optional If correct iterable is passed, it would generate its shallow copy iterate: bool, optional If it is not a list, but something iterable (but not a str) iterate over it. """ if isinstance(s, cls): return s if not copy else shallow_copy(s) elif isinstance(s, str): return cls((s,)) elif iterate and hasattr(s, '__iter__'): return cls(s) elif s is None: return cls() else: return cls((s,)) def ensure_list(s, copy=False, iterate=True): """Given not a list, would place it into a list. If None - empty list is returned Parameters ---------- s: list or anything copy: bool, optional If list is passed, it would generate a shallow copy of the list iterate: bool, optional If it is not a list, but something iterable (but not a str) iterate over it. """ return ensure_iter(s, list, copy=copy, iterate=iterate) def ensure_list_from_str(s, sep='\n'): """Given a multiline string convert it to a list of return None if empty Parameters ---------- s: str or list """ if not s: return None if isinstance(s, list): return s return s.split(sep) def ensure_dict_from_str(s, **kwargs): """Given a multiline string with key=value items convert it to a dictionary Parameters ---------- s: str or dict Returns None if input s is empty """ if not s: return None if isinstance(s, dict): return s out = {} for value_str in ensure_list_from_str(s, **kwargs): if '=' not in value_str: raise ValueError("{} is not in key=value format".format(repr(value_str))) k, v = value_str.split('=', 1) if k in out: err = "key {} was already defined in {}, but new value {} was provided".format(k, out, v) raise ValueError(err) out[k] = v return out # MASKED: ensure_bytes function (lines 792-804) def ensure_unicode(s, encoding=None, confidence=None): """Convert/decode bytestring to unicode. If `s` isn't a bytestring, return it as is. Parameters ---------- encoding: str, optional Encoding to use. 
If None, "utf-8" is tried, and then if not a valid UTF-8, encoding will be guessed confidence: float, optional A value between 0 and 1, so if guessing of encoding is of lower than specified confidence, ValueError is raised """ if not isinstance(s, bytes): return s if encoding is None: # Figure out encoding, defaulting to 'utf-8' which is our common # target in contemporary digital society try: return s.decode('utf-8') except UnicodeDecodeError as exc: lgr.debug("Failed to decode a string as utf-8: %s", CapturedException(exc)) # And now we could try to guess from chardet import detect enc = detect(s) denc = enc.get('encoding', None) if denc: denc_confidence = enc.get('confidence', 0) if confidence is not None and denc_confidence < confidence: raise ValueError( "Failed to auto-detect encoding with high enough " "confidence. Highest confidence was %s for %s" % (denc_confidence, denc) ) lgr.log(5, "Auto-detected encoding to be %s", denc) return s.decode(denc) else: raise ValueError( "Could not decode value as utf-8, or to guess its encoding: %s" % repr(s) ) else: return s.decode(encoding) def ensure_bool(s): """Convert value into boolean following convention for strings to recognize on,True,yes as True, off,False,no as False """ if isinstance(s, str): if s.isdigit(): return bool(int(s)) sl = s.lower() if sl in {'y', 'yes', 'true', 'on'}: return True elif sl in {'n', 'no', 'false', 'off'}: return False else: raise ValueError("Do not know how to treat %r as a boolean" % s) return bool(s) def as_unicode(val, cast_types=object): """Given an arbitrary value, would try to obtain unicode value of it For unicode it would return original value, for python2 str or python3 bytes it would use ensure_unicode, for None - an empty (unicode) string, and for any other type (see `cast_types`) - would apply the unicode constructor. If value is not an instance of `cast_types`, TypeError is thrown Parameters ---------- cast_types: type Which types to cast to unicode by providing to constructor """ if val is None: return u'' elif isinstance(val, str): return val elif isinstance(val, unicode_srctypes): return ensure_unicode(val) elif isinstance(val, cast_types): return str(val) else: raise TypeError( "Value %r is not of any of known or provided %s types" % (val, cast_types)) def unique(seq, key=None, reverse=False): """Given a sequence return a list only with unique elements while maintaining order This is the fastest solution. See https://www.peterbe.com/plog/uniqifiers-benchmark and http://stackoverflow.com/a/480227/1265472 for more information. Enhancement -- added ability to compare for uniqueness using a key function Parameters ---------- seq: Sequence to analyze key: callable, optional Function to call on each element so we could decide not on a full element, but on its member etc reverse: bool, optional If True, uniqueness checked in the reverse order, so that the later ones will take the order """ seen = set() seen_add = seen.add trans = reversed if reverse else lambda x: x if not key: out = [x for x in trans(seq) if not (x in seen or seen_add(x))] else: # OPT: could be optimized, since key is called twice, but for our cases # should be just as fine out = [x for x in trans(seq) if not (key(x) in seen or seen_add(key(x)))] return out[::-1] if reverse else out def all_same(items): """Quick check if all items are the same. 
Identical to a check like len(set(items)) == 1 but should be more efficient while working on generators, since would return False as soon as any difference detected thus possibly avoiding unnecessary evaluations """ first = True first_item = None for item in items: if first: first = False first_item = item else: if item != first_item: return False # So we return False if was empty return not first def map_items(func, v): """A helper to apply `func` to all elements (keys and values) within dict No type checking of values passed to func is done, so `func` should be resilient to values which it should not handle Initial usecase - apply_recursive(url_fragment, ensure_unicode) """ # map all elements within item return v.__class__( item.__class__(map(func, item)) for item in v.items() ) def partition(items, predicate=bool): """Partition `items` by `predicate`. Parameters ---------- items : iterable predicate : callable A function that will be mapped over each element in `items`. The elements will partitioned based on whether the return value is false or true. Returns ------- A tuple with two generators, the first for 'false' items and the second for 'true' ones. Notes ----- Taken from Peter Otten's snippet posted at https://nedbatchelder.com/blog/201306/filter_a_list_into_two_parts.html """ a, b = tee((predicate(item), item) for item in items) return ((item for pred, item in a if not pred), (item for pred, item in b if pred)) def generate_chunks(container, size): """Given a container, generate chunks from it with size up to `size` """ # There could be a "smarter" solution but I think this would suffice assert size > 0, "Size should be non-0 positive" while container: yield container[:size] container = container[size:] def generate_file_chunks(files, cmd=None): """Given a list of files, generate chunks of them to avoid exceeding cmdline length Parameters ---------- files: list of str cmd: str or list of str, optional Command to account for as well """ files = ensure_list(files) cmd = ensure_list(cmd) maxl = max(map(len, files)) if files else 0 chunk_size = max( 1, # should at least be 1. If blows then - not our fault (CMD_MAX_ARG - sum((len(x) + 3) for x in cmd) - 4 # for '--' below ) // (maxl + 3) # +3 for possible quotes and a space ) # TODO: additional treatment for "too many arguments"? although # as https://github.com/datalad/datalad/issues/1883#issuecomment # -436272758 # shows there seems to be no hardcoded limit on # of arguments, # but may be we decide to go for smth like follow to be on safe side # chunk_size = min(10240 - len(cmd), chunk_size) file_chunks = generate_chunks(files, chunk_size) return file_chunks # # Generators helpers # def saved_generator(gen): """Given a generator returns two generators, where 2nd one just replays So the first one would be going through the generated items and 2nd one would be yielding saved items """ saved = [] def gen1(): for x in gen: # iterating over original generator saved.append(x) yield x def gen2(): for x in saved: # yielding saved entries yield x return gen1(), gen2() # # Decorators # # Originally better_wraps was created to provide `wrapt`-based, instead of # `functools.wraps` implementation to preserve the correct signature of the # decorated function. By using inspect.signature in our getargspec, which # works fine on `functools.wraps`ed functions, we mediated this necessity. better_wraps = wraps # Borrowed from pandas # Copyright: 2011-2014, Lambda Foundry, Inc. 
and PyData Development Team # License: BSD-3 def optional_args(decorator): """allows a decorator to take optional positional and keyword arguments. Assumes that taking a single, callable, positional argument means that it is decorating a function, i.e. something like this:: @my_decorator def function(): pass Calls decorator with decorator(f, `*args`, `**kwargs`)""" @better_wraps(decorator) def wrapper(*args, **kwargs): def dec(f): return decorator(f, *args, **kwargs) is_decorating = not kwargs and len(args) == 1 and isinstance(args[0], Callable) if is_decorating: f = args[0] args = [] return dec(f) else: return dec return wrapper # TODO: just provide decorators for tempfile.mk* functions. This is ugly! def get_tempfile_kwargs(tkwargs=None, prefix="", wrapped=None): """Updates kwargs to be passed to tempfile. calls depending on env vars """ if tkwargs is None: tkwargs_ = {} else: # operate on a copy of tkwargs to avoid any side-effects tkwargs_ = tkwargs.copy() # TODO: don't remember why I had this one originally # if len(targs)<2 and \ if 'prefix' not in tkwargs_: tkwargs_['prefix'] = '_'.join( ['datalad_temp'] + ([prefix] if prefix else []) + ([''] if (on_windows or not wrapped) else [wrapped.__name__])) directory = os.environ.get('TMPDIR') if directory and 'dir' not in tkwargs_: tkwargs_['dir'] = directory return tkwargs_ @optional_args def line_profile(func): """Q&D helper to line profile the function and spit out stats """ import line_profiler prof = line_profiler.LineProfiler() @wraps(func) def _wrap_line_profile(*args, **kwargs): try: pfunc = prof(func) return pfunc(*args, **kwargs) finally: prof.print_stats() return _wrap_line_profile # unused in -core @optional_args def collect_method_callstats(func): """Figure out methods which call the method repeatedly on the same instance Use case(s): - .repo is expensive since does all kinds of checks. - .config is expensive transitively since it calls .repo each time TODO: - fancy one could look through the stack for the same id(self) to see if that location is already in memo. That would hint to the cases where object is not passed into underlying functions, causing them to redo the same work over and over again - ATM might flood with all "1 lines" calls which are not that informative. The underlying possibly suboptimal use might be coming from their callers. 
It might or not relate to the previous TODO """ from collections import defaultdict import traceback from time import time memo = defaultdict(lambda: defaultdict(int)) # it will be a dict of lineno: count # gross timing times = [] toppath = dirname(__file__) + sep @wraps(func) def _wrap_collect_method_callstats(*args, **kwargs): try: self = args[0] stack = traceback.extract_stack() caller = stack[-2] stack_sig = \ "{relpath}:{s.name}".format( s=caller, relpath=relpath(caller.filename, toppath)) sig = (id(self), stack_sig) # we will count based on id(self) + wherefrom memo[sig][caller.lineno] += 1 t0 = time() return func(*args, **kwargs) finally: times.append(time() - t0) pass def print_stats(): print("The cost of property {}:".format(func.__name__)) if not memo: print("None since no calls") return # total count counts = {k: sum(v.values()) for k,v in memo.items()} total = sum(counts.values()) ids = {self_id for (self_id, _) in memo} print(" Total: {} calls from {} objects with {} contexts taking {:.2f} sec" .format(total, len(ids), len(memo), sum(times))) # now we need to sort by value for (self_id, caller), count in sorted(counts.items(), key=lambda x: x[1], reverse=True): print(" {} {}: {} from {} lines" .format(self_id, caller, count, len(memo[(self_id, caller)]))) # Upon total exit we print the stats import atexit atexit.register(print_stats) return _wrap_collect_method_callstats # Borrowed from duecredit to wrap duecredit-handling to guarantee failsafe def never_fail(f): """Assure that function never fails -- all exceptions are caught Returns `None` if function fails internally. """ @wraps(f) def wrapped_func(*args, **kwargs): try: return f(*args, **kwargs) except Exception as e: lgr.warning( "DataLad internal failure while running %s: %r. " "Please report at https://github.com/datalad/datalad/issues" % (f, e) ) if os.environ.get('DATALAD_ALLOW_FAIL', False): return f else: return wrapped_func # # Context Managers # # unused in -core @contextmanager def nothing_cm(): """Just a dummy cm to programmically switch context managers""" yield @contextmanager def swallow_outputs(): """Context manager to help consuming both stdout and stderr, and print() stdout is available as cm.out and stderr as cm.err whenever cm is the yielded context manager. Internally uses temporary files to guarantee absent side-effects of swallowing into StringIO which lacks .fileno. print mocking is necessary for some uses where sys.stdout was already bound to original sys.stdout, thus mocking it later had no effect. 
Overriding print function had desired effect """ class StringIOAdapter(object): """Little adapter to help getting out/err values """ def __init__(self): kw = get_tempfile_kwargs({}, prefix="outputs") self._out = NamedTemporaryFile(delete=False, mode='w', **kw) self._err = NamedTemporaryFile(delete=False, mode='w', **kw) def _read(self, h): with open(h.name) as f: return f.read() @property def out(self): if not self._out.closed: self._out.flush() return self._read(self._out) @property def err(self): if not self._err.closed: self._err.flush() return self._read(self._err) @property def handles(self): return self._out, self._err def cleanup(self): self._out.close() self._err.close() out_name = self._out.name err_name = self._err.name from datalad import cfg if cfg.getbool('datalad.log', 'outputs', default=False) \ and lgr.getEffectiveLevel() <= logging.DEBUG: for s, sname in ((self.out, 'stdout'), (self.err, 'stderr')): if s: pref = os.linesep + "| " lgr.debug("Swallowed %s:%s%s", sname, pref, s.replace(os.linesep, pref)) else: lgr.debug("Nothing was swallowed for %s", sname) del self._out del self._err gc.collect() rmtemp(out_name) rmtemp(err_name) def fake_print(*args, **kwargs): sep = kwargs.pop('sep', ' ') end = kwargs.pop('end', '\n') file = kwargs.pop('file', sys.stdout) if file in (oldout, olderr, sys.stdout, sys.stderr): # we mock try: sys.stdout.write(sep.join(args) + end) except UnicodeEncodeError as exc: lgr.error( "Failed to write to mocked stdout, got %s, continue as it " "didn't happen", exc) else: # must be some other file one -- leave it alone oldprint(*args, sep=sep, end=end, file=file) from .ui import ui # preserve -- they could have been mocked already oldprint = getattr(builtins, 'print') oldout, olderr = sys.stdout, sys.stderr olduiout = ui.out adapter = StringIOAdapter() try: sys.stdout, sys.stderr = adapter.handles ui.out = adapter.handles[0] setattr(builtins, 'print', fake_print) yield adapter finally: sys.stdout, sys.stderr, ui.out = oldout, olderr, olduiout setattr(builtins, 'print', oldprint) adapter.cleanup() @contextmanager def swallow_logs(new_level=None, file_=None, name='datalad'): """Context manager to consume all logs. """ lgr = logging.getLogger(name) # Keep old settings old_level = lgr.level old_handlers = lgr.handlers # Let's log everything into a string # TODO: generalize with the one for swallow_outputs class StringIOAdapter(object): """Little adapter to help getting out values And to stay consistent with how swallow_outputs behaves """ def __init__(self): if file_ is None: kw = get_tempfile_kwargs({}, prefix="logs") self._out = NamedTemporaryFile(mode='a', delete=False, **kw) else: out_file = file_ # PY3 requires clearly one or another. race condition possible self._out = open(out_file, 'a') self._final_out = None def _read(self, h): with open(h.name) as f: return f.read() @property def out(self): if self._final_out is not None: # we closed and cleaned up already return self._final_out else: self._out.flush() return self._read(self._out) @property def lines(self): return self.out.split('\n') @property def handle(self): return self._out def cleanup(self): # store for access while object exists self._final_out = self.out self._out.close() out_name = self._out.name del self._out gc.collect() if not file_: rmtemp(out_name) def assert_logged(self, msg=None, level=None, regex=True, **kwargs): """Provide assertion on whether a msg was logged at a given level If neither `msg` nor `level` provided, checks if anything was logged at all. 
Parameters ---------- msg: str, optional Message (as a regular expression, if `regex`) to be searched. If no msg provided, checks if anything was logged at a given level. level: str, optional String representing the level to be logged regex: bool, optional If False, regular `assert_in` is used **kwargs: str, optional Passed to `assert_re_in` or `assert_in` """ from datalad.tests.utils import assert_re_in from datalad.tests.utils import assert_in if regex: match = r'\[%s\] ' % level if level else r"\[\S+\] " else: match = '[%s] ' % level if level else '' if msg: match += msg if match: (assert_re_in if regex else assert_in)(match, self.out, **kwargs) else: assert not kwargs, "no kwargs to be passed anywhere" assert self.out, "Nothing was logged!?" adapter = StringIOAdapter() # TODO: it does store messages but without any formatting, i.e. even without # date/time prefix etc. IMHO it should preserve formatting in case if file_ is # set swallow_handler = logging.StreamHandler(adapter.handle) # we want to log levelname so we could test against it swallow_handler.setFormatter( logging.Formatter('[%(levelname)s] %(message)s')) swallow_handler.filters = sum([h.filters for h in old_handlers], []) lgr.handlers = [swallow_handler] if old_level < logging.DEBUG: # so if HEAVYDEBUG etc -- show them! lgr.handlers += old_handlers if isinstance(new_level, str): new_level = getattr(logging, new_level) if new_level is not None: lgr.setLevel(new_level) try: yield adapter # TODO: if file_ and there was an exception -- most probably worth logging it? # although ideally it should be the next log outside added to that file_ ... oh well finally: lgr.handlers = old_handlers lgr.setLevel(old_level) adapter.cleanup() # TODO: May be melt in with swallow_logs at some point: @contextmanager def disable_logger(logger=None): """context manager to temporarily disable logging This is to provide one of swallow_logs' purposes without unnecessarily creating temp files (see gh-1865) Parameters ---------- logger: Logger Logger whose handlers will be ordered to not log anything. Default: datalad's topmost Logger ('datalad') """ class NullFilter(logging.Filter): """Filter class to reject all records """ def filter(self, record): return 0 if logger is None: # default: all of datalad's logging: logger = logging.getLogger('datalad') filter_ = NullFilter(logger.name) [h.addFilter(filter_) for h in logger.handlers] try: yield logger finally: [h.removeFilter(filter_) for h in logger.handlers] # # Additional handlers # _sys_excepthook = sys.excepthook # Just in case we ever need original one def setup_exceptionhook(ipython=False): """Overloads default sys.excepthook with our exceptionhook handler. If interactive, our exceptionhook handler will invoke pdb.post_mortem; if not interactive, then invokes default handler. """ def _datalad_pdb_excepthook(type, value, tb): import traceback traceback.print_exception(type, value, tb) print() if is_interactive(): import pdb pdb.post_mortem(tb) if ipython: from IPython.core import ultratb sys.excepthook = ultratb.FormattedTB(mode='Verbose', # color_scheme='Linux', call_pdb=is_interactive()) else: sys.excepthook = _datalad_pdb_excepthook def ensure_dir(*args): """Make sure directory exists. Joins the list of arguments to an os-specific path to the desired directory and creates it, if it not exists yet. 
""" dirname = op.join(*args) if not exists(dirname): os.makedirs(dirname) return dirname def updated(d, update): """Return a copy of the input with the 'update' Primarily for updating dictionaries """ d = d.copy() d.update(update) return d _pwd_mode = None def _switch_to_getcwd(msg, *args): global _pwd_mode _pwd_mode = 'cwd' lgr.debug( msg + ". From now on will be returning os.getcwd(). Directory" " symlinks in the paths will be resolved", *args ) # TODO: we might want to mitigate by going through all flywheighted # repos and tuning up their .paths to be resolved? def getpwd(): """Try to return a CWD without dereferencing possible symlinks This function will try to use PWD environment variable to provide a current working directory, possibly with some directories along the path being symlinks to other directories. Unfortunately, PWD is used/set only by the shell and such functions as `os.chdir` and `os.getcwd` nohow use or modify it, thus `os.getcwd()` returns path with links dereferenced. While returning current working directory based on PWD env variable we verify that the directory is the same as `os.getcwd()` after resolving all symlinks. If that verification fails, we fall back to always use `os.getcwd()`. Initial decision to either use PWD env variable or os.getcwd() is done upon the first call of this function. """ global _pwd_mode if _pwd_mode is None: # we need to decide! try: pwd = os.environ['PWD'] if on_windows and pwd and pwd.startswith('/'): # It should be a path from MSYS. # - it might start with a drive letter or not # - it seems to be "illegal" to have a single letter directories # under / path, i.e. if created - they aren't found # - 'ln -s' does not fail to create a "symlink" but it just # copies! # so we are not likely to need original PWD purpose on # those systems # Verdict: _pwd_mode = 'cwd' else: _pwd_mode = 'PWD' except KeyError: _pwd_mode = 'cwd' if _pwd_mode == 'cwd': return os.getcwd() elif _pwd_mode == 'PWD': try: cwd = os.getcwd() except OSError as exc: if "o such file" in str(exc): # directory was removed but we promised to be robust and # still report the path we might know since we are still in PWD # mode cwd = None else: raise try: pwd = os.environ['PWD'] # do absolute() in addition to always get an absolute path # even with non-existing paths on windows pwd_real = str(Path(pwd).resolve().absolute()) # This logic would fail to catch the case where chdir did happen # to the directory where current PWD is pointing to, e.g. # $> ls -ld $PWD # lrwxrwxrwx 1 yoh yoh 5 Oct 11 13:27 /home/yoh/.tmp/tmp -> /tmp// # hopa:~/.tmp/tmp # $> python -c 'import os; os.chdir("/tmp"); from datalad.utils import getpwd; print(getpwd(), os.getcwd())' # ('/home/yoh/.tmp/tmp', '/tmp') # but I guess that should not be too harmful if cwd is not None and pwd_real != cwd: _switch_to_getcwd( "realpath of PWD=%s is %s whenever os.getcwd()=%s", pwd, pwd_real, cwd ) return cwd return pwd except KeyError: _switch_to_getcwd("PWD env variable is no longer available") return cwd # Must not happen, but may be someone # evil purges PWD from environ? else: raise RuntimeError( "Must have not got here. " "pwd_mode must be either cwd or PWD. And it is now %r" % (_pwd_mode,) ) class chpwd(object): """Wrapper around os.chdir which also adjusts environ['PWD'] The reason is that otherwise PWD is simply inherited from the shell and we have no ability to assess directory path without dereferencing symlinks. 
If used as a context manager it allows to temporarily change directory to the given path """ def __init__(self, path, mkdir=False, logsuffix=''): if path: pwd = getpwd() self._prev_pwd = pwd else: self._prev_pwd = None return if not isabs(path): path = normpath(op.join(pwd, path)) if not os.path.exists(path) and mkdir: self._mkdir = True os.mkdir(path) else: self._mkdir = False lgr.debug("chdir %r -> %r %s", self._prev_pwd, path, logsuffix) os.chdir(path) # for grep people -- ok, to chdir here! os.environ['PWD'] = str(path) def __enter__(self): # nothing more to do really, chdir was in the constructor pass def __exit__(self, exc_type, exc_val, exc_tb): if self._prev_pwd: # Need to use self.__class__ so this instance, if the entire # thing mocked during the test, still would use correct chpwd self.__class__(self._prev_pwd, logsuffix="(coming back)") def dlabspath(path, norm=False): """Symlinks-in-the-cwd aware abspath os.path.abspath relies on os.getcwd() which would not know about symlinks in the path TODO: we might want to norm=True by default to match behavior of os .path.abspath? """ if not isabs(path): # if not absolute -- relative to pwd path = op.join(getpwd(), path) return normpath(path) if norm else path def with_pathsep(path): """Little helper to guarantee that path ends with /""" return path + sep if not path.endswith(sep) else path def get_path_prefix(path, pwd=None): """Get path prefix (for current directory) Returns relative path to the topdir, if we are under topdir, and if not absolute path to topdir. If `pwd` is not specified - current directory assumed """ pwd = pwd or getpwd() path = dlabspath(path) path_ = with_pathsep(path) pwd_ = with_pathsep(pwd) common = commonprefix((path_, pwd_)) if common.endswith(sep) and common in {path_, pwd_}: # we are in subdir or above the path = use relative path location_prefix = relpath(path, pwd) # if benign "here" - cut off if location_prefix in (curdir, curdir + sep): location_prefix = '' return location_prefix else: # just return absolute path return path def _get_normalized_paths(path, prefix): if isabs(path) != isabs(prefix): raise ValueError("Both paths must either be absolute or relative. " "Got %r and %r" % (path, prefix)) path = with_pathsep(path) prefix = with_pathsep(prefix) return path, prefix def path_startswith(path, prefix): """Return True if path starts with prefix path Parameters ---------- path: str prefix: str """ path, prefix = _get_normalized_paths(path, prefix) return path.startswith(prefix) def path_is_subpath(path, prefix): """Return True if path is a subpath of prefix It will return False if path == prefix. Parameters ---------- path: str prefix: str """ path, prefix = _get_normalized_paths(path, prefix) return (len(prefix) < len(path)) and path.startswith(prefix) def knows_annex(path): """Returns whether at a given path there is information about an annex It is just a thin wrapper around GitRepo.is_with_annex() classmethod which also checks for `path` to exist first. This includes actually present annexes, but also uninitialized ones, or even the presence of a remote annex branch. 
""" from os.path import exists if not exists(path): lgr.debug("No annex: test path {0} doesn't exist".format(path)) return False from datalad.support.gitrepo import GitRepo return GitRepo(path, init=False, create=False).is_with_annex() @contextmanager def make_tempfile(content=None, wrapped=None, **tkwargs): """Helper class to provide a temporary file name and remove it at the end (context manager) Parameters ---------- mkdir : bool, optional (default: False) If True, temporary directory created using tempfile.mkdtemp() content : str or bytes, optional Content to be stored in the file created wrapped : function, optional If set, function name used to prefix temporary file name `**tkwargs`: All other arguments are passed into the call to tempfile.mk{,d}temp(), and resultant temporary filename is passed as the first argument into the function t. If no 'prefix' argument is provided, it will be constructed using module and function names ('.' replaced with '_'). To change the used directory without providing keyword argument 'dir' set DATALAD_TESTS_TEMP_DIR. Examples -------- >>> from os.path import exists >>> from datalad.utils import make_tempfile >>> with make_tempfile() as fname: ... k = open(fname, 'w').write('silly test') >>> assert not exists(fname) # was removed >>> with make_tempfile(content="blah") as fname: ... assert open(fname).read() == "blah" """ if tkwargs.get('mkdir', None) and content is not None: raise ValueError("mkdir=True while providing content makes no sense") tkwargs_ = get_tempfile_kwargs(tkwargs, wrapped=wrapped) # if DATALAD_TESTS_TEMP_DIR is set, use that as directory, # let mktemp handle it otherwise. However, an explicitly provided # dir=... will override this. mkdir = tkwargs_.pop('mkdir', False) filename = {False: tempfile.mktemp, True: tempfile.mkdtemp}[mkdir](**tkwargs_) # MIH: not clear to me why we need to perform this (possibly expensive) # resolve. It was already part of the original implementation # 008d9ab8cc3e0170c0a9b8479e80dee9ffe6eb7f filename = Path(filename).resolve() if content: (filename.write_bytes if isinstance(content, bytes) else filename.write_text)(content) # TODO globbing below can also be done with pathlib filename = str(filename) if __debug__: lgr.debug( 'Created temporary %s named %s', 'directory' if mkdir else 'file', filename) try: yield filename finally: # glob here for all files with the same name (-suffix) # would be useful whenever we requested .img filename, # and function creates .hdr as well # MIH: this is undocumented behavior, and undesired in the general # case. it should be made conditional and explicit lsuffix = len(tkwargs_.get('suffix', '')) filename_ = lsuffix and filename[:-lsuffix] or filename filenames = glob.glob(filename_ + '*') if len(filename_) < 3 or len(filenames) > 5: # For paranoid yoh who stepped into this already ones ;-) lgr.warning("It is unlikely that it was intended to remove all" " files matching %r. 
Skipping" % filename_) return for f in filenames: try: rmtemp(f) except OSError: # pragma: no cover pass def _path_(*p): """Given a path in POSIX" notation, regenerate one in native to the env one""" if on_windows: return op.join(*map(lambda x: op.join(*x.split('/')), p)) else: # Assume that all others as POSIX compliant so nothing to be done return op.join(*p) def get_timestamp_suffix(time_=None, prefix='-'): """Return a time stamp (full date and time up to second) primarily to be used for generation of log files names """ args = [] if time_ is not None: if isinstance(time_, int): time_ = time.gmtime(time_) args.append(time_) return time.strftime(prefix + TIMESTAMP_FMT, *args) # unused in -core def get_logfilename(dspath, cmd='datalad'): """Return a filename to use for logging under a dataset/repository directory would be created if doesn't exist, but dspath must exist and be a directory """ assert(exists(dspath)) assert(isdir(dspath)) ds_logdir = ensure_dir(dspath, '.git', 'datalad', 'logs') # TODO: use WEB_META_LOG whenever #789 merged return op.join(ds_logdir, 'crawl-%s.log' % get_timestamp_suffix()) def get_trace(edges, start, end, trace=None): """Return the trace/path to reach a node in a tree. Parameters ---------- edges : sequence(2-tuple) The tree given by a sequence of edges (parent, child) tuples. The nodes can be identified by any value and data type that supports the '==' operation. start : Identifier of the start node. Must be present as a value in the parent location of an edge tuple in order to be found. end : Identifier of the target/end node. Must be present as a value in the child location of an edge tuple in order to be found. trace : list Mostly useful for recursive calls, and used internally. Returns ------- None or list Returns a list with the trace to the target (the starts and the target are not included in the trace, hence if start and end are directly connected an empty list is returned), or None when no trace to the target can be found, or start and end are identical. """ # the term trace is used to avoid confusion with a path in the sense # of a filesystem path, but the analogy fits and nodes can be paths if trace is None: trace = [] if not edges: raise ValueError("no edges given") for cand in edges: cand_super, cand_sub = cand if cand_sub in trace: # only DAGs, skip any cyclic traces continue if trace and cand_super != trace[-1]: # only consider edges that lead off the end of the trace continue if not trace and cand_super != start: # we got nothing yet, and this edges is not matching the start continue if cand_sub == end: return trace # dive into potential subnodes cand_trace = get_trace( edges, start, end, trace + [cand_sub]) if cand_trace: return cand_trace return None def get_dataset_root(path): """Return the root of an existent dataset containing a given path The root path is returned in the same absolute or relative form as the input argument. If no associated dataset exists, or the input path doesn't exist, None is returned. If `path` is a symlink or something other than a directory, its the root dataset containing its parent directory will be reported. If none can be found, at a symlink at `path` is pointing to a dataset, `path` itself will be reported as the root. 
Parameters ---------- path : Path-like Returns ------- str or None """ path = str(path) suffix = '.git' altered = None if islink(path) or not isdir(path): altered = path path = dirname(path) apath = abspath(path) # while we can still go up while split(apath)[1]: if exists(op.join(path, suffix)): return path # new test path in the format we got it path = normpath(op.join(path, os.pardir)) # no luck, next round apath = abspath(path) # if we applied dirname() at the top, we give it another go with # the actual path, if it was itself a symlink, it could be the # top-level dataset itself if altered and exists(op.join(altered, suffix)): return altered return None # ATM used in datalad_crawler extension, so do not remove yet def try_multiple(ntrials, exception, base, f, *args, **kwargs): """Call f multiple times making exponentially growing delay between the calls""" for trial in range(1, ntrials+1): try: return f(*args, **kwargs) except exception as exc: if trial == ntrials: raise # just reraise on the last trial t = base ** trial lgr.warning("Caught %s on trial #%d. Sleeping %f and retrying", CapturedException(exc), trial, t) sleep(t) @optional_args def try_multiple_dec( f, ntrials=None, duration=0.1, exceptions=None, increment_type=None, exceptions_filter=None, logger=None, ): """Decorator to try function multiple times. Main purpose is to decorate functions dealing with removal of files/directories and which might need a few seconds to work correctly on Windows which takes its time to release files/directories. Parameters ---------- ntrials: int, optional duration: float, optional Seconds to sleep before retrying. increment_type: {None, 'exponential'} Note that if it is exponential, duration should typically be > 1.0 so it grows with higher power exceptions: Exception or tuple of Exceptions, optional Exception or a tuple of multiple exceptions, on which to retry exceptions_filter: callable, optional If provided, this function will be called with a caught exception instance. If function returns True - we will re-try, if False - exception will be re-raised without retrying. logger: callable, optional Logger to log upon failure. If not provided, will use stock logger at the level of 5 (heavy debug). """ if not exceptions: exceptions = (OSError, WindowsError, PermissionError) \ if on_windows else OSError if not ntrials: # Life goes fast on proper systems, no need to delay it much ntrials = 100 if on_windows else 10 if logger is None: def logger(*args, **kwargs): return lgr.log(5, *args, **kwargs) assert increment_type in {None, 'exponential'} @wraps(f) def _wrap_try_multiple_dec(*args, **kwargs): t = duration for trial in range(ntrials): try: return f(*args, **kwargs) except exceptions as exc: if exceptions_filter and not exceptions_filter(exc): raise if trial < ntrials - 1: if increment_type == 'exponential': t = duration ** (trial + 1) logger( "Caught %s on trial #%d. Sleeping %f and retrying", CapturedException(exc), trial, t) sleep(t) else: raise return _wrap_try_multiple_dec @try_multiple_dec def unlink(f): """'Robust' unlink. Would try multiple times On windows boxes there is evidence for a latency of more than a second until a file is considered no longer "in-use". WindowsError is not known on Linux, and if IOError or any other exception is thrown then if except statement has WindowsError in it -- NameError also see gh-2533 """ # Check for open files assert_no_open_files(f) return os.unlink(f) @try_multiple_dec def _rmtree(*args, **kwargs): """Just a helper to decorate shutil.rmtree. 
rmtree defined above does more and ideally should not itself be decorated since a recursive definition and does checks for open files inside etc - might be too runtime expensive """ return shutil.rmtree(*args, **kwargs) def slash_join(base, extension): """Join two strings with a '/', avoiding duplicate slashes If any of the strings is None the other is returned as is. """ if extension is None: return base if base is None: return extension return '/'.join( (base.rstrip('/'), extension.lstrip('/'))) # # IO Helpers # # unused in -core def open_r_encdetect(fname, readahead=1000): """Return a file object in read mode with auto-detected encoding This is helpful when dealing with files of unknown encoding. Parameters ---------- readahead: int, optional How many bytes to read for guessing the encoding type. If negative - full file will be read """ from chardet import detect import io # read some bytes from the file with open(fname, 'rb') as f: head = f.read(readahead) enc = detect(head) denc = enc.get('encoding', None) lgr.debug("Auto-detected encoding %s for file %s (confidence: %s)", denc, fname, enc.get('confidence', 'unknown')) return io.open(fname, encoding=denc) def read_file(fname, decode=True): """A helper to read file passing content via ensure_unicode Parameters ---------- decode: bool, optional if False, no ensure_unicode and file content returned as bytes """ with open(fname, 'rb') as f: content = f.read() return ensure_unicode(content) if decode else content def read_csv_lines(fname, dialect=None, readahead=16384, **kwargs): """A generator of dict records from a CSV/TSV Automatically guesses the encoding for each record to convert to UTF-8 Parameters ---------- fname: str Filename dialect: str, optional Dialect to specify to csv.reader. If not specified -- guessed from the file, if fails to guess, "excel-tab" is assumed readahead: int, optional How many bytes to read from the file to guess the type **kwargs Passed to `csv.reader` """ import csv if dialect is None: with open(fname) as tsvfile: # add robustness, use a sniffer try: dialect = csv.Sniffer().sniff(tsvfile.read(readahead)) except Exception as exc: lgr.warning( 'Could not determine file-format, assuming TSV: %s', CapturedException(exc) ) dialect = 'excel-tab' kw = dict(encoding='utf-8') with open(fname, 'r', **kw) as tsvfile: # csv.py doesn't do Unicode; encode temporarily as UTF-8: csv_reader = csv.reader( tsvfile, dialect=dialect, **kwargs ) header = None for row in csv_reader: # decode UTF-8 back to Unicode, cell by cell: row_unicode = map(ensure_unicode, row) if header is None: header = list(row_unicode) else: yield dict(zip(header, row_unicode)) def import_modules(modnames, pkg, msg="Failed to import {module}", log=lgr.debug): """Helper to import a list of modules without failing if N/A Parameters ---------- modnames: list of str List of module names to import pkg: str Package under which to import msg: str, optional Message template for .format() to log at DEBUG level if import fails. Keys {module} and {package} will be provided and ': {exception}' appended log: callable, optional Logger call to use for logging messages """ from importlib import import_module _globals = globals() mods_loaded = [] if pkg and not pkg in sys.modules: # with python 3.5.1 (ok with 3.5.5) somehow kept running into # Failed to import dlsub1: Parent module 'dltestm1' not loaded # while running the test. 
Preloading pkg resolved the issue import_module(pkg) for modname in modnames: try: _globals[modname] = mod = import_module( '.{}'.format(modname), pkg) mods_loaded.append(mod) except Exception as exc: from datalad.support.exceptions import CapturedException ce = CapturedException(exc) log((msg + ': {exception}').format( module=modname, package=pkg, exception=ce.message)) return mods_loaded def import_module_from_file(modpath, pkg=None, log=lgr.debug): """Import provided module given a path TODO: - RF/make use of it in pipeline.py which has similar logic - join with import_modules above? Parameters ---------- pkg: module, optional If provided, and modpath is under pkg.__path__, relative import will be used """ assert(modpath.endswith('.py')) # for now just for .py files log("Importing %s" % modpath) modname = basename(modpath)[:-3] relmodpath = None if pkg: for pkgpath in pkg.__path__: if path_is_subpath(modpath, pkgpath): # for now relying on having .py extension -- assertion above relmodpath = '.' + relpath(modpath[:-3], pkgpath).replace(sep, '.') break try: if relmodpath: from importlib import import_module mod = import_module(relmodpath, pkg.__name__) else: dirname_ = dirname(modpath) try: sys.path.insert(0, dirname_) mod = __import__(modname, level=0) finally: if dirname_ in sys.path: sys.path.pop(sys.path.index(dirname_)) else: log("Expected path %s to be within sys.path, but it was gone!" % dirname_) except Exception as e: raise RuntimeError( "Failed to import module from %s" % modpath) from e return mod def get_encoding_info(): """Return a dictionary with various encoding/locale information""" import sys, locale from collections import OrderedDict return OrderedDict([ ('default', sys.getdefaultencoding()), ('filesystem', sys.getfilesystemencoding()), ('locale.prefered', locale.getpreferredencoding()), ]) def get_envvars_info(): from collections import OrderedDict envs = [] for var, val in os.environ.items(): if ( var.startswith('PYTHON') or var.startswith('LC_') or var.startswith('GIT_') or var in ('LANG', 'LANGUAGE', 'PATH') ): envs.append((var, val)) return OrderedDict(envs) # This class is modified from Snakemake (v5.1.4) class SequenceFormatter(string.Formatter): """string.Formatter subclass with special behavior for sequences. This class delegates formatting of individual elements to another formatter object. Non-list objects are formatted by calling the delegate formatter's "format_field" method. List-like objects (list, tuple, set, frozenset) are formatted by formatting each element of the list according to the specified format spec using the delegate formatter and then joining the resulting strings with a separator (space by default). """ def __init__(self, separator=" ", element_formatter=string.Formatter(), *args, **kwargs): self.separator = separator self.element_formatter = element_formatter def format_element(self, elem, format_spec): """Format a single element For sequences, this is called once for each element in a sequence. For anything else, it is called on the entire object. It is intended to be overridden in subclases. 
""" return self.element_formatter.format_field(elem, format_spec) def format_field(self, value, format_spec): if isinstance(value, (list, tuple, set, frozenset)): return self.separator.join(self.format_element(v, format_spec) for v in value) else: return self.format_element(value, format_spec) # TODO: eventually we might want to make use of attr module class File(object): """Helper for a file entry in the create_tree/@with_tree It allows to define additional settings for entries """ def __init__(self, name, executable=False): """ Parameters ---------- name : str Name of the file executable: bool, optional Make it executable """ self.name = name self.executable = executable def __str__(self): return self.name def create_tree_archive(path, name, load, overwrite=False, archives_leading_dir=True): """Given an archive `name`, create under `path` with specified `load` tree """ from datalad.support.archives import compress_files dirname = file_basename(name) full_dirname = op.join(path, dirname) os.makedirs(full_dirname) create_tree(full_dirname, load, archives_leading_dir=archives_leading_dir) # create archive if archives_leading_dir: compress_files([dirname], name, path=path, overwrite=overwrite) else: compress_files(list(map(basename, glob.glob(op.join(full_dirname, '*')))), op.join(pardir, name), path=op.join(path, dirname), overwrite=overwrite) # remove original tree rmtree(full_dirname) def create_tree(path, tree, archives_leading_dir=True, remove_existing=False): """Given a list of tuples (name, load) create such a tree if load is a tuple itself -- that would create either a subtree or an archive with that content and place it into the tree if name ends with .tar.gz """ lgr.log(5, "Creating a tree under %s", path) if not exists(path): os.makedirs(path) if isinstance(tree, dict): tree = tree.items() for file_, load in tree: if isinstance(file_, File): executable = file_.executable name = file_.name else: executable = False name = file_ full_name = op.join(path, name) if remove_existing and lexists(full_name): rmtree(full_name, chmod_files=True) if isinstance(load, (tuple, list, dict)): if name.endswith('.tar.gz') or name.endswith('.tar') or name.endswith('.zip'): create_tree_archive( path, name, load, archives_leading_dir=archives_leading_dir) else: create_tree( full_name, load, archives_leading_dir=archives_leading_dir, remove_existing=remove_existing) else: open_func = open if full_name.endswith('.gz'): open_func = gzip.open elif full_name.split('.')[-1] in ('xz', 'lzma'): import lzma open_func = lzma.open with open_func(full_name, "wb") as f: f.write(ensure_bytes(load, 'utf-8')) if executable: os.chmod(full_name, os.stat(full_name).st_mode | stat.S_IEXEC) def get_suggestions_msg(values, known, sep="\n "): """Return a formatted string with suggestions for values given the known ones """ import difflib suggestions = [] for value in ensure_list(values): # might not want to do it if we change presentation below suggestions += difflib.get_close_matches(value, known) suggestions = unique(suggestions) msg = "Did you mean any of these?" if suggestions: if '\n' in sep: # if separator includes new line - we add entire separator right away msg += sep else: msg += ' ' return msg + "%s\n" % sep.join(suggestions) return '' def bytes2human(n, format='%(value).1f %(symbol)sB'): """ Convert n bytes into a human readable string based on format. 
symbols can be either "customary", "customary_ext", "iec" or "iec_ext", see: http://goo.gl/kTQMs >>> from datalad.utils import bytes2human >>> bytes2human(1) '1.0 B' >>> bytes2human(1024) '1.0 KB' >>> bytes2human(1048576) '1.0 MB' >>> bytes2human(1099511627776127398123789121) '909.5 YB' >>> bytes2human(10000, "%(value).1f %(symbol)s/sec") '9.8 K/sec' >>> # precision can be adjusted by playing with %f operator >>> bytes2human(10000, format="%(value).5f %(symbol)s") '9.76562 K' Taken from: http://goo.gl/kTQMs and subsequently simplified Original Author: Giampaolo Rodola' <g.rodola [AT] gmail [DOT] com> License: MIT """ n = int(n) if n < 0: raise ValueError("n < 0") symbols = ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y') prefix = {} for i, s in enumerate(symbols[1:]): prefix[s] = 1 << (i + 1) * 10 for symbol in reversed(symbols[1:]): if n >= prefix[symbol]: value = float(n) / prefix[symbol] return format % locals() return format % dict(symbol=symbols[0], value=n) def quote_cmdlinearg(arg): """Perform platform-appropriate argument quoting""" # https://stackoverflow.com/a/15262019 return '"{}"'.format( arg.replace('"', '""') ) if on_windows else shlex_quote(arg) def guard_for_format(arg): """Replace { and } with {{ and }} To be used in cases if arg is not expected to have provided by user .format() placeholders, but 'arg' might become a part of a composite passed to .format(), e.g. via 'Run' """ return arg.replace('{', '{{').replace('}', '}}') def join_cmdline(args): """Join command line args into a string using quote_cmdlinearg """ return ' '.join(map(quote_cmdlinearg, args)) def split_cmdline(s): """Perform platform-appropriate command line splitting. Identical to `shlex.split()` on non-windows platforms. Modified from https://stackoverflow.com/a/35900070 """ if not on_windows: return shlex_split(s) # the rest is for windows RE_CMD_LEX = r'''"((?:""|\\["\\]|[^"])*)"?()|(\\\\(?=\\*")|\\")|(&&?|\|\|?|\d?>|[<])|([^\s"&|<>]+)|(\s+)|(.)''' args = [] accu = None # collects pieces of one arg for qs, qss, esc, pipe, word, white, fail in re.findall(RE_CMD_LEX, s): if word: pass # most frequent elif esc: word = esc[1] elif white or pipe: if accu is not None: args.append(accu) if pipe: args.append(pipe) accu = None continue elif fail: raise ValueError("invalid or incomplete shell string") elif qs: word = qs.replace('\\"', '"').replace('\\\\', '\\') if platform == 0: word = word.replace('""', '"') else: word = qss # may be even empty; must be last accu = (accu or '') + word if accu is not None: args.append(accu) return args def get_wrapped_class(wrapped): """Determine the command class a wrapped __call__ belongs to""" mod = sys.modules[wrapped.__module__] command_class_name = wrapped.__qualname__.split('.')[-2] _func_class = mod.__dict__[command_class_name] lgr.debug("Determined class of decorated function: %s", _func_class) return _func_class def _make_assure_kludge(fn): old_name = fn.__name__.replace("ensure", "assure") @wraps(fn) def compat_fn(*args, **kwargs): warnings.warn( "{} is deprecated and will be removed in a future release. " "Use {} instead." .format(old_name, fn.__name__), DeprecationWarning) return fn(*args, **kwargs) compat_fn.__doc__ = ("Note: This function is deprecated. Use {} instead." 
.format(fn.__name__)) return compat_fn assure_tuple_or_list = _make_assure_kludge(ensure_tuple_or_list) assure_iter = _make_assure_kludge(ensure_iter) assure_list = _make_assure_kludge(ensure_list) assure_list_from_str = _make_assure_kludge(ensure_list_from_str) assure_dict_from_str = _make_assure_kludge(ensure_dict_from_str) assure_bytes = _make_assure_kludge(ensure_bytes) assure_unicode = _make_assure_kludge(ensure_unicode) assure_bool = _make_assure_kludge(ensure_bool) assure_dir = _make_assure_kludge(ensure_dir) lgr.log(5, "Done importing datalad.utils") def check_symlink_capability(path, target): """helper similar to datalad.tests.utils.has_symlink_capability However, for use in a datalad command context, we shouldn't assume to be able to write to tmpfile and also not import a whole lot from datalad's test machinery. Finally, we want to know, whether we can create a symlink at a specific location, not just somewhere. Therefore use arbitrary path to test-build a symlink and delete afterwards. Suitable location can therefore be determined by high lever code. Parameters ---------- path: Path target: Path Returns ------- bool """ try: target.touch() path.symlink_to(target) return True except Exception: return False finally: if path.exists(): path.unlink() if target.exists(): target.unlink()
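# Hedged usage sketch (added illustration, not part of datalad.utils): one way a
# caller might probe for symlink support with check_symlink_capability. The scratch
# file names below are hypothetical; the helper itself creates and removes both
# probe paths, so the caller only needs a directory it is allowed to write to.
def _example_probe_symlink_support(scratch_dir):
    from pathlib import Path
    base = Path(scratch_dir)
    # returns True if a symlink could be created at base / 'probe.link'
    return check_symlink_capability(base / 'probe.link', base / 'probe.target')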
def ensure_bytes(s, encoding='utf-8'):
    """Convert/encode unicode string to bytes.

    If `s` isn't a string, return it as is.

    Parameters
    ----------
    encoding: str, optional
        Encoding to use. "utf-8" is the default
    """
    if not isinstance(s, str):
        return s
    return s.encode(encoding)
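# Hedged behaviour sketch (added illustration, not part of datalad.utils):
# ensure_bytes encodes str values with the given encoding and passes any
# non-string value through unchanged.
def _example_ensure_bytes():
    assert ensure_bytes('grüße') == 'grüße'.encode('utf-8')  # str -> bytes (utf-8 default)
    assert ensure_bytes(b'raw') == b'raw'                    # bytes are returned as is
    assert ensure_bytes(42) == 42                            # non-strings pass through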
792
804
# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*- # ex: set sts=4 ts=4 sw=4 et: # ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## # # See COPYING file distributed along with the datalad package for the # copyright and license terms. # # ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## import collections from collections.abc import Callable import re import builtins import time import logging import shutil import os import sys import tempfile from tempfile import NamedTemporaryFile import platform import gc import glob import gzip import stat import string import warnings import os.path as op from copy import copy as shallow_copy from contextlib import contextmanager from functools import ( lru_cache, wraps, ) from time import sleep import inspect from itertools import tee # this import is required because other modules import opj from here. from os.path import join as opj from os.path import ( abspath, basename, commonprefix, curdir, dirname, exists, expanduser, expandvars, isabs, isdir, islink, lexists, normpath, pardir, relpath, sep, split, splitdrive ) import posixpath from shlex import ( quote as shlex_quote, split as shlex_split, ) # from datalad.dochelpers import get_docstring_split from datalad.consts import TIMESTAMP_FMT from datalad.support.exceptions import CapturedException unicode_srctypes = str, bytes lgr = logging.getLogger("datalad.utils") lgr.log(5, "Importing datalad.utils") # # Some useful variables # platform_system = platform.system().lower() on_windows = platform_system == 'windows' on_osx = platform_system == 'darwin' on_linux = platform_system == 'linux' on_msys_tainted_paths = on_windows \ and 'MSYS_NO_PATHCONV' not in os.environ \ and os.environ.get('MSYSTEM', '')[:4] in ('MSYS', 'MING') # Takes ~200msec, so should not be called at import time @lru_cache() # output should not change through life time of datalad process def get_linux_distribution(): """Compatibility wrapper for {platform,distro}.linux_distribution(). """ if hasattr(platform, "linux_distribution"): # Use deprecated (but faster) method if it's available. with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=DeprecationWarning) result = platform.linux_distribution() else: import distro # We require this for Python 3.8 and above. result = distro.linux_distribution(full_distribution_name=False) return result # Those weren't used for any critical decision making, thus we just set them to None # Use get_linux_distribution() directly where needed linux_distribution_name = linux_distribution_release = None # Maximal length of cmdline string # Query the system and use hardcoded "knowledge" if None # probably getconf ARG_MAX might not be available # The last one would be the most conservative/Windows CMD_MAX_ARG_HARDCODED = 2097152 if on_linux else 262144 if on_osx else 32767 try: CMD_MAX_ARG = os.sysconf('SC_ARG_MAX') assert CMD_MAX_ARG > 0 if CMD_MAX_ARG > CMD_MAX_ARG_HARDCODED * 1e6: # workaround for some kind of a bug which comes up with python 3.4 # see https://github.com/datalad/datalad/issues/3150 # or on older CentOS with conda and python as new as 3.9 # see https://github.com/datalad/datalad/issues/5943 # TODO: let Yarik know that the world is a paradise now whenever 1e6 # is not large enough CMD_MAX_ARG = min(CMD_MAX_ARG, CMD_MAX_ARG_HARDCODED) except Exception as exc: # ATM (20181005) SC_ARG_MAX available only on POSIX systems # so exception would be thrown e.g. 
on Windows, or # somehow during Debian build for nd14.04 it is coming up with -1: # https://github.com/datalad/datalad/issues/3015 CMD_MAX_ARG = CMD_MAX_ARG_HARDCODED lgr.debug( "Failed to query or got useless SC_ARG_MAX sysconf, " "will use hardcoded value: %s", exc) # Even with all careful computations we do, due to necessity to account for # environment and what not, we still could not figure out "exact" way to # estimate it, but it was shown that 300k safety margin on linux was sufficient. # https://github.com/datalad/datalad/pull/2977#issuecomment-436264710 # 300k is ~15%, so to be safe, and for paranoid us we will just use up to 50% # of the length for "safety margin". We might probably still blow due to # env vars, unicode, etc... so any hard limit imho is not a proper solution CMD_MAX_ARG = int(0.5 * CMD_MAX_ARG) lgr.debug( "Maximal length of cmdline string (adjusted for safety margin): %d", CMD_MAX_ARG) # # Little helpers # # `getargspec` has been deprecated in Python 3. ArgSpecFake = collections.namedtuple( "ArgSpecFake", ["args", "varargs", "keywords", "defaults"]) def getargspec(func, *, include_kwonlyargs=False): """Compat shim for getargspec deprecated in python 3. The main difference from inspect.getargspec (and inspect.getfullargspec for that matter) is that by using inspect.signature we are providing correct args/defaults for functools.wraps'ed functions. `include_kwonlyargs` option was added to centralize getting all args, even the ones which are kwonly (follow the ``*,``). For internal use and not advised for use in 3rd party code. Please use inspect.signature directly. """ # We use signature, and not getfullargspec, because only signature properly # "passes" args from a functools.wraps decorated function. # Note: getfullargspec works Ok on wrapt-decorated functions f_sign = inspect.signature(func) # Loop through parameters and compose argspec args4 = [[], None, None, {}] # Collect all kwonlyargs into a dedicated dict - name: default kwonlyargs = {} # shortcuts args, defaults = args4[0], args4[3] P = inspect.Parameter for p_name, p in f_sign.parameters.items(): if p.kind in (P.POSITIONAL_ONLY, P.POSITIONAL_OR_KEYWORD): assert not kwonlyargs # yoh: must not come after kwonlyarg args.append(p_name) if p.default is not P.empty: defaults[p_name] = p.default elif p.kind == P.VAR_POSITIONAL: args4[1] = p_name elif p.kind == P.VAR_KEYWORD: args4[2] = p_name elif p.kind == P.KEYWORD_ONLY: assert p.default is not P.empty kwonlyargs[p_name] = p.default if kwonlyargs: if not include_kwonlyargs: raise ValueError( 'Function has keyword-only parameters or annotations, either use ' 'inspect.signature() API which can support them, or provide include_kwonlyargs=True ' 'to this function' ) else: args.extend(list(kwonlyargs)) defaults.update(kwonlyargs) # harmonize defaults to how original getargspec returned them -- just a tuple args4[3] = None if not defaults else tuple(defaults.values()) return ArgSpecFake(*args4) def any_re_search(regexes, value): """Return if any of regexes (list or str) searches successfully for value""" for regex in ensure_tuple_or_list(regexes): if re.search(regex, value): return True return False def not_supported_on_windows(msg=None): """A little helper to be invoked to consistently fail whenever functionality is not supported (yet) on Windows """ if on_windows: raise NotImplementedError("This functionality is not yet implemented for Windows OS" + (": %s" % msg if msg else "")) def get_home_envvars(new_home): """Return dict with env variables to be adjusted for a 
new HOME Only variables found in current os.environ are adjusted. Parameters ---------- new_home: str or Path New home path, in native to OS "schema" """ new_home = str(new_home) out = {'HOME': new_home} if on_windows: # requires special handling, since it has a number of relevant variables # and also Python changed its behavior and started to respect USERPROFILE only # since python 3.8: https://bugs.python.org/issue36264 out['USERPROFILE'] = new_home out['HOMEDRIVE'], out['HOMEPATH'] = splitdrive(new_home) return {v: val for v, val in out.items() if v in os.environ} def shortened_repr(value, l=30): try: if hasattr(value, '__repr__') and (value.__repr__ is not object.__repr__): value_repr = repr(value) if not value_repr.startswith('<') and len(value_repr) > l: value_repr = "<<%s++%d chars++%s>>" % ( value_repr[:l - 16], len(value_repr) - (l - 16 + 4), value_repr[-4:] ) elif value_repr.startswith('<') and value_repr.endswith('>') and ' object at 0x': raise ValueError("I hate those useless long reprs") else: raise ValueError("gimme class") except Exception as e: value_repr = "<%s>" % value.__class__.__name__.split('.')[-1] return value_repr def __auto_repr__(obj): attr_names = tuple() if hasattr(obj, '__dict__'): attr_names += tuple(obj.__dict__.keys()) if hasattr(obj, '__slots__'): attr_names += tuple(obj.__slots__) items = [] for attr in sorted(set(attr_names)): if attr.startswith('_'): continue value = getattr(obj, attr) # TODO: should we add this feature to minimize some talktative reprs # such as of URL? #if value is None: # continue items.append("%s=%s" % (attr, shortened_repr(value))) return "%s(%s)" % (obj.__class__.__name__, ', '.join(items)) def auto_repr(cls): """Decorator for a class to assign it an automagic quick and dirty __repr__ It uses public class attributes to prepare repr of a class Original idea: http://stackoverflow.com/a/27799004/1265472 """ cls.__repr__ = __auto_repr__ return cls def _is_stream_tty(stream): try: # TODO: check on windows if hasattr check would work correctly and # add value: return stream.isatty() except ValueError as exc: # Who knows why it is a ValueError, but let's try to be specific # If there is a problem with I/O - non-interactive, otherwise reraise if "I/O" in str(exc): return False raise def is_interactive(): """Return True if all in/outs are open and tty. Note that in a somewhat abnormal case where e.g. stdin is explicitly closed, and any operation on it would raise a `ValueError("I/O operation on closed file")` exception, this function would just return False, since the session cannot be used interactively. 
""" return all(_is_stream_tty(s) for s in (sys.stdin, sys.stdout, sys.stderr)) def get_ipython_shell(): """Detect if running within IPython and returns its `ip` (shell) object Returns None if not under ipython (no `get_ipython` function) """ try: return get_ipython() except NameError: return None def md5sum(filename): """Compute an MD5 sum for the given file """ from datalad.support.digests import Digester return Digester(digests=['md5'])(filename)['md5'] # unused in -core def sorted_files(path): """Return a (sorted) list of files under path """ return sorted(sum([[op.join(r, f)[len(path) + 1:] for f in files] for r, d, files in os.walk(path) if not '.git' in r], [])) _encoded_dirsep = r'\\' if on_windows else r'/' _VCS_REGEX = r'%s\.(?:git|gitattributes|svn|bzr|hg)(?:%s|$)' % ( _encoded_dirsep, _encoded_dirsep) _DATALAD_REGEX = r'%s\.(?:datalad)(?:%s|$)' % ( _encoded_dirsep, _encoded_dirsep) def find_files(regex, topdir=curdir, exclude=None, exclude_vcs=True, exclude_datalad=False, dirs=False): """Generator to find files matching regex Parameters ---------- regex: basestring exclude: basestring, optional Matches to exclude exclude_vcs: If True, excludes commonly known VCS subdirectories. If string, used as regex to exclude those files (regex: `%r`) exclude_datalad: If True, excludes files known to be datalad meta-data files (e.g. under .datalad/ subdirectory) (regex: `%r`) topdir: basestring, optional Directory where to search dirs: bool, optional Whether to match directories as well as files """ for dirpath, dirnames, filenames in os.walk(topdir): names = (dirnames + filenames) if dirs else filenames # TODO: might want to uniformize on windows to use '/' paths = (op.join(dirpath, name) for name in names) for path in filter(re.compile(regex).search, paths): path = path.rstrip(sep) if exclude and re.search(exclude, path): continue if exclude_vcs and re.search(_VCS_REGEX, path): continue if exclude_datalad and re.search(_DATALAD_REGEX, path): continue yield path find_files.__doc__ %= (_VCS_REGEX, _DATALAD_REGEX) def expandpath(path, force_absolute=True): """Expand all variables and user handles in a path. By default return an absolute path """ path = expandvars(expanduser(path)) if force_absolute: path = abspath(path) return path def posix_relpath(path, start=None): """Behave like os.path.relpath, but always return POSIX paths... on any platform.""" # join POSIX style return posixpath.join( # split and relpath native style # python2.7 ntpath implementation of relpath cannot handle start=None *split( relpath(path, start=start if start is not None else ''))) def is_explicit_path(path): """Return whether a path explicitly points to a location Any absolute path, or relative path starting with either '../' or './' is assumed to indicate a location on the filesystem. 
Any other path format is not considered explicit.""" path = expandpath(path, force_absolute=False) return isabs(path) \ or path.startswith(os.curdir + os.sep) \ or path.startswith(os.pardir + os.sep) # handle this dance once, and import pathlib from here # in all other places from pathlib import ( Path, PurePath, PurePosixPath, ) def rotree(path, ro=True, chmod_files=True): """To make tree read-only or writable Parameters ---------- path : string Path to the tree/directory to chmod ro : bool, optional Whether to make it R/O (default) or RW chmod_files : bool, optional Whether to operate also on files (not just directories) """ if ro: chmod = lambda f: os.chmod(f, os.stat(f).st_mode & ~stat.S_IWRITE) else: chmod = lambda f: os.chmod(f, os.stat(f).st_mode | stat.S_IWRITE | stat.S_IREAD) for root, dirs, files in os.walk(path, followlinks=False): if chmod_files: for f in files: fullf = op.join(root, f) # might be the "broken" symlink which would fail to stat etc if exists(fullf): chmod(fullf) chmod(root) def rmtree(path, chmod_files='auto', children_only=False, *args, **kwargs): """To remove git-annex .git it is needed to make all files and directories writable again first Parameters ---------- path: Path or str Path to remove chmod_files : string or bool, optional Whether to make files writable also before removal. Usually it is just a matter of directories to have write permissions. If 'auto' it would chmod files on windows by default children_only : bool, optional If set, all files and subdirectories would be removed while the path itself (must be a directory) would be preserved `*args` : `**kwargs` : Passed into shutil.rmtree call """ # Give W permissions back only to directories, no need to bother with files if chmod_files == 'auto': chmod_files = on_windows # TODO: yoh thinks that if we could quickly check our Flyweight for # repos if any of them is under the path, and could call .precommit # on those to possibly stop batched processes etc, we did not have # to do it on case by case # Check for open files assert_no_open_files(path) # TODO the whole thing should be reimplemented with pathlib, but for now # at least accept Path path = str(path) if children_only: if not isdir(path): raise ValueError("Can remove children only of directories") for p in os.listdir(path): rmtree(op.join(path, p)) return if not (islink(path) or not isdir(path)): rotree(path, ro=False, chmod_files=chmod_files) if on_windows: # shutil fails to remove paths that exceed 260 characters on Windows machines # that did not enable long path support. A workaround to remove long paths # anyway is to preprend \\?\ to the path. # https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file?redirectedfrom=MSDN#win32-file-namespaces path = r'\\?\ '.strip() + path _rmtree(path, *args, **kwargs) else: # just remove the symlink unlink(path) def rmdir(path, *args, **kwargs): """os.rmdir with our optional checking for open files""" assert_no_open_files(path) os.rmdir(path) def get_open_files(path, log_open=False): """Get open files under a path Note: This function is very slow on Windows. 
Parameters ---------- path : str File or directory to check for open files under log_open : bool or int If set - logger level to use Returns ------- dict path : pid """ # Original idea: https://stackoverflow.com/a/11115521/1265472 import psutil files = {} # since the ones returned by psutil would not be aware of symlinks in the # path we should also get realpath for path # do absolute() in addition to always get an absolute path # even with non-existing paths on windows path = str(Path(path).resolve().absolute()) for proc in psutil.process_iter(): try: open_paths = [p.path for p in proc.open_files()] + [proc.cwd()] for p in open_paths: # note: could be done more efficiently so we do not # renormalize path over and over again etc if path_startswith(p, path): files[p] = proc # Catch a race condition where a process ends # before we can examine its files except psutil.NoSuchProcess: pass except psutil.AccessDenied: pass if files and log_open: lgr.log(log_open, "Open files under %s: %s", path, files) return files _assert_no_open_files_cfg = os.environ.get('DATALAD_ASSERT_NO_OPEN_FILES') if _assert_no_open_files_cfg: def assert_no_open_files(path): files = get_open_files(path, log_open=40) if _assert_no_open_files_cfg == 'assert': assert not files, "Got following files still open: %s" % ','.join(files) elif files: if _assert_no_open_files_cfg == 'pdb': import pdb pdb.set_trace() elif _assert_no_open_files_cfg == 'epdb': import epdb epdb.serve() pass # otherwise we would just issue that error message in the log else: def assert_no_open_files(*args, **kwargs): pass def rmtemp(f, *args, **kwargs): """Wrapper to centralize removing of temp files so we could keep them around It will not remove the temporary file/directory if DATALAD_TESTS_TEMP_KEEP environment variable is defined """ if not os.environ.get('DATALAD_TESTS_TEMP_KEEP'): if not os.path.lexists(f): lgr.debug("Path %s does not exist, so can't be removed", f) return lgr.log(5, "Removing temp file: %s", f) # Can also be a directory if isdir(f): rmtree(f, *args, **kwargs) else: unlink(f) else: lgr.info("Keeping temp file: %s", f) def file_basename(name, return_ext=False): """ Strips up to 2 extensions of length up to 4 characters and starting with alpha not a digit, so we could get rid of .tar.gz etc """ bname = basename(name) fbname = re.sub(r'(\.[a-zA-Z_]\S{1,4}){0,2}$', '', bname) if return_ext: return fbname, bname[len(fbname) + 1:] else: return fbname # unused in -core def escape_filename(filename): """Surround filename in "" and escape " in the filename """ filename = filename.replace('"', r'\"').replace('`', r'\`') filename = '"%s"' % filename return filename # unused in -core def encode_filename(filename): """Encode unicode filename """ if isinstance(filename, str): return filename.encode(sys.getfilesystemencoding()) else: return filename # unused in -core def decode_input(s): """Given input string/bytes, decode according to stdin codepage (or UTF-8) if not defined If fails -- issue warning and decode allowing for errors being replaced """ if isinstance(s, str): return s else: encoding = sys.stdin.encoding or 'UTF-8' try: return s.decode(encoding) except UnicodeDecodeError as exc: lgr.warning( "Failed to decode input string using %s encoding. " "Decoding allowing for errors", encoding) return s.decode(encoding, errors='replace') # unused in -core if on_windows: def lmtime(filepath, mtime): """Set mtime for files. 
On Windows a merely adapter to os.utime """ os.utime(filepath, (time.time(), mtime)) else: def lmtime(filepath, mtime): """Set mtime for files, while not de-referencing symlinks. To overcome absence of os.lutime Works only on linux and OSX ATM """ from .cmd import WitlessRunner # convert mtime to format touch understands [[CC]YY]MMDDhhmm[.SS] smtime = time.strftime("%Y%m%d%H%M.%S", time.localtime(mtime)) lgr.log(3, "Setting mtime for %s to %s == %s", filepath, mtime, smtime) WitlessRunner().run(['touch', '-h', '-t', '%s' % smtime, filepath]) filepath = Path(filepath) rfilepath = filepath.resolve() if filepath.is_symlink() and rfilepath.exists(): # trust no one - adjust also of the target file # since it seemed like downloading under OSX (was it using curl?) # didn't bother with timestamps lgr.log(3, "File is a symlink to %s Setting mtime for it to %s", rfilepath, mtime) os.utime(str(rfilepath), (time.time(), mtime)) # doesn't work on OSX # Runner().run(['touch', '-h', '-d', '@%s' % mtime, filepath]) def ensure_tuple_or_list(obj): """Given an object, wrap into a tuple if not list or tuple """ if isinstance(obj, (list, tuple)): return obj return (obj,) def ensure_iter(s, cls, copy=False, iterate=True): """Given not a list, would place it into a list. If None - empty list is returned Parameters ---------- s: list or anything cls: class Which iterable class to ensure copy: bool, optional If correct iterable is passed, it would generate its shallow copy iterate: bool, optional If it is not a list, but something iterable (but not a str) iterate over it. """ if isinstance(s, cls): return s if not copy else shallow_copy(s) elif isinstance(s, str): return cls((s,)) elif iterate and hasattr(s, '__iter__'): return cls(s) elif s is None: return cls() else: return cls((s,)) def ensure_list(s, copy=False, iterate=True): """Given not a list, would place it into a list. If None - empty list is returned Parameters ---------- s: list or anything copy: bool, optional If list is passed, it would generate a shallow copy of the list iterate: bool, optional If it is not a list, but something iterable (but not a str) iterate over it. """ return ensure_iter(s, list, copy=copy, iterate=iterate) def ensure_list_from_str(s, sep='\n'): """Given a multiline string convert it to a list of return None if empty Parameters ---------- s: str or list """ if not s: return None if isinstance(s, list): return s return s.split(sep) def ensure_dict_from_str(s, **kwargs): """Given a multiline string with key=value items convert it to a dictionary Parameters ---------- s: str or dict Returns None if input s is empty """ if not s: return None if isinstance(s, dict): return s out = {} for value_str in ensure_list_from_str(s, **kwargs): if '=' not in value_str: raise ValueError("{} is not in key=value format".format(repr(value_str))) k, v = value_str.split('=', 1) if k in out: err = "key {} was already defined in {}, but new value {} was provided".format(k, out, v) raise ValueError(err) out[k] = v return out def ensure_bytes(s, encoding='utf-8'): """Convert/encode unicode string to bytes. If `s` isn't a string, return it as is. Parameters ---------- encoding: str, optional Encoding to use. "utf-8" is the default """ if not isinstance(s, str): return s return s.encode(encoding) def ensure_unicode(s, encoding=None, confidence=None): """Convert/decode bytestring to unicode. If `s` isn't a bytestring, return it as is. Parameters ---------- encoding: str, optional Encoding to use. 
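# Hedged usage sketch (not part of the module): the string-coercion helpers
# defined above, assuming they are importable from datalad.utils.
from datalad.utils import ensure_bytes, ensure_dict_from_str, ensure_list_from_str

assert ensure_list_from_str("a\nb\nc") == ['a', 'b', 'c']
assert ensure_list_from_str("") is None                      # empty input -> None
assert ensure_dict_from_str("a=1\nb=2") == {'a': '1', 'b': '2'}
assert ensure_bytes('grüßen') == 'grüßen'.encode('utf-8')    # str -> bytes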
If None, "utf-8" is tried, and then if not a valid UTF-8, encoding will be guessed confidence: float, optional A value between 0 and 1, so if guessing of encoding is of lower than specified confidence, ValueError is raised """ if not isinstance(s, bytes): return s if encoding is None: # Figure out encoding, defaulting to 'utf-8' which is our common # target in contemporary digital society try: return s.decode('utf-8') except UnicodeDecodeError as exc: lgr.debug("Failed to decode a string as utf-8: %s", CapturedException(exc)) # And now we could try to guess from chardet import detect enc = detect(s) denc = enc.get('encoding', None) if denc: denc_confidence = enc.get('confidence', 0) if confidence is not None and denc_confidence < confidence: raise ValueError( "Failed to auto-detect encoding with high enough " "confidence. Highest confidence was %s for %s" % (denc_confidence, denc) ) lgr.log(5, "Auto-detected encoding to be %s", denc) return s.decode(denc) else: raise ValueError( "Could not decode value as utf-8, or to guess its encoding: %s" % repr(s) ) else: return s.decode(encoding) def ensure_bool(s): """Convert value into boolean following convention for strings to recognize on,True,yes as True, off,False,no as False """ if isinstance(s, str): if s.isdigit(): return bool(int(s)) sl = s.lower() if sl in {'y', 'yes', 'true', 'on'}: return True elif sl in {'n', 'no', 'false', 'off'}: return False else: raise ValueError("Do not know how to treat %r as a boolean" % s) return bool(s) def as_unicode(val, cast_types=object): """Given an arbitrary value, would try to obtain unicode value of it For unicode it would return original value, for python2 str or python3 bytes it would use ensure_unicode, for None - an empty (unicode) string, and for any other type (see `cast_types`) - would apply the unicode constructor. If value is not an instance of `cast_types`, TypeError is thrown Parameters ---------- cast_types: type Which types to cast to unicode by providing to constructor """ if val is None: return u'' elif isinstance(val, str): return val elif isinstance(val, unicode_srctypes): return ensure_unicode(val) elif isinstance(val, cast_types): return str(val) else: raise TypeError( "Value %r is not of any of known or provided %s types" % (val, cast_types)) def unique(seq, key=None, reverse=False): """Given a sequence return a list only with unique elements while maintaining order This is the fastest solution. See https://www.peterbe.com/plog/uniqifiers-benchmark and http://stackoverflow.com/a/480227/1265472 for more information. Enhancement -- added ability to compare for uniqueness using a key function Parameters ---------- seq: Sequence to analyze key: callable, optional Function to call on each element so we could decide not on a full element, but on its member etc reverse: bool, optional If True, uniqueness checked in the reverse order, so that the later ones will take the order """ seen = set() seen_add = seen.add trans = reversed if reverse else lambda x: x if not key: out = [x for x in trans(seq) if not (x in seen or seen_add(x))] else: # OPT: could be optimized, since key is called twice, but for our cases # should be just as fine out = [x for x in trans(seq) if not (key(x) in seen or seen_add(key(x)))] return out[::-1] if reverse else out def all_same(items): """Quick check if all items are the same. 
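# Hedged usage sketch (not part of the module): unique() keeps the first
# occurrence by default, deduplicates on key= if given, and keeps the later
# occurrence with reverse=True; assuming import from datalad.utils.
from datalad.utils import unique

assert unique([1, 2, 1, 3, 2]) == [1, 2, 3]
assert unique(['Ab', 'ab', 'c'], key=str.lower) == ['Ab', 'c']   # first spelling wins
assert unique([1, 2, 1, 3], reverse=True) == [2, 1, 3]           # later duplicate wins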
Identical to a check like len(set(items)) == 1 but should be more efficient while working on generators, since would return False as soon as any difference detected thus possibly avoiding unnecessary evaluations """ first = True first_item = None for item in items: if first: first = False first_item = item else: if item != first_item: return False # So we return False if was empty return not first def map_items(func, v): """A helper to apply `func` to all elements (keys and values) within dict No type checking of values passed to func is done, so `func` should be resilient to values which it should not handle Initial usecase - apply_recursive(url_fragment, ensure_unicode) """ # map all elements within item return v.__class__( item.__class__(map(func, item)) for item in v.items() ) def partition(items, predicate=bool): """Partition `items` by `predicate`. Parameters ---------- items : iterable predicate : callable A function that will be mapped over each element in `items`. The elements will partitioned based on whether the return value is false or true. Returns ------- A tuple with two generators, the first for 'false' items and the second for 'true' ones. Notes ----- Taken from Peter Otten's snippet posted at https://nedbatchelder.com/blog/201306/filter_a_list_into_two_parts.html """ a, b = tee((predicate(item), item) for item in items) return ((item for pred, item in a if not pred), (item for pred, item in b if pred)) def generate_chunks(container, size): """Given a container, generate chunks from it with size up to `size` """ # There could be a "smarter" solution but I think this would suffice assert size > 0, "Size should be non-0 positive" while container: yield container[:size] container = container[size:] def generate_file_chunks(files, cmd=None): """Given a list of files, generate chunks of them to avoid exceeding cmdline length Parameters ---------- files: list of str cmd: str or list of str, optional Command to account for as well """ files = ensure_list(files) cmd = ensure_list(cmd) maxl = max(map(len, files)) if files else 0 chunk_size = max( 1, # should at least be 1. If blows then - not our fault (CMD_MAX_ARG - sum((len(x) + 3) for x in cmd) - 4 # for '--' below ) // (maxl + 3) # +3 for possible quotes and a space ) # TODO: additional treatment for "too many arguments"? although # as https://github.com/datalad/datalad/issues/1883#issuecomment # -436272758 # shows there seems to be no hardcoded limit on # of arguments, # but may be we decide to go for smth like follow to be on safe side # chunk_size = min(10240 - len(cmd), chunk_size) file_chunks = generate_chunks(files, chunk_size) return file_chunks # # Generators helpers # def saved_generator(gen): """Given a generator returns two generators, where 2nd one just replays So the first one would be going through the generated items and 2nd one would be yielding saved items """ saved = [] def gen1(): for x in gen: # iterating over original generator saved.append(x) yield x def gen2(): for x in saved: # yielding saved entries yield x return gen1(), gen2() # # Decorators # # Originally better_wraps was created to provide `wrapt`-based, instead of # `functools.wraps` implementation to preserve the correct signature of the # decorated function. By using inspect.signature in our getargspec, which # works fine on `functools.wraps`ed functions, we mediated this necessity. better_wraps = wraps # Borrowed from pandas # Copyright: 2011-2014, Lambda Foundry, Inc. 
and PyData Development Team # License: BSD-3 def optional_args(decorator): """allows a decorator to take optional positional and keyword arguments. Assumes that taking a single, callable, positional argument means that it is decorating a function, i.e. something like this:: @my_decorator def function(): pass Calls decorator with decorator(f, `*args`, `**kwargs`)""" @better_wraps(decorator) def wrapper(*args, **kwargs): def dec(f): return decorator(f, *args, **kwargs) is_decorating = not kwargs and len(args) == 1 and isinstance(args[0], Callable) if is_decorating: f = args[0] args = [] return dec(f) else: return dec return wrapper # TODO: just provide decorators for tempfile.mk* functions. This is ugly! def get_tempfile_kwargs(tkwargs=None, prefix="", wrapped=None): """Updates kwargs to be passed to tempfile. calls depending on env vars """ if tkwargs is None: tkwargs_ = {} else: # operate on a copy of tkwargs to avoid any side-effects tkwargs_ = tkwargs.copy() # TODO: don't remember why I had this one originally # if len(targs)<2 and \ if 'prefix' not in tkwargs_: tkwargs_['prefix'] = '_'.join( ['datalad_temp'] + ([prefix] if prefix else []) + ([''] if (on_windows or not wrapped) else [wrapped.__name__])) directory = os.environ.get('TMPDIR') if directory and 'dir' not in tkwargs_: tkwargs_['dir'] = directory return tkwargs_ @optional_args def line_profile(func): """Q&D helper to line profile the function and spit out stats """ import line_profiler prof = line_profiler.LineProfiler() @wraps(func) def _wrap_line_profile(*args, **kwargs): try: pfunc = prof(func) return pfunc(*args, **kwargs) finally: prof.print_stats() return _wrap_line_profile # unused in -core @optional_args def collect_method_callstats(func): """Figure out methods which call the method repeatedly on the same instance Use case(s): - .repo is expensive since does all kinds of checks. - .config is expensive transitively since it calls .repo each time TODO: - fancy one could look through the stack for the same id(self) to see if that location is already in memo. That would hint to the cases where object is not passed into underlying functions, causing them to redo the same work over and over again - ATM might flood with all "1 lines" calls which are not that informative. The underlying possibly suboptimal use might be coming from their callers. 
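# Hedged usage sketch (not part of the module): optional_args makes one
# decorator usable both bare and with keyword arguments. The tag decorator
# below is hypothetical and only illustrates the calling convention.
from functools import wraps
from datalad.utils import optional_args

@optional_args
def tag(f, label="default"):
    @wraps(f)
    def wrapped(*args, **kwargs):
        return label, f(*args, **kwargs)
    return wrapped

@tag                      # bare usage -> label="default"
def one():
    return 1

@tag(label="custom")      # parametrized usage
def two():
    return 2

assert one() == ("default", 1)
assert two() == ("custom", 2)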
It might or not relate to the previous TODO """ from collections import defaultdict import traceback from time import time memo = defaultdict(lambda: defaultdict(int)) # it will be a dict of lineno: count # gross timing times = [] toppath = dirname(__file__) + sep @wraps(func) def _wrap_collect_method_callstats(*args, **kwargs): try: self = args[0] stack = traceback.extract_stack() caller = stack[-2] stack_sig = \ "{relpath}:{s.name}".format( s=caller, relpath=relpath(caller.filename, toppath)) sig = (id(self), stack_sig) # we will count based on id(self) + wherefrom memo[sig][caller.lineno] += 1 t0 = time() return func(*args, **kwargs) finally: times.append(time() - t0) pass def print_stats(): print("The cost of property {}:".format(func.__name__)) if not memo: print("None since no calls") return # total count counts = {k: sum(v.values()) for k,v in memo.items()} total = sum(counts.values()) ids = {self_id for (self_id, _) in memo} print(" Total: {} calls from {} objects with {} contexts taking {:.2f} sec" .format(total, len(ids), len(memo), sum(times))) # now we need to sort by value for (self_id, caller), count in sorted(counts.items(), key=lambda x: x[1], reverse=True): print(" {} {}: {} from {} lines" .format(self_id, caller, count, len(memo[(self_id, caller)]))) # Upon total exit we print the stats import atexit atexit.register(print_stats) return _wrap_collect_method_callstats # Borrowed from duecredit to wrap duecredit-handling to guarantee failsafe def never_fail(f): """Assure that function never fails -- all exceptions are caught Returns `None` if function fails internally. """ @wraps(f) def wrapped_func(*args, **kwargs): try: return f(*args, **kwargs) except Exception as e: lgr.warning( "DataLad internal failure while running %s: %r. " "Please report at https://github.com/datalad/datalad/issues" % (f, e) ) if os.environ.get('DATALAD_ALLOW_FAIL', False): return f else: return wrapped_func # # Context Managers # # unused in -core @contextmanager def nothing_cm(): """Just a dummy cm to programmically switch context managers""" yield @contextmanager def swallow_outputs(): """Context manager to help consuming both stdout and stderr, and print() stdout is available as cm.out and stderr as cm.err whenever cm is the yielded context manager. Internally uses temporary files to guarantee absent side-effects of swallowing into StringIO which lacks .fileno. print mocking is necessary for some uses where sys.stdout was already bound to original sys.stdout, thus mocking it later had no effect. 
Overriding print function had desired effect """ class StringIOAdapter(object): """Little adapter to help getting out/err values """ def __init__(self): kw = get_tempfile_kwargs({}, prefix="outputs") self._out = NamedTemporaryFile(delete=False, mode='w', **kw) self._err = NamedTemporaryFile(delete=False, mode='w', **kw) def _read(self, h): with open(h.name) as f: return f.read() @property def out(self): if not self._out.closed: self._out.flush() return self._read(self._out) @property def err(self): if not self._err.closed: self._err.flush() return self._read(self._err) @property def handles(self): return self._out, self._err def cleanup(self): self._out.close() self._err.close() out_name = self._out.name err_name = self._err.name from datalad import cfg if cfg.getbool('datalad.log', 'outputs', default=False) \ and lgr.getEffectiveLevel() <= logging.DEBUG: for s, sname in ((self.out, 'stdout'), (self.err, 'stderr')): if s: pref = os.linesep + "| " lgr.debug("Swallowed %s:%s%s", sname, pref, s.replace(os.linesep, pref)) else: lgr.debug("Nothing was swallowed for %s", sname) del self._out del self._err gc.collect() rmtemp(out_name) rmtemp(err_name) def fake_print(*args, **kwargs): sep = kwargs.pop('sep', ' ') end = kwargs.pop('end', '\n') file = kwargs.pop('file', sys.stdout) if file in (oldout, olderr, sys.stdout, sys.stderr): # we mock try: sys.stdout.write(sep.join(args) + end) except UnicodeEncodeError as exc: lgr.error( "Failed to write to mocked stdout, got %s, continue as it " "didn't happen", exc) else: # must be some other file one -- leave it alone oldprint(*args, sep=sep, end=end, file=file) from .ui import ui # preserve -- they could have been mocked already oldprint = getattr(builtins, 'print') oldout, olderr = sys.stdout, sys.stderr olduiout = ui.out adapter = StringIOAdapter() try: sys.stdout, sys.stderr = adapter.handles ui.out = adapter.handles[0] setattr(builtins, 'print', fake_print) yield adapter finally: sys.stdout, sys.stderr, ui.out = oldout, olderr, olduiout setattr(builtins, 'print', oldprint) adapter.cleanup() @contextmanager def swallow_logs(new_level=None, file_=None, name='datalad'): """Context manager to consume all logs. """ lgr = logging.getLogger(name) # Keep old settings old_level = lgr.level old_handlers = lgr.handlers # Let's log everything into a string # TODO: generalize with the one for swallow_outputs class StringIOAdapter(object): """Little adapter to help getting out values And to stay consistent with how swallow_outputs behaves """ def __init__(self): if file_ is None: kw = get_tempfile_kwargs({}, prefix="logs") self._out = NamedTemporaryFile(mode='a', delete=False, **kw) else: out_file = file_ # PY3 requires clearly one or another. race condition possible self._out = open(out_file, 'a') self._final_out = None def _read(self, h): with open(h.name) as f: return f.read() @property def out(self): if self._final_out is not None: # we closed and cleaned up already return self._final_out else: self._out.flush() return self._read(self._out) @property def lines(self): return self.out.split('\n') @property def handle(self): return self._out def cleanup(self): # store for access while object exists self._final_out = self.out self._out.close() out_name = self._out.name del self._out gc.collect() if not file_: rmtemp(out_name) def assert_logged(self, msg=None, level=None, regex=True, **kwargs): """Provide assertion on whether a msg was logged at a given level If neither `msg` nor `level` provided, checks if anything was logged at all. 
Parameters ---------- msg: str, optional Message (as a regular expression, if `regex`) to be searched. If no msg provided, checks if anything was logged at a given level. level: str, optional String representing the level to be logged regex: bool, optional If False, regular `assert_in` is used **kwargs: str, optional Passed to `assert_re_in` or `assert_in` """ from datalad.tests.utils import assert_re_in from datalad.tests.utils import assert_in if regex: match = r'\[%s\] ' % level if level else r"\[\S+\] " else: match = '[%s] ' % level if level else '' if msg: match += msg if match: (assert_re_in if regex else assert_in)(match, self.out, **kwargs) else: assert not kwargs, "no kwargs to be passed anywhere" assert self.out, "Nothing was logged!?" adapter = StringIOAdapter() # TODO: it does store messages but without any formatting, i.e. even without # date/time prefix etc. IMHO it should preserve formatting in case if file_ is # set swallow_handler = logging.StreamHandler(adapter.handle) # we want to log levelname so we could test against it swallow_handler.setFormatter( logging.Formatter('[%(levelname)s] %(message)s')) swallow_handler.filters = sum([h.filters for h in old_handlers], []) lgr.handlers = [swallow_handler] if old_level < logging.DEBUG: # so if HEAVYDEBUG etc -- show them! lgr.handlers += old_handlers if isinstance(new_level, str): new_level = getattr(logging, new_level) if new_level is not None: lgr.setLevel(new_level) try: yield adapter # TODO: if file_ and there was an exception -- most probably worth logging it? # although ideally it should be the next log outside added to that file_ ... oh well finally: lgr.handlers = old_handlers lgr.setLevel(old_level) adapter.cleanup() # TODO: May be melt in with swallow_logs at some point: @contextmanager def disable_logger(logger=None): """context manager to temporarily disable logging This is to provide one of swallow_logs' purposes without unnecessarily creating temp files (see gh-1865) Parameters ---------- logger: Logger Logger whose handlers will be ordered to not log anything. Default: datalad's topmost Logger ('datalad') """ class NullFilter(logging.Filter): """Filter class to reject all records """ def filter(self, record): return 0 if logger is None: # default: all of datalad's logging: logger = logging.getLogger('datalad') filter_ = NullFilter(logger.name) [h.addFilter(filter_) for h in logger.handlers] try: yield logger finally: [h.removeFilter(filter_) for h in logger.handlers] # # Additional handlers # _sys_excepthook = sys.excepthook # Just in case we ever need original one def setup_exceptionhook(ipython=False): """Overloads default sys.excepthook with our exceptionhook handler. If interactive, our exceptionhook handler will invoke pdb.post_mortem; if not interactive, then invokes default handler. """ def _datalad_pdb_excepthook(type, value, tb): import traceback traceback.print_exception(type, value, tb) print() if is_interactive(): import pdb pdb.post_mortem(tb) if ipython: from IPython.core import ultratb sys.excepthook = ultratb.FormattedTB(mode='Verbose', # color_scheme='Linux', call_pdb=is_interactive()) else: sys.excepthook = _datalad_pdb_excepthook def ensure_dir(*args): """Make sure directory exists. Joins the list of arguments to an os-specific path to the desired directory and creates it, if it not exists yet. 
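# Hedged usage sketch (not part of the module): the swallow_* context managers
# defined above capture stdout/stderr and datalad log output for inspection,
# assuming they are importable from datalad.utils.
import logging
from datalad.utils import swallow_logs, swallow_outputs

with swallow_outputs() as cmo:
    print("hello")
    assert cmo.out.startswith("hello")

with swallow_logs(new_level=logging.DEBUG) as cml:
    logging.getLogger('datalad.demo').debug("probing the logger")
    assert "probing the logger" in cml.out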
""" dirname = op.join(*args) if not exists(dirname): os.makedirs(dirname) return dirname def updated(d, update): """Return a copy of the input with the 'update' Primarily for updating dictionaries """ d = d.copy() d.update(update) return d _pwd_mode = None def _switch_to_getcwd(msg, *args): global _pwd_mode _pwd_mode = 'cwd' lgr.debug( msg + ". From now on will be returning os.getcwd(). Directory" " symlinks in the paths will be resolved", *args ) # TODO: we might want to mitigate by going through all flywheighted # repos and tuning up their .paths to be resolved? def getpwd(): """Try to return a CWD without dereferencing possible symlinks This function will try to use PWD environment variable to provide a current working directory, possibly with some directories along the path being symlinks to other directories. Unfortunately, PWD is used/set only by the shell and such functions as `os.chdir` and `os.getcwd` nohow use or modify it, thus `os.getcwd()` returns path with links dereferenced. While returning current working directory based on PWD env variable we verify that the directory is the same as `os.getcwd()` after resolving all symlinks. If that verification fails, we fall back to always use `os.getcwd()`. Initial decision to either use PWD env variable or os.getcwd() is done upon the first call of this function. """ global _pwd_mode if _pwd_mode is None: # we need to decide! try: pwd = os.environ['PWD'] if on_windows and pwd and pwd.startswith('/'): # It should be a path from MSYS. # - it might start with a drive letter or not # - it seems to be "illegal" to have a single letter directories # under / path, i.e. if created - they aren't found # - 'ln -s' does not fail to create a "symlink" but it just # copies! # so we are not likely to need original PWD purpose on # those systems # Verdict: _pwd_mode = 'cwd' else: _pwd_mode = 'PWD' except KeyError: _pwd_mode = 'cwd' if _pwd_mode == 'cwd': return os.getcwd() elif _pwd_mode == 'PWD': try: cwd = os.getcwd() except OSError as exc: if "o such file" in str(exc): # directory was removed but we promised to be robust and # still report the path we might know since we are still in PWD # mode cwd = None else: raise try: pwd = os.environ['PWD'] # do absolute() in addition to always get an absolute path # even with non-existing paths on windows pwd_real = str(Path(pwd).resolve().absolute()) # This logic would fail to catch the case where chdir did happen # to the directory where current PWD is pointing to, e.g. # $> ls -ld $PWD # lrwxrwxrwx 1 yoh yoh 5 Oct 11 13:27 /home/yoh/.tmp/tmp -> /tmp// # hopa:~/.tmp/tmp # $> python -c 'import os; os.chdir("/tmp"); from datalad.utils import getpwd; print(getpwd(), os.getcwd())' # ('/home/yoh/.tmp/tmp', '/tmp') # but I guess that should not be too harmful if cwd is not None and pwd_real != cwd: _switch_to_getcwd( "realpath of PWD=%s is %s whenever os.getcwd()=%s", pwd, pwd_real, cwd ) return cwd return pwd except KeyError: _switch_to_getcwd("PWD env variable is no longer available") return cwd # Must not happen, but may be someone # evil purges PWD from environ? else: raise RuntimeError( "Must have not got here. " "pwd_mode must be either cwd or PWD. And it is now %r" % (_pwd_mode,) ) class chpwd(object): """Wrapper around os.chdir which also adjusts environ['PWD'] The reason is that otherwise PWD is simply inherited from the shell and we have no ability to assess directory path without dereferencing symlinks. 
If used as a context manager it allows to temporarily change directory to the given path """ def __init__(self, path, mkdir=False, logsuffix=''): if path: pwd = getpwd() self._prev_pwd = pwd else: self._prev_pwd = None return if not isabs(path): path = normpath(op.join(pwd, path)) if not os.path.exists(path) and mkdir: self._mkdir = True os.mkdir(path) else: self._mkdir = False lgr.debug("chdir %r -> %r %s", self._prev_pwd, path, logsuffix) os.chdir(path) # for grep people -- ok, to chdir here! os.environ['PWD'] = str(path) def __enter__(self): # nothing more to do really, chdir was in the constructor pass def __exit__(self, exc_type, exc_val, exc_tb): if self._prev_pwd: # Need to use self.__class__ so this instance, if the entire # thing mocked during the test, still would use correct chpwd self.__class__(self._prev_pwd, logsuffix="(coming back)") def dlabspath(path, norm=False): """Symlinks-in-the-cwd aware abspath os.path.abspath relies on os.getcwd() which would not know about symlinks in the path TODO: we might want to norm=True by default to match behavior of os .path.abspath? """ if not isabs(path): # if not absolute -- relative to pwd path = op.join(getpwd(), path) return normpath(path) if norm else path def with_pathsep(path): """Little helper to guarantee that path ends with /""" return path + sep if not path.endswith(sep) else path def get_path_prefix(path, pwd=None): """Get path prefix (for current directory) Returns relative path to the topdir, if we are under topdir, and if not absolute path to topdir. If `pwd` is not specified - current directory assumed """ pwd = pwd or getpwd() path = dlabspath(path) path_ = with_pathsep(path) pwd_ = with_pathsep(pwd) common = commonprefix((path_, pwd_)) if common.endswith(sep) and common in {path_, pwd_}: # we are in subdir or above the path = use relative path location_prefix = relpath(path, pwd) # if benign "here" - cut off if location_prefix in (curdir, curdir + sep): location_prefix = '' return location_prefix else: # just return absolute path return path def _get_normalized_paths(path, prefix): if isabs(path) != isabs(prefix): raise ValueError("Both paths must either be absolute or relative. " "Got %r and %r" % (path, prefix)) path = with_pathsep(path) prefix = with_pathsep(prefix) return path, prefix def path_startswith(path, prefix): """Return True if path starts with prefix path Parameters ---------- path: str prefix: str """ path, prefix = _get_normalized_paths(path, prefix) return path.startswith(prefix) def path_is_subpath(path, prefix): """Return True if path is a subpath of prefix It will return False if path == prefix. Parameters ---------- path: str prefix: str """ path, prefix = _get_normalized_paths(path, prefix) return (len(prefix) < len(path)) and path.startswith(prefix) def knows_annex(path): """Returns whether at a given path there is information about an annex It is just a thin wrapper around GitRepo.is_with_annex() classmethod which also checks for `path` to exist first. This includes actually present annexes, but also uninitialized ones, or even the presence of a remote annex branch. 
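# Hedged usage sketch (not part of the module): chpwd() temporarily changes the
# working directory (keeping PWD in sync), and the path_* helpers compare
# normalized paths; assuming imports from datalad.utils.
import os.path as op
import tempfile
from datalad.utils import chpwd, getpwd, path_is_subpath, path_startswith

with chpwd(tempfile.gettempdir()):
    print("inside:", getpwd())
print("restored:", getpwd())       # previous working directory is restored on exit

ds = op.join(op.sep, 'data', 'ds')            # made-up paths, for illustration only
assert path_startswith(op.join(ds, 'file.txt'), ds)
assert path_is_subpath(op.join(ds, 'file.txt'), ds)
assert not path_is_subpath(ds, ds)            # equal paths are not subpaths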
""" from os.path import exists if not exists(path): lgr.debug("No annex: test path {0} doesn't exist".format(path)) return False from datalad.support.gitrepo import GitRepo return GitRepo(path, init=False, create=False).is_with_annex() @contextmanager def make_tempfile(content=None, wrapped=None, **tkwargs): """Helper class to provide a temporary file name and remove it at the end (context manager) Parameters ---------- mkdir : bool, optional (default: False) If True, temporary directory created using tempfile.mkdtemp() content : str or bytes, optional Content to be stored in the file created wrapped : function, optional If set, function name used to prefix temporary file name `**tkwargs`: All other arguments are passed into the call to tempfile.mk{,d}temp(), and resultant temporary filename is passed as the first argument into the function t. If no 'prefix' argument is provided, it will be constructed using module and function names ('.' replaced with '_'). To change the used directory without providing keyword argument 'dir' set DATALAD_TESTS_TEMP_DIR. Examples -------- >>> from os.path import exists >>> from datalad.utils import make_tempfile >>> with make_tempfile() as fname: ... k = open(fname, 'w').write('silly test') >>> assert not exists(fname) # was removed >>> with make_tempfile(content="blah") as fname: ... assert open(fname).read() == "blah" """ if tkwargs.get('mkdir', None) and content is not None: raise ValueError("mkdir=True while providing content makes no sense") tkwargs_ = get_tempfile_kwargs(tkwargs, wrapped=wrapped) # if DATALAD_TESTS_TEMP_DIR is set, use that as directory, # let mktemp handle it otherwise. However, an explicitly provided # dir=... will override this. mkdir = tkwargs_.pop('mkdir', False) filename = {False: tempfile.mktemp, True: tempfile.mkdtemp}[mkdir](**tkwargs_) # MIH: not clear to me why we need to perform this (possibly expensive) # resolve. It was already part of the original implementation # 008d9ab8cc3e0170c0a9b8479e80dee9ffe6eb7f filename = Path(filename).resolve() if content: (filename.write_bytes if isinstance(content, bytes) else filename.write_text)(content) # TODO globbing below can also be done with pathlib filename = str(filename) if __debug__: lgr.debug( 'Created temporary %s named %s', 'directory' if mkdir else 'file', filename) try: yield filename finally: # glob here for all files with the same name (-suffix) # would be useful whenever we requested .img filename, # and function creates .hdr as well # MIH: this is undocumented behavior, and undesired in the general # case. it should be made conditional and explicit lsuffix = len(tkwargs_.get('suffix', '')) filename_ = lsuffix and filename[:-lsuffix] or filename filenames = glob.glob(filename_ + '*') if len(filename_) < 3 or len(filenames) > 5: # For paranoid yoh who stepped into this already ones ;-) lgr.warning("It is unlikely that it was intended to remove all" " files matching %r. 
Skipping" % filename_) return for f in filenames: try: rmtemp(f) except OSError: # pragma: no cover pass def _path_(*p): """Given a path in POSIX" notation, regenerate one in native to the env one""" if on_windows: return op.join(*map(lambda x: op.join(*x.split('/')), p)) else: # Assume that all others as POSIX compliant so nothing to be done return op.join(*p) def get_timestamp_suffix(time_=None, prefix='-'): """Return a time stamp (full date and time up to second) primarily to be used for generation of log files names """ args = [] if time_ is not None: if isinstance(time_, int): time_ = time.gmtime(time_) args.append(time_) return time.strftime(prefix + TIMESTAMP_FMT, *args) # unused in -core def get_logfilename(dspath, cmd='datalad'): """Return a filename to use for logging under a dataset/repository directory would be created if doesn't exist, but dspath must exist and be a directory """ assert(exists(dspath)) assert(isdir(dspath)) ds_logdir = ensure_dir(dspath, '.git', 'datalad', 'logs') # TODO: use WEB_META_LOG whenever #789 merged return op.join(ds_logdir, 'crawl-%s.log' % get_timestamp_suffix()) def get_trace(edges, start, end, trace=None): """Return the trace/path to reach a node in a tree. Parameters ---------- edges : sequence(2-tuple) The tree given by a sequence of edges (parent, child) tuples. The nodes can be identified by any value and data type that supports the '==' operation. start : Identifier of the start node. Must be present as a value in the parent location of an edge tuple in order to be found. end : Identifier of the target/end node. Must be present as a value in the child location of an edge tuple in order to be found. trace : list Mostly useful for recursive calls, and used internally. Returns ------- None or list Returns a list with the trace to the target (the starts and the target are not included in the trace, hence if start and end are directly connected an empty list is returned), or None when no trace to the target can be found, or start and end are identical. """ # the term trace is used to avoid confusion with a path in the sense # of a filesystem path, but the analogy fits and nodes can be paths if trace is None: trace = [] if not edges: raise ValueError("no edges given") for cand in edges: cand_super, cand_sub = cand if cand_sub in trace: # only DAGs, skip any cyclic traces continue if trace and cand_super != trace[-1]: # only consider edges that lead off the end of the trace continue if not trace and cand_super != start: # we got nothing yet, and this edges is not matching the start continue if cand_sub == end: return trace # dive into potential subnodes cand_trace = get_trace( edges, start, end, trace + [cand_sub]) if cand_trace: return cand_trace return None def get_dataset_root(path): """Return the root of an existent dataset containing a given path The root path is returned in the same absolute or relative form as the input argument. If no associated dataset exists, or the input path doesn't exist, None is returned. If `path` is a symlink or something other than a directory, its the root dataset containing its parent directory will be reported. If none can be found, at a symlink at `path` is pointing to a dataset, `path` itself will be reported as the root. 
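# Hedged usage sketch (not part of the module): get_trace() walks a tree given
# as (parent, child) edges and returns the intermediate nodes between start and
# end; the node names below are made up for illustration.
from datalad.utils import get_trace

edges = [('root', 'a'), ('a', 'b'), ('b', 'c'), ('root', 'x')]
assert get_trace(edges, 'root', 'c') == ['a', 'b']
assert get_trace(edges, 'root', 'a') == []     # directly connected -> empty trace
assert get_trace(edges, 'x', 'c') is None      # no route from 'x' to 'c'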
Parameters ---------- path : Path-like Returns ------- str or None """ path = str(path) suffix = '.git' altered = None if islink(path) or not isdir(path): altered = path path = dirname(path) apath = abspath(path) # while we can still go up while split(apath)[1]: if exists(op.join(path, suffix)): return path # new test path in the format we got it path = normpath(op.join(path, os.pardir)) # no luck, next round apath = abspath(path) # if we applied dirname() at the top, we give it another go with # the actual path, if it was itself a symlink, it could be the # top-level dataset itself if altered and exists(op.join(altered, suffix)): return altered return None # ATM used in datalad_crawler extension, so do not remove yet def try_multiple(ntrials, exception, base, f, *args, **kwargs): """Call f multiple times making exponentially growing delay between the calls""" for trial in range(1, ntrials+1): try: return f(*args, **kwargs) except exception as exc: if trial == ntrials: raise # just reraise on the last trial t = base ** trial lgr.warning("Caught %s on trial #%d. Sleeping %f and retrying", CapturedException(exc), trial, t) sleep(t) @optional_args def try_multiple_dec( f, ntrials=None, duration=0.1, exceptions=None, increment_type=None, exceptions_filter=None, logger=None, ): """Decorator to try function multiple times. Main purpose is to decorate functions dealing with removal of files/directories and which might need a few seconds to work correctly on Windows which takes its time to release files/directories. Parameters ---------- ntrials: int, optional duration: float, optional Seconds to sleep before retrying. increment_type: {None, 'exponential'} Note that if it is exponential, duration should typically be > 1.0 so it grows with higher power exceptions: Exception or tuple of Exceptions, optional Exception or a tuple of multiple exceptions, on which to retry exceptions_filter: callable, optional If provided, this function will be called with a caught exception instance. If function returns True - we will re-try, if False - exception will be re-raised without retrying. logger: callable, optional Logger to log upon failure. If not provided, will use stock logger at the level of 5 (heavy debug). """ if not exceptions: exceptions = (OSError, WindowsError, PermissionError) \ if on_windows else OSError if not ntrials: # Life goes fast on proper systems, no need to delay it much ntrials = 100 if on_windows else 10 if logger is None: def logger(*args, **kwargs): return lgr.log(5, *args, **kwargs) assert increment_type in {None, 'exponential'} @wraps(f) def _wrap_try_multiple_dec(*args, **kwargs): t = duration for trial in range(ntrials): try: return f(*args, **kwargs) except exceptions as exc: if exceptions_filter and not exceptions_filter(exc): raise if trial < ntrials - 1: if increment_type == 'exponential': t = duration ** (trial + 1) logger( "Caught %s on trial #%d. Sleeping %f and retrying", CapturedException(exc), trial, t) sleep(t) else: raise return _wrap_try_multiple_dec @try_multiple_dec def unlink(f): """'Robust' unlink. Would try multiple times On windows boxes there is evidence for a latency of more than a second until a file is considered no longer "in-use". WindowsError is not known on Linux, and if IOError or any other exception is thrown then if except statement has WindowsError in it -- NameError also see gh-2533 """ # Check for open files assert_no_open_files(f) return os.unlink(f) @try_multiple_dec def _rmtree(*args, **kwargs): """Just a helper to decorate shutil.rmtree. 
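# Hedged usage sketch (not part of the module): try_multiple_dec retries a
# flaky callable, sleeping between attempts; the failing function below is
# hypothetical and only shows the decorator in action.
from datalad.utils import try_multiple_dec

attempts = []

@try_multiple_dec(ntrials=3, duration=0.001, exceptions=RuntimeError)
def flaky():
    attempts.append(1)
    if len(attempts) < 3:
        raise RuntimeError("transient failure")
    return "ok"

assert flaky() == "ok"
assert len(attempts) == 3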
rmtree defined above does more and ideally should not itself be decorated since a recursive definition and does checks for open files inside etc - might be too runtime expensive """ return shutil.rmtree(*args, **kwargs) def slash_join(base, extension): """Join two strings with a '/', avoiding duplicate slashes If any of the strings is None the other is returned as is. """ if extension is None: return base if base is None: return extension return '/'.join( (base.rstrip('/'), extension.lstrip('/'))) # # IO Helpers # # unused in -core def open_r_encdetect(fname, readahead=1000): """Return a file object in read mode with auto-detected encoding This is helpful when dealing with files of unknown encoding. Parameters ---------- readahead: int, optional How many bytes to read for guessing the encoding type. If negative - full file will be read """ from chardet import detect import io # read some bytes from the file with open(fname, 'rb') as f: head = f.read(readahead) enc = detect(head) denc = enc.get('encoding', None) lgr.debug("Auto-detected encoding %s for file %s (confidence: %s)", denc, fname, enc.get('confidence', 'unknown')) return io.open(fname, encoding=denc) def read_file(fname, decode=True): """A helper to read file passing content via ensure_unicode Parameters ---------- decode: bool, optional if False, no ensure_unicode and file content returned as bytes """ with open(fname, 'rb') as f: content = f.read() return ensure_unicode(content) if decode else content def read_csv_lines(fname, dialect=None, readahead=16384, **kwargs): """A generator of dict records from a CSV/TSV Automatically guesses the encoding for each record to convert to UTF-8 Parameters ---------- fname: str Filename dialect: str, optional Dialect to specify to csv.reader. If not specified -- guessed from the file, if fails to guess, "excel-tab" is assumed readahead: int, optional How many bytes to read from the file to guess the type **kwargs Passed to `csv.reader` """ import csv if dialect is None: with open(fname) as tsvfile: # add robustness, use a sniffer try: dialect = csv.Sniffer().sniff(tsvfile.read(readahead)) except Exception as exc: lgr.warning( 'Could not determine file-format, assuming TSV: %s', CapturedException(exc) ) dialect = 'excel-tab' kw = dict(encoding='utf-8') with open(fname, 'r', **kw) as tsvfile: # csv.py doesn't do Unicode; encode temporarily as UTF-8: csv_reader = csv.reader( tsvfile, dialect=dialect, **kwargs ) header = None for row in csv_reader: # decode UTF-8 back to Unicode, cell by cell: row_unicode = map(ensure_unicode, row) if header is None: header = list(row_unicode) else: yield dict(zip(header, row_unicode)) def import_modules(modnames, pkg, msg="Failed to import {module}", log=lgr.debug): """Helper to import a list of modules without failing if N/A Parameters ---------- modnames: list of str List of module names to import pkg: str Package under which to import msg: str, optional Message template for .format() to log at DEBUG level if import fails. Keys {module} and {package} will be provided and ': {exception}' appended log: callable, optional Logger call to use for logging messages """ from importlib import import_module _globals = globals() mods_loaded = [] if pkg and not pkg in sys.modules: # with python 3.5.1 (ok with 3.5.5) somehow kept running into # Failed to import dlsub1: Parent module 'dltestm1' not loaded # while running the test. 
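# Hedged usage sketch (not part of the module): slash_join() joins URL-ish
# pieces without doubling the separator, assuming import from datalad.utils.
from datalad.utils import slash_join

assert slash_join('http://example.com/', '/api/v1') == 'http://example.com/api/v1'
assert slash_join(None, 'api') == 'api'    # None on either side passes the other through
assert slash_join('base', None) == 'base'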
Preloading pkg resolved the issue import_module(pkg) for modname in modnames: try: _globals[modname] = mod = import_module( '.{}'.format(modname), pkg) mods_loaded.append(mod) except Exception as exc: from datalad.support.exceptions import CapturedException ce = CapturedException(exc) log((msg + ': {exception}').format( module=modname, package=pkg, exception=ce.message)) return mods_loaded def import_module_from_file(modpath, pkg=None, log=lgr.debug): """Import provided module given a path TODO: - RF/make use of it in pipeline.py which has similar logic - join with import_modules above? Parameters ---------- pkg: module, optional If provided, and modpath is under pkg.__path__, relative import will be used """ assert(modpath.endswith('.py')) # for now just for .py files log("Importing %s" % modpath) modname = basename(modpath)[:-3] relmodpath = None if pkg: for pkgpath in pkg.__path__: if path_is_subpath(modpath, pkgpath): # for now relying on having .py extension -- assertion above relmodpath = '.' + relpath(modpath[:-3], pkgpath).replace(sep, '.') break try: if relmodpath: from importlib import import_module mod = import_module(relmodpath, pkg.__name__) else: dirname_ = dirname(modpath) try: sys.path.insert(0, dirname_) mod = __import__(modname, level=0) finally: if dirname_ in sys.path: sys.path.pop(sys.path.index(dirname_)) else: log("Expected path %s to be within sys.path, but it was gone!" % dirname_) except Exception as e: raise RuntimeError( "Failed to import module from %s" % modpath) from e return mod def get_encoding_info(): """Return a dictionary with various encoding/locale information""" import sys, locale from collections import OrderedDict return OrderedDict([ ('default', sys.getdefaultencoding()), ('filesystem', sys.getfilesystemencoding()), ('locale.prefered', locale.getpreferredencoding()), ]) def get_envvars_info(): from collections import OrderedDict envs = [] for var, val in os.environ.items(): if ( var.startswith('PYTHON') or var.startswith('LC_') or var.startswith('GIT_') or var in ('LANG', 'LANGUAGE', 'PATH') ): envs.append((var, val)) return OrderedDict(envs) # This class is modified from Snakemake (v5.1.4) class SequenceFormatter(string.Formatter): """string.Formatter subclass with special behavior for sequences. This class delegates formatting of individual elements to another formatter object. Non-list objects are formatted by calling the delegate formatter's "format_field" method. List-like objects (list, tuple, set, frozenset) are formatted by formatting each element of the list according to the specified format spec using the delegate formatter and then joining the resulting strings with a separator (space by default). """ def __init__(self, separator=" ", element_formatter=string.Formatter(), *args, **kwargs): self.separator = separator self.element_formatter = element_formatter def format_element(self, elem, format_spec): """Format a single element For sequences, this is called once for each element in a sequence. For anything else, it is called on the entire object. It is intended to be overridden in subclases. 
""" return self.element_formatter.format_field(elem, format_spec) def format_field(self, value, format_spec): if isinstance(value, (list, tuple, set, frozenset)): return self.separator.join(self.format_element(v, format_spec) for v in value) else: return self.format_element(value, format_spec) # TODO: eventually we might want to make use of attr module class File(object): """Helper for a file entry in the create_tree/@with_tree It allows to define additional settings for entries """ def __init__(self, name, executable=False): """ Parameters ---------- name : str Name of the file executable: bool, optional Make it executable """ self.name = name self.executable = executable def __str__(self): return self.name def create_tree_archive(path, name, load, overwrite=False, archives_leading_dir=True): """Given an archive `name`, create under `path` with specified `load` tree """ from datalad.support.archives import compress_files dirname = file_basename(name) full_dirname = op.join(path, dirname) os.makedirs(full_dirname) create_tree(full_dirname, load, archives_leading_dir=archives_leading_dir) # create archive if archives_leading_dir: compress_files([dirname], name, path=path, overwrite=overwrite) else: compress_files(list(map(basename, glob.glob(op.join(full_dirname, '*')))), op.join(pardir, name), path=op.join(path, dirname), overwrite=overwrite) # remove original tree rmtree(full_dirname) def create_tree(path, tree, archives_leading_dir=True, remove_existing=False): """Given a list of tuples (name, load) create such a tree if load is a tuple itself -- that would create either a subtree or an archive with that content and place it into the tree if name ends with .tar.gz """ lgr.log(5, "Creating a tree under %s", path) if not exists(path): os.makedirs(path) if isinstance(tree, dict): tree = tree.items() for file_, load in tree: if isinstance(file_, File): executable = file_.executable name = file_.name else: executable = False name = file_ full_name = op.join(path, name) if remove_existing and lexists(full_name): rmtree(full_name, chmod_files=True) if isinstance(load, (tuple, list, dict)): if name.endswith('.tar.gz') or name.endswith('.tar') or name.endswith('.zip'): create_tree_archive( path, name, load, archives_leading_dir=archives_leading_dir) else: create_tree( full_name, load, archives_leading_dir=archives_leading_dir, remove_existing=remove_existing) else: open_func = open if full_name.endswith('.gz'): open_func = gzip.open elif full_name.split('.')[-1] in ('xz', 'lzma'): import lzma open_func = lzma.open with open_func(full_name, "wb") as f: f.write(ensure_bytes(load, 'utf-8')) if executable: os.chmod(full_name, os.stat(full_name).st_mode | stat.S_IEXEC) def get_suggestions_msg(values, known, sep="\n "): """Return a formatted string with suggestions for values given the known ones """ import difflib suggestions = [] for value in ensure_list(values): # might not want to do it if we change presentation below suggestions += difflib.get_close_matches(value, known) suggestions = unique(suggestions) msg = "Did you mean any of these?" if suggestions: if '\n' in sep: # if separator includes new line - we add entire separator right away msg += sep else: msg += ' ' return msg + "%s\n" % sep.join(suggestions) return '' def bytes2human(n, format='%(value).1f %(symbol)sB'): """ Convert n bytes into a human readable string based on format. 
symbols can be either "customary", "customary_ext", "iec" or "iec_ext", see: http://goo.gl/kTQMs >>> from datalad.utils import bytes2human >>> bytes2human(1) '1.0 B' >>> bytes2human(1024) '1.0 KB' >>> bytes2human(1048576) '1.0 MB' >>> bytes2human(1099511627776127398123789121) '909.5 YB' >>> bytes2human(10000, "%(value).1f %(symbol)s/sec") '9.8 K/sec' >>> # precision can be adjusted by playing with %f operator >>> bytes2human(10000, format="%(value).5f %(symbol)s") '9.76562 K' Taken from: http://goo.gl/kTQMs and subsequently simplified Original Author: Giampaolo Rodola' <g.rodola [AT] gmail [DOT] com> License: MIT """ n = int(n) if n < 0: raise ValueError("n < 0") symbols = ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y') prefix = {} for i, s in enumerate(symbols[1:]): prefix[s] = 1 << (i + 1) * 10 for symbol in reversed(symbols[1:]): if n >= prefix[symbol]: value = float(n) / prefix[symbol] return format % locals() return format % dict(symbol=symbols[0], value=n) def quote_cmdlinearg(arg): """Perform platform-appropriate argument quoting""" # https://stackoverflow.com/a/15262019 return '"{}"'.format( arg.replace('"', '""') ) if on_windows else shlex_quote(arg) def guard_for_format(arg): """Replace { and } with {{ and }} To be used in cases if arg is not expected to have provided by user .format() placeholders, but 'arg' might become a part of a composite passed to .format(), e.g. via 'Run' """ return arg.replace('{', '{{').replace('}', '}}') def join_cmdline(args): """Join command line args into a string using quote_cmdlinearg """ return ' '.join(map(quote_cmdlinearg, args)) def split_cmdline(s): """Perform platform-appropriate command line splitting. Identical to `shlex.split()` on non-windows platforms. Modified from https://stackoverflow.com/a/35900070 """ if not on_windows: return shlex_split(s) # the rest is for windows RE_CMD_LEX = r'''"((?:""|\\["\\]|[^"])*)"?()|(\\\\(?=\\*")|\\")|(&&?|\|\|?|\d?>|[<])|([^\s"&|<>]+)|(\s+)|(.)''' args = [] accu = None # collects pieces of one arg for qs, qss, esc, pipe, word, white, fail in re.findall(RE_CMD_LEX, s): if word: pass # most frequent elif esc: word = esc[1] elif white or pipe: if accu is not None: args.append(accu) if pipe: args.append(pipe) accu = None continue elif fail: raise ValueError("invalid or incomplete shell string") elif qs: word = qs.replace('\\"', '"').replace('\\\\', '\\') if platform == 0: word = word.replace('""', '"') else: word = qss # may be even empty; must be last accu = (accu or '') + word if accu is not None: args.append(accu) return args def get_wrapped_class(wrapped): """Determine the command class a wrapped __call__ belongs to""" mod = sys.modules[wrapped.__module__] command_class_name = wrapped.__qualname__.split('.')[-2] _func_class = mod.__dict__[command_class_name] lgr.debug("Determined class of decorated function: %s", _func_class) return _func_class def _make_assure_kludge(fn): old_name = fn.__name__.replace("ensure", "assure") @wraps(fn) def compat_fn(*args, **kwargs): warnings.warn( "{} is deprecated and will be removed in a future release. " "Use {} instead." .format(old_name, fn.__name__), DeprecationWarning) return fn(*args, **kwargs) compat_fn.__doc__ = ("Note: This function is deprecated. Use {} instead." 
.format(fn.__name__)) return compat_fn assure_tuple_or_list = _make_assure_kludge(ensure_tuple_or_list) assure_iter = _make_assure_kludge(ensure_iter) assure_list = _make_assure_kludge(ensure_list) assure_list_from_str = _make_assure_kludge(ensure_list_from_str) assure_dict_from_str = _make_assure_kludge(ensure_dict_from_str) assure_bytes = _make_assure_kludge(ensure_bytes) assure_unicode = _make_assure_kludge(ensure_unicode) assure_bool = _make_assure_kludge(ensure_bool) assure_dir = _make_assure_kludge(ensure_dir) lgr.log(5, "Done importing datalad.utils") def check_symlink_capability(path, target): """helper similar to datalad.tests.utils.has_symlink_capability However, for use in a datalad command context, we shouldn't assume to be able to write to tmpfile and also not import a whole lot from datalad's test machinery. Finally, we want to know, whether we can create a symlink at a specific location, not just somewhere. Therefore use arbitrary path to test-build a symlink and delete afterwards. Suitable location can therefore be determined by high lever code. Parameters ---------- path: Path target: Path Returns ------- bool """ try: target.touch() path.symlink_to(target) return True except Exception: return False finally: if path.exists(): path.unlink() if target.exists(): target.unlink()
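# Hedged usage sketch (not part of the module): round-tripping arguments
# through the platform-aware quoting helpers defined above, assuming they are
# importable from datalad.utils.
from datalad.utils import join_cmdline, quote_cmdlinearg, split_cmdline

args = ['git', 'commit', '-m', 'a message with spaces']
cmdline = join_cmdline(args)           # each element quoted as needed
assert split_cmdline(cmdline) == args  # and split back into the same list
assert quote_cmdlinearg('plain') in ('plain', '"plain"')   # POSIX vs Windows form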
ensure_unicode
Convert/decode bytestring to unicode.

    If `s` isn't a bytestring, return it as is.

    Parameters
    ----------
    encoding: str, optional
        Encoding to use. If None, "utf-8" is tried, and then
        if not a valid UTF-8, encoding will be guessed
    confidence: float, optional
        A value between 0 and 1, so if guessing of encoding is of lower than
        specified confidence, ValueError is raised
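# Hedged usage sketch (not part of this record): what the behaviour documented
# above looks like in practice, assuming ensure_unicode is importable from
# datalad.utils.
from datalad.utils import ensure_unicode

assert ensure_unicode('already text') == 'already text'    # non-bytes returned as is
assert ensure_unicode(b'\xc3\xbcber') == 'über'            # valid UTF-8 is decoded
assert ensure_unicode('läuft'.encode('latin-1'), encoding='latin-1') == 'läuft'
# without an explicit encoding, non-UTF-8 input falls back to chardet guessing,
# and a ValueError is raised if the guess is below the requested confidence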
# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*- # ex: set sts=4 ts=4 sw=4 et: # ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## # # See COPYING file distributed along with the datalad package for the # copyright and license terms. # # ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## import collections from collections.abc import Callable import re import builtins import time import logging import shutil import os import sys import tempfile from tempfile import NamedTemporaryFile import platform import gc import glob import gzip import stat import string import warnings import os.path as op from copy import copy as shallow_copy from contextlib import contextmanager from functools import ( lru_cache, wraps, ) from time import sleep import inspect from itertools import tee # this import is required because other modules import opj from here. from os.path import join as opj from os.path import ( abspath, basename, commonprefix, curdir, dirname, exists, expanduser, expandvars, isabs, isdir, islink, lexists, normpath, pardir, relpath, sep, split, splitdrive ) import posixpath from shlex import ( quote as shlex_quote, split as shlex_split, ) # from datalad.dochelpers import get_docstring_split from datalad.consts import TIMESTAMP_FMT from datalad.support.exceptions import CapturedException unicode_srctypes = str, bytes lgr = logging.getLogger("datalad.utils") lgr.log(5, "Importing datalad.utils") # # Some useful variables # platform_system = platform.system().lower() on_windows = platform_system == 'windows' on_osx = platform_system == 'darwin' on_linux = platform_system == 'linux' on_msys_tainted_paths = on_windows \ and 'MSYS_NO_PATHCONV' not in os.environ \ and os.environ.get('MSYSTEM', '')[:4] in ('MSYS', 'MING') # Takes ~200msec, so should not be called at import time @lru_cache() # output should not change through life time of datalad process def get_linux_distribution(): """Compatibility wrapper for {platform,distro}.linux_distribution(). """ if hasattr(platform, "linux_distribution"): # Use deprecated (but faster) method if it's available. with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=DeprecationWarning) result = platform.linux_distribution() else: import distro # We require this for Python 3.8 and above. result = distro.linux_distribution(full_distribution_name=False) return result # Those weren't used for any critical decision making, thus we just set them to None # Use get_linux_distribution() directly where needed linux_distribution_name = linux_distribution_release = None # Maximal length of cmdline string # Query the system and use hardcoded "knowledge" if None # probably getconf ARG_MAX might not be available # The last one would be the most conservative/Windows CMD_MAX_ARG_HARDCODED = 2097152 if on_linux else 262144 if on_osx else 32767 try: CMD_MAX_ARG = os.sysconf('SC_ARG_MAX') assert CMD_MAX_ARG > 0 if CMD_MAX_ARG > CMD_MAX_ARG_HARDCODED * 1e6: # workaround for some kind of a bug which comes up with python 3.4 # see https://github.com/datalad/datalad/issues/3150 # or on older CentOS with conda and python as new as 3.9 # see https://github.com/datalad/datalad/issues/5943 # TODO: let Yarik know that the world is a paradise now whenever 1e6 # is not large enough CMD_MAX_ARG = min(CMD_MAX_ARG, CMD_MAX_ARG_HARDCODED) except Exception as exc: # ATM (20181005) SC_ARG_MAX available only on POSIX systems # so exception would be thrown e.g. 
on Windows, or # somehow during Debian build for nd14.04 it is coming up with -1: # https://github.com/datalad/datalad/issues/3015 CMD_MAX_ARG = CMD_MAX_ARG_HARDCODED lgr.debug( "Failed to query or got useless SC_ARG_MAX sysconf, " "will use hardcoded value: %s", exc) # Even with all careful computations we do, due to necessity to account for # environment and what not, we still could not figure out "exact" way to # estimate it, but it was shown that 300k safety margin on linux was sufficient. # https://github.com/datalad/datalad/pull/2977#issuecomment-436264710 # 300k is ~15%, so to be safe, and for paranoid us we will just use up to 50% # of the length for "safety margin". We might probably still blow due to # env vars, unicode, etc... so any hard limit imho is not a proper solution CMD_MAX_ARG = int(0.5 * CMD_MAX_ARG) lgr.debug( "Maximal length of cmdline string (adjusted for safety margin): %d", CMD_MAX_ARG) # # Little helpers # # `getargspec` has been deprecated in Python 3. ArgSpecFake = collections.namedtuple( "ArgSpecFake", ["args", "varargs", "keywords", "defaults"]) def getargspec(func, *, include_kwonlyargs=False): """Compat shim for getargspec deprecated in python 3. The main difference from inspect.getargspec (and inspect.getfullargspec for that matter) is that by using inspect.signature we are providing correct args/defaults for functools.wraps'ed functions. `include_kwonlyargs` option was added to centralize getting all args, even the ones which are kwonly (follow the ``*,``). For internal use and not advised for use in 3rd party code. Please use inspect.signature directly. """ # We use signature, and not getfullargspec, because only signature properly # "passes" args from a functools.wraps decorated function. # Note: getfullargspec works Ok on wrapt-decorated functions f_sign = inspect.signature(func) # Loop through parameters and compose argspec args4 = [[], None, None, {}] # Collect all kwonlyargs into a dedicated dict - name: default kwonlyargs = {} # shortcuts args, defaults = args4[0], args4[3] P = inspect.Parameter for p_name, p in f_sign.parameters.items(): if p.kind in (P.POSITIONAL_ONLY, P.POSITIONAL_OR_KEYWORD): assert not kwonlyargs # yoh: must not come after kwonlyarg args.append(p_name) if p.default is not P.empty: defaults[p_name] = p.default elif p.kind == P.VAR_POSITIONAL: args4[1] = p_name elif p.kind == P.VAR_KEYWORD: args4[2] = p_name elif p.kind == P.KEYWORD_ONLY: assert p.default is not P.empty kwonlyargs[p_name] = p.default if kwonlyargs: if not include_kwonlyargs: raise ValueError( 'Function has keyword-only parameters or annotations, either use ' 'inspect.signature() API which can support them, or provide include_kwonlyargs=True ' 'to this function' ) else: args.extend(list(kwonlyargs)) defaults.update(kwonlyargs) # harmonize defaults to how original getargspec returned them -- just a tuple args4[3] = None if not defaults else tuple(defaults.values()) return ArgSpecFake(*args4) def any_re_search(regexes, value): """Return if any of regexes (list or str) searches successfully for value""" for regex in ensure_tuple_or_list(regexes): if re.search(regex, value): return True return False def not_supported_on_windows(msg=None): """A little helper to be invoked to consistently fail whenever functionality is not supported (yet) on Windows """ if on_windows: raise NotImplementedError("This functionality is not yet implemented for Windows OS" + (": %s" % msg if msg else "")) def get_home_envvars(new_home): """Return dict with env variables to be adjusted for a 
new HOME Only variables found in current os.environ are adjusted. Parameters ---------- new_home: str or Path New home path, in native to OS "schema" """ new_home = str(new_home) out = {'HOME': new_home} if on_windows: # requires special handling, since it has a number of relevant variables # and also Python changed its behavior and started to respect USERPROFILE only # since python 3.8: https://bugs.python.org/issue36264 out['USERPROFILE'] = new_home out['HOMEDRIVE'], out['HOMEPATH'] = splitdrive(new_home) return {v: val for v, val in out.items() if v in os.environ} def shortened_repr(value, l=30): try: if hasattr(value, '__repr__') and (value.__repr__ is not object.__repr__): value_repr = repr(value) if not value_repr.startswith('<') and len(value_repr) > l: value_repr = "<<%s++%d chars++%s>>" % ( value_repr[:l - 16], len(value_repr) - (l - 16 + 4), value_repr[-4:] ) elif value_repr.startswith('<') and value_repr.endswith('>') and ' object at 0x': raise ValueError("I hate those useless long reprs") else: raise ValueError("gimme class") except Exception as e: value_repr = "<%s>" % value.__class__.__name__.split('.')[-1] return value_repr def __auto_repr__(obj): attr_names = tuple() if hasattr(obj, '__dict__'): attr_names += tuple(obj.__dict__.keys()) if hasattr(obj, '__slots__'): attr_names += tuple(obj.__slots__) items = [] for attr in sorted(set(attr_names)): if attr.startswith('_'): continue value = getattr(obj, attr) # TODO: should we add this feature to minimize some talktative reprs # such as of URL? #if value is None: # continue items.append("%s=%s" % (attr, shortened_repr(value))) return "%s(%s)" % (obj.__class__.__name__, ', '.join(items)) def auto_repr(cls): """Decorator for a class to assign it an automagic quick and dirty __repr__ It uses public class attributes to prepare repr of a class Original idea: http://stackoverflow.com/a/27799004/1265472 """ cls.__repr__ = __auto_repr__ return cls def _is_stream_tty(stream): try: # TODO: check on windows if hasattr check would work correctly and # add value: return stream.isatty() except ValueError as exc: # Who knows why it is a ValueError, but let's try to be specific # If there is a problem with I/O - non-interactive, otherwise reraise if "I/O" in str(exc): return False raise def is_interactive(): """Return True if all in/outs are open and tty. Note that in a somewhat abnormal case where e.g. stdin is explicitly closed, and any operation on it would raise a `ValueError("I/O operation on closed file")` exception, this function would just return False, since the session cannot be used interactively. 
""" return all(_is_stream_tty(s) for s in (sys.stdin, sys.stdout, sys.stderr)) def get_ipython_shell(): """Detect if running within IPython and returns its `ip` (shell) object Returns None if not under ipython (no `get_ipython` function) """ try: return get_ipython() except NameError: return None def md5sum(filename): """Compute an MD5 sum for the given file """ from datalad.support.digests import Digester return Digester(digests=['md5'])(filename)['md5'] # unused in -core def sorted_files(path): """Return a (sorted) list of files under path """ return sorted(sum([[op.join(r, f)[len(path) + 1:] for f in files] for r, d, files in os.walk(path) if not '.git' in r], [])) _encoded_dirsep = r'\\' if on_windows else r'/' _VCS_REGEX = r'%s\.(?:git|gitattributes|svn|bzr|hg)(?:%s|$)' % ( _encoded_dirsep, _encoded_dirsep) _DATALAD_REGEX = r'%s\.(?:datalad)(?:%s|$)' % ( _encoded_dirsep, _encoded_dirsep) def find_files(regex, topdir=curdir, exclude=None, exclude_vcs=True, exclude_datalad=False, dirs=False): """Generator to find files matching regex Parameters ---------- regex: basestring exclude: basestring, optional Matches to exclude exclude_vcs: If True, excludes commonly known VCS subdirectories. If string, used as regex to exclude those files (regex: `%r`) exclude_datalad: If True, excludes files known to be datalad meta-data files (e.g. under .datalad/ subdirectory) (regex: `%r`) topdir: basestring, optional Directory where to search dirs: bool, optional Whether to match directories as well as files """ for dirpath, dirnames, filenames in os.walk(topdir): names = (dirnames + filenames) if dirs else filenames # TODO: might want to uniformize on windows to use '/' paths = (op.join(dirpath, name) for name in names) for path in filter(re.compile(regex).search, paths): path = path.rstrip(sep) if exclude and re.search(exclude, path): continue if exclude_vcs and re.search(_VCS_REGEX, path): continue if exclude_datalad and re.search(_DATALAD_REGEX, path): continue yield path find_files.__doc__ %= (_VCS_REGEX, _DATALAD_REGEX) def expandpath(path, force_absolute=True): """Expand all variables and user handles in a path. By default return an absolute path """ path = expandvars(expanduser(path)) if force_absolute: path = abspath(path) return path def posix_relpath(path, start=None): """Behave like os.path.relpath, but always return POSIX paths... on any platform.""" # join POSIX style return posixpath.join( # split and relpath native style # python2.7 ntpath implementation of relpath cannot handle start=None *split( relpath(path, start=start if start is not None else ''))) def is_explicit_path(path): """Return whether a path explicitly points to a location Any absolute path, or relative path starting with either '../' or './' is assumed to indicate a location on the filesystem. 
Any other path format is not considered explicit.""" path = expandpath(path, force_absolute=False) return isabs(path) \ or path.startswith(os.curdir + os.sep) \ or path.startswith(os.pardir + os.sep) # handle this dance once, and import pathlib from here # in all other places from pathlib import ( Path, PurePath, PurePosixPath, ) def rotree(path, ro=True, chmod_files=True): """To make tree read-only or writable Parameters ---------- path : string Path to the tree/directory to chmod ro : bool, optional Whether to make it R/O (default) or RW chmod_files : bool, optional Whether to operate also on files (not just directories) """ if ro: chmod = lambda f: os.chmod(f, os.stat(f).st_mode & ~stat.S_IWRITE) else: chmod = lambda f: os.chmod(f, os.stat(f).st_mode | stat.S_IWRITE | stat.S_IREAD) for root, dirs, files in os.walk(path, followlinks=False): if chmod_files: for f in files: fullf = op.join(root, f) # might be the "broken" symlink which would fail to stat etc if exists(fullf): chmod(fullf) chmod(root) def rmtree(path, chmod_files='auto', children_only=False, *args, **kwargs): """To remove git-annex .git it is needed to make all files and directories writable again first Parameters ---------- path: Path or str Path to remove chmod_files : string or bool, optional Whether to make files writable also before removal. Usually it is just a matter of directories to have write permissions. If 'auto' it would chmod files on windows by default children_only : bool, optional If set, all files and subdirectories would be removed while the path itself (must be a directory) would be preserved `*args` : `**kwargs` : Passed into shutil.rmtree call """ # Give W permissions back only to directories, no need to bother with files if chmod_files == 'auto': chmod_files = on_windows # TODO: yoh thinks that if we could quickly check our Flyweight for # repos if any of them is under the path, and could call .precommit # on those to possibly stop batched processes etc, we did not have # to do it on case by case # Check for open files assert_no_open_files(path) # TODO the whole thing should be reimplemented with pathlib, but for now # at least accept Path path = str(path) if children_only: if not isdir(path): raise ValueError("Can remove children only of directories") for p in os.listdir(path): rmtree(op.join(path, p)) return if not (islink(path) or not isdir(path)): rotree(path, ro=False, chmod_files=chmod_files) if on_windows: # shutil fails to remove paths that exceed 260 characters on Windows machines # that did not enable long path support. A workaround to remove long paths # anyway is to preprend \\?\ to the path. # https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file?redirectedfrom=MSDN#win32-file-namespaces path = r'\\?\ '.strip() + path _rmtree(path, *args, **kwargs) else: # just remove the symlink unlink(path) def rmdir(path, *args, **kwargs): """os.rmdir with our optional checking for open files""" assert_no_open_files(path) os.rmdir(path) def get_open_files(path, log_open=False): """Get open files under a path Note: This function is very slow on Windows. 
Parameters ---------- path : str File or directory to check for open files under log_open : bool or int If set - logger level to use Returns ------- dict path : pid """ # Original idea: https://stackoverflow.com/a/11115521/1265472 import psutil files = {} # since the ones returned by psutil would not be aware of symlinks in the # path we should also get realpath for path # do absolute() in addition to always get an absolute path # even with non-existing paths on windows path = str(Path(path).resolve().absolute()) for proc in psutil.process_iter(): try: open_paths = [p.path for p in proc.open_files()] + [proc.cwd()] for p in open_paths: # note: could be done more efficiently so we do not # renormalize path over and over again etc if path_startswith(p, path): files[p] = proc # Catch a race condition where a process ends # before we can examine its files except psutil.NoSuchProcess: pass except psutil.AccessDenied: pass if files and log_open: lgr.log(log_open, "Open files under %s: %s", path, files) return files _assert_no_open_files_cfg = os.environ.get('DATALAD_ASSERT_NO_OPEN_FILES') if _assert_no_open_files_cfg: def assert_no_open_files(path): files = get_open_files(path, log_open=40) if _assert_no_open_files_cfg == 'assert': assert not files, "Got following files still open: %s" % ','.join(files) elif files: if _assert_no_open_files_cfg == 'pdb': import pdb pdb.set_trace() elif _assert_no_open_files_cfg == 'epdb': import epdb epdb.serve() pass # otherwise we would just issue that error message in the log else: def assert_no_open_files(*args, **kwargs): pass def rmtemp(f, *args, **kwargs): """Wrapper to centralize removing of temp files so we could keep them around It will not remove the temporary file/directory if DATALAD_TESTS_TEMP_KEEP environment variable is defined """ if not os.environ.get('DATALAD_TESTS_TEMP_KEEP'): if not os.path.lexists(f): lgr.debug("Path %s does not exist, so can't be removed", f) return lgr.log(5, "Removing temp file: %s", f) # Can also be a directory if isdir(f): rmtree(f, *args, **kwargs) else: unlink(f) else: lgr.info("Keeping temp file: %s", f) def file_basename(name, return_ext=False): """ Strips up to 2 extensions of length up to 4 characters and starting with alpha not a digit, so we could get rid of .tar.gz etc """ bname = basename(name) fbname = re.sub(r'(\.[a-zA-Z_]\S{1,4}){0,2}$', '', bname) if return_ext: return fbname, bname[len(fbname) + 1:] else: return fbname # unused in -core def escape_filename(filename): """Surround filename in "" and escape " in the filename """ filename = filename.replace('"', r'\"').replace('`', r'\`') filename = '"%s"' % filename return filename # unused in -core def encode_filename(filename): """Encode unicode filename """ if isinstance(filename, str): return filename.encode(sys.getfilesystemencoding()) else: return filename # unused in -core def decode_input(s): """Given input string/bytes, decode according to stdin codepage (or UTF-8) if not defined If fails -- issue warning and decode allowing for errors being replaced """ if isinstance(s, str): return s else: encoding = sys.stdin.encoding or 'UTF-8' try: return s.decode(encoding) except UnicodeDecodeError as exc: lgr.warning( "Failed to decode input string using %s encoding. " "Decoding allowing for errors", encoding) return s.decode(encoding, errors='replace') # unused in -core if on_windows: def lmtime(filepath, mtime): """Set mtime for files. 
On Windows a merely adapter to os.utime """ os.utime(filepath, (time.time(), mtime)) else: def lmtime(filepath, mtime): """Set mtime for files, while not de-referencing symlinks. To overcome absence of os.lutime Works only on linux and OSX ATM """ from .cmd import WitlessRunner # convert mtime to format touch understands [[CC]YY]MMDDhhmm[.SS] smtime = time.strftime("%Y%m%d%H%M.%S", time.localtime(mtime)) lgr.log(3, "Setting mtime for %s to %s == %s", filepath, mtime, smtime) WitlessRunner().run(['touch', '-h', '-t', '%s' % smtime, filepath]) filepath = Path(filepath) rfilepath = filepath.resolve() if filepath.is_symlink() and rfilepath.exists(): # trust no one - adjust also of the target file # since it seemed like downloading under OSX (was it using curl?) # didn't bother with timestamps lgr.log(3, "File is a symlink to %s Setting mtime for it to %s", rfilepath, mtime) os.utime(str(rfilepath), (time.time(), mtime)) # doesn't work on OSX # Runner().run(['touch', '-h', '-d', '@%s' % mtime, filepath]) def ensure_tuple_or_list(obj): """Given an object, wrap into a tuple if not list or tuple """ if isinstance(obj, (list, tuple)): return obj return (obj,) def ensure_iter(s, cls, copy=False, iterate=True): """Given not a list, would place it into a list. If None - empty list is returned Parameters ---------- s: list or anything cls: class Which iterable class to ensure copy: bool, optional If correct iterable is passed, it would generate its shallow copy iterate: bool, optional If it is not a list, but something iterable (but not a str) iterate over it. """ if isinstance(s, cls): return s if not copy else shallow_copy(s) elif isinstance(s, str): return cls((s,)) elif iterate and hasattr(s, '__iter__'): return cls(s) elif s is None: return cls() else: return cls((s,)) def ensure_list(s, copy=False, iterate=True): """Given not a list, would place it into a list. If None - empty list is returned Parameters ---------- s: list or anything copy: bool, optional If list is passed, it would generate a shallow copy of the list iterate: bool, optional If it is not a list, but something iterable (but not a str) iterate over it. """ return ensure_iter(s, list, copy=copy, iterate=iterate) def ensure_list_from_str(s, sep='\n'): """Given a multiline string convert it to a list of return None if empty Parameters ---------- s: str or list """ if not s: return None if isinstance(s, list): return s return s.split(sep) def ensure_dict_from_str(s, **kwargs): """Given a multiline string with key=value items convert it to a dictionary Parameters ---------- s: str or dict Returns None if input s is empty """ if not s: return None if isinstance(s, dict): return s out = {} for value_str in ensure_list_from_str(s, **kwargs): if '=' not in value_str: raise ValueError("{} is not in key=value format".format(repr(value_str))) k, v = value_str.split('=', 1) if k in out: err = "key {} was already defined in {}, but new value {} was provided".format(k, out, v) raise ValueError(err) out[k] = v return out def ensure_bytes(s, encoding='utf-8'): """Convert/encode unicode string to bytes. If `s` isn't a string, return it as is. Parameters ---------- encoding: str, optional Encoding to use. 
"utf-8" is the default """ if not isinstance(s, str): return s return s.encode(encoding) # MASKED: ensure_unicode function (lines 807-851) def ensure_bool(s): """Convert value into boolean following convention for strings to recognize on,True,yes as True, off,False,no as False """ if isinstance(s, str): if s.isdigit(): return bool(int(s)) sl = s.lower() if sl in {'y', 'yes', 'true', 'on'}: return True elif sl in {'n', 'no', 'false', 'off'}: return False else: raise ValueError("Do not know how to treat %r as a boolean" % s) return bool(s) def as_unicode(val, cast_types=object): """Given an arbitrary value, would try to obtain unicode value of it For unicode it would return original value, for python2 str or python3 bytes it would use ensure_unicode, for None - an empty (unicode) string, and for any other type (see `cast_types`) - would apply the unicode constructor. If value is not an instance of `cast_types`, TypeError is thrown Parameters ---------- cast_types: type Which types to cast to unicode by providing to constructor """ if val is None: return u'' elif isinstance(val, str): return val elif isinstance(val, unicode_srctypes): return ensure_unicode(val) elif isinstance(val, cast_types): return str(val) else: raise TypeError( "Value %r is not of any of known or provided %s types" % (val, cast_types)) def unique(seq, key=None, reverse=False): """Given a sequence return a list only with unique elements while maintaining order This is the fastest solution. See https://www.peterbe.com/plog/uniqifiers-benchmark and http://stackoverflow.com/a/480227/1265472 for more information. Enhancement -- added ability to compare for uniqueness using a key function Parameters ---------- seq: Sequence to analyze key: callable, optional Function to call on each element so we could decide not on a full element, but on its member etc reverse: bool, optional If True, uniqueness checked in the reverse order, so that the later ones will take the order """ seen = set() seen_add = seen.add trans = reversed if reverse else lambda x: x if not key: out = [x for x in trans(seq) if not (x in seen or seen_add(x))] else: # OPT: could be optimized, since key is called twice, but for our cases # should be just as fine out = [x for x in trans(seq) if not (key(x) in seen or seen_add(key(x)))] return out[::-1] if reverse else out def all_same(items): """Quick check if all items are the same. Identical to a check like len(set(items)) == 1 but should be more efficient while working on generators, since would return False as soon as any difference detected thus possibly avoiding unnecessary evaluations """ first = True first_item = None for item in items: if first: first = False first_item = item else: if item != first_item: return False # So we return False if was empty return not first def map_items(func, v): """A helper to apply `func` to all elements (keys and values) within dict No type checking of values passed to func is done, so `func` should be resilient to values which it should not handle Initial usecase - apply_recursive(url_fragment, ensure_unicode) """ # map all elements within item return v.__class__( item.__class__(map(func, item)) for item in v.items() ) def partition(items, predicate=bool): """Partition `items` by `predicate`. Parameters ---------- items : iterable predicate : callable A function that will be mapped over each element in `items`. The elements will partitioned based on whether the return value is false or true. 
Returns ------- A tuple with two generators, the first for 'false' items and the second for 'true' ones. Notes ----- Taken from Peter Otten's snippet posted at https://nedbatchelder.com/blog/201306/filter_a_list_into_two_parts.html """ a, b = tee((predicate(item), item) for item in items) return ((item for pred, item in a if not pred), (item for pred, item in b if pred)) def generate_chunks(container, size): """Given a container, generate chunks from it with size up to `size` """ # There could be a "smarter" solution but I think this would suffice assert size > 0, "Size should be non-0 positive" while container: yield container[:size] container = container[size:] def generate_file_chunks(files, cmd=None): """Given a list of files, generate chunks of them to avoid exceeding cmdline length Parameters ---------- files: list of str cmd: str or list of str, optional Command to account for as well """ files = ensure_list(files) cmd = ensure_list(cmd) maxl = max(map(len, files)) if files else 0 chunk_size = max( 1, # should at least be 1. If blows then - not our fault (CMD_MAX_ARG - sum((len(x) + 3) for x in cmd) - 4 # for '--' below ) // (maxl + 3) # +3 for possible quotes and a space ) # TODO: additional treatment for "too many arguments"? although # as https://github.com/datalad/datalad/issues/1883#issuecomment # -436272758 # shows there seems to be no hardcoded limit on # of arguments, # but may be we decide to go for smth like follow to be on safe side # chunk_size = min(10240 - len(cmd), chunk_size) file_chunks = generate_chunks(files, chunk_size) return file_chunks # # Generators helpers # def saved_generator(gen): """Given a generator returns two generators, where 2nd one just replays So the first one would be going through the generated items and 2nd one would be yielding saved items """ saved = [] def gen1(): for x in gen: # iterating over original generator saved.append(x) yield x def gen2(): for x in saved: # yielding saved entries yield x return gen1(), gen2() # # Decorators # # Originally better_wraps was created to provide `wrapt`-based, instead of # `functools.wraps` implementation to preserve the correct signature of the # decorated function. By using inspect.signature in our getargspec, which # works fine on `functools.wraps`ed functions, we mediated this necessity. better_wraps = wraps # Borrowed from pandas # Copyright: 2011-2014, Lambda Foundry, Inc. and PyData Development Team # License: BSD-3 def optional_args(decorator): """allows a decorator to take optional positional and keyword arguments. Assumes that taking a single, callable, positional argument means that it is decorating a function, i.e. something like this:: @my_decorator def function(): pass Calls decorator with decorator(f, `*args`, `**kwargs`)""" @better_wraps(decorator) def wrapper(*args, **kwargs): def dec(f): return decorator(f, *args, **kwargs) is_decorating = not kwargs and len(args) == 1 and isinstance(args[0], Callable) if is_decorating: f = args[0] args = [] return dec(f) else: return dec return wrapper # TODO: just provide decorators for tempfile.mk* functions. This is ugly! def get_tempfile_kwargs(tkwargs=None, prefix="", wrapped=None): """Updates kwargs to be passed to tempfile. 
calls depending on env vars """ if tkwargs is None: tkwargs_ = {} else: # operate on a copy of tkwargs to avoid any side-effects tkwargs_ = tkwargs.copy() # TODO: don't remember why I had this one originally # if len(targs)<2 and \ if 'prefix' not in tkwargs_: tkwargs_['prefix'] = '_'.join( ['datalad_temp'] + ([prefix] if prefix else []) + ([''] if (on_windows or not wrapped) else [wrapped.__name__])) directory = os.environ.get('TMPDIR') if directory and 'dir' not in tkwargs_: tkwargs_['dir'] = directory return tkwargs_ @optional_args def line_profile(func): """Q&D helper to line profile the function and spit out stats """ import line_profiler prof = line_profiler.LineProfiler() @wraps(func) def _wrap_line_profile(*args, **kwargs): try: pfunc = prof(func) return pfunc(*args, **kwargs) finally: prof.print_stats() return _wrap_line_profile # unused in -core @optional_args def collect_method_callstats(func): """Figure out methods which call the method repeatedly on the same instance Use case(s): - .repo is expensive since does all kinds of checks. - .config is expensive transitively since it calls .repo each time TODO: - fancy one could look through the stack for the same id(self) to see if that location is already in memo. That would hint to the cases where object is not passed into underlying functions, causing them to redo the same work over and over again - ATM might flood with all "1 lines" calls which are not that informative. The underlying possibly suboptimal use might be coming from their callers. It might or not relate to the previous TODO """ from collections import defaultdict import traceback from time import time memo = defaultdict(lambda: defaultdict(int)) # it will be a dict of lineno: count # gross timing times = [] toppath = dirname(__file__) + sep @wraps(func) def _wrap_collect_method_callstats(*args, **kwargs): try: self = args[0] stack = traceback.extract_stack() caller = stack[-2] stack_sig = \ "{relpath}:{s.name}".format( s=caller, relpath=relpath(caller.filename, toppath)) sig = (id(self), stack_sig) # we will count based on id(self) + wherefrom memo[sig][caller.lineno] += 1 t0 = time() return func(*args, **kwargs) finally: times.append(time() - t0) pass def print_stats(): print("The cost of property {}:".format(func.__name__)) if not memo: print("None since no calls") return # total count counts = {k: sum(v.values()) for k,v in memo.items()} total = sum(counts.values()) ids = {self_id for (self_id, _) in memo} print(" Total: {} calls from {} objects with {} contexts taking {:.2f} sec" .format(total, len(ids), len(memo), sum(times))) # now we need to sort by value for (self_id, caller), count in sorted(counts.items(), key=lambda x: x[1], reverse=True): print(" {} {}: {} from {} lines" .format(self_id, caller, count, len(memo[(self_id, caller)]))) # Upon total exit we print the stats import atexit atexit.register(print_stats) return _wrap_collect_method_callstats # Borrowed from duecredit to wrap duecredit-handling to guarantee failsafe def never_fail(f): """Assure that function never fails -- all exceptions are caught Returns `None` if function fails internally. """ @wraps(f) def wrapped_func(*args, **kwargs): try: return f(*args, **kwargs) except Exception as e: lgr.warning( "DataLad internal failure while running %s: %r. 
" "Please report at https://github.com/datalad/datalad/issues" % (f, e) ) if os.environ.get('DATALAD_ALLOW_FAIL', False): return f else: return wrapped_func # # Context Managers # # unused in -core @contextmanager def nothing_cm(): """Just a dummy cm to programmically switch context managers""" yield @contextmanager def swallow_outputs(): """Context manager to help consuming both stdout and stderr, and print() stdout is available as cm.out and stderr as cm.err whenever cm is the yielded context manager. Internally uses temporary files to guarantee absent side-effects of swallowing into StringIO which lacks .fileno. print mocking is necessary for some uses where sys.stdout was already bound to original sys.stdout, thus mocking it later had no effect. Overriding print function had desired effect """ class StringIOAdapter(object): """Little adapter to help getting out/err values """ def __init__(self): kw = get_tempfile_kwargs({}, prefix="outputs") self._out = NamedTemporaryFile(delete=False, mode='w', **kw) self._err = NamedTemporaryFile(delete=False, mode='w', **kw) def _read(self, h): with open(h.name) as f: return f.read() @property def out(self): if not self._out.closed: self._out.flush() return self._read(self._out) @property def err(self): if not self._err.closed: self._err.flush() return self._read(self._err) @property def handles(self): return self._out, self._err def cleanup(self): self._out.close() self._err.close() out_name = self._out.name err_name = self._err.name from datalad import cfg if cfg.getbool('datalad.log', 'outputs', default=False) \ and lgr.getEffectiveLevel() <= logging.DEBUG: for s, sname in ((self.out, 'stdout'), (self.err, 'stderr')): if s: pref = os.linesep + "| " lgr.debug("Swallowed %s:%s%s", sname, pref, s.replace(os.linesep, pref)) else: lgr.debug("Nothing was swallowed for %s", sname) del self._out del self._err gc.collect() rmtemp(out_name) rmtemp(err_name) def fake_print(*args, **kwargs): sep = kwargs.pop('sep', ' ') end = kwargs.pop('end', '\n') file = kwargs.pop('file', sys.stdout) if file in (oldout, olderr, sys.stdout, sys.stderr): # we mock try: sys.stdout.write(sep.join(args) + end) except UnicodeEncodeError as exc: lgr.error( "Failed to write to mocked stdout, got %s, continue as it " "didn't happen", exc) else: # must be some other file one -- leave it alone oldprint(*args, sep=sep, end=end, file=file) from .ui import ui # preserve -- they could have been mocked already oldprint = getattr(builtins, 'print') oldout, olderr = sys.stdout, sys.stderr olduiout = ui.out adapter = StringIOAdapter() try: sys.stdout, sys.stderr = adapter.handles ui.out = adapter.handles[0] setattr(builtins, 'print', fake_print) yield adapter finally: sys.stdout, sys.stderr, ui.out = oldout, olderr, olduiout setattr(builtins, 'print', oldprint) adapter.cleanup() @contextmanager def swallow_logs(new_level=None, file_=None, name='datalad'): """Context manager to consume all logs. """ lgr = logging.getLogger(name) # Keep old settings old_level = lgr.level old_handlers = lgr.handlers # Let's log everything into a string # TODO: generalize with the one for swallow_outputs class StringIOAdapter(object): """Little adapter to help getting out values And to stay consistent with how swallow_outputs behaves """ def __init__(self): if file_ is None: kw = get_tempfile_kwargs({}, prefix="logs") self._out = NamedTemporaryFile(mode='a', delete=False, **kw) else: out_file = file_ # PY3 requires clearly one or another. 
race condition possible self._out = open(out_file, 'a') self._final_out = None def _read(self, h): with open(h.name) as f: return f.read() @property def out(self): if self._final_out is not None: # we closed and cleaned up already return self._final_out else: self._out.flush() return self._read(self._out) @property def lines(self): return self.out.split('\n') @property def handle(self): return self._out def cleanup(self): # store for access while object exists self._final_out = self.out self._out.close() out_name = self._out.name del self._out gc.collect() if not file_: rmtemp(out_name) def assert_logged(self, msg=None, level=None, regex=True, **kwargs): """Provide assertion on whether a msg was logged at a given level If neither `msg` nor `level` provided, checks if anything was logged at all. Parameters ---------- msg: str, optional Message (as a regular expression, if `regex`) to be searched. If no msg provided, checks if anything was logged at a given level. level: str, optional String representing the level to be logged regex: bool, optional If False, regular `assert_in` is used **kwargs: str, optional Passed to `assert_re_in` or `assert_in` """ from datalad.tests.utils import assert_re_in from datalad.tests.utils import assert_in if regex: match = r'\[%s\] ' % level if level else r"\[\S+\] " else: match = '[%s] ' % level if level else '' if msg: match += msg if match: (assert_re_in if regex else assert_in)(match, self.out, **kwargs) else: assert not kwargs, "no kwargs to be passed anywhere" assert self.out, "Nothing was logged!?" adapter = StringIOAdapter() # TODO: it does store messages but without any formatting, i.e. even without # date/time prefix etc. IMHO it should preserve formatting in case if file_ is # set swallow_handler = logging.StreamHandler(adapter.handle) # we want to log levelname so we could test against it swallow_handler.setFormatter( logging.Formatter('[%(levelname)s] %(message)s')) swallow_handler.filters = sum([h.filters for h in old_handlers], []) lgr.handlers = [swallow_handler] if old_level < logging.DEBUG: # so if HEAVYDEBUG etc -- show them! lgr.handlers += old_handlers if isinstance(new_level, str): new_level = getattr(logging, new_level) if new_level is not None: lgr.setLevel(new_level) try: yield adapter # TODO: if file_ and there was an exception -- most probably worth logging it? # although ideally it should be the next log outside added to that file_ ... oh well finally: lgr.handlers = old_handlers lgr.setLevel(old_level) adapter.cleanup() # TODO: May be melt in with swallow_logs at some point: @contextmanager def disable_logger(logger=None): """context manager to temporarily disable logging This is to provide one of swallow_logs' purposes without unnecessarily creating temp files (see gh-1865) Parameters ---------- logger: Logger Logger whose handlers will be ordered to not log anything. Default: datalad's topmost Logger ('datalad') """ class NullFilter(logging.Filter): """Filter class to reject all records """ def filter(self, record): return 0 if logger is None: # default: all of datalad's logging: logger = logging.getLogger('datalad') filter_ = NullFilter(logger.name) [h.addFilter(filter_) for h in logger.handlers] try: yield logger finally: [h.removeFilter(filter_) for h in logger.handlers] # # Additional handlers # _sys_excepthook = sys.excepthook # Just in case we ever need original one def setup_exceptionhook(ipython=False): """Overloads default sys.excepthook with our exceptionhook handler. 
If interactive, our exceptionhook handler will invoke pdb.post_mortem; if not interactive, then invokes default handler. """ def _datalad_pdb_excepthook(type, value, tb): import traceback traceback.print_exception(type, value, tb) print() if is_interactive(): import pdb pdb.post_mortem(tb) if ipython: from IPython.core import ultratb sys.excepthook = ultratb.FormattedTB(mode='Verbose', # color_scheme='Linux', call_pdb=is_interactive()) else: sys.excepthook = _datalad_pdb_excepthook def ensure_dir(*args): """Make sure directory exists. Joins the list of arguments to an os-specific path to the desired directory and creates it, if it not exists yet. """ dirname = op.join(*args) if not exists(dirname): os.makedirs(dirname) return dirname def updated(d, update): """Return a copy of the input with the 'update' Primarily for updating dictionaries """ d = d.copy() d.update(update) return d _pwd_mode = None def _switch_to_getcwd(msg, *args): global _pwd_mode _pwd_mode = 'cwd' lgr.debug( msg + ". From now on will be returning os.getcwd(). Directory" " symlinks in the paths will be resolved", *args ) # TODO: we might want to mitigate by going through all flywheighted # repos and tuning up their .paths to be resolved? def getpwd(): """Try to return a CWD without dereferencing possible symlinks This function will try to use PWD environment variable to provide a current working directory, possibly with some directories along the path being symlinks to other directories. Unfortunately, PWD is used/set only by the shell and such functions as `os.chdir` and `os.getcwd` nohow use or modify it, thus `os.getcwd()` returns path with links dereferenced. While returning current working directory based on PWD env variable we verify that the directory is the same as `os.getcwd()` after resolving all symlinks. If that verification fails, we fall back to always use `os.getcwd()`. Initial decision to either use PWD env variable or os.getcwd() is done upon the first call of this function. """ global _pwd_mode if _pwd_mode is None: # we need to decide! try: pwd = os.environ['PWD'] if on_windows and pwd and pwd.startswith('/'): # It should be a path from MSYS. # - it might start with a drive letter or not # - it seems to be "illegal" to have a single letter directories # under / path, i.e. if created - they aren't found # - 'ln -s' does not fail to create a "symlink" but it just # copies! # so we are not likely to need original PWD purpose on # those systems # Verdict: _pwd_mode = 'cwd' else: _pwd_mode = 'PWD' except KeyError: _pwd_mode = 'cwd' if _pwd_mode == 'cwd': return os.getcwd() elif _pwd_mode == 'PWD': try: cwd = os.getcwd() except OSError as exc: if "o such file" in str(exc): # directory was removed but we promised to be robust and # still report the path we might know since we are still in PWD # mode cwd = None else: raise try: pwd = os.environ['PWD'] # do absolute() in addition to always get an absolute path # even with non-existing paths on windows pwd_real = str(Path(pwd).resolve().absolute()) # This logic would fail to catch the case where chdir did happen # to the directory where current PWD is pointing to, e.g. 
# $> ls -ld $PWD # lrwxrwxrwx 1 yoh yoh 5 Oct 11 13:27 /home/yoh/.tmp/tmp -> /tmp// # hopa:~/.tmp/tmp # $> python -c 'import os; os.chdir("/tmp"); from datalad.utils import getpwd; print(getpwd(), os.getcwd())' # ('/home/yoh/.tmp/tmp', '/tmp') # but I guess that should not be too harmful if cwd is not None and pwd_real != cwd: _switch_to_getcwd( "realpath of PWD=%s is %s whenever os.getcwd()=%s", pwd, pwd_real, cwd ) return cwd return pwd except KeyError: _switch_to_getcwd("PWD env variable is no longer available") return cwd # Must not happen, but may be someone # evil purges PWD from environ? else: raise RuntimeError( "Must have not got here. " "pwd_mode must be either cwd or PWD. And it is now %r" % (_pwd_mode,) ) class chpwd(object): """Wrapper around os.chdir which also adjusts environ['PWD'] The reason is that otherwise PWD is simply inherited from the shell and we have no ability to assess directory path without dereferencing symlinks. If used as a context manager it allows to temporarily change directory to the given path """ def __init__(self, path, mkdir=False, logsuffix=''): if path: pwd = getpwd() self._prev_pwd = pwd else: self._prev_pwd = None return if not isabs(path): path = normpath(op.join(pwd, path)) if not os.path.exists(path) and mkdir: self._mkdir = True os.mkdir(path) else: self._mkdir = False lgr.debug("chdir %r -> %r %s", self._prev_pwd, path, logsuffix) os.chdir(path) # for grep people -- ok, to chdir here! os.environ['PWD'] = str(path) def __enter__(self): # nothing more to do really, chdir was in the constructor pass def __exit__(self, exc_type, exc_val, exc_tb): if self._prev_pwd: # Need to use self.__class__ so this instance, if the entire # thing mocked during the test, still would use correct chpwd self.__class__(self._prev_pwd, logsuffix="(coming back)") def dlabspath(path, norm=False): """Symlinks-in-the-cwd aware abspath os.path.abspath relies on os.getcwd() which would not know about symlinks in the path TODO: we might want to norm=True by default to match behavior of os .path.abspath? """ if not isabs(path): # if not absolute -- relative to pwd path = op.join(getpwd(), path) return normpath(path) if norm else path def with_pathsep(path): """Little helper to guarantee that path ends with /""" return path + sep if not path.endswith(sep) else path def get_path_prefix(path, pwd=None): """Get path prefix (for current directory) Returns relative path to the topdir, if we are under topdir, and if not absolute path to topdir. If `pwd` is not specified - current directory assumed """ pwd = pwd or getpwd() path = dlabspath(path) path_ = with_pathsep(path) pwd_ = with_pathsep(pwd) common = commonprefix((path_, pwd_)) if common.endswith(sep) and common in {path_, pwd_}: # we are in subdir or above the path = use relative path location_prefix = relpath(path, pwd) # if benign "here" - cut off if location_prefix in (curdir, curdir + sep): location_prefix = '' return location_prefix else: # just return absolute path return path def _get_normalized_paths(path, prefix): if isabs(path) != isabs(prefix): raise ValueError("Both paths must either be absolute or relative. 
" "Got %r and %r" % (path, prefix)) path = with_pathsep(path) prefix = with_pathsep(prefix) return path, prefix def path_startswith(path, prefix): """Return True if path starts with prefix path Parameters ---------- path: str prefix: str """ path, prefix = _get_normalized_paths(path, prefix) return path.startswith(prefix) def path_is_subpath(path, prefix): """Return True if path is a subpath of prefix It will return False if path == prefix. Parameters ---------- path: str prefix: str """ path, prefix = _get_normalized_paths(path, prefix) return (len(prefix) < len(path)) and path.startswith(prefix) def knows_annex(path): """Returns whether at a given path there is information about an annex It is just a thin wrapper around GitRepo.is_with_annex() classmethod which also checks for `path` to exist first. This includes actually present annexes, but also uninitialized ones, or even the presence of a remote annex branch. """ from os.path import exists if not exists(path): lgr.debug("No annex: test path {0} doesn't exist".format(path)) return False from datalad.support.gitrepo import GitRepo return GitRepo(path, init=False, create=False).is_with_annex() @contextmanager def make_tempfile(content=None, wrapped=None, **tkwargs): """Helper class to provide a temporary file name and remove it at the end (context manager) Parameters ---------- mkdir : bool, optional (default: False) If True, temporary directory created using tempfile.mkdtemp() content : str or bytes, optional Content to be stored in the file created wrapped : function, optional If set, function name used to prefix temporary file name `**tkwargs`: All other arguments are passed into the call to tempfile.mk{,d}temp(), and resultant temporary filename is passed as the first argument into the function t. If no 'prefix' argument is provided, it will be constructed using module and function names ('.' replaced with '_'). To change the used directory without providing keyword argument 'dir' set DATALAD_TESTS_TEMP_DIR. Examples -------- >>> from os.path import exists >>> from datalad.utils import make_tempfile >>> with make_tempfile() as fname: ... k = open(fname, 'w').write('silly test') >>> assert not exists(fname) # was removed >>> with make_tempfile(content="blah") as fname: ... assert open(fname).read() == "blah" """ if tkwargs.get('mkdir', None) and content is not None: raise ValueError("mkdir=True while providing content makes no sense") tkwargs_ = get_tempfile_kwargs(tkwargs, wrapped=wrapped) # if DATALAD_TESTS_TEMP_DIR is set, use that as directory, # let mktemp handle it otherwise. However, an explicitly provided # dir=... will override this. mkdir = tkwargs_.pop('mkdir', False) filename = {False: tempfile.mktemp, True: tempfile.mkdtemp}[mkdir](**tkwargs_) # MIH: not clear to me why we need to perform this (possibly expensive) # resolve. It was already part of the original implementation # 008d9ab8cc3e0170c0a9b8479e80dee9ffe6eb7f filename = Path(filename).resolve() if content: (filename.write_bytes if isinstance(content, bytes) else filename.write_text)(content) # TODO globbing below can also be done with pathlib filename = str(filename) if __debug__: lgr.debug( 'Created temporary %s named %s', 'directory' if mkdir else 'file', filename) try: yield filename finally: # glob here for all files with the same name (-suffix) # would be useful whenever we requested .img filename, # and function creates .hdr as well # MIH: this is undocumented behavior, and undesired in the general # case. 
it should be made conditional and explicit lsuffix = len(tkwargs_.get('suffix', '')) filename_ = lsuffix and filename[:-lsuffix] or filename filenames = glob.glob(filename_ + '*') if len(filename_) < 3 or len(filenames) > 5: # For paranoid yoh who stepped into this already ones ;-) lgr.warning("It is unlikely that it was intended to remove all" " files matching %r. Skipping" % filename_) return for f in filenames: try: rmtemp(f) except OSError: # pragma: no cover pass def _path_(*p): """Given a path in POSIX" notation, regenerate one in native to the env one""" if on_windows: return op.join(*map(lambda x: op.join(*x.split('/')), p)) else: # Assume that all others as POSIX compliant so nothing to be done return op.join(*p) def get_timestamp_suffix(time_=None, prefix='-'): """Return a time stamp (full date and time up to second) primarily to be used for generation of log files names """ args = [] if time_ is not None: if isinstance(time_, int): time_ = time.gmtime(time_) args.append(time_) return time.strftime(prefix + TIMESTAMP_FMT, *args) # unused in -core def get_logfilename(dspath, cmd='datalad'): """Return a filename to use for logging under a dataset/repository directory would be created if doesn't exist, but dspath must exist and be a directory """ assert(exists(dspath)) assert(isdir(dspath)) ds_logdir = ensure_dir(dspath, '.git', 'datalad', 'logs') # TODO: use WEB_META_LOG whenever #789 merged return op.join(ds_logdir, 'crawl-%s.log' % get_timestamp_suffix()) def get_trace(edges, start, end, trace=None): """Return the trace/path to reach a node in a tree. Parameters ---------- edges : sequence(2-tuple) The tree given by a sequence of edges (parent, child) tuples. The nodes can be identified by any value and data type that supports the '==' operation. start : Identifier of the start node. Must be present as a value in the parent location of an edge tuple in order to be found. end : Identifier of the target/end node. Must be present as a value in the child location of an edge tuple in order to be found. trace : list Mostly useful for recursive calls, and used internally. Returns ------- None or list Returns a list with the trace to the target (the starts and the target are not included in the trace, hence if start and end are directly connected an empty list is returned), or None when no trace to the target can be found, or start and end are identical. """ # the term trace is used to avoid confusion with a path in the sense # of a filesystem path, but the analogy fits and nodes can be paths if trace is None: trace = [] if not edges: raise ValueError("no edges given") for cand in edges: cand_super, cand_sub = cand if cand_sub in trace: # only DAGs, skip any cyclic traces continue if trace and cand_super != trace[-1]: # only consider edges that lead off the end of the trace continue if not trace and cand_super != start: # we got nothing yet, and this edges is not matching the start continue if cand_sub == end: return trace # dive into potential subnodes cand_trace = get_trace( edges, start, end, trace + [cand_sub]) if cand_trace: return cand_trace return None def get_dataset_root(path): """Return the root of an existent dataset containing a given path The root path is returned in the same absolute or relative form as the input argument. If no associated dataset exists, or the input path doesn't exist, None is returned. If `path` is a symlink or something other than a directory, its the root dataset containing its parent directory will be reported. 
If none can be found, at a symlink at `path` is pointing to a dataset, `path` itself will be reported as the root. Parameters ---------- path : Path-like Returns ------- str or None """ path = str(path) suffix = '.git' altered = None if islink(path) or not isdir(path): altered = path path = dirname(path) apath = abspath(path) # while we can still go up while split(apath)[1]: if exists(op.join(path, suffix)): return path # new test path in the format we got it path = normpath(op.join(path, os.pardir)) # no luck, next round apath = abspath(path) # if we applied dirname() at the top, we give it another go with # the actual path, if it was itself a symlink, it could be the # top-level dataset itself if altered and exists(op.join(altered, suffix)): return altered return None # ATM used in datalad_crawler extension, so do not remove yet def try_multiple(ntrials, exception, base, f, *args, **kwargs): """Call f multiple times making exponentially growing delay between the calls""" for trial in range(1, ntrials+1): try: return f(*args, **kwargs) except exception as exc: if trial == ntrials: raise # just reraise on the last trial t = base ** trial lgr.warning("Caught %s on trial #%d. Sleeping %f and retrying", CapturedException(exc), trial, t) sleep(t) @optional_args def try_multiple_dec( f, ntrials=None, duration=0.1, exceptions=None, increment_type=None, exceptions_filter=None, logger=None, ): """Decorator to try function multiple times. Main purpose is to decorate functions dealing with removal of files/directories and which might need a few seconds to work correctly on Windows which takes its time to release files/directories. Parameters ---------- ntrials: int, optional duration: float, optional Seconds to sleep before retrying. increment_type: {None, 'exponential'} Note that if it is exponential, duration should typically be > 1.0 so it grows with higher power exceptions: Exception or tuple of Exceptions, optional Exception or a tuple of multiple exceptions, on which to retry exceptions_filter: callable, optional If provided, this function will be called with a caught exception instance. If function returns True - we will re-try, if False - exception will be re-raised without retrying. logger: callable, optional Logger to log upon failure. If not provided, will use stock logger at the level of 5 (heavy debug). """ if not exceptions: exceptions = (OSError, WindowsError, PermissionError) \ if on_windows else OSError if not ntrials: # Life goes fast on proper systems, no need to delay it much ntrials = 100 if on_windows else 10 if logger is None: def logger(*args, **kwargs): return lgr.log(5, *args, **kwargs) assert increment_type in {None, 'exponential'} @wraps(f) def _wrap_try_multiple_dec(*args, **kwargs): t = duration for trial in range(ntrials): try: return f(*args, **kwargs) except exceptions as exc: if exceptions_filter and not exceptions_filter(exc): raise if trial < ntrials - 1: if increment_type == 'exponential': t = duration ** (trial + 1) logger( "Caught %s on trial #%d. Sleeping %f and retrying", CapturedException(exc), trial, t) sleep(t) else: raise return _wrap_try_multiple_dec @try_multiple_dec def unlink(f): """'Robust' unlink. Would try multiple times On windows boxes there is evidence for a latency of more than a second until a file is considered no longer "in-use". 
WindowsError is not known on Linux, and if IOError or any other exception is thrown then if except statement has WindowsError in it -- NameError also see gh-2533 """ # Check for open files assert_no_open_files(f) return os.unlink(f) @try_multiple_dec def _rmtree(*args, **kwargs): """Just a helper to decorate shutil.rmtree. rmtree defined above does more and ideally should not itself be decorated since a recursive definition and does checks for open files inside etc - might be too runtime expensive """ return shutil.rmtree(*args, **kwargs) def slash_join(base, extension): """Join two strings with a '/', avoiding duplicate slashes If any of the strings is None the other is returned as is. """ if extension is None: return base if base is None: return extension return '/'.join( (base.rstrip('/'), extension.lstrip('/'))) # # IO Helpers # # unused in -core def open_r_encdetect(fname, readahead=1000): """Return a file object in read mode with auto-detected encoding This is helpful when dealing with files of unknown encoding. Parameters ---------- readahead: int, optional How many bytes to read for guessing the encoding type. If negative - full file will be read """ from chardet import detect import io # read some bytes from the file with open(fname, 'rb') as f: head = f.read(readahead) enc = detect(head) denc = enc.get('encoding', None) lgr.debug("Auto-detected encoding %s for file %s (confidence: %s)", denc, fname, enc.get('confidence', 'unknown')) return io.open(fname, encoding=denc) def read_file(fname, decode=True): """A helper to read file passing content via ensure_unicode Parameters ---------- decode: bool, optional if False, no ensure_unicode and file content returned as bytes """ with open(fname, 'rb') as f: content = f.read() return ensure_unicode(content) if decode else content def read_csv_lines(fname, dialect=None, readahead=16384, **kwargs): """A generator of dict records from a CSV/TSV Automatically guesses the encoding for each record to convert to UTF-8 Parameters ---------- fname: str Filename dialect: str, optional Dialect to specify to csv.reader. If not specified -- guessed from the file, if fails to guess, "excel-tab" is assumed readahead: int, optional How many bytes to read from the file to guess the type **kwargs Passed to `csv.reader` """ import csv if dialect is None: with open(fname) as tsvfile: # add robustness, use a sniffer try: dialect = csv.Sniffer().sniff(tsvfile.read(readahead)) except Exception as exc: lgr.warning( 'Could not determine file-format, assuming TSV: %s', CapturedException(exc) ) dialect = 'excel-tab' kw = dict(encoding='utf-8') with open(fname, 'r', **kw) as tsvfile: # csv.py doesn't do Unicode; encode temporarily as UTF-8: csv_reader = csv.reader( tsvfile, dialect=dialect, **kwargs ) header = None for row in csv_reader: # decode UTF-8 back to Unicode, cell by cell: row_unicode = map(ensure_unicode, row) if header is None: header = list(row_unicode) else: yield dict(zip(header, row_unicode)) def import_modules(modnames, pkg, msg="Failed to import {module}", log=lgr.debug): """Helper to import a list of modules without failing if N/A Parameters ---------- modnames: list of str List of module names to import pkg: str Package under which to import msg: str, optional Message template for .format() to log at DEBUG level if import fails. 
Keys {module} and {package} will be provided and ': {exception}' appended log: callable, optional Logger call to use for logging messages """ from importlib import import_module _globals = globals() mods_loaded = [] if pkg and not pkg in sys.modules: # with python 3.5.1 (ok with 3.5.5) somehow kept running into # Failed to import dlsub1: Parent module 'dltestm1' not loaded # while running the test. Preloading pkg resolved the issue import_module(pkg) for modname in modnames: try: _globals[modname] = mod = import_module( '.{}'.format(modname), pkg) mods_loaded.append(mod) except Exception as exc: from datalad.support.exceptions import CapturedException ce = CapturedException(exc) log((msg + ': {exception}').format( module=modname, package=pkg, exception=ce.message)) return mods_loaded def import_module_from_file(modpath, pkg=None, log=lgr.debug): """Import provided module given a path TODO: - RF/make use of it in pipeline.py which has similar logic - join with import_modules above? Parameters ---------- pkg: module, optional If provided, and modpath is under pkg.__path__, relative import will be used """ assert(modpath.endswith('.py')) # for now just for .py files log("Importing %s" % modpath) modname = basename(modpath)[:-3] relmodpath = None if pkg: for pkgpath in pkg.__path__: if path_is_subpath(modpath, pkgpath): # for now relying on having .py extension -- assertion above relmodpath = '.' + relpath(modpath[:-3], pkgpath).replace(sep, '.') break try: if relmodpath: from importlib import import_module mod = import_module(relmodpath, pkg.__name__) else: dirname_ = dirname(modpath) try: sys.path.insert(0, dirname_) mod = __import__(modname, level=0) finally: if dirname_ in sys.path: sys.path.pop(sys.path.index(dirname_)) else: log("Expected path %s to be within sys.path, but it was gone!" % dirname_) except Exception as e: raise RuntimeError( "Failed to import module from %s" % modpath) from e return mod def get_encoding_info(): """Return a dictionary with various encoding/locale information""" import sys, locale from collections import OrderedDict return OrderedDict([ ('default', sys.getdefaultencoding()), ('filesystem', sys.getfilesystemencoding()), ('locale.prefered', locale.getpreferredencoding()), ]) def get_envvars_info(): from collections import OrderedDict envs = [] for var, val in os.environ.items(): if ( var.startswith('PYTHON') or var.startswith('LC_') or var.startswith('GIT_') or var in ('LANG', 'LANGUAGE', 'PATH') ): envs.append((var, val)) return OrderedDict(envs) # This class is modified from Snakemake (v5.1.4) class SequenceFormatter(string.Formatter): """string.Formatter subclass with special behavior for sequences. This class delegates formatting of individual elements to another formatter object. Non-list objects are formatted by calling the delegate formatter's "format_field" method. List-like objects (list, tuple, set, frozenset) are formatted by formatting each element of the list according to the specified format spec using the delegate formatter and then joining the resulting strings with a separator (space by default). """ def __init__(self, separator=" ", element_formatter=string.Formatter(), *args, **kwargs): self.separator = separator self.element_formatter = element_formatter def format_element(self, elem, format_spec): """Format a single element For sequences, this is called once for each element in a sequence. For anything else, it is called on the entire object. It is intended to be overridden in subclases. 
""" return self.element_formatter.format_field(elem, format_spec) def format_field(self, value, format_spec): if isinstance(value, (list, tuple, set, frozenset)): return self.separator.join(self.format_element(v, format_spec) for v in value) else: return self.format_element(value, format_spec) # TODO: eventually we might want to make use of attr module class File(object): """Helper for a file entry in the create_tree/@with_tree It allows to define additional settings for entries """ def __init__(self, name, executable=False): """ Parameters ---------- name : str Name of the file executable: bool, optional Make it executable """ self.name = name self.executable = executable def __str__(self): return self.name def create_tree_archive(path, name, load, overwrite=False, archives_leading_dir=True): """Given an archive `name`, create under `path` with specified `load` tree """ from datalad.support.archives import compress_files dirname = file_basename(name) full_dirname = op.join(path, dirname) os.makedirs(full_dirname) create_tree(full_dirname, load, archives_leading_dir=archives_leading_dir) # create archive if archives_leading_dir: compress_files([dirname], name, path=path, overwrite=overwrite) else: compress_files(list(map(basename, glob.glob(op.join(full_dirname, '*')))), op.join(pardir, name), path=op.join(path, dirname), overwrite=overwrite) # remove original tree rmtree(full_dirname) def create_tree(path, tree, archives_leading_dir=True, remove_existing=False): """Given a list of tuples (name, load) create such a tree if load is a tuple itself -- that would create either a subtree or an archive with that content and place it into the tree if name ends with .tar.gz """ lgr.log(5, "Creating a tree under %s", path) if not exists(path): os.makedirs(path) if isinstance(tree, dict): tree = tree.items() for file_, load in tree: if isinstance(file_, File): executable = file_.executable name = file_.name else: executable = False name = file_ full_name = op.join(path, name) if remove_existing and lexists(full_name): rmtree(full_name, chmod_files=True) if isinstance(load, (tuple, list, dict)): if name.endswith('.tar.gz') or name.endswith('.tar') or name.endswith('.zip'): create_tree_archive( path, name, load, archives_leading_dir=archives_leading_dir) else: create_tree( full_name, load, archives_leading_dir=archives_leading_dir, remove_existing=remove_existing) else: open_func = open if full_name.endswith('.gz'): open_func = gzip.open elif full_name.split('.')[-1] in ('xz', 'lzma'): import lzma open_func = lzma.open with open_func(full_name, "wb") as f: f.write(ensure_bytes(load, 'utf-8')) if executable: os.chmod(full_name, os.stat(full_name).st_mode | stat.S_IEXEC) def get_suggestions_msg(values, known, sep="\n "): """Return a formatted string with suggestions for values given the known ones """ import difflib suggestions = [] for value in ensure_list(values): # might not want to do it if we change presentation below suggestions += difflib.get_close_matches(value, known) suggestions = unique(suggestions) msg = "Did you mean any of these?" if suggestions: if '\n' in sep: # if separator includes new line - we add entire separator right away msg += sep else: msg += ' ' return msg + "%s\n" % sep.join(suggestions) return '' def bytes2human(n, format='%(value).1f %(symbol)sB'): """ Convert n bytes into a human readable string based on format. 
symbols can be either "customary", "customary_ext", "iec" or "iec_ext", see: http://goo.gl/kTQMs >>> from datalad.utils import bytes2human >>> bytes2human(1) '1.0 B' >>> bytes2human(1024) '1.0 KB' >>> bytes2human(1048576) '1.0 MB' >>> bytes2human(1099511627776127398123789121) '909.5 YB' >>> bytes2human(10000, "%(value).1f %(symbol)s/sec") '9.8 K/sec' >>> # precision can be adjusted by playing with %f operator >>> bytes2human(10000, format="%(value).5f %(symbol)s") '9.76562 K' Taken from: http://goo.gl/kTQMs and subsequently simplified Original Author: Giampaolo Rodola' <g.rodola [AT] gmail [DOT] com> License: MIT """ n = int(n) if n < 0: raise ValueError("n < 0") symbols = ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y') prefix = {} for i, s in enumerate(symbols[1:]): prefix[s] = 1 << (i + 1) * 10 for symbol in reversed(symbols[1:]): if n >= prefix[symbol]: value = float(n) / prefix[symbol] return format % locals() return format % dict(symbol=symbols[0], value=n) def quote_cmdlinearg(arg): """Perform platform-appropriate argument quoting""" # https://stackoverflow.com/a/15262019 return '"{}"'.format( arg.replace('"', '""') ) if on_windows else shlex_quote(arg) def guard_for_format(arg): """Replace { and } with {{ and }} To be used in cases if arg is not expected to have provided by user .format() placeholders, but 'arg' might become a part of a composite passed to .format(), e.g. via 'Run' """ return arg.replace('{', '{{').replace('}', '}}') def join_cmdline(args): """Join command line args into a string using quote_cmdlinearg """ return ' '.join(map(quote_cmdlinearg, args)) def split_cmdline(s): """Perform platform-appropriate command line splitting. Identical to `shlex.split()` on non-windows platforms. Modified from https://stackoverflow.com/a/35900070 """ if not on_windows: return shlex_split(s) # the rest is for windows RE_CMD_LEX = r'''"((?:""|\\["\\]|[^"])*)"?()|(\\\\(?=\\*")|\\")|(&&?|\|\|?|\d?>|[<])|([^\s"&|<>]+)|(\s+)|(.)''' args = [] accu = None # collects pieces of one arg for qs, qss, esc, pipe, word, white, fail in re.findall(RE_CMD_LEX, s): if word: pass # most frequent elif esc: word = esc[1] elif white or pipe: if accu is not None: args.append(accu) if pipe: args.append(pipe) accu = None continue elif fail: raise ValueError("invalid or incomplete shell string") elif qs: word = qs.replace('\\"', '"').replace('\\\\', '\\') if platform == 0: word = word.replace('""', '"') else: word = qss # may be even empty; must be last accu = (accu or '') + word if accu is not None: args.append(accu) return args def get_wrapped_class(wrapped): """Determine the command class a wrapped __call__ belongs to""" mod = sys.modules[wrapped.__module__] command_class_name = wrapped.__qualname__.split('.')[-2] _func_class = mod.__dict__[command_class_name] lgr.debug("Determined class of decorated function: %s", _func_class) return _func_class def _make_assure_kludge(fn): old_name = fn.__name__.replace("ensure", "assure") @wraps(fn) def compat_fn(*args, **kwargs): warnings.warn( "{} is deprecated and will be removed in a future release. " "Use {} instead." .format(old_name, fn.__name__), DeprecationWarning) return fn(*args, **kwargs) compat_fn.__doc__ = ("Note: This function is deprecated. Use {} instead." 
.format(fn.__name__)) return compat_fn assure_tuple_or_list = _make_assure_kludge(ensure_tuple_or_list) assure_iter = _make_assure_kludge(ensure_iter) assure_list = _make_assure_kludge(ensure_list) assure_list_from_str = _make_assure_kludge(ensure_list_from_str) assure_dict_from_str = _make_assure_kludge(ensure_dict_from_str) assure_bytes = _make_assure_kludge(ensure_bytes) assure_unicode = _make_assure_kludge(ensure_unicode) assure_bool = _make_assure_kludge(ensure_bool) assure_dir = _make_assure_kludge(ensure_dir) lgr.log(5, "Done importing datalad.utils") def check_symlink_capability(path, target): """helper similar to datalad.tests.utils.has_symlink_capability However, for use in a datalad command context, we shouldn't assume to be able to write to tmpfile and also not import a whole lot from datalad's test machinery. Finally, we want to know, whether we can create a symlink at a specific location, not just somewhere. Therefore use arbitrary path to test-build a symlink and delete afterwards. Suitable location can therefore be determined by high lever code. Parameters ---------- path: Path target: Path Returns ------- bool """ try: target.touch() path.symlink_to(target) return True except Exception: return False finally: if path.exists(): path.unlink() if target.exists(): target.unlink()
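# Hedged usage sketch, not part of the datalad source above: it only
# illustrates how check_symlink_capability (defined right above) might be
# probed inside a throwaway temporary directory. The probe file names are
# assumptions made up for this example.
import tempfile
from pathlib import Path

with tempfile.TemporaryDirectory() as tmpdir:
    probe_link = Path(tmpdir) / 'probe_link'      # symlink we try to create
    probe_target = Path(tmpdir) / 'probe_target'  # file the symlink would point to
    if check_symlink_capability(probe_link, probe_target):
        print("symlinks can be created at this location")
    else:
        print("no symlink capability (e.g. unprivileged Windows account)")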
def ensure_unicode(s, encoding=None, confidence=None): """Convert/decode bytestring to unicode. If `s` isn't a bytestring, return it as is. Parameters ---------- encoding: str, optional Encoding to use. If None, "utf-8" is tried, and then if not a valid UTF-8, encoding will be guessed confidence: float, optional A value between 0 and 1, so if guessing of encoding is of lower than specified confidence, ValueError is raised """ if not isinstance(s, bytes): return s if encoding is None: # Figure out encoding, defaulting to 'utf-8' which is our common # target in contemporary digital society try: return s.decode('utf-8') except UnicodeDecodeError as exc: lgr.debug("Failed to decode a string as utf-8: %s", CapturedException(exc)) # And now we could try to guess from chardet import detect enc = detect(s) denc = enc.get('encoding', None) if denc: denc_confidence = enc.get('confidence', 0) if confidence is not None and denc_confidence < confidence: raise ValueError( "Failed to auto-detect encoding with high enough " "confidence. Highest confidence was %s for %s" % (denc_confidence, denc) ) lgr.log(5, "Auto-detected encoding to be %s", denc) return s.decode(denc) else: raise ValueError( "Could not decode value as utf-8, or to guess its encoding: %s" % repr(s) ) else: return s.decode(encoding)
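# Hedged usage sketch, not part of the datalad source: a few illustrative
# calls against the ensure_unicode implementation above. The byte strings are
# made-up examples; guessing a non-UTF-8 encoding (the branch not exercised
# here) additionally relies on the chardet package.
assert ensure_unicode('plain text') == 'plain text'                 # non-bytes returned as is
assert ensure_unicode(b'caf\xc3\xa9') == 'caf\xe9'                  # valid UTF-8 decoded directly
assert ensure_unicode(b'gr\xfcn', encoding='latin-1') == 'gr\xfcn'  # explicit encoding, no guessing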
807
851
# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*- # ex: set sts=4 ts=4 sw=4 et: # ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## # # See COPYING file distributed along with the datalad package for the # copyright and license terms. # # ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## import collections from collections.abc import Callable import re import builtins import time import logging import shutil import os import sys import tempfile from tempfile import NamedTemporaryFile import platform import gc import glob import gzip import stat import string import warnings import os.path as op from copy import copy as shallow_copy from contextlib import contextmanager from functools import ( lru_cache, wraps, ) from time import sleep import inspect from itertools import tee # this import is required because other modules import opj from here. from os.path import join as opj from os.path import ( abspath, basename, commonprefix, curdir, dirname, exists, expanduser, expandvars, isabs, isdir, islink, lexists, normpath, pardir, relpath, sep, split, splitdrive ) import posixpath from shlex import ( quote as shlex_quote, split as shlex_split, ) # from datalad.dochelpers import get_docstring_split from datalad.consts import TIMESTAMP_FMT from datalad.support.exceptions import CapturedException unicode_srctypes = str, bytes lgr = logging.getLogger("datalad.utils") lgr.log(5, "Importing datalad.utils") # # Some useful variables # platform_system = platform.system().lower() on_windows = platform_system == 'windows' on_osx = platform_system == 'darwin' on_linux = platform_system == 'linux' on_msys_tainted_paths = on_windows \ and 'MSYS_NO_PATHCONV' not in os.environ \ and os.environ.get('MSYSTEM', '')[:4] in ('MSYS', 'MING') # Takes ~200msec, so should not be called at import time @lru_cache() # output should not change through life time of datalad process def get_linux_distribution(): """Compatibility wrapper for {platform,distro}.linux_distribution(). """ if hasattr(platform, "linux_distribution"): # Use deprecated (but faster) method if it's available. with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=DeprecationWarning) result = platform.linux_distribution() else: import distro # We require this for Python 3.8 and above. result = distro.linux_distribution(full_distribution_name=False) return result # Those weren't used for any critical decision making, thus we just set them to None # Use get_linux_distribution() directly where needed linux_distribution_name = linux_distribution_release = None # Maximal length of cmdline string # Query the system and use hardcoded "knowledge" if None # probably getconf ARG_MAX might not be available # The last one would be the most conservative/Windows CMD_MAX_ARG_HARDCODED = 2097152 if on_linux else 262144 if on_osx else 32767 try: CMD_MAX_ARG = os.sysconf('SC_ARG_MAX') assert CMD_MAX_ARG > 0 if CMD_MAX_ARG > CMD_MAX_ARG_HARDCODED * 1e6: # workaround for some kind of a bug which comes up with python 3.4 # see https://github.com/datalad/datalad/issues/3150 # or on older CentOS with conda and python as new as 3.9 # see https://github.com/datalad/datalad/issues/5943 # TODO: let Yarik know that the world is a paradise now whenever 1e6 # is not large enough CMD_MAX_ARG = min(CMD_MAX_ARG, CMD_MAX_ARG_HARDCODED) except Exception as exc: # ATM (20181005) SC_ARG_MAX available only on POSIX systems # so exception would be thrown e.g. 
on Windows, or # somehow during Debian build for nd14.04 it is coming up with -1: # https://github.com/datalad/datalad/issues/3015 CMD_MAX_ARG = CMD_MAX_ARG_HARDCODED lgr.debug( "Failed to query or got useless SC_ARG_MAX sysconf, " "will use hardcoded value: %s", exc) # Even with all careful computations we do, due to necessity to account for # environment and what not, we still could not figure out "exact" way to # estimate it, but it was shown that 300k safety margin on linux was sufficient. # https://github.com/datalad/datalad/pull/2977#issuecomment-436264710 # 300k is ~15%, so to be safe, and for paranoid us we will just use up to 50% # of the length for "safety margin". We might probably still blow due to # env vars, unicode, etc... so any hard limit imho is not a proper solution CMD_MAX_ARG = int(0.5 * CMD_MAX_ARG) lgr.debug( "Maximal length of cmdline string (adjusted for safety margin): %d", CMD_MAX_ARG) # # Little helpers # # `getargspec` has been deprecated in Python 3. ArgSpecFake = collections.namedtuple( "ArgSpecFake", ["args", "varargs", "keywords", "defaults"]) def getargspec(func, *, include_kwonlyargs=False): """Compat shim for getargspec deprecated in python 3. The main difference from inspect.getargspec (and inspect.getfullargspec for that matter) is that by using inspect.signature we are providing correct args/defaults for functools.wraps'ed functions. `include_kwonlyargs` option was added to centralize getting all args, even the ones which are kwonly (follow the ``*,``). For internal use and not advised for use in 3rd party code. Please use inspect.signature directly. """ # We use signature, and not getfullargspec, because only signature properly # "passes" args from a functools.wraps decorated function. # Note: getfullargspec works Ok on wrapt-decorated functions f_sign = inspect.signature(func) # Loop through parameters and compose argspec args4 = [[], None, None, {}] # Collect all kwonlyargs into a dedicated dict - name: default kwonlyargs = {} # shortcuts args, defaults = args4[0], args4[3] P = inspect.Parameter for p_name, p in f_sign.parameters.items(): if p.kind in (P.POSITIONAL_ONLY, P.POSITIONAL_OR_KEYWORD): assert not kwonlyargs # yoh: must not come after kwonlyarg args.append(p_name) if p.default is not P.empty: defaults[p_name] = p.default elif p.kind == P.VAR_POSITIONAL: args4[1] = p_name elif p.kind == P.VAR_KEYWORD: args4[2] = p_name elif p.kind == P.KEYWORD_ONLY: assert p.default is not P.empty kwonlyargs[p_name] = p.default if kwonlyargs: if not include_kwonlyargs: raise ValueError( 'Function has keyword-only parameters or annotations, either use ' 'inspect.signature() API which can support them, or provide include_kwonlyargs=True ' 'to this function' ) else: args.extend(list(kwonlyargs)) defaults.update(kwonlyargs) # harmonize defaults to how original getargspec returned them -- just a tuple args4[3] = None if not defaults else tuple(defaults.values()) return ArgSpecFake(*args4) def any_re_search(regexes, value): """Return if any of regexes (list or str) searches successfully for value""" for regex in ensure_tuple_or_list(regexes): if re.search(regex, value): return True return False def not_supported_on_windows(msg=None): """A little helper to be invoked to consistently fail whenever functionality is not supported (yet) on Windows """ if on_windows: raise NotImplementedError("This functionality is not yet implemented for Windows OS" + (": %s" % msg if msg else "")) def get_home_envvars(new_home): """Return dict with env variables to be adjusted for a 
new HOME Only variables found in current os.environ are adjusted. Parameters ---------- new_home: str or Path New home path, in native to OS "schema" """ new_home = str(new_home) out = {'HOME': new_home} if on_windows: # requires special handling, since it has a number of relevant variables # and also Python changed its behavior and started to respect USERPROFILE only # since python 3.8: https://bugs.python.org/issue36264 out['USERPROFILE'] = new_home out['HOMEDRIVE'], out['HOMEPATH'] = splitdrive(new_home) return {v: val for v, val in out.items() if v in os.environ} def shortened_repr(value, l=30): try: if hasattr(value, '__repr__') and (value.__repr__ is not object.__repr__): value_repr = repr(value) if not value_repr.startswith('<') and len(value_repr) > l: value_repr = "<<%s++%d chars++%s>>" % ( value_repr[:l - 16], len(value_repr) - (l - 16 + 4), value_repr[-4:] ) elif value_repr.startswith('<') and value_repr.endswith('>') and ' object at 0x': raise ValueError("I hate those useless long reprs") else: raise ValueError("gimme class") except Exception as e: value_repr = "<%s>" % value.__class__.__name__.split('.')[-1] return value_repr def __auto_repr__(obj): attr_names = tuple() if hasattr(obj, '__dict__'): attr_names += tuple(obj.__dict__.keys()) if hasattr(obj, '__slots__'): attr_names += tuple(obj.__slots__) items = [] for attr in sorted(set(attr_names)): if attr.startswith('_'): continue value = getattr(obj, attr) # TODO: should we add this feature to minimize some talktative reprs # such as of URL? #if value is None: # continue items.append("%s=%s" % (attr, shortened_repr(value))) return "%s(%s)" % (obj.__class__.__name__, ', '.join(items)) def auto_repr(cls): """Decorator for a class to assign it an automagic quick and dirty __repr__ It uses public class attributes to prepare repr of a class Original idea: http://stackoverflow.com/a/27799004/1265472 """ cls.__repr__ = __auto_repr__ return cls def _is_stream_tty(stream): try: # TODO: check on windows if hasattr check would work correctly and # add value: return stream.isatty() except ValueError as exc: # Who knows why it is a ValueError, but let's try to be specific # If there is a problem with I/O - non-interactive, otherwise reraise if "I/O" in str(exc): return False raise def is_interactive(): """Return True if all in/outs are open and tty. Note that in a somewhat abnormal case where e.g. stdin is explicitly closed, and any operation on it would raise a `ValueError("I/O operation on closed file")` exception, this function would just return False, since the session cannot be used interactively. 
""" return all(_is_stream_tty(s) for s in (sys.stdin, sys.stdout, sys.stderr)) def get_ipython_shell(): """Detect if running within IPython and returns its `ip` (shell) object Returns None if not under ipython (no `get_ipython` function) """ try: return get_ipython() except NameError: return None def md5sum(filename): """Compute an MD5 sum for the given file """ from datalad.support.digests import Digester return Digester(digests=['md5'])(filename)['md5'] # unused in -core def sorted_files(path): """Return a (sorted) list of files under path """ return sorted(sum([[op.join(r, f)[len(path) + 1:] for f in files] for r, d, files in os.walk(path) if not '.git' in r], [])) _encoded_dirsep = r'\\' if on_windows else r'/' _VCS_REGEX = r'%s\.(?:git|gitattributes|svn|bzr|hg)(?:%s|$)' % ( _encoded_dirsep, _encoded_dirsep) _DATALAD_REGEX = r'%s\.(?:datalad)(?:%s|$)' % ( _encoded_dirsep, _encoded_dirsep) def find_files(regex, topdir=curdir, exclude=None, exclude_vcs=True, exclude_datalad=False, dirs=False): """Generator to find files matching regex Parameters ---------- regex: basestring exclude: basestring, optional Matches to exclude exclude_vcs: If True, excludes commonly known VCS subdirectories. If string, used as regex to exclude those files (regex: `%r`) exclude_datalad: If True, excludes files known to be datalad meta-data files (e.g. under .datalad/ subdirectory) (regex: `%r`) topdir: basestring, optional Directory where to search dirs: bool, optional Whether to match directories as well as files """ for dirpath, dirnames, filenames in os.walk(topdir): names = (dirnames + filenames) if dirs else filenames # TODO: might want to uniformize on windows to use '/' paths = (op.join(dirpath, name) for name in names) for path in filter(re.compile(regex).search, paths): path = path.rstrip(sep) if exclude and re.search(exclude, path): continue if exclude_vcs and re.search(_VCS_REGEX, path): continue if exclude_datalad and re.search(_DATALAD_REGEX, path): continue yield path find_files.__doc__ %= (_VCS_REGEX, _DATALAD_REGEX) def expandpath(path, force_absolute=True): """Expand all variables and user handles in a path. By default return an absolute path """ path = expandvars(expanduser(path)) if force_absolute: path = abspath(path) return path def posix_relpath(path, start=None): """Behave like os.path.relpath, but always return POSIX paths... on any platform.""" # join POSIX style return posixpath.join( # split and relpath native style # python2.7 ntpath implementation of relpath cannot handle start=None *split( relpath(path, start=start if start is not None else ''))) def is_explicit_path(path): """Return whether a path explicitly points to a location Any absolute path, or relative path starting with either '../' or './' is assumed to indicate a location on the filesystem. 
Any other path format is not considered explicit.""" path = expandpath(path, force_absolute=False) return isabs(path) \ or path.startswith(os.curdir + os.sep) \ or path.startswith(os.pardir + os.sep) # handle this dance once, and import pathlib from here # in all other places from pathlib import ( Path, PurePath, PurePosixPath, ) def rotree(path, ro=True, chmod_files=True): """To make tree read-only or writable Parameters ---------- path : string Path to the tree/directory to chmod ro : bool, optional Whether to make it R/O (default) or RW chmod_files : bool, optional Whether to operate also on files (not just directories) """ if ro: chmod = lambda f: os.chmod(f, os.stat(f).st_mode & ~stat.S_IWRITE) else: chmod = lambda f: os.chmod(f, os.stat(f).st_mode | stat.S_IWRITE | stat.S_IREAD) for root, dirs, files in os.walk(path, followlinks=False): if chmod_files: for f in files: fullf = op.join(root, f) # might be the "broken" symlink which would fail to stat etc if exists(fullf): chmod(fullf) chmod(root) def rmtree(path, chmod_files='auto', children_only=False, *args, **kwargs): """To remove git-annex .git it is needed to make all files and directories writable again first Parameters ---------- path: Path or str Path to remove chmod_files : string or bool, optional Whether to make files writable also before removal. Usually it is just a matter of directories to have write permissions. If 'auto' it would chmod files on windows by default children_only : bool, optional If set, all files and subdirectories would be removed while the path itself (must be a directory) would be preserved `*args` : `**kwargs` : Passed into shutil.rmtree call """ # Give W permissions back only to directories, no need to bother with files if chmod_files == 'auto': chmod_files = on_windows # TODO: yoh thinks that if we could quickly check our Flyweight for # repos if any of them is under the path, and could call .precommit # on those to possibly stop batched processes etc, we did not have # to do it on case by case # Check for open files assert_no_open_files(path) # TODO the whole thing should be reimplemented with pathlib, but for now # at least accept Path path = str(path) if children_only: if not isdir(path): raise ValueError("Can remove children only of directories") for p in os.listdir(path): rmtree(op.join(path, p)) return if not (islink(path) or not isdir(path)): rotree(path, ro=False, chmod_files=chmod_files) if on_windows: # shutil fails to remove paths that exceed 260 characters on Windows machines # that did not enable long path support. A workaround to remove long paths # anyway is to preprend \\?\ to the path. # https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file?redirectedfrom=MSDN#win32-file-namespaces path = r'\\?\ '.strip() + path _rmtree(path, *args, **kwargs) else: # just remove the symlink unlink(path) def rmdir(path, *args, **kwargs): """os.rmdir with our optional checking for open files""" assert_no_open_files(path) os.rmdir(path) def get_open_files(path, log_open=False): """Get open files under a path Note: This function is very slow on Windows. 
Parameters ---------- path : str File or directory to check for open files under log_open : bool or int If set - logger level to use Returns ------- dict path : pid """ # Original idea: https://stackoverflow.com/a/11115521/1265472 import psutil files = {} # since the ones returned by psutil would not be aware of symlinks in the # path we should also get realpath for path # do absolute() in addition to always get an absolute path # even with non-existing paths on windows path = str(Path(path).resolve().absolute()) for proc in psutil.process_iter(): try: open_paths = [p.path for p in proc.open_files()] + [proc.cwd()] for p in open_paths: # note: could be done more efficiently so we do not # renormalize path over and over again etc if path_startswith(p, path): files[p] = proc # Catch a race condition where a process ends # before we can examine its files except psutil.NoSuchProcess: pass except psutil.AccessDenied: pass if files and log_open: lgr.log(log_open, "Open files under %s: %s", path, files) return files _assert_no_open_files_cfg = os.environ.get('DATALAD_ASSERT_NO_OPEN_FILES') if _assert_no_open_files_cfg: def assert_no_open_files(path): files = get_open_files(path, log_open=40) if _assert_no_open_files_cfg == 'assert': assert not files, "Got following files still open: %s" % ','.join(files) elif files: if _assert_no_open_files_cfg == 'pdb': import pdb pdb.set_trace() elif _assert_no_open_files_cfg == 'epdb': import epdb epdb.serve() pass # otherwise we would just issue that error message in the log else: def assert_no_open_files(*args, **kwargs): pass def rmtemp(f, *args, **kwargs): """Wrapper to centralize removing of temp files so we could keep them around It will not remove the temporary file/directory if DATALAD_TESTS_TEMP_KEEP environment variable is defined """ if not os.environ.get('DATALAD_TESTS_TEMP_KEEP'): if not os.path.lexists(f): lgr.debug("Path %s does not exist, so can't be removed", f) return lgr.log(5, "Removing temp file: %s", f) # Can also be a directory if isdir(f): rmtree(f, *args, **kwargs) else: unlink(f) else: lgr.info("Keeping temp file: %s", f) def file_basename(name, return_ext=False): """ Strips up to 2 extensions of length up to 4 characters and starting with alpha not a digit, so we could get rid of .tar.gz etc """ bname = basename(name) fbname = re.sub(r'(\.[a-zA-Z_]\S{1,4}){0,2}$', '', bname) if return_ext: return fbname, bname[len(fbname) + 1:] else: return fbname # unused in -core def escape_filename(filename): """Surround filename in "" and escape " in the filename """ filename = filename.replace('"', r'\"').replace('`', r'\`') filename = '"%s"' % filename return filename # unused in -core def encode_filename(filename): """Encode unicode filename """ if isinstance(filename, str): return filename.encode(sys.getfilesystemencoding()) else: return filename # unused in -core def decode_input(s): """Given input string/bytes, decode according to stdin codepage (or UTF-8) if not defined If fails -- issue warning and decode allowing for errors being replaced """ if isinstance(s, str): return s else: encoding = sys.stdin.encoding or 'UTF-8' try: return s.decode(encoding) except UnicodeDecodeError as exc: lgr.warning( "Failed to decode input string using %s encoding. " "Decoding allowing for errors", encoding) return s.decode(encoding, errors='replace') # unused in -core if on_windows: def lmtime(filepath, mtime): """Set mtime for files. 
On Windows a merely adapter to os.utime """ os.utime(filepath, (time.time(), mtime)) else: def lmtime(filepath, mtime): """Set mtime for files, while not de-referencing symlinks. To overcome absence of os.lutime Works only on linux and OSX ATM """ from .cmd import WitlessRunner # convert mtime to format touch understands [[CC]YY]MMDDhhmm[.SS] smtime = time.strftime("%Y%m%d%H%M.%S", time.localtime(mtime)) lgr.log(3, "Setting mtime for %s to %s == %s", filepath, mtime, smtime) WitlessRunner().run(['touch', '-h', '-t', '%s' % smtime, filepath]) filepath = Path(filepath) rfilepath = filepath.resolve() if filepath.is_symlink() and rfilepath.exists(): # trust no one - adjust also of the target file # since it seemed like downloading under OSX (was it using curl?) # didn't bother with timestamps lgr.log(3, "File is a symlink to %s Setting mtime for it to %s", rfilepath, mtime) os.utime(str(rfilepath), (time.time(), mtime)) # doesn't work on OSX # Runner().run(['touch', '-h', '-d', '@%s' % mtime, filepath]) def ensure_tuple_or_list(obj): """Given an object, wrap into a tuple if not list or tuple """ if isinstance(obj, (list, tuple)): return obj return (obj,) def ensure_iter(s, cls, copy=False, iterate=True): """Given not a list, would place it into a list. If None - empty list is returned Parameters ---------- s: list or anything cls: class Which iterable class to ensure copy: bool, optional If correct iterable is passed, it would generate its shallow copy iterate: bool, optional If it is not a list, but something iterable (but not a str) iterate over it. """ if isinstance(s, cls): return s if not copy else shallow_copy(s) elif isinstance(s, str): return cls((s,)) elif iterate and hasattr(s, '__iter__'): return cls(s) elif s is None: return cls() else: return cls((s,)) def ensure_list(s, copy=False, iterate=True): """Given not a list, would place it into a list. If None - empty list is returned Parameters ---------- s: list or anything copy: bool, optional If list is passed, it would generate a shallow copy of the list iterate: bool, optional If it is not a list, but something iterable (but not a str) iterate over it. """ return ensure_iter(s, list, copy=copy, iterate=iterate) def ensure_list_from_str(s, sep='\n'): """Given a multiline string convert it to a list of return None if empty Parameters ---------- s: str or list """ if not s: return None if isinstance(s, list): return s return s.split(sep) def ensure_dict_from_str(s, **kwargs): """Given a multiline string with key=value items convert it to a dictionary Parameters ---------- s: str or dict Returns None if input s is empty """ if not s: return None if isinstance(s, dict): return s out = {} for value_str in ensure_list_from_str(s, **kwargs): if '=' not in value_str: raise ValueError("{} is not in key=value format".format(repr(value_str))) k, v = value_str.split('=', 1) if k in out: err = "key {} was already defined in {}, but new value {} was provided".format(k, out, v) raise ValueError(err) out[k] = v return out def ensure_bytes(s, encoding='utf-8'): """Convert/encode unicode string to bytes. If `s` isn't a string, return it as is. Parameters ---------- encoding: str, optional Encoding to use. "utf-8" is the default """ if not isinstance(s, str): return s return s.encode(encoding) def ensure_unicode(s, encoding=None, confidence=None): """Convert/decode bytestring to unicode. If `s` isn't a bytestring, return it as is. Parameters ---------- encoding: str, optional Encoding to use. 
If None, "utf-8" is tried, and then if not a valid UTF-8, encoding will be guessed confidence: float, optional A value between 0 and 1, so if guessing of encoding is of lower than specified confidence, ValueError is raised """ if not isinstance(s, bytes): return s if encoding is None: # Figure out encoding, defaulting to 'utf-8' which is our common # target in contemporary digital society try: return s.decode('utf-8') except UnicodeDecodeError as exc: lgr.debug("Failed to decode a string as utf-8: %s", CapturedException(exc)) # And now we could try to guess from chardet import detect enc = detect(s) denc = enc.get('encoding', None) if denc: denc_confidence = enc.get('confidence', 0) if confidence is not None and denc_confidence < confidence: raise ValueError( "Failed to auto-detect encoding with high enough " "confidence. Highest confidence was %s for %s" % (denc_confidence, denc) ) lgr.log(5, "Auto-detected encoding to be %s", denc) return s.decode(denc) else: raise ValueError( "Could not decode value as utf-8, or to guess its encoding: %s" % repr(s) ) else: return s.decode(encoding) def ensure_bool(s): """Convert value into boolean following convention for strings to recognize on,True,yes as True, off,False,no as False """ if isinstance(s, str): if s.isdigit(): return bool(int(s)) sl = s.lower() if sl in {'y', 'yes', 'true', 'on'}: return True elif sl in {'n', 'no', 'false', 'off'}: return False else: raise ValueError("Do not know how to treat %r as a boolean" % s) return bool(s) def as_unicode(val, cast_types=object): """Given an arbitrary value, would try to obtain unicode value of it For unicode it would return original value, for python2 str or python3 bytes it would use ensure_unicode, for None - an empty (unicode) string, and for any other type (see `cast_types`) - would apply the unicode constructor. If value is not an instance of `cast_types`, TypeError is thrown Parameters ---------- cast_types: type Which types to cast to unicode by providing to constructor """ if val is None: return u'' elif isinstance(val, str): return val elif isinstance(val, unicode_srctypes): return ensure_unicode(val) elif isinstance(val, cast_types): return str(val) else: raise TypeError( "Value %r is not of any of known or provided %s types" % (val, cast_types)) def unique(seq, key=None, reverse=False): """Given a sequence return a list only with unique elements while maintaining order This is the fastest solution. See https://www.peterbe.com/plog/uniqifiers-benchmark and http://stackoverflow.com/a/480227/1265472 for more information. Enhancement -- added ability to compare for uniqueness using a key function Parameters ---------- seq: Sequence to analyze key: callable, optional Function to call on each element so we could decide not on a full element, but on its member etc reverse: bool, optional If True, uniqueness checked in the reverse order, so that the later ones will take the order """ seen = set() seen_add = seen.add trans = reversed if reverse else lambda x: x if not key: out = [x for x in trans(seq) if not (x in seen or seen_add(x))] else: # OPT: could be optimized, since key is called twice, but for our cases # should be just as fine out = [x for x in trans(seq) if not (key(x) in seen or seen_add(key(x)))] return out[::-1] if reverse else out def all_same(items): """Quick check if all items are the same. 
Identical to a check like len(set(items)) == 1 but should be more efficient while working on generators, since would return False as soon as any difference detected thus possibly avoiding unnecessary evaluations """ first = True first_item = None for item in items: if first: first = False first_item = item else: if item != first_item: return False # So we return False if was empty return not first def map_items(func, v): """A helper to apply `func` to all elements (keys and values) within dict No type checking of values passed to func is done, so `func` should be resilient to values which it should not handle Initial usecase - apply_recursive(url_fragment, ensure_unicode) """ # map all elements within item return v.__class__( item.__class__(map(func, item)) for item in v.items() ) def partition(items, predicate=bool): """Partition `items` by `predicate`. Parameters ---------- items : iterable predicate : callable A function that will be mapped over each element in `items`. The elements will partitioned based on whether the return value is false or true. Returns ------- A tuple with two generators, the first for 'false' items and the second for 'true' ones. Notes ----- Taken from Peter Otten's snippet posted at https://nedbatchelder.com/blog/201306/filter_a_list_into_two_parts.html """ a, b = tee((predicate(item), item) for item in items) return ((item for pred, item in a if not pred), (item for pred, item in b if pred)) def generate_chunks(container, size): """Given a container, generate chunks from it with size up to `size` """ # There could be a "smarter" solution but I think this would suffice assert size > 0, "Size should be non-0 positive" while container: yield container[:size] container = container[size:] def generate_file_chunks(files, cmd=None): """Given a list of files, generate chunks of them to avoid exceeding cmdline length Parameters ---------- files: list of str cmd: str or list of str, optional Command to account for as well """ files = ensure_list(files) cmd = ensure_list(cmd) maxl = max(map(len, files)) if files else 0 chunk_size = max( 1, # should at least be 1. If blows then - not our fault (CMD_MAX_ARG - sum((len(x) + 3) for x in cmd) - 4 # for '--' below ) // (maxl + 3) # +3 for possible quotes and a space ) # TODO: additional treatment for "too many arguments"? although # as https://github.com/datalad/datalad/issues/1883#issuecomment # -436272758 # shows there seems to be no hardcoded limit on # of arguments, # but may be we decide to go for smth like follow to be on safe side # chunk_size = min(10240 - len(cmd), chunk_size) file_chunks = generate_chunks(files, chunk_size) return file_chunks # # Generators helpers # def saved_generator(gen): """Given a generator returns two generators, where 2nd one just replays So the first one would be going through the generated items and 2nd one would be yielding saved items """ saved = [] def gen1(): for x in gen: # iterating over original generator saved.append(x) yield x def gen2(): for x in saved: # yielding saved entries yield x return gen1(), gen2() # # Decorators # # Originally better_wraps was created to provide `wrapt`-based, instead of # `functools.wraps` implementation to preserve the correct signature of the # decorated function. By using inspect.signature in our getargspec, which # works fine on `functools.wraps`ed functions, we mediated this necessity. better_wraps = wraps # Borrowed from pandas # Copyright: 2011-2014, Lambda Foundry, Inc. 
and PyData Development Team # License: BSD-3 def optional_args(decorator): """allows a decorator to take optional positional and keyword arguments. Assumes that taking a single, callable, positional argument means that it is decorating a function, i.e. something like this:: @my_decorator def function(): pass Calls decorator with decorator(f, `*args`, `**kwargs`)""" @better_wraps(decorator) def wrapper(*args, **kwargs): def dec(f): return decorator(f, *args, **kwargs) is_decorating = not kwargs and len(args) == 1 and isinstance(args[0], Callable) if is_decorating: f = args[0] args = [] return dec(f) else: return dec return wrapper # TODO: just provide decorators for tempfile.mk* functions. This is ugly! def get_tempfile_kwargs(tkwargs=None, prefix="", wrapped=None): """Updates kwargs to be passed to tempfile. calls depending on env vars """ if tkwargs is None: tkwargs_ = {} else: # operate on a copy of tkwargs to avoid any side-effects tkwargs_ = tkwargs.copy() # TODO: don't remember why I had this one originally # if len(targs)<2 and \ if 'prefix' not in tkwargs_: tkwargs_['prefix'] = '_'.join( ['datalad_temp'] + ([prefix] if prefix else []) + ([''] if (on_windows or not wrapped) else [wrapped.__name__])) directory = os.environ.get('TMPDIR') if directory and 'dir' not in tkwargs_: tkwargs_['dir'] = directory return tkwargs_ @optional_args def line_profile(func): """Q&D helper to line profile the function and spit out stats """ import line_profiler prof = line_profiler.LineProfiler() @wraps(func) def _wrap_line_profile(*args, **kwargs): try: pfunc = prof(func) return pfunc(*args, **kwargs) finally: prof.print_stats() return _wrap_line_profile # unused in -core @optional_args def collect_method_callstats(func): """Figure out methods which call the method repeatedly on the same instance Use case(s): - .repo is expensive since does all kinds of checks. - .config is expensive transitively since it calls .repo each time TODO: - fancy one could look through the stack for the same id(self) to see if that location is already in memo. That would hint to the cases where object is not passed into underlying functions, causing them to redo the same work over and over again - ATM might flood with all "1 lines" calls which are not that informative. The underlying possibly suboptimal use might be coming from their callers. 
It might or not relate to the previous TODO """ from collections import defaultdict import traceback from time import time memo = defaultdict(lambda: defaultdict(int)) # it will be a dict of lineno: count # gross timing times = [] toppath = dirname(__file__) + sep @wraps(func) def _wrap_collect_method_callstats(*args, **kwargs): try: self = args[0] stack = traceback.extract_stack() caller = stack[-2] stack_sig = \ "{relpath}:{s.name}".format( s=caller, relpath=relpath(caller.filename, toppath)) sig = (id(self), stack_sig) # we will count based on id(self) + wherefrom memo[sig][caller.lineno] += 1 t0 = time() return func(*args, **kwargs) finally: times.append(time() - t0) pass def print_stats(): print("The cost of property {}:".format(func.__name__)) if not memo: print("None since no calls") return # total count counts = {k: sum(v.values()) for k,v in memo.items()} total = sum(counts.values()) ids = {self_id for (self_id, _) in memo} print(" Total: {} calls from {} objects with {} contexts taking {:.2f} sec" .format(total, len(ids), len(memo), sum(times))) # now we need to sort by value for (self_id, caller), count in sorted(counts.items(), key=lambda x: x[1], reverse=True): print(" {} {}: {} from {} lines" .format(self_id, caller, count, len(memo[(self_id, caller)]))) # Upon total exit we print the stats import atexit atexit.register(print_stats) return _wrap_collect_method_callstats # Borrowed from duecredit to wrap duecredit-handling to guarantee failsafe def never_fail(f): """Assure that function never fails -- all exceptions are caught Returns `None` if function fails internally. """ @wraps(f) def wrapped_func(*args, **kwargs): try: return f(*args, **kwargs) except Exception as e: lgr.warning( "DataLad internal failure while running %s: %r. " "Please report at https://github.com/datalad/datalad/issues" % (f, e) ) if os.environ.get('DATALAD_ALLOW_FAIL', False): return f else: return wrapped_func # # Context Managers # # unused in -core @contextmanager def nothing_cm(): """Just a dummy cm to programmically switch context managers""" yield @contextmanager def swallow_outputs(): """Context manager to help consuming both stdout and stderr, and print() stdout is available as cm.out and stderr as cm.err whenever cm is the yielded context manager. Internally uses temporary files to guarantee absent side-effects of swallowing into StringIO which lacks .fileno. print mocking is necessary for some uses where sys.stdout was already bound to original sys.stdout, thus mocking it later had no effect. 
Overriding print function had desired effect """ class StringIOAdapter(object): """Little adapter to help getting out/err values """ def __init__(self): kw = get_tempfile_kwargs({}, prefix="outputs") self._out = NamedTemporaryFile(delete=False, mode='w', **kw) self._err = NamedTemporaryFile(delete=False, mode='w', **kw) def _read(self, h): with open(h.name) as f: return f.read() @property def out(self): if not self._out.closed: self._out.flush() return self._read(self._out) @property def err(self): if not self._err.closed: self._err.flush() return self._read(self._err) @property def handles(self): return self._out, self._err def cleanup(self): self._out.close() self._err.close() out_name = self._out.name err_name = self._err.name from datalad import cfg if cfg.getbool('datalad.log', 'outputs', default=False) \ and lgr.getEffectiveLevel() <= logging.DEBUG: for s, sname in ((self.out, 'stdout'), (self.err, 'stderr')): if s: pref = os.linesep + "| " lgr.debug("Swallowed %s:%s%s", sname, pref, s.replace(os.linesep, pref)) else: lgr.debug("Nothing was swallowed for %s", sname) del self._out del self._err gc.collect() rmtemp(out_name) rmtemp(err_name) def fake_print(*args, **kwargs): sep = kwargs.pop('sep', ' ') end = kwargs.pop('end', '\n') file = kwargs.pop('file', sys.stdout) if file in (oldout, olderr, sys.stdout, sys.stderr): # we mock try: sys.stdout.write(sep.join(args) + end) except UnicodeEncodeError as exc: lgr.error( "Failed to write to mocked stdout, got %s, continue as it " "didn't happen", exc) else: # must be some other file one -- leave it alone oldprint(*args, sep=sep, end=end, file=file) from .ui import ui # preserve -- they could have been mocked already oldprint = getattr(builtins, 'print') oldout, olderr = sys.stdout, sys.stderr olduiout = ui.out adapter = StringIOAdapter() try: sys.stdout, sys.stderr = adapter.handles ui.out = adapter.handles[0] setattr(builtins, 'print', fake_print) yield adapter finally: sys.stdout, sys.stderr, ui.out = oldout, olderr, olduiout setattr(builtins, 'print', oldprint) adapter.cleanup() @contextmanager def swallow_logs(new_level=None, file_=None, name='datalad'): """Context manager to consume all logs. """ lgr = logging.getLogger(name) # Keep old settings old_level = lgr.level old_handlers = lgr.handlers # Let's log everything into a string # TODO: generalize with the one for swallow_outputs class StringIOAdapter(object): """Little adapter to help getting out values And to stay consistent with how swallow_outputs behaves """ def __init__(self): if file_ is None: kw = get_tempfile_kwargs({}, prefix="logs") self._out = NamedTemporaryFile(mode='a', delete=False, **kw) else: out_file = file_ # PY3 requires clearly one or another. race condition possible self._out = open(out_file, 'a') self._final_out = None def _read(self, h): with open(h.name) as f: return f.read() @property def out(self): if self._final_out is not None: # we closed and cleaned up already return self._final_out else: self._out.flush() return self._read(self._out) @property def lines(self): return self.out.split('\n') @property def handle(self): return self._out def cleanup(self): # store for access while object exists self._final_out = self.out self._out.close() out_name = self._out.name del self._out gc.collect() if not file_: rmtemp(out_name) def assert_logged(self, msg=None, level=None, regex=True, **kwargs): """Provide assertion on whether a msg was logged at a given level If neither `msg` nor `level` provided, checks if anything was logged at all. 
Parameters ---------- msg: str, optional Message (as a regular expression, if `regex`) to be searched. If no msg provided, checks if anything was logged at a given level. level: str, optional String representing the level to be logged regex: bool, optional If False, regular `assert_in` is used **kwargs: str, optional Passed to `assert_re_in` or `assert_in` """ from datalad.tests.utils import assert_re_in from datalad.tests.utils import assert_in if regex: match = r'\[%s\] ' % level if level else r"\[\S+\] " else: match = '[%s] ' % level if level else '' if msg: match += msg if match: (assert_re_in if regex else assert_in)(match, self.out, **kwargs) else: assert not kwargs, "no kwargs to be passed anywhere" assert self.out, "Nothing was logged!?" adapter = StringIOAdapter() # TODO: it does store messages but without any formatting, i.e. even without # date/time prefix etc. IMHO it should preserve formatting in case if file_ is # set swallow_handler = logging.StreamHandler(adapter.handle) # we want to log levelname so we could test against it swallow_handler.setFormatter( logging.Formatter('[%(levelname)s] %(message)s')) swallow_handler.filters = sum([h.filters for h in old_handlers], []) lgr.handlers = [swallow_handler] if old_level < logging.DEBUG: # so if HEAVYDEBUG etc -- show them! lgr.handlers += old_handlers if isinstance(new_level, str): new_level = getattr(logging, new_level) if new_level is not None: lgr.setLevel(new_level) try: yield adapter # TODO: if file_ and there was an exception -- most probably worth logging it? # although ideally it should be the next log outside added to that file_ ... oh well finally: lgr.handlers = old_handlers lgr.setLevel(old_level) adapter.cleanup() # TODO: May be melt in with swallow_logs at some point: @contextmanager def disable_logger(logger=None): """context manager to temporarily disable logging This is to provide one of swallow_logs' purposes without unnecessarily creating temp files (see gh-1865) Parameters ---------- logger: Logger Logger whose handlers will be ordered to not log anything. Default: datalad's topmost Logger ('datalad') """ class NullFilter(logging.Filter): """Filter class to reject all records """ def filter(self, record): return 0 if logger is None: # default: all of datalad's logging: logger = logging.getLogger('datalad') filter_ = NullFilter(logger.name) [h.addFilter(filter_) for h in logger.handlers] try: yield logger finally: [h.removeFilter(filter_) for h in logger.handlers] # # Additional handlers # _sys_excepthook = sys.excepthook # Just in case we ever need original one def setup_exceptionhook(ipython=False): """Overloads default sys.excepthook with our exceptionhook handler. If interactive, our exceptionhook handler will invoke pdb.post_mortem; if not interactive, then invokes default handler. """ def _datalad_pdb_excepthook(type, value, tb): import traceback traceback.print_exception(type, value, tb) print() if is_interactive(): import pdb pdb.post_mortem(tb) if ipython: from IPython.core import ultratb sys.excepthook = ultratb.FormattedTB(mode='Verbose', # color_scheme='Linux', call_pdb=is_interactive()) else: sys.excepthook = _datalad_pdb_excepthook def ensure_dir(*args): """Make sure directory exists. Joins the list of arguments to an os-specific path to the desired directory and creates it, if it not exists yet. 
""" dirname = op.join(*args) if not exists(dirname): os.makedirs(dirname) return dirname def updated(d, update): """Return a copy of the input with the 'update' Primarily for updating dictionaries """ d = d.copy() d.update(update) return d _pwd_mode = None def _switch_to_getcwd(msg, *args): global _pwd_mode _pwd_mode = 'cwd' lgr.debug( msg + ". From now on will be returning os.getcwd(). Directory" " symlinks in the paths will be resolved", *args ) # TODO: we might want to mitigate by going through all flywheighted # repos and tuning up their .paths to be resolved? def getpwd(): """Try to return a CWD without dereferencing possible symlinks This function will try to use PWD environment variable to provide a current working directory, possibly with some directories along the path being symlinks to other directories. Unfortunately, PWD is used/set only by the shell and such functions as `os.chdir` and `os.getcwd` nohow use or modify it, thus `os.getcwd()` returns path with links dereferenced. While returning current working directory based on PWD env variable we verify that the directory is the same as `os.getcwd()` after resolving all symlinks. If that verification fails, we fall back to always use `os.getcwd()`. Initial decision to either use PWD env variable or os.getcwd() is done upon the first call of this function. """ global _pwd_mode if _pwd_mode is None: # we need to decide! try: pwd = os.environ['PWD'] if on_windows and pwd and pwd.startswith('/'): # It should be a path from MSYS. # - it might start with a drive letter or not # - it seems to be "illegal" to have a single letter directories # under / path, i.e. if created - they aren't found # - 'ln -s' does not fail to create a "symlink" but it just # copies! # so we are not likely to need original PWD purpose on # those systems # Verdict: _pwd_mode = 'cwd' else: _pwd_mode = 'PWD' except KeyError: _pwd_mode = 'cwd' if _pwd_mode == 'cwd': return os.getcwd() elif _pwd_mode == 'PWD': try: cwd = os.getcwd() except OSError as exc: if "o such file" in str(exc): # directory was removed but we promised to be robust and # still report the path we might know since we are still in PWD # mode cwd = None else: raise try: pwd = os.environ['PWD'] # do absolute() in addition to always get an absolute path # even with non-existing paths on windows pwd_real = str(Path(pwd).resolve().absolute()) # This logic would fail to catch the case where chdir did happen # to the directory where current PWD is pointing to, e.g. # $> ls -ld $PWD # lrwxrwxrwx 1 yoh yoh 5 Oct 11 13:27 /home/yoh/.tmp/tmp -> /tmp// # hopa:~/.tmp/tmp # $> python -c 'import os; os.chdir("/tmp"); from datalad.utils import getpwd; print(getpwd(), os.getcwd())' # ('/home/yoh/.tmp/tmp', '/tmp') # but I guess that should not be too harmful if cwd is not None and pwd_real != cwd: _switch_to_getcwd( "realpath of PWD=%s is %s whenever os.getcwd()=%s", pwd, pwd_real, cwd ) return cwd return pwd except KeyError: _switch_to_getcwd("PWD env variable is no longer available") return cwd # Must not happen, but may be someone # evil purges PWD from environ? else: raise RuntimeError( "Must have not got here. " "pwd_mode must be either cwd or PWD. And it is now %r" % (_pwd_mode,) ) class chpwd(object): """Wrapper around os.chdir which also adjusts environ['PWD'] The reason is that otherwise PWD is simply inherited from the shell and we have no ability to assess directory path without dereferencing symlinks. 
If used as a context manager it allows to temporarily change directory to the given path """ def __init__(self, path, mkdir=False, logsuffix=''): if path: pwd = getpwd() self._prev_pwd = pwd else: self._prev_pwd = None return if not isabs(path): path = normpath(op.join(pwd, path)) if not os.path.exists(path) and mkdir: self._mkdir = True os.mkdir(path) else: self._mkdir = False lgr.debug("chdir %r -> %r %s", self._prev_pwd, path, logsuffix) os.chdir(path) # for grep people -- ok, to chdir here! os.environ['PWD'] = str(path) def __enter__(self): # nothing more to do really, chdir was in the constructor pass def __exit__(self, exc_type, exc_val, exc_tb): if self._prev_pwd: # Need to use self.__class__ so this instance, if the entire # thing mocked during the test, still would use correct chpwd self.__class__(self._prev_pwd, logsuffix="(coming back)") def dlabspath(path, norm=False): """Symlinks-in-the-cwd aware abspath os.path.abspath relies on os.getcwd() which would not know about symlinks in the path TODO: we might want to norm=True by default to match behavior of os .path.abspath? """ if not isabs(path): # if not absolute -- relative to pwd path = op.join(getpwd(), path) return normpath(path) if norm else path def with_pathsep(path): """Little helper to guarantee that path ends with /""" return path + sep if not path.endswith(sep) else path def get_path_prefix(path, pwd=None): """Get path prefix (for current directory) Returns relative path to the topdir, if we are under topdir, and if not absolute path to topdir. If `pwd` is not specified - current directory assumed """ pwd = pwd or getpwd() path = dlabspath(path) path_ = with_pathsep(path) pwd_ = with_pathsep(pwd) common = commonprefix((path_, pwd_)) if common.endswith(sep) and common in {path_, pwd_}: # we are in subdir or above the path = use relative path location_prefix = relpath(path, pwd) # if benign "here" - cut off if location_prefix in (curdir, curdir + sep): location_prefix = '' return location_prefix else: # just return absolute path return path def _get_normalized_paths(path, prefix): if isabs(path) != isabs(prefix): raise ValueError("Both paths must either be absolute or relative. " "Got %r and %r" % (path, prefix)) path = with_pathsep(path) prefix = with_pathsep(prefix) return path, prefix def path_startswith(path, prefix): """Return True if path starts with prefix path Parameters ---------- path: str prefix: str """ path, prefix = _get_normalized_paths(path, prefix) return path.startswith(prefix) def path_is_subpath(path, prefix): """Return True if path is a subpath of prefix It will return False if path == prefix. Parameters ---------- path: str prefix: str """ path, prefix = _get_normalized_paths(path, prefix) return (len(prefix) < len(path)) and path.startswith(prefix) def knows_annex(path): """Returns whether at a given path there is information about an annex It is just a thin wrapper around GitRepo.is_with_annex() classmethod which also checks for `path` to exist first. This includes actually present annexes, but also uninitialized ones, or even the presence of a remote annex branch. 
""" from os.path import exists if not exists(path): lgr.debug("No annex: test path {0} doesn't exist".format(path)) return False from datalad.support.gitrepo import GitRepo return GitRepo(path, init=False, create=False).is_with_annex() @contextmanager def make_tempfile(content=None, wrapped=None, **tkwargs): """Helper class to provide a temporary file name and remove it at the end (context manager) Parameters ---------- mkdir : bool, optional (default: False) If True, temporary directory created using tempfile.mkdtemp() content : str or bytes, optional Content to be stored in the file created wrapped : function, optional If set, function name used to prefix temporary file name `**tkwargs`: All other arguments are passed into the call to tempfile.mk{,d}temp(), and resultant temporary filename is passed as the first argument into the function t. If no 'prefix' argument is provided, it will be constructed using module and function names ('.' replaced with '_'). To change the used directory without providing keyword argument 'dir' set DATALAD_TESTS_TEMP_DIR. Examples -------- >>> from os.path import exists >>> from datalad.utils import make_tempfile >>> with make_tempfile() as fname: ... k = open(fname, 'w').write('silly test') >>> assert not exists(fname) # was removed >>> with make_tempfile(content="blah") as fname: ... assert open(fname).read() == "blah" """ if tkwargs.get('mkdir', None) and content is not None: raise ValueError("mkdir=True while providing content makes no sense") tkwargs_ = get_tempfile_kwargs(tkwargs, wrapped=wrapped) # if DATALAD_TESTS_TEMP_DIR is set, use that as directory, # let mktemp handle it otherwise. However, an explicitly provided # dir=... will override this. mkdir = tkwargs_.pop('mkdir', False) filename = {False: tempfile.mktemp, True: tempfile.mkdtemp}[mkdir](**tkwargs_) # MIH: not clear to me why we need to perform this (possibly expensive) # resolve. It was already part of the original implementation # 008d9ab8cc3e0170c0a9b8479e80dee9ffe6eb7f filename = Path(filename).resolve() if content: (filename.write_bytes if isinstance(content, bytes) else filename.write_text)(content) # TODO globbing below can also be done with pathlib filename = str(filename) if __debug__: lgr.debug( 'Created temporary %s named %s', 'directory' if mkdir else 'file', filename) try: yield filename finally: # glob here for all files with the same name (-suffix) # would be useful whenever we requested .img filename, # and function creates .hdr as well # MIH: this is undocumented behavior, and undesired in the general # case. it should be made conditional and explicit lsuffix = len(tkwargs_.get('suffix', '')) filename_ = lsuffix and filename[:-lsuffix] or filename filenames = glob.glob(filename_ + '*') if len(filename_) < 3 or len(filenames) > 5: # For paranoid yoh who stepped into this already ones ;-) lgr.warning("It is unlikely that it was intended to remove all" " files matching %r. 
Skipping" % filename_) return for f in filenames: try: rmtemp(f) except OSError: # pragma: no cover pass def _path_(*p): """Given a path in POSIX" notation, regenerate one in native to the env one""" if on_windows: return op.join(*map(lambda x: op.join(*x.split('/')), p)) else: # Assume that all others as POSIX compliant so nothing to be done return op.join(*p) def get_timestamp_suffix(time_=None, prefix='-'): """Return a time stamp (full date and time up to second) primarily to be used for generation of log files names """ args = [] if time_ is not None: if isinstance(time_, int): time_ = time.gmtime(time_) args.append(time_) return time.strftime(prefix + TIMESTAMP_FMT, *args) # unused in -core def get_logfilename(dspath, cmd='datalad'): """Return a filename to use for logging under a dataset/repository directory would be created if doesn't exist, but dspath must exist and be a directory """ assert(exists(dspath)) assert(isdir(dspath)) ds_logdir = ensure_dir(dspath, '.git', 'datalad', 'logs') # TODO: use WEB_META_LOG whenever #789 merged return op.join(ds_logdir, 'crawl-%s.log' % get_timestamp_suffix()) def get_trace(edges, start, end, trace=None): """Return the trace/path to reach a node in a tree. Parameters ---------- edges : sequence(2-tuple) The tree given by a sequence of edges (parent, child) tuples. The nodes can be identified by any value and data type that supports the '==' operation. start : Identifier of the start node. Must be present as a value in the parent location of an edge tuple in order to be found. end : Identifier of the target/end node. Must be present as a value in the child location of an edge tuple in order to be found. trace : list Mostly useful for recursive calls, and used internally. Returns ------- None or list Returns a list with the trace to the target (the starts and the target are not included in the trace, hence if start and end are directly connected an empty list is returned), or None when no trace to the target can be found, or start and end are identical. """ # the term trace is used to avoid confusion with a path in the sense # of a filesystem path, but the analogy fits and nodes can be paths if trace is None: trace = [] if not edges: raise ValueError("no edges given") for cand in edges: cand_super, cand_sub = cand if cand_sub in trace: # only DAGs, skip any cyclic traces continue if trace and cand_super != trace[-1]: # only consider edges that lead off the end of the trace continue if not trace and cand_super != start: # we got nothing yet, and this edges is not matching the start continue if cand_sub == end: return trace # dive into potential subnodes cand_trace = get_trace( edges, start, end, trace + [cand_sub]) if cand_trace: return cand_trace return None def get_dataset_root(path): """Return the root of an existent dataset containing a given path The root path is returned in the same absolute or relative form as the input argument. If no associated dataset exists, or the input path doesn't exist, None is returned. If `path` is a symlink or something other than a directory, its the root dataset containing its parent directory will be reported. If none can be found, at a symlink at `path` is pointing to a dataset, `path` itself will be reported as the root. 
Parameters ---------- path : Path-like Returns ------- str or None """ path = str(path) suffix = '.git' altered = None if islink(path) or not isdir(path): altered = path path = dirname(path) apath = abspath(path) # while we can still go up while split(apath)[1]: if exists(op.join(path, suffix)): return path # new test path in the format we got it path = normpath(op.join(path, os.pardir)) # no luck, next round apath = abspath(path) # if we applied dirname() at the top, we give it another go with # the actual path, if it was itself a symlink, it could be the # top-level dataset itself if altered and exists(op.join(altered, suffix)): return altered return None # ATM used in datalad_crawler extension, so do not remove yet def try_multiple(ntrials, exception, base, f, *args, **kwargs): """Call f multiple times making exponentially growing delay between the calls""" for trial in range(1, ntrials+1): try: return f(*args, **kwargs) except exception as exc: if trial == ntrials: raise # just reraise on the last trial t = base ** trial lgr.warning("Caught %s on trial #%d. Sleeping %f and retrying", CapturedException(exc), trial, t) sleep(t) @optional_args def try_multiple_dec( f, ntrials=None, duration=0.1, exceptions=None, increment_type=None, exceptions_filter=None, logger=None, ): """Decorator to try function multiple times. Main purpose is to decorate functions dealing with removal of files/directories and which might need a few seconds to work correctly on Windows which takes its time to release files/directories. Parameters ---------- ntrials: int, optional duration: float, optional Seconds to sleep before retrying. increment_type: {None, 'exponential'} Note that if it is exponential, duration should typically be > 1.0 so it grows with higher power exceptions: Exception or tuple of Exceptions, optional Exception or a tuple of multiple exceptions, on which to retry exceptions_filter: callable, optional If provided, this function will be called with a caught exception instance. If function returns True - we will re-try, if False - exception will be re-raised without retrying. logger: callable, optional Logger to log upon failure. If not provided, will use stock logger at the level of 5 (heavy debug). """ if not exceptions: exceptions = (OSError, WindowsError, PermissionError) \ if on_windows else OSError if not ntrials: # Life goes fast on proper systems, no need to delay it much ntrials = 100 if on_windows else 10 if logger is None: def logger(*args, **kwargs): return lgr.log(5, *args, **kwargs) assert increment_type in {None, 'exponential'} @wraps(f) def _wrap_try_multiple_dec(*args, **kwargs): t = duration for trial in range(ntrials): try: return f(*args, **kwargs) except exceptions as exc: if exceptions_filter and not exceptions_filter(exc): raise if trial < ntrials - 1: if increment_type == 'exponential': t = duration ** (trial + 1) logger( "Caught %s on trial #%d. Sleeping %f and retrying", CapturedException(exc), trial, t) sleep(t) else: raise return _wrap_try_multiple_dec @try_multiple_dec def unlink(f): """'Robust' unlink. Would try multiple times On windows boxes there is evidence for a latency of more than a second until a file is considered no longer "in-use". WindowsError is not known on Linux, and if IOError or any other exception is thrown then if except statement has WindowsError in it -- NameError also see gh-2533 """ # Check for open files assert_no_open_files(f) return os.unlink(f) @try_multiple_dec def _rmtree(*args, **kwargs): """Just a helper to decorate shutil.rmtree. 
rmtree defined above does more and ideally should not itself be decorated since a recursive definition and does checks for open files inside etc - might be too runtime expensive """ return shutil.rmtree(*args, **kwargs) def slash_join(base, extension): """Join two strings with a '/', avoiding duplicate slashes If any of the strings is None the other is returned as is. """ if extension is None: return base if base is None: return extension return '/'.join( (base.rstrip('/'), extension.lstrip('/'))) # # IO Helpers # # unused in -core def open_r_encdetect(fname, readahead=1000): """Return a file object in read mode with auto-detected encoding This is helpful when dealing with files of unknown encoding. Parameters ---------- readahead: int, optional How many bytes to read for guessing the encoding type. If negative - full file will be read """ from chardet import detect import io # read some bytes from the file with open(fname, 'rb') as f: head = f.read(readahead) enc = detect(head) denc = enc.get('encoding', None) lgr.debug("Auto-detected encoding %s for file %s (confidence: %s)", denc, fname, enc.get('confidence', 'unknown')) return io.open(fname, encoding=denc) def read_file(fname, decode=True): """A helper to read file passing content via ensure_unicode Parameters ---------- decode: bool, optional if False, no ensure_unicode and file content returned as bytes """ with open(fname, 'rb') as f: content = f.read() return ensure_unicode(content) if decode else content def read_csv_lines(fname, dialect=None, readahead=16384, **kwargs): """A generator of dict records from a CSV/TSV Automatically guesses the encoding for each record to convert to UTF-8 Parameters ---------- fname: str Filename dialect: str, optional Dialect to specify to csv.reader. If not specified -- guessed from the file, if fails to guess, "excel-tab" is assumed readahead: int, optional How many bytes to read from the file to guess the type **kwargs Passed to `csv.reader` """ import csv if dialect is None: with open(fname) as tsvfile: # add robustness, use a sniffer try: dialect = csv.Sniffer().sniff(tsvfile.read(readahead)) except Exception as exc: lgr.warning( 'Could not determine file-format, assuming TSV: %s', CapturedException(exc) ) dialect = 'excel-tab' kw = dict(encoding='utf-8') with open(fname, 'r', **kw) as tsvfile: # csv.py doesn't do Unicode; encode temporarily as UTF-8: csv_reader = csv.reader( tsvfile, dialect=dialect, **kwargs ) header = None for row in csv_reader: # decode UTF-8 back to Unicode, cell by cell: row_unicode = map(ensure_unicode, row) if header is None: header = list(row_unicode) else: yield dict(zip(header, row_unicode)) def import_modules(modnames, pkg, msg="Failed to import {module}", log=lgr.debug): """Helper to import a list of modules without failing if N/A Parameters ---------- modnames: list of str List of module names to import pkg: str Package under which to import msg: str, optional Message template for .format() to log at DEBUG level if import fails. Keys {module} and {package} will be provided and ': {exception}' appended log: callable, optional Logger call to use for logging messages """ from importlib import import_module _globals = globals() mods_loaded = [] if pkg and not pkg in sys.modules: # with python 3.5.1 (ok with 3.5.5) somehow kept running into # Failed to import dlsub1: Parent module 'dltestm1' not loaded # while running the test. 
Preloading pkg resolved the issue import_module(pkg) for modname in modnames: try: _globals[modname] = mod = import_module( '.{}'.format(modname), pkg) mods_loaded.append(mod) except Exception as exc: from datalad.support.exceptions import CapturedException ce = CapturedException(exc) log((msg + ': {exception}').format( module=modname, package=pkg, exception=ce.message)) return mods_loaded def import_module_from_file(modpath, pkg=None, log=lgr.debug): """Import provided module given a path TODO: - RF/make use of it in pipeline.py which has similar logic - join with import_modules above? Parameters ---------- pkg: module, optional If provided, and modpath is under pkg.__path__, relative import will be used """ assert(modpath.endswith('.py')) # for now just for .py files log("Importing %s" % modpath) modname = basename(modpath)[:-3] relmodpath = None if pkg: for pkgpath in pkg.__path__: if path_is_subpath(modpath, pkgpath): # for now relying on having .py extension -- assertion above relmodpath = '.' + relpath(modpath[:-3], pkgpath).replace(sep, '.') break try: if relmodpath: from importlib import import_module mod = import_module(relmodpath, pkg.__name__) else: dirname_ = dirname(modpath) try: sys.path.insert(0, dirname_) mod = __import__(modname, level=0) finally: if dirname_ in sys.path: sys.path.pop(sys.path.index(dirname_)) else: log("Expected path %s to be within sys.path, but it was gone!" % dirname_) except Exception as e: raise RuntimeError( "Failed to import module from %s" % modpath) from e return mod def get_encoding_info(): """Return a dictionary with various encoding/locale information""" import sys, locale from collections import OrderedDict return OrderedDict([ ('default', sys.getdefaultencoding()), ('filesystem', sys.getfilesystemencoding()), ('locale.prefered', locale.getpreferredencoding()), ]) def get_envvars_info(): from collections import OrderedDict envs = [] for var, val in os.environ.items(): if ( var.startswith('PYTHON') or var.startswith('LC_') or var.startswith('GIT_') or var in ('LANG', 'LANGUAGE', 'PATH') ): envs.append((var, val)) return OrderedDict(envs) # This class is modified from Snakemake (v5.1.4) class SequenceFormatter(string.Formatter): """string.Formatter subclass with special behavior for sequences. This class delegates formatting of individual elements to another formatter object. Non-list objects are formatted by calling the delegate formatter's "format_field" method. List-like objects (list, tuple, set, frozenset) are formatted by formatting each element of the list according to the specified format spec using the delegate formatter and then joining the resulting strings with a separator (space by default). """ def __init__(self, separator=" ", element_formatter=string.Formatter(), *args, **kwargs): self.separator = separator self.element_formatter = element_formatter def format_element(self, elem, format_spec): """Format a single element For sequences, this is called once for each element in a sequence. For anything else, it is called on the entire object. It is intended to be overridden in subclases. 
""" return self.element_formatter.format_field(elem, format_spec) def format_field(self, value, format_spec): if isinstance(value, (list, tuple, set, frozenset)): return self.separator.join(self.format_element(v, format_spec) for v in value) else: return self.format_element(value, format_spec) # TODO: eventually we might want to make use of attr module class File(object): """Helper for a file entry in the create_tree/@with_tree It allows to define additional settings for entries """ def __init__(self, name, executable=False): """ Parameters ---------- name : str Name of the file executable: bool, optional Make it executable """ self.name = name self.executable = executable def __str__(self): return self.name def create_tree_archive(path, name, load, overwrite=False, archives_leading_dir=True): """Given an archive `name`, create under `path` with specified `load` tree """ from datalad.support.archives import compress_files dirname = file_basename(name) full_dirname = op.join(path, dirname) os.makedirs(full_dirname) create_tree(full_dirname, load, archives_leading_dir=archives_leading_dir) # create archive if archives_leading_dir: compress_files([dirname], name, path=path, overwrite=overwrite) else: compress_files(list(map(basename, glob.glob(op.join(full_dirname, '*')))), op.join(pardir, name), path=op.join(path, dirname), overwrite=overwrite) # remove original tree rmtree(full_dirname) def create_tree(path, tree, archives_leading_dir=True, remove_existing=False): """Given a list of tuples (name, load) create such a tree if load is a tuple itself -- that would create either a subtree or an archive with that content and place it into the tree if name ends with .tar.gz """ lgr.log(5, "Creating a tree under %s", path) if not exists(path): os.makedirs(path) if isinstance(tree, dict): tree = tree.items() for file_, load in tree: if isinstance(file_, File): executable = file_.executable name = file_.name else: executable = False name = file_ full_name = op.join(path, name) if remove_existing and lexists(full_name): rmtree(full_name, chmod_files=True) if isinstance(load, (tuple, list, dict)): if name.endswith('.tar.gz') or name.endswith('.tar') or name.endswith('.zip'): create_tree_archive( path, name, load, archives_leading_dir=archives_leading_dir) else: create_tree( full_name, load, archives_leading_dir=archives_leading_dir, remove_existing=remove_existing) else: open_func = open if full_name.endswith('.gz'): open_func = gzip.open elif full_name.split('.')[-1] in ('xz', 'lzma'): import lzma open_func = lzma.open with open_func(full_name, "wb") as f: f.write(ensure_bytes(load, 'utf-8')) if executable: os.chmod(full_name, os.stat(full_name).st_mode | stat.S_IEXEC) def get_suggestions_msg(values, known, sep="\n "): """Return a formatted string with suggestions for values given the known ones """ import difflib suggestions = [] for value in ensure_list(values): # might not want to do it if we change presentation below suggestions += difflib.get_close_matches(value, known) suggestions = unique(suggestions) msg = "Did you mean any of these?" if suggestions: if '\n' in sep: # if separator includes new line - we add entire separator right away msg += sep else: msg += ' ' return msg + "%s\n" % sep.join(suggestions) return '' def bytes2human(n, format='%(value).1f %(symbol)sB'): """ Convert n bytes into a human readable string based on format. 
symbols can be either "customary", "customary_ext", "iec" or "iec_ext", see: http://goo.gl/kTQMs >>> from datalad.utils import bytes2human >>> bytes2human(1) '1.0 B' >>> bytes2human(1024) '1.0 KB' >>> bytes2human(1048576) '1.0 MB' >>> bytes2human(1099511627776127398123789121) '909.5 YB' >>> bytes2human(10000, "%(value).1f %(symbol)s/sec") '9.8 K/sec' >>> # precision can be adjusted by playing with %f operator >>> bytes2human(10000, format="%(value).5f %(symbol)s") '9.76562 K' Taken from: http://goo.gl/kTQMs and subsequently simplified Original Author: Giampaolo Rodola' <g.rodola [AT] gmail [DOT] com> License: MIT """ n = int(n) if n < 0: raise ValueError("n < 0") symbols = ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y') prefix = {} for i, s in enumerate(symbols[1:]): prefix[s] = 1 << (i + 1) * 10 for symbol in reversed(symbols[1:]): if n >= prefix[symbol]: value = float(n) / prefix[symbol] return format % locals() return format % dict(symbol=symbols[0], value=n) def quote_cmdlinearg(arg): """Perform platform-appropriate argument quoting""" # https://stackoverflow.com/a/15262019 return '"{}"'.format( arg.replace('"', '""') ) if on_windows else shlex_quote(arg) def guard_for_format(arg): """Replace { and } with {{ and }} To be used in cases if arg is not expected to have provided by user .format() placeholders, but 'arg' might become a part of a composite passed to .format(), e.g. via 'Run' """ return arg.replace('{', '{{').replace('}', '}}') def join_cmdline(args): """Join command line args into a string using quote_cmdlinearg """ return ' '.join(map(quote_cmdlinearg, args)) def split_cmdline(s): """Perform platform-appropriate command line splitting. Identical to `shlex.split()` on non-windows platforms. Modified from https://stackoverflow.com/a/35900070 """ if not on_windows: return shlex_split(s) # the rest is for windows RE_CMD_LEX = r'''"((?:""|\\["\\]|[^"])*)"?()|(\\\\(?=\\*")|\\")|(&&?|\|\|?|\d?>|[<])|([^\s"&|<>]+)|(\s+)|(.)''' args = [] accu = None # collects pieces of one arg for qs, qss, esc, pipe, word, white, fail in re.findall(RE_CMD_LEX, s): if word: pass # most frequent elif esc: word = esc[1] elif white or pipe: if accu is not None: args.append(accu) if pipe: args.append(pipe) accu = None continue elif fail: raise ValueError("invalid or incomplete shell string") elif qs: word = qs.replace('\\"', '"').replace('\\\\', '\\') if platform == 0: word = word.replace('""', '"') else: word = qss # may be even empty; must be last accu = (accu or '') + word if accu is not None: args.append(accu) return args def get_wrapped_class(wrapped): """Determine the command class a wrapped __call__ belongs to""" mod = sys.modules[wrapped.__module__] command_class_name = wrapped.__qualname__.split('.')[-2] _func_class = mod.__dict__[command_class_name] lgr.debug("Determined class of decorated function: %s", _func_class) return _func_class def _make_assure_kludge(fn): old_name = fn.__name__.replace("ensure", "assure") @wraps(fn) def compat_fn(*args, **kwargs): warnings.warn( "{} is deprecated and will be removed in a future release. " "Use {} instead." .format(old_name, fn.__name__), DeprecationWarning) return fn(*args, **kwargs) compat_fn.__doc__ = ("Note: This function is deprecated. Use {} instead." 
                         .format(fn.__name__))
    return compat_fn


assure_tuple_or_list = _make_assure_kludge(ensure_tuple_or_list)
assure_iter = _make_assure_kludge(ensure_iter)
assure_list = _make_assure_kludge(ensure_list)
assure_list_from_str = _make_assure_kludge(ensure_list_from_str)
assure_dict_from_str = _make_assure_kludge(ensure_dict_from_str)
assure_bytes = _make_assure_kludge(ensure_bytes)
assure_unicode = _make_assure_kludge(ensure_unicode)
assure_bool = _make_assure_kludge(ensure_bool)
assure_dir = _make_assure_kludge(ensure_dir)

lgr.log(5, "Done importing datalad.utils")


def check_symlink_capability(path, target):
    """Helper similar to datalad.tests.utils.has_symlink_capability

    However, for use in a datalad command context, we shouldn't assume to be
    able to write to a tmpfile, and we also don't want to import a whole lot
    from datalad's test machinery. Finally, we want to know whether we can
    create a symlink at a specific location, not just somewhere. Therefore use
    an arbitrary path to test-build a symlink and delete it afterwards. A
    suitable location can therefore be determined by higher-level code.

    Parameters
    ----------
    path: Path
    target: Path

    Returns
    -------
    bool
    """
    try:
        target.touch()
        path.symlink_to(target)
        return True
    except Exception:
        return False
    finally:
        if path.exists():
            path.unlink()
        if target.exists():
            target.unlink()
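# Illustrative usage sketch (not part of the original datalad.utils module):
# how calling code might probe a candidate location for symlink support
# before deciding between linking and copying. The helper name and the
# directory argument are hypothetical; `Path` is assumed to be the pathlib
# import made at module level above.
def _example_probe_symlink_support(candidate_dir):
    """Return True if a symlink can be created under `candidate_dir`."""
    probe = Path(candidate_dir)
    # throw-away names; check_symlink_capability removes them again
    return check_symlink_capability(probe / '.dl_link_probe',
                                    probe / '.dl_link_probe_target')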
unique
Given a sequence return a list only with unique elements while maintaining
order

This is the fastest solution.  See
https://www.peterbe.com/plog/uniqifiers-benchmark
and
http://stackoverflow.com/a/480227/1265472
for more information.
Enhancement -- added ability to compare for uniqueness using a key function

Parameters
----------
seq:
  Sequence to analyze
key: callable, optional
  Function to call on each element so we could decide not on a full
  element, but on its member etc
reverse: bool, optional
  If True, uniqueness checked in the reverse order, so that the later ones
  will take the order
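A small illustrative sketch of the semantics described above, assuming the
signature ``unique(seq, key=None, reverse=False)`` implied by the parameter
list (the reference implementation itself is masked in the code below):

    from datalad.utils import unique

    unique([1, 2, 1, 3])
    # -> [1, 2, 3]                     duplicates dropped, order kept
    unique([(1, 2), (1, 3), (2, 4)], key=lambda x: x[0])
    # -> [(1, 2), (2, 4)]              uniqueness decided by the key
    unique([(1, 2), (1, 3), (2, 4)], key=lambda x: x[0], reverse=True)
    # -> [(1, 3), (2, 4)]              later duplicates win when reverse=True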
# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*- # ex: set sts=4 ts=4 sw=4 et: # ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## # # See COPYING file distributed along with the datalad package for the # copyright and license terms. # # ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## import collections from collections.abc import Callable import re import builtins import time import logging import shutil import os import sys import tempfile from tempfile import NamedTemporaryFile import platform import gc import glob import gzip import stat import string import warnings import os.path as op from copy import copy as shallow_copy from contextlib import contextmanager from functools import ( lru_cache, wraps, ) from time import sleep import inspect from itertools import tee # this import is required because other modules import opj from here. from os.path import join as opj from os.path import ( abspath, basename, commonprefix, curdir, dirname, exists, expanduser, expandvars, isabs, isdir, islink, lexists, normpath, pardir, relpath, sep, split, splitdrive ) import posixpath from shlex import ( quote as shlex_quote, split as shlex_split, ) # from datalad.dochelpers import get_docstring_split from datalad.consts import TIMESTAMP_FMT from datalad.support.exceptions import CapturedException unicode_srctypes = str, bytes lgr = logging.getLogger("datalad.utils") lgr.log(5, "Importing datalad.utils") # # Some useful variables # platform_system = platform.system().lower() on_windows = platform_system == 'windows' on_osx = platform_system == 'darwin' on_linux = platform_system == 'linux' on_msys_tainted_paths = on_windows \ and 'MSYS_NO_PATHCONV' not in os.environ \ and os.environ.get('MSYSTEM', '')[:4] in ('MSYS', 'MING') # Takes ~200msec, so should not be called at import time @lru_cache() # output should not change through life time of datalad process def get_linux_distribution(): """Compatibility wrapper for {platform,distro}.linux_distribution(). """ if hasattr(platform, "linux_distribution"): # Use deprecated (but faster) method if it's available. with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=DeprecationWarning) result = platform.linux_distribution() else: import distro # We require this for Python 3.8 and above. result = distro.linux_distribution(full_distribution_name=False) return result # Those weren't used for any critical decision making, thus we just set them to None # Use get_linux_distribution() directly where needed linux_distribution_name = linux_distribution_release = None # Maximal length of cmdline string # Query the system and use hardcoded "knowledge" if None # probably getconf ARG_MAX might not be available # The last one would be the most conservative/Windows CMD_MAX_ARG_HARDCODED = 2097152 if on_linux else 262144 if on_osx else 32767 try: CMD_MAX_ARG = os.sysconf('SC_ARG_MAX') assert CMD_MAX_ARG > 0 if CMD_MAX_ARG > CMD_MAX_ARG_HARDCODED * 1e6: # workaround for some kind of a bug which comes up with python 3.4 # see https://github.com/datalad/datalad/issues/3150 # or on older CentOS with conda and python as new as 3.9 # see https://github.com/datalad/datalad/issues/5943 # TODO: let Yarik know that the world is a paradise now whenever 1e6 # is not large enough CMD_MAX_ARG = min(CMD_MAX_ARG, CMD_MAX_ARG_HARDCODED) except Exception as exc: # ATM (20181005) SC_ARG_MAX available only on POSIX systems # so exception would be thrown e.g. 
on Windows, or # somehow during Debian build for nd14.04 it is coming up with -1: # https://github.com/datalad/datalad/issues/3015 CMD_MAX_ARG = CMD_MAX_ARG_HARDCODED lgr.debug( "Failed to query or got useless SC_ARG_MAX sysconf, " "will use hardcoded value: %s", exc) # Even with all careful computations we do, due to necessity to account for # environment and what not, we still could not figure out "exact" way to # estimate it, but it was shown that 300k safety margin on linux was sufficient. # https://github.com/datalad/datalad/pull/2977#issuecomment-436264710 # 300k is ~15%, so to be safe, and for paranoid us we will just use up to 50% # of the length for "safety margin". We might probably still blow due to # env vars, unicode, etc... so any hard limit imho is not a proper solution CMD_MAX_ARG = int(0.5 * CMD_MAX_ARG) lgr.debug( "Maximal length of cmdline string (adjusted for safety margin): %d", CMD_MAX_ARG) # # Little helpers # # `getargspec` has been deprecated in Python 3. ArgSpecFake = collections.namedtuple( "ArgSpecFake", ["args", "varargs", "keywords", "defaults"]) def getargspec(func, *, include_kwonlyargs=False): """Compat shim for getargspec deprecated in python 3. The main difference from inspect.getargspec (and inspect.getfullargspec for that matter) is that by using inspect.signature we are providing correct args/defaults for functools.wraps'ed functions. `include_kwonlyargs` option was added to centralize getting all args, even the ones which are kwonly (follow the ``*,``). For internal use and not advised for use in 3rd party code. Please use inspect.signature directly. """ # We use signature, and not getfullargspec, because only signature properly # "passes" args from a functools.wraps decorated function. # Note: getfullargspec works Ok on wrapt-decorated functions f_sign = inspect.signature(func) # Loop through parameters and compose argspec args4 = [[], None, None, {}] # Collect all kwonlyargs into a dedicated dict - name: default kwonlyargs = {} # shortcuts args, defaults = args4[0], args4[3] P = inspect.Parameter for p_name, p in f_sign.parameters.items(): if p.kind in (P.POSITIONAL_ONLY, P.POSITIONAL_OR_KEYWORD): assert not kwonlyargs # yoh: must not come after kwonlyarg args.append(p_name) if p.default is not P.empty: defaults[p_name] = p.default elif p.kind == P.VAR_POSITIONAL: args4[1] = p_name elif p.kind == P.VAR_KEYWORD: args4[2] = p_name elif p.kind == P.KEYWORD_ONLY: assert p.default is not P.empty kwonlyargs[p_name] = p.default if kwonlyargs: if not include_kwonlyargs: raise ValueError( 'Function has keyword-only parameters or annotations, either use ' 'inspect.signature() API which can support them, or provide include_kwonlyargs=True ' 'to this function' ) else: args.extend(list(kwonlyargs)) defaults.update(kwonlyargs) # harmonize defaults to how original getargspec returned them -- just a tuple args4[3] = None if not defaults else tuple(defaults.values()) return ArgSpecFake(*args4) def any_re_search(regexes, value): """Return if any of regexes (list or str) searches successfully for value""" for regex in ensure_tuple_or_list(regexes): if re.search(regex, value): return True return False def not_supported_on_windows(msg=None): """A little helper to be invoked to consistently fail whenever functionality is not supported (yet) on Windows """ if on_windows: raise NotImplementedError("This functionality is not yet implemented for Windows OS" + (": %s" % msg if msg else "")) def get_home_envvars(new_home): """Return dict with env variables to be adjusted for a 
new HOME Only variables found in current os.environ are adjusted. Parameters ---------- new_home: str or Path New home path, in native to OS "schema" """ new_home = str(new_home) out = {'HOME': new_home} if on_windows: # requires special handling, since it has a number of relevant variables # and also Python changed its behavior and started to respect USERPROFILE only # since python 3.8: https://bugs.python.org/issue36264 out['USERPROFILE'] = new_home out['HOMEDRIVE'], out['HOMEPATH'] = splitdrive(new_home) return {v: val for v, val in out.items() if v in os.environ} def shortened_repr(value, l=30): try: if hasattr(value, '__repr__') and (value.__repr__ is not object.__repr__): value_repr = repr(value) if not value_repr.startswith('<') and len(value_repr) > l: value_repr = "<<%s++%d chars++%s>>" % ( value_repr[:l - 16], len(value_repr) - (l - 16 + 4), value_repr[-4:] ) elif value_repr.startswith('<') and value_repr.endswith('>') and ' object at 0x': raise ValueError("I hate those useless long reprs") else: raise ValueError("gimme class") except Exception as e: value_repr = "<%s>" % value.__class__.__name__.split('.')[-1] return value_repr def __auto_repr__(obj): attr_names = tuple() if hasattr(obj, '__dict__'): attr_names += tuple(obj.__dict__.keys()) if hasattr(obj, '__slots__'): attr_names += tuple(obj.__slots__) items = [] for attr in sorted(set(attr_names)): if attr.startswith('_'): continue value = getattr(obj, attr) # TODO: should we add this feature to minimize some talktative reprs # such as of URL? #if value is None: # continue items.append("%s=%s" % (attr, shortened_repr(value))) return "%s(%s)" % (obj.__class__.__name__, ', '.join(items)) def auto_repr(cls): """Decorator for a class to assign it an automagic quick and dirty __repr__ It uses public class attributes to prepare repr of a class Original idea: http://stackoverflow.com/a/27799004/1265472 """ cls.__repr__ = __auto_repr__ return cls def _is_stream_tty(stream): try: # TODO: check on windows if hasattr check would work correctly and # add value: return stream.isatty() except ValueError as exc: # Who knows why it is a ValueError, but let's try to be specific # If there is a problem with I/O - non-interactive, otherwise reraise if "I/O" in str(exc): return False raise def is_interactive(): """Return True if all in/outs are open and tty. Note that in a somewhat abnormal case where e.g. stdin is explicitly closed, and any operation on it would raise a `ValueError("I/O operation on closed file")` exception, this function would just return False, since the session cannot be used interactively. 
""" return all(_is_stream_tty(s) for s in (sys.stdin, sys.stdout, sys.stderr)) def get_ipython_shell(): """Detect if running within IPython and returns its `ip` (shell) object Returns None if not under ipython (no `get_ipython` function) """ try: return get_ipython() except NameError: return None def md5sum(filename): """Compute an MD5 sum for the given file """ from datalad.support.digests import Digester return Digester(digests=['md5'])(filename)['md5'] # unused in -core def sorted_files(path): """Return a (sorted) list of files under path """ return sorted(sum([[op.join(r, f)[len(path) + 1:] for f in files] for r, d, files in os.walk(path) if not '.git' in r], [])) _encoded_dirsep = r'\\' if on_windows else r'/' _VCS_REGEX = r'%s\.(?:git|gitattributes|svn|bzr|hg)(?:%s|$)' % ( _encoded_dirsep, _encoded_dirsep) _DATALAD_REGEX = r'%s\.(?:datalad)(?:%s|$)' % ( _encoded_dirsep, _encoded_dirsep) def find_files(regex, topdir=curdir, exclude=None, exclude_vcs=True, exclude_datalad=False, dirs=False): """Generator to find files matching regex Parameters ---------- regex: basestring exclude: basestring, optional Matches to exclude exclude_vcs: If True, excludes commonly known VCS subdirectories. If string, used as regex to exclude those files (regex: `%r`) exclude_datalad: If True, excludes files known to be datalad meta-data files (e.g. under .datalad/ subdirectory) (regex: `%r`) topdir: basestring, optional Directory where to search dirs: bool, optional Whether to match directories as well as files """ for dirpath, dirnames, filenames in os.walk(topdir): names = (dirnames + filenames) if dirs else filenames # TODO: might want to uniformize on windows to use '/' paths = (op.join(dirpath, name) for name in names) for path in filter(re.compile(regex).search, paths): path = path.rstrip(sep) if exclude and re.search(exclude, path): continue if exclude_vcs and re.search(_VCS_REGEX, path): continue if exclude_datalad and re.search(_DATALAD_REGEX, path): continue yield path find_files.__doc__ %= (_VCS_REGEX, _DATALAD_REGEX) def expandpath(path, force_absolute=True): """Expand all variables and user handles in a path. By default return an absolute path """ path = expandvars(expanduser(path)) if force_absolute: path = abspath(path) return path def posix_relpath(path, start=None): """Behave like os.path.relpath, but always return POSIX paths... on any platform.""" # join POSIX style return posixpath.join( # split and relpath native style # python2.7 ntpath implementation of relpath cannot handle start=None *split( relpath(path, start=start if start is not None else ''))) def is_explicit_path(path): """Return whether a path explicitly points to a location Any absolute path, or relative path starting with either '../' or './' is assumed to indicate a location on the filesystem. 
Any other path format is not considered explicit.""" path = expandpath(path, force_absolute=False) return isabs(path) \ or path.startswith(os.curdir + os.sep) \ or path.startswith(os.pardir + os.sep) # handle this dance once, and import pathlib from here # in all other places from pathlib import ( Path, PurePath, PurePosixPath, ) def rotree(path, ro=True, chmod_files=True): """To make tree read-only or writable Parameters ---------- path : string Path to the tree/directory to chmod ro : bool, optional Whether to make it R/O (default) or RW chmod_files : bool, optional Whether to operate also on files (not just directories) """ if ro: chmod = lambda f: os.chmod(f, os.stat(f).st_mode & ~stat.S_IWRITE) else: chmod = lambda f: os.chmod(f, os.stat(f).st_mode | stat.S_IWRITE | stat.S_IREAD) for root, dirs, files in os.walk(path, followlinks=False): if chmod_files: for f in files: fullf = op.join(root, f) # might be the "broken" symlink which would fail to stat etc if exists(fullf): chmod(fullf) chmod(root) def rmtree(path, chmod_files='auto', children_only=False, *args, **kwargs): """To remove git-annex .git it is needed to make all files and directories writable again first Parameters ---------- path: Path or str Path to remove chmod_files : string or bool, optional Whether to make files writable also before removal. Usually it is just a matter of directories to have write permissions. If 'auto' it would chmod files on windows by default children_only : bool, optional If set, all files and subdirectories would be removed while the path itself (must be a directory) would be preserved `*args` : `**kwargs` : Passed into shutil.rmtree call """ # Give W permissions back only to directories, no need to bother with files if chmod_files == 'auto': chmod_files = on_windows # TODO: yoh thinks that if we could quickly check our Flyweight for # repos if any of them is under the path, and could call .precommit # on those to possibly stop batched processes etc, we did not have # to do it on case by case # Check for open files assert_no_open_files(path) # TODO the whole thing should be reimplemented with pathlib, but for now # at least accept Path path = str(path) if children_only: if not isdir(path): raise ValueError("Can remove children only of directories") for p in os.listdir(path): rmtree(op.join(path, p)) return if not (islink(path) or not isdir(path)): rotree(path, ro=False, chmod_files=chmod_files) if on_windows: # shutil fails to remove paths that exceed 260 characters on Windows machines # that did not enable long path support. A workaround to remove long paths # anyway is to preprend \\?\ to the path. # https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file?redirectedfrom=MSDN#win32-file-namespaces path = r'\\?\ '.strip() + path _rmtree(path, *args, **kwargs) else: # just remove the symlink unlink(path) def rmdir(path, *args, **kwargs): """os.rmdir with our optional checking for open files""" assert_no_open_files(path) os.rmdir(path) def get_open_files(path, log_open=False): """Get open files under a path Note: This function is very slow on Windows. 
Parameters ---------- path : str File or directory to check for open files under log_open : bool or int If set - logger level to use Returns ------- dict path : pid """ # Original idea: https://stackoverflow.com/a/11115521/1265472 import psutil files = {} # since the ones returned by psutil would not be aware of symlinks in the # path we should also get realpath for path # do absolute() in addition to always get an absolute path # even with non-existing paths on windows path = str(Path(path).resolve().absolute()) for proc in psutil.process_iter(): try: open_paths = [p.path for p in proc.open_files()] + [proc.cwd()] for p in open_paths: # note: could be done more efficiently so we do not # renormalize path over and over again etc if path_startswith(p, path): files[p] = proc # Catch a race condition where a process ends # before we can examine its files except psutil.NoSuchProcess: pass except psutil.AccessDenied: pass if files and log_open: lgr.log(log_open, "Open files under %s: %s", path, files) return files _assert_no_open_files_cfg = os.environ.get('DATALAD_ASSERT_NO_OPEN_FILES') if _assert_no_open_files_cfg: def assert_no_open_files(path): files = get_open_files(path, log_open=40) if _assert_no_open_files_cfg == 'assert': assert not files, "Got following files still open: %s" % ','.join(files) elif files: if _assert_no_open_files_cfg == 'pdb': import pdb pdb.set_trace() elif _assert_no_open_files_cfg == 'epdb': import epdb epdb.serve() pass # otherwise we would just issue that error message in the log else: def assert_no_open_files(*args, **kwargs): pass def rmtemp(f, *args, **kwargs): """Wrapper to centralize removing of temp files so we could keep them around It will not remove the temporary file/directory if DATALAD_TESTS_TEMP_KEEP environment variable is defined """ if not os.environ.get('DATALAD_TESTS_TEMP_KEEP'): if not os.path.lexists(f): lgr.debug("Path %s does not exist, so can't be removed", f) return lgr.log(5, "Removing temp file: %s", f) # Can also be a directory if isdir(f): rmtree(f, *args, **kwargs) else: unlink(f) else: lgr.info("Keeping temp file: %s", f) def file_basename(name, return_ext=False): """ Strips up to 2 extensions of length up to 4 characters and starting with alpha not a digit, so we could get rid of .tar.gz etc """ bname = basename(name) fbname = re.sub(r'(\.[a-zA-Z_]\S{1,4}){0,2}$', '', bname) if return_ext: return fbname, bname[len(fbname) + 1:] else: return fbname # unused in -core def escape_filename(filename): """Surround filename in "" and escape " in the filename """ filename = filename.replace('"', r'\"').replace('`', r'\`') filename = '"%s"' % filename return filename # unused in -core def encode_filename(filename): """Encode unicode filename """ if isinstance(filename, str): return filename.encode(sys.getfilesystemencoding()) else: return filename # unused in -core def decode_input(s): """Given input string/bytes, decode according to stdin codepage (or UTF-8) if not defined If fails -- issue warning and decode allowing for errors being replaced """ if isinstance(s, str): return s else: encoding = sys.stdin.encoding or 'UTF-8' try: return s.decode(encoding) except UnicodeDecodeError as exc: lgr.warning( "Failed to decode input string using %s encoding. " "Decoding allowing for errors", encoding) return s.decode(encoding, errors='replace') # unused in -core if on_windows: def lmtime(filepath, mtime): """Set mtime for files. 
On Windows a merely adapter to os.utime """ os.utime(filepath, (time.time(), mtime)) else: def lmtime(filepath, mtime): """Set mtime for files, while not de-referencing symlinks. To overcome absence of os.lutime Works only on linux and OSX ATM """ from .cmd import WitlessRunner # convert mtime to format touch understands [[CC]YY]MMDDhhmm[.SS] smtime = time.strftime("%Y%m%d%H%M.%S", time.localtime(mtime)) lgr.log(3, "Setting mtime for %s to %s == %s", filepath, mtime, smtime) WitlessRunner().run(['touch', '-h', '-t', '%s' % smtime, filepath]) filepath = Path(filepath) rfilepath = filepath.resolve() if filepath.is_symlink() and rfilepath.exists(): # trust no one - adjust also of the target file # since it seemed like downloading under OSX (was it using curl?) # didn't bother with timestamps lgr.log(3, "File is a symlink to %s Setting mtime for it to %s", rfilepath, mtime) os.utime(str(rfilepath), (time.time(), mtime)) # doesn't work on OSX # Runner().run(['touch', '-h', '-d', '@%s' % mtime, filepath]) def ensure_tuple_or_list(obj): """Given an object, wrap into a tuple if not list or tuple """ if isinstance(obj, (list, tuple)): return obj return (obj,) def ensure_iter(s, cls, copy=False, iterate=True): """Given not a list, would place it into a list. If None - empty list is returned Parameters ---------- s: list or anything cls: class Which iterable class to ensure copy: bool, optional If correct iterable is passed, it would generate its shallow copy iterate: bool, optional If it is not a list, but something iterable (but not a str) iterate over it. """ if isinstance(s, cls): return s if not copy else shallow_copy(s) elif isinstance(s, str): return cls((s,)) elif iterate and hasattr(s, '__iter__'): return cls(s) elif s is None: return cls() else: return cls((s,)) def ensure_list(s, copy=False, iterate=True): """Given not a list, would place it into a list. If None - empty list is returned Parameters ---------- s: list or anything copy: bool, optional If list is passed, it would generate a shallow copy of the list iterate: bool, optional If it is not a list, but something iterable (but not a str) iterate over it. """ return ensure_iter(s, list, copy=copy, iterate=iterate) def ensure_list_from_str(s, sep='\n'): """Given a multiline string convert it to a list of return None if empty Parameters ---------- s: str or list """ if not s: return None if isinstance(s, list): return s return s.split(sep) def ensure_dict_from_str(s, **kwargs): """Given a multiline string with key=value items convert it to a dictionary Parameters ---------- s: str or dict Returns None if input s is empty """ if not s: return None if isinstance(s, dict): return s out = {} for value_str in ensure_list_from_str(s, **kwargs): if '=' not in value_str: raise ValueError("{} is not in key=value format".format(repr(value_str))) k, v = value_str.split('=', 1) if k in out: err = "key {} was already defined in {}, but new value {} was provided".format(k, out, v) raise ValueError(err) out[k] = v return out def ensure_bytes(s, encoding='utf-8'): """Convert/encode unicode string to bytes. If `s` isn't a string, return it as is. Parameters ---------- encoding: str, optional Encoding to use. "utf-8" is the default """ if not isinstance(s, str): return s return s.encode(encoding) def ensure_unicode(s, encoding=None, confidence=None): """Convert/decode bytestring to unicode. If `s` isn't a bytestring, return it as is. Parameters ---------- encoding: str, optional Encoding to use. 
If None, "utf-8" is tried, and then if not a valid UTF-8, encoding will be guessed confidence: float, optional A value between 0 and 1, so if guessing of encoding is of lower than specified confidence, ValueError is raised """ if not isinstance(s, bytes): return s if encoding is None: # Figure out encoding, defaulting to 'utf-8' which is our common # target in contemporary digital society try: return s.decode('utf-8') except UnicodeDecodeError as exc: lgr.debug("Failed to decode a string as utf-8: %s", CapturedException(exc)) # And now we could try to guess from chardet import detect enc = detect(s) denc = enc.get('encoding', None) if denc: denc_confidence = enc.get('confidence', 0) if confidence is not None and denc_confidence < confidence: raise ValueError( "Failed to auto-detect encoding with high enough " "confidence. Highest confidence was %s for %s" % (denc_confidence, denc) ) lgr.log(5, "Auto-detected encoding to be %s", denc) return s.decode(denc) else: raise ValueError( "Could not decode value as utf-8, or to guess its encoding: %s" % repr(s) ) else: return s.decode(encoding) def ensure_bool(s): """Convert value into boolean following convention for strings to recognize on,True,yes as True, off,False,no as False """ if isinstance(s, str): if s.isdigit(): return bool(int(s)) sl = s.lower() if sl in {'y', 'yes', 'true', 'on'}: return True elif sl in {'n', 'no', 'false', 'off'}: return False else: raise ValueError("Do not know how to treat %r as a boolean" % s) return bool(s) def as_unicode(val, cast_types=object): """Given an arbitrary value, would try to obtain unicode value of it For unicode it would return original value, for python2 str or python3 bytes it would use ensure_unicode, for None - an empty (unicode) string, and for any other type (see `cast_types`) - would apply the unicode constructor. If value is not an instance of `cast_types`, TypeError is thrown Parameters ---------- cast_types: type Which types to cast to unicode by providing to constructor """ if val is None: return u'' elif isinstance(val, str): return val elif isinstance(val, unicode_srctypes): return ensure_unicode(val) elif isinstance(val, cast_types): return str(val) else: raise TypeError( "Value %r is not of any of known or provided %s types" % (val, cast_types)) # MASKED: unique function (lines 900-933) def all_same(items): """Quick check if all items are the same. Identical to a check like len(set(items)) == 1 but should be more efficient while working on generators, since would return False as soon as any difference detected thus possibly avoiding unnecessary evaluations """ first = True first_item = None for item in items: if first: first = False first_item = item else: if item != first_item: return False # So we return False if was empty return not first def map_items(func, v): """A helper to apply `func` to all elements (keys and values) within dict No type checking of values passed to func is done, so `func` should be resilient to values which it should not handle Initial usecase - apply_recursive(url_fragment, ensure_unicode) """ # map all elements within item return v.__class__( item.__class__(map(func, item)) for item in v.items() ) def partition(items, predicate=bool): """Partition `items` by `predicate`. Parameters ---------- items : iterable predicate : callable A function that will be mapped over each element in `items`. The elements will partitioned based on whether the return value is false or true. 
Returns ------- A tuple with two generators, the first for 'false' items and the second for 'true' ones. Notes ----- Taken from Peter Otten's snippet posted at https://nedbatchelder.com/blog/201306/filter_a_list_into_two_parts.html """ a, b = tee((predicate(item), item) for item in items) return ((item for pred, item in a if not pred), (item for pred, item in b if pred)) def generate_chunks(container, size): """Given a container, generate chunks from it with size up to `size` """ # There could be a "smarter" solution but I think this would suffice assert size > 0, "Size should be non-0 positive" while container: yield container[:size] container = container[size:] def generate_file_chunks(files, cmd=None): """Given a list of files, generate chunks of them to avoid exceeding cmdline length Parameters ---------- files: list of str cmd: str or list of str, optional Command to account for as well """ files = ensure_list(files) cmd = ensure_list(cmd) maxl = max(map(len, files)) if files else 0 chunk_size = max( 1, # should at least be 1. If blows then - not our fault (CMD_MAX_ARG - sum((len(x) + 3) for x in cmd) - 4 # for '--' below ) // (maxl + 3) # +3 for possible quotes and a space ) # TODO: additional treatment for "too many arguments"? although # as https://github.com/datalad/datalad/issues/1883#issuecomment # -436272758 # shows there seems to be no hardcoded limit on # of arguments, # but may be we decide to go for smth like follow to be on safe side # chunk_size = min(10240 - len(cmd), chunk_size) file_chunks = generate_chunks(files, chunk_size) return file_chunks # # Generators helpers # def saved_generator(gen): """Given a generator returns two generators, where 2nd one just replays So the first one would be going through the generated items and 2nd one would be yielding saved items """ saved = [] def gen1(): for x in gen: # iterating over original generator saved.append(x) yield x def gen2(): for x in saved: # yielding saved entries yield x return gen1(), gen2() # # Decorators # # Originally better_wraps was created to provide `wrapt`-based, instead of # `functools.wraps` implementation to preserve the correct signature of the # decorated function. By using inspect.signature in our getargspec, which # works fine on `functools.wraps`ed functions, we mediated this necessity. better_wraps = wraps # Borrowed from pandas # Copyright: 2011-2014, Lambda Foundry, Inc. and PyData Development Team # License: BSD-3 def optional_args(decorator): """allows a decorator to take optional positional and keyword arguments. Assumes that taking a single, callable, positional argument means that it is decorating a function, i.e. something like this:: @my_decorator def function(): pass Calls decorator with decorator(f, `*args`, `**kwargs`)""" @better_wraps(decorator) def wrapper(*args, **kwargs): def dec(f): return decorator(f, *args, **kwargs) is_decorating = not kwargs and len(args) == 1 and isinstance(args[0], Callable) if is_decorating: f = args[0] args = [] return dec(f) else: return dec return wrapper # TODO: just provide decorators for tempfile.mk* functions. This is ugly! def get_tempfile_kwargs(tkwargs=None, prefix="", wrapped=None): """Updates kwargs to be passed to tempfile. 
calls depending on env vars """ if tkwargs is None: tkwargs_ = {} else: # operate on a copy of tkwargs to avoid any side-effects tkwargs_ = tkwargs.copy() # TODO: don't remember why I had this one originally # if len(targs)<2 and \ if 'prefix' not in tkwargs_: tkwargs_['prefix'] = '_'.join( ['datalad_temp'] + ([prefix] if prefix else []) + ([''] if (on_windows or not wrapped) else [wrapped.__name__])) directory = os.environ.get('TMPDIR') if directory and 'dir' not in tkwargs_: tkwargs_['dir'] = directory return tkwargs_ @optional_args def line_profile(func): """Q&D helper to line profile the function and spit out stats """ import line_profiler prof = line_profiler.LineProfiler() @wraps(func) def _wrap_line_profile(*args, **kwargs): try: pfunc = prof(func) return pfunc(*args, **kwargs) finally: prof.print_stats() return _wrap_line_profile # unused in -core @optional_args def collect_method_callstats(func): """Figure out methods which call the method repeatedly on the same instance Use case(s): - .repo is expensive since does all kinds of checks. - .config is expensive transitively since it calls .repo each time TODO: - fancy one could look through the stack for the same id(self) to see if that location is already in memo. That would hint to the cases where object is not passed into underlying functions, causing them to redo the same work over and over again - ATM might flood with all "1 lines" calls which are not that informative. The underlying possibly suboptimal use might be coming from their callers. It might or not relate to the previous TODO """ from collections import defaultdict import traceback from time import time memo = defaultdict(lambda: defaultdict(int)) # it will be a dict of lineno: count # gross timing times = [] toppath = dirname(__file__) + sep @wraps(func) def _wrap_collect_method_callstats(*args, **kwargs): try: self = args[0] stack = traceback.extract_stack() caller = stack[-2] stack_sig = \ "{relpath}:{s.name}".format( s=caller, relpath=relpath(caller.filename, toppath)) sig = (id(self), stack_sig) # we will count based on id(self) + wherefrom memo[sig][caller.lineno] += 1 t0 = time() return func(*args, **kwargs) finally: times.append(time() - t0) pass def print_stats(): print("The cost of property {}:".format(func.__name__)) if not memo: print("None since no calls") return # total count counts = {k: sum(v.values()) for k,v in memo.items()} total = sum(counts.values()) ids = {self_id for (self_id, _) in memo} print(" Total: {} calls from {} objects with {} contexts taking {:.2f} sec" .format(total, len(ids), len(memo), sum(times))) # now we need to sort by value for (self_id, caller), count in sorted(counts.items(), key=lambda x: x[1], reverse=True): print(" {} {}: {} from {} lines" .format(self_id, caller, count, len(memo[(self_id, caller)]))) # Upon total exit we print the stats import atexit atexit.register(print_stats) return _wrap_collect_method_callstats # Borrowed from duecredit to wrap duecredit-handling to guarantee failsafe def never_fail(f): """Assure that function never fails -- all exceptions are caught Returns `None` if function fails internally. """ @wraps(f) def wrapped_func(*args, **kwargs): try: return f(*args, **kwargs) except Exception as e: lgr.warning( "DataLad internal failure while running %s: %r. 
" "Please report at https://github.com/datalad/datalad/issues" % (f, e) ) if os.environ.get('DATALAD_ALLOW_FAIL', False): return f else: return wrapped_func # # Context Managers # # unused in -core @contextmanager def nothing_cm(): """Just a dummy cm to programmically switch context managers""" yield @contextmanager def swallow_outputs(): """Context manager to help consuming both stdout and stderr, and print() stdout is available as cm.out and stderr as cm.err whenever cm is the yielded context manager. Internally uses temporary files to guarantee absent side-effects of swallowing into StringIO which lacks .fileno. print mocking is necessary for some uses where sys.stdout was already bound to original sys.stdout, thus mocking it later had no effect. Overriding print function had desired effect """ class StringIOAdapter(object): """Little adapter to help getting out/err values """ def __init__(self): kw = get_tempfile_kwargs({}, prefix="outputs") self._out = NamedTemporaryFile(delete=False, mode='w', **kw) self._err = NamedTemporaryFile(delete=False, mode='w', **kw) def _read(self, h): with open(h.name) as f: return f.read() @property def out(self): if not self._out.closed: self._out.flush() return self._read(self._out) @property def err(self): if not self._err.closed: self._err.flush() return self._read(self._err) @property def handles(self): return self._out, self._err def cleanup(self): self._out.close() self._err.close() out_name = self._out.name err_name = self._err.name from datalad import cfg if cfg.getbool('datalad.log', 'outputs', default=False) \ and lgr.getEffectiveLevel() <= logging.DEBUG: for s, sname in ((self.out, 'stdout'), (self.err, 'stderr')): if s: pref = os.linesep + "| " lgr.debug("Swallowed %s:%s%s", sname, pref, s.replace(os.linesep, pref)) else: lgr.debug("Nothing was swallowed for %s", sname) del self._out del self._err gc.collect() rmtemp(out_name) rmtemp(err_name) def fake_print(*args, **kwargs): sep = kwargs.pop('sep', ' ') end = kwargs.pop('end', '\n') file = kwargs.pop('file', sys.stdout) if file in (oldout, olderr, sys.stdout, sys.stderr): # we mock try: sys.stdout.write(sep.join(args) + end) except UnicodeEncodeError as exc: lgr.error( "Failed to write to mocked stdout, got %s, continue as it " "didn't happen", exc) else: # must be some other file one -- leave it alone oldprint(*args, sep=sep, end=end, file=file) from .ui import ui # preserve -- they could have been mocked already oldprint = getattr(builtins, 'print') oldout, olderr = sys.stdout, sys.stderr olduiout = ui.out adapter = StringIOAdapter() try: sys.stdout, sys.stderr = adapter.handles ui.out = adapter.handles[0] setattr(builtins, 'print', fake_print) yield adapter finally: sys.stdout, sys.stderr, ui.out = oldout, olderr, olduiout setattr(builtins, 'print', oldprint) adapter.cleanup() @contextmanager def swallow_logs(new_level=None, file_=None, name='datalad'): """Context manager to consume all logs. """ lgr = logging.getLogger(name) # Keep old settings old_level = lgr.level old_handlers = lgr.handlers # Let's log everything into a string # TODO: generalize with the one for swallow_outputs class StringIOAdapter(object): """Little adapter to help getting out values And to stay consistent with how swallow_outputs behaves """ def __init__(self): if file_ is None: kw = get_tempfile_kwargs({}, prefix="logs") self._out = NamedTemporaryFile(mode='a', delete=False, **kw) else: out_file = file_ # PY3 requires clearly one or another. 
race condition possible self._out = open(out_file, 'a') self._final_out = None def _read(self, h): with open(h.name) as f: return f.read() @property def out(self): if self._final_out is not None: # we closed and cleaned up already return self._final_out else: self._out.flush() return self._read(self._out) @property def lines(self): return self.out.split('\n') @property def handle(self): return self._out def cleanup(self): # store for access while object exists self._final_out = self.out self._out.close() out_name = self._out.name del self._out gc.collect() if not file_: rmtemp(out_name) def assert_logged(self, msg=None, level=None, regex=True, **kwargs): """Provide assertion on whether a msg was logged at a given level If neither `msg` nor `level` provided, checks if anything was logged at all. Parameters ---------- msg: str, optional Message (as a regular expression, if `regex`) to be searched. If no msg provided, checks if anything was logged at a given level. level: str, optional String representing the level to be logged regex: bool, optional If False, regular `assert_in` is used **kwargs: str, optional Passed to `assert_re_in` or `assert_in` """ from datalad.tests.utils import assert_re_in from datalad.tests.utils import assert_in if regex: match = r'\[%s\] ' % level if level else r"\[\S+\] " else: match = '[%s] ' % level if level else '' if msg: match += msg if match: (assert_re_in if regex else assert_in)(match, self.out, **kwargs) else: assert not kwargs, "no kwargs to be passed anywhere" assert self.out, "Nothing was logged!?" adapter = StringIOAdapter() # TODO: it does store messages but without any formatting, i.e. even without # date/time prefix etc. IMHO it should preserve formatting in case if file_ is # set swallow_handler = logging.StreamHandler(adapter.handle) # we want to log levelname so we could test against it swallow_handler.setFormatter( logging.Formatter('[%(levelname)s] %(message)s')) swallow_handler.filters = sum([h.filters for h in old_handlers], []) lgr.handlers = [swallow_handler] if old_level < logging.DEBUG: # so if HEAVYDEBUG etc -- show them! lgr.handlers += old_handlers if isinstance(new_level, str): new_level = getattr(logging, new_level) if new_level is not None: lgr.setLevel(new_level) try: yield adapter # TODO: if file_ and there was an exception -- most probably worth logging it? # although ideally it should be the next log outside added to that file_ ... oh well finally: lgr.handlers = old_handlers lgr.setLevel(old_level) adapter.cleanup() # TODO: May be melt in with swallow_logs at some point: @contextmanager def disable_logger(logger=None): """context manager to temporarily disable logging This is to provide one of swallow_logs' purposes without unnecessarily creating temp files (see gh-1865) Parameters ---------- logger: Logger Logger whose handlers will be ordered to not log anything. Default: datalad's topmost Logger ('datalad') """ class NullFilter(logging.Filter): """Filter class to reject all records """ def filter(self, record): return 0 if logger is None: # default: all of datalad's logging: logger = logging.getLogger('datalad') filter_ = NullFilter(logger.name) [h.addFilter(filter_) for h in logger.handlers] try: yield logger finally: [h.removeFilter(filter_) for h in logger.handlers] # # Additional handlers # _sys_excepthook = sys.excepthook # Just in case we ever need original one def setup_exceptionhook(ipython=False): """Overloads default sys.excepthook with our exceptionhook handler. 
If interactive, our exceptionhook handler will invoke pdb.post_mortem; if not interactive, then invokes default handler. """ def _datalad_pdb_excepthook(type, value, tb): import traceback traceback.print_exception(type, value, tb) print() if is_interactive(): import pdb pdb.post_mortem(tb) if ipython: from IPython.core import ultratb sys.excepthook = ultratb.FormattedTB(mode='Verbose', # color_scheme='Linux', call_pdb=is_interactive()) else: sys.excepthook = _datalad_pdb_excepthook def ensure_dir(*args): """Make sure directory exists. Joins the list of arguments to an os-specific path to the desired directory and creates it, if it not exists yet. """ dirname = op.join(*args) if not exists(dirname): os.makedirs(dirname) return dirname def updated(d, update): """Return a copy of the input with the 'update' Primarily for updating dictionaries """ d = d.copy() d.update(update) return d _pwd_mode = None def _switch_to_getcwd(msg, *args): global _pwd_mode _pwd_mode = 'cwd' lgr.debug( msg + ". From now on will be returning os.getcwd(). Directory" " symlinks in the paths will be resolved", *args ) # TODO: we might want to mitigate by going through all flywheighted # repos and tuning up their .paths to be resolved? def getpwd(): """Try to return a CWD without dereferencing possible symlinks This function will try to use PWD environment variable to provide a current working directory, possibly with some directories along the path being symlinks to other directories. Unfortunately, PWD is used/set only by the shell and such functions as `os.chdir` and `os.getcwd` nohow use or modify it, thus `os.getcwd()` returns path with links dereferenced. While returning current working directory based on PWD env variable we verify that the directory is the same as `os.getcwd()` after resolving all symlinks. If that verification fails, we fall back to always use `os.getcwd()`. Initial decision to either use PWD env variable or os.getcwd() is done upon the first call of this function. """ global _pwd_mode if _pwd_mode is None: # we need to decide! try: pwd = os.environ['PWD'] if on_windows and pwd and pwd.startswith('/'): # It should be a path from MSYS. # - it might start with a drive letter or not # - it seems to be "illegal" to have a single letter directories # under / path, i.e. if created - they aren't found # - 'ln -s' does not fail to create a "symlink" but it just # copies! # so we are not likely to need original PWD purpose on # those systems # Verdict: _pwd_mode = 'cwd' else: _pwd_mode = 'PWD' except KeyError: _pwd_mode = 'cwd' if _pwd_mode == 'cwd': return os.getcwd() elif _pwd_mode == 'PWD': try: cwd = os.getcwd() except OSError as exc: if "o such file" in str(exc): # directory was removed but we promised to be robust and # still report the path we might know since we are still in PWD # mode cwd = None else: raise try: pwd = os.environ['PWD'] # do absolute() in addition to always get an absolute path # even with non-existing paths on windows pwd_real = str(Path(pwd).resolve().absolute()) # This logic would fail to catch the case where chdir did happen # to the directory where current PWD is pointing to, e.g. 
# $> ls -ld $PWD # lrwxrwxrwx 1 yoh yoh 5 Oct 11 13:27 /home/yoh/.tmp/tmp -> /tmp// # hopa:~/.tmp/tmp # $> python -c 'import os; os.chdir("/tmp"); from datalad.utils import getpwd; print(getpwd(), os.getcwd())' # ('/home/yoh/.tmp/tmp', '/tmp') # but I guess that should not be too harmful if cwd is not None and pwd_real != cwd: _switch_to_getcwd( "realpath of PWD=%s is %s whenever os.getcwd()=%s", pwd, pwd_real, cwd ) return cwd return pwd except KeyError: _switch_to_getcwd("PWD env variable is no longer available") return cwd # Must not happen, but may be someone # evil purges PWD from environ? else: raise RuntimeError( "Must have not got here. " "pwd_mode must be either cwd or PWD. And it is now %r" % (_pwd_mode,) ) class chpwd(object): """Wrapper around os.chdir which also adjusts environ['PWD'] The reason is that otherwise PWD is simply inherited from the shell and we have no ability to assess directory path without dereferencing symlinks. If used as a context manager it allows to temporarily change directory to the given path """ def __init__(self, path, mkdir=False, logsuffix=''): if path: pwd = getpwd() self._prev_pwd = pwd else: self._prev_pwd = None return if not isabs(path): path = normpath(op.join(pwd, path)) if not os.path.exists(path) and mkdir: self._mkdir = True os.mkdir(path) else: self._mkdir = False lgr.debug("chdir %r -> %r %s", self._prev_pwd, path, logsuffix) os.chdir(path) # for grep people -- ok, to chdir here! os.environ['PWD'] = str(path) def __enter__(self): # nothing more to do really, chdir was in the constructor pass def __exit__(self, exc_type, exc_val, exc_tb): if self._prev_pwd: # Need to use self.__class__ so this instance, if the entire # thing mocked during the test, still would use correct chpwd self.__class__(self._prev_pwd, logsuffix="(coming back)") def dlabspath(path, norm=False): """Symlinks-in-the-cwd aware abspath os.path.abspath relies on os.getcwd() which would not know about symlinks in the path TODO: we might want to norm=True by default to match behavior of os .path.abspath? """ if not isabs(path): # if not absolute -- relative to pwd path = op.join(getpwd(), path) return normpath(path) if norm else path def with_pathsep(path): """Little helper to guarantee that path ends with /""" return path + sep if not path.endswith(sep) else path def get_path_prefix(path, pwd=None): """Get path prefix (for current directory) Returns relative path to the topdir, if we are under topdir, and if not absolute path to topdir. If `pwd` is not specified - current directory assumed """ pwd = pwd or getpwd() path = dlabspath(path) path_ = with_pathsep(path) pwd_ = with_pathsep(pwd) common = commonprefix((path_, pwd_)) if common.endswith(sep) and common in {path_, pwd_}: # we are in subdir or above the path = use relative path location_prefix = relpath(path, pwd) # if benign "here" - cut off if location_prefix in (curdir, curdir + sep): location_prefix = '' return location_prefix else: # just return absolute path return path def _get_normalized_paths(path, prefix): if isabs(path) != isabs(prefix): raise ValueError("Both paths must either be absolute or relative. 
" "Got %r and %r" % (path, prefix)) path = with_pathsep(path) prefix = with_pathsep(prefix) return path, prefix def path_startswith(path, prefix): """Return True if path starts with prefix path Parameters ---------- path: str prefix: str """ path, prefix = _get_normalized_paths(path, prefix) return path.startswith(prefix) def path_is_subpath(path, prefix): """Return True if path is a subpath of prefix It will return False if path == prefix. Parameters ---------- path: str prefix: str """ path, prefix = _get_normalized_paths(path, prefix) return (len(prefix) < len(path)) and path.startswith(prefix) def knows_annex(path): """Returns whether at a given path there is information about an annex It is just a thin wrapper around GitRepo.is_with_annex() classmethod which also checks for `path` to exist first. This includes actually present annexes, but also uninitialized ones, or even the presence of a remote annex branch. """ from os.path import exists if not exists(path): lgr.debug("No annex: test path {0} doesn't exist".format(path)) return False from datalad.support.gitrepo import GitRepo return GitRepo(path, init=False, create=False).is_with_annex() @contextmanager def make_tempfile(content=None, wrapped=None, **tkwargs): """Helper class to provide a temporary file name and remove it at the end (context manager) Parameters ---------- mkdir : bool, optional (default: False) If True, temporary directory created using tempfile.mkdtemp() content : str or bytes, optional Content to be stored in the file created wrapped : function, optional If set, function name used to prefix temporary file name `**tkwargs`: All other arguments are passed into the call to tempfile.mk{,d}temp(), and resultant temporary filename is passed as the first argument into the function t. If no 'prefix' argument is provided, it will be constructed using module and function names ('.' replaced with '_'). To change the used directory without providing keyword argument 'dir' set DATALAD_TESTS_TEMP_DIR. Examples -------- >>> from os.path import exists >>> from datalad.utils import make_tempfile >>> with make_tempfile() as fname: ... k = open(fname, 'w').write('silly test') >>> assert not exists(fname) # was removed >>> with make_tempfile(content="blah") as fname: ... assert open(fname).read() == "blah" """ if tkwargs.get('mkdir', None) and content is not None: raise ValueError("mkdir=True while providing content makes no sense") tkwargs_ = get_tempfile_kwargs(tkwargs, wrapped=wrapped) # if DATALAD_TESTS_TEMP_DIR is set, use that as directory, # let mktemp handle it otherwise. However, an explicitly provided # dir=... will override this. mkdir = tkwargs_.pop('mkdir', False) filename = {False: tempfile.mktemp, True: tempfile.mkdtemp}[mkdir](**tkwargs_) # MIH: not clear to me why we need to perform this (possibly expensive) # resolve. It was already part of the original implementation # 008d9ab8cc3e0170c0a9b8479e80dee9ffe6eb7f filename = Path(filename).resolve() if content: (filename.write_bytes if isinstance(content, bytes) else filename.write_text)(content) # TODO globbing below can also be done with pathlib filename = str(filename) if __debug__: lgr.debug( 'Created temporary %s named %s', 'directory' if mkdir else 'file', filename) try: yield filename finally: # glob here for all files with the same name (-suffix) # would be useful whenever we requested .img filename, # and function creates .hdr as well # MIH: this is undocumented behavior, and undesired in the general # case. 
it should be made conditional and explicit lsuffix = len(tkwargs_.get('suffix', '')) filename_ = lsuffix and filename[:-lsuffix] or filename filenames = glob.glob(filename_ + '*') if len(filename_) < 3 or len(filenames) > 5: # For paranoid yoh who stepped into this already ones ;-) lgr.warning("It is unlikely that it was intended to remove all" " files matching %r. Skipping" % filename_) return for f in filenames: try: rmtemp(f) except OSError: # pragma: no cover pass def _path_(*p): """Given a path in POSIX" notation, regenerate one in native to the env one""" if on_windows: return op.join(*map(lambda x: op.join(*x.split('/')), p)) else: # Assume that all others as POSIX compliant so nothing to be done return op.join(*p) def get_timestamp_suffix(time_=None, prefix='-'): """Return a time stamp (full date and time up to second) primarily to be used for generation of log files names """ args = [] if time_ is not None: if isinstance(time_, int): time_ = time.gmtime(time_) args.append(time_) return time.strftime(prefix + TIMESTAMP_FMT, *args) # unused in -core def get_logfilename(dspath, cmd='datalad'): """Return a filename to use for logging under a dataset/repository directory would be created if doesn't exist, but dspath must exist and be a directory """ assert(exists(dspath)) assert(isdir(dspath)) ds_logdir = ensure_dir(dspath, '.git', 'datalad', 'logs') # TODO: use WEB_META_LOG whenever #789 merged return op.join(ds_logdir, 'crawl-%s.log' % get_timestamp_suffix()) def get_trace(edges, start, end, trace=None): """Return the trace/path to reach a node in a tree. Parameters ---------- edges : sequence(2-tuple) The tree given by a sequence of edges (parent, child) tuples. The nodes can be identified by any value and data type that supports the '==' operation. start : Identifier of the start node. Must be present as a value in the parent location of an edge tuple in order to be found. end : Identifier of the target/end node. Must be present as a value in the child location of an edge tuple in order to be found. trace : list Mostly useful for recursive calls, and used internally. Returns ------- None or list Returns a list with the trace to the target (the starts and the target are not included in the trace, hence if start and end are directly connected an empty list is returned), or None when no trace to the target can be found, or start and end are identical. """ # the term trace is used to avoid confusion with a path in the sense # of a filesystem path, but the analogy fits and nodes can be paths if trace is None: trace = [] if not edges: raise ValueError("no edges given") for cand in edges: cand_super, cand_sub = cand if cand_sub in trace: # only DAGs, skip any cyclic traces continue if trace and cand_super != trace[-1]: # only consider edges that lead off the end of the trace continue if not trace and cand_super != start: # we got nothing yet, and this edges is not matching the start continue if cand_sub == end: return trace # dive into potential subnodes cand_trace = get_trace( edges, start, end, trace + [cand_sub]) if cand_trace: return cand_trace return None def get_dataset_root(path): """Return the root of an existent dataset containing a given path The root path is returned in the same absolute or relative form as the input argument. If no associated dataset exists, or the input path doesn't exist, None is returned. If `path` is a symlink or something other than a directory, its the root dataset containing its parent directory will be reported. 
If none can be found, at a symlink at `path` is pointing to a dataset, `path` itself will be reported as the root. Parameters ---------- path : Path-like Returns ------- str or None """ path = str(path) suffix = '.git' altered = None if islink(path) or not isdir(path): altered = path path = dirname(path) apath = abspath(path) # while we can still go up while split(apath)[1]: if exists(op.join(path, suffix)): return path # new test path in the format we got it path = normpath(op.join(path, os.pardir)) # no luck, next round apath = abspath(path) # if we applied dirname() at the top, we give it another go with # the actual path, if it was itself a symlink, it could be the # top-level dataset itself if altered and exists(op.join(altered, suffix)): return altered return None # ATM used in datalad_crawler extension, so do not remove yet def try_multiple(ntrials, exception, base, f, *args, **kwargs): """Call f multiple times making exponentially growing delay between the calls""" for trial in range(1, ntrials+1): try: return f(*args, **kwargs) except exception as exc: if trial == ntrials: raise # just reraise on the last trial t = base ** trial lgr.warning("Caught %s on trial #%d. Sleeping %f and retrying", CapturedException(exc), trial, t) sleep(t) @optional_args def try_multiple_dec( f, ntrials=None, duration=0.1, exceptions=None, increment_type=None, exceptions_filter=None, logger=None, ): """Decorator to try function multiple times. Main purpose is to decorate functions dealing with removal of files/directories and which might need a few seconds to work correctly on Windows which takes its time to release files/directories. Parameters ---------- ntrials: int, optional duration: float, optional Seconds to sleep before retrying. increment_type: {None, 'exponential'} Note that if it is exponential, duration should typically be > 1.0 so it grows with higher power exceptions: Exception or tuple of Exceptions, optional Exception or a tuple of multiple exceptions, on which to retry exceptions_filter: callable, optional If provided, this function will be called with a caught exception instance. If function returns True - we will re-try, if False - exception will be re-raised without retrying. logger: callable, optional Logger to log upon failure. If not provided, will use stock logger at the level of 5 (heavy debug). """ if not exceptions: exceptions = (OSError, WindowsError, PermissionError) \ if on_windows else OSError if not ntrials: # Life goes fast on proper systems, no need to delay it much ntrials = 100 if on_windows else 10 if logger is None: def logger(*args, **kwargs): return lgr.log(5, *args, **kwargs) assert increment_type in {None, 'exponential'} @wraps(f) def _wrap_try_multiple_dec(*args, **kwargs): t = duration for trial in range(ntrials): try: return f(*args, **kwargs) except exceptions as exc: if exceptions_filter and not exceptions_filter(exc): raise if trial < ntrials - 1: if increment_type == 'exponential': t = duration ** (trial + 1) logger( "Caught %s on trial #%d. Sleeping %f and retrying", CapturedException(exc), trial, t) sleep(t) else: raise return _wrap_try_multiple_dec @try_multiple_dec def unlink(f): """'Robust' unlink. Would try multiple times On windows boxes there is evidence for a latency of more than a second until a file is considered no longer "in-use". 
WindowsError is not known on Linux, and if IOError or any other exception is thrown then if except statement has WindowsError in it -- NameError also see gh-2533 """ # Check for open files assert_no_open_files(f) return os.unlink(f) @try_multiple_dec def _rmtree(*args, **kwargs): """Just a helper to decorate shutil.rmtree. rmtree defined above does more and ideally should not itself be decorated since a recursive definition and does checks for open files inside etc - might be too runtime expensive """ return shutil.rmtree(*args, **kwargs) def slash_join(base, extension): """Join two strings with a '/', avoiding duplicate slashes If any of the strings is None the other is returned as is. """ if extension is None: return base if base is None: return extension return '/'.join( (base.rstrip('/'), extension.lstrip('/'))) # # IO Helpers # # unused in -core def open_r_encdetect(fname, readahead=1000): """Return a file object in read mode with auto-detected encoding This is helpful when dealing with files of unknown encoding. Parameters ---------- readahead: int, optional How many bytes to read for guessing the encoding type. If negative - full file will be read """ from chardet import detect import io # read some bytes from the file with open(fname, 'rb') as f: head = f.read(readahead) enc = detect(head) denc = enc.get('encoding', None) lgr.debug("Auto-detected encoding %s for file %s (confidence: %s)", denc, fname, enc.get('confidence', 'unknown')) return io.open(fname, encoding=denc) def read_file(fname, decode=True): """A helper to read file passing content via ensure_unicode Parameters ---------- decode: bool, optional if False, no ensure_unicode and file content returned as bytes """ with open(fname, 'rb') as f: content = f.read() return ensure_unicode(content) if decode else content def read_csv_lines(fname, dialect=None, readahead=16384, **kwargs): """A generator of dict records from a CSV/TSV Automatically guesses the encoding for each record to convert to UTF-8 Parameters ---------- fname: str Filename dialect: str, optional Dialect to specify to csv.reader. If not specified -- guessed from the file, if fails to guess, "excel-tab" is assumed readahead: int, optional How many bytes to read from the file to guess the type **kwargs Passed to `csv.reader` """ import csv if dialect is None: with open(fname) as tsvfile: # add robustness, use a sniffer try: dialect = csv.Sniffer().sniff(tsvfile.read(readahead)) except Exception as exc: lgr.warning( 'Could not determine file-format, assuming TSV: %s', CapturedException(exc) ) dialect = 'excel-tab' kw = dict(encoding='utf-8') with open(fname, 'r', **kw) as tsvfile: # csv.py doesn't do Unicode; encode temporarily as UTF-8: csv_reader = csv.reader( tsvfile, dialect=dialect, **kwargs ) header = None for row in csv_reader: # decode UTF-8 back to Unicode, cell by cell: row_unicode = map(ensure_unicode, row) if header is None: header = list(row_unicode) else: yield dict(zip(header, row_unicode)) def import_modules(modnames, pkg, msg="Failed to import {module}", log=lgr.debug): """Helper to import a list of modules without failing if N/A Parameters ---------- modnames: list of str List of module names to import pkg: str Package under which to import msg: str, optional Message template for .format() to log at DEBUG level if import fails. 
Keys {module} and {package} will be provided and ': {exception}' appended log: callable, optional Logger call to use for logging messages """ from importlib import import_module _globals = globals() mods_loaded = [] if pkg and not pkg in sys.modules: # with python 3.5.1 (ok with 3.5.5) somehow kept running into # Failed to import dlsub1: Parent module 'dltestm1' not loaded # while running the test. Preloading pkg resolved the issue import_module(pkg) for modname in modnames: try: _globals[modname] = mod = import_module( '.{}'.format(modname), pkg) mods_loaded.append(mod) except Exception as exc: from datalad.support.exceptions import CapturedException ce = CapturedException(exc) log((msg + ': {exception}').format( module=modname, package=pkg, exception=ce.message)) return mods_loaded def import_module_from_file(modpath, pkg=None, log=lgr.debug): """Import provided module given a path TODO: - RF/make use of it in pipeline.py which has similar logic - join with import_modules above? Parameters ---------- pkg: module, optional If provided, and modpath is under pkg.__path__, relative import will be used """ assert(modpath.endswith('.py')) # for now just for .py files log("Importing %s" % modpath) modname = basename(modpath)[:-3] relmodpath = None if pkg: for pkgpath in pkg.__path__: if path_is_subpath(modpath, pkgpath): # for now relying on having .py extension -- assertion above relmodpath = '.' + relpath(modpath[:-3], pkgpath).replace(sep, '.') break try: if relmodpath: from importlib import import_module mod = import_module(relmodpath, pkg.__name__) else: dirname_ = dirname(modpath) try: sys.path.insert(0, dirname_) mod = __import__(modname, level=0) finally: if dirname_ in sys.path: sys.path.pop(sys.path.index(dirname_)) else: log("Expected path %s to be within sys.path, but it was gone!" % dirname_) except Exception as e: raise RuntimeError( "Failed to import module from %s" % modpath) from e return mod def get_encoding_info(): """Return a dictionary with various encoding/locale information""" import sys, locale from collections import OrderedDict return OrderedDict([ ('default', sys.getdefaultencoding()), ('filesystem', sys.getfilesystemencoding()), ('locale.prefered', locale.getpreferredencoding()), ]) def get_envvars_info(): from collections import OrderedDict envs = [] for var, val in os.environ.items(): if ( var.startswith('PYTHON') or var.startswith('LC_') or var.startswith('GIT_') or var in ('LANG', 'LANGUAGE', 'PATH') ): envs.append((var, val)) return OrderedDict(envs) # This class is modified from Snakemake (v5.1.4) class SequenceFormatter(string.Formatter): """string.Formatter subclass with special behavior for sequences. This class delegates formatting of individual elements to another formatter object. Non-list objects are formatted by calling the delegate formatter's "format_field" method. List-like objects (list, tuple, set, frozenset) are formatted by formatting each element of the list according to the specified format spec using the delegate formatter and then joining the resulting strings with a separator (space by default). """ def __init__(self, separator=" ", element_formatter=string.Formatter(), *args, **kwargs): self.separator = separator self.element_formatter = element_formatter def format_element(self, elem, format_spec): """Format a single element For sequences, this is called once for each element in a sequence. For anything else, it is called on the entire object. It is intended to be overridden in subclases. 
""" return self.element_formatter.format_field(elem, format_spec) def format_field(self, value, format_spec): if isinstance(value, (list, tuple, set, frozenset)): return self.separator.join(self.format_element(v, format_spec) for v in value) else: return self.format_element(value, format_spec) # TODO: eventually we might want to make use of attr module class File(object): """Helper for a file entry in the create_tree/@with_tree It allows to define additional settings for entries """ def __init__(self, name, executable=False): """ Parameters ---------- name : str Name of the file executable: bool, optional Make it executable """ self.name = name self.executable = executable def __str__(self): return self.name def create_tree_archive(path, name, load, overwrite=False, archives_leading_dir=True): """Given an archive `name`, create under `path` with specified `load` tree """ from datalad.support.archives import compress_files dirname = file_basename(name) full_dirname = op.join(path, dirname) os.makedirs(full_dirname) create_tree(full_dirname, load, archives_leading_dir=archives_leading_dir) # create archive if archives_leading_dir: compress_files([dirname], name, path=path, overwrite=overwrite) else: compress_files(list(map(basename, glob.glob(op.join(full_dirname, '*')))), op.join(pardir, name), path=op.join(path, dirname), overwrite=overwrite) # remove original tree rmtree(full_dirname) def create_tree(path, tree, archives_leading_dir=True, remove_existing=False): """Given a list of tuples (name, load) create such a tree if load is a tuple itself -- that would create either a subtree or an archive with that content and place it into the tree if name ends with .tar.gz """ lgr.log(5, "Creating a tree under %s", path) if not exists(path): os.makedirs(path) if isinstance(tree, dict): tree = tree.items() for file_, load in tree: if isinstance(file_, File): executable = file_.executable name = file_.name else: executable = False name = file_ full_name = op.join(path, name) if remove_existing and lexists(full_name): rmtree(full_name, chmod_files=True) if isinstance(load, (tuple, list, dict)): if name.endswith('.tar.gz') or name.endswith('.tar') or name.endswith('.zip'): create_tree_archive( path, name, load, archives_leading_dir=archives_leading_dir) else: create_tree( full_name, load, archives_leading_dir=archives_leading_dir, remove_existing=remove_existing) else: open_func = open if full_name.endswith('.gz'): open_func = gzip.open elif full_name.split('.')[-1] in ('xz', 'lzma'): import lzma open_func = lzma.open with open_func(full_name, "wb") as f: f.write(ensure_bytes(load, 'utf-8')) if executable: os.chmod(full_name, os.stat(full_name).st_mode | stat.S_IEXEC) def get_suggestions_msg(values, known, sep="\n "): """Return a formatted string with suggestions for values given the known ones """ import difflib suggestions = [] for value in ensure_list(values): # might not want to do it if we change presentation below suggestions += difflib.get_close_matches(value, known) suggestions = unique(suggestions) msg = "Did you mean any of these?" if suggestions: if '\n' in sep: # if separator includes new line - we add entire separator right away msg += sep else: msg += ' ' return msg + "%s\n" % sep.join(suggestions) return '' def bytes2human(n, format='%(value).1f %(symbol)sB'): """ Convert n bytes into a human readable string based on format. 
symbols can be either "customary", "customary_ext", "iec" or "iec_ext", see: http://goo.gl/kTQMs >>> from datalad.utils import bytes2human >>> bytes2human(1) '1.0 B' >>> bytes2human(1024) '1.0 KB' >>> bytes2human(1048576) '1.0 MB' >>> bytes2human(1099511627776127398123789121) '909.5 YB' >>> bytes2human(10000, "%(value).1f %(symbol)s/sec") '9.8 K/sec' >>> # precision can be adjusted by playing with %f operator >>> bytes2human(10000, format="%(value).5f %(symbol)s") '9.76562 K' Taken from: http://goo.gl/kTQMs and subsequently simplified Original Author: Giampaolo Rodola' <g.rodola [AT] gmail [DOT] com> License: MIT """ n = int(n) if n < 0: raise ValueError("n < 0") symbols = ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y') prefix = {} for i, s in enumerate(symbols[1:]): prefix[s] = 1 << (i + 1) * 10 for symbol in reversed(symbols[1:]): if n >= prefix[symbol]: value = float(n) / prefix[symbol] return format % locals() return format % dict(symbol=symbols[0], value=n) def quote_cmdlinearg(arg): """Perform platform-appropriate argument quoting""" # https://stackoverflow.com/a/15262019 return '"{}"'.format( arg.replace('"', '""') ) if on_windows else shlex_quote(arg) def guard_for_format(arg): """Replace { and } with {{ and }} To be used in cases if arg is not expected to have provided by user .format() placeholders, but 'arg' might become a part of a composite passed to .format(), e.g. via 'Run' """ return arg.replace('{', '{{').replace('}', '}}') def join_cmdline(args): """Join command line args into a string using quote_cmdlinearg """ return ' '.join(map(quote_cmdlinearg, args)) def split_cmdline(s): """Perform platform-appropriate command line splitting. Identical to `shlex.split()` on non-windows platforms. Modified from https://stackoverflow.com/a/35900070 """ if not on_windows: return shlex_split(s) # the rest is for windows RE_CMD_LEX = r'''"((?:""|\\["\\]|[^"])*)"?()|(\\\\(?=\\*")|\\")|(&&?|\|\|?|\d?>|[<])|([^\s"&|<>]+)|(\s+)|(.)''' args = [] accu = None # collects pieces of one arg for qs, qss, esc, pipe, word, white, fail in re.findall(RE_CMD_LEX, s): if word: pass # most frequent elif esc: word = esc[1] elif white or pipe: if accu is not None: args.append(accu) if pipe: args.append(pipe) accu = None continue elif fail: raise ValueError("invalid or incomplete shell string") elif qs: word = qs.replace('\\"', '"').replace('\\\\', '\\') if platform == 0: word = word.replace('""', '"') else: word = qss # may be even empty; must be last accu = (accu or '') + word if accu is not None: args.append(accu) return args def get_wrapped_class(wrapped): """Determine the command class a wrapped __call__ belongs to""" mod = sys.modules[wrapped.__module__] command_class_name = wrapped.__qualname__.split('.')[-2] _func_class = mod.__dict__[command_class_name] lgr.debug("Determined class of decorated function: %s", _func_class) return _func_class def _make_assure_kludge(fn): old_name = fn.__name__.replace("ensure", "assure") @wraps(fn) def compat_fn(*args, **kwargs): warnings.warn( "{} is deprecated and will be removed in a future release. " "Use {} instead." .format(old_name, fn.__name__), DeprecationWarning) return fn(*args, **kwargs) compat_fn.__doc__ = ("Note: This function is deprecated. Use {} instead." 
                         .format(fn.__name__))
    return compat_fn


assure_tuple_or_list = _make_assure_kludge(ensure_tuple_or_list)
assure_iter = _make_assure_kludge(ensure_iter)
assure_list = _make_assure_kludge(ensure_list)
assure_list_from_str = _make_assure_kludge(ensure_list_from_str)
assure_dict_from_str = _make_assure_kludge(ensure_dict_from_str)
assure_bytes = _make_assure_kludge(ensure_bytes)
assure_unicode = _make_assure_kludge(ensure_unicode)
assure_bool = _make_assure_kludge(ensure_bool)
assure_dir = _make_assure_kludge(ensure_dir)

lgr.log(5, "Done importing datalad.utils")


def check_symlink_capability(path, target):
    """Helper similar to datalad.tests.utils.has_symlink_capability

    However, for use in a datalad command context, we should neither assume
    that we can write to a tmpfile, nor import a whole lot from datalad's
    test machinery. Finally, we want to know whether we can create a symlink
    at a specific location, not just somewhere. Therefore use an arbitrary
    path to test-build a symlink and delete it afterwards. A suitable
    location can therefore be determined by higher-level code.

    Parameters
    ----------
    path: Path
    target: Path

    Returns
    -------
    bool
    """
    try:
        target.touch()
        path.symlink_to(target)
        return True
    except Exception:
        return False
    finally:
        if path.exists():
            path.unlink()
        if target.exists():
            target.unlink()
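# Hedged usage sketch (editorial addition, not part of the original datalad
# module): a minimal example of how check_symlink_capability() might be
# driven by higher-level code. The probe file names and the use of a
# temporary directory are illustrative assumptions only.
def _demo_check_symlink_capability():
    import tempfile
    from pathlib import Path

    with tempfile.TemporaryDirectory() as tmp:
        probe_link = Path(tmp) / 'probe.link'
        probe_target = Path(tmp) / 'probe.target'
        # True only if a symlink can actually be created at this location;
        # both probe files are removed again by check_symlink_capability()
        return check_symlink_capability(probe_link, probe_target)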
def unique(seq, key=None, reverse=False): """Given a sequence return a list only with unique elements while maintaining order This is the fastest solution. See https://www.peterbe.com/plog/uniqifiers-benchmark and http://stackoverflow.com/a/480227/1265472 for more information. Enhancement -- added ability to compare for uniqueness using a key function Parameters ---------- seq: Sequence to analyze key: callable, optional Function to call on each element so we could decide not on a full element, but on its member etc reverse: bool, optional If True, uniqueness checked in the reverse order, so that the later ones will take the order """ seen = set() seen_add = seen.add trans = reversed if reverse else lambda x: x if not key: out = [x for x in trans(seq) if not (x in seen or seen_add(x))] else: # OPT: could be optimized, since key is called twice, but for our cases # should be just as fine out = [x for x in trans(seq) if not (key(x) in seen or seen_add(key(x)))] return out[::-1] if reverse else out
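# Hedged usage sketch (editorial addition): illustrates unique() with and
# without a key function; the sample data below is made up for demonstration.
def _demo_unique():
    # plain de-duplication, first occurrence wins, order preserved
    assert unique([1, 2, 1, 3, 2]) == [1, 2, 3]
    # uniqueness decided on the lower-cased value; the first spelling is kept
    assert unique(['a', 'A', 'b'], key=str.lower) == ['a', 'b']
    # with reverse=True the last occurrence decides which element is kept
    assert unique([1, 2, 1, 3], reverse=True) == [2, 1, 3]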
900
933
# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*- # ex: set sts=4 ts=4 sw=4 et: # ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## # # See COPYING file distributed along with the datalad package for the # copyright and license terms. # # ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## import collections from collections.abc import Callable import re import builtins import time import logging import shutil import os import sys import tempfile from tempfile import NamedTemporaryFile import platform import gc import glob import gzip import stat import string import warnings import os.path as op from copy import copy as shallow_copy from contextlib import contextmanager from functools import ( lru_cache, wraps, ) from time import sleep import inspect from itertools import tee # this import is required because other modules import opj from here. from os.path import join as opj from os.path import ( abspath, basename, commonprefix, curdir, dirname, exists, expanduser, expandvars, isabs, isdir, islink, lexists, normpath, pardir, relpath, sep, split, splitdrive ) import posixpath from shlex import ( quote as shlex_quote, split as shlex_split, ) # from datalad.dochelpers import get_docstring_split from datalad.consts import TIMESTAMP_FMT from datalad.support.exceptions import CapturedException unicode_srctypes = str, bytes lgr = logging.getLogger("datalad.utils") lgr.log(5, "Importing datalad.utils") # # Some useful variables # platform_system = platform.system().lower() on_windows = platform_system == 'windows' on_osx = platform_system == 'darwin' on_linux = platform_system == 'linux' on_msys_tainted_paths = on_windows \ and 'MSYS_NO_PATHCONV' not in os.environ \ and os.environ.get('MSYSTEM', '')[:4] in ('MSYS', 'MING') # Takes ~200msec, so should not be called at import time @lru_cache() # output should not change through life time of datalad process def get_linux_distribution(): """Compatibility wrapper for {platform,distro}.linux_distribution(). """ if hasattr(platform, "linux_distribution"): # Use deprecated (but faster) method if it's available. with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=DeprecationWarning) result = platform.linux_distribution() else: import distro # We require this for Python 3.8 and above. result = distro.linux_distribution(full_distribution_name=False) return result # Those weren't used for any critical decision making, thus we just set them to None # Use get_linux_distribution() directly where needed linux_distribution_name = linux_distribution_release = None # Maximal length of cmdline string # Query the system and use hardcoded "knowledge" if None # probably getconf ARG_MAX might not be available # The last one would be the most conservative/Windows CMD_MAX_ARG_HARDCODED = 2097152 if on_linux else 262144 if on_osx else 32767 try: CMD_MAX_ARG = os.sysconf('SC_ARG_MAX') assert CMD_MAX_ARG > 0 if CMD_MAX_ARG > CMD_MAX_ARG_HARDCODED * 1e6: # workaround for some kind of a bug which comes up with python 3.4 # see https://github.com/datalad/datalad/issues/3150 # or on older CentOS with conda and python as new as 3.9 # see https://github.com/datalad/datalad/issues/5943 # TODO: let Yarik know that the world is a paradise now whenever 1e6 # is not large enough CMD_MAX_ARG = min(CMD_MAX_ARG, CMD_MAX_ARG_HARDCODED) except Exception as exc: # ATM (20181005) SC_ARG_MAX available only on POSIX systems # so exception would be thrown e.g. 
on Windows, or # somehow during Debian build for nd14.04 it is coming up with -1: # https://github.com/datalad/datalad/issues/3015 CMD_MAX_ARG = CMD_MAX_ARG_HARDCODED lgr.debug( "Failed to query or got useless SC_ARG_MAX sysconf, " "will use hardcoded value: %s", exc) # Even with all careful computations we do, due to necessity to account for # environment and what not, we still could not figure out "exact" way to # estimate it, but it was shown that 300k safety margin on linux was sufficient. # https://github.com/datalad/datalad/pull/2977#issuecomment-436264710 # 300k is ~15%, so to be safe, and for paranoid us we will just use up to 50% # of the length for "safety margin". We might probably still blow due to # env vars, unicode, etc... so any hard limit imho is not a proper solution CMD_MAX_ARG = int(0.5 * CMD_MAX_ARG) lgr.debug( "Maximal length of cmdline string (adjusted for safety margin): %d", CMD_MAX_ARG) # # Little helpers # # `getargspec` has been deprecated in Python 3. ArgSpecFake = collections.namedtuple( "ArgSpecFake", ["args", "varargs", "keywords", "defaults"]) def getargspec(func, *, include_kwonlyargs=False): """Compat shim for getargspec deprecated in python 3. The main difference from inspect.getargspec (and inspect.getfullargspec for that matter) is that by using inspect.signature we are providing correct args/defaults for functools.wraps'ed functions. `include_kwonlyargs` option was added to centralize getting all args, even the ones which are kwonly (follow the ``*,``). For internal use and not advised for use in 3rd party code. Please use inspect.signature directly. """ # We use signature, and not getfullargspec, because only signature properly # "passes" args from a functools.wraps decorated function. # Note: getfullargspec works Ok on wrapt-decorated functions f_sign = inspect.signature(func) # Loop through parameters and compose argspec args4 = [[], None, None, {}] # Collect all kwonlyargs into a dedicated dict - name: default kwonlyargs = {} # shortcuts args, defaults = args4[0], args4[3] P = inspect.Parameter for p_name, p in f_sign.parameters.items(): if p.kind in (P.POSITIONAL_ONLY, P.POSITIONAL_OR_KEYWORD): assert not kwonlyargs # yoh: must not come after kwonlyarg args.append(p_name) if p.default is not P.empty: defaults[p_name] = p.default elif p.kind == P.VAR_POSITIONAL: args4[1] = p_name elif p.kind == P.VAR_KEYWORD: args4[2] = p_name elif p.kind == P.KEYWORD_ONLY: assert p.default is not P.empty kwonlyargs[p_name] = p.default if kwonlyargs: if not include_kwonlyargs: raise ValueError( 'Function has keyword-only parameters or annotations, either use ' 'inspect.signature() API which can support them, or provide include_kwonlyargs=True ' 'to this function' ) else: args.extend(list(kwonlyargs)) defaults.update(kwonlyargs) # harmonize defaults to how original getargspec returned them -- just a tuple args4[3] = None if not defaults else tuple(defaults.values()) return ArgSpecFake(*args4) def any_re_search(regexes, value): """Return if any of regexes (list or str) searches successfully for value""" for regex in ensure_tuple_or_list(regexes): if re.search(regex, value): return True return False def not_supported_on_windows(msg=None): """A little helper to be invoked to consistently fail whenever functionality is not supported (yet) on Windows """ if on_windows: raise NotImplementedError("This functionality is not yet implemented for Windows OS" + (": %s" % msg if msg else "")) def get_home_envvars(new_home): """Return dict with env variables to be adjusted for a 
new HOME Only variables found in current os.environ are adjusted. Parameters ---------- new_home: str or Path New home path, in native to OS "schema" """ new_home = str(new_home) out = {'HOME': new_home} if on_windows: # requires special handling, since it has a number of relevant variables # and also Python changed its behavior and started to respect USERPROFILE only # since python 3.8: https://bugs.python.org/issue36264 out['USERPROFILE'] = new_home out['HOMEDRIVE'], out['HOMEPATH'] = splitdrive(new_home) return {v: val for v, val in out.items() if v in os.environ} def shortened_repr(value, l=30): try: if hasattr(value, '__repr__') and (value.__repr__ is not object.__repr__): value_repr = repr(value) if not value_repr.startswith('<') and len(value_repr) > l: value_repr = "<<%s++%d chars++%s>>" % ( value_repr[:l - 16], len(value_repr) - (l - 16 + 4), value_repr[-4:] ) elif value_repr.startswith('<') and value_repr.endswith('>') and ' object at 0x': raise ValueError("I hate those useless long reprs") else: raise ValueError("gimme class") except Exception as e: value_repr = "<%s>" % value.__class__.__name__.split('.')[-1] return value_repr def __auto_repr__(obj): attr_names = tuple() if hasattr(obj, '__dict__'): attr_names += tuple(obj.__dict__.keys()) if hasattr(obj, '__slots__'): attr_names += tuple(obj.__slots__) items = [] for attr in sorted(set(attr_names)): if attr.startswith('_'): continue value = getattr(obj, attr) # TODO: should we add this feature to minimize some talktative reprs # such as of URL? #if value is None: # continue items.append("%s=%s" % (attr, shortened_repr(value))) return "%s(%s)" % (obj.__class__.__name__, ', '.join(items)) def auto_repr(cls): """Decorator for a class to assign it an automagic quick and dirty __repr__ It uses public class attributes to prepare repr of a class Original idea: http://stackoverflow.com/a/27799004/1265472 """ cls.__repr__ = __auto_repr__ return cls def _is_stream_tty(stream): try: # TODO: check on windows if hasattr check would work correctly and # add value: return stream.isatty() except ValueError as exc: # Who knows why it is a ValueError, but let's try to be specific # If there is a problem with I/O - non-interactive, otherwise reraise if "I/O" in str(exc): return False raise def is_interactive(): """Return True if all in/outs are open and tty. Note that in a somewhat abnormal case where e.g. stdin is explicitly closed, and any operation on it would raise a `ValueError("I/O operation on closed file")` exception, this function would just return False, since the session cannot be used interactively. 
""" return all(_is_stream_tty(s) for s in (sys.stdin, sys.stdout, sys.stderr)) def get_ipython_shell(): """Detect if running within IPython and returns its `ip` (shell) object Returns None if not under ipython (no `get_ipython` function) """ try: return get_ipython() except NameError: return None def md5sum(filename): """Compute an MD5 sum for the given file """ from datalad.support.digests import Digester return Digester(digests=['md5'])(filename)['md5'] # unused in -core def sorted_files(path): """Return a (sorted) list of files under path """ return sorted(sum([[op.join(r, f)[len(path) + 1:] for f in files] for r, d, files in os.walk(path) if not '.git' in r], [])) _encoded_dirsep = r'\\' if on_windows else r'/' _VCS_REGEX = r'%s\.(?:git|gitattributes|svn|bzr|hg)(?:%s|$)' % ( _encoded_dirsep, _encoded_dirsep) _DATALAD_REGEX = r'%s\.(?:datalad)(?:%s|$)' % ( _encoded_dirsep, _encoded_dirsep) def find_files(regex, topdir=curdir, exclude=None, exclude_vcs=True, exclude_datalad=False, dirs=False): """Generator to find files matching regex Parameters ---------- regex: basestring exclude: basestring, optional Matches to exclude exclude_vcs: If True, excludes commonly known VCS subdirectories. If string, used as regex to exclude those files (regex: `%r`) exclude_datalad: If True, excludes files known to be datalad meta-data files (e.g. under .datalad/ subdirectory) (regex: `%r`) topdir: basestring, optional Directory where to search dirs: bool, optional Whether to match directories as well as files """ for dirpath, dirnames, filenames in os.walk(topdir): names = (dirnames + filenames) if dirs else filenames # TODO: might want to uniformize on windows to use '/' paths = (op.join(dirpath, name) for name in names) for path in filter(re.compile(regex).search, paths): path = path.rstrip(sep) if exclude and re.search(exclude, path): continue if exclude_vcs and re.search(_VCS_REGEX, path): continue if exclude_datalad and re.search(_DATALAD_REGEX, path): continue yield path find_files.__doc__ %= (_VCS_REGEX, _DATALAD_REGEX) def expandpath(path, force_absolute=True): """Expand all variables and user handles in a path. By default return an absolute path """ path = expandvars(expanduser(path)) if force_absolute: path = abspath(path) return path def posix_relpath(path, start=None): """Behave like os.path.relpath, but always return POSIX paths... on any platform.""" # join POSIX style return posixpath.join( # split and relpath native style # python2.7 ntpath implementation of relpath cannot handle start=None *split( relpath(path, start=start if start is not None else ''))) def is_explicit_path(path): """Return whether a path explicitly points to a location Any absolute path, or relative path starting with either '../' or './' is assumed to indicate a location on the filesystem. 
Any other path format is not considered explicit.""" path = expandpath(path, force_absolute=False) return isabs(path) \ or path.startswith(os.curdir + os.sep) \ or path.startswith(os.pardir + os.sep) # handle this dance once, and import pathlib from here # in all other places from pathlib import ( Path, PurePath, PurePosixPath, ) def rotree(path, ro=True, chmod_files=True): """To make tree read-only or writable Parameters ---------- path : string Path to the tree/directory to chmod ro : bool, optional Whether to make it R/O (default) or RW chmod_files : bool, optional Whether to operate also on files (not just directories) """ if ro: chmod = lambda f: os.chmod(f, os.stat(f).st_mode & ~stat.S_IWRITE) else: chmod = lambda f: os.chmod(f, os.stat(f).st_mode | stat.S_IWRITE | stat.S_IREAD) for root, dirs, files in os.walk(path, followlinks=False): if chmod_files: for f in files: fullf = op.join(root, f) # might be the "broken" symlink which would fail to stat etc if exists(fullf): chmod(fullf) chmod(root) def rmtree(path, chmod_files='auto', children_only=False, *args, **kwargs): """To remove git-annex .git it is needed to make all files and directories writable again first Parameters ---------- path: Path or str Path to remove chmod_files : string or bool, optional Whether to make files writable also before removal. Usually it is just a matter of directories to have write permissions. If 'auto' it would chmod files on windows by default children_only : bool, optional If set, all files and subdirectories would be removed while the path itself (must be a directory) would be preserved `*args` : `**kwargs` : Passed into shutil.rmtree call """ # Give W permissions back only to directories, no need to bother with files if chmod_files == 'auto': chmod_files = on_windows # TODO: yoh thinks that if we could quickly check our Flyweight for # repos if any of them is under the path, and could call .precommit # on those to possibly stop batched processes etc, we did not have # to do it on case by case # Check for open files assert_no_open_files(path) # TODO the whole thing should be reimplemented with pathlib, but for now # at least accept Path path = str(path) if children_only: if not isdir(path): raise ValueError("Can remove children only of directories") for p in os.listdir(path): rmtree(op.join(path, p)) return if not (islink(path) or not isdir(path)): rotree(path, ro=False, chmod_files=chmod_files) if on_windows: # shutil fails to remove paths that exceed 260 characters on Windows machines # that did not enable long path support. A workaround to remove long paths # anyway is to preprend \\?\ to the path. # https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file?redirectedfrom=MSDN#win32-file-namespaces path = r'\\?\ '.strip() + path _rmtree(path, *args, **kwargs) else: # just remove the symlink unlink(path) def rmdir(path, *args, **kwargs): """os.rmdir with our optional checking for open files""" assert_no_open_files(path) os.rmdir(path) def get_open_files(path, log_open=False): """Get open files under a path Note: This function is very slow on Windows. 
Parameters ---------- path : str File or directory to check for open files under log_open : bool or int If set - logger level to use Returns ------- dict path : pid """ # Original idea: https://stackoverflow.com/a/11115521/1265472 import psutil files = {} # since the ones returned by psutil would not be aware of symlinks in the # path we should also get realpath for path # do absolute() in addition to always get an absolute path # even with non-existing paths on windows path = str(Path(path).resolve().absolute()) for proc in psutil.process_iter(): try: open_paths = [p.path for p in proc.open_files()] + [proc.cwd()] for p in open_paths: # note: could be done more efficiently so we do not # renormalize path over and over again etc if path_startswith(p, path): files[p] = proc # Catch a race condition where a process ends # before we can examine its files except psutil.NoSuchProcess: pass except psutil.AccessDenied: pass if files and log_open: lgr.log(log_open, "Open files under %s: %s", path, files) return files _assert_no_open_files_cfg = os.environ.get('DATALAD_ASSERT_NO_OPEN_FILES') if _assert_no_open_files_cfg: def assert_no_open_files(path): files = get_open_files(path, log_open=40) if _assert_no_open_files_cfg == 'assert': assert not files, "Got following files still open: %s" % ','.join(files) elif files: if _assert_no_open_files_cfg == 'pdb': import pdb pdb.set_trace() elif _assert_no_open_files_cfg == 'epdb': import epdb epdb.serve() pass # otherwise we would just issue that error message in the log else: def assert_no_open_files(*args, **kwargs): pass def rmtemp(f, *args, **kwargs): """Wrapper to centralize removing of temp files so we could keep them around It will not remove the temporary file/directory if DATALAD_TESTS_TEMP_KEEP environment variable is defined """ if not os.environ.get('DATALAD_TESTS_TEMP_KEEP'): if not os.path.lexists(f): lgr.debug("Path %s does not exist, so can't be removed", f) return lgr.log(5, "Removing temp file: %s", f) # Can also be a directory if isdir(f): rmtree(f, *args, **kwargs) else: unlink(f) else: lgr.info("Keeping temp file: %s", f) def file_basename(name, return_ext=False): """ Strips up to 2 extensions of length up to 4 characters and starting with alpha not a digit, so we could get rid of .tar.gz etc """ bname = basename(name) fbname = re.sub(r'(\.[a-zA-Z_]\S{1,4}){0,2}$', '', bname) if return_ext: return fbname, bname[len(fbname) + 1:] else: return fbname # unused in -core def escape_filename(filename): """Surround filename in "" and escape " in the filename """ filename = filename.replace('"', r'\"').replace('`', r'\`') filename = '"%s"' % filename return filename # unused in -core def encode_filename(filename): """Encode unicode filename """ if isinstance(filename, str): return filename.encode(sys.getfilesystemencoding()) else: return filename # unused in -core def decode_input(s): """Given input string/bytes, decode according to stdin codepage (or UTF-8) if not defined If fails -- issue warning and decode allowing for errors being replaced """ if isinstance(s, str): return s else: encoding = sys.stdin.encoding or 'UTF-8' try: return s.decode(encoding) except UnicodeDecodeError as exc: lgr.warning( "Failed to decode input string using %s encoding. " "Decoding allowing for errors", encoding) return s.decode(encoding, errors='replace') # unused in -core if on_windows: def lmtime(filepath, mtime): """Set mtime for files. 
On Windows a merely adapter to os.utime """ os.utime(filepath, (time.time(), mtime)) else: def lmtime(filepath, mtime): """Set mtime for files, while not de-referencing symlinks. To overcome absence of os.lutime Works only on linux and OSX ATM """ from .cmd import WitlessRunner # convert mtime to format touch understands [[CC]YY]MMDDhhmm[.SS] smtime = time.strftime("%Y%m%d%H%M.%S", time.localtime(mtime)) lgr.log(3, "Setting mtime for %s to %s == %s", filepath, mtime, smtime) WitlessRunner().run(['touch', '-h', '-t', '%s' % smtime, filepath]) filepath = Path(filepath) rfilepath = filepath.resolve() if filepath.is_symlink() and rfilepath.exists(): # trust no one - adjust also of the target file # since it seemed like downloading under OSX (was it using curl?) # didn't bother with timestamps lgr.log(3, "File is a symlink to %s Setting mtime for it to %s", rfilepath, mtime) os.utime(str(rfilepath), (time.time(), mtime)) # doesn't work on OSX # Runner().run(['touch', '-h', '-d', '@%s' % mtime, filepath]) def ensure_tuple_or_list(obj): """Given an object, wrap into a tuple if not list or tuple """ if isinstance(obj, (list, tuple)): return obj return (obj,) def ensure_iter(s, cls, copy=False, iterate=True): """Given not a list, would place it into a list. If None - empty list is returned Parameters ---------- s: list or anything cls: class Which iterable class to ensure copy: bool, optional If correct iterable is passed, it would generate its shallow copy iterate: bool, optional If it is not a list, but something iterable (but not a str) iterate over it. """ if isinstance(s, cls): return s if not copy else shallow_copy(s) elif isinstance(s, str): return cls((s,)) elif iterate and hasattr(s, '__iter__'): return cls(s) elif s is None: return cls() else: return cls((s,)) def ensure_list(s, copy=False, iterate=True): """Given not a list, would place it into a list. If None - empty list is returned Parameters ---------- s: list or anything copy: bool, optional If list is passed, it would generate a shallow copy of the list iterate: bool, optional If it is not a list, but something iterable (but not a str) iterate over it. """ return ensure_iter(s, list, copy=copy, iterate=iterate) def ensure_list_from_str(s, sep='\n'): """Given a multiline string convert it to a list of return None if empty Parameters ---------- s: str or list """ if not s: return None if isinstance(s, list): return s return s.split(sep) def ensure_dict_from_str(s, **kwargs): """Given a multiline string with key=value items convert it to a dictionary Parameters ---------- s: str or dict Returns None if input s is empty """ if not s: return None if isinstance(s, dict): return s out = {} for value_str in ensure_list_from_str(s, **kwargs): if '=' not in value_str: raise ValueError("{} is not in key=value format".format(repr(value_str))) k, v = value_str.split('=', 1) if k in out: err = "key {} was already defined in {}, but new value {} was provided".format(k, out, v) raise ValueError(err) out[k] = v return out def ensure_bytes(s, encoding='utf-8'): """Convert/encode unicode string to bytes. If `s` isn't a string, return it as is. Parameters ---------- encoding: str, optional Encoding to use. "utf-8" is the default """ if not isinstance(s, str): return s return s.encode(encoding) def ensure_unicode(s, encoding=None, confidence=None): """Convert/decode bytestring to unicode. If `s` isn't a bytestring, return it as is. Parameters ---------- encoding: str, optional Encoding to use. 
If None, "utf-8" is tried, and then if not a valid UTF-8, encoding will be guessed confidence: float, optional A value between 0 and 1, so if guessing of encoding is of lower than specified confidence, ValueError is raised """ if not isinstance(s, bytes): return s if encoding is None: # Figure out encoding, defaulting to 'utf-8' which is our common # target in contemporary digital society try: return s.decode('utf-8') except UnicodeDecodeError as exc: lgr.debug("Failed to decode a string as utf-8: %s", CapturedException(exc)) # And now we could try to guess from chardet import detect enc = detect(s) denc = enc.get('encoding', None) if denc: denc_confidence = enc.get('confidence', 0) if confidence is not None and denc_confidence < confidence: raise ValueError( "Failed to auto-detect encoding with high enough " "confidence. Highest confidence was %s for %s" % (denc_confidence, denc) ) lgr.log(5, "Auto-detected encoding to be %s", denc) return s.decode(denc) else: raise ValueError( "Could not decode value as utf-8, or to guess its encoding: %s" % repr(s) ) else: return s.decode(encoding) def ensure_bool(s): """Convert value into boolean following convention for strings to recognize on,True,yes as True, off,False,no as False """ if isinstance(s, str): if s.isdigit(): return bool(int(s)) sl = s.lower() if sl in {'y', 'yes', 'true', 'on'}: return True elif sl in {'n', 'no', 'false', 'off'}: return False else: raise ValueError("Do not know how to treat %r as a boolean" % s) return bool(s) def as_unicode(val, cast_types=object): """Given an arbitrary value, would try to obtain unicode value of it For unicode it would return original value, for python2 str or python3 bytes it would use ensure_unicode, for None - an empty (unicode) string, and for any other type (see `cast_types`) - would apply the unicode constructor. If value is not an instance of `cast_types`, TypeError is thrown Parameters ---------- cast_types: type Which types to cast to unicode by providing to constructor """ if val is None: return u'' elif isinstance(val, str): return val elif isinstance(val, unicode_srctypes): return ensure_unicode(val) elif isinstance(val, cast_types): return str(val) else: raise TypeError( "Value %r is not of any of known or provided %s types" % (val, cast_types)) def unique(seq, key=None, reverse=False): """Given a sequence return a list only with unique elements while maintaining order This is the fastest solution. See https://www.peterbe.com/plog/uniqifiers-benchmark and http://stackoverflow.com/a/480227/1265472 for more information. Enhancement -- added ability to compare for uniqueness using a key function Parameters ---------- seq: Sequence to analyze key: callable, optional Function to call on each element so we could decide not on a full element, but on its member etc reverse: bool, optional If True, uniqueness checked in the reverse order, so that the later ones will take the order """ seen = set() seen_add = seen.add trans = reversed if reverse else lambda x: x if not key: out = [x for x in trans(seq) if not (x in seen or seen_add(x))] else: # OPT: could be optimized, since key is called twice, but for our cases # should be just as fine out = [x for x in trans(seq) if not (key(x) in seen or seen_add(key(x)))] return out[::-1] if reverse else out def all_same(items): """Quick check if all items are the same. 
Identical to a check like len(set(items)) == 1 but should be more efficient while working on generators, since would return False as soon as any difference detected thus possibly avoiding unnecessary evaluations """ first = True first_item = None for item in items: if first: first = False first_item = item else: if item != first_item: return False # So we return False if was empty return not first def map_items(func, v): """A helper to apply `func` to all elements (keys and values) within dict No type checking of values passed to func is done, so `func` should be resilient to values which it should not handle Initial usecase - apply_recursive(url_fragment, ensure_unicode) """ # map all elements within item return v.__class__( item.__class__(map(func, item)) for item in v.items() ) def partition(items, predicate=bool): """Partition `items` by `predicate`. Parameters ---------- items : iterable predicate : callable A function that will be mapped over each element in `items`. The elements will partitioned based on whether the return value is false or true. Returns ------- A tuple with two generators, the first for 'false' items and the second for 'true' ones. Notes ----- Taken from Peter Otten's snippet posted at https://nedbatchelder.com/blog/201306/filter_a_list_into_two_parts.html """ a, b = tee((predicate(item), item) for item in items) return ((item for pred, item in a if not pred), (item for pred, item in b if pred)) def generate_chunks(container, size): """Given a container, generate chunks from it with size up to `size` """ # There could be a "smarter" solution but I think this would suffice assert size > 0, "Size should be non-0 positive" while container: yield container[:size] container = container[size:] def generate_file_chunks(files, cmd=None): """Given a list of files, generate chunks of them to avoid exceeding cmdline length Parameters ---------- files: list of str cmd: str or list of str, optional Command to account for as well """ files = ensure_list(files) cmd = ensure_list(cmd) maxl = max(map(len, files)) if files else 0 chunk_size = max( 1, # should at least be 1. If blows then - not our fault (CMD_MAX_ARG - sum((len(x) + 3) for x in cmd) - 4 # for '--' below ) // (maxl + 3) # +3 for possible quotes and a space ) # TODO: additional treatment for "too many arguments"? although # as https://github.com/datalad/datalad/issues/1883#issuecomment # -436272758 # shows there seems to be no hardcoded limit on # of arguments, # but may be we decide to go for smth like follow to be on safe side # chunk_size = min(10240 - len(cmd), chunk_size) file_chunks = generate_chunks(files, chunk_size) return file_chunks # # Generators helpers # def saved_generator(gen): """Given a generator returns two generators, where 2nd one just replays So the first one would be going through the generated items and 2nd one would be yielding saved items """ saved = [] def gen1(): for x in gen: # iterating over original generator saved.append(x) yield x def gen2(): for x in saved: # yielding saved entries yield x return gen1(), gen2() # # Decorators # # Originally better_wraps was created to provide `wrapt`-based, instead of # `functools.wraps` implementation to preserve the correct signature of the # decorated function. By using inspect.signature in our getargspec, which # works fine on `functools.wraps`ed functions, we mediated this necessity. better_wraps = wraps # Borrowed from pandas # Copyright: 2011-2014, Lambda Foundry, Inc. 
and PyData Development Team # License: BSD-3 def optional_args(decorator): """allows a decorator to take optional positional and keyword arguments. Assumes that taking a single, callable, positional argument means that it is decorating a function, i.e. something like this:: @my_decorator def function(): pass Calls decorator with decorator(f, `*args`, `**kwargs`)""" @better_wraps(decorator) def wrapper(*args, **kwargs): def dec(f): return decorator(f, *args, **kwargs) is_decorating = not kwargs and len(args) == 1 and isinstance(args[0], Callable) if is_decorating: f = args[0] args = [] return dec(f) else: return dec return wrapper # TODO: just provide decorators for tempfile.mk* functions. This is ugly! def get_tempfile_kwargs(tkwargs=None, prefix="", wrapped=None): """Updates kwargs to be passed to tempfile. calls depending on env vars """ if tkwargs is None: tkwargs_ = {} else: # operate on a copy of tkwargs to avoid any side-effects tkwargs_ = tkwargs.copy() # TODO: don't remember why I had this one originally # if len(targs)<2 and \ if 'prefix' not in tkwargs_: tkwargs_['prefix'] = '_'.join( ['datalad_temp'] + ([prefix] if prefix else []) + ([''] if (on_windows or not wrapped) else [wrapped.__name__])) directory = os.environ.get('TMPDIR') if directory and 'dir' not in tkwargs_: tkwargs_['dir'] = directory return tkwargs_ @optional_args def line_profile(func): """Q&D helper to line profile the function and spit out stats """ import line_profiler prof = line_profiler.LineProfiler() @wraps(func) def _wrap_line_profile(*args, **kwargs): try: pfunc = prof(func) return pfunc(*args, **kwargs) finally: prof.print_stats() return _wrap_line_profile # unused in -core @optional_args def collect_method_callstats(func): """Figure out methods which call the method repeatedly on the same instance Use case(s): - .repo is expensive since does all kinds of checks. - .config is expensive transitively since it calls .repo each time TODO: - fancy one could look through the stack for the same id(self) to see if that location is already in memo. That would hint to the cases where object is not passed into underlying functions, causing them to redo the same work over and over again - ATM might flood with all "1 lines" calls which are not that informative. The underlying possibly suboptimal use might be coming from their callers. 
It might or not relate to the previous TODO """ from collections import defaultdict import traceback from time import time memo = defaultdict(lambda: defaultdict(int)) # it will be a dict of lineno: count # gross timing times = [] toppath = dirname(__file__) + sep @wraps(func) def _wrap_collect_method_callstats(*args, **kwargs): try: self = args[0] stack = traceback.extract_stack() caller = stack[-2] stack_sig = \ "{relpath}:{s.name}".format( s=caller, relpath=relpath(caller.filename, toppath)) sig = (id(self), stack_sig) # we will count based on id(self) + wherefrom memo[sig][caller.lineno] += 1 t0 = time() return func(*args, **kwargs) finally: times.append(time() - t0) pass def print_stats(): print("The cost of property {}:".format(func.__name__)) if not memo: print("None since no calls") return # total count counts = {k: sum(v.values()) for k,v in memo.items()} total = sum(counts.values()) ids = {self_id for (self_id, _) in memo} print(" Total: {} calls from {} objects with {} contexts taking {:.2f} sec" .format(total, len(ids), len(memo), sum(times))) # now we need to sort by value for (self_id, caller), count in sorted(counts.items(), key=lambda x: x[1], reverse=True): print(" {} {}: {} from {} lines" .format(self_id, caller, count, len(memo[(self_id, caller)]))) # Upon total exit we print the stats import atexit atexit.register(print_stats) return _wrap_collect_method_callstats # Borrowed from duecredit to wrap duecredit-handling to guarantee failsafe def never_fail(f): """Assure that function never fails -- all exceptions are caught Returns `None` if function fails internally. """ @wraps(f) def wrapped_func(*args, **kwargs): try: return f(*args, **kwargs) except Exception as e: lgr.warning( "DataLad internal failure while running %s: %r. " "Please report at https://github.com/datalad/datalad/issues" % (f, e) ) if os.environ.get('DATALAD_ALLOW_FAIL', False): return f else: return wrapped_func # # Context Managers # # unused in -core @contextmanager def nothing_cm(): """Just a dummy cm to programmically switch context managers""" yield @contextmanager def swallow_outputs(): """Context manager to help consuming both stdout and stderr, and print() stdout is available as cm.out and stderr as cm.err whenever cm is the yielded context manager. Internally uses temporary files to guarantee absent side-effects of swallowing into StringIO which lacks .fileno. print mocking is necessary for some uses where sys.stdout was already bound to original sys.stdout, thus mocking it later had no effect. 
Overriding print function had desired effect """ class StringIOAdapter(object): """Little adapter to help getting out/err values """ def __init__(self): kw = get_tempfile_kwargs({}, prefix="outputs") self._out = NamedTemporaryFile(delete=False, mode='w', **kw) self._err = NamedTemporaryFile(delete=False, mode='w', **kw) def _read(self, h): with open(h.name) as f: return f.read() @property def out(self): if not self._out.closed: self._out.flush() return self._read(self._out) @property def err(self): if not self._err.closed: self._err.flush() return self._read(self._err) @property def handles(self): return self._out, self._err def cleanup(self): self._out.close() self._err.close() out_name = self._out.name err_name = self._err.name from datalad import cfg if cfg.getbool('datalad.log', 'outputs', default=False) \ and lgr.getEffectiveLevel() <= logging.DEBUG: for s, sname in ((self.out, 'stdout'), (self.err, 'stderr')): if s: pref = os.linesep + "| " lgr.debug("Swallowed %s:%s%s", sname, pref, s.replace(os.linesep, pref)) else: lgr.debug("Nothing was swallowed for %s", sname) del self._out del self._err gc.collect() rmtemp(out_name) rmtemp(err_name) def fake_print(*args, **kwargs): sep = kwargs.pop('sep', ' ') end = kwargs.pop('end', '\n') file = kwargs.pop('file', sys.stdout) if file in (oldout, olderr, sys.stdout, sys.stderr): # we mock try: sys.stdout.write(sep.join(args) + end) except UnicodeEncodeError as exc: lgr.error( "Failed to write to mocked stdout, got %s, continue as it " "didn't happen", exc) else: # must be some other file one -- leave it alone oldprint(*args, sep=sep, end=end, file=file) from .ui import ui # preserve -- they could have been mocked already oldprint = getattr(builtins, 'print') oldout, olderr = sys.stdout, sys.stderr olduiout = ui.out adapter = StringIOAdapter() try: sys.stdout, sys.stderr = adapter.handles ui.out = adapter.handles[0] setattr(builtins, 'print', fake_print) yield adapter finally: sys.stdout, sys.stderr, ui.out = oldout, olderr, olduiout setattr(builtins, 'print', oldprint) adapter.cleanup() @contextmanager def swallow_logs(new_level=None, file_=None, name='datalad'): """Context manager to consume all logs. """ lgr = logging.getLogger(name) # Keep old settings old_level = lgr.level old_handlers = lgr.handlers # Let's log everything into a string # TODO: generalize with the one for swallow_outputs class StringIOAdapter(object): """Little adapter to help getting out values And to stay consistent with how swallow_outputs behaves """ def __init__(self): if file_ is None: kw = get_tempfile_kwargs({}, prefix="logs") self._out = NamedTemporaryFile(mode='a', delete=False, **kw) else: out_file = file_ # PY3 requires clearly one or another. race condition possible self._out = open(out_file, 'a') self._final_out = None def _read(self, h): with open(h.name) as f: return f.read() @property def out(self): if self._final_out is not None: # we closed and cleaned up already return self._final_out else: self._out.flush() return self._read(self._out) @property def lines(self): return self.out.split('\n') @property def handle(self): return self._out def cleanup(self): # store for access while object exists self._final_out = self.out self._out.close() out_name = self._out.name del self._out gc.collect() if not file_: rmtemp(out_name) def assert_logged(self, msg=None, level=None, regex=True, **kwargs): """Provide assertion on whether a msg was logged at a given level If neither `msg` nor `level` provided, checks if anything was logged at all. 
Parameters ---------- msg: str, optional Message (as a regular expression, if `regex`) to be searched. If no msg provided, checks if anything was logged at a given level. level: str, optional String representing the level to be logged regex: bool, optional If False, regular `assert_in` is used **kwargs: str, optional Passed to `assert_re_in` or `assert_in` """ from datalad.tests.utils import assert_re_in from datalad.tests.utils import assert_in if regex: match = r'\[%s\] ' % level if level else r"\[\S+\] " else: match = '[%s] ' % level if level else '' if msg: match += msg if match: (assert_re_in if regex else assert_in)(match, self.out, **kwargs) else: assert not kwargs, "no kwargs to be passed anywhere" assert self.out, "Nothing was logged!?" adapter = StringIOAdapter() # TODO: it does store messages but without any formatting, i.e. even without # date/time prefix etc. IMHO it should preserve formatting in case if file_ is # set swallow_handler = logging.StreamHandler(adapter.handle) # we want to log levelname so we could test against it swallow_handler.setFormatter( logging.Formatter('[%(levelname)s] %(message)s')) swallow_handler.filters = sum([h.filters for h in old_handlers], []) lgr.handlers = [swallow_handler] if old_level < logging.DEBUG: # so if HEAVYDEBUG etc -- show them! lgr.handlers += old_handlers if isinstance(new_level, str): new_level = getattr(logging, new_level) if new_level is not None: lgr.setLevel(new_level) try: yield adapter # TODO: if file_ and there was an exception -- most probably worth logging it? # although ideally it should be the next log outside added to that file_ ... oh well finally: lgr.handlers = old_handlers lgr.setLevel(old_level) adapter.cleanup() # TODO: May be melt in with swallow_logs at some point: @contextmanager def disable_logger(logger=None): """context manager to temporarily disable logging This is to provide one of swallow_logs' purposes without unnecessarily creating temp files (see gh-1865) Parameters ---------- logger: Logger Logger whose handlers will be ordered to not log anything. Default: datalad's topmost Logger ('datalad') """ class NullFilter(logging.Filter): """Filter class to reject all records """ def filter(self, record): return 0 if logger is None: # default: all of datalad's logging: logger = logging.getLogger('datalad') filter_ = NullFilter(logger.name) [h.addFilter(filter_) for h in logger.handlers] try: yield logger finally: [h.removeFilter(filter_) for h in logger.handlers] # # Additional handlers # _sys_excepthook = sys.excepthook # Just in case we ever need original one def setup_exceptionhook(ipython=False): """Overloads default sys.excepthook with our exceptionhook handler. If interactive, our exceptionhook handler will invoke pdb.post_mortem; if not interactive, then invokes default handler. """ def _datalad_pdb_excepthook(type, value, tb): import traceback traceback.print_exception(type, value, tb) print() if is_interactive(): import pdb pdb.post_mortem(tb) if ipython: from IPython.core import ultratb sys.excepthook = ultratb.FormattedTB(mode='Verbose', # color_scheme='Linux', call_pdb=is_interactive()) else: sys.excepthook = _datalad_pdb_excepthook def ensure_dir(*args): """Make sure directory exists. Joins the list of arguments to an os-specific path to the desired directory and creates it, if it not exists yet. 
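
    Examples
    --------
    A minimal illustrative sketch -- the path components below are made up::

        ensure_dir('/tmp', 'datalad', 'logs')  # -> '/tmp/datalad/logs', created if missing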
""" dirname = op.join(*args) if not exists(dirname): os.makedirs(dirname) return dirname def updated(d, update): """Return a copy of the input with the 'update' Primarily for updating dictionaries """ d = d.copy() d.update(update) return d _pwd_mode = None def _switch_to_getcwd(msg, *args): global _pwd_mode _pwd_mode = 'cwd' lgr.debug( msg + ". From now on will be returning os.getcwd(). Directory" " symlinks in the paths will be resolved", *args ) # TODO: we might want to mitigate by going through all flywheighted # repos and tuning up their .paths to be resolved? def getpwd(): """Try to return a CWD without dereferencing possible symlinks This function will try to use PWD environment variable to provide a current working directory, possibly with some directories along the path being symlinks to other directories. Unfortunately, PWD is used/set only by the shell and such functions as `os.chdir` and `os.getcwd` nohow use or modify it, thus `os.getcwd()` returns path with links dereferenced. While returning current working directory based on PWD env variable we verify that the directory is the same as `os.getcwd()` after resolving all symlinks. If that verification fails, we fall back to always use `os.getcwd()`. Initial decision to either use PWD env variable or os.getcwd() is done upon the first call of this function. """ global _pwd_mode if _pwd_mode is None: # we need to decide! try: pwd = os.environ['PWD'] if on_windows and pwd and pwd.startswith('/'): # It should be a path from MSYS. # - it might start with a drive letter or not # - it seems to be "illegal" to have a single letter directories # under / path, i.e. if created - they aren't found # - 'ln -s' does not fail to create a "symlink" but it just # copies! # so we are not likely to need original PWD purpose on # those systems # Verdict: _pwd_mode = 'cwd' else: _pwd_mode = 'PWD' except KeyError: _pwd_mode = 'cwd' if _pwd_mode == 'cwd': return os.getcwd() elif _pwd_mode == 'PWD': try: cwd = os.getcwd() except OSError as exc: if "o such file" in str(exc): # directory was removed but we promised to be robust and # still report the path we might know since we are still in PWD # mode cwd = None else: raise try: pwd = os.environ['PWD'] # do absolute() in addition to always get an absolute path # even with non-existing paths on windows pwd_real = str(Path(pwd).resolve().absolute()) # This logic would fail to catch the case where chdir did happen # to the directory where current PWD is pointing to, e.g. # $> ls -ld $PWD # lrwxrwxrwx 1 yoh yoh 5 Oct 11 13:27 /home/yoh/.tmp/tmp -> /tmp// # hopa:~/.tmp/tmp # $> python -c 'import os; os.chdir("/tmp"); from datalad.utils import getpwd; print(getpwd(), os.getcwd())' # ('/home/yoh/.tmp/tmp', '/tmp') # but I guess that should not be too harmful if cwd is not None and pwd_real != cwd: _switch_to_getcwd( "realpath of PWD=%s is %s whenever os.getcwd()=%s", pwd, pwd_real, cwd ) return cwd return pwd except KeyError: _switch_to_getcwd("PWD env variable is no longer available") return cwd # Must not happen, but may be someone # evil purges PWD from environ? else: raise RuntimeError( "Must have not got here. " "pwd_mode must be either cwd or PWD. And it is now %r" % (_pwd_mode,) ) class chpwd(object): """Wrapper around os.chdir which also adjusts environ['PWD'] The reason is that otherwise PWD is simply inherited from the shell and we have no ability to assess directory path without dereferencing symlinks. 
If used as a context manager it allows to temporarily change directory to the given path """ def __init__(self, path, mkdir=False, logsuffix=''): if path: pwd = getpwd() self._prev_pwd = pwd else: self._prev_pwd = None return if not isabs(path): path = normpath(op.join(pwd, path)) if not os.path.exists(path) and mkdir: self._mkdir = True os.mkdir(path) else: self._mkdir = False lgr.debug("chdir %r -> %r %s", self._prev_pwd, path, logsuffix) os.chdir(path) # for grep people -- ok, to chdir here! os.environ['PWD'] = str(path) def __enter__(self): # nothing more to do really, chdir was in the constructor pass def __exit__(self, exc_type, exc_val, exc_tb): if self._prev_pwd: # Need to use self.__class__ so this instance, if the entire # thing mocked during the test, still would use correct chpwd self.__class__(self._prev_pwd, logsuffix="(coming back)") def dlabspath(path, norm=False): """Symlinks-in-the-cwd aware abspath os.path.abspath relies on os.getcwd() which would not know about symlinks in the path TODO: we might want to norm=True by default to match behavior of os .path.abspath? """ if not isabs(path): # if not absolute -- relative to pwd path = op.join(getpwd(), path) return normpath(path) if norm else path def with_pathsep(path): """Little helper to guarantee that path ends with /""" return path + sep if not path.endswith(sep) else path def get_path_prefix(path, pwd=None): """Get path prefix (for current directory) Returns relative path to the topdir, if we are under topdir, and if not absolute path to topdir. If `pwd` is not specified - current directory assumed """ pwd = pwd or getpwd() path = dlabspath(path) path_ = with_pathsep(path) pwd_ = with_pathsep(pwd) common = commonprefix((path_, pwd_)) if common.endswith(sep) and common in {path_, pwd_}: # we are in subdir or above the path = use relative path location_prefix = relpath(path, pwd) # if benign "here" - cut off if location_prefix in (curdir, curdir + sep): location_prefix = '' return location_prefix else: # just return absolute path return path def _get_normalized_paths(path, prefix): if isabs(path) != isabs(prefix): raise ValueError("Both paths must either be absolute or relative. " "Got %r and %r" % (path, prefix)) path = with_pathsep(path) prefix = with_pathsep(prefix) return path, prefix def path_startswith(path, prefix): """Return True if path starts with prefix path Parameters ---------- path: str prefix: str """ path, prefix = _get_normalized_paths(path, prefix) return path.startswith(prefix) def path_is_subpath(path, prefix): """Return True if path is a subpath of prefix It will return False if path == prefix. Parameters ---------- path: str prefix: str """ path, prefix = _get_normalized_paths(path, prefix) return (len(prefix) < len(path)) and path.startswith(prefix) def knows_annex(path): """Returns whether at a given path there is information about an annex It is just a thin wrapper around GitRepo.is_with_annex() classmethod which also checks for `path` to exist first. This includes actually present annexes, but also uninitialized ones, or even the presence of a remote annex branch. 
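
    Examples
    --------
    Illustrative only -- ``/path/to/repo`` is a placeholder::

        if knows_annex('/path/to/repo'):
            pass  # annex information (possibly uninitialized) is present here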
""" from os.path import exists if not exists(path): lgr.debug("No annex: test path {0} doesn't exist".format(path)) return False from datalad.support.gitrepo import GitRepo return GitRepo(path, init=False, create=False).is_with_annex() @contextmanager def make_tempfile(content=None, wrapped=None, **tkwargs): """Helper class to provide a temporary file name and remove it at the end (context manager) Parameters ---------- mkdir : bool, optional (default: False) If True, temporary directory created using tempfile.mkdtemp() content : str or bytes, optional Content to be stored in the file created wrapped : function, optional If set, function name used to prefix temporary file name `**tkwargs`: All other arguments are passed into the call to tempfile.mk{,d}temp(), and resultant temporary filename is passed as the first argument into the function t. If no 'prefix' argument is provided, it will be constructed using module and function names ('.' replaced with '_'). To change the used directory without providing keyword argument 'dir' set DATALAD_TESTS_TEMP_DIR. Examples -------- >>> from os.path import exists >>> from datalad.utils import make_tempfile >>> with make_tempfile() as fname: ... k = open(fname, 'w').write('silly test') >>> assert not exists(fname) # was removed >>> with make_tempfile(content="blah") as fname: ... assert open(fname).read() == "blah" """ if tkwargs.get('mkdir', None) and content is not None: raise ValueError("mkdir=True while providing content makes no sense") tkwargs_ = get_tempfile_kwargs(tkwargs, wrapped=wrapped) # if DATALAD_TESTS_TEMP_DIR is set, use that as directory, # let mktemp handle it otherwise. However, an explicitly provided # dir=... will override this. mkdir = tkwargs_.pop('mkdir', False) filename = {False: tempfile.mktemp, True: tempfile.mkdtemp}[mkdir](**tkwargs_) # MIH: not clear to me why we need to perform this (possibly expensive) # resolve. It was already part of the original implementation # 008d9ab8cc3e0170c0a9b8479e80dee9ffe6eb7f filename = Path(filename).resolve() if content: (filename.write_bytes if isinstance(content, bytes) else filename.write_text)(content) # TODO globbing below can also be done with pathlib filename = str(filename) if __debug__: lgr.debug( 'Created temporary %s named %s', 'directory' if mkdir else 'file', filename) try: yield filename finally: # glob here for all files with the same name (-suffix) # would be useful whenever we requested .img filename, # and function creates .hdr as well # MIH: this is undocumented behavior, and undesired in the general # case. it should be made conditional and explicit lsuffix = len(tkwargs_.get('suffix', '')) filename_ = lsuffix and filename[:-lsuffix] or filename filenames = glob.glob(filename_ + '*') if len(filename_) < 3 or len(filenames) > 5: # For paranoid yoh who stepped into this already ones ;-) lgr.warning("It is unlikely that it was intended to remove all" " files matching %r. 
Skipping" % filename_) return for f in filenames: try: rmtemp(f) except OSError: # pragma: no cover pass def _path_(*p): """Given a path in POSIX" notation, regenerate one in native to the env one""" if on_windows: return op.join(*map(lambda x: op.join(*x.split('/')), p)) else: # Assume that all others as POSIX compliant so nothing to be done return op.join(*p) def get_timestamp_suffix(time_=None, prefix='-'): """Return a time stamp (full date and time up to second) primarily to be used for generation of log files names """ args = [] if time_ is not None: if isinstance(time_, int): time_ = time.gmtime(time_) args.append(time_) return time.strftime(prefix + TIMESTAMP_FMT, *args) # unused in -core def get_logfilename(dspath, cmd='datalad'): """Return a filename to use for logging under a dataset/repository directory would be created if doesn't exist, but dspath must exist and be a directory """ assert(exists(dspath)) assert(isdir(dspath)) ds_logdir = ensure_dir(dspath, '.git', 'datalad', 'logs') # TODO: use WEB_META_LOG whenever #789 merged return op.join(ds_logdir, 'crawl-%s.log' % get_timestamp_suffix()) def get_trace(edges, start, end, trace=None): """Return the trace/path to reach a node in a tree. Parameters ---------- edges : sequence(2-tuple) The tree given by a sequence of edges (parent, child) tuples. The nodes can be identified by any value and data type that supports the '==' operation. start : Identifier of the start node. Must be present as a value in the parent location of an edge tuple in order to be found. end : Identifier of the target/end node. Must be present as a value in the child location of an edge tuple in order to be found. trace : list Mostly useful for recursive calls, and used internally. Returns ------- None or list Returns a list with the trace to the target (the starts and the target are not included in the trace, hence if start and end are directly connected an empty list is returned), or None when no trace to the target can be found, or start and end are identical. """ # the term trace is used to avoid confusion with a path in the sense # of a filesystem path, but the analogy fits and nodes can be paths if trace is None: trace = [] if not edges: raise ValueError("no edges given") for cand in edges: cand_super, cand_sub = cand if cand_sub in trace: # only DAGs, skip any cyclic traces continue if trace and cand_super != trace[-1]: # only consider edges that lead off the end of the trace continue if not trace and cand_super != start: # we got nothing yet, and this edges is not matching the start continue if cand_sub == end: return trace # dive into potential subnodes cand_trace = get_trace( edges, start, end, trace + [cand_sub]) if cand_trace: return cand_trace return None def get_dataset_root(path): """Return the root of an existent dataset containing a given path The root path is returned in the same absolute or relative form as the input argument. If no associated dataset exists, or the input path doesn't exist, None is returned. If `path` is a symlink or something other than a directory, its the root dataset containing its parent directory will be reported. If none can be found, at a symlink at `path` is pointing to a dataset, `path` itself will be reported as the root. 
Parameters ---------- path : Path-like Returns ------- str or None """ path = str(path) suffix = '.git' altered = None if islink(path) or not isdir(path): altered = path path = dirname(path) apath = abspath(path) # while we can still go up while split(apath)[1]: if exists(op.join(path, suffix)): return path # new test path in the format we got it path = normpath(op.join(path, os.pardir)) # no luck, next round apath = abspath(path) # if we applied dirname() at the top, we give it another go with # the actual path, if it was itself a symlink, it could be the # top-level dataset itself if altered and exists(op.join(altered, suffix)): return altered return None # ATM used in datalad_crawler extension, so do not remove yet def try_multiple(ntrials, exception, base, f, *args, **kwargs): """Call f multiple times making exponentially growing delay between the calls""" for trial in range(1, ntrials+1): try: return f(*args, **kwargs) except exception as exc: if trial == ntrials: raise # just reraise on the last trial t = base ** trial lgr.warning("Caught %s on trial #%d. Sleeping %f and retrying", CapturedException(exc), trial, t) sleep(t) @optional_args def try_multiple_dec( f, ntrials=None, duration=0.1, exceptions=None, increment_type=None, exceptions_filter=None, logger=None, ): """Decorator to try function multiple times. Main purpose is to decorate functions dealing with removal of files/directories and which might need a few seconds to work correctly on Windows which takes its time to release files/directories. Parameters ---------- ntrials: int, optional duration: float, optional Seconds to sleep before retrying. increment_type: {None, 'exponential'} Note that if it is exponential, duration should typically be > 1.0 so it grows with higher power exceptions: Exception or tuple of Exceptions, optional Exception or a tuple of multiple exceptions, on which to retry exceptions_filter: callable, optional If provided, this function will be called with a caught exception instance. If function returns True - we will re-try, if False - exception will be re-raised without retrying. logger: callable, optional Logger to log upon failure. If not provided, will use stock logger at the level of 5 (heavy debug). """ if not exceptions: exceptions = (OSError, WindowsError, PermissionError) \ if on_windows else OSError if not ntrials: # Life goes fast on proper systems, no need to delay it much ntrials = 100 if on_windows else 10 if logger is None: def logger(*args, **kwargs): return lgr.log(5, *args, **kwargs) assert increment_type in {None, 'exponential'} @wraps(f) def _wrap_try_multiple_dec(*args, **kwargs): t = duration for trial in range(ntrials): try: return f(*args, **kwargs) except exceptions as exc: if exceptions_filter and not exceptions_filter(exc): raise if trial < ntrials - 1: if increment_type == 'exponential': t = duration ** (trial + 1) logger( "Caught %s on trial #%d. Sleeping %f and retrying", CapturedException(exc), trial, t) sleep(t) else: raise return _wrap_try_multiple_dec @try_multiple_dec def unlink(f): """'Robust' unlink. Would try multiple times On windows boxes there is evidence for a latency of more than a second until a file is considered no longer "in-use". WindowsError is not known on Linux, and if IOError or any other exception is thrown then if except statement has WindowsError in it -- NameError also see gh-2533 """ # Check for open files assert_no_open_files(f) return os.unlink(f) @try_multiple_dec def _rmtree(*args, **kwargs): """Just a helper to decorate shutil.rmtree. 
rmtree defined above does more and ideally should not itself be decorated since a recursive definition and does checks for open files inside etc - might be too runtime expensive """ return shutil.rmtree(*args, **kwargs) def slash_join(base, extension): """Join two strings with a '/', avoiding duplicate slashes If any of the strings is None the other is returned as is. """ if extension is None: return base if base is None: return extension return '/'.join( (base.rstrip('/'), extension.lstrip('/'))) # # IO Helpers # # unused in -core def open_r_encdetect(fname, readahead=1000): """Return a file object in read mode with auto-detected encoding This is helpful when dealing with files of unknown encoding. Parameters ---------- readahead: int, optional How many bytes to read for guessing the encoding type. If negative - full file will be read """ from chardet import detect import io # read some bytes from the file with open(fname, 'rb') as f: head = f.read(readahead) enc = detect(head) denc = enc.get('encoding', None) lgr.debug("Auto-detected encoding %s for file %s (confidence: %s)", denc, fname, enc.get('confidence', 'unknown')) return io.open(fname, encoding=denc) def read_file(fname, decode=True): """A helper to read file passing content via ensure_unicode Parameters ---------- decode: bool, optional if False, no ensure_unicode and file content returned as bytes """ with open(fname, 'rb') as f: content = f.read() return ensure_unicode(content) if decode else content def read_csv_lines(fname, dialect=None, readahead=16384, **kwargs): """A generator of dict records from a CSV/TSV Automatically guesses the encoding for each record to convert to UTF-8 Parameters ---------- fname: str Filename dialect: str, optional Dialect to specify to csv.reader. If not specified -- guessed from the file, if fails to guess, "excel-tab" is assumed readahead: int, optional How many bytes to read from the file to guess the type **kwargs Passed to `csv.reader` """ import csv if dialect is None: with open(fname) as tsvfile: # add robustness, use a sniffer try: dialect = csv.Sniffer().sniff(tsvfile.read(readahead)) except Exception as exc: lgr.warning( 'Could not determine file-format, assuming TSV: %s', CapturedException(exc) ) dialect = 'excel-tab' kw = dict(encoding='utf-8') with open(fname, 'r', **kw) as tsvfile: # csv.py doesn't do Unicode; encode temporarily as UTF-8: csv_reader = csv.reader( tsvfile, dialect=dialect, **kwargs ) header = None for row in csv_reader: # decode UTF-8 back to Unicode, cell by cell: row_unicode = map(ensure_unicode, row) if header is None: header = list(row_unicode) else: yield dict(zip(header, row_unicode)) def import_modules(modnames, pkg, msg="Failed to import {module}", log=lgr.debug): """Helper to import a list of modules without failing if N/A Parameters ---------- modnames: list of str List of module names to import pkg: str Package under which to import msg: str, optional Message template for .format() to log at DEBUG level if import fails. Keys {module} and {package} will be provided and ': {exception}' appended log: callable, optional Logger call to use for logging messages """ from importlib import import_module _globals = globals() mods_loaded = [] if pkg and not pkg in sys.modules: # with python 3.5.1 (ok with 3.5.5) somehow kept running into # Failed to import dlsub1: Parent module 'dltestm1' not loaded # while running the test. 
Preloading pkg resolved the issue import_module(pkg) for modname in modnames: try: _globals[modname] = mod = import_module( '.{}'.format(modname), pkg) mods_loaded.append(mod) except Exception as exc: from datalad.support.exceptions import CapturedException ce = CapturedException(exc) log((msg + ': {exception}').format( module=modname, package=pkg, exception=ce.message)) return mods_loaded def import_module_from_file(modpath, pkg=None, log=lgr.debug): """Import provided module given a path TODO: - RF/make use of it in pipeline.py which has similar logic - join with import_modules above? Parameters ---------- pkg: module, optional If provided, and modpath is under pkg.__path__, relative import will be used """ assert(modpath.endswith('.py')) # for now just for .py files log("Importing %s" % modpath) modname = basename(modpath)[:-3] relmodpath = None if pkg: for pkgpath in pkg.__path__: if path_is_subpath(modpath, pkgpath): # for now relying on having .py extension -- assertion above relmodpath = '.' + relpath(modpath[:-3], pkgpath).replace(sep, '.') break try: if relmodpath: from importlib import import_module mod = import_module(relmodpath, pkg.__name__) else: dirname_ = dirname(modpath) try: sys.path.insert(0, dirname_) mod = __import__(modname, level=0) finally: if dirname_ in sys.path: sys.path.pop(sys.path.index(dirname_)) else: log("Expected path %s to be within sys.path, but it was gone!" % dirname_) except Exception as e: raise RuntimeError( "Failed to import module from %s" % modpath) from e return mod def get_encoding_info(): """Return a dictionary with various encoding/locale information""" import sys, locale from collections import OrderedDict return OrderedDict([ ('default', sys.getdefaultencoding()), ('filesystem', sys.getfilesystemencoding()), ('locale.prefered', locale.getpreferredencoding()), ]) def get_envvars_info(): from collections import OrderedDict envs = [] for var, val in os.environ.items(): if ( var.startswith('PYTHON') or var.startswith('LC_') or var.startswith('GIT_') or var in ('LANG', 'LANGUAGE', 'PATH') ): envs.append((var, val)) return OrderedDict(envs) # This class is modified from Snakemake (v5.1.4) class SequenceFormatter(string.Formatter): """string.Formatter subclass with special behavior for sequences. This class delegates formatting of individual elements to another formatter object. Non-list objects are formatted by calling the delegate formatter's "format_field" method. List-like objects (list, tuple, set, frozenset) are formatted by formatting each element of the list according to the specified format spec using the delegate formatter and then joining the resulting strings with a separator (space by default). """ def __init__(self, separator=" ", element_formatter=string.Formatter(), *args, **kwargs): self.separator = separator self.element_formatter = element_formatter def format_element(self, elem, format_spec): """Format a single element For sequences, this is called once for each element in a sequence. For anything else, it is called on the entire object. It is intended to be overridden in subclases. 
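
        Examples
        --------
        A sketch of how the enclosing ``SequenceFormatter`` exercises this
        hook (values are made up)::

            SequenceFormatter(separator=",").format("{}", ["a", "b"])  # -> 'a,b'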
""" return self.element_formatter.format_field(elem, format_spec) def format_field(self, value, format_spec): if isinstance(value, (list, tuple, set, frozenset)): return self.separator.join(self.format_element(v, format_spec) for v in value) else: return self.format_element(value, format_spec) # TODO: eventually we might want to make use of attr module class File(object): """Helper for a file entry in the create_tree/@with_tree It allows to define additional settings for entries """ def __init__(self, name, executable=False): """ Parameters ---------- name : str Name of the file executable: bool, optional Make it executable """ self.name = name self.executable = executable def __str__(self): return self.name def create_tree_archive(path, name, load, overwrite=False, archives_leading_dir=True): """Given an archive `name`, create under `path` with specified `load` tree """ from datalad.support.archives import compress_files dirname = file_basename(name) full_dirname = op.join(path, dirname) os.makedirs(full_dirname) create_tree(full_dirname, load, archives_leading_dir=archives_leading_dir) # create archive if archives_leading_dir: compress_files([dirname], name, path=path, overwrite=overwrite) else: compress_files(list(map(basename, glob.glob(op.join(full_dirname, '*')))), op.join(pardir, name), path=op.join(path, dirname), overwrite=overwrite) # remove original tree rmtree(full_dirname) def create_tree(path, tree, archives_leading_dir=True, remove_existing=False): """Given a list of tuples (name, load) create such a tree if load is a tuple itself -- that would create either a subtree or an archive with that content and place it into the tree if name ends with .tar.gz """ lgr.log(5, "Creating a tree under %s", path) if not exists(path): os.makedirs(path) if isinstance(tree, dict): tree = tree.items() for file_, load in tree: if isinstance(file_, File): executable = file_.executable name = file_.name else: executable = False name = file_ full_name = op.join(path, name) if remove_existing and lexists(full_name): rmtree(full_name, chmod_files=True) if isinstance(load, (tuple, list, dict)): if name.endswith('.tar.gz') or name.endswith('.tar') or name.endswith('.zip'): create_tree_archive( path, name, load, archives_leading_dir=archives_leading_dir) else: create_tree( full_name, load, archives_leading_dir=archives_leading_dir, remove_existing=remove_existing) else: open_func = open if full_name.endswith('.gz'): open_func = gzip.open elif full_name.split('.')[-1] in ('xz', 'lzma'): import lzma open_func = lzma.open with open_func(full_name, "wb") as f: f.write(ensure_bytes(load, 'utf-8')) if executable: os.chmod(full_name, os.stat(full_name).st_mode | stat.S_IEXEC) def get_suggestions_msg(values, known, sep="\n "): """Return a formatted string with suggestions for values given the known ones """ import difflib suggestions = [] for value in ensure_list(values): # might not want to do it if we change presentation below suggestions += difflib.get_close_matches(value, known) suggestions = unique(suggestions) msg = "Did you mean any of these?" if suggestions: if '\n' in sep: # if separator includes new line - we add entire separator right away msg += sep else: msg += ' ' return msg + "%s\n" % sep.join(suggestions) return '' def bytes2human(n, format='%(value).1f %(symbol)sB'): """ Convert n bytes into a human readable string based on format. 
symbols can be either "customary", "customary_ext", "iec" or "iec_ext", see: http://goo.gl/kTQMs >>> from datalad.utils import bytes2human >>> bytes2human(1) '1.0 B' >>> bytes2human(1024) '1.0 KB' >>> bytes2human(1048576) '1.0 MB' >>> bytes2human(1099511627776127398123789121) '909.5 YB' >>> bytes2human(10000, "%(value).1f %(symbol)s/sec") '9.8 K/sec' >>> # precision can be adjusted by playing with %f operator >>> bytes2human(10000, format="%(value).5f %(symbol)s") '9.76562 K' Taken from: http://goo.gl/kTQMs and subsequently simplified Original Author: Giampaolo Rodola' <g.rodola [AT] gmail [DOT] com> License: MIT """ n = int(n) if n < 0: raise ValueError("n < 0") symbols = ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y') prefix = {} for i, s in enumerate(symbols[1:]): prefix[s] = 1 << (i + 1) * 10 for symbol in reversed(symbols[1:]): if n >= prefix[symbol]: value = float(n) / prefix[symbol] return format % locals() return format % dict(symbol=symbols[0], value=n) def quote_cmdlinearg(arg): """Perform platform-appropriate argument quoting""" # https://stackoverflow.com/a/15262019 return '"{}"'.format( arg.replace('"', '""') ) if on_windows else shlex_quote(arg) def guard_for_format(arg): """Replace { and } with {{ and }} To be used in cases if arg is not expected to have provided by user .format() placeholders, but 'arg' might become a part of a composite passed to .format(), e.g. via 'Run' """ return arg.replace('{', '{{').replace('}', '}}') def join_cmdline(args): """Join command line args into a string using quote_cmdlinearg """ return ' '.join(map(quote_cmdlinearg, args)) def split_cmdline(s): """Perform platform-appropriate command line splitting. Identical to `shlex.split()` on non-windows platforms. Modified from https://stackoverflow.com/a/35900070 """ if not on_windows: return shlex_split(s) # the rest is for windows RE_CMD_LEX = r'''"((?:""|\\["\\]|[^"])*)"?()|(\\\\(?=\\*")|\\")|(&&?|\|\|?|\d?>|[<])|([^\s"&|<>]+)|(\s+)|(.)''' args = [] accu = None # collects pieces of one arg for qs, qss, esc, pipe, word, white, fail in re.findall(RE_CMD_LEX, s): if word: pass # most frequent elif esc: word = esc[1] elif white or pipe: if accu is not None: args.append(accu) if pipe: args.append(pipe) accu = None continue elif fail: raise ValueError("invalid or incomplete shell string") elif qs: word = qs.replace('\\"', '"').replace('\\\\', '\\') if platform == 0: word = word.replace('""', '"') else: word = qss # may be even empty; must be last accu = (accu or '') + word if accu is not None: args.append(accu) return args def get_wrapped_class(wrapped): """Determine the command class a wrapped __call__ belongs to""" mod = sys.modules[wrapped.__module__] command_class_name = wrapped.__qualname__.split('.')[-2] _func_class = mod.__dict__[command_class_name] lgr.debug("Determined class of decorated function: %s", _func_class) return _func_class def _make_assure_kludge(fn): old_name = fn.__name__.replace("ensure", "assure") @wraps(fn) def compat_fn(*args, **kwargs): warnings.warn( "{} is deprecated and will be removed in a future release. " "Use {} instead." .format(old_name, fn.__name__), DeprecationWarning) return fn(*args, **kwargs) compat_fn.__doc__ = ("Note: This function is deprecated. Use {} instead." 
                         .format(fn.__name__))
    return compat_fn


assure_tuple_or_list = _make_assure_kludge(ensure_tuple_or_list)
assure_iter = _make_assure_kludge(ensure_iter)
assure_list = _make_assure_kludge(ensure_list)
assure_list_from_str = _make_assure_kludge(ensure_list_from_str)
assure_dict_from_str = _make_assure_kludge(ensure_dict_from_str)
assure_bytes = _make_assure_kludge(ensure_bytes)
assure_unicode = _make_assure_kludge(ensure_unicode)
assure_bool = _make_assure_kludge(ensure_bool)
assure_dir = _make_assure_kludge(ensure_dir)

lgr.log(5, "Done importing datalad.utils")


def check_symlink_capability(path, target):
    """Helper similar to datalad.tests.utils.has_symlink_capability

    However, for use in a datalad command context, we shouldn't assume to be
    able to write to a tmpfile, nor import a whole lot from datalad's test
    machinery. Finally, we want to know whether we can create a symlink at a
    specific location, not just somewhere. Therefore, use an arbitrary path to
    test-build a symlink and delete it afterwards. A suitable location can
    therefore be determined by higher-level code.

    Parameters
    ----------
    path: Path
    target: Path

    Returns
    -------
    bool
    """
    try:
        target.touch()
        path.symlink_to(target)
        return True
    except Exception:
        return False
    finally:
        if path.exists():
            path.unlink()
        if target.exists():
            target.unlink()
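

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the public API).  This is a minimal,
# hedged example of a few helpers defined above; the concrete values are made
# up for demonstration and the function is intentionally never invoked here.
def _usage_sketch():
    # wrap a scalar into a list, pass lists/tuples through unchanged
    assert ensure_list(1) == [1]
    assert ensure_list([1, 2]) == [1, 2]
    # order-preserving uniqueness, optionally based on a key function
    assert unique([3, 1, 3, 2]) == [3, 1, 2]
    assert unique(['ab', 'cd', 'ef'], key=len) == ['ab']
    # split an iterable into "false" and "true" item streams
    odds, evens = partition(range(5), lambda x: x % 2 == 0)
    assert list(evens) == [0, 2, 4] and list(odds) == [1, 3]
    # chunk a long list, e.g. to stay below command line length limits
    assert list(generate_chunks([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]
    # human readable sizes
    assert bytes2human(1048576) == '1.0 MB'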